This commit is contained in:
Lukian LEIZOUR 2023-03-02 16:26:41 +01:00
parent c997a4f034
commit 4afa276479
26 changed files with 1437 additions and 204 deletions

36
app.js
View file

@ -129,19 +129,7 @@ client.on('messageCreate', async msg => {
} }
else if (msg.content.startsWith('/q')) { else if (msg.content.startsWith('/q')) {
answerQuestion(msg.content.slice(+3)).then((res) => { msg.reply("utilise la slash commande enculé")
console.log('[Discord] Sent answer to : ' + msg.content.slice(+3));
addToLogs('[Discord] Sent answer to : ' + msg.content.slice(+3));
msg.reply(res.data.choices[0].text);
}).catch((err) => {
console.log(err);
addToLogs(err);
msg.reply("Something went wrong");
})
console.log('[Discord] Generating answer to : ' + msg.content.slice(+3));
addToLogs('[Discord] Generating answer to : ' + msg.content.slice(+3));
msg.reply('Generating the answer...');
} }
else if (msg.content.startsWith('/g')) { else if (msg.content.startsWith('/g')) {
@ -161,6 +149,28 @@ client.on('messageCreate', async msg => {
} }
}); });
client.on('interactionCreate', async interaction => {
if (!interaction.isCommand()) return;
if (interaction.commandName === 'gptrequest') {
answerQuestion(interaction.options.get('question').value).then((res) => {
const embed = new discord.EmbedBuilder().setTitle("test").setDescription(res.data.choices[0].message.content);
console.log('[Discord] Sent answer to : ' + interaction.options.get('question').value);
addToLogs('[Discord] Sent answer to : ' +interaction.options.get('question').value);
interaction.reply({ embeds : [embed] });
}).catch((err) => {
console.log(err);
addToLogs(err);
interaction.reply("Something went wrong");
})
console.log('[Discord] Generating answer to : ' + interaction.options.get('question').value);
addToLogs('[Discord] Generating answer to : ' + interaction.options.get('question').value);
}
});
//bot launch //bot launch
bot.launch() bot.launch()
client.login(process.env.DISCORD); client.login(process.env.DISCORD);

View file

@ -1,4 +1,5 @@
const { Configuration, OpenAIApi } = require("openai"); const { Configuration, OpenAIApi } = require("openai");
const { addToLogs } = require("./botTools");
const configuration = new Configuration({ const configuration = new Configuration({
apiKey: process.env.OPENAI, apiKey: process.env.OPENAI,
@ -18,15 +19,12 @@ async function generateImage(query, ctx, bot) {
}); });
return image; return image;
//image link : image.data[0].url
} }
async function answerQuestion(query) { async function answerQuestion(query) {
response = await openai.createCompletion({ response = await openai.createChatCompletion({
//model: "text-davinci-003",
model: "gpt-3.5-turbo", model: "gpt-3.5-turbo",
prompt: query, messages: [{ "role" : "user", "content" : query}],
max_tokens: 500, max_tokens: 500,
temperature: 0.9, temperature: 0.9,
}).catch((err) => { }).catch((err) => {
@ -34,6 +32,7 @@ async function answerQuestion(query) {
addToLogs("--> error : " + err); addToLogs("--> error : " + err);
}) })
console.log(response);
return response; return response;
} }

View file

@ -0,0 +1,2 @@
--> error : Error: Request failed with status code 401
--> res : undefined

8
node_modules/.package-lock.json generated vendored
View file

@ -1,7 +1,7 @@
{ {
"name": "roberto-bot", "name": "roberto-bot",
"version": "1.0.0", "version": "1.0.0",
"lockfileVersion": 2, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"node_modules/@discordjs/builders": { "node_modules/@discordjs/builders": {
@ -882,9 +882,9 @@
"integrity": "sha512-qAMrwuk2xLEutlASoiPiAMW3EN3K96Ka/ilSXYr6qR1zSVXw2j7+yDSqGTC4T9apfLYxM3tLLjKvgPdAUK7kYQ==" "integrity": "sha512-qAMrwuk2xLEutlASoiPiAMW3EN3K96Ka/ilSXYr6qR1zSVXw2j7+yDSqGTC4T9apfLYxM3tLLjKvgPdAUK7kYQ=="
}, },
"node_modules/openai": { "node_modules/openai": {
"version": "3.1.0", "version": "3.2.1",
"resolved": "https://registry.npmjs.org/openai/-/openai-3.1.0.tgz", "resolved": "https://registry.npmjs.org/openai/-/openai-3.2.1.tgz",
"integrity": "sha512-v5kKFH5o+8ld+t0arudj833Mgm3GcgBnbyN9946bj6u7bvel4Yg6YFz2A4HLIYDzmMjIo0s6vSG9x73kOwvdCg==", "integrity": "sha512-762C9BNlJPbjjlWZi4WYK9iM2tAVAv0uUp1UmI34vb0CN5T2mjB/qM6RYBmNKMh/dN9fC+bxqPwWJZUTWW052A==",
"dependencies": { "dependencies": {
"axios": "^0.26.0", "axios": "^0.26.0",
"form-data": "^4.0.0" "form-data": "^4.0.0"

25
node_modules/openai/.github/workflows/test.yml generated vendored Normal file
View file

@ -0,0 +1,25 @@
name: Node.js CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [16.x, 18.x]
steps:
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
cache: 'npm'
- run: npm ci
- run: npm run build

View file

@ -1 +1 @@
6.2.1 6.4.0

13
node_modules/openai/README.md generated vendored
View file

@ -23,7 +23,7 @@ const configuration = new Configuration({
const openai = new OpenAIApi(configuration); const openai = new OpenAIApi(configuration);
const completion = await openai.createCompletion({ const completion = await openai.createCompletion({
model: "text-davinci-002", model: "text-davinci-003",
prompt: "Hello world", prompt: "Hello world",
}); });
console.log(completion.data.choices[0].text); console.log(completion.data.choices[0].text);
@ -39,7 +39,7 @@ All of the available API request functions additionally contain an optional fina
```javascript ```javascript
const completion = await openai.createCompletion( const completion = await openai.createCompletion(
{ {
model: "text-davinci-002", model: "text-davinci-003",
prompt: "Hello world", prompt: "Hello world",
}, },
{ {
@ -58,7 +58,7 @@ API requests can potentially return errors due to invalid inputs or other issues
```javascript ```javascript
try { try {
const completion = await openai.createCompletion({ const completion = await openai.createCompletion({
model: "text-davinci-002", model: "text-davinci-003",
prompt: "Hello world", prompt: "Hello world",
}); });
console.log(completion.data.choices[0].text); console.log(completion.data.choices[0].text);
@ -72,13 +72,18 @@ try {
} }
``` ```
### Streaming completions
Streaming completions (`stream=true`) are not natively supported in this package yet, but [a workaround exists](https://github.com/openai/openai-node/issues/18#issuecomment-1369996933) if needed.
## Upgrade guide ## Upgrade guide
All breaking changes for major version releases are listed below. All breaking changes for major version releases are listed below.
### 3.0.0 ### 3.0.0
- The function signature of `createCompletion(engineId, params)` changed to `createCompletion(params)`. The value previously passed in as the `engineId` argument should now be passed in as `model` in the params object (e.g. `createCompletion({ model: "text-davinci-002, ... })`) - The function signature of `createCompletion(engineId, params)` changed to `createCompletion(params)`. The value previously passed in as the `engineId` argument should now be passed in as `model` in the params object (e.g. `createCompletion({ model: "text-davinci-003", ... })`)
- Replace any `createCompletionFromModel(params)` calls with `createCompletion(params)`
## Thanks ## Thanks

639
node_modules/openai/api.ts generated vendored
View file

@ -4,7 +4,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@ -13,13 +13,77 @@
*/ */
import { Configuration } from './configuration'; import type { Configuration } from './configuration';
import globalAxios, { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios'; import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
import globalAxios from 'axios';
// Some imports not used depending on template conditions // Some imports not used depending on template conditions
// @ts-ignore // @ts-ignore
import { DUMMY_BASE_URL, assertParamExists, setApiKeyToObject, setBasicAuthToObject, setBearerAuthToObject, setOAuthToObject, setSearchParams, serializeDataIfNeeded, toPathString, createRequestFunction } from './common'; import { DUMMY_BASE_URL, assertParamExists, setApiKeyToObject, setBasicAuthToObject, setBearerAuthToObject, setOAuthToObject, setSearchParams, serializeDataIfNeeded, toPathString, createRequestFunction } from './common';
import type { RequestArgs } from './base';
// @ts-ignore // @ts-ignore
import { BASE_PATH, COLLECTION_FORMATS, RequestArgs, BaseAPI, RequiredError } from './base'; import { BASE_PATH, COLLECTION_FORMATS, BaseAPI, RequiredError } from './base';
/**
*
* @export
* @interface ChatCompletionRequestMessage
*/
export interface ChatCompletionRequestMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'role': ChatCompletionRequestMessageRoleEnum;
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'content': string;
/**
* The name of the user in a multi-user chat
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'name'?: string;
}
export const ChatCompletionRequestMessageRoleEnum = {
System: 'system',
User: 'user',
Assistant: 'assistant'
} as const;
export type ChatCompletionRequestMessageRoleEnum = typeof ChatCompletionRequestMessageRoleEnum[keyof typeof ChatCompletionRequestMessageRoleEnum];
/**
*
* @export
* @interface ChatCompletionResponseMessage
*/
export interface ChatCompletionResponseMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
'role': ChatCompletionResponseMessageRoleEnum;
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
'content': string;
}
export const ChatCompletionResponseMessageRoleEnum = {
System: 'system',
User: 'user',
Assistant: 'assistant'
} as const;
export type ChatCompletionResponseMessageRoleEnum = typeof ChatCompletionResponseMessageRoleEnum[keyof typeof ChatCompletionResponseMessageRoleEnum];
/** /**
* *
@ -76,7 +140,7 @@ export interface CreateAnswerRequest {
*/ */
'max_rerank'?: number | null; 'max_rerank'?: number | null;
/** /**
* What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks and value 0 (argmax sampling) works better for scenarios with a well-defined answer. * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
* @type {number} * @type {number}
* @memberof CreateAnswerRequest * @memberof CreateAnswerRequest
*/ */
@ -130,7 +194,7 @@ export interface CreateAnswerRequest {
*/ */
'expand'?: Array<any> | null; 'expand'?: Array<any> | null;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateAnswerRequest * @memberof CreateAnswerRequest
*/ */
@ -205,6 +269,160 @@ export interface CreateAnswerResponseSelectedDocumentsInner {
*/ */
'text'?: string; 'text'?: string;
} }
/**
*
* @export
* @interface CreateChatCompletionRequest
*/
export interface CreateChatCompletionRequest {
/**
* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
* @type {string}
* @memberof CreateChatCompletionRequest
*/
'model': string;
/**
* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
* @type {Array<ChatCompletionRequestMessage>}
* @memberof CreateChatCompletionRequest
*/
'messages': Array<ChatCompletionRequestMessage>;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'temperature'?: number | null;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'top_p'?: number | null;
/**
* How many chat completion choices to generate for each input message.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'n'?: number | null;
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
* @type {boolean}
* @memberof CreateChatCompletionRequest
*/
'stream'?: boolean | null;
/**
*
* @type {CreateChatCompletionRequestStop}
* @memberof CreateChatCompletionRequest
*/
'stop'?: CreateChatCompletionRequestStop;
/**
* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'max_tokens'?: number;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'presence_penalty'?: number | null;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'frequency_penalty'?: number | null;
/**
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
* @type {object}
* @memberof CreateChatCompletionRequest
*/
'logit_bias'?: object | null;
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string}
* @memberof CreateChatCompletionRequest
*/
'user'?: string;
}
/**
* @type CreateChatCompletionRequestStop
* Up to 4 sequences where the API will stop generating further tokens.
* @export
*/
export type CreateChatCompletionRequestStop = Array<string> | string;
/**
*
* @export
* @interface CreateChatCompletionResponse
*/
export interface CreateChatCompletionResponse {
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
'id': string;
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
'object': string;
/**
*
* @type {number}
* @memberof CreateChatCompletionResponse
*/
'created': number;
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
'model': string;
/**
*
* @type {Array<CreateChatCompletionResponseChoicesInner>}
* @memberof CreateChatCompletionResponse
*/
'choices': Array<CreateChatCompletionResponseChoicesInner>;
/**
*
* @type {CreateCompletionResponseUsage}
* @memberof CreateChatCompletionResponse
*/
'usage'?: CreateCompletionResponseUsage;
}
/**
*
* @export
* @interface CreateChatCompletionResponseChoicesInner
*/
export interface CreateChatCompletionResponseChoicesInner {
/**
*
* @type {number}
* @memberof CreateChatCompletionResponseChoicesInner
*/
'index'?: number;
/**
*
* @type {ChatCompletionResponseMessage}
* @memberof CreateChatCompletionResponseChoicesInner
*/
'message'?: ChatCompletionResponseMessage;
/**
*
* @type {string}
* @memberof CreateChatCompletionResponseChoicesInner
*/
'finish_reason'?: string;
}
/** /**
* *
* @export * @export
@ -248,7 +466,7 @@ export interface CreateClassificationRequest {
*/ */
'search_model'?: string | null; 'search_model'?: string | null;
/** /**
* What sampling `temperature` to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
* @type {number} * @type {number}
* @memberof CreateClassificationRequest * @memberof CreateClassificationRequest
*/ */
@ -290,7 +508,7 @@ export interface CreateClassificationRequest {
*/ */
'expand'?: Array<any> | null; 'expand'?: Array<any> | null;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateClassificationRequest * @memberof CreateClassificationRequest
*/ */
@ -395,7 +613,7 @@ export interface CreateCompletionRequest {
*/ */
'max_tokens'?: number | null; 'max_tokens'?: number | null;
/** /**
* What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both. * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number} * @type {number}
* @memberof CreateCompletionRequest * @memberof CreateCompletionRequest
*/ */
@ -461,7 +679,7 @@ export interface CreateCompletionRequest {
*/ */
'logit_bias'?: object | null; 'logit_bias'?: object | null;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateCompletionRequest * @memberof CreateCompletionRequest
*/ */
@ -618,7 +836,7 @@ export interface CreateCompletionResponseUsage {
*/ */
export interface CreateEditRequest { export interface CreateEditRequest {
/** /**
* ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
* @type {string} * @type {string}
* @memberof CreateEditRequest * @memberof CreateEditRequest
*/ */
@ -642,7 +860,7 @@ export interface CreateEditRequest {
*/ */
'n'?: number | null; 'n'?: number | null;
/** /**
* What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both. * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number} * @type {number}
* @memberof CreateEditRequest * @memberof CreateEditRequest
*/ */
@ -660,12 +878,6 @@ export interface CreateEditRequest {
* @interface CreateEditResponse * @interface CreateEditResponse
*/ */
export interface CreateEditResponse { export interface CreateEditResponse {
/**
*
* @type {string}
* @memberof CreateEditResponse
*/
'id': string;
/** /**
* *
* @type {string} * @type {string}
@ -678,12 +890,6 @@ export interface CreateEditResponse {
* @memberof CreateEditResponse * @memberof CreateEditResponse
*/ */
'created': number; 'created': number;
/**
*
* @type {string}
* @memberof CreateEditResponse
*/
'model': string;
/** /**
* *
* @type {Array<CreateCompletionResponseChoicesInner>} * @type {Array<CreateCompletionResponseChoicesInner>}
@ -716,7 +922,7 @@ export interface CreateEmbeddingRequest {
*/ */
'input': CreateEmbeddingRequestInput; 'input': CreateEmbeddingRequestInput;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateEmbeddingRequest * @memberof CreateEmbeddingRequest
*/ */
@ -724,7 +930,7 @@ export interface CreateEmbeddingRequest {
} }
/** /**
* @type CreateEmbeddingRequestInput * @type CreateEmbeddingRequestInput
* Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 2048 tokens in length. Unless you are embedding code, we suggest replacing newlines (`\\n`) in your input with a single space, as we have observed inferior results when newlines are present. * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.
* @export * @export
*/ */
export type CreateEmbeddingRequestInput = Array<any> | Array<number> | Array<string> | string; export type CreateEmbeddingRequestInput = Array<any> | Array<number> | Array<string> | string;
@ -823,7 +1029,7 @@ export interface CreateFineTuneRequest {
*/ */
'validation_file'?: string | null; 'validation_file'?: string | null;
/** /**
* The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://beta.openai.com/docs/models) documentation. * The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation.
* @type {string} * @type {string}
* @memberof CreateFineTuneRequest * @memberof CreateFineTuneRequest
*/ */
@ -914,7 +1120,7 @@ export interface CreateImageRequest {
*/ */
'response_format'?: CreateImageRequestResponseFormatEnum; 'response_format'?: CreateImageRequestResponseFormatEnum;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateImageRequest * @memberof CreateImageRequest
*/ */
@ -1146,7 +1352,7 @@ export interface CreateSearchRequest {
*/ */
'return_metadata'?: boolean | null; 'return_metadata'?: boolean | null;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateSearchRequest * @memberof CreateSearchRequest
*/ */
@ -1202,6 +1408,32 @@ export interface CreateSearchResponseDataInner {
*/ */
'score'?: number; 'score'?: number;
} }
/**
*
* @export
* @interface CreateTranscriptionResponse
*/
export interface CreateTranscriptionResponse {
/**
*
* @type {string}
* @memberof CreateTranscriptionResponse
*/
'text': string;
}
/**
*
* @export
* @interface CreateTranslationResponse
*/
export interface CreateTranslationResponse {
/**
*
* @type {string}
* @memberof CreateTranslationResponse
*/
'text': string;
}
/** /**
* *
* @export * @export
@ -1696,6 +1928,42 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
options: localVarRequestOptions, options: localVarRequestOptions,
}; };
}, },
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createChatCompletion: async (createChatCompletionRequest: CreateChatCompletionRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
// verify required parameter 'createChatCompletionRequest' is not null or undefined
assertParamExists('createChatCompletion', 'createChatCompletionRequest', createChatCompletionRequest)
const localVarPath = `/chat/completions`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
const localVarHeaderParameter = {} as any;
const localVarQueryParameter = {} as any;
localVarHeaderParameter['Content-Type'] = 'application/json';
setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
localVarRequestOptions.data = serializeDataIfNeeded(createChatCompletionRequest, localVarRequestOptions, configuration)
return {
url: toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
},
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -1771,7 +2039,7 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
}, },
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -1964,21 +2232,19 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createImageEdit: async (image: File, mask: File, prompt: string, n?: number, size?: string, responseFormat?: string, user?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => { createImageEdit: async (image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
// verify required parameter 'image' is not null or undefined // verify required parameter 'image' is not null or undefined
assertParamExists('createImageEdit', 'image', image) assertParamExists('createImageEdit', 'image', image)
// verify required parameter 'mask' is not null or undefined
assertParamExists('createImageEdit', 'mask', mask)
// verify required parameter 'prompt' is not null or undefined // verify required parameter 'prompt' is not null or undefined
assertParamExists('createImageEdit', 'prompt', prompt) assertParamExists('createImageEdit', 'prompt', prompt)
const localVarPath = `/images/edits`; const localVarPath = `/images/edits`;
@ -2043,7 +2309,7 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -2174,6 +2440,137 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
options: localVarRequestOptions, options: localVarRequestOptions,
}; };
}, },
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranscription: async (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
// verify required parameter 'file' is not null or undefined
assertParamExists('createTranscription', 'file', file)
// verify required parameter 'model' is not null or undefined
assertParamExists('createTranscription', 'model', model)
const localVarPath = `/audio/transcriptions`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
const localVarHeaderParameter = {} as any;
const localVarQueryParameter = {} as any;
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
if (file !== undefined) {
localVarFormParams.append('file', file as any);
}
if (model !== undefined) {
localVarFormParams.append('model', model as any);
}
if (prompt !== undefined) {
localVarFormParams.append('prompt', prompt as any);
}
if (responseFormat !== undefined) {
localVarFormParams.append('response_format', responseFormat as any);
}
if (temperature !== undefined) {
localVarFormParams.append('temperature', temperature as any);
}
if (language !== undefined) {
localVarFormParams.append('language', language as any);
}
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
localVarRequestOptions.data = localVarFormParams;
return {
url: toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
},
/**
*
* @summary Translates audio into into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranslation: async (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
// verify required parameter 'file' is not null or undefined
assertParamExists('createTranslation', 'file', file)
// verify required parameter 'model' is not null or undefined
assertParamExists('createTranslation', 'model', model)
const localVarPath = `/audio/translations`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
const localVarHeaderParameter = {} as any;
const localVarQueryParameter = {} as any;
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
if (file !== undefined) {
localVarFormParams.append('file', file as any);
}
if (model !== undefined) {
localVarFormParams.append('model', model as any);
}
if (prompt !== undefined) {
localVarFormParams.append('prompt', prompt as any);
}
if (responseFormat !== undefined) {
localVarFormParams.append('response_format', responseFormat as any);
}
if (temperature !== undefined) {
localVarFormParams.append('temperature', temperature as any);
}
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
localVarRequestOptions.data = localVarFormParams;
return {
url: toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
},
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -2606,6 +3003,17 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
const localVarAxiosArgs = await localVarAxiosParamCreator.createAnswer(createAnswerRequest, options); const localVarAxiosArgs = await localVarAxiosParamCreator.createAnswer(createAnswerRequest, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration); return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
}, },
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateChatCompletionResponse>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.createChatCompletion(createChatCompletionRequest, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -2631,7 +3039,7 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
}, },
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -2688,18 +3096,18 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
async createImageEdit(image: File, mask: File, prompt: string, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>> { async createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.createImageEdit(image, mask, prompt, n, size, responseFormat, user, options); const localVarAxiosArgs = await localVarAxiosParamCreator.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration); return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
}, },
/** /**
@ -2709,7 +3117,7 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -2741,6 +3149,37 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
const localVarAxiosArgs = await localVarAxiosParamCreator.createSearch(engineId, createSearchRequest, options); const localVarAxiosArgs = await localVarAxiosParamCreator.createSearch(engineId, createSearchRequest, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration); return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
}, },
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranscriptionResponse>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, language, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
/**
*
* @summary Translates audio into into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranslationResponse>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.createTranslation(file, model, prompt, responseFormat, temperature, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -2903,6 +3342,16 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise<CreateAnswerResponse> { createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise<CreateAnswerResponse> {
return localVarFp.createAnswer(createAnswerRequest, options).then((request) => request(axios, basePath)); return localVarFp.createAnswer(createAnswerRequest, options).then((request) => request(axios, basePath));
}, },
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: any): AxiosPromise<CreateChatCompletionResponse> {
return localVarFp.createChatCompletion(createChatCompletionRequest, options).then((request) => request(axios, basePath));
},
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -2926,7 +3375,7 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
}, },
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -2978,18 +3427,18 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createImageEdit(image: File, mask: File, prompt: string, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise<ImagesResponse> { createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise<ImagesResponse> {
return localVarFp.createImageEdit(image, mask, prompt, n, size, responseFormat, user, options).then((request) => request(axios, basePath)); return localVarFp.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(axios, basePath));
}, },
/** /**
* *
@ -2998,7 +3447,7 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -3027,6 +3476,35 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: any): AxiosPromise<CreateSearchResponse> { createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: any): AxiosPromise<CreateSearchResponse> {
return localVarFp.createSearch(engineId, createSearchRequest, options).then((request) => request(axios, basePath)); return localVarFp.createSearch(engineId, createSearchRequest, options).then((request) => request(axios, basePath));
}, },
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: any): AxiosPromise<CreateTranscriptionResponse> {
return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Translates audio into into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: any): AxiosPromise<CreateTranslationResponse> {
return localVarFp.createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(axios, basePath));
},
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -3181,6 +3659,18 @@ export class OpenAIApi extends BaseAPI {
return OpenAIApiFp(this.configuration).createAnswer(createAnswerRequest, options).then((request) => request(this.axios, this.basePath)); return OpenAIApiFp(this.configuration).createAnswer(createAnswerRequest, options).then((request) => request(this.axios, this.basePath));
} }
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
public createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig) {
return OpenAIApiFp(this.configuration).createChatCompletion(createChatCompletionRequest, options).then((request) => request(this.axios, this.basePath));
}
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -3208,7 +3698,7 @@ export class OpenAIApi extends BaseAPI {
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -3270,19 +3760,19 @@ export class OpenAIApi extends BaseAPI {
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
* @memberof OpenAIApi * @memberof OpenAIApi
*/ */
public createImageEdit(image: File, mask: File, prompt: string, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) { public createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) {
return OpenAIApiFp(this.configuration).createImageEdit(image, mask, prompt, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath)); return OpenAIApiFp(this.configuration).createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath));
} }
/** /**
@ -3292,7 +3782,7 @@ export class OpenAIApi extends BaseAPI {
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
* @memberof OpenAIApi * @memberof OpenAIApi
@ -3327,6 +3817,39 @@ export class OpenAIApi extends BaseAPI {
return OpenAIApiFp(this.configuration).createSearch(engineId, createSearchRequest, options).then((request) => request(this.axios, this.basePath)); return OpenAIApiFp(this.configuration).createSearch(engineId, createSearchRequest, options).then((request) => request(this.axios, this.basePath));
} }
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
public createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig) {
return OpenAIApiFp(this.configuration).createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(this.axios, this.basePath));
}
/**
*
* @summary Translates audio into into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
public createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig) {
return OpenAIApiFp(this.configuration).createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(this.axios, this.basePath));
}
/** /**
* *
* @summary Delete a file. * @summary Delete a file.

9
node_modules/openai/base.ts generated vendored
View file

@ -4,7 +4,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@ -13,10 +13,11 @@
*/ */
import { Configuration } from "./configuration"; import type { Configuration } from './configuration';
// Some imports not used depending on template conditions // Some imports not used depending on template conditions
// @ts-ignore // @ts-ignore
import globalAxios, { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios'; import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
import globalAxios from 'axios';
export const BASE_PATH = "https://api.openai.com/v1".replace(/\/+$/, ""); export const BASE_PATH = "https://api.openai.com/v1".replace(/\/+$/, "");
@ -64,8 +65,8 @@ export class BaseAPI {
* @extends {Error} * @extends {Error}
*/ */
export class RequiredError extends Error { export class RequiredError extends Error {
name: "RequiredError" = "RequiredError";
constructor(public field: string, msg?: string) { constructor(public field: string, msg?: string) {
super(msg); super(msg);
this.name = "RequiredError"
} }
} }

10
node_modules/openai/common.ts generated vendored
View file

@ -4,7 +4,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@ -13,9 +13,10 @@
*/ */
import { Configuration } from "./configuration"; import type { Configuration } from "./configuration";
import { RequiredError, RequestArgs } from "./base"; import type { RequestArgs } from "./base";
import { AxiosInstance, AxiosResponse } from 'axios'; import type { AxiosInstance, AxiosResponse } from 'axios';
import { RequiredError } from "./base";
/** /**
* *
@ -84,6 +85,7 @@ export const setOAuthToObject = async function (object: any, name: string, scope
} }
function setFlattenedQueryParams(urlSearchParams: URLSearchParams, parameter: any, key: string = ""): void { function setFlattenedQueryParams(urlSearchParams: URLSearchParams, parameter: any, key: string = ""): void {
if (parameter == null) return;
if (typeof parameter === "object") { if (typeof parameter === "object") {
if (Array.isArray(parameter)) { if (Array.isArray(parameter)) {
(parameter as any[]).forEach(item => setFlattenedQueryParams(urlSearchParams, item, key)); (parameter as any[]).forEach(item => setFlattenedQueryParams(urlSearchParams, item, key));

View file

@ -4,7 +4,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

465
node_modules/openai/dist/api.d.ts generated vendored
View file

@ -2,16 +2,73 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech * https://openapi-generator.tech
* Do not edit the class manually. * Do not edit the class manually.
*/ */
import { Configuration } from './configuration'; import type { Configuration } from './configuration';
import { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios'; import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
import { RequestArgs, BaseAPI } from './base'; import type { RequestArgs } from './base';
import { BaseAPI } from './base';
/**
*
* @export
* @interface ChatCompletionRequestMessage
*/
export interface ChatCompletionRequestMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'role': ChatCompletionRequestMessageRoleEnum;
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'content': string;
/**
* The name of the user in a multi-user chat
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'name'?: string;
}
export declare const ChatCompletionRequestMessageRoleEnum: {
readonly System: "system";
readonly User: "user";
readonly Assistant: "assistant";
};
export declare type ChatCompletionRequestMessageRoleEnum = typeof ChatCompletionRequestMessageRoleEnum[keyof typeof ChatCompletionRequestMessageRoleEnum];
/**
*
* @export
* @interface ChatCompletionResponseMessage
*/
export interface ChatCompletionResponseMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
'role': ChatCompletionResponseMessageRoleEnum;
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
'content': string;
}
export declare const ChatCompletionResponseMessageRoleEnum: {
readonly System: "system";
readonly User: "user";
readonly Assistant: "assistant";
};
export declare type ChatCompletionResponseMessageRoleEnum = typeof ChatCompletionResponseMessageRoleEnum[keyof typeof ChatCompletionResponseMessageRoleEnum];
/** /**
* *
* @export * @export
@ -67,7 +124,7 @@ export interface CreateAnswerRequest {
*/ */
'max_rerank'?: number | null; 'max_rerank'?: number | null;
/** /**
* What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks and value 0 (argmax sampling) works better for scenarios with a well-defined answer. * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
* @type {number} * @type {number}
* @memberof CreateAnswerRequest * @memberof CreateAnswerRequest
*/ */
@ -121,7 +178,7 @@ export interface CreateAnswerRequest {
*/ */
'expand'?: Array<any> | null; 'expand'?: Array<any> | null;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateAnswerRequest * @memberof CreateAnswerRequest
*/ */
@ -195,6 +252,159 @@ export interface CreateAnswerResponseSelectedDocumentsInner {
*/ */
'text'?: string; 'text'?: string;
} }
/**
*
* @export
* @interface CreateChatCompletionRequest
*/
export interface CreateChatCompletionRequest {
/**
* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
* @type {string}
* @memberof CreateChatCompletionRequest
*/
'model': string;
/**
* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
* @type {Array<ChatCompletionRequestMessage>}
* @memberof CreateChatCompletionRequest
*/
'messages': Array<ChatCompletionRequestMessage>;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'temperature'?: number | null;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'top_p'?: number | null;
/**
* How many chat completion choices to generate for each input message.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'n'?: number | null;
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
* @type {boolean}
* @memberof CreateChatCompletionRequest
*/
'stream'?: boolean | null;
/**
*
* @type {CreateChatCompletionRequestStop}
* @memberof CreateChatCompletionRequest
*/
'stop'?: CreateChatCompletionRequestStop;
/**
* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'max_tokens'?: number;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'presence_penalty'?: number | null;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
'frequency_penalty'?: number | null;
/**
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
* @type {object}
* @memberof CreateChatCompletionRequest
*/
'logit_bias'?: object | null;
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string}
* @memberof CreateChatCompletionRequest
*/
'user'?: string;
}
/**
* @type CreateChatCompletionRequestStop
* Up to 4 sequences where the API will stop generating further tokens.
* @export
*/
export declare type CreateChatCompletionRequestStop = Array<string> | string;
/**
*
* @export
* @interface CreateChatCompletionResponse
*/
export interface CreateChatCompletionResponse {
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
'id': string;
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
'object': string;
/**
*
* @type {number}
* @memberof CreateChatCompletionResponse
*/
'created': number;
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
'model': string;
/**
*
* @type {Array<CreateChatCompletionResponseChoicesInner>}
* @memberof CreateChatCompletionResponse
*/
'choices': Array<CreateChatCompletionResponseChoicesInner>;
/**
*
* @type {CreateCompletionResponseUsage}
* @memberof CreateChatCompletionResponse
*/
'usage'?: CreateCompletionResponseUsage;
}
/**
*
* @export
* @interface CreateChatCompletionResponseChoicesInner
*/
export interface CreateChatCompletionResponseChoicesInner {
/**
*
* @type {number}
* @memberof CreateChatCompletionResponseChoicesInner
*/
'index'?: number;
/**
*
* @type {ChatCompletionResponseMessage}
* @memberof CreateChatCompletionResponseChoicesInner
*/
'message'?: ChatCompletionResponseMessage;
/**
*
* @type {string}
* @memberof CreateChatCompletionResponseChoicesInner
*/
'finish_reason'?: string;
}
/** /**
* *
* @export * @export
@ -238,7 +448,7 @@ export interface CreateClassificationRequest {
*/ */
'search_model'?: string | null; 'search_model'?: string | null;
/** /**
* What sampling `temperature` to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
* @type {number} * @type {number}
* @memberof CreateClassificationRequest * @memberof CreateClassificationRequest
*/ */
@ -280,7 +490,7 @@ export interface CreateClassificationRequest {
*/ */
'expand'?: Array<any> | null; 'expand'?: Array<any> | null;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateClassificationRequest * @memberof CreateClassificationRequest
*/ */
@ -385,7 +595,7 @@ export interface CreateCompletionRequest {
*/ */
'max_tokens'?: number | null; 'max_tokens'?: number | null;
/** /**
* What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both. * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number} * @type {number}
* @memberof CreateCompletionRequest * @memberof CreateCompletionRequest
*/ */
@ -451,7 +661,7 @@ export interface CreateCompletionRequest {
*/ */
'logit_bias'?: object | null; 'logit_bias'?: object | null;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateCompletionRequest * @memberof CreateCompletionRequest
*/ */
@ -606,7 +816,7 @@ export interface CreateCompletionResponseUsage {
*/ */
export interface CreateEditRequest { export interface CreateEditRequest {
/** /**
* ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
* @type {string} * @type {string}
* @memberof CreateEditRequest * @memberof CreateEditRequest
*/ */
@ -630,7 +840,7 @@ export interface CreateEditRequest {
*/ */
'n'?: number | null; 'n'?: number | null;
/** /**
* What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both. * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number} * @type {number}
* @memberof CreateEditRequest * @memberof CreateEditRequest
*/ */
@ -648,12 +858,6 @@ export interface CreateEditRequest {
* @interface CreateEditResponse * @interface CreateEditResponse
*/ */
export interface CreateEditResponse { export interface CreateEditResponse {
/**
*
* @type {string}
* @memberof CreateEditResponse
*/
'id': string;
/** /**
* *
* @type {string} * @type {string}
@ -666,12 +870,6 @@ export interface CreateEditResponse {
* @memberof CreateEditResponse * @memberof CreateEditResponse
*/ */
'created': number; 'created': number;
/**
*
* @type {string}
* @memberof CreateEditResponse
*/
'model': string;
/** /**
* *
* @type {Array<CreateCompletionResponseChoicesInner>} * @type {Array<CreateCompletionResponseChoicesInner>}
@ -704,7 +902,7 @@ export interface CreateEmbeddingRequest {
*/ */
'input': CreateEmbeddingRequestInput; 'input': CreateEmbeddingRequestInput;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateEmbeddingRequest * @memberof CreateEmbeddingRequest
*/ */
@ -712,7 +910,7 @@ export interface CreateEmbeddingRequest {
} }
/** /**
* @type CreateEmbeddingRequestInput * @type CreateEmbeddingRequestInput
* Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 2048 tokens in length. Unless you are embedding code, we suggest replacing newlines (`\\n`) in your input with a single space, as we have observed inferior results when newlines are present. * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.
* @export * @export
*/ */
export declare type CreateEmbeddingRequestInput = Array<any> | Array<number> | Array<string> | string; export declare type CreateEmbeddingRequestInput = Array<any> | Array<number> | Array<string> | string;
@ -810,7 +1008,7 @@ export interface CreateFineTuneRequest {
*/ */
'validation_file'?: string | null; 'validation_file'?: string | null;
/** /**
* The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://beta.openai.com/docs/models) documentation. * The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation.
* @type {string} * @type {string}
* @memberof CreateFineTuneRequest * @memberof CreateFineTuneRequest
*/ */
@ -901,7 +1099,7 @@ export interface CreateImageRequest {
*/ */
'response_format'?: CreateImageRequestResponseFormatEnum; 'response_format'?: CreateImageRequestResponseFormatEnum;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateImageRequest * @memberof CreateImageRequest
*/ */
@ -1128,7 +1326,7 @@ export interface CreateSearchRequest {
*/ */
'return_metadata'?: boolean | null; 'return_metadata'?: boolean | null;
/** /**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string} * @type {string}
* @memberof CreateSearchRequest * @memberof CreateSearchRequest
*/ */
@ -1184,6 +1382,32 @@ export interface CreateSearchResponseDataInner {
*/ */
'score'?: number; 'score'?: number;
} }
/**
*
* @export
* @interface CreateTranscriptionResponse
*/
export interface CreateTranscriptionResponse {
/**
*
* @type {string}
* @memberof CreateTranscriptionResponse
*/
'text': string;
}
/**
*
* @export
* @interface CreateTranslationResponse
*/
export interface CreateTranslationResponse {
/**
*
* @type {string}
* @memberof CreateTranslationResponse
*/
'text': string;
}
/** /**
* *
* @export * @export
@ -1622,6 +1846,14 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createAnswer: (createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>; createAnswer: (createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createChatCompletion: (createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -1641,7 +1873,7 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
createCompletion: (createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>; createCompletion: (createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -1683,17 +1915,17 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createImageEdit: (image: File, mask: File, prompt: string, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) => Promise<RequestArgs>; createImageEdit: (image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/** /**
* *
* @summary Creates a variation of a given image. * @summary Creates a variation of a given image.
@ -1701,7 +1933,7 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -1724,6 +1956,31 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createSearch: (engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>; createSearch: (engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranscription: (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/**
*
* @summary Translates audio into into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranslation: (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -1842,6 +2099,14 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateAnswerResponse>>; createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateAnswerResponse>>;
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateChatCompletionResponse>>;
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -1861,7 +2126,7 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>>; createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>>;
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -1903,17 +2168,17 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createImageEdit(image: File, mask: File, prompt: string, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>>; createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>>;
/** /**
* *
* @summary Creates a variation of a given image. * @summary Creates a variation of a given image.
@ -1921,7 +2186,7 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -1944,6 +2209,31 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateSearchResponse>>; createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateSearchResponse>>;
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranscriptionResponse>>;
/**
*
* @summary Translates audio into into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranslationResponse>>;
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -2062,6 +2352,14 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise<CreateAnswerResponse>; createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise<CreateAnswerResponse>;
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: any): AxiosPromise<CreateChatCompletionResponse>;
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -2081,7 +2379,7 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
createCompletion(createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse>; createCompletion(createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse>;
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -2123,17 +2421,17 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createImageEdit(image: File, mask: File, prompt: string, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise<ImagesResponse>; createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise<ImagesResponse>;
/** /**
* *
* @summary Creates a variation of a given image. * @summary Creates a variation of a given image.
@ -2141,7 +2439,7 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -2164,6 +2462,31 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: any): AxiosPromise<CreateSearchResponse>; createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: any): AxiosPromise<CreateSearchResponse>;
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: any): AxiosPromise<CreateTranscriptionResponse>;
/**
*
* @summary Translates audio into into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: any): AxiosPromise<CreateTranslationResponse>;
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -2286,6 +2609,15 @@ export declare class OpenAIApi extends BaseAPI {
* @memberof OpenAIApi * @memberof OpenAIApi
*/ */
createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateAnswerResponse, any>>; createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateAnswerResponse, any>>;
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateChatCompletionResponse, any>>;
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -2307,7 +2639,7 @@ export declare class OpenAIApi extends BaseAPI {
createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateCompletionResponse, any>>; createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateCompletionResponse, any>>;
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -2354,18 +2686,18 @@ export declare class OpenAIApi extends BaseAPI {
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
* @memberof OpenAIApi * @memberof OpenAIApi
*/ */
createImageEdit(image: File, mask: File, prompt: string, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ImagesResponse, any>>; createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<ImagesResponse, any>>;
/** /**
* *
* @summary Creates a variation of a given image. * @summary Creates a variation of a given image.
@ -2373,7 +2705,7 @@ export declare class OpenAIApi extends BaseAPI {
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
* @memberof OpenAIApi * @memberof OpenAIApi
@ -2399,6 +2731,33 @@ export declare class OpenAIApi extends BaseAPI {
* @memberof OpenAIApi * @memberof OpenAIApi
*/ */
createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateSearchResponse, any>>; createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateSearchResponse, any>>;
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
 * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateTranscriptionResponse, any>>;
/**
*
 * @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
 * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateTranslationResponse, any>>;
/** /**
* *
* @summary Delete a file. * @summary Delete a file.

337
node_modules/openai/dist/api.js generated vendored
View file

@ -5,7 +5,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@ -22,13 +22,23 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
}); });
}; };
Object.defineProperty(exports, "__esModule", { value: true }); Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIApi = exports.OpenAIApiFactory = exports.OpenAIApiFp = exports.OpenAIApiAxiosParamCreator = exports.CreateImageRequestResponseFormatEnum = exports.CreateImageRequestSizeEnum = void 0; exports.OpenAIApi = exports.OpenAIApiFactory = exports.OpenAIApiFp = exports.OpenAIApiAxiosParamCreator = exports.CreateImageRequestResponseFormatEnum = exports.CreateImageRequestSizeEnum = exports.ChatCompletionResponseMessageRoleEnum = exports.ChatCompletionRequestMessageRoleEnum = void 0;
const axios_1 = require("axios"); const axios_1 = require("axios");
// Some imports not used depending on template conditions // Some imports not used depending on template conditions
// @ts-ignore // @ts-ignore
const common_1 = require("./common"); const common_1 = require("./common");
// @ts-ignore // @ts-ignore
const base_1 = require("./base"); const base_1 = require("./base");
exports.ChatCompletionRequestMessageRoleEnum = {
System: 'system',
User: 'user',
Assistant: 'assistant'
};
exports.ChatCompletionResponseMessageRoleEnum = {
System: 'system',
User: 'user',
Assistant: 'assistant'
};
exports.CreateImageRequestSizeEnum = { exports.CreateImageRequestSizeEnum = {
_256x256: '256x256', _256x256: '256x256',
_512x512: '512x512', _512x512: '512x512',
@ -104,6 +114,36 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
options: localVarRequestOptions, options: localVarRequestOptions,
}; };
}), }),
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createChatCompletion: (createChatCompletionRequest, options = {}) => __awaiter(this, void 0, void 0, function* () {
// verify required parameter 'createChatCompletionRequest' is not null or undefined
common_1.assertParamExists('createChatCompletion', 'createChatCompletionRequest', createChatCompletionRequest);
const localVarPath = `/chat/completions`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, common_1.DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = Object.assign(Object.assign({ method: 'POST' }, baseOptions), options);
const localVarHeaderParameter = {};
const localVarQueryParameter = {};
localVarHeaderParameter['Content-Type'] = 'application/json';
common_1.setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = Object.assign(Object.assign(Object.assign({}, localVarHeaderParameter), headersFromBaseOptions), options.headers);
localVarRequestOptions.data = common_1.serializeDataIfNeeded(createChatCompletionRequest, localVarRequestOptions, configuration);
return {
url: common_1.toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
}),
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -167,7 +207,7 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
}), }),
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -328,21 +368,19 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createImageEdit: (image, mask, prompt, n, size, responseFormat, user, options = {}) => __awaiter(this, void 0, void 0, function* () { createImageEdit: (image, prompt, mask, n, size, responseFormat, user, options = {}) => __awaiter(this, void 0, void 0, function* () {
// verify required parameter 'image' is not null or undefined // verify required parameter 'image' is not null or undefined
common_1.assertParamExists('createImageEdit', 'image', image); common_1.assertParamExists('createImageEdit', 'image', image);
// verify required parameter 'mask' is not null or undefined
common_1.assertParamExists('createImageEdit', 'mask', mask);
// verify required parameter 'prompt' is not null or undefined // verify required parameter 'prompt' is not null or undefined
common_1.assertParamExists('createImageEdit', 'prompt', prompt); common_1.assertParamExists('createImageEdit', 'prompt', prompt);
const localVarPath = `/images/edits`; const localVarPath = `/images/edits`;
@ -394,7 +432,7 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -502,6 +540,114 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
options: localVarRequestOptions, options: localVarRequestOptions,
}; };
}), }),
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
 * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranscription: (file, model, prompt, responseFormat, temperature, language, options = {}) => __awaiter(this, void 0, void 0, function* () {
// verify required parameter 'file' is not null or undefined
common_1.assertParamExists('createTranscription', 'file', file);
// verify required parameter 'model' is not null or undefined
common_1.assertParamExists('createTranscription', 'model', model);
const localVarPath = `/audio/transcriptions`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, common_1.DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = Object.assign(Object.assign({ method: 'POST' }, baseOptions), options);
const localVarHeaderParameter = {};
const localVarQueryParameter = {};
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
if (file !== undefined) {
localVarFormParams.append('file', file);
}
if (model !== undefined) {
localVarFormParams.append('model', model);
}
if (prompt !== undefined) {
localVarFormParams.append('prompt', prompt);
}
if (responseFormat !== undefined) {
localVarFormParams.append('response_format', responseFormat);
}
if (temperature !== undefined) {
localVarFormParams.append('temperature', temperature);
}
if (language !== undefined) {
localVarFormParams.append('language', language);
}
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
common_1.setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = Object.assign(Object.assign(Object.assign(Object.assign({}, localVarHeaderParameter), localVarFormParams.getHeaders()), headersFromBaseOptions), options.headers);
localVarRequestOptions.data = localVarFormParams;
return {
url: common_1.toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
}),
/**
*
 * @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
 * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranslation: (file, model, prompt, responseFormat, temperature, options = {}) => __awaiter(this, void 0, void 0, function* () {
// verify required parameter 'file' is not null or undefined
common_1.assertParamExists('createTranslation', 'file', file);
// verify required parameter 'model' is not null or undefined
common_1.assertParamExists('createTranslation', 'model', model);
const localVarPath = `/audio/translations`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, common_1.DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = Object.assign(Object.assign({ method: 'POST' }, baseOptions), options);
const localVarHeaderParameter = {};
const localVarQueryParameter = {};
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
if (file !== undefined) {
localVarFormParams.append('file', file);
}
if (model !== undefined) {
localVarFormParams.append('model', model);
}
if (prompt !== undefined) {
localVarFormParams.append('prompt', prompt);
}
if (responseFormat !== undefined) {
localVarFormParams.append('response_format', responseFormat);
}
if (temperature !== undefined) {
localVarFormParams.append('temperature', temperature);
}
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
common_1.setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = Object.assign(Object.assign(Object.assign(Object.assign({}, localVarHeaderParameter), localVarFormParams.getHeaders()), headersFromBaseOptions), options.headers);
localVarRequestOptions.data = localVarFormParams;
return {
url: common_1.toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
}),
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -876,6 +1022,19 @@ exports.OpenAIApiFp = function (configuration) {
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration); return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
}); });
}, },
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createChatCompletion(createChatCompletionRequest, options) {
return __awaiter(this, void 0, void 0, function* () {
const localVarAxiosArgs = yield localVarAxiosParamCreator.createChatCompletion(createChatCompletionRequest, options);
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -905,7 +1064,7 @@ exports.OpenAIApiFp = function (configuration) {
}, },
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -972,19 +1131,19 @@ exports.OpenAIApiFp = function (configuration) {
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createImageEdit(image, mask, prompt, n, size, responseFormat, user, options) { createImageEdit(image, prompt, mask, n, size, responseFormat, user, options) {
return __awaiter(this, void 0, void 0, function* () { return __awaiter(this, void 0, void 0, function* () {
const localVarAxiosArgs = yield localVarAxiosParamCreator.createImageEdit(image, mask, prompt, n, size, responseFormat, user, options); const localVarAxiosArgs = yield localVarAxiosParamCreator.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options);
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration); return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
}); });
}, },
@ -995,7 +1154,7 @@ exports.OpenAIApiFp = function (configuration) {
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -1033,6 +1192,41 @@ exports.OpenAIApiFp = function (configuration) {
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration); return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
}); });
}, },
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
return __awaiter(this, void 0, void 0, function* () {
const localVarAxiosArgs = yield localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, language, options);
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
/**
*
* @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranslation(file, model, prompt, responseFormat, temperature, options) {
return __awaiter(this, void 0, void 0, function* () {
const localVarAxiosArgs = yield localVarAxiosParamCreator.createTranslation(file, model, prompt, responseFormat, temperature, options);
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -1218,6 +1412,16 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
createAnswer(createAnswerRequest, options) { createAnswer(createAnswerRequest, options) {
return localVarFp.createAnswer(createAnswerRequest, options).then((request) => request(axios, basePath)); return localVarFp.createAnswer(createAnswerRequest, options).then((request) => request(axios, basePath));
}, },
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createChatCompletion(createChatCompletionRequest, options) {
return localVarFp.createChatCompletion(createChatCompletionRequest, options).then((request) => request(axios, basePath));
},
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -1241,7 +1445,7 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
}, },
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -1293,18 +1497,18 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
createImageEdit(image, mask, prompt, n, size, responseFormat, user, options) { createImageEdit(image, prompt, mask, n, size, responseFormat, user, options) {
return localVarFp.createImageEdit(image, mask, prompt, n, size, responseFormat, user, options).then((request) => request(axios, basePath)); return localVarFp.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(axios, basePath));
}, },
/** /**
* *
@ -1313,7 +1517,7 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
*/ */
@ -1342,6 +1546,35 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
createSearch(engineId, createSearchRequest, options) { createSearch(engineId, createSearchRequest, options) {
return localVarFp.createSearch(engineId, createSearchRequest, options).then((request) => request(axios, basePath)); return localVarFp.createSearch(engineId, createSearchRequest, options).then((request) => request(axios, basePath));
}, },
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createTranslation(file, model, prompt, responseFormat, temperature, options) {
return localVarFp.createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(axios, basePath));
},
/** /**
* *
* @summary Delete a file. * @summary Delete a file.
@ -1493,6 +1726,17 @@ class OpenAIApi extends base_1.BaseAPI {
createAnswer(createAnswerRequest, options) { createAnswer(createAnswerRequest, options) {
return exports.OpenAIApiFp(this.configuration).createAnswer(createAnswerRequest, options).then((request) => request(this.axios, this.basePath)); return exports.OpenAIApiFp(this.configuration).createAnswer(createAnswerRequest, options).then((request) => request(this.axios, this.basePath));
} }
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createChatCompletion(createChatCompletionRequest, options) {
return exports.OpenAIApiFp(this.configuration).createChatCompletion(createChatCompletionRequest, options).then((request) => request(this.axios, this.basePath));
}
/** /**
* *
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -1518,7 +1762,7 @@ class OpenAIApi extends base_1.BaseAPI {
} }
/** /**
* *
* @summary Creates a new edit for the provided input, instruction, and parameters * @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest * @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
@ -1575,19 +1819,19 @@ class OpenAIApi extends base_1.BaseAPI {
/** /**
* *
* @summary Creates an edited or extended image given an original image and a prompt. * @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters. * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where &#x60;image&#x60; should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as &#x60;image&#x60;.
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
* @memberof OpenAIApi * @memberof OpenAIApi
*/ */
createImageEdit(image, mask, prompt, n, size, responseFormat, user, options) { createImageEdit(image, prompt, mask, n, size, responseFormat, user, options) {
return exports.OpenAIApiFp(this.configuration).createImageEdit(image, mask, prompt, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath)); return exports.OpenAIApiFp(this.configuration).createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath));
} }
/** /**
* *
@ -1596,7 +1840,7 @@ class OpenAIApi extends base_1.BaseAPI {
* @param {number} [n] The number of images to generate. Must be between 1 and 10. * @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;. * @param {string} [size] The size of the generated images. Must be one of &#x60;256x256&#x60;, &#x60;512x512&#x60;, or &#x60;1024x1024&#x60;.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;. * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of &#x60;url&#x60; or &#x60;b64_json&#x60;.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids). * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option. * @param {*} [options] Override http request option.
* @throws {RequiredError} * @throws {RequiredError}
* @memberof OpenAIApi * @memberof OpenAIApi
@ -1628,6 +1872,37 @@ class OpenAIApi extends base_1.BaseAPI {
createSearch(engineId, createSearchRequest, options) { createSearch(engineId, createSearchRequest, options) {
return exports.OpenAIApiFp(this.configuration).createSearch(engineId, createSearchRequest, options).then((request) => request(this.axios, this.basePath)); return exports.OpenAIApiFp(this.configuration).createSearch(engineId, createSearchRequest, options).then((request) => request(this.axios, this.basePath));
} }
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
return exports.OpenAIApiFp(this.configuration).createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(this.axios, this.basePath));
}
/**
*
* @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createTranslation(file, model, prompt, responseFormat, temperature, options) {
return exports.OpenAIApiFp(this.configuration).createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(this.axios, this.basePath));
}
/** /**
* *
* @summary Delete a file. * @summary Delete a file.

7
node_modules/openai/dist/base.d.ts generated vendored
View file

@ -2,15 +2,15 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech * https://openapi-generator.tech
* Do not edit the class manually. * Do not edit the class manually.
*/ */
import { Configuration } from "./configuration"; import type { Configuration } from './configuration';
import { AxiosInstance, AxiosRequestConfig } from 'axios'; import type { AxiosInstance, AxiosRequestConfig } from 'axios';
export declare const BASE_PATH: string; export declare const BASE_PATH: string;
/** /**
* *
@ -50,6 +50,5 @@ export declare class BaseAPI {
*/ */
export declare class RequiredError extends Error { export declare class RequiredError extends Error {
field: string; field: string;
name: "RequiredError";
constructor(field: string, msg?: string); constructor(field: string, msg?: string);
} }

4
node_modules/openai/dist/base.js generated vendored
View file

@ -5,7 +5,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@ -14,8 +14,6 @@
*/ */
Object.defineProperty(exports, "__esModule", { value: true }); Object.defineProperty(exports, "__esModule", { value: true });
exports.RequiredError = exports.BaseAPI = exports.COLLECTION_FORMATS = exports.BASE_PATH = void 0; exports.RequiredError = exports.BaseAPI = exports.COLLECTION_FORMATS = exports.BASE_PATH = void 0;
// Some imports not used depending on template conditions
// @ts-ignore
const axios_1 = require("axios"); const axios_1 = require("axios");
exports.BASE_PATH = "https://api.openai.com/v1".replace(/\/+$/, ""); exports.BASE_PATH = "https://api.openai.com/v1".replace(/\/+$/, "");
/** /**

View file

@ -2,16 +2,16 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech * https://openapi-generator.tech
* Do not edit the class manually. * Do not edit the class manually.
*/ */
import { Configuration } from "./configuration"; import type { Configuration } from "./configuration";
import { RequestArgs } from "./base"; import type { RequestArgs } from "./base";
import { AxiosInstance, AxiosResponse } from 'axios'; import type { AxiosInstance, AxiosResponse } from 'axios';
/** /**
* *
* @export * @export

4
node_modules/openai/dist/common.js generated vendored
View file

@ -5,7 +5,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@ -91,6 +91,8 @@ exports.setOAuthToObject = function (object, name, scopes, configuration) {
}); });
}; };
function setFlattenedQueryParams(urlSearchParams, parameter, key = "") { function setFlattenedQueryParams(urlSearchParams, parameter, key = "") {
if (parameter == null)
return;
if (typeof parameter === "object") { if (typeof parameter === "object") {
if (Array.isArray(parameter)) { if (Array.isArray(parameter)) {
parameter.forEach(item => setFlattenedQueryParams(urlSearchParams, item, key)); parameter.forEach(item => setFlattenedQueryParams(urlSearchParams, item, key));

View file

@ -2,7 +2,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

View file

@ -5,7 +5,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

View file

@ -2,7 +2,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

2
node_modules/openai/dist/index.js generated vendored
View file

@ -5,7 +5,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

2
node_modules/openai/index.ts generated vendored
View file

@ -4,7 +4,7 @@
* OpenAI API * OpenAI API
* APIs for sampling from and fine-tuning language models * APIs for sampling from and fine-tuning language models
* *
* The version of the OpenAPI document: 1.1.0 * The version of the OpenAPI document: 1.2.0
* *
* *
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

2
node_modules/openai/package.json generated vendored
View file

@ -1,6 +1,6 @@
{ {
"name": "openai", "name": "openai",
"version": "3.1.0", "version": "3.2.1",
"description": "Node.js library for the OpenAI API", "description": "Node.js library for the OpenAI API",
"repository": { "repository": {
"type": "git", "type": "git",

14
package-lock.json generated
View file

@ -14,7 +14,7 @@
"fs": "^0.0.1-security", "fs": "^0.0.1-security",
"googlethis": "^1.6.0", "googlethis": "^1.6.0",
"https": "^1.0.0", "https": "^1.0.0",
"openai": "^3.1.0", "openai": "^3.2.1",
"r34api.js": "^1.1.6", "r34api.js": "^1.1.6",
"request": "^2.88.2", "request": "^2.88.2",
"requests": "^0.3.0", "requests": "^0.3.0",
@ -899,9 +899,9 @@
"integrity": "sha512-qAMrwuk2xLEutlASoiPiAMW3EN3K96Ka/ilSXYr6qR1zSVXw2j7+yDSqGTC4T9apfLYxM3tLLjKvgPdAUK7kYQ==" "integrity": "sha512-qAMrwuk2xLEutlASoiPiAMW3EN3K96Ka/ilSXYr6qR1zSVXw2j7+yDSqGTC4T9apfLYxM3tLLjKvgPdAUK7kYQ=="
}, },
"node_modules/openai": { "node_modules/openai": {
"version": "3.1.0", "version": "3.2.1",
"resolved": "https://registry.npmjs.org/openai/-/openai-3.1.0.tgz", "resolved": "https://registry.npmjs.org/openai/-/openai-3.2.1.tgz",
"integrity": "sha512-v5kKFH5o+8ld+t0arudj833Mgm3GcgBnbyN9946bj6u7bvel4Yg6YFz2A4HLIYDzmMjIo0s6vSG9x73kOwvdCg==", "integrity": "sha512-762C9BNlJPbjjlWZi4WYK9iM2tAVAv0uUp1UmI34vb0CN5T2mjB/qM6RYBmNKMh/dN9fC+bxqPwWJZUTWW052A==",
"dependencies": { "dependencies": {
"axios": "^0.26.0", "axios": "^0.26.0",
"form-data": "^4.0.0" "form-data": "^4.0.0"
@ -1995,9 +1995,9 @@
"integrity": "sha512-qAMrwuk2xLEutlASoiPiAMW3EN3K96Ka/ilSXYr6qR1zSVXw2j7+yDSqGTC4T9apfLYxM3tLLjKvgPdAUK7kYQ==" "integrity": "sha512-qAMrwuk2xLEutlASoiPiAMW3EN3K96Ka/ilSXYr6qR1zSVXw2j7+yDSqGTC4T9apfLYxM3tLLjKvgPdAUK7kYQ=="
}, },
"openai": { "openai": {
"version": "3.1.0", "version": "3.2.1",
"resolved": "https://registry.npmjs.org/openai/-/openai-3.1.0.tgz", "resolved": "https://registry.npmjs.org/openai/-/openai-3.2.1.tgz",
"integrity": "sha512-v5kKFH5o+8ld+t0arudj833Mgm3GcgBnbyN9946bj6u7bvel4Yg6YFz2A4HLIYDzmMjIo0s6vSG9x73kOwvdCg==", "integrity": "sha512-762C9BNlJPbjjlWZi4WYK9iM2tAVAv0uUp1UmI34vb0CN5T2mjB/qM6RYBmNKMh/dN9fC+bxqPwWJZUTWW052A==",
"requires": { "requires": {
"axios": "^0.26.0", "axios": "^0.26.0",
"form-data": "^4.0.0" "form-data": "^4.0.0"

View file

@ -14,7 +14,7 @@
"fs": "^0.0.1-security", "fs": "^0.0.1-security",
"googlethis": "^1.6.0", "googlethis": "^1.6.0",
"https": "^1.0.0", "https": "^1.0.0",
"openai": "^3.1.0", "openai": "^3.2.1",
"r34api.js": "^1.1.6", "r34api.js": "^1.1.6",
"request": "^2.88.2", "request": "^2.88.2",
"requests": "^0.3.0", "requests": "^0.3.0",

33
register-commands.js Normal file
View file

@ -0,0 +1,33 @@
// Registers this bot's guild-scoped slash commands with the Discord API.
// Run once at deploy time (and again after any command definition change).
// Requires the DISCORD env var to hold the bot token.
const { REST, Routes, ApplicationCommandOptionType } = require('discord.js');

// Discord snowflake IDs for the application (bot) and the target guild.
// NOTE(review): hard-coded — consider moving these to env vars if the bot
// is ever registered in another guild.
const CLIENT_ID = '1059559067846189067';
const GUILD_ID = '1062473997297668108';

// Slash-command definitions to publish. `required: true` makes Discord
// enforce the presence of the `question` option client-side.
const commands = [
    {
        name: 'gptrequest',
        description: 'Make a request to the GPT-3.5-Turbo API',
        options: [
            {
                name: 'question',
                description: 'The question you want to ask to the API',
                type: ApplicationCommandOptionType.String,
                required: true,
            },
        ],
    },
];

const rest = new REST({ version: '10' }).setToken(process.env.DISCORD);

(async () => {
    try {
        console.log('Started refreshing application (/) commands.');
        // Bulk-overwrite the guild's entire command list with `commands`.
        await rest.put(
            Routes.applicationGuildCommands(CLIENT_ID, GUILD_ID),
            { body: commands },
        );
        console.log('Successfully reloaded application (/) commands.');
    } catch (error) {
        // Surface registration failures (bad token, missing scope, etc.)
        // without crashing the process.
        console.error(error);
    }
})();