This commit is contained in:
Lukian LEIZOUR 2023-03-02 16:26:41 +01:00
parent c997a4f034
commit 4afa276479
26 changed files with 1437 additions and 204 deletions

337
node_modules/openai/dist/api.js generated vendored
View file

@ -5,7 +5,7 @@
* OpenAI API
* APIs for sampling from and fine-tuning language models
*
* The version of the OpenAPI document: 1.1.0
* The version of the OpenAPI document: 1.2.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@ -22,13 +22,23 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIApi = exports.OpenAIApiFactory = exports.OpenAIApiFp = exports.OpenAIApiAxiosParamCreator = exports.CreateImageRequestResponseFormatEnum = exports.CreateImageRequestSizeEnum = void 0;
exports.OpenAIApi = exports.OpenAIApiFactory = exports.OpenAIApiFp = exports.OpenAIApiAxiosParamCreator = exports.CreateImageRequestResponseFormatEnum = exports.CreateImageRequestSizeEnum = exports.ChatCompletionResponseMessageRoleEnum = exports.ChatCompletionRequestMessageRoleEnum = void 0;
const axios_1 = require("axios");
// Some imports not used depending on template conditions
// @ts-ignore
const common_1 = require("./common");
// @ts-ignore
const base_1 = require("./base");
// Allowed `role` values for messages sent in a chat completion request.
exports.ChatCompletionRequestMessageRoleEnum = {
System: 'system',
User: 'user',
Assistant: 'assistant'
};
// Allowed `role` values for messages returned in a chat completion response.
exports.ChatCompletionResponseMessageRoleEnum = {
System: 'system',
User: 'user',
Assistant: 'assistant'
};
exports.CreateImageRequestSizeEnum = {
_256x256: '256x256',
_512x512: '512x512',
@ -104,6 +114,36 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
options: localVarRequestOptions,
};
}),
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest The JSON request body (model, messages, sampling options).
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `createChatCompletionRequest` is null or undefined.
* @returns {Promise<{url: string, options: object}>} Relative URL and axios request options for POST /chat/completions.
*/
createChatCompletion: (createChatCompletionRequest, options = {}) => __awaiter(this, void 0, void 0, function* () {
// verify required parameter 'createChatCompletionRequest' is not null or undefined
common_1.assertParamExists('createChatCompletion', 'createChatCompletionRequest', createChatCompletionRequest);
const localVarPath = `/chat/completions`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, common_1.DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
// per-call `options` take precedence over configuration-level baseOptions
const localVarRequestOptions = Object.assign(Object.assign({ method: 'POST' }, baseOptions), options);
const localVarHeaderParameter = {};
const localVarQueryParameter = {};
localVarHeaderParameter['Content-Type'] = 'application/json';
common_1.setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
// header precedence (lowest to highest): Content-Type, baseOptions headers, per-call option headers
localVarRequestOptions.headers = Object.assign(Object.assign(Object.assign({}, localVarHeaderParameter), headersFromBaseOptions), options.headers);
localVarRequestOptions.data = common_1.serializeDataIfNeeded(createChatCompletionRequest, localVarRequestOptions, configuration);
return {
url: common_1.toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
}),
/**
*
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -167,7 +207,7 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
}),
/**
*
* @summary Creates a new edit for the provided input, instruction, and parameters
* @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -328,21 +368,19 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
/**
*
* @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids).
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createImageEdit: (image, mask, prompt, n, size, responseFormat, user, options = {}) => __awaiter(this, void 0, void 0, function* () {
createImageEdit: (image, prompt, mask, n, size, responseFormat, user, options = {}) => __awaiter(this, void 0, void 0, function* () {
// verify required parameter 'image' is not null or undefined
common_1.assertParamExists('createImageEdit', 'image', image);
// verify required parameter 'mask' is not null or undefined
common_1.assertParamExists('createImageEdit', 'mask', mask);
// verify required parameter 'prompt' is not null or undefined
common_1.assertParamExists('createImageEdit', 'prompt', prompt);
const localVarPath = `/images/edits`;
@ -394,7 +432,7 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids).
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
@ -502,6 +540,114 @@ exports.OpenAIApiAxiosParamCreator = function (configuration) {
options: localVarRequestOptions,
};
}),
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
* @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `file` or `model` is null or undefined.
* @returns {Promise<{url: string, options: object}>} Relative URL and axios request options for POST /audio/transcriptions.
*/
createTranscription: (file, model, prompt, responseFormat, temperature, language, options = {}) => __awaiter(this, void 0, void 0, function* () {
// verify required parameter 'file' is not null or undefined
common_1.assertParamExists('createTranscription', 'file', file);
// verify required parameter 'model' is not null or undefined
common_1.assertParamExists('createTranscription', 'model', model);
const localVarPath = `/audio/transcriptions`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, common_1.DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
// per-call `options` take precedence over configuration-level baseOptions
const localVarRequestOptions = Object.assign(Object.assign({ method: 'POST' }, baseOptions), options);
const localVarHeaderParameter = {};
const localVarQueryParameter = {};
// configuration.formDataCtor lets callers inject a FormData implementation (e.g. for Node)
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
// only parameters that were actually supplied are appended to the multipart body
if (file !== undefined) {
localVarFormParams.append('file', file);
}
if (model !== undefined) {
localVarFormParams.append('model', model);
}
if (prompt !== undefined) {
localVarFormParams.append('prompt', prompt);
}
if (responseFormat !== undefined) {
localVarFormParams.append('response_format', responseFormat);
}
if (temperature !== undefined) {
localVarFormParams.append('temperature', temperature);
}
if (language !== undefined) {
localVarFormParams.append('language', language);
}
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
common_1.setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
// NOTE(review): getHeaders() is not on browser FormData — presumably a Node `form-data` instance supplying the multipart boundary; confirm formDataCtor usage
localVarRequestOptions.headers = Object.assign(Object.assign(Object.assign(Object.assign({}, localVarHeaderParameter), localVarFormParams.getHeaders()), headersFromBaseOptions), options.headers);
localVarRequestOptions.data = localVarFormParams;
return {
url: common_1.toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
}),
/**
*
* @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
* @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `file` or `model` is null or undefined.
* @returns {Promise<{url: string, options: object}>} Relative URL and axios request options for POST /audio/translations.
*/
createTranslation: (file, model, prompt, responseFormat, temperature, options = {}) => __awaiter(this, void 0, void 0, function* () {
// verify required parameter 'file' is not null or undefined
common_1.assertParamExists('createTranslation', 'file', file);
// verify required parameter 'model' is not null or undefined
common_1.assertParamExists('createTranslation', 'model', model);
const localVarPath = `/audio/translations`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, common_1.DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
// per-call `options` take precedence over configuration-level baseOptions
const localVarRequestOptions = Object.assign(Object.assign({ method: 'POST' }, baseOptions), options);
const localVarHeaderParameter = {};
const localVarQueryParameter = {};
// configuration.formDataCtor lets callers inject a FormData implementation (e.g. for Node)
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
// only parameters that were actually supplied are appended to the multipart body
if (file !== undefined) {
localVarFormParams.append('file', file);
}
if (model !== undefined) {
localVarFormParams.append('model', model);
}
if (prompt !== undefined) {
localVarFormParams.append('prompt', prompt);
}
if (responseFormat !== undefined) {
localVarFormParams.append('response_format', responseFormat);
}
if (temperature !== undefined) {
localVarFormParams.append('temperature', temperature);
}
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
common_1.setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
// NOTE(review): getHeaders() is not on browser FormData — presumably a Node `form-data` instance supplying the multipart boundary; confirm formDataCtor usage
localVarRequestOptions.headers = Object.assign(Object.assign(Object.assign(Object.assign({}, localVarHeaderParameter), localVarFormParams.getHeaders()), headersFromBaseOptions), options.headers);
localVarRequestOptions.data = localVarFormParams;
return {
url: common_1.toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
}),
/**
*
* @summary Delete a file.
@ -876,6 +1022,19 @@ exports.OpenAIApiFp = function (configuration) {
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest The JSON request body (model, messages, sampling options).
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `createChatCompletionRequest` is null or undefined (raised by the param creator).
* @returns {Promise<Function>} A request function that takes (axios, basePath) and executes the HTTP call.
*/
createChatCompletion(createChatCompletionRequest, options) {
return __awaiter(this, void 0, void 0, function* () {
// build URL + axios options via the param creator, then wrap them in a deferred request function
const localVarAxiosArgs = yield localVarAxiosParamCreator.createChatCompletion(createChatCompletionRequest, options);
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
/**
*
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -905,7 +1064,7 @@ exports.OpenAIApiFp = function (configuration) {
},
/**
*
* @summary Creates a new edit for the provided input, instruction, and parameters
* @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -972,19 +1131,19 @@ exports.OpenAIApiFp = function (configuration) {
/**
*
* @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids).
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createImageEdit(image, mask, prompt, n, size, responseFormat, user, options) {
createImageEdit(image, prompt, mask, n, size, responseFormat, user, options) {
return __awaiter(this, void 0, void 0, function* () {
const localVarAxiosArgs = yield localVarAxiosParamCreator.createImageEdit(image, mask, prompt, n, size, responseFormat, user, options);
const localVarAxiosArgs = yield localVarAxiosParamCreator.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options);
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
@ -995,7 +1154,7 @@ exports.OpenAIApiFp = function (configuration) {
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids).
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
@ -1033,6 +1192,41 @@ exports.OpenAIApiFp = function (configuration) {
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
* @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `file` or `model` is null or undefined (raised by the param creator).
* @returns {Promise<Function>} A request function that takes (axios, basePath) and executes the HTTP call.
*/
createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
return __awaiter(this, void 0, void 0, function* () {
// build URL + axios options via the param creator, then wrap them in a deferred request function
const localVarAxiosArgs = yield localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, language, options);
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
/**
*
* @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
* @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `file` or `model` is null or undefined (raised by the param creator).
* @returns {Promise<Function>} A request function that takes (axios, basePath) and executes the HTTP call.
*/
createTranslation(file, model, prompt, responseFormat, temperature, options) {
return __awaiter(this, void 0, void 0, function* () {
// build URL + axios options via the param creator, then wrap them in a deferred request function
const localVarAxiosArgs = yield localVarAxiosParamCreator.createTranslation(file, model, prompt, responseFormat, temperature, options);
return common_1.createRequestFunction(localVarAxiosArgs, axios_1.default, base_1.BASE_PATH, configuration);
});
},
/**
*
* @summary Delete a file.
@ -1218,6 +1412,16 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
createAnswer(createAnswerRequest, options) {
return localVarFp.createAnswer(createAnswerRequest, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest The JSON request body (model, messages, sampling options).
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `createChatCompletionRequest` is null or undefined (raised by the param creator).
* @returns {Promise} Resolves with the HTTP response once the underlying request function is invoked.
*/
createChatCompletion(createChatCompletionRequest, options) {
// delegate to the Fp layer and immediately execute the returned request function
return localVarFp.createChatCompletion(createChatCompletionRequest, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -1241,7 +1445,7 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
},
/**
*
* @summary Creates a new edit for the provided input, instruction, and parameters
* @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -1293,18 +1497,18 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
/**
*
* @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids).
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
createImageEdit(image, mask, prompt, n, size, responseFormat, user, options) {
return localVarFp.createImageEdit(image, mask, prompt, n, size, responseFormat, user, options).then((request) => request(axios, basePath));
createImageEdit(image, prompt, mask, n, size, responseFormat, user, options) {
return localVarFp.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(axios, basePath));
},
/**
*
@ -1313,7 +1517,7 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids).
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
@ -1342,6 +1546,35 @@ exports.OpenAIApiFactory = function (configuration, basePath, axios) {
createSearch(engineId, createSearchRequest, options) {
return localVarFp.createSearch(engineId, createSearchRequest, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
* @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `file` or `model` is null or undefined (raised by the param creator).
* @returns {Promise} Resolves with the HTTP response once the underlying request function is invoked.
*/
createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
// delegate to the Fp layer and immediately execute the returned request function
return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
* @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `file` or `model` is null or undefined (raised by the param creator).
* @returns {Promise} Resolves with the HTTP response once the underlying request function is invoked.
*/
createTranslation(file, model, prompt, responseFormat, temperature, options) {
// delegate to the Fp layer and immediately execute the returned request function
return localVarFp.createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(axios, basePath));
},
/**
*
* @summary Delete a file.
@ -1493,6 +1726,17 @@ class OpenAIApi extends base_1.BaseAPI {
createAnswer(createAnswerRequest, options) {
return exports.OpenAIApiFp(this.configuration).createAnswer(createAnswerRequest, options).then((request) => request(this.axios, this.basePath));
}
/**
*
* @summary Creates a completion for the chat message
* @param {CreateChatCompletionRequest} createChatCompletionRequest The JSON request body (model, messages, sampling options).
* @param {*} [options] Override http request option.
* @throws {RequiredError} If `createChatCompletionRequest` is null or undefined (raised by the param creator).
* @returns {Promise} Resolves with the HTTP response once the underlying request function is invoked.
* @memberof OpenAIApi
*/
createChatCompletion(createChatCompletionRequest, options) {
// delegate to the Fp layer using this instance's configuration, axios, and basePath
return exports.OpenAIApiFp(this.configuration).createChatCompletion(createChatCompletionRequest, options).then((request) => request(this.axios, this.basePath));
}
/**
*
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
@ -1518,7 +1762,7 @@ class OpenAIApi extends base_1.BaseAPI {
}
/**
*
* @summary Creates a new edit for the provided input, instruction, and parameters
* @summary Creates a new edit for the provided input, instruction, and parameters.
* @param {CreateEditRequest} createEditRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -1575,19 +1819,19 @@ class OpenAIApi extends base_1.BaseAPI {
/**
*
* @summary Creates an edited or extended image given an original image and a prompt.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square.
* @param {File} mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
* @param {string} [user] A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids).
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createImageEdit(image, mask, prompt, n, size, responseFormat, user, options) {
return exports.OpenAIApiFp(this.configuration).createImageEdit(image, mask, prompt, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath));
createImageEdit(image, prompt, mask, n, size, responseFormat, user, options) {
return exports.OpenAIApiFp(this.configuration).createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath));
}
/**
*
@ -1596,7 +1840,7 @@ class OpenAIApi extends base_1.BaseAPI {
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
 * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
@ -1628,6 +1872,37 @@ class OpenAIApi extends base_1.BaseAPI {
createSearch(engineId, createSearchRequest, options) {
return exports.OpenAIApiFp(this.configuration).createSearch(engineId, createSearchRequest, options).then((request) => request(this.axios, this.basePath));
}
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
* @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createTranscription(file, model, prompt, responseFormat, temperature, language, options) {
return exports.OpenAIApiFp(this.configuration).createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(this.axios, this.basePath));
}
/**
*
* @summary Translates audio into into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
* @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof OpenAIApi
*/
createTranslation(file, model, prompt, responseFormat, temperature, options) {
return exports.OpenAIApiFp(this.configuration).createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(this.axios, this.basePath));
}
/**
*
* @summary Delete a file.