This commit is contained in:
Lukian 2023-06-20 15:28:07 +02:00
parent 68f4b60012
commit 41ae7ff4bd
1010 changed files with 38622 additions and 17071 deletions

201
node_modules/openai/dist/api.d.ts generated vendored
View file

@ -2,7 +2,7 @@
* OpenAI API
* APIs for sampling from and fine-tuning language models
*
* The version of the OpenAPI document: 1.2.0
* The version of the OpenAPI document: 1.3.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@ -13,6 +13,33 @@ import type { Configuration } from './configuration';
import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
import type { RequestArgs } from './base';
import { BaseAPI } from './base';
/**
*
* @export
* @interface ChatCompletionFunctions
*/
export interface ChatCompletionFunctions {
/**
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
* @type {string}
* @memberof ChatCompletionFunctions
*/
'name': string;
/**
* The description of what the function does.
* @type {string}
* @memberof ChatCompletionFunctions
*/
'description'?: string;
/**
* The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
* @type {{ [key: string]: any; }}
* @memberof ChatCompletionFunctions
*/
'parameters'?: {
[key: string]: any;
};
}
/**
*
* @export
@ -20,30 +47,56 @@ import { BaseAPI } from './base';
*/
export interface ChatCompletionRequestMessage {
/**
* The role of the author of this message.
* The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'role': ChatCompletionRequestMessageRoleEnum;
/**
* The contents of the message
* The contents of the message. `content` is required for all messages except assistant messages with function calls.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'content': string;
'content'?: string;
/**
* The name of the user in a multi-user chat
* The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
'name'?: string;
/**
*
* @type {ChatCompletionRequestMessageFunctionCall}
* @memberof ChatCompletionRequestMessage
*/
'function_call'?: ChatCompletionRequestMessageFunctionCall;
}
export declare const ChatCompletionRequestMessageRoleEnum: {
readonly System: "system";
readonly User: "user";
readonly Assistant: "assistant";
readonly Function: "function";
};
export declare type ChatCompletionRequestMessageRoleEnum = typeof ChatCompletionRequestMessageRoleEnum[keyof typeof ChatCompletionRequestMessageRoleEnum];
/**
* The name and arguments of a function that should be called, as generated by the model.
* @export
* @interface ChatCompletionRequestMessageFunctionCall
*/
export interface ChatCompletionRequestMessageFunctionCall {
/**
* The name of the function to call.
* @type {string}
* @memberof ChatCompletionRequestMessageFunctionCall
*/
'name'?: string;
/**
* The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
* @type {string}
* @memberof ChatCompletionRequestMessageFunctionCall
*/
'arguments'?: string;
}
/**
*
* @export
@ -57,16 +110,23 @@ export interface ChatCompletionResponseMessage {
*/
'role': ChatCompletionResponseMessageRoleEnum;
/**
* The contents of the message
* The contents of the message.
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
'content': string;
'content'?: string;
/**
*
* @type {ChatCompletionRequestMessageFunctionCall}
* @memberof ChatCompletionResponseMessage
*/
'function_call'?: ChatCompletionRequestMessageFunctionCall;
}
export declare const ChatCompletionResponseMessageRoleEnum: {
readonly System: "system";
readonly User: "user";
readonly Assistant: "assistant";
readonly Function: "function";
};
export declare type ChatCompletionResponseMessageRoleEnum = typeof ChatCompletionResponseMessageRoleEnum[keyof typeof ChatCompletionResponseMessageRoleEnum];
/**
@ -130,7 +190,7 @@ export interface CreateAnswerRequest {
*/
'temperature'?: number | null;
/**
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
* @type {number}
* @memberof CreateAnswerRequest
*/
@ -259,17 +319,29 @@ export interface CreateAnswerResponseSelectedDocumentsInner {
*/
export interface CreateChatCompletionRequest {
/**
* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
* ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
* @type {string}
* @memberof CreateChatCompletionRequest
*/
'model': string;
/**
* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
* A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
* @type {Array<ChatCompletionRequestMessage>}
* @memberof CreateChatCompletionRequest
*/
'messages': Array<ChatCompletionRequestMessage>;
/**
* A list of functions the model may generate JSON inputs for.
* @type {Array<ChatCompletionFunctions>}
* @memberof CreateChatCompletionRequest
*/
'functions'?: Array<ChatCompletionFunctions>;
/**
*
* @type {CreateChatCompletionRequestFunctionCall}
* @memberof CreateChatCompletionRequest
*/
'function_call'?: CreateChatCompletionRequestFunctionCall;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number}
@ -289,7 +361,7 @@ export interface CreateChatCompletionRequest {
*/
'n'?: number | null;
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
* @type {boolean}
* @memberof CreateChatCompletionRequest
*/
@ -301,7 +373,7 @@ export interface CreateChatCompletionRequest {
*/
'stop'?: CreateChatCompletionRequestStop;
/**
* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
* The maximum number of [tokens](/tokenizer) to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
@ -331,6 +403,25 @@ export interface CreateChatCompletionRequest {
*/
'user'?: string;
}
/**
* @type CreateChatCompletionRequestFunctionCall
* Controls how the model responds to function calls. \"none\" means the model does not call a function, and responds to the end-user. \"auto\" means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\":\\ \"my_function\"}` forces the model to call that function. \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.
* @export
*/
export declare type CreateChatCompletionRequestFunctionCall = CreateChatCompletionRequestFunctionCallOneOf | string;
/**
*
* @export
* @interface CreateChatCompletionRequestFunctionCallOneOf
*/
export interface CreateChatCompletionRequestFunctionCallOneOf {
/**
* The name of the function to call.
* @type {string}
* @memberof CreateChatCompletionRequestFunctionCallOneOf
*/
'name': string;
}
/**
* @type CreateChatCompletionRequestStop
* Up to 4 sequences where the API will stop generating further tokens.
@ -454,7 +545,7 @@ export interface CreateClassificationRequest {
*/
'temperature'?: number | null;
/**
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
* @type {number}
* @memberof CreateClassificationRequest
*/
@ -589,7 +680,7 @@ export interface CreateCompletionRequest {
*/
'suffix'?: string | null;
/**
* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
* @type {number}
* @memberof CreateCompletionRequest
*/
@ -613,13 +704,13 @@ export interface CreateCompletionRequest {
*/
'n'?: number | null;
/**
* Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
* Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
* @type {boolean}
* @memberof CreateCompletionRequest
*/
'stream'?: boolean | null;
/**
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5.
* @type {number}
* @memberof CreateCompletionRequest
*/
@ -910,7 +1001,7 @@ export interface CreateEmbeddingRequest {
}
/**
* @type CreateEmbeddingRequestInput
* Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.
* Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
* @export
*/
export declare type CreateEmbeddingRequestInput = Array<any> | Array<number> | Array<string> | string;
@ -1489,6 +1580,19 @@ export interface Engine {
*/
'ready': boolean;
}
/**
*
* @export
* @interface ErrorResponse
*/
export interface ErrorResponse {
/**
*
* @type {Error}
* @memberof ErrorResponse
*/
'error': Error;
}
/**
*
* @export
@ -1769,6 +1873,37 @@ export interface Model {
*/
'owned_by': string;
}
/**
*
* @export
* @interface ModelError
*/
export interface ModelError {
/**
*
* @type {string}
* @memberof ModelError
*/
'type': string;
/**
*
* @type {string}
* @memberof ModelError
*/
'message': string;
/**
*
* @type {string}
* @memberof ModelError
*/
'param': string | null;
/**
*
* @type {string}
* @memberof ModelError
*/
'code': string | null;
}
/**
*
* @export
@ -1848,7 +1983,7 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
createAnswer: (createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/**
*
* @summary Creates a completion for the chat message
* @summary Creates a model response for the given chat conversation.
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -1865,7 +2000,7 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
createClassification: (createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
/**
*
* @summary Creates a completion for the provided prompt and parameters
* @summary Creates a completion for the provided prompt and parameters.
* @param {CreateCompletionRequest} createCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -1959,7 +2094,7 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
@ -1972,7 +2107,7 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
/**
*
 * @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
 * @param {File} file The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
@ -2101,7 +2236,7 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateAnswerResponse>>;
/**
*
* @summary Creates a completion for the chat message
* @summary Creates a model response for the given chat conversation.
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -2118,7 +2253,7 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateClassificationResponse>>;
/**
*
* @summary Creates a completion for the provided prompt and parameters
* @summary Creates a completion for the provided prompt and parameters.
* @param {CreateCompletionRequest} createCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -2212,7 +2347,7 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
@ -2225,7 +2360,7 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
/**
*
 * @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
 * @param {File} file The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
@ -2354,7 +2489,7 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise<CreateAnswerResponse>;
/**
*
* @summary Creates a completion for the chat message
* @summary Creates a model response for the given chat conversation.
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -2371,7 +2506,7 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
createClassification(createClassificationRequest: CreateClassificationRequest, options?: any): AxiosPromise<CreateClassificationResponse>;
/**
*
* @summary Creates a completion for the provided prompt and parameters
* @summary Creates a completion for the provided prompt and parameters.
* @param {CreateCompletionRequest} createCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -2465,7 +2600,7 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
@ -2478,7 +2613,7 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
/**
*
 * @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
 * @param {File} file The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
@ -2611,7 +2746,7 @@ export declare class OpenAIApi extends BaseAPI {
createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateAnswerResponse, any>>;
/**
*
* @summary Creates a completion for the chat message
* @summary Creates a model response for the given chat conversation.
* @param {CreateChatCompletionRequest} createChatCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -2630,7 +2765,7 @@ export declare class OpenAIApi extends BaseAPI {
createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateClassificationResponse, any>>;
/**
*
* @summary Creates a completion for the provided prompt and parameters
* @summary Creates a completion for the provided prompt and parameters.
* @param {CreateCompletionRequest} createCompletionRequest
* @param {*} [options] Override http request option.
* @throws {RequiredError}
@ -2734,7 +2869,7 @@ export declare class OpenAIApi extends BaseAPI {
/**
*
* @summary Transcribes audio into the input language.
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
@ -2748,7 +2883,7 @@ export declare class OpenAIApi extends BaseAPI {
/**
*
 * @summary Translates audio into English.
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
 * @param {File} file The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
* @param {string} model ID of the model to use. Only &#x60;whisper-1&#x60; is currently available.
* @param {string} [prompt] An optional text to guide the model\\\&#39;s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.