added version 2.0

This commit is contained in:
Lukian LEIZOUR 2024-03-04 20:38:46 +01:00
parent 0163d40b4e
commit 0f18925d1a
3231 changed files with 1449 additions and 374732 deletions

View file

@@ -1,5 +1,5 @@
const { Configuration, OpenAIApi } = require("openai");
const { addToLogs } = require("./botTools");
const prompt = require("../data/prompt.json").prompt;
const configuration = new Configuration({
apiKey: process.env.OPENAI,
@@ -7,70 +7,78 @@ const configuration = new Configuration({
const openai = new OpenAIApi(configuration);
/**
 * Generates an image from a text prompt via the OpenAI image API.
 *
 * @param {string} query - Text prompt describing the desired image.
 * @returns {Promise<object|undefined>} The raw API response, or `undefined`
 *   when the request failed (the error is logged, not rethrown — callers
 *   must handle a missing result).
 */
async function generateImage(query) {
  // Declared with `const`: the previous code assigned to an undeclared
  // `response`, leaking an implicit global.
  const response = await openai
    .createImage({
      prompt: query,
      n: 1,
      size: "1024x1024",
      response_format: "url",
    })
    .catch((err) => {
      // Best-effort contract: log and fall through with `undefined`.
      console.log(err);
      addToLogs("--> error : " + err);
    });
  return response;
}
/**
 * Sends a single user question to GPT-4, prefixed with the configured
 * system prompt.
 *
 * @param {string} query - The user's question.
 * @returns {Promise<object>} Resolves with the chat-completion response;
 *   rejects with the underlying API error (callers handle it).
 */
async function answerQuestion(query) {
  // The merged diff left the legacy body (implicit-global `response`,
  // swallowed errors) in front of the new one, making the new code
  // unreachable. This is the intended v2 implementation; the redundant
  // `new Promise` wrapper around an existing promise is also removed.
  return openai.createChatCompletion({
    model: "gpt-4-1106-preview",
    messages: [
      { role: "system", content: prompt },
      { role: "user", content: query },
    ],
    temperature: 0.9,
  });
}
/**
 * Sends a single user question to the faster/cheaper GPT-3.5 model,
 * prefixed with the configured system prompt.
 *
 * @param {string} query - The user's question.
 * @returns {Promise<object>} Resolves with the chat-completion response;
 *   rejects with the underlying API error (callers handle it).
 */
async function quickAnswer(query) {
  // The merged diff left the legacy body (implicit-global `response`,
  // swallowed errors) in front of the new one, making the new code
  // unreachable. This is the intended v2 implementation; the redundant
  // `new Promise` wrapper around an existing promise is also removed.
  return openai.createChatCompletion({
    model: "gpt-3.5-turbo-1106",
    messages: [
      { role: "system", content: prompt },
      { role: "user", content: query },
    ],
    temperature: 0.9,
  });
}
/**
 * Sends a full conversation history to GPT-4.
 *
 * @param {Array<{role: string, content: string}>} messages - Chat history in
 *   OpenAI message format (caller supplies any system prompt).
 * @returns {Promise<object>} Resolves with the chat-completion response;
 *   rejects with the underlying API error (callers handle it).
 */
async function sendConv(messages) {
  // The merged diff contained two declarations of this function (old and
  // new); this is the single intended v2 implementation, without the
  // redundant `new Promise` wrapper around an existing promise.
  return openai.createChatCompletion({
    model: "gpt-4-1106-preview",
    messages: messages,
    temperature: 0.9,
  });
}
/**
 * Sends a full conversation history to the faster/cheaper GPT-3.5 model.
 *
 * @param {Array<{role: string, content: string}>} messages - Chat history in
 *   OpenAI message format (caller supplies any system prompt).
 * @returns {Promise<object>} Resolves with the chat-completion response;
 *   rejects with the underlying API error (callers handle it).
 */
async function sendQuickConv(messages) {
  // The merged diff contained two declarations of this function (old and
  // new); this is the single intended v2 implementation, without the
  // redundant `new Promise` wrapper around an existing promise.
  return openai.createChatCompletion({
    model: "gpt-3.5-turbo-1106",
    messages: messages,
    temperature: 0.9,
  });
}
// Single export statement: the file previously assigned `module.exports`
// twice, so the second assignment silently clobbered the first (dropping
// `generateImage` even though it is still defined above). Exporting it is
// backward-compatible for all existing callers.
module.exports = { generateImage, answerQuestion, sendConv, quickAnswer, sendQuickConv };