From efa0f12349986d791c5149280fab43306f2d9113 Mon Sep 17 00:00:00 2001
From: Cohee <18619528+Cohee1207@users.noreply.github.com>
Date: Sun, 20 Aug 2023 16:25:16 +0300
Subject: [PATCH] Fix prompt manager issues

---
 public/scripts/openai.js | 8 +++++---
 server.js                | 4 ++--
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 07155521b..d280305f3 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -513,7 +513,8 @@ function populateChatHistory(prompts, chatCompletion, type = null, cyclePrompt =
         const chatMessage = Message.fromPrompt(promptManager.preparePrompt(prompt));
 
         if (true === promptManager.serviceSettings.names_in_completion && prompt.name) {
-            chatMessage.name = promptManager.isValidName(prompt.name) ? prompt.name : promptManager.sanitizeName(prompt.name);
+            const messageName = promptManager.isValidName(prompt.name) ? prompt.name : promptManager.sanitizeName(prompt.name);
+            chatMessage.setName(messageName);
         }
 
         if (chatCompletion.canAfford(chatMessage)) chatCompletion.insertAtStart(chatMessage, 'chatHistory');
@@ -1371,7 +1372,7 @@ function countTokens(messages, full = false) {
 
     for (const message of messages) {
         const model = getTokenizerModel();
-        const hash = getStringHash(message.content);
+        const hash = getStringHash(JSON.stringify(message));
         const cacheKey = `${model}-${hash}`;
         const cachedCount = tokenCache[chatId][cacheKey];
 
@@ -1443,7 +1444,7 @@ class Message {
         this.role = role;
         this.content = content;
-        if (this.content) {
+        if (typeof this.content === 'string') {
             this.tokens = tokenHandler.count({ role: this.role, content: this.content });
         } else {
             this.tokens = 0;
         }
@@ -1452,6 +1453,7 @@
 
     setName(name) {
         this.name = name;
+        this.tokens = tokenHandler.count({ role: this.role, content: this.content, name: this.name });
     }
 
     /**
diff --git a/server.js b/server.js
index 940ce13f7..e606f87e3 100644
--- a/server.js
+++ b/server.js
@@ -3460,7 +3460,7 @@ app.post("/generate_openai", jsonParser, function (request, response_generate_op
         config.responseType = 'stream';
     }
 
-    async function makeRequest(config, response_generate_openai, request, retries = 5, timeout = 1000) {
+    async function makeRequest(config, response_generate_openai, request, retries = 5, timeout = 5000) {
        try {
            const response = await axios(config);
 
@@ -3482,7 +3482,7 @@ app.post("/generate_openai", jsonParser, function (request, response_generate_op
             }
         } catch (error) {
             if (error.response && error.response.status === 429 && retries > 0) {
-                console.log('Out of quota, retrying...');
+                console.log(`Out of quota, retrying in ${Math.round(timeout / 1000)}s`);
                 setTimeout(() => {
                     makeRequest(config, response_generate_openai, request, retries - 1);
                 }, timeout);
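
Notes on the changes above (reviewer annotations; the sketches below are illustrative, not part of the patch):

1. openai.js, Message: assigning chatMessage.name directly left the cached
   token count stale, so the chatCompletion.canAfford() budget check
   undercounted named messages. Routing the assignment through setName()
   keeps the count in sync. A minimal sketch of that invariant, with a
   hypothetical countTokens() standing in for tokenHandler.count():

       // Hypothetical counter; the real tokenHandler.count() uses a
       // model-specific tokenizer. Rough 4-characters-per-token heuristic.
       function countTokens(message) {
           return Math.ceil(JSON.stringify(message).length / 4);
       }

       class Message {
           constructor(role, content) {
               this.role = role;
               this.content = content;
               // Non-string content counts as 0 tokens, as in the patch.
               this.tokens = typeof content === 'string'
                   ? countTokens({ role, content })
                   : 0;
           }

           setName(name) {
               this.name = name;
               // The name field is serialized into the completion request,
               // so it must be included in the recount.
               this.tokens = countTokens({ role: this.role, content: this.content, name: this.name });
           }
       }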
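2. openai.js, countTokens(): the cache key previously hashed only
   message.content, so two messages with identical content but different
   roles or names shared one cache slot and could return a wrong count.
   Hashing the whole serialized message keys the cache on every field that
   affects tokenization. A sketch of the idea, assuming getStringHash() is
   a simple string hash (djb2 used here purely for illustration):

       // Illustrative stand-in for getStringHash().
       function getStringHash(str) {
           let hash = 5381;
           for (let i = 0; i < str.length; i++) {
               hash = ((hash << 5) + hash + str.charCodeAt(i)) | 0;
           }
           return hash >>> 0;
       }

       const tokenCache = {};
       function cachedTokenCount(model, message, count) {
           // Keying on the full message means a name or role change is a
           // cache miss instead of a stale hit for the same content.
           const cacheKey = `${model}-${getStringHash(JSON.stringify(message))}`;
           if (!(cacheKey in tokenCache)) tokenCache[cacheKey] = count(message);
           return tokenCache[cacheKey];
       }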
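3. server.js: the default retry delay for 429 ("out of quota") responses is
   raised from 1 s to 5 s, and the log line now reports the delay. Note that
   the recursive makeRequest() call does not forward timeout, so every retry
   falls back to the 5000 ms default, giving a flat five-second interval
   rather than a backoff. The retry shape, sketched with a hypothetical
   doRequest() in place of axios(config):

       // Minimal sketch of the retry loop; doRequest is a stand-in for axios.
       async function requestWithRetry(doRequest, retries = 5, timeout = 5000) {
           try {
               return await doRequest();
           } catch (error) {
               const status = error?.response?.status;
               if (status === 429 && retries > 0) {
                   console.log(`Out of quota, retrying in ${Math.round(timeout / 1000)}s`);
                   await new Promise((resolve) => setTimeout(resolve, timeout));
                   return requestWithRetry(doRequest, retries - 1, timeout);
               }
               throw error; // non-429 errors and exhausted retries propagate
           }
       }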