Increase timeouts of OAI out of quota requests

Cohee 2023-11-08 12:07:14 +02:00
parent b3ced2c4c5
commit 740f6548a2
2 changed files with 3 additions and 2 deletions

@@ -110,7 +110,7 @@ const max_4k = 4095;
 const max_8k = 8191;
 const max_16k = 16383;
 const max_32k = 32767;
-const max_128k = 128 * 1024 - 1;
+const max_128k = 128 * 1000;
 const scale_max = 8191;
 const claude_max = 9000; // We have a proper tokenizer, so theoretically could be larger (up to 9k)
 const palm2_max = 7500; // The real context window is 8192, spare some for padding due to using turbo tokenizer
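
For reference (an inference, not stated in the commit): 128 * 1000 = 128,000 tokens matches the context window OpenAI documents for its 128k models, whereas the previous 128 * 1024 - 1 = 131,071 overshoots it.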

@@ -3222,7 +3222,8 @@ app.post("/generate_openai", jsonParser, function (request, response_generate_openai) {
 } else if (fetchResponse.status === 429 && retries > 0) {
     console.log(`Out of quota, retrying in ${Math.round(timeout / 1000)}s`);
     setTimeout(() => {
-        makeRequest(config, response_generate_openai, request, retries - 1);
+        timeout *= 2;
+        makeRequest(config, response_generate_openai, request, retries - 1, timeout);
     }, timeout);
 } else {
     await handleErrorResponse(fetchResponse);
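
Taken together, the server change turns the fixed retry delay into an exponential backoff: on every 429 ("out of quota") response the timeout is doubled and passed on to the next attempt, so successive retries wait progressively longer. Below is a minimal standalone sketch of the same pattern; the function and constant names (requestWithBackoff, BASE_TIMEOUT_MS, MAX_RETRIES) are illustrative assumptions, not the project's actual API.

// Illustrative sketch only: retry an HTTP request with exponential backoff on 429.
// Assumes Node 18+ (global fetch); names are hypothetical, not from this repository.
const BASE_TIMEOUT_MS = 5000;
const MAX_RETRIES = 5;

async function requestWithBackoff(url, options, retries = MAX_RETRIES, timeout = BASE_TIMEOUT_MS) {
    const response = await fetch(url, options);

    if (response.status === 429 && retries > 0) {
        console.log(`Out of quota, retrying in ${Math.round(timeout / 1000)}s`);
        // Wait for the current timeout, then retry with it doubled,
        // mirroring the `timeout *= 2` added in the diff above.
        await new Promise((resolve) => setTimeout(resolve, timeout));
        return requestWithBackoff(url, options, retries - 1, timeout * 2);
    }

    return response;
}

With the sketch's 5-second base and five retries, the waits grow as 5s, 10s, 20s, 40s, 80s before giving up, whereas the code before this commit waited the same fixed interval on every attempt.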