added more options to the scale request

Author: based
Date:   2023-08-22 21:29:18 +10:00
parent 06902279a9
commit ba925f388c
3 changed files with 29 additions and 20 deletions

File 1 of 3: settings markup

@@ -710,7 +710,7 @@
         </div>
     </div>
 </div>
-<div class="range-block" data-source="openai,claude,openrouter,ai21">
+<div class="range-block" data-source="openai,claude,openrouter,ai21,scale">
     <div class="range-block-title" data-i18n="Top-p">
         Top P
     </div>
@@ -1578,7 +1578,7 @@
         </div>
     </div>
 </div>
-<div class="range-block m-t-1" data-source="openai,openrouter">
+<div class="range-block m-t-1" data-source="openai,openrouter,scale">
    <div class="range-block-title openai_restorable" data-i18n="Logit Bias">
        Logit Bias
    </div>
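
Both hunks widen the data-source gate so the Top P and Logit Bias blocks also appear when Scale is the active source. A minimal sketch of that gating pattern, assuming the script shows or hides blocks by matching the active source against the comma-separated data-source list (the helper name is hypothetical):

// Hypothetical helper: show each .range-block only when the active
// chat completion source appears in its data-source list.
function toggleSourceBlocks(activeSource) {
    $('.range-block[data-source]').each(function () {
        const sources = $(this).attr('data-source').split(',');
        $(this).toggle(sources.includes(activeSource));
    });
}

toggleSourceBlocks('scale'); // now matches "openai,claude,openrouter,ai21,scale"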

File 2 of 3: client chat completion script

@@ -1084,23 +1084,34 @@ function saveModelList(data) {
     }
 }
 
-async function sendAltScaleRequest(openai_msgs_tosend, signal) {
+async function sendAltScaleRequest(openai_msgs_tosend, logit_bias, signal) {
     const generate_url = '/generate_altscale';
 
-    let firstMsg = substituteParams(openai_msgs_tosend[0].content);
-    let subsequentMsgs = openai_msgs_tosend.slice(1);
+    let firstSysMsgs = []
+    for (let msg of openai_msgs_tosend) {
+        if (msg.role === 'system') {
+            firstSysMsgs.push(substituteParams(msg.content));
+        } else {
+            break;
+        }
+    }
+
+    let subsequentMsgs = openai_msgs_tosend.slice(firstSysMsgs.length);
 
-    const joinedMsgs = subsequentMsgs.reduce((acc, obj) => {
+    const joinedSysMsgs = substituteParams(firstSysMsgs.join("\n"));
+    const joinedSubsequentMsgs = subsequentMsgs.reduce((acc, obj) => {
         return acc + obj.role + ": " + obj.content + "\n";
     }, "");
-    openai_msgs_tosend = substituteParams(joinedMsgs);
-    console.log(openai_msgs_tosend)
+
+    openai_msgs_tosend = substituteParams(joinedSubsequentMsgs);
 
     const generate_data = {
-        sysprompt: firstMsg,
+        sysprompt: joinedSysMsgs,
         prompt: openai_msgs_tosend,
         temp: parseFloat(oai_settings.temp_openai),
+        top_p: parseFloat(oai_settings.top_p_openai),
         max_tokens: parseFloat(oai_settings.openai_max_tokens),
+        logit_bias: logit_bias,
     }
 
     const response = await fetch(generate_url, {
@@ -1109,6 +1120,7 @@ async function sendAltScaleRequest(openai_msgs_tosend, signal) {
         headers: getRequestHeaders(),
         signal: signal
     });
+
     const data = await response.json();
     return data.output;
 }
@@ -1143,17 +1155,13 @@ async function sendOpenAIRequest(type, openai_msgs_tosend, signal) {
         openai_msgs_tosend = substituteParams(joinedMsgs);
     }
 
-    if (isScale && !!$('#scale-alt').prop('checked')) {
-        return sendAltScaleRequest(openai_msgs_tosend, signal)
-    }
-
     // If we're using the window.ai extension, use that instead
     // Doesn't support logit bias yet
     if (oai_settings.chat_completion_source == chat_completion_sources.WINDOWAI) {
         return sendWindowAIRequest(openai_msgs_tosend, signal, stream);
     }
 
-    const logitBiasSources = [chat_completion_sources.OPENAI, chat_completion_sources.OPENROUTER];
+    const logitBiasSources = [chat_completion_sources.OPENAI, chat_completion_sources.OPENROUTER, chat_completion_sources.SCALE];
     if (oai_settings.bias_preset_selected
         && logitBiasSources.includes(oai_settings.chat_completion_source)
         && Array.isArray(oai_settings.bias_presets[oai_settings.bias_preset_selected])
@@ -1162,6 +1170,10 @@ async function sendOpenAIRequest(type, openai_msgs_tosend, signal) {
         biasCache = logit_bias;
     }
 
+    if (isScale && oai_settings.use_alt_scale) {
+        return sendAltScaleRequest(openai_msgs_tosend, logit_bias, signal)
+    }
+
     const model = getChatCompletionModel();
     const generate_data = {
         "messages": openai_msgs_tosend,

File 3 of 3: server endpoint

@@ -3329,11 +3329,11 @@ app.post("/generate_altscale", jsonParser, function (request, response_generate_
             modelType: 'OpenAi',
             maxTokens: request.body.max_tokens,
             temperature: request.body.temp,
-            stop: null,
+            stop: "user:",
             suffix: null,
-            topP: null,
+            topP: request.body.top_p,
             logprobs: null,
-            logitBias: null
+            logitBias: request.body.logit_bias
         },
         inputs: [
             {
@@ -3348,11 +3348,8 @@ app.post("/generate_altscale", jsonParser, function (request, response_generate_
                 values: {
                     'variant.taxonomy': ['undefined'],
                     'prompt.variablesSourceDataId': ['undefined'],
-                    'modelParameters.stop': ['undefined'],
                     'modelParameters.suffix': ['undefined'],
-                    'modelParameters.topP': ['undefined'],
                     'modelParameters.logprobs': ['undefined'],
-                    'modelParameters.logitBias': ['undefined']
                 }
             }
         })
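
With these changes the endpoint stops hard-coding nulls: top_p and logit_bias now pass through from the client, and the fixed stop: "user:" presumably halts generation before the model begins writing the user's next turn in the flattened "role: content" prompt. A sketch of the resulting parameters for a hypothetical request body:

// Hypothetical client request body; field names mirror what the endpoint reads.
const body = { max_tokens: 300, temp: 0.9, top_p: 0.95, logit_bias: {} };

const modelParameters = {
    modelType: 'OpenAi',
    maxTokens: body.max_tokens,
    temperature: body.temp,
    stop: 'user:',              // cut off before an impersonated user turn
    suffix: null,
    topP: body.top_p,           // newly forwarded
    logprobs: null,
    logitBias: body.logit_bias, // newly forwarded
};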