Mirror of https://github.com/SillyTavern/SillyTavern.git, synced 2025-06-05 21:59:27 +02:00

Make generate_data preparation a switch-case
We switch on main_api. In the future, I'd like to move the openai-specific token count handling outside the switch-case and extract the generate_data preparation into its own function that takes main_api as a parameter.
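A minimal sketch of that future shape, assuming a hypothetical helper name (getGenerateData) and with free variables such as presetSettings, maxContext, isHorde, cfgValues, and dryRun still supplied by Generate()'s scope:

    // Hypothetical follow-up, not part of this commit: each branch only
    // builds and returns generate_data; the openai token-count side
    // effects would stay behind at the call site in Generate().
    async function getGenerateData(main_api, finalPrompt, maxLength, type) {
        switch (main_api) {
            case 'koboldhorde':
            case 'kobold':
                return getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type);
            case 'textgenerationwebui':
                return getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type);
            case 'novel': {
                const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
                return getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type);
            }
            case 'openai': {
                const [prompt] = await prepareOpenAIMessages({ /* ...same arguments as in the diff below... */ }, dryRun);
                return { prompt: prompt };
            }
        }
    }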
@@ -3905,9 +3905,10 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
     let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate
     let thisPromptBits = [];
 
-    // TODO: Make this a switch
     let generate_data;
-    if (main_api == 'koboldhorde' || main_api == 'kobold') {
+    switch (main_api) {
+    case 'koboldhorde':
+    case 'kobold':
         if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) {
             maxLength = Math.min(maxLength, adjustedParams.maxLength);
             maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors
@@ -3927,15 +3928,16 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
             const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context;
             generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type);
         }
-    }
-    else if (main_api == 'textgenerationwebui') {
+        break;
+    case 'textgenerationwebui':
         generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type);
-    }
-    else if (main_api == 'novel') {
+        break;
+    case 'novel': {
         const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
         generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type);
+        break;
     }
-    else if (main_api == 'openai') {
+    case 'openai': {
         let [prompt, counts] = await prepareOpenAIMessages({
             name2: name2,
             charDescription: description,
@@ -3957,6 +3959,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         }, dryRun);
         generate_data = { prompt: prompt };
 
+        // TODO: move these side-effects somewhere else, so this switch-case solely sets generate_data
         // counts will return false if the user has not enabled the token breakdown feature
         if (counts) {
             parseTokenCounts(counts, thisPromptBits);
@@ -3965,6 +3968,8 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         if (!dryRun) {
             setInContextMessages(openai_messages_count, type);
         }
+        break;
+    }
     }
 
     if (dryRun) {
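One detail worth noting in the new shape: 'koboldhorde' and 'kobold' are adjacent case labels with no statements or break between them, so they fall through to a single shared body, and the horde-only length adjustment stays gated by the inner main_api == 'koboldhorde' check. A minimal illustration of that JavaScript fall-through pattern (placeholder names):

    switch (api) {
        case 'koboldhorde': // no statements and no break: execution falls through
        case 'kobold':      // so both labels share this body
            buildKoboldData(); // placeholder for the shared branch body
            break;
    }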