Make generate_data preparation a switch-case

We now switch on main_api instead of chaining if/else blocks. In the
future, I'd like to move the OpenAI-specific token-count side effects
outside the switch-case and extract the generate_data preparation into
its own function that we can pass main_api into.
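
(Editor's sketch, not part of the commit: one way that future extraction could look. The name prepareGenerateData and the per-backend payload shapes are assumptions for illustration; the real code calls getKoboldGenerationData, getTextGenGenerationData, getNovelGenerationData, and prepareOpenAIMessages.)

// Hypothetical follow-up refactor, sketched under the assumptions above.
// Each backend returns its generate_data; side effects (token counts,
// setInContextMessages) stay with the caller.
async function prepareGenerateData(main_api, finalPrompt, maxLength) {
    switch (main_api) {
        case 'koboldhorde':
        case 'kobold':
            // stands in for getKoboldGenerationData(...)
            return { prompt: finalPrompt, max_length: maxLength };
        case 'textgenerationwebui':
            // stands in for getTextGenGenerationData(...)
            return { prompt: finalPrompt, max_new_tokens: maxLength };
        case 'novel':
            // stands in for getNovelGenerationData(...)
            return { input: finalPrompt, parameters: { max_length: maxLength } };
        case 'openai':
            // the real code would await prepareOpenAIMessages(...) here
            return { prompt: finalPrompt };
        default:
            throw new Error(`Unknown main_api: ${main_api}`);
    }
}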
valadaptive
2024-04-25 08:31:10 -04:00
parent fe663c4f04
commit ff9345a843


@@ -3905,9 +3905,10 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
     let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate
     let thisPromptBits = [];

-    // TODO: Make this a switch
     let generate_data;
-    if (main_api == 'koboldhorde' || main_api == 'kobold') {
+    switch (main_api) {
+    case 'koboldhorde':
+    case 'kobold':
         if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) {
             maxLength = Math.min(maxLength, adjustedParams.maxLength);
             maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors
@@ -3927,15 +3928,16 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
             const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context;
             generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type);
         }
-    }
-    else if (main_api == 'textgenerationwebui') {
+        break;
+    case 'textgenerationwebui':
         generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type);
-    }
-    else if (main_api == 'novel') {
+        break;
+    case 'novel': {
         const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
         generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type);
+        break;
     }
-    else if (main_api == 'openai') {
+    case 'openai': {
         let [prompt, counts] = await prepareOpenAIMessages({
             name2: name2,
             charDescription: description,
@@ -3957,6 +3959,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         }, dryRun);
         generate_data = { prompt: prompt };

+        // TODO: move these side-effects somewhere else, so this switch-case solely sets generate_data
         // counts will return false if the user has not enabled the token breakdown feature
         if (counts) {
             parseTokenCounts(counts, thisPromptBits);
@@ -3965,6 +3968,8 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         if (!dryRun) {
             setInContextMessages(openai_messages_count, type);
         }
+        break;
+    }
     }

     if (dryRun) {
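
(Editor's note: a miniature, self-contained example of the two switch idioms this diff leans on: stacked case labels fall through, so 'koboldhorde' shares the 'kobold' body, and braced cases get their own block scope, which is why the 'novel' and 'openai' cases can each declare their own locals.)

// Minimal illustration, not from the commit.
function pickBackend(main_api) {
    switch (main_api) {
        case 'koboldhorde':
        case 'kobold':
            // fall-through: both labels run this body
            return 'kobold-style backend';
        case 'novel': {
            const presetSettings = 'novel preset'; // scoped to this case
            return presetSettings;
        }
        case 'openai': {
            const presetSettings = 'openai preset'; // no redeclaration clash
            return presetSettings;
        }
        default:
            return 'unknown backend';
    }
}

console.log(pickBackend('koboldhorde')); // -> 'kobold-style backend'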