Mirror of https://github.com/SillyTavern/SillyTavern.git, synced 2025-06-05 21:59:27 +02:00
Make generate_data preparation a switch-case
We switch based on main_api. In the future, I'd like to move the openai-specific token count stuff outside the switch case and extract the generate_data preparation into its own function that we can pass main_api into.
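As a rough sketch of that future refactor (not part of this commit), the preparation could live in a helper that takes main_api and returns generate_data, with the OpenAI token-count side effects already moved out to the caller, in the order the message proposes. Every name below except main_api and the existing helpers (getKoboldGenerationData, getTextGenGenerationData, getNovelGenerationData, prepareOpenAIMessages) is hypothetical.

// Hypothetical extraction sketched from the commit message above.
// prepareGenerateData and the `ctx` parameter bag are assumed names,
// not existing SillyTavern APIs; the get*GenerationData helpers are
// the real ones already defined in public/script.js.
async function prepareGenerateData(main_api, ctx) {
    switch (main_api) {
        case 'koboldhorde':
        case 'kobold':
            // GUI-settings fallback omitted for brevity; see the diff below.
            return getKoboldGenerationData(ctx.finalPrompt, ctx.presetSettings, ctx.maxLength, ctx.maxContext, main_api == 'koboldhorde', ctx.type);
        case 'textgenerationwebui':
            return getTextGenGenerationData(ctx.finalPrompt, ctx.maxLength, ctx.isImpersonate, ctx.isContinue, ctx.cfgValues, ctx.type);
        case 'novel':
            return getNovelGenerationData(ctx.finalPrompt, ctx.presetSettings, ctx.maxLength, ctx.isImpersonate, ctx.isContinue, ctx.cfgValues, ctx.type);
        case 'openai': {
            // Token counting would stay with the caller, per the TODO added in this diff.
            const [prompt] = await prepareOpenAIMessages(ctx.oaiParams, ctx.dryRun);
            return { prompt };
        }
        default:
            throw new Error(`Unknown main_api: ${main_api}`);
    }
}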
public/script.js (115 changed lines)
@@ -3905,65 +3905,70 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
    let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate
    let thisPromptBits = [];

-    // TODO: Make this a switch
    let generate_data;
-    if (main_api == 'koboldhorde' || main_api == 'kobold') {
-        if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) {
-            maxLength = Math.min(maxLength, adjustedParams.maxLength);
-            maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors
-        }
-
-        generate_data = {
-            prompt: finalPrompt,
-            gui_settings: true,
-            max_length: maxLength,
-            max_context_length: max_context,
-            api_server,
-        };
-
-        if (preset_settings != 'gui') {
-            const isHorde = main_api == 'koboldhorde';
-            const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]];
-            const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context;
-            generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type);
-        }
-    }
-    else if (main_api == 'textgenerationwebui') {
-        generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type);
-    }
-    else if (main_api == 'novel') {
-        const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
-        generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type);
-    }
-    else if (main_api == 'openai') {
-        let [prompt, counts] = await prepareOpenAIMessages({
-            name2: name2,
-            charDescription: description,
-            charPersonality: personality,
-            Scenario: scenario,
-            worldInfoBefore: worldInfoBefore,
-            worldInfoAfter: worldInfoAfter,
-            extensionPrompts: extension_prompts,
-            bias: promptBias,
-            type: type,
-            quietPrompt: quiet_prompt,
-            quietImage: quietImage,
-            cyclePrompt: cyclePrompt,
-            systemPromptOverride: system,
-            jailbreakPromptOverride: jailbreak,
-            personaDescription: persona,
-            messages: oaiMessages,
-            messageExamples: oaiMessageExamples,
-        }, dryRun);
-        generate_data = { prompt: prompt };
-
-        // counts will return false if the user has not enabled the token breakdown feature
-        if (counts) {
-            parseTokenCounts(counts, thisPromptBits);
-        }
-
-        if (!dryRun) {
-            setInContextMessages(openai_messages_count, type);
-        }
-    }
+    switch (main_api) {
+        case 'koboldhorde':
+        case 'kobold':
+            if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) {
+                maxLength = Math.min(maxLength, adjustedParams.maxLength);
+                maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors
+            }
+
+            generate_data = {
+                prompt: finalPrompt,
+                gui_settings: true,
+                max_length: maxLength,
+                max_context_length: max_context,
+                api_server,
+            };
+
+            if (preset_settings != 'gui') {
+                const isHorde = main_api == 'koboldhorde';
+                const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]];
+                const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context;
+                generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type);
+            }
+            break;
+        case 'textgenerationwebui':
+            generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type);
+            break;
+        case 'novel': {
+            const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
+            generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type);
+            break;
+        }
+        case 'openai': {
+            let [prompt, counts] = await prepareOpenAIMessages({
+                name2: name2,
+                charDescription: description,
+                charPersonality: personality,
+                Scenario: scenario,
+                worldInfoBefore: worldInfoBefore,
+                worldInfoAfter: worldInfoAfter,
+                extensionPrompts: extension_prompts,
+                bias: promptBias,
+                type: type,
+                quietPrompt: quiet_prompt,
+                quietImage: quietImage,
+                cyclePrompt: cyclePrompt,
+                systemPromptOverride: system,
+                jailbreakPromptOverride: jailbreak,
+                personaDescription: persona,
+                messages: oaiMessages,
+                messageExamples: oaiMessageExamples,
+            }, dryRun);
+            generate_data = { prompt: prompt };
+
+            // TODO: move these side-effects somewhere else, so this switch-case solely sets generate_data
+            // counts will return false if the user has not enabled the token breakdown feature
+            if (counts) {
+                parseTokenCounts(counts, thisPromptBits);
+            }
+
+            if (!dryRun) {
+                setInContextMessages(openai_messages_count, type);
+            }
+            break;
+        }
+    }
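If both follow-ups from the commit message landed, this whole block would collapse at the call site to a single line; a hedged sketch, reusing the hypothetical names from the earlier snippet:

// Inside Generate(), assuming token-count handling was already relocated:
generate_data = await prepareGenerateData(main_api, ctx);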