diff --git a/default/settings.json b/default/settings.json index bd11111d9..bdd82b11a 100644 --- a/default/settings.json +++ b/default/settings.json @@ -47,6 +47,7 @@ "ban_eos_token": false, "skip_special_tokens": true, "streaming": false, + "sampler_priority": "temperature\ndynamic_temperature\nquadratic_sampling\ntop_k\ntop_p\ntypical_p\nepsilon_cutoff\neta_cutoff\ntfs\ntop_a\nmin_p\nmirostat", "mirostat_mode": 0, "mirostat_tau": 5, "mirostat_eta": 0.1, diff --git a/public/index.html b/public/index.html index 94427889c..e4fd16838 100644 --- a/public/index.html +++ b/public/index.html @@ -1451,6 +1451,16 @@ +
+
+

+ Sampler Priority +
+

+
+ +
+
Logit Bias diff --git a/public/scripts/textgen-settings.js b/public/scripts/textgen-settings.js index b1a292e68..b190e3880 100644 --- a/public/scripts/textgen-settings.js +++ b/public/scripts/textgen-settings.js @@ -96,6 +96,7 @@ const settings = { negative_prompt: '', grammar_string: '', banned_tokens: '', + sampler_priority: '', //n_aphrodite: 1, //best_of_aphrodite: 1, ignore_eos_token_aphrodite: false, @@ -170,6 +171,7 @@ const setting_names = [ //'log_probs_aphrodite', //'prompt_log_probs_aphrodite' 'sampler_order', + 'sampler_priority', 'n', 'logit_bias', 'custom_model', @@ -827,6 +829,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate, 'dynatemp_range': settings.dynatemp ? (settings.max_temp - settings.min_temp) / 2 : 0, 'dynatemp_exponent': settings.dynatemp ? settings.dynatemp_exponent : 1, 'smoothing_factor': settings.smoothing_factor, + 'sampler_priority': (settings.type === OOBA || settings.type === APHRODITE || settings.type === TABBY) ? settings.sampler_priority : undefined, 'stopping_strings': getStoppingStrings(isImpersonate, isContinue), 'stop': getStoppingStrings(isImpersonate, isContinue), 'truncation_length': max_context, @@ -860,6 +863,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate, 'guidance_scale': cfgValues?.guidanceScale?.value ?? settings.guidance_scale ?? 1, 'negative_prompt': cfgValues?.negativePrompt ?? substituteParams(settings.negative_prompt) ?? '', 'grammar_string': settings.grammar_string, + 'sampler_priority': (settings.type === OOBA || settings.type === APHRODITE || settings.type === TABBY) ? settings.sampler_priority : undefined, // llama.cpp aliases. In case someone wants to use LM Studio as Text Completion API 'repeat_penalty': settings.rep_pen, 'tfs_z': settings.tfs,