Sampler priority support

kalomaze 2024-02-10 14:57:41 -06:00
parent df97f5364b
commit 2065f95edc
3 changed files with 15 additions and 0 deletions

View File

@@ -47,6 +47,7 @@
"ban_eos_token": false,
"skip_special_tokens": true,
"streaming": false,
"sampler_priority": "temperature\ndynamic_temperature\nquadratic_sampling\ntop_k\ntop_p\ntypical_p\nepsilon_cutoff\neta_cutoff\ntfs\ntop_a\nmin_p\nmirostat"
"mirostat_mode": 0,
"mirostat_tau": 5,
"mirostat_eta": 0.1,

View File

@@ -1451,6 +1451,16 @@
<textarea id="banned_tokens_textgenerationwebui" class="text_pole textarea_compact" name="banned_tokens_textgenerationwebui" rows="3" placeholder="Example:&#10;some text&#10;[42, 69, 1337]"></textarea>
</div>
</div>
<div data-newbie-hidden class="wide100p">
<hr data-newbie-hidden class="width100p">
<h4 class="range-block-title justifyCenter">
<span data-i18n="Sampler Priority">Sampler Priority</span>
<div class="margin5 fa-solid fa-circle-info opacity50p " title="Ooba only.&#13;Determines the order of samplers."></div>
</h4>
<div class="wide100p">
<textarea id="sampler_priority_textgenerationwebui" class="text_pole textarea_compact" name="sampler_priority_textgenerationwebui" rows="3" placeholder="temperature&#10;dynamic_temperature&#10;quadratic_sampling&#10;top_k&#10;top_p&#10;typical_p&#10;epsilon_cutoff&#10;eta_cutoff&#10;tfs&#10;top_a&#10;min_p&#10;mirostat"></textarea>
</div>
</div>
<div class="range-block wide100p">
<div class="range-block-title title_restorable">
<span data-i18n="Logit Bias">Logit Bias</span>

View File

@@ -96,6 +96,7 @@ const settings = {
negative_prompt: '',
grammar_string: '',
banned_tokens: '',
sampler_priority: '',
//n_aphrodite: 1,
//best_of_aphrodite: 1,
ignore_eos_token_aphrodite: false,
@@ -170,6 +171,7 @@ const setting_names = [
//'log_probs_aphrodite',
//'prompt_log_probs_aphrodite'
'sampler_order',
'sampler_priority',
'n',
'logit_bias',
'custom_model',
@@ -827,6 +829,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
'dynatemp_range': settings.dynatemp ? (settings.max_temp - settings.min_temp) / 2 : 0,
'dynatemp_exponent': settings.dynatemp ? settings.dynatemp_exponent : 1,
'smoothing_factor': settings.smoothing_factor,
'sampler_priority': (settings.type === OOBA || settings.type === APHRODITE || settings.type === TABBY) ? settings.sampler_priority : undefined,
'stopping_strings': getStoppingStrings(isImpersonate, isContinue),
'stop': getStoppingStrings(isImpersonate, isContinue),
'truncation_length': max_context,
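
For illustration, when the connected backend is Ooba, Aphrodite, or Tabby, the generation payload carries the raw newline-delimited string; for any other backend type the key resolves to undefined and is dropped during JSON serialization. A minimal sketch of the resulting fragment, assuming the default preset value:

// Illustrative only: payload fragment when settings.type === OOBA.
const payloadFragment = {
    sampler_priority: 'temperature\ndynamic_temperature\nquadratic_sampling\ntop_k\ntop_p\n'
        + 'typical_p\nepsilon_cutoff\neta_cutoff\ntfs\ntop_a\nmin_p\nmirostat',
};

// For unsupported backends the value is undefined, and JSON.stringify drops it:
// JSON.stringify({ sampler_priority: undefined }) === '{}'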
@@ -860,6 +863,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
'guidance_scale': cfgValues?.guidanceScale?.value ?? settings.guidance_scale ?? 1,
'negative_prompt': cfgValues?.negativePrompt ?? substituteParams(settings.negative_prompt) ?? '',
'grammar_string': settings.grammar_string,
'sampler_priority': (settings.type === OOBA || settings.type === APHRODITE || settings.type === TABBY) ? settings.sampler_priority : undefined,
// llama.cpp aliases. In case someone wants to use LM Studio as Text Completion API
'repeat_penalty': settings.rep_pen,
'tfs_z': settings.tfs,
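
The textarea value is forwarded verbatim as a single newline-delimited string. If a backend expects an ordered array of sampler names instead, a small normalization step could be applied before building the payload; the helper below is a hypothetical sketch, not part of this commit:

// Hypothetical helper: turn the textarea value into an ordered array of
// sampler names, trimming whitespace and dropping blank lines.
function parseSamplerPriority(raw) {
    return String(raw ?? '')
        .split('\n')
        .map(line => line.trim())
        .filter(Boolean);
}

// Example: parseSamplerPriority('temperature\ntop_k\n\nmin_p')
// => ['temperature', 'top_k', 'min_p']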