fixed ordering in image generation source, and max token length for llm models

Edward Kim 2024-08-10 21:55:52 -04:00
parent 7546030c48
commit 88ada0f67d
2 changed files with 14 additions and 3 deletions

View File

@@ -37,6 +37,7 @@
     </label>
     <label for="sd_source" data-i18n="Source">Source</label>
     <select id="sd_source">
+        <option value="blockentropy">Block Entropy</option>
         <option value="comfy">ComfyUI</option>
         <option value="drawthings">DrawThings HTTP API</option>
         <option value="extras">Extras API (local / remote)</option>
@@ -48,7 +49,6 @@
         <option value="auto">Stable Diffusion Web UI (AUTOMATIC1111)</option>
         <option value="horde">Stable Horde</option>
         <option value="togetherai">TogetherAI</option>
-        <option value="blockentropy">Block Entropy</option>
     </select>
     <div data-sd-source="auto">
         <label for="sd_auto_url">SD Web UI URL</label>

View File

@@ -120,6 +120,7 @@ const default_bias_presets = {
 const max_2k = 2047;
 const max_4k = 4095;
 const max_8k = 8191;
+const max_12k = 12287;
 const max_16k = 16383;
 const max_32k = 32767;
 const max_64k = 65535;
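Each max_Nk constant is one less than N × 1024, the largest value a slider capped at an N-thousand-token context can take; the new max_12k follows the same pattern (12 × 1024 − 1 = 12287). A quick sanity check of the convention:

// The max_Nk naming convention: each constant is N * 1024 - 1.
const maxTokens = (k) => k * 1024 - 1;
console.log(maxTokens(12)); // 12287, matches max_12k
console.log(maxTokens(16)); // 16383, matches max_16k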
@@ -4333,8 +4334,18 @@ async function onModelChange() {
     if (oai_settings.chat_completion_source === chat_completion_sources.BLOCKENTROPY) {
         if (oai_settings.max_context_unlocked) {
             $('#openai_max_context').attr('max', unlocked_max);
-        } else {
-            $('#openai_max_context').attr('max', max_32k);
         }
+        else if (oai_settings.groq_model.includes('llama3.1')) {
+            $('#openai_max_context').attr('max', max_16k);
+        }
+        else if (oai_settings.groq_model.includes('72b')) {
+            $('#openai_max_context').attr('max', max_16k);
+        }
+        else if (oai_settings.groq_model.includes('120b')) {
+            $('#openai_max_context').attr('max', max_12k);
+        }
+        else {
+            $('#openai_max_context').attr('max', max_8k);
+        }
         oai_settings.openai_max_context = Math.min(oai_settings.openai_max_context, Number($('#openai_max_context').attr('max')));
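The second hunk replaces the flat 32k cap for Block Entropy with per-model limits: the slider maximum is chosen from substrings of the selected model name, and the stored value is then clamped to the new cap with Math.min. As a sketch only (assuming the max_* constants above; this is not the commit's code), the same selection could be written as a lookup table:

// Sketch: the cap selection as data rather than an if/else chain.
// First match wins, mirroring the order of the branches in the diff.
const contextCaps = [
    { match: 'llama3.1', max: max_16k },
    { match: '72b', max: max_16k },
    { match: '120b', max: max_12k },
];

function contextCapFor(modelName) {
    const hit = contextCaps.find(({ match }) => modelName.includes(match));
    return hit ? hit.max : max_8k;
}

A table keeps the substring-to-limit mapping in one place, though the if/else chain in the commit is just as clear at this size.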