Add vLLM as multimodal captioning source

Author: Cohee
Date: 2024-06-29 00:33:12 +03:00
Parent: a287ccfca2
Commit: 003066a036
4 changed files with 26 additions and 2 deletions


@@ -26,6 +26,7 @@
 <option value="openai">OpenAI</option>
 <option value="openrouter">OpenRouter</option>
 <option value="ooba" data-i18n="Text Generation WebUI (oobabooga)">Text Generation WebUI (oobabooga)</option>
+<option value="vllm">vLLM</option>
 </select>
 </div>
 <div class="flex1 flex-container flexFlowColumn flexNoGap">
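The first hunk registers vLLM in the captioning source dropdown. For context, vLLM exposes an OpenAI-compatible chat completions endpoint, so a multimodal caption request against it looks roughly like the sketch below; the base URL, model name, and prompt are placeholders, not SillyTavern's actual implementation.

```javascript
// Illustrative sketch only: what a multimodal caption request to a vLLM
// OpenAI-compatible server looks like. The URL, model name, and prompt are
// placeholders, not the values used by the extension itself.
async function captionWithVllm(imageBase64) {
    const response = await fetch('http://localhost:8000/v1/chat/completions', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            model: 'llava-hf/llava-1.5-7b-hf', // whichever multimodal model vLLM is serving
            messages: [{
                role: 'user',
                content: [
                    { type: 'text', text: 'Describe this image in detail.' },
                    { type: 'image_url', image_url: { url: `data:image/jpeg;base64,${imageBase64}` } },
                ],
            }],
            max_tokens: 300,
        }),
    });
    const data = await response.json();
    return data.choices[0].message.content;
}
```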
@@ -66,6 +67,7 @@
 <option data-type="llamacpp" value="llamacpp_current" data-i18n="currently_loaded">[Currently loaded]</option>
 <option data-type="ooba" value="ooba_current" data-i18n="currently_loaded">[Currently loaded]</option>
 <option data-type="koboldcpp" value="koboldcpp_current" data-i18n="currently_loaded">[Currently loaded]</option>
+<option data-type="vllm" value="vllm_current" data-i18n="currently_selected">[Currently selected]</option>
 <option data-type="custom" value="custom_current" data-i18n="currently_selected">[Currently selected]</option>
 </select>
 </div>
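The second hunk adds a `[Currently selected]` entry gated by `data-type="vllm"` in the model dropdown. The `data-type` attribute is what lets the UI show only the model options that match the chosen source; a minimal sketch of that filtering logic follows (the select element id is assumed, and this is not the extension's actual code).

```javascript
// Illustrative sketch only: hide model options whose data-type does not match
// the currently selected captioning source. '#caption_model' is an assumed id.
function filterModelOptions(selectedSource) {
    const modelSelect = document.querySelector('#caption_model');
    for (const option of modelSelect.querySelectorAll('option')) {
        const type = option.dataset.type;
        option.hidden = Boolean(type) && type !== selectedSource;
    }
}

// After switching the source to vLLM, only options tagged data-type="vllm"
// (such as the new "vllm_current" entry) remain visible.
filterModelOptions('vllm');
```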