Merge pull request #2492 from fizzAI/staging

Add Gemma 2 9b to available Groq models
This commit is contained in:
Cohee
2024-07-08 10:47:10 +03:00
committed by GitHub
2 changed files with 2 additions and 1 deletion

View File

@@ -2847,6 +2847,7 @@
 <option value="llama3-70b-8192">llama3-70b-8192</option>
 <option value="mixtral-8x7b-32768">mixtral-8x7b-32768</option>
 <option value="gemma-7b-it">gemma-7b-it</option>
+<option value="gemma2-9b-it">gemma2-9b-it</option>
 </select>
 </div>
 <div id="perplexity_form" data-source="perplexity">

View File

@@ -4112,7 +4112,7 @@ async function onModelChange() {
 if (oai_settings.max_context_unlocked) {
 $('#openai_max_context').attr('max', unlocked_max);
 }
-else if (['llama3-8b-8192', 'llama3-70b-8192', 'gemma-7b-it'].includes(oai_settings.groq_model)) {
+else if (['llama3-8b-8192', 'llama3-70b-8192', 'gemma-7b-it', 'gemma2-9b-it'].includes(oai_settings.groq_model)) {
 $('#openai_max_context').attr('max', max_8k);
 }
 else if (['mixtral-8x7b-32768'].includes(oai_settings.groq_model)) {