mirror of
https://github.com/SillyTavern/SillyTavern.git
synced 2025-06-05 21:59:27 +02:00
Allow using JSON schema with llamacpp server
This commit is contained in:
@ -1458,7 +1458,7 @@
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div data-newbie-hidden id="json_schema_block" data-tg-type="tabby" class="wide100p">
|
<div data-newbie-hidden id="json_schema_block" data-tg-type="tabby, llamacpp" class="wide100p">
|
||||||
<hr class="wide100p">
|
<hr class="wide100p">
|
||||||
<h4 class="wide100p textAlignCenter"><span data-i18n="JSON Schema">JSON Schema</span>
|
<h4 class="wide100p textAlignCenter"><span data-i18n="JSON Schema">JSON Schema</span>
|
||||||
<a href="https://json-schema.org/learn/getting-started-step-by-step" target="_blank">
|
<a href="https://json-schema.org/learn/getting-started-step-by-step" target="_blank">
|
||||||
|
@ -991,7 +991,7 @@ export function getTextGenModel() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export function isJsonSchemaSupported() {
|
export function isJsonSchemaSupported() {
|
||||||
return settings.type === TABBY && main_api === 'textgenerationwebui';
|
return [TABBY, LLAMACPP].includes(settings.type) && main_api === 'textgenerationwebui';
|
||||||
}
|
}
|
||||||
|
|
||||||
export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate, isContinue, cfgValues, type) {
|
export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate, isContinue, cfgValues, type) {
|
||||||
@ -1065,7 +1065,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
|
|||||||
'guidance_scale': cfgValues?.guidanceScale?.value ?? settings.guidance_scale ?? 1,
|
'guidance_scale': cfgValues?.guidanceScale?.value ?? settings.guidance_scale ?? 1,
|
||||||
'negative_prompt': cfgValues?.negativePrompt ?? substituteParams(settings.negative_prompt) ?? '',
|
'negative_prompt': cfgValues?.negativePrompt ?? substituteParams(settings.negative_prompt) ?? '',
|
||||||
'grammar_string': settings.grammar_string,
|
'grammar_string': settings.grammar_string,
|
||||||
'json_schema': settings.type === TABBY ? settings.json_schema : undefined,
|
'json_schema': [TABBY, LLAMACPP].includes(settings.type) ? settings.json_schema : undefined,
|
||||||
// llama.cpp aliases. In case someone wants to use LM Studio as Text Completion API
|
// llama.cpp aliases. In case someone wants to use LM Studio as Text Completion API
|
||||||
'repeat_penalty': settings.rep_pen,
|
'repeat_penalty': settings.rep_pen,
|
||||||
'tfs_z': settings.tfs,
|
'tfs_z': settings.tfs,
|
||||||
@ -1150,5 +1150,15 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
|
|||||||
|
|
||||||
eventSource.emitAndWait(event_types.TEXT_COMPLETION_SETTINGS_READY, params);
|
eventSource.emitAndWait(event_types.TEXT_COMPLETION_SETTINGS_READY, params);
|
||||||
|
|
||||||
|
// Grammar conflicts with json_schema
|
||||||
|
if (settings.type === LLAMACPP) {
|
||||||
|
if (params.json_schema && Object.keys(params.json_schema).length > 0) {
|
||||||
|
delete params.grammar_string;
|
||||||
|
delete params.grammar;
|
||||||
|
} else {
|
||||||
|
delete params.json_schema;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return params;
|
return params;
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user