mirror of
https://github.com/SillyTavern/SillyTavern.git
synced 2025-06-05 21:59:27 +02:00
Generic generate methods (#3566)
* sendOpenAIRequest/getTextGenGenerationData methods are improved, now it can use custom API, instead of active ones * Added missing model param * Removed unnecessary variable * active_oai_settings -> settings * settings -> textgenerationwebui_settings * Better presetToSettings names, simpler settings name in getTextGenGenerationData, * Removed unused jailbreak_system * Reverted most core changes, new custom-request.js file * Forced stream to false, removed duplicate method, exported settingsToUpdate * Rewrite typedefs to define props one by one * Added extractData param for simplicity * Fixed typehints * Fixed typehints (again) --------- Co-authored-by: Cohee <18619528+Cohee1207@users.noreply.github.com>
This commit is contained in:
@ -28,7 +28,6 @@
|
||||
"wrap_in_quotes": false,
|
||||
"names_behavior": 0,
|
||||
"send_if_empty": "",
|
||||
"jailbreak_system": false,
|
||||
"impersonation_prompt": "[Write your next reply from the point of view of {{user}}, using the chat history so far as a guideline for the writing style of {{user}}. Don't write as {{char}} or system. Don't describe actions of {{char}}.]",
|
||||
"new_chat_prompt": "[Start a new Chat]",
|
||||
"new_group_chat_prompt": "[Start a new group chat. Group members: {{group}}]",
|
||||
|
@ -626,7 +626,6 @@
|
||||
"ai21_model": "jamba-1.5-large",
|
||||
"windowai_model": "",
|
||||
"openrouter_model": "OR_Website",
|
||||
"jailbreak_system": true,
|
||||
"reverse_proxy": "",
|
||||
"chat_completion_source": "openai",
|
||||
"max_context_unlocked": false,
|
||||
|
@ -5647,7 +5647,7 @@ export async function sendStreamingRequest(type, data) {
|
||||
* @returns {string} Generation URL
|
||||
* @throws {Error} If the API is unknown
|
||||
*/
|
||||
function getGenerateUrl(api) {
|
||||
export function getGenerateUrl(api) {
|
||||
switch (api) {
|
||||
case 'kobold':
|
||||
return '/api/backends/kobold/generate';
|
||||
|
189
public/scripts/custom-request.js
Normal file
189
public/scripts/custom-request.js
Normal file
@ -0,0 +1,189 @@
|
||||
import { getPresetManager } from './preset-manager.js';
|
||||
import { extractMessageFromData, getGenerateUrl, getRequestHeaders } from '../script.js';
|
||||
import { getTextGenServer } from './textgen-settings.js';
|
||||
|
||||
// #region Type Definitions
|
||||
/**
|
||||
* @typedef {Object} TextCompletionRequestBase
|
||||
* @property {string} prompt - The text prompt for completion
|
||||
* @property {number} max_tokens - Maximum number of tokens to generate
|
||||
* @property {string} [model] - Optional model name
|
||||
* @property {string} api_type - Type of API to use
|
||||
* @property {string} [api_server] - Optional API server URL
|
||||
* @property {number} [temperature] - Optional temperature parameter
|
||||
*/
|
||||
|
||||
/** @typedef {Record<string, any> & TextCompletionRequestBase} TextCompletionRequest */
|
||||
|
||||
/**
|
||||
* @typedef {Object} TextCompletionPayloadBase
|
||||
* @property {string} prompt - The text prompt for completion
|
||||
* @property {number} max_tokens - Maximum number of tokens to generate
|
||||
* @property {number} max_new_tokens - Alias for max_tokens
|
||||
* @property {string} [model] - Optional model name
|
||||
* @property {string} api_type - Type of API to use
|
||||
* @property {string} api_server - API server URL
|
||||
* @property {number} [temperature] - Optional temperature parameter
|
||||
*/
|
||||
|
||||
/** @typedef {Record<string, any> & TextCompletionPayloadBase} TextCompletionPayload */
|
||||
|
||||
/**
|
||||
* @typedef {Object} ChatCompletionMessage
|
||||
* @property {string} role - The role of the message author (e.g., "user", "assistant", "system")
|
||||
* @property {string} content - The content of the message
|
||||
*/
|
||||
|
||||
/**
|
||||
* @typedef {Object} ChatCompletionPayloadBase
|
||||
* @property {ChatCompletionMessage[]} messages - Array of chat messages
|
||||
* @property {string} [model] - Optional model name to use for completion
|
||||
* @property {string} chat_completion_source - Source provider for chat completion
|
||||
* @property {number} max_tokens - Maximum number of tokens to generate
|
||||
* @property {number} [temperature] - Optional temperature parameter for response randomness
|
||||
*/
|
||||
|
||||
/** @typedef {Record<string, any> & ChatCompletionPayloadBase} ChatCompletionPayload */
|
||||
// #endregion
|
||||
|
||||
/**
|
||||
* Creates & sends a text completion request. Streaming is not supported.
|
||||
*/
|
||||
export class TextCompletionService {
|
||||
static TYPE = 'textgenerationwebui';
|
||||
|
||||
/**
|
||||
* @param {TextCompletionRequest} custom
|
||||
* @returns {TextCompletionPayload}
|
||||
*/
|
||||
static createRequestData({ prompt, max_tokens, model, api_type, api_server, temperature, ...props }) {
|
||||
return {
|
||||
...props,
|
||||
prompt,
|
||||
max_tokens,
|
||||
max_new_tokens: max_tokens,
|
||||
model,
|
||||
api_type,
|
||||
api_server: api_server ?? getTextGenServer(api_type),
|
||||
temperature,
|
||||
stream: false,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends a text completion request to the specified server
|
||||
* @param {TextCompletionPayload} data Request data
|
||||
* @param {boolean?} extractData Extract message from the response. Default true
|
||||
* @returns {Promise<string | any>} Extracted data or the raw response
|
||||
* @throws {Error}
|
||||
*/
|
||||
static async sendRequest(data, extractData = true) {
|
||||
const response = await fetch(getGenerateUrl(this.TYPE), {
|
||||
method: 'POST',
|
||||
headers: getRequestHeaders(),
|
||||
cache: 'no-cache',
|
||||
body: JSON.stringify(data),
|
||||
signal: new AbortController().signal,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw await response.json();
|
||||
}
|
||||
|
||||
const json = await response.json();
|
||||
return extractData ? extractMessageFromData(json, this.TYPE) : json;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} presetName
|
||||
* @param {TextCompletionRequest} custom
|
||||
* @param {boolean?} extractData Extract message from the response. Default true
|
||||
* @returns {Promise<string | any>} Extracted data or the raw response
|
||||
* @throws {Error}
|
||||
*/
|
||||
static async sendRequestWithPreset(presetName, custom, extractData = true) {
|
||||
const presetManager = getPresetManager(this.TYPE);
|
||||
if (!presetManager) {
|
||||
throw new Error('Preset manager not found');
|
||||
}
|
||||
|
||||
const preset = presetManager.getCompletionPresetByName(presetName);
|
||||
if (!preset) {
|
||||
throw new Error('Preset not found');
|
||||
}
|
||||
|
||||
const data = this.createRequestData({ ...preset, ...custom });
|
||||
|
||||
return await this.sendRequest(data, extractData);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Creates & sends a chat completion request. Streaming is not supported.
 */
export class ChatCompletionService {
    static TYPE = 'openai';

    /**
     * Builds the request payload for a chat completion.
     * Extra properties in `custom` are passed through to the backend unchanged.
     * @param {ChatCompletionPayload} custom - Request parameters
     * @returns {ChatCompletionPayload} Payload ready to be sent to the backend
     */
    static createRequestData({ messages, model, chat_completion_source, max_tokens, temperature, ...props }) {
        return {
            ...props,
            messages,
            model,
            chat_completion_source,
            max_tokens,
            temperature,
            // Streaming is explicitly unsupported by this service.
            stream: false,
        };
    }

    /**
     * Sends a chat completion request
     * @param {ChatCompletionPayload} data Request data
     * @param {boolean?} extractData Extract message from the response. Default true
     * @param {AbortSignal?} signal Optional signal to cancel the request. Default null
     * @returns {Promise<string | any>} Extracted data or the raw response
     * @throws {Error} The parsed error body when the response is not OK
     */
    static async sendRequest(data, extractData = true, signal = null) {
        const response = await fetch('/api/backends/chat-completions/generate', {
            method: 'POST',
            headers: getRequestHeaders(),
            cache: 'no-cache',
            body: JSON.stringify(data),
            // Accept a caller-provided signal so requests can actually be aborted;
            // previously a throwaway AbortController was created here, whose signal
            // could never fire.
            signal: signal ?? new AbortController().signal,
        });

        if (!response.ok) {
            throw await response.json();
        }

        const json = await response.json();
        return extractData ? extractMessageFromData(json, this.TYPE) : json;
    }

    /**
     * Sends a chat completion request using a named preset as the base settings.
     * @param {string} presetName Name of the completion preset to apply
     * @param {ChatCompletionPayload} custom Overrides merged on top of the preset
     * @param {boolean?} extractData Extract message from the response. Default true
     * @returns {Promise<string | any>} Extracted data or the raw response
     * @throws {Error} If the preset manager or the preset cannot be found
     */
    static async sendRequestWithPreset(presetName, custom, extractData = true) {
        const presetManager = getPresetManager(this.TYPE);
        if (!presetManager) {
            throw new Error('Preset manager not found');
        }

        const preset = presetManager.getCompletionPresetByName(presetName);
        if (!preset) {
            throw new Error('Preset not found');
        }

        // Custom values take precedence over the preset's values.
        const data = this.createRequestData({ ...preset, ...custom });

        return await this.sendRequest(data, extractData);
    }
}
|
@ -224,6 +224,87 @@ const sensitiveFields = [
|
||||
'custom_include_headers',
|
||||
];
|
||||
|
||||
/**
 * Mapping of preset fields to their UI controls and internal setting names,
 * used when applying a preset to the current settings.
 *
 * preset_name -> [selector, setting_name, is_checkbox]
 * - selector: jQuery selector of the bound UI control ('' when the field has no
 *   dedicated control, e.g. `prompts` / `prompt_order`)
 * - setting_name: key of the corresponding property in the settings object
 * - is_checkbox: true when the control holds a boolean (checkbox/toggle),
 *   false when it holds a value (input/select/textarea)
 * @type {Record<string, [string, string, boolean]>}
 */
export const settingsToUpdate = {
    chat_completion_source: ['#chat_completion_source', 'chat_completion_source', false],
    temperature: ['#temp_openai', 'temp_openai', false],
    frequency_penalty: ['#freq_pen_openai', 'freq_pen_openai', false],
    presence_penalty: ['#pres_pen_openai', 'pres_pen_openai', false],
    top_p: ['#top_p_openai', 'top_p_openai', false],
    top_k: ['#top_k_openai', 'top_k_openai', false],
    top_a: ['#top_a_openai', 'top_a_openai', false],
    min_p: ['#min_p_openai', 'min_p_openai', false],
    repetition_penalty: ['#repetition_penalty_openai', 'repetition_penalty_openai', false],
    max_context_unlocked: ['#oai_max_context_unlocked', 'max_context_unlocked', true],
    openai_model: ['#model_openai_select', 'openai_model', false],
    claude_model: ['#model_claude_select', 'claude_model', false],
    windowai_model: ['#model_windowai_select', 'windowai_model', false],
    openrouter_model: ['#model_openrouter_select', 'openrouter_model', false],
    openrouter_use_fallback: ['#openrouter_use_fallback', 'openrouter_use_fallback', true],
    openrouter_group_models: ['#openrouter_group_models', 'openrouter_group_models', false],
    openrouter_sort_models: ['#openrouter_sort_models', 'openrouter_sort_models', false],
    openrouter_providers: ['#openrouter_providers_chat', 'openrouter_providers', false],
    openrouter_allow_fallbacks: ['#openrouter_allow_fallbacks', 'openrouter_allow_fallbacks', true],
    openrouter_middleout: ['#openrouter_middleout', 'openrouter_middleout', false],
    ai21_model: ['#model_ai21_select', 'ai21_model', false],
    mistralai_model: ['#model_mistralai_select', 'mistralai_model', false],
    cohere_model: ['#model_cohere_select', 'cohere_model', false],
    perplexity_model: ['#model_perplexity_select', 'perplexity_model', false],
    groq_model: ['#model_groq_select', 'groq_model', false],
    nanogpt_model: ['#model_nanogpt_select', 'nanogpt_model', false],
    deepseek_model: ['#model_deepseek_select', 'deepseek_model', false],
    zerooneai_model: ['#model_01ai_select', 'zerooneai_model', false],
    blockentropy_model: ['#model_blockentropy_select', 'blockentropy_model', false],
    custom_model: ['#custom_model_id', 'custom_model', false],
    custom_url: ['#custom_api_url_text', 'custom_url', false],
    custom_include_body: ['#custom_include_body', 'custom_include_body', false],
    custom_exclude_body: ['#custom_exclude_body', 'custom_exclude_body', false],
    custom_include_headers: ['#custom_include_headers', 'custom_include_headers', false],
    custom_prompt_post_processing: ['#custom_prompt_post_processing', 'custom_prompt_post_processing', false],
    google_model: ['#model_google_select', 'google_model', false],
    openai_max_context: ['#openai_max_context', 'openai_max_context', false],
    openai_max_tokens: ['#openai_max_tokens', 'openai_max_tokens', false],
    wrap_in_quotes: ['#wrap_in_quotes', 'wrap_in_quotes', true],
    names_behavior: ['#names_behavior', 'names_behavior', false],
    send_if_empty: ['#send_if_empty_textarea', 'send_if_empty', false],
    impersonation_prompt: ['#impersonation_prompt_textarea', 'impersonation_prompt', false],
    new_chat_prompt: ['#newchat_prompt_textarea', 'new_chat_prompt', false],
    new_group_chat_prompt: ['#newgroupchat_prompt_textarea', 'new_group_chat_prompt', false],
    new_example_chat_prompt: ['#newexamplechat_prompt_textarea', 'new_example_chat_prompt', false],
    continue_nudge_prompt: ['#continue_nudge_prompt_textarea', 'continue_nudge_prompt', false],
    bias_preset_selected: ['#openai_logit_bias_preset', 'bias_preset_selected', false],
    reverse_proxy: ['#openai_reverse_proxy', 'reverse_proxy', false],
    wi_format: ['#wi_format_textarea', 'wi_format', false],
    scenario_format: ['#scenario_format_textarea', 'scenario_format', false],
    personality_format: ['#personality_format_textarea', 'personality_format', false],
    group_nudge_prompt: ['#group_nudge_prompt_textarea', 'group_nudge_prompt', false],
    stream_openai: ['#stream_toggle', 'stream_openai', true],
    prompts: ['', 'prompts', false],
    prompt_order: ['', 'prompt_order', false],
    api_url_scale: ['#api_url_scale', 'api_url_scale', false],
    show_external_models: ['#openai_show_external_models', 'show_external_models', true],
    proxy_password: ['#openai_proxy_password', 'proxy_password', false],
    assistant_prefill: ['#claude_assistant_prefill', 'assistant_prefill', false],
    assistant_impersonation: ['#claude_assistant_impersonation', 'assistant_impersonation', false],
    claude_use_sysprompt: ['#claude_use_sysprompt', 'claude_use_sysprompt', true],
    use_makersuite_sysprompt: ['#use_makersuite_sysprompt', 'use_makersuite_sysprompt', true],
    use_alt_scale: ['#use_alt_scale', 'use_alt_scale', true],
    squash_system_messages: ['#squash_system_messages', 'squash_system_messages', true],
    image_inlining: ['#openai_image_inlining', 'image_inlining', true],
    inline_image_quality: ['#openai_inline_image_quality', 'inline_image_quality', false],
    continue_prefill: ['#continue_prefill', 'continue_prefill', true],
    continue_postfix: ['#continue_postfix', 'continue_postfix', false],
    function_calling: ['#openai_function_calling', 'function_calling', true],
    show_thoughts: ['#openai_show_thoughts', 'show_thoughts', true],
    reasoning_effort: ['#openai_reasoning_effort', 'reasoning_effort', false],
    seed: ['#seed_openai', 'seed', false],
    n: ['#n_openai', 'n', false],
    bypass_status_check: ['#openai_bypass_status_check', 'bypass_status_check', true],
};
|
||||
|
||||
const default_settings = {
|
||||
preset_settings_openai: 'Default',
|
||||
temp_openai: 1.0,
|
||||
@ -277,7 +358,6 @@ const default_settings = {
|
||||
openrouter_providers: [],
|
||||
openrouter_allow_fallbacks: true,
|
||||
openrouter_middleout: openrouter_middleout_types.ON,
|
||||
jailbreak_system: false,
|
||||
reverse_proxy: '',
|
||||
chat_completion_source: chat_completion_sources.OPENAI,
|
||||
max_context_unlocked: false,
|
||||
@ -357,7 +437,6 @@ const oai_settings = {
|
||||
openrouter_providers: [],
|
||||
openrouter_allow_fallbacks: true,
|
||||
openrouter_middleout: openrouter_middleout_types.ON,
|
||||
jailbreak_system: false,
|
||||
reverse_proxy: '',
|
||||
chat_completion_source: chat_completion_sources.OPENAI,
|
||||
max_context_unlocked: false,
|
||||
@ -3219,7 +3298,6 @@ function loadOpenAISettings(data, settings) {
|
||||
$('#openai_max_tokens').val(oai_settings.openai_max_tokens);
|
||||
|
||||
$('#wrap_in_quotes').prop('checked', oai_settings.wrap_in_quotes);
|
||||
$('#jailbreak_system').prop('checked', oai_settings.jailbreak_system);
|
||||
$('#openai_show_external_models').prop('checked', oai_settings.show_external_models);
|
||||
$('#openai_external_category').toggle(oai_settings.show_external_models);
|
||||
$('#claude_use_sysprompt').prop('checked', oai_settings.claude_use_sysprompt);
|
||||
@ -3503,7 +3581,6 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) {
|
||||
names_behavior: settings.names_behavior,
|
||||
send_if_empty: settings.send_if_empty,
|
||||
jailbreak_prompt: settings.jailbreak_prompt,
|
||||
jailbreak_system: settings.jailbreak_system,
|
||||
impersonation_prompt: settings.impersonation_prompt,
|
||||
new_chat_prompt: settings.new_chat_prompt,
|
||||
new_group_chat_prompt: settings.new_group_chat_prompt,
|
||||
@ -3923,82 +4000,6 @@ async function onLogitBiasPresetDeleteClick() {
|
||||
|
||||
// Load OpenAI preset settings
|
||||
function onSettingsPresetChange() {
|
||||
const settingsToUpdate = {
|
||||
chat_completion_source: ['#chat_completion_source', 'chat_completion_source', false],
|
||||
temperature: ['#temp_openai', 'temp_openai', false],
|
||||
frequency_penalty: ['#freq_pen_openai', 'freq_pen_openai', false],
|
||||
presence_penalty: ['#pres_pen_openai', 'pres_pen_openai', false],
|
||||
top_p: ['#top_p_openai', 'top_p_openai', false],
|
||||
top_k: ['#top_k_openai', 'top_k_openai', false],
|
||||
top_a: ['#top_a_openai', 'top_a_openai', false],
|
||||
min_p: ['#min_p_openai', 'min_p_openai', false],
|
||||
repetition_penalty: ['#repetition_penalty_openai', 'repetition_penalty_openai', false],
|
||||
max_context_unlocked: ['#oai_max_context_unlocked', 'max_context_unlocked', true],
|
||||
openai_model: ['#model_openai_select', 'openai_model', false],
|
||||
claude_model: ['#model_claude_select', 'claude_model', false],
|
||||
windowai_model: ['#model_windowai_select', 'windowai_model', false],
|
||||
openrouter_model: ['#model_openrouter_select', 'openrouter_model', false],
|
||||
openrouter_use_fallback: ['#openrouter_use_fallback', 'openrouter_use_fallback', true],
|
||||
openrouter_group_models: ['#openrouter_group_models', 'openrouter_group_models', false],
|
||||
openrouter_sort_models: ['#openrouter_sort_models', 'openrouter_sort_models', false],
|
||||
openrouter_providers: ['#openrouter_providers_chat', 'openrouter_providers', false],
|
||||
openrouter_allow_fallbacks: ['#openrouter_allow_fallbacks', 'openrouter_allow_fallbacks', true],
|
||||
openrouter_middleout: ['#openrouter_middleout', 'openrouter_middleout', false],
|
||||
ai21_model: ['#model_ai21_select', 'ai21_model', false],
|
||||
mistralai_model: ['#model_mistralai_select', 'mistralai_model', false],
|
||||
cohere_model: ['#model_cohere_select', 'cohere_model', false],
|
||||
perplexity_model: ['#model_perplexity_select', 'perplexity_model', false],
|
||||
groq_model: ['#model_groq_select', 'groq_model', false],
|
||||
nanogpt_model: ['#model_nanogpt_select', 'nanogpt_model', false],
|
||||
deepseek_model: ['#model_deepseek_select', 'deepseek_model', false],
|
||||
zerooneai_model: ['#model_01ai_select', 'zerooneai_model', false],
|
||||
blockentropy_model: ['#model_blockentropy_select', 'blockentropy_model', false],
|
||||
custom_model: ['#custom_model_id', 'custom_model', false],
|
||||
custom_url: ['#custom_api_url_text', 'custom_url', false],
|
||||
custom_include_body: ['#custom_include_body', 'custom_include_body', false],
|
||||
custom_exclude_body: ['#custom_exclude_body', 'custom_exclude_body', false],
|
||||
custom_include_headers: ['#custom_include_headers', 'custom_include_headers', false],
|
||||
custom_prompt_post_processing: ['#custom_prompt_post_processing', 'custom_prompt_post_processing', false],
|
||||
google_model: ['#model_google_select', 'google_model', false],
|
||||
openai_max_context: ['#openai_max_context', 'openai_max_context', false],
|
||||
openai_max_tokens: ['#openai_max_tokens', 'openai_max_tokens', false],
|
||||
wrap_in_quotes: ['#wrap_in_quotes', 'wrap_in_quotes', true],
|
||||
names_behavior: ['#names_behavior', 'names_behavior', false],
|
||||
send_if_empty: ['#send_if_empty_textarea', 'send_if_empty', false],
|
||||
impersonation_prompt: ['#impersonation_prompt_textarea', 'impersonation_prompt', false],
|
||||
new_chat_prompt: ['#newchat_prompt_textarea', 'new_chat_prompt', false],
|
||||
new_group_chat_prompt: ['#newgroupchat_prompt_textarea', 'new_group_chat_prompt', false],
|
||||
new_example_chat_prompt: ['#newexamplechat_prompt_textarea', 'new_example_chat_prompt', false],
|
||||
continue_nudge_prompt: ['#continue_nudge_prompt_textarea', 'continue_nudge_prompt', false],
|
||||
bias_preset_selected: ['#openai_logit_bias_preset', 'bias_preset_selected', false],
|
||||
reverse_proxy: ['#openai_reverse_proxy', 'reverse_proxy', false],
|
||||
wi_format: ['#wi_format_textarea', 'wi_format', false],
|
||||
scenario_format: ['#scenario_format_textarea', 'scenario_format', false],
|
||||
personality_format: ['#personality_format_textarea', 'personality_format', false],
|
||||
group_nudge_prompt: ['#group_nudge_prompt_textarea', 'group_nudge_prompt', false],
|
||||
stream_openai: ['#stream_toggle', 'stream_openai', true],
|
||||
prompts: ['', 'prompts', false],
|
||||
prompt_order: ['', 'prompt_order', false],
|
||||
api_url_scale: ['#api_url_scale', 'api_url_scale', false],
|
||||
show_external_models: ['#openai_show_external_models', 'show_external_models', true],
|
||||
proxy_password: ['#openai_proxy_password', 'proxy_password', false],
|
||||
assistant_prefill: ['#claude_assistant_prefill', 'assistant_prefill', false],
|
||||
assistant_impersonation: ['#claude_assistant_impersonation', 'assistant_impersonation', false],
|
||||
claude_use_sysprompt: ['#claude_use_sysprompt', 'claude_use_sysprompt', true],
|
||||
use_makersuite_sysprompt: ['#use_makersuite_sysprompt', 'use_makersuite_sysprompt', true],
|
||||
use_alt_scale: ['#use_alt_scale', 'use_alt_scale', true],
|
||||
squash_system_messages: ['#squash_system_messages', 'squash_system_messages', true],
|
||||
image_inlining: ['#openai_image_inlining', 'image_inlining', true],
|
||||
inline_image_quality: ['#openai_inline_image_quality', 'inline_image_quality', false],
|
||||
continue_prefill: ['#continue_prefill', 'continue_prefill', true],
|
||||
continue_postfix: ['#continue_postfix', 'continue_postfix', false],
|
||||
function_calling: ['#openai_function_calling', 'function_calling', true],
|
||||
show_thoughts: ['#openai_show_thoughts', 'show_thoughts', true],
|
||||
reasoning_effort: ['#openai_reasoning_effort', 'reasoning_effort', false],
|
||||
seed: ['#seed_openai', 'seed', false],
|
||||
n: ['#n_openai', 'n', false],
|
||||
};
|
||||
|
||||
const presetNameBefore = oai_settings.preset_settings_openai;
|
||||
|
||||
const presetName = $('#settings_preset_openai').find(':selected').text();
|
||||
|
@ -77,6 +77,7 @@ import { accountStorage } from './util/AccountStorage.js';
|
||||
import { timestampToMoment, uuidv4 } from './utils.js';
|
||||
import { getGlobalVariable, getLocalVariable, setGlobalVariable, setLocalVariable } from './variables.js';
|
||||
import { convertCharacterBook, loadWorldInfo, saveWorldInfo, updateWorldInfoList } from './world-info.js';
|
||||
import { ChatCompletionService, TextCompletionService } from './custom-request.js';
|
||||
|
||||
export function getContext() {
|
||||
return {
|
||||
@ -207,6 +208,8 @@ export function getContext() {
|
||||
getChatCompletionModel,
|
||||
printMessages,
|
||||
clearChat,
|
||||
ChatCompletionService,
|
||||
TextCompletionService,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -108,12 +108,12 @@ const BIAS_KEY = '#textgenerationwebui_api-settings';
|
||||
// (7 days later) The future has come.
|
||||
const MANCER_SERVER_KEY = 'mancer_server';
|
||||
const MANCER_SERVER_DEFAULT = 'https://neuro.mancer.tech';
|
||||
let MANCER_SERVER = localStorage.getItem(MANCER_SERVER_KEY) ?? MANCER_SERVER_DEFAULT;
|
||||
let TOGETHERAI_SERVER = 'https://api.together.xyz';
|
||||
let INFERMATICAI_SERVER = 'https://api.totalgpt.ai';
|
||||
let DREAMGEN_SERVER = 'https://dreamgen.com';
|
||||
let OPENROUTER_SERVER = 'https://openrouter.ai/api';
|
||||
let FEATHERLESS_SERVER = 'https://api.featherless.ai/v1';
|
||||
export let MANCER_SERVER = localStorage.getItem(MANCER_SERVER_KEY) ?? MANCER_SERVER_DEFAULT;
|
||||
export let TOGETHERAI_SERVER = 'https://api.together.xyz';
|
||||
export let INFERMATICAI_SERVER = 'https://api.totalgpt.ai';
|
||||
export let DREAMGEN_SERVER = 'https://dreamgen.com';
|
||||
export let OPENROUTER_SERVER = 'https://openrouter.ai/api';
|
||||
export let FEATHERLESS_SERVER = 'https://api.featherless.ai/v1';
|
||||
|
||||
export const SERVER_INPUTS = {
|
||||
[textgen_types.OOBA]: '#textgenerationwebui_api_url_text',
|
||||
|
Reference in New Issue
Block a user