Merge pull request #1568 from DonMoralez/staging

(claude) Reworked prefix, sysprompt, console messages, and sequence check
public/index.html

@@ -547,10 +547,6 @@
     <textarea id="jailbreak_prompt_quick_edit_textarea" class="text_pole textarea_compact autoSetHeight" rows="6" placeholder="—" data-pm-prompt="jailbreak"></textarea>
   </div>
 </div>
-<div id="claude_assistant_prefill_block" data-source="claude" class="range-block">
-    <span id="claude_assistant_prefill_text" data-i18n="Assistant Prefill">Assistant Prefill</span>
-    <textarea id="claude_assistant_prefill" class="text_pole textarea_compact" name="assistant_prefill autoSetHeight" rows="3" maxlength="10000" data-i18n="[placeholder]Start Claude's answer with..." placeholder="Start Claude's answer with..."></textarea>
-</div>
 </div>
 </div>
 <div data-newbie-hidden class="inline-drawer wide100p">
@@ -1545,10 +1541,38 @@
     </div>
     <div data-newbie-hidden class="range-block" data-source="claude">
         <label for="exclude_assistant" title="Exclude Assistant suffix" class="checkbox_label widthFreeExpand">
-            <input id="exclude_assistant" type="checkbox" /><span data-i18n="Exclude Assistant suffix">Exclude Assistant suffix</span>
+            <input id="exclude_assistant" type="checkbox" />
+            <span data-i18n="Exclude Assistant suffix">Exclude Assistant suffix</span>
         </label>
         <div class="toggle-description justifyLeft">
-            <span data-i18n="Exclude the assistant suffix from being added to the end of prompt.">Exclude the assistant suffix from being added to the end of prompt (Requires jailbreak with 'Assistant:' in it).</span>
+            <span data-i18n="Exclude the assistant suffix from being added to the end of prompt.">
+                Exclude the assistant suffix from being added to the end of the prompt (requires a jailbreak with 'Assistant:' in it).
+            </span>
         </div>
+        <div id="claude_assistant_prefill_block" class="wide100p">
+            <span id="claude_assistant_prefill_text" data-i18n="Assistant Prefill">Assistant Prefill</span>
+            <textarea id="claude_assistant_prefill" class="text_pole textarea_compact autoSetHeight" name="assistant_prefill" rows="3" maxlength="10000" data-i18n="[placeholder]Start Claude's answer with..." placeholder="Start Claude's answer with..."></textarea>
+        </div>
+        <label for="claude_use_sysprompt" class="checkbox_label widthFreeExpand">
+            <input id="claude_use_sysprompt" type="checkbox" />
+            <span data-i18n="Use system prompt (Claude 2.1+ only)">
+                Use system prompt (Claude 2.1+ only)
+            </span>
+        </label>
+        <div class="toggle-description justifyLeft">
+            <span data-i18n="Exclude the 'Human: ' prefix from being added to the beginning of the prompt.">
+                Exclude the 'Human: ' prefix from being added to the beginning of the prompt.
+                Instead, place it between the system prompt and the first message with the role 'assistant' (right before 'Chat History' by default).
+            </span>
+        </div>
+        <div id="claude_human_sysprompt_message_block" class="wide100p">
+            <div class="range-block-title openai_restorable">
+                <span data-i18n="Human: first message">Human: first message</span>
+                <div id="claude_human_sysprompt_message_restore" title="Restore Human: first message" class="right_menu_button">
+                    <div class="fa-solid fa-clock-rotate-left"></div>
+                </div>
+            </div>
+            <textarea id="claude_human_sysprompt_textarea" class="text_pole textarea_compact" rows="4" maxlength="10000" data-i18n="[placeholder]Human message" placeholder="Human message, instruction, etc. Adds nothing when empty, i.e. requires a new prompt with the role 'user' or manually adding the 'Human: ' prefix."></textarea>
+        </div>
     </div>
 </div>
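Note: both new Claude blocks are display-toggled from script rather than CSS. A minimal sketch of the intended wiring, assuming jQuery as used throughout the UI code (the actual handlers are in the openai.js hunks further down):

    // Sysprompt message block is only visible while 'Use system prompt' is ticked.
    $('#claude_use_sysprompt').on('change', function () {
        $('#claude_human_sysprompt_message_block').toggle(!!$(this).prop('checked'));
    });

    // Prefill block hides while 'Exclude Assistant suffix' is ticked.
    $('#exclude_assistant').on('change', function () {
        $('#claude_assistant_prefill_block').toggle(!$(this).prop('checked'));
    });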
public/scripts/openai.js

@@ -90,6 +90,7 @@ const default_wi_format = '[Details of the fictional world the RP is set in:\n{0
 const default_new_chat_prompt = '[Start a new Chat]';
 const default_new_group_chat_prompt = '[Start a new group chat. Group members: {{group}}]';
 const default_new_example_chat_prompt = '[Start a new Chat]';
+const default_claude_human_sysprompt_message = 'Let\'s get started. Please generate your response based on the information and instructions provided above.';
 const default_continue_nudge_prompt = '[Continue the following message. Do not include ANY parts of the original message. Use capitalization and punctuation as if your reply is a part of the original message: {{lastChatMessage}}]';
 const default_bias = 'Default (none)';
 const default_personality_format = '[{{char}}\'s personality: {{personality}}]';
@@ -230,9 +231,11 @@ const default_settings = {
     show_external_models: false,
     proxy_password: '',
     assistant_prefill: '',
+    human_sysprompt_message: default_claude_human_sysprompt_message,
     use_ai21_tokenizer: false,
     use_google_tokenizer: false,
     exclude_assistant: false,
+    claude_use_sysprompt: false,
     use_alt_scale: false,
     squash_system_messages: false,
     image_inlining: false,
@@ -291,9 +294,11 @@ const oai_settings = {
     show_external_models: false,
     proxy_password: '',
     assistant_prefill: '',
+    human_sysprompt_message: default_claude_human_sysprompt_message,
     use_ai21_tokenizer: false,
     use_google_tokenizer: false,
     exclude_assistant: false,
+    claude_use_sysprompt: false,
     use_alt_scale: false,
     squash_system_messages: false,
     image_inlining: false,
@@ -1559,7 +1564,9 @@ async function sendOpenAIRequest(type, messages, signal) {
     if (isClaude) {
         generate_data['top_k'] = Number(oai_settings.top_k_openai);
         generate_data['exclude_assistant'] = oai_settings.exclude_assistant;
+        generate_data['claude_use_sysprompt'] = oai_settings.claude_use_sysprompt;
         generate_data['stop'] = getCustomStoppingStrings(); // Claude shouldn't have limits on stop strings.
+        generate_data['human_sysprompt_message'] = substituteParams(oai_settings.human_sysprompt_message);
         // Don't add a prefill on quiet gens (summarization)
         if (!isQuiet && !oai_settings.exclude_assistant) {
             generate_data['assistant_prefill'] = substituteParams(oai_settings.assistant_prefill);
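For illustration, the Claude-specific payload assembled above might look like this (all values hypothetical; generate_data also carries the model and sampler fields set earlier in the function):

    const generate_data = {
        model: 'claude-2.1',
        top_k: 0,
        exclude_assistant: false,
        claude_use_sysprompt: true,
        stop: ['\nUser:'],                            // custom stopping strings, uncapped for Claude
        human_sysprompt_message: 'Let\'s get started. Please generate your response based on the information and instructions provided above.',
        assistant_prefill: 'Sure, here is my reply:', // omitted on quiet gens or when the suffix is excluded
    };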
@@ -2002,7 +2009,7 @@ class ChatCompletion {

         for (let message of this.messages.collection) {
             if (!excludeList.includes(message.identifier) && message.role === 'system' && !message.name) {
-                if (lastMessage && lastMessage.role === 'system') {
+                if (lastMessage && message.content && lastMessage.role === 'system') {
                     lastMessage.content += '\n' + message.content;
                     lastMessage.tokens = tokenHandler.count({ role: lastMessage.role, content: lastMessage.content });
                 }
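The added message.content check matters because the squash path appends with a separator; a minimal illustration, assuming two adjacent unnamed system messages:

    // lastMessage.content: 'You are {{char}}.'
    // message.content:     'Stay in character.'
    // After merging:       'You are {{char}}.\nStay in character.'
    //
    // With an empty message.content, the merge is now skipped instead of
    // appending a dangling '\n' to the previous system message.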
@@ -2360,6 +2367,7 @@ function loadOpenAISettings(data, settings) {
     oai_settings.show_external_models = settings.show_external_models ?? default_settings.show_external_models;
     oai_settings.proxy_password = settings.proxy_password ?? default_settings.proxy_password;
     oai_settings.assistant_prefill = settings.assistant_prefill ?? default_settings.assistant_prefill;
+    oai_settings.human_sysprompt_message = settings.human_sysprompt_message ?? default_settings.human_sysprompt_message;
     oai_settings.image_inlining = settings.image_inlining ?? default_settings.image_inlining;
     oai_settings.bypass_status_check = settings.bypass_status_check ?? default_settings.bypass_status_check;
@@ -2378,11 +2386,13 @@ function loadOpenAISettings(data, settings) {
     if (settings.use_ai21_tokenizer !== undefined) { oai_settings.use_ai21_tokenizer = !!settings.use_ai21_tokenizer; oai_settings.use_ai21_tokenizer ? ai21_max = 8191 : ai21_max = 9200; }
     if (settings.use_google_tokenizer !== undefined) oai_settings.use_google_tokenizer = !!settings.use_google_tokenizer;
     if (settings.exclude_assistant !== undefined) oai_settings.exclude_assistant = !!settings.exclude_assistant;
+    if (settings.claude_use_sysprompt !== undefined) oai_settings.claude_use_sysprompt = !!settings.claude_use_sysprompt;
     if (settings.use_alt_scale !== undefined) { oai_settings.use_alt_scale = !!settings.use_alt_scale; updateScaleForm(); }
     $('#stream_toggle').prop('checked', oai_settings.stream_openai);
     $('#api_url_scale').val(oai_settings.api_url_scale);
     $('#openai_proxy_password').val(oai_settings.proxy_password);
     $('#claude_assistant_prefill').val(oai_settings.assistant_prefill);
+    $('#claude_human_sysprompt_textarea').val(oai_settings.human_sysprompt_message);
     $('#openai_image_inlining').prop('checked', oai_settings.image_inlining);
     $('#openai_bypass_status_check').prop('checked', oai_settings.bypass_status_check);
@@ -2415,6 +2425,7 @@ function loadOpenAISettings(data, settings) {
     $('#use_ai21_tokenizer').prop('checked', oai_settings.use_ai21_tokenizer);
     $('#use_google_tokenizer').prop('checked', oai_settings.use_google_tokenizer);
     $('#exclude_assistant').prop('checked', oai_settings.exclude_assistant);
+    $('#claude_use_sysprompt').prop('checked', oai_settings.claude_use_sysprompt);
     $('#scale-alt').prop('checked', oai_settings.use_alt_scale);
     $('#openrouter_use_fallback').prop('checked', oai_settings.openrouter_use_fallback);
     $('#openrouter_force_instruct').prop('checked', oai_settings.openrouter_force_instruct);
@@ -2618,9 +2629,11 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) {
         api_url_scale: settings.api_url_scale,
         show_external_models: settings.show_external_models,
         assistant_prefill: settings.assistant_prefill,
+        human_sysprompt_message: settings.human_sysprompt_message,
         use_ai21_tokenizer: settings.use_ai21_tokenizer,
         use_google_tokenizer: settings.use_google_tokenizer,
         exclude_assistant: settings.exclude_assistant,
+        claude_use_sysprompt: settings.claude_use_sysprompt,
         use_alt_scale: settings.use_alt_scale,
         squash_system_messages: settings.squash_system_messages,
         image_inlining: settings.image_inlining,
@@ -2986,9 +2999,11 @@ function onSettingsPresetChange() {
         show_external_models: ['#openai_show_external_models', 'show_external_models', true],
         proxy_password: ['#openai_proxy_password', 'proxy_password', false],
         assistant_prefill: ['#claude_assistant_prefill', 'assistant_prefill', false],
+        human_sysprompt_message: ['#claude_human_sysprompt_textarea', 'human_sysprompt_message', false],
         use_ai21_tokenizer: ['#use_ai21_tokenizer', 'use_ai21_tokenizer', true],
         use_google_tokenizer: ['#use_google_tokenizer', 'use_google_tokenizer', true],
         exclude_assistant: ['#exclude_assistant', 'exclude_assistant', true],
+        claude_use_sysprompt: ['#claude_use_sysprompt', 'claude_use_sysprompt', true],
         use_alt_scale: ['#use_alt_scale', 'use_alt_scale', true],
         squash_system_messages: ['#squash_system_messages', 'squash_system_messages', true],
         image_inlining: ['#openai_image_inlining', 'image_inlining', true],
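Each entry maps a preset key to a [DOM selector, oai_settings key, isCheckbox] triple. A sketch of how such a map is typically consumed when a preset is applied (assumed shape; the real loop elsewhere in onSettingsPresetChange differs in details):

    for (const [key, [selector, setting, isCheckbox]] of Object.entries(settingsToUpdate)) {
        if (preset[key] !== undefined) {
            oai_settings[setting] = preset[key];
            isCheckbox ? $(selector).prop('checked', preset[key]) : $(selector).val(preset[key]);
        }
    }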
@@ -3510,6 +3525,7 @@ function toggleChatCompletionForms() {

     if (chat_completion_sources.CLAUDE == oai_settings.chat_completion_source) {
         $('#claude_assistant_prefill_block').toggle(!oai_settings.exclude_assistant);
+        $('#claude_human_sysprompt_message_block').toggle(oai_settings.claude_use_sysprompt);
     }
 }
@@ -3708,6 +3724,12 @@ $(document).ready(async function () {
         saveSettingsDebounced();
     });

+    $('#claude_use_sysprompt').on('change', function () {
+        oai_settings.claude_use_sysprompt = !!$('#claude_use_sysprompt').prop('checked');
+        $('#claude_human_sysprompt_message_block').toggle(oai_settings.claude_use_sysprompt);
+        saveSettingsDebounced();
+    });
+
     $('#names_in_completion').on('change', function () {
         oai_settings.names_in_completion = !!$('#names_in_completion').prop('checked');
         saveSettingsDebounced();
@@ -3781,6 +3803,12 @@ $(document).ready(async function () {
         saveSettingsDebounced();
     });

+    $('#claude_human_sysprompt_message_restore').on('click', function () {
+        oai_settings.human_sysprompt_message = default_claude_human_sysprompt_message;
+        $('#claude_human_sysprompt_textarea').val(oai_settings.human_sysprompt_message);
+        saveSettingsDebounced();
+    });
+
     $('#newgroupchat_prompt_restore').on('click', function () {
         oai_settings.new_group_chat_prompt = default_new_group_chat_prompt;
         $('#newgroupchat_prompt_textarea').val(oai_settings.new_group_chat_prompt);
@@ -3868,6 +3896,11 @@ $(document).ready(async function () {
         saveSettingsDebounced();
     });

+    $('#claude_human_sysprompt_textarea').on('input', function () {
+        oai_settings.human_sysprompt_message = String($('#claude_human_sysprompt_textarea').val());
+        saveSettingsDebounced();
+    });
+
     $('#openrouter_use_fallback').on('input', function () {
         oai_settings.openrouter_use_fallback = !!$(this).prop('checked');
         saveSettingsDebounced();
src/endpoints/backends/chat-completions.js

@@ -4,7 +4,7 @@ const { Readable } = require('stream');

 const { jsonParser } = require('../../express-common');
 const { CHAT_COMPLETION_SOURCES, GEMINI_SAFETY, BISON_SAFETY } = require('../../constants');
-const { forwardFetchResponse, getConfigValue, tryParse, uuidv4, mergeObjectWithYaml, excludeKeysByYaml } = require('../../util');
+const { forwardFetchResponse, getConfigValue, tryParse, uuidv4, mergeObjectWithYaml, excludeKeysByYaml, color } = require('../../util');
 const { convertClaudePrompt, convertGooglePrompt, convertTextCompletionPrompt } = require('../prompt-converters');

 const { readSecret, SECRET_KEYS } = require('../secrets');
@@ -21,9 +21,10 @@ const API_CLAUDE = 'https://api.anthropic.com/v1';
 async function sendClaudeRequest(request, response) {
     const apiUrl = new URL(request.body.reverse_proxy || API_CLAUDE).toString();
     const apiKey = request.body.reverse_proxy ? request.body.proxy_password : readSecret(SECRET_KEYS.CLAUDE);
+    const divider = '-'.repeat(process.stdout.columns);

     if (!apiKey) {
-        console.log('Claude API key is missing.');
+        console.log(color.red(`Claude API key is missing.\n${divider}`));
         return response.status(400).send({ error: true });
     }
@@ -34,34 +35,66 @@ async function sendClaudeRequest(request, response) {
             controller.abort();
         });

-        let doSystemPrompt = request.body.model === 'claude-2' || request.body.model === 'claude-2.1';
-        let requestPrompt = convertClaudePrompt(request.body.messages, true, !request.body.exclude_assistant, doSystemPrompt);
+        const isSysPromptSupported = request.body.model === 'claude-2' || request.body.model === 'claude-2.1';
+        const requestPrompt = convertClaudePrompt(request.body.messages, !request.body.exclude_assistant, request.body.assistant_prefill, isSysPromptSupported, request.body.claude_use_sysprompt, request.body.human_sysprompt_message);

-        if (request.body.assistant_prefill && !request.body.exclude_assistant) {
-            requestPrompt += request.body.assistant_prefill;
-        }
-
-        console.log('Claude request:', requestPrompt);
-        const stop_sequences = ['\n\nHuman:', '\n\nSystem:', '\n\nAssistant:'];
+        // Check the Claude message sequence and prefix presence.
+        const sequence = requestPrompt.split('\n').filter(x => x.startsWith('Human:') || x.startsWith('Assistant:'));
+        const humanFound = sequence.some(line => line.startsWith('Human:'));
+        const assistantFound = sequence.some(line => line.startsWith('Assistant:'));
+        let humanErrorCount = 0;
+        let assistantErrorCount = 0;
+
+        for (let i = 0; i < sequence.length - 1; i++) {
+            if (sequence[i].startsWith(sequence[i + 1].split(':')[0])) {
+                if (sequence[i].startsWith('Human:')) {
+                    humanErrorCount++;
+                } else if (sequence[i].startsWith('Assistant:')) {
+                    assistantErrorCount++;
+                }
+            }
+        }
+
+        if (!humanFound) {
+            console.log(color.red(`${divider}\nWarning: No 'Human:' prefix found in the prompt.\n${divider}`));
+        }
+        if (!assistantFound) {
+            console.log(color.red(`${divider}\nWarning: No 'Assistant:' prefix found in the prompt.\n${divider}`));
+        }
+        if (sequence[0] && !sequence[0].startsWith('Human:')) {
+            console.log(color.red(`${divider}\nWarning: The message sequence should start with the 'Human:' prefix.\nMake sure the 'Human:' prefix is at the very beginning of the prompt, or right after the system prompt.\n${divider}`));
+        }
+        if (humanErrorCount > 0 || assistantErrorCount > 0) {
+            console.log(color.red(`${divider}\nWarning: Detected incorrect prefix sequence(s).`));
+            console.log(color.red(`Incorrect 'Human:' prefix(es): ${humanErrorCount}.\nIncorrect 'Assistant:' prefix(es): ${assistantErrorCount}.`));
+            console.log(color.red('Check the prompt above and fix it in SillyTavern.'));
+            console.log(color.red('\nThe correct sequence should look like this:\nSystem prompt <- (sysprompt format only; otherwise leave two empty lines above the first Human message.)'));
+            console.log(color.red(` <----- (Each message beginning with an 'Assistant:'/'Human:' prefix must have one empty line above it.)\nHuman:\n\nAssistant:\n...\n\nHuman:\n\nAssistant:\n${divider}`));
+        }
+
+        // Add custom stop sequences.
+        const stopSequences = ['\n\nHuman:', '\n\nSystem:', '\n\nAssistant:'];
         if (Array.isArray(request.body.stop)) {
-            stop_sequences.push(...request.body.stop);
+            stopSequences.push(...request.body.stop);
         }

+        const requestBody = {
+            prompt: requestPrompt,
+            model: request.body.model,
+            max_tokens_to_sample: request.body.max_tokens,
+            stop_sequences: stopSequences,
+            temperature: request.body.temperature,
+            top_p: request.body.top_p,
+            top_k: request.body.top_k,
+            stream: request.body.stream,
+        };
+
+        console.log('Claude request:', requestBody);
+
         const generateResponse = await fetch(apiUrl + '/complete', {
             method: 'POST',
             signal: controller.signal,
-            body: JSON.stringify({
-                prompt: requestPrompt,
-                model: request.body.model,
-                max_tokens_to_sample: request.body.max_tokens,
-                stop_sequences: stop_sequences,
-                temperature: request.body.temperature,
-                top_p: request.body.top_p,
-                top_k: request.body.top_k,
-                stream: request.body.stream,
-            }),
+            body: JSON.stringify(requestBody),
             headers: {
                 'Content-Type': 'application/json',
                 'anthropic-version': '2023-06-01',
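To make the new warnings concrete: the checker only inspects lines that begin with 'Human:' or 'Assistant:' and flags two consecutive turns by the same speaker. A correctly alternating prompt (illustrative content) passes silently:

    You are a storyteller.            <- system prompt, no prefix

    Human: Hello there.

    Assistant: Hi! How can I help?

    Human: Tell me a story.

    Assistant:

Two 'Human:' turns in a row would instead increment humanErrorCount once and trigger the red console warning above.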
@@ -75,20 +108,20 @@ async function sendClaudeRequest(request, response) {
             forwardFetchResponse(generateResponse, response);
         } else {
             if (!generateResponse.ok) {
-                console.log(`Claude API returned error: ${generateResponse.status} ${generateResponse.statusText} ${await generateResponse.text()}`);
+                console.log(color.red(`Claude API returned error: ${generateResponse.status} ${generateResponse.statusText}\n${await generateResponse.text()}\n${divider}`));
                 return response.status(generateResponse.status).send({ error: true });
             }

             const generateResponseJson = await generateResponse.json();
             const responseText = generateResponseJson.completion;
-            console.log('Claude response:', responseText);
+            console.log('Claude response:', generateResponseJson);

             // Wrap it back to OAI format
             const reply = { choices: [{ 'message': { 'content': responseText } }] };
             return response.send(reply);
         }
     } catch (error) {
-        console.log('Error communicating with Claude: ', error);
+        console.log(color.red(`Error communicating with Claude: ${error}\n${divider}`));
         if (!response.headersSent) {
             return response.status(500).send({ error: true });
         }
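For reference, the non-streaming path unwraps Anthropic's legacy /complete response and re-wraps it in an OpenAI-style shape. With hypothetical values:

    // Claude /complete response (abridged):
    // { completion: ' Hi! How can I help?', stop_reason: 'stop_sequence', model: 'claude-2.1' }

    // Reply forwarded to the client:
    // { choices: [{ message: { content: ' Hi! How can I help?' } }] }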
src/endpoints/prompt-converters.js

@@ -1,74 +1,67 @@
 /**
  * Convert a prompt from the ChatML objects to the format used by Claude.
  * @param {object[]} messages Array of messages
- * @param {boolean} addHumanPrefix Add Human prefix
- * @param {boolean} addAssistantPostfix Add Assistant postfix
- * @param {boolean} withSystemPrompt Build system prompt before "\n\nHuman: "
+ * @param {boolean} addAssistantPostfix Add the Assistant postfix.
+ * @param {string} addAssistantPrefill Add the Assistant prefill after the Assistant postfix.
+ * @param {boolean} withSysPromptSupport Indicates if the Claude model supports the system prompt format.
+ * @param {boolean} useSystemPrompt Indicates if the system prompt format should be used.
+ * @param {string} addSysHumanMsg Add a Human message between the system prompt and the first Assistant one.
  * @returns {string} Prompt for Claude
  * @copyright Prompt Conversion script taken from RisuAI by kwaroran (GPLv3).
  */
-function convertClaudePrompt(messages, addHumanPrefix, addAssistantPostfix, withSystemPrompt) {
-    // Claude doesn't support message names, so we'll just add them to the message content.
-    for (const message of messages) {
-        if (message.name && message.role !== 'system') {
-            message.content = message.name + ': ' + message.content;
-            delete message.name;
-        }
-    }
+function convertClaudePrompt(messages, addAssistantPostfix, addAssistantPrefill, withSysPromptSupport, useSystemPrompt, addSysHumanMsg) {

-    let systemPrompt = '';
-    if (withSystemPrompt) {
-        let lastSystemIdx = -1;
-
-        for (let i = 0; i < messages.length - 1; i++) {
-            const message = messages[i];
-            if (message.role === 'system' && !message.name) {
-                systemPrompt += message.content + '\n\n';
-            } else {
-                lastSystemIdx = i - 1;
-                break;
-            }
-        }
-        if (lastSystemIdx >= 0) {
-            messages.splice(0, lastSystemIdx + 1);
-        }
-    }
+    // Prepare messages for Claude.
+    if (messages.length > 0) {
+        messages[0].role = 'system';
+        // Add the assistant's message to the end of messages.
+        if (addAssistantPostfix) {
+            messages.push({
+                role: 'assistant',
+                content: addAssistantPrefill || '',
+            });
+        }
+        // Find the index of the first message with the 'assistant' role, and check for a 'user' role/"Human:" prefix before it.
+        let hasUser = false;
+        const firstAssistantIndex = messages.findIndex((message, i) => {
+            if (i >= 0 && (message.role === 'user' || message.content.includes('\n\nHuman: '))) {
+                hasUser = true;
+            }
+            return message.role === 'assistant' && i > 0;
+        });
+        // On 2.1+ with 'Use system prompt' checked, switch to the system prompt format by setting the first message's role to 'system'.
+        // Insert the Human message before the first Assistant one if no such message or prefix was found.
+        if (withSysPromptSupport && useSystemPrompt) {
+            messages[0].role = 'system';
+            if (firstAssistantIndex > 0 && addSysHumanMsg && !hasUser) {
+                messages.splice(firstAssistantIndex, 0, {
+                    role: 'user',
+                    content: addSysHumanMsg,
+                });
+            }
+        } else {
+            // Otherwise, use the default message format by setting the first message's role to 'user' (compatible with all Claude models, including 2.1).
+            messages[0].role = 'user';
+            // Fix the message order for the default format when (messages > context size) by merging two messages with "\n\nHuman: " prefixes into one, before the first Assistant message.
+            if (firstAssistantIndex > 0) {
+                messages[firstAssistantIndex - 1].role = firstAssistantIndex - 1 !== 0 && messages[firstAssistantIndex - 1].role === 'user' ? 'FixHumMsg' : messages[firstAssistantIndex - 1].role;
+            }
+        }
+    }

-    let requestPrompt = messages.map((v) => {
-        let prefix = '';
-        switch (v.role) {
-            case 'assistant':
-                prefix = '\n\nAssistant: ';
-                break;
-            case 'user':
-                prefix = '\n\nHuman: ';
-                break;
-            case 'system':
-                // According to the Claude docs, H: and A: should be used for example conversations.
-                if (v.name === 'example_assistant') {
-                    prefix = '\n\nA: ';
-                } else if (v.name === 'example_user') {
-                    prefix = '\n\nH: ';
-                } else {
-                    prefix = '\n\n';
-                }
-                break;
-        }
-        return prefix + v.content;
+    // Convert messages to the prompt.
+    let requestPrompt = messages.map((v, i) => {
+        // Set the prefix according to the role.
+        let prefix = {
+            'assistant': '\n\nAssistant: ',
+            'user': '\n\nHuman: ',
+            'system': i === 0 ? '' : v.name === 'example_assistant' ? '\n\nA: ' : v.name === 'example_user' ? '\n\nH: ' : '\n\n',
+            'FixHumMsg': '\n\nFirst message: ',
+        }[v.role] ?? '';
+        // Claude doesn't support message names, so we'll just add them to the message content.
+        return `${prefix}${v.name && v.role !== 'system' ? `${v.name}: ` : ''}${v.content}`;
     }).join('');

-    if (addHumanPrefix) {
-        requestPrompt = '\n\nHuman: ' + requestPrompt;
-    }
-
-    if (addAssistantPostfix) {
-        requestPrompt = requestPrompt + '\n\nAssistant: ';
-    }
-
-    if (withSystemPrompt) {
-        requestPrompt = systemPrompt + requestPrompt;
-    }
-
     return requestPrompt;
 }
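A quick worked example of the new six-argument signature (hypothetical inputs; the require path is assumed):

    const { convertClaudePrompt } = require('./src/endpoints/prompt-converters');

    const messages = [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Hello!' },
    ];

    // addAssistantPostfix = true, prefill = 'Hi', model supports sysprompt,
    // sysprompt enabled, no fallback Human message needed (a user turn exists).
    const prompt = convertClaudePrompt(messages, true, 'Hi', true, true, '');

    // prompt === 'You are a helpful assistant.\n\nHuman: Hello!\n\nAssistant: Hi'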