Unify Claude request logging with other API sources
parent b5e59c819c · commit ffb711d802
@@ -38,8 +38,6 @@ async function sendClaudeRequest(request, response) {
     const isSysPromptSupported = request.body.model === 'claude-2' || request.body.model === 'claude-2.1';
     const requestPrompt = convertClaudePrompt(request.body.messages, !request.body.exclude_assistant, request.body.assistant_prefill, isSysPromptSupported, request.body.claude_use_sysprompt, request.body.human_sysprompt_message);
 
-    console.log(color.green(`${divider}\nClaude request\n`) + color.cyan(`PROMPT\n${divider}\n${requestPrompt}\n${divider}`));
-
     // Check Claude messages sequence and prefixes presence.
     const sequence = requestPrompt.split('\n').filter(x => x.startsWith('Human:') || x.startsWith('Assistant:'));
     const humanFound = sequence.some(line => line.startsWith('Human:'));
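For context, the prefix-sequence check kept below the removed log operates on the flat prompt string that convertClaudePrompt returns. A minimal standalone sketch of that scan (the sample prompt text is illustrative, not taken from the repo):

    // Illustrative prompt; in the handler it comes from convertClaudePrompt().
    const requestPrompt = '\n\nHuman: Hello\n\nAssistant: Hi there!\nHuman: no blank line above this one';

    // Same scan as in the handler: collect every line that starts with a role prefix.
    const sequence = requestPrompt.split('\n').filter(x => x.startsWith('Human:') || x.startsWith('Assistant:'));
    const humanFound = sequence.some(line => line.startsWith('Human:'));

    console.log(sequence);   // [ 'Human: Hello', 'Assistant: Hi there!', 'Human: no blank line above this one' ]
    console.log(humanFound); // true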
@@ -69,30 +67,34 @@ async function sendClaudeRequest(request, response) {
     if (humanErrorCount > 0 || assistantErrorCount > 0) {
         console.log(color.red(`${divider}\nWarning: Detected incorrect Prefix sequence(s).`));
         console.log(color.red(`Incorrect "Human:" prefix(es): ${humanErrorCount}.\nIncorrect "Assistant: " prefix(es): ${assistantErrorCount}.`));
-        console.log(color.red('Check the prompt above and fix it in the sillytavern.'));
+        console.log(color.red('Check the prompt above and fix it in the SillyTavern.'));
         console.log(color.red('\nThe correct sequence should look like this:\nSystem prompt <-(for the sysprompt format only, else have 2 empty lines above the first human\'s message.)'));
         console.log(color.red(` <-----(Each message beginning with the "Assistant:/Human:" prefix must have one empty line above.)\nHuman:\n\nAssistant:\n...\n\nHuman:\n\nAssistant:\n${divider}`));
     }
-    const stop_sequences = ['\n\nHuman:', '\n\nSystem:', '\n\nAssistant:'];
 
     // Add custom stop sequences
+    const stopSequences = ['\n\nHuman:', '\n\nSystem:', '\n\nAssistant:'];
     if (Array.isArray(request.body.stop)) {
-        stop_sequences.push(...request.body.stop);
+        stopSequences.push(...request.body.stop);
     }
 
-    const generateResponse = await fetch(apiUrl + '/complete', {
-        method: 'POST',
-        signal: controller.signal,
-        body: JSON.stringify({
+    const requestBody = {
         prompt: requestPrompt,
         model: request.body.model,
         max_tokens_to_sample: request.body.max_tokens,
-        stop_sequences: stop_sequences,
+        stop_sequences: stopSequences,
         temperature: request.body.temperature,
         top_p: request.body.top_p,
         top_k: request.body.top_k,
         stream: request.body.stream,
-    }),
+    };
+
+    console.log('Claude request:', requestBody);
+
+    const generateResponse = await fetch(apiUrl + '/complete', {
+        method: 'POST',
+        signal: controller.signal,
+        body: JSON.stringify(requestBody),
         headers: {
             'Content-Type': 'application/json',
             'anthropic-version': '2023-06-01',
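Pulling the payload into requestBody is what lets the request be logged in one line, in the same plain console.log style the other API sources use, instead of the colored PROMPT block removed above. A rough, self-contained sketch of what gets logged (all field values are made up for illustration):

    // Illustrative values only; in the handler they come from request.body.
    const stopSequences = ['\n\nHuman:', '\n\nSystem:', '\n\nAssistant:'];

    const requestBody = {
        prompt: '\n\nHuman: Hello\n\nAssistant:',
        model: 'claude-2.1',
        max_tokens_to_sample: 300,
        stop_sequences: stopSequences,
        temperature: 1,
        top_p: 1,
        top_k: 0,
        stream: false,
    };

    // Unified log line: the whole request body is printed as a plain object.
    console.log('Claude request:', requestBody);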
@@ -112,7 +114,7 @@ async function sendClaudeRequest(request, response) {
 
     const generateResponseJson = await generateResponse.json();
     const responseText = generateResponseJson.completion;
-    console.log(color.green(`Claude response\n${divider}\n${responseText}\n${divider}`));
+    console.log('Claude response:', generateResponseJson);
 
     // Wrap it back to OAI format
     const reply = { choices: [{ 'message': { 'content': responseText } }] };
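The response side gets the same treatment: the parsed JSON is logged as-is before the completion text is wrapped back into the OpenAI-style reply. A small sketch of that step, assuming a typical non-streaming /complete payload (the stop_reason and model fields are assumptions for illustration; only completion is actually read here):

    // Assumed shape of the parsed response; only `completion` is used by the handler.
    const generateResponseJson = {
        completion: ' Hi! How can I help you today?',
        stop_reason: 'stop_sequence', // illustrative
        model: 'claude-2.1',          // illustrative
    };

    console.log('Claude response:', generateResponseJson);

    // Wrap it back to OAI format.
    const responseText = generateResponseJson.completion;
    const reply = { choices: [{ 'message': { 'content': responseText } }] };
    console.log(JSON.stringify(reply)); // {"choices":[{"message":{"content":" Hi! How can I help you today?"}}]}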