Merge branch 'main' of https://github.com/Cohee1207/SillyTavern into mark-favorite-filter-by-favorite

This commit is contained in:
Wilfred Chen
2023-04-22 13:21:35 +08:00
22 changed files with 114 additions and 82 deletions

View File

@@ -259,6 +259,7 @@ class Client {
constructor(auto_reconnect = false, use_cached_bots = false) {
this.auto_reconnect = auto_reconnect;
this.use_cached_bots = use_cached_bots;
this.abortController = new AbortController();
}
async init(token, proxy = null) {
@@ -267,6 +268,7 @@ class Client {
timeout: 60000,
httpAgent: new http.Agent({ keepAlive: true }),
httpsAgent: new https.Agent({ keepAlive: true }),
signal: this.abortController.signal,
});
if (proxy) {
this.session.defaults.proxy = {
@@ -544,6 +546,8 @@ class Client {
let messageId;
while (true) {
try {
this.abortController.signal.throwIfAborted();
const message = this.message_queues[humanMessageId].shift();
if (!message) {
await new Promise(resolve => setTimeout(() => resolve(), 1000));

View File

@@ -1,6 +1,6 @@
<html>
<head>
<title>TavernAI - Note - Character Derscriptions</title>
<title>TavernAI - Note - Character Descriptions</title>
<link rel="stylesheet" href="/css/notes.css">
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
@@ -22,7 +22,7 @@
<p>
For most Kobold models the easiest way is to use a free form for description, and in each sentence it is desirable to specify the name of the character.<br><br>
The entire description should be in one line without hyphenation.<br><br>
For examle:<br><br>
For example:<br><br>
<code>
Chloe is a female elf. Chloe wears black-white maid dress with green collar and red glasses. Chloe has medium length black hair. Chloe's personality is...
</code>
@@ -33,11 +33,10 @@
Details here: <a target="_blank" href="https://github.com/KoboldAI/KoboldAI-Client/wiki/Pro-Tips">Pro-Tips</a>
</p>
<hr>
<br>
<p>
<u>A list of tags that are replaced when sending to generate:</u><br><br>
{{user}} and &lt;USER&gt; : replaced by the User's Name<br>
{{char}} and &lt;BOT&gt; : replaced by the Character's Name
{{user}} and &lt;USER&gt; are replaced by the User's Name<br>
{{char}} and &lt;BOT&gt; are replaced by the Character's Name
</p>
</div>
</div>

View File

@@ -13,7 +13,7 @@
<div id="content">
<h2>Chat import</h2>
<h3>Import chats into TavernAI</h3>
<p>For import Character.ai chats use tool: <a href="https://github.com/0x000011b/characterai-dumper">https://github.com/0x000011b/characterai-dumper</a></p>
<p>To import Character.AI chats, use this tool: <a href="https://github.com/0x000011b/characterai-dumper">https://github.com/0x000011b/characterai-dumper</a>.</p>
</div>
</div>
</body>

View File

@@ -21,10 +21,12 @@
<br><br>&lt;START&gt;<br>
{{user}}: Hello<br>
{{char}}: *excitedly* Hello there, dear! Are you new to Axel? Don't worry, I, Aqua the goddess of water, am here to help you! Do you need any assistance? And may I say, I look simply radiant today! *strikes a pose and looks at you with puppy eyes*</p>
<hr><br>A list of tags that are replaced when sending to generate:<br><br>
{{user}} and &lt;USER&gt; are replaced by User Name<br>
{{char}} and &lt;BOT&gt; are replaced by Character Name<br><br>
*for Pygmalion "{{user}}:" and "&lt;USER&gt;:" will be replaced by "You:"
<hr>
<p>
<u>A list of tags that are replaced when sending to generate:</u><br><br>
{{user}} and &lt;USER&gt; are replaced by the User's Name<br>
{{char}} and &lt;BOT&gt; are replaced by the Character's Name
</p>
</div>
</div>
</body>

View File

@@ -18,8 +18,8 @@
<hr>
<p>
<u>A list of tags that are replaced when sending to generate:</u><br><br>
{{user}} and &lt;USER&gt; : replaced by User Name<br>
{{char}} and &lt;BOT&gt; : replaced by Character Name<br><br>
{{user}} and &lt;USER&gt; are replaced by the User's Name<br>
{{char}} and &lt;BOT&gt; are replaced by the Character's Name
</p>
</div>
</div>

View File

@@ -64,7 +64,7 @@
Comment
</h3>
<p>
A supplemental text comment for the your convenience, which is not utilized by the AI.
A supplemental text comment for your convenience, which is not utilized by the AI.
</p>
<h3>
Constant

View File

@@ -13,7 +13,7 @@
<div id="content">
<h2>Personality summary</h2>
<p>
A brief description of the personality. It is added to the chat to a depth of 8-15 messages, so it has a significant impact on the character.
A brief description of the personality. It is added to the chat at a depth of 8-15 messages, so it has a significant impact on the character.
</p>
Example:
@@ -26,13 +26,11 @@
<p>*In Pygmalion model, it is used as a "Personality:" graph</p>
<hr>
<p>
<u>List of tags that are replaced when sending to generate:</u><br><br>
{{user}} and &lt;USER&gt; : replaced by the User's Name<br>
{{char}} and &lt;BOT&gt; : replaced by the Character's Name<br><br>
<u>A list of tags that are replaced when sending to generate:</u><br><br>
{{user}} and &lt;USER&gt; are replaced by the User's Name<br>
{{char}} and &lt;BOT&gt; are replaced by the Character's Name
</p>
</div>
</div>
</body>

View File

@@ -21,15 +21,15 @@
For example:
<br><br>
<code>
*I noticed you came inside, I walked up and stood right in front of you* Wellcome. I'm glad to see you here.
*i said with toothy smug sunny smile looking you straight in the eye* What brings you...
*I noticed you came inside, I walked up and stood right in front of you* Welcome. I'm glad to see you here.
*I said with toothy smug sunny smile looking you straight in the eye* What brings you...
</code>
<Br>
<hr>
<p>
A list of tags that are replaced when sending to generate:<br><br>
{{user}} and &lt;USER&gt; are replaced by User Name<br>
{{char}} and &lt;BOT&gt; are replaced by Character Name<br><br>
<u>A list of tags that are replaced when sending to generate:</u><br><br>
{{user}} and &lt;USER&gt; are replaced by the User's Name<br>
{{char}} and &lt;BOT&gt; are replaced by the Character's Name
</p>
</div>
</div>

View File

@@ -30,11 +30,11 @@
<h3>Repetition penalty range</h3>
<p>The range of influence of Repetition penalty in tokens.</p>
<h3>Amount generation</h3>
<p>The maximum amount of tokens that a AI will generate to respond. One word is approximately 3-4 tokens.
<p>The maximum amount of tokens that the AI will generate to respond. One word is approximately 3-4 tokens.
The larger the parameter value, the longer the generation time takes.</p>
<h3>Context size</h3>
<p>How much will the AI remember. Context size also affects the speed of generation.<br><br>
<u>Important</u>: The setting of Context Size in TavernAI GUI override setting for KoboldAI GUI
<u>Important</u>: The setting of Context Size in TavernAI GUI overrides the setting for KoboldAI GUI
</p>
<h2>Advanced Settings</h2>
@@ -51,8 +51,8 @@
<h3>Top P Sampling</h3>
<p>
This setting controls how much of the text generated is based on the most likely options.
The top P words with the highest probabilities are considered. A word is then chosen at random, with a
higher chance of selecting words with higher probabilities.
Only words with the highest probabilities, together summing up to P, are considered. A word is then
chosen at random, with a higher chance of selecting words with higher probabilities.
</p>
<p>
Set value to 1 to disable its effect.

View File

@@ -1,23 +0,0 @@
<html>
<head>
<title>TavernAI - Note - Temperature</title>
<link rel="stylesheet" href="/css/notes.css">
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin="">
<link href="https://fonts.googleapis.com/css2?family=Noto+Sans:ital,wght@0,100;0,200;0,300;0,400;0,500;0,600;0,700;0,800;0,900;1,100;1,200;1,300;1,400;1,500;1,600;1,700;1,800;1,900&amp;display=swap" rel="stylesheet">
</head>
<body>
<div id="main">
<div id="content">
<h2>Temperature</h2>
<p>
Value from 0.1 to 2.0.<br><br>
Less value - the answers are more logical, but less creative.<Br><br>
More value - the answers are more creative, but less logical.
</p>
</div>
</div>
</body>
</html>

View File

@@ -12,7 +12,7 @@
<div id="main">
<div id="content">
<h2>NovelAI Models</h2>
<p>If your subscribe tier is Paper, Tablet or Scroll use only Euterpe model otherwise you can not get an answer from NovelAI api.</p>
<p>If your subscription tier is Paper, Tablet or Scroll use only Euterpe model otherwise you can not get an answer from NovelAI API.</p>
</div>
</div>
</body>

View File

@@ -17,7 +17,7 @@
There are two types of anchors: <u>Character Anchor</u> and <u>Style Anchor</u>
</p>
<p>
<u>Character Anchor</u> - affects the character played by the AI by motivating him to write longer messages.<br><br>
<u>Character Anchor</u> - affects the character played by the AI by motivating it to write longer messages.<br><br>
Looks like:
<code>[Elaborate speaker]</code>
</p>
@@ -31,10 +31,10 @@
Anchors Order sets the location of anchors in the prompt; the first anchor in the order is much further back in the context and thus has less influence than the second.
</p>
<p>
The second anchor is only turned on after 8-12 messages, because when the chat still only has a few message the first anchor creates enough effect ob its own.
The second anchor is only turned on after 8-12 messages, because when the chat still only has a few messages, the first anchor creates enough effect on its own.
</p>
<p>
Sometimes an AI model may not perceive anchors correctly or the AI model already generates sufficiently long messages.<br>
Sometimes an AI model may not perceive anchors correctly or the AI model already generates sufficiently long messages.
For these cases, you can disable the anchors by unchecking their respective boxes.
</p>
<p>

View File

@@ -17,8 +17,8 @@
<div id="content">
<h2>Advanced Formatting</h2>
<p>
The settings provided in this section allow for a more control over the prompt building strategy.
Most specifics of the prompt building depend on whether a Pygmalion model is selected or special formatting is force enabled.
The settings provided in this section allow for more control over the prompt building strategy.
Most specifics of the prompt building depend on whether a Pygmalion model is selected or special formatting is force-enabled.
The core differences between the formatting schemas are listed below.
</p>
<h3>Custom Chat Separator</h3>
@@ -28,24 +28,24 @@
<h3>For <u>Pygmalion</u> formatting</h3>
<h4>Disable description formatting</h4>
<p>
<code><b>NAME's Persona: </b></code> won't be prepended to the content your character's Description box.
<code><b>NAME's Persona: </b></code> won't be prepended to the content of your character's Description box.
</p>
<h4>Disable scenario formatting</h4>
<p>
<code><b>Scenario: </b></code> won't be prepended to the content your character's Scenario box.
<code><b>Scenario: </b></code> won't be prepended to the content of your character's Scenario box.
</p>
<h4>Disable personality formatting</h4>
<p>
<code><b>Personality: </b></code> won't be prepended to the content your character's Personality box.
<code><b>Personality: </b></code> won't be prepended to the content of your character's Personality box.
</p>
<h4>Disable example chats formatting</h4>
<p>
<code>&lt;START&gt;</code> is not added at the beginning of each example message block.<br>
<code>&lt;START&gt;</code> won't be added at the beginning of each example message block.<br>
<i>(If custom separator is not set)</i>
</p>
<h4>Disable chat start formatting</h4>
<p>
<code>&lt;START&gt;</code> is not added before the between the character card and the chat log.<br>
<code>&lt;START&gt;</code> won't be added between the character card and the chat log.<br>
<i>(If custom separator is not set)</i>
</p>
<h4>Always add character's name to prompt</h4>
@@ -59,25 +59,25 @@
</p>
<h4>Disable scenario formatting</h4>
<p>
<code><b>Circumstances and context of the dialogue: </b></code> won't be prepended to the content your character's Scenario box.
<code><b>Circumstances and context of the dialogue: </b></code> won't be prepended to the content of your character's Scenario box.
</p>
<h4>Disable personality formatting</h4>
<p>
<code><b>NAME's personality: </b></code> won't be prepended to the content your character's Personality box.
<code><b>NAME's personality: </b></code> won't be prepended to the content of your character's Personality box.
</p>
<h4>Disable example chats formatting</h4>
<p>
<code>This is how <b>Character</b> should talk</code> is not added at the beginning of each example message block.<br>
<code>This is how <b>Character</b> should talk</code> won't be added at the beginning of each example message block.<br>
<i>(If custom separator is not set)</i>
</p>
<h4>Disable chat start formatting</h4>
<p>
<code>Then the roleplay chat between <b>User</b> and <b>Character</b> begins</code> is not added before the between the character card and the chat log.<br>
<code>Then the roleplay chat between <b>User</b> and <b>Character</b> begins</code> won't be added between the character card and the chat log.<br>
<i>(If custom separator is not set)</i>
</p>
<h4>Always add character's name to prompt</h4>
<p>
Appends character's name to the prompt to force model to complete the message as a character:
Appends character's name to the prompt to force the model to complete the message as the character:
</p>
<code>

View File

@@ -1,7 +1,7 @@
<html>
<head>
<title>Advanced Formatting</title>
<title>Group reply order strategies</title>
<link rel="stylesheet" href="/css/notes.css">
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">

View File

@@ -1,7 +1,7 @@
<html>
<head>
<title>Advanced Settings</title>
<title>OpenAI API key</title>
<link rel="stylesheet" href="/css/notes.css">
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">

View File

@@ -15,6 +15,7 @@
<body>
<div id="main">
<div id="content">
<h2>Gradio Streaming Function ID</h2>
<p>
To use streaming with Text Generation Web UI, a Gradio function index needs to be provided.
It cannot be determined programmatically and should be typed in manually.

View File

@@ -15,7 +15,7 @@
<div id="content">
<h2>Character Tokens</h2>
<p><b>TLDR: If you're working with an AI model with a 2048 context token limit, your 1000 token character definition is cutting the AI's 'memory' in half.</b></p>
<p><b>TL;DR: If you're working with an AI model with a 2048 context token limit, your 1000 token character definition is cutting the AI's 'memory' in half.</b></p>
<p>To put this in perspective, a decent response from a good AI can easily be around 200-300 tokens. In this case, the AI would only be able to 'remember' about 3 exchanges worth of chat history.</p>
<hr>
@@ -23,7 +23,7 @@
<p>When we see your character has over 1000 tokens in its definitions, we highlight it for you because this can lower the AI's capabilities to provide an enjoyable conversation.</p>
<h3>What happens if my Character has too many tokens?</h3>
<p>Don't Worry - it won't break anything. At worst, if the Character's permanent tokens are too large, it simply means there will be less room left in the context for other things (see below).</p>
<p>Don't worry - it won't break anything. At worst, if the Character's permanent tokens are too large, it simply means there will be less room left in the context for other things (see below).</p>
<p>The only negative side effect this can have is the AI will have less 'memory', as it will have less chat history available to process.</p>
<p>This is because every AI model has a limit to the amount of context it can process at one time.</p>
<h3>'Context'?</h3>

View File

@@ -1274,6 +1274,7 @@ class StreamingProcessor {
this.isStopped = false;
this.isFinished = false;
this.generator = this.nullStreamingGeneration;
this.abortController = new AbortController();
}
async generate() {
@@ -1460,7 +1461,10 @@ async function Generate(type, automatic_trigger, force_name2) {
storyString += appendToStoryString(Scenario, power_user.disable_scenario_formatting ? '' : 'Scenario: ');
} else {
storyString += appendToStoryString(charDescription, '');
storyString += appendToStoryString(charPersonality, power_user.disable_personality_formatting ? '' : name2 + "'s personality: ");
if (count_view_mes < topAnchorDepth) {
storyString += appendToStoryString(charPersonality, power_user.disable_personality_formatting ? '' : name2 + "'s personality: ");
}
}
if (power_user.custom_chat_separator && power_user.custom_chat_separator.length) {
@@ -1928,7 +1932,7 @@ async function Generate(type, automatic_trigger, force_name2) {
let prompt = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
if (isStreamingEnabled()) {
streamingProcessor.generator = await sendOpenAIRequest(prompt);
streamingProcessor.generator = await sendOpenAIRequest(prompt, streamingProcessor.abortController.signal);
}
else {
sendOpenAIRequest(prompt).then(onSuccess).catch(onError);
@@ -1939,14 +1943,14 @@ async function Generate(type, automatic_trigger, force_name2) {
}
else if (main_api == 'poe') {
if (isStreamingEnabled()) {
streamingProcessor.generator = await generatePoe(type, finalPromt);
streamingProcessor.generator = await generatePoe(type, finalPromt, streamingProcessor.abortController.signal);
}
else {
generatePoe(type, finalPromt).then(onSuccess).catch(onError);
}
}
else if (main_api == 'textgenerationwebui' && textgenerationwebui_settings.streaming) {
streamingProcessor.generator = await generateTextGenWithStreaming(generate_data);
streamingProcessor.generator = await generateTextGenWithStreaming(generate_data, streamingProcessor.abortController.signal);
}
else {
jQuery.ajax({
@@ -5039,6 +5043,7 @@ $(document).ready(function () {
$(document).on("click", ".mes_stop", function () {
if (streamingProcessor) {
streamingProcessor.abortController.abort();
streamingProcessor.isStopped = true;
streamingProcessor.onStopStreaming();
streamingProcessor = null;
@@ -5132,4 +5137,11 @@ $(document).ready(function () {
}
});
});
$(document).on('beforeunload', () => {
if (streamingProcessor) {
console.log('Page reloaded. Aborting streaming...');
streamingProcessor.abortController.abort();
}
});
})

View File

@@ -436,7 +436,12 @@ function getSystemPrompt(nsfw_toggle_prompt, enhance_definitions_prompt, wiBefor
return whole_prompt;
}
async function sendOpenAIRequest(openai_msgs_tosend) {
async function sendOpenAIRequest(openai_msgs_tosend, signal) {
// Provide default abort signal
if (!signal) {
signal = new AbortController().signal;
}
if (oai_settings.reverse_proxy) {
validateReverseProxy();
}
@@ -459,7 +464,8 @@ async function sendOpenAIRequest(openai_msgs_tosend) {
headers: {
'Content-Type': 'application/json',
"X-CSRF-Token": token,
}
},
signal: signal,
});
if (oai_settings.stream_openai) {

View File

@@ -86,7 +86,7 @@ function onBotChange() {
saveSettingsDebounced();
}
async function generatePoe(type, finalPrompt) {
async function generatePoe(type, finalPrompt, signal) {
if (poe_settings.auto_purge) {
let count_to_delete = -1;
@@ -136,7 +136,7 @@ async function generatePoe(type, finalPrompt) {
finalPrompt = sentences.join('');
}
const reply = await sendMessage(finalPrompt, true);
const reply = await sendMessage(finalPrompt, true, signal);
got_reply = true;
return reply;
}
@@ -160,7 +160,11 @@ async function purgeConversation(count = -1) {
return response.ok;
}
async function sendMessage(prompt, withStreaming) {
async function sendMessage(prompt, withStreaming, signal) {
if (!signal) {
signal = new AbortController().signal;
}
const body = JSON.stringify({
bot: poe_settings.bot,
token: poe_settings.token,
@@ -175,6 +179,7 @@ async function sendMessage(prompt, withStreaming) {
},
body: body,
method: 'POST',
signal: signal,
});
if (withStreaming && poe_settings.streaming) {

View File

@@ -147,7 +147,7 @@ function setSettingByName(i, value, trigger) {
}
}
async function generateTextGenWithStreaming(generate_data) {
async function generateTextGenWithStreaming(generate_data, signal) {
const response = await fetch('/generate_textgenerationwebui', {
headers: {
'X-CSRF-Token': token,
@@ -157,6 +157,7 @@ async function generateTextGenWithStreaming(generate_data) {
},
body: JSON.stringify(generate_data),
method: 'POST',
signal: signal,
});
return async function* streamData() {

View File

@@ -367,6 +367,10 @@ app.post("/generate_textgenerationwebui", jsonParser, async function (request, r
if (!!request.header('X-Response-Streaming')) {
const fn_index = Number(request.header('X-Gradio-Streaming-Function'));
let isStreamingStopped = false;
request.socket.on('close', function() {
isStreamingStopped = true;
});
response_generate.writeHead(200, {
'Content-Type': 'text/plain;charset=utf-8',
@@ -404,6 +408,12 @@ app.post("/generate_textgenerationwebui", jsonParser, async function (request, r
});
while (true) {
if (isStreamingStopped) {
console.error('Streaming stopped by user. Closing websocket...');
websocket.close();
return null;
}
if (websocket.readyState == 0 || websocket.readyState == 1 || websocket.readyState == 2) {
await delay(50);
yield text;
@@ -1893,6 +1903,12 @@ app.post('/generate_poe', jsonParser, async (request, response) => {
}
if (streaming) {
let isStreamingStopped = false;
request.socket.on('close', function() {
isStreamingStopped = true;
client.abortController.abort();
});
try {
response.writeHead(200, {
'Content-Type': 'text/plain;charset=utf-8',
@@ -1902,6 +1918,11 @@ app.post('/generate_poe', jsonParser, async (request, response) => {
let reply = '';
for await (const mes of client.send_message(bot, prompt)) {
if (isStreamingStopped) {
console.error('Streaming stopped by user. Closing websocket...');
break;
}
let newText = mes.text.substring(reply.length);
reply = mes.text;
response.write(newText);
@@ -2133,6 +2154,11 @@ app.post("/generate_openai", jsonParser, function (request, response_generate_op
if (!request.body) return response_generate_openai.sendStatus(400);
const api_url = new URL(request.body.reverse_proxy || api_openai).toString();
const controller = new AbortController();
request.socket.on('close', function() {
controller.abort();
});
console.log(request.body);
const config = {
method: 'post',
@@ -2151,7 +2177,8 @@ app.post("/generate_openai", jsonParser, function (request, response_generate_op
"frequency_penalty": request.body.frequency_penalty,
"stop": request.body.stop,
"logit_bias": request.body.logit_bias
}
},
signal: controller.signal,
};
if (request.body.stream)