rebrand of welcome message, icons, and system avatar
Image assets replaced (icons and system avatar); file sizes before → after: 15 KiB → 12 KiB, 5.9 KiB → 6.0 KiB, 7.1 KiB → 7.6 KiB, 4.4 KiB → 2.8 KiB, 4.7 KiB → 3.6 KiB, 5.3 KiB → 32 KiB.
@@ -163,10 +163,10 @@ window["TavernAI"] = {};
 let converter = new showdown.Converter({ emoji: "true" });
 const gpt3 = new GPT3BrowserTokenizer({ type: 'gpt3' });
 /* let bg_menu_toggle = false; */
-const systemUserName = "TavernAI";
+const systemUserName = "SillyTavern System";
 let default_user_name = "You";
 let name1 = default_user_name;
-let name2 = "TavernAI";
+let name2 = "SillyTavern System";
 let chat = [];
 let safetychat = [
 	{
@@ -248,11 +248,11 @@ const system_messages = {
 		is_user: false,
 		is_name: true,
 		mes: [
-			'Welcome to TavernAI! In order to begin chatting:',
-			'<ul>',
+			'Welcome to SillyTavern! In order to begin chatting:',
+			'<ol>',
 			'<li>Connect to one of the supported generation APIs</li>',
 			'<li>Create or pick a character from the list</li>',
-			'</ul>',
+			'</ol>',
 			"<h4>Running on Colab and can't get an answer from the AI or getting Out of Memory errors?</h4>",
 			'Set a lower Context Size in AI generation settings.<br>Values in range of 1400-1600 Tokens would be the safest choice.',
 			'<h4>Still have questions left?</h4>',
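For context, the renamed `systemUserName` and the updated `mes` entries shape the welcome system message shown on first load. Below is a minimal sketch of how such an entry could be turned into HTML, assuming the message lines are simply joined and passed through the showdown converter declared in the same file; the `renderSystemMessage` helper and the trimmed object are illustrative only and are not code from this PR.

```js
// Illustrative sketch only (not part of this PR): render a system message's
// `mes` lines to HTML using the same showdown converter setup as the client code.
const showdown = require('showdown');

const converter = new showdown.Converter({ emoji: "true" });
const systemUserName = "SillyTavern System";

// Trimmed-down welcome entry mirroring the shape shown in the diff above.
const welcomeMessage = {
    is_user: false,
    is_name: true,
    mes: [
        'Welcome to SillyTavern! In order to begin chatting:',
        '<ol>',
        '<li>Connect to one of the supported generation APIs</li>',
        '<li>Create or pick a character from the list</li>',
        '</ol>',
    ],
};

// Hypothetical helper: joins the message lines and converts them to HTML.
function renderSystemMessage(message) {
    return converter.makeHtml(message.mes.join('\n'));
}

console.log(`${systemUserName}:`, renderSystemMessage(welcomeMessage));
```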