Mirror of https://github.com/SillyTavern/SillyTavern.git, synced 2025-06-05 21:59:27 +02:00

Compare commits (17 commits)
| SHA1 |
|---|
| 6541d3e741 |
| d4d903323e |
| 5354086438 |
| e9fe58d514 |
| d92f5338b5 |
| c4f9fdd1ee |
| a73a205e2c |
| c154536fa6 |
| 0f183fdcf1 |
| b1d5637fcf |
| caa985590b |
| bb8657f93f |
| c974898887 |
| 41e7c1533c |
| 658de57348 |
| 7c9c0df946 |
| 3b75ef8c39 |
.github/readme.md (vendored, 11 lines changed)
@@ -6,7 +6,7 @@ Based on a fork of TavernAI 1.2.8
### Brought to you by Cohee, RossAscends and the SillyTavern community
-NOTE: We have added [a FAQ](faq.md) to answer most of your questions and help you get started.
+NOTE: We have added [a FAQ](https://docs.sillytavern.app/usage/faq/) to answer most of your questions and help you get started.
### What is SillyTavern or TavernAI?

@@ -14,6 +14,15 @@ Tavern is a user interface you can install on your computer (and Android phones)
SillyTavern is a fork of TavernAI 1.2.8 which is under more active development and has added many major features. At this point, they can be thought of as completely independent programs.
+### Branches
+SillyTavern is being developed using a two-branch system to ensure a smooth experience for all users.
+* main -🌟 **Recommended for most users.** This is the most stable and recommended branch, updated only when major releases are pushed. It's suitable for the majority of users.
+* dev - ⚠️ **Not recommended for casual use.** This branch has the latest features, but be cautious as it may break at any time. Only for power users and enthusiasts.
+If you're not familiar with using the git CLI or don't understand what a branch is, don't worry! The main branch is always the preferable option for you.
### What do I need other than Tavern?
On its own Tavern is useless, as it's just a user interface. You have to have access to an AI system backend that can act as the roleplay character. There are various supported backends: OpenAPI API (GPT), KoboldAI (either running locally or on Google Colab), and more. You can read more about this in [the FAQ](faq.md).
.replit (3 lines changed)
@@ -1,6 +1,7 @@
hidden = [".config", "package-lock.json"]
run = "chmod 755 ./start.sh && ./start.sh"
+entrypoint = "server.js"
[[hints]]
regex = "Error \\[ERR_REQUIRE_ESM\\]"

@@ -62,7 +63,7 @@ support = true
cwd = "."
environment = []
pauseForSourceMap = false
-program = "./index.js"
+program = "./server.js"
request = "launch"
sourceMaps = true
stopOnEntry = false
@@ -6,7 +6,7 @@
"metadata": {},
"source": [
"**Links**<br>\n",
-"Extensions API GitHub: https://github.com/Cohee1207/SillyTavern-extras/<br>\n",
+"Extensions API GitHub: https://github.com/SillyTavern/SillyTavern-extras/<br>\n",
"SillyTavern community Discord (support and discussion): https://discord.gg/RZdyAEUPvj"
]
},

@@ -34,6 +34,9 @@
"source": [
"#@markdown Enables hosting of extensions backend for SillyTavern Extras\n",
"use_cpu = False #@param {type:\"boolean\"}\n",
"#@markdown Allows to run SillyTavern Extras on CPU (use if you're out of daily GPU allowance)\n",
"use_sd_cpu = False #@param {type:\"boolean\"}\n",
"#@markdown Allows to run Stable Diffusion pipeline on CPU (slow!)\n",
"extras_enable_captioning = True #@param {type:\"boolean\"}\n",
"#@markdown Loads the image captioning module\n",
"Captions_Model = \"Salesforce/blip-image-captioning-large\" #@param [ \"Salesforce/blip-image-captioning-large\", \"Salesforce/blip-image-captioning-base\" ]\n",

@@ -51,8 +54,10 @@
"#@markdown * Qiliang/bart-large-cnn-samsum-ChatGPT_v3 - summarization model optimized for chats\n",
"#@markdown * Qiliang/bart-large-cnn-samsum-ElectrifAi_v10 - nice results so far, but still being evaluated\n",
"#@markdown * distilbart-xsum-12-3 - faster, but pretty basic alternative\n",
"extras_enable_tts = True #@param {type:\"boolean\"}\n",
"extras_enable_silero_tts = True #@param {type:\"boolean\"}\n",
"#@markdown Enables Silero text-to-speech module\n",
"extras_enable_edge_tts = True #@param {type:\"boolean\"}\n",
"#@markdown Enables Microsoft Edge text-to-speech module\n",
"extras_enable_sd = True #@param {type:\"boolean\"}\n",
"#@markdown Enables SD picture generation\n",
"SD_Model = \"ckpt/anything-v4.5-vae-swapped\" #@param [ \"ckpt/anything-v4.5-vae-swapped\", \"hakurei/waifu-diffusion\", \"philz1337/clarity\", \"prompthero/openjourney\", \"ckpt/sd15\", \"stabilityai/stable-diffusion-2-1-base\" ]\n",

@@ -73,6 +78,8 @@
"params = []\n",
"if use_cpu:\n",
" params.append('--cpu')\n",
"if use_sd_cpu:\n",
" params.append('--sd-cpu')\n",
"params.append('--share')\n",
"ExtrasModules = []\n",
"\n",

@@ -84,8 +91,10 @@
" ExtrasModules.append('classify')\n",
"if (extras_enable_sd):\n",
" ExtrasModules.append('sd')\n",
"if (extras_enable_tts):\n",
" ExtrasModules.append('tts')\n",
"if (extras_enable_silero_tts):\n",
" ExtrasModules.append('silero-tts')\n",
"if extras_enable_edge_tts:\n",
" ExtrasModules.append('edge-tts')\n",
"if (extras_enable_chromadb):\n",
" ExtrasModules.append('chromadb')\n",
"\n",
package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
{
"name": "sillytavern",
-"version": "1.6.6",
+"version": "1.6.8",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "sillytavern",
-"version": "1.6.6",
+"version": "1.6.8",
"license": "AGPL-3.0",
"dependencies": {
"@dqbd/tiktoken": "^1.0.2",

@@ -46,7 +46,7 @@
"type": "git",
"url": "https://github.com/Cohee1207/SillyTavern.git"
},
-"version": "1.6.6",
+"version": "1.6.8",
"scripts": {
"start": "node server.js",
"pkg": "pkg --compress Gzip --no-bytecode --public ."
@@ -315,10 +315,10 @@ class Client {
async get_bots() {
const viewer = this.next_data.props.pageProps.payload.viewer;
-if (!viewer.availableBots) {
+if (!viewer.availableBotsConnection) {
throw new Error('Invalid token.');
}
-const botList = viewer.viewerBotList;
+const botList = viewer.availableBotsConnection.edges.map(x => x.node);
const retries = 2;
const bots = {};
for (const bot of botList.filter(x => x.deletionState == 'not_deleted')) {
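The get_bots() change above moves from a flat viewer.availableBots check to a Relay-style availableBotsConnection object, where each bot is wrapped in an edges[i].node entry. A minimal sketch of unwrapping such a connection payload; the sample viewer object is illustrative, not the exact Poe schema:

```js
// Sketch: unwrap a Relay-style "connection" into a plain bot list.
// Only the availableBotsConnection.edges[i].node shape comes from the diff;
// the payload values below are hypothetical.
function getBotList(viewer) {
    if (!viewer.availableBotsConnection) {
        throw new Error('Invalid token.');
    }
    return viewer.availableBotsConnection.edges
        .map(edge => edge.node)
        .filter(bot => bot.deletionState === 'not_deleted');
}

const viewer = {
    availableBotsConnection: {
        edges: [
            { node: { displayName: 'Sage', deletionState: 'not_deleted' } },
            { node: { displayName: 'OldBot', deletionState: 'deleted' } },
        ],
    },
};

console.log(getBotList(viewer).map(bot => bot.displayName)); // [ 'Sage' ]
```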
public/instruct/WizardLM-13B.json (new file, 10 lines)
@@ -0,0 +1,10 @@
+{
+"name": "WizardLM-13B",
+"system_prompt": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\n\nWrite {{char}}'s next detailed reply in a fictional roleplay chat between {{user}} and {{char}}.",
+"system_sequence": "",
+"stop_sequence": "",
+"input_sequence": "USER: ",
+"output_sequence": "ASSISTANT: ",
+"separator_sequence": "",
+"wrap": true
+}
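To illustrate what the new preset does at prompt-building time, here is a rough sketch of how its fields could be assembled into a Vicuna-style USER/ASSISTANT layout. The buildPrompt helper is hypothetical; in SillyTavern the actual assembly happens in the instruct-mode formatters shown further down.

```js
// Hypothetical helper: apply a WizardLM-13B-style instruct preset to a short chat.
// Field names mirror the JSON above; the assembly logic is a simplified sketch.
const preset = {
    system_prompt: "A chat between a curious user and an artificial intelligence assistant. " +
        "The assistant gives helpful, detailed, and polite answers to the user's questions.\n\n" +
        "Write {{char}}'s next detailed reply in a fictional roleplay chat between {{user}} and {{char}}.",
    input_sequence: 'USER: ',
    output_sequence: 'ASSISTANT: ',
    wrap: true,
};

function buildPrompt(preset, userName, charName, messages) {
    const sep = preset.wrap ? '\n' : '';
    const system = preset.system_prompt
        .replaceAll('{{user}}', userName)
        .replaceAll('{{char}}', charName);
    const turns = messages.map(m =>
        (m.isUser ? preset.input_sequence : preset.output_sequence) + m.text);
    // Finish with an open assistant turn so the model continues as the character.
    return [system, ...turns, preset.output_sequence].join(sep);
}

console.log(buildPrompt(preset, 'Alice', 'Seraphina', [
    { isUser: true, text: 'Hello there!' },
]));
// => the system prompt (with Alice/Seraphina filled in), then:
// USER: Hello there!
// ASSISTANT:
```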
@@ -1370,10 +1370,10 @@ function getStoppingStrings(isImpersonate, addSpace) {
if (power_user.instruct.enabled) {
if (power_user.instruct.input_sequence) {
-result.push(wrap(power_user.instruct.input_sequence));
+result.push(substituteParams(wrap(power_user.instruct.input_sequence), name1, name2));
}
if (power_user.instruct.output_sequence) {
-result.push(wrap(power_user.instruct.output_sequence));
+result.push(substituteParams(wrap(power_user.instruct.output_sequence), name1, name2));
}
}

@@ -1774,7 +1774,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
const magName = isImpersonate ? (is_pygmalion ? 'You' : name1) : name2;
if (isInstruct) {
-message_already_generated = formatInstructModePrompt(magName, isImpersonate);
+message_already_generated = formatInstructModePrompt(magName, isImpersonate, false, name1, name2);
} else {
message_already_generated = `${magName}: `;
}

@@ -2116,14 +2116,14 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
// Add quiet generation prompt at depth 0
if (quiet_prompt && quiet_prompt.length) {
const name = is_pygmalion ? 'You' : name1;
-const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, false) : `\n${name}: ${quiet_prompt}`;
+const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, false, name1, name2) : `\n${name}: ${quiet_prompt}`;
mesSendString += quietAppend;
}

// Get instruct mode line
if (isInstruct && tokens_already_generated === 0) {
const name = isImpersonate ? (is_pygmalion ? 'You' : name1) : name2;
-mesSendString += formatInstructModePrompt(name, isImpersonate, promptBias);
+mesSendString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2);
}

// Get non-instruct impersonation line

@@ -2541,7 +2541,7 @@ function formatMessageHistoryItem(chatItem, isInstruct) {
let textResult = shouldPrependName ? `${itemName}: ${chatItem.mes}\n` : `${chatItem.mes}\n`;
if (isInstruct) {
-textResult = formatInstructModeChat(itemName, chatItem.mes, chatItem.is_user, isNarratorType, chatItem.force_avatar);
+textResult = formatInstructModeChat(itemName, chatItem.mes, chatItem.is_user, isNarratorType, chatItem.force_avatar, name1, name2);
}

textResult = replaceBiasMarkup(textResult);
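All of the call sites above now pass name1 and name2 through, so {{user}} and {{char}} macros inside instruct sequences are expanded before the sequences are used as stopping strings or prompt lines. A rough sketch of the effect on getStoppingStrings, with substituteParams and wrap reduced to simplified stand-ins for the real helpers in script.js:

```js
// Simplified stand-ins; the real substituteParams/wrap in script.js do more.
const substituteParams = (content, name1, name2) =>
    content.replaceAll('{{user}}', name1).replaceAll('{{char}}', name2);
const wrap = (s) => `\n${s}`;

const power_user = { instruct: { enabled: true, input_sequence: '### Instruction ({{user}}):' } };
const name1 = 'Alice';     // user name
const name2 = 'Seraphina'; // character name

const result = [];
if (power_user.instruct.enabled && power_user.instruct.input_sequence) {
    // Before this change the raw macro text was pushed as a stopping string;
    // now the expanded form is pushed instead.
    result.push(substituteParams(wrap(power_user.instruct.input_sequence), name1, name2));
}

console.log(result); // [ '\n### Instruction (Alice):' ]
```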
@@ -641,9 +641,14 @@ function loadInstructMode() {
});
}

-export function formatInstructModeChat(name, mes, isUser, isNarrator, forceAvatar) {
+export function formatInstructModeChat(name, mes, isUser, isNarrator, forceAvatar, name1, name2) {
const includeNames = isNarrator ? false : (power_user.instruct.names || !!selected_group || !!forceAvatar);
-const sequence = (isUser || isNarrator) ? power_user.instruct.input_sequence : power_user.instruct.output_sequence;
+const sequence = substituteParams(
+(isUser || isNarrator) ? power_user.instruct.input_sequence : power_user.instruct.output_sequence,
+name1,
+name2
+);

const separator = power_user.instruct.wrap ? '\n' : '';
const separatorSequence = power_user.instruct.separator_sequence && !isUser
? power_user.instruct.separator_sequence

@@ -662,9 +667,14 @@ export function formatInstructStoryString(story) {
return text;
}

-export function formatInstructModePrompt(name, isImpersonate, promptBias) {
+export function formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2) {
const includeNames = power_user.instruct.names || !!selected_group;
-const sequence = isImpersonate ? power_user.instruct.input_sequence : power_user.instruct.output_sequence;
+const sequence = substituteParams(
+isImpersonate ? power_user.instruct.input_sequence : power_user.instruct.output_sequence,
+name1,
+name2
+);

const separator = power_user.instruct.wrap ? '\n' : '';
let text = includeNames ? (separator + sequence + separator + `${name}:`) : (separator + sequence);
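With the extra name1/name2 parameters, the input and output sequences themselves may now contain {{user}} and {{char}} macros. A minimal usage sketch under that assumption; it approximates formatInstructModePrompt and omits the promptBias and selected_group handling of the real function:

```js
// Approximation of the updated formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2);
// promptBias and selected_group handling are omitted in this sketch.
const power_user = {
    instruct: {
        wrap: true,
        names: true,
        input_sequence: '### Instruction (from {{user}}):',
        output_sequence: '### Response (as {{char}}):',
    },
};

const substituteParams = (content, name1, name2) =>
    content.replaceAll('{{user}}', name1).replaceAll('{{char}}', name2);

function formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2) {
    const includeNames = power_user.instruct.names;
    const sequence = substituteParams(
        isImpersonate ? power_user.instruct.input_sequence : power_user.instruct.output_sequence,
        name1,
        name2
    );
    const separator = power_user.instruct.wrap ? '\n' : '';
    return includeNames ? (separator + sequence + separator + `${name}:`) : (separator + sequence);
}

console.log(JSON.stringify(formatInstructModePrompt('Seraphina', false, '', 'Alice', 'Seraphina')));
// "\n### Response (as Seraphina):\nSeraphina:"
```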