Compare commits

...

25 Commits
1.5.3 ... 1.5.4

Author SHA1 Message Date
Cohee
da76933c95 Merge pull request #339 from ramblingcoder/main
Update docker-compose.yml to reflect sillytavern name
2023-05-19 11:57:03 +03:00
SillyLossy
74d99e09da Bump package version 2023-05-19 11:56:28 +03:00
Cohee
8da082ff8d Merge pull request #340 from nai-degen/fix-partial-sse-handling
Fixes streaming responses hanging when encountering partial SSE message
2023-05-19 11:53:20 +03:00
unknown
7e59745dfc buffers partial SSE messages from Readable 2023-05-19 03:20:27 -05:00
ramblingcoder
3e4e1ba96a Update docker-compose.yml 2023-05-18 18:09:41 -05:00
ramblingcoder
6557abcd07 Update docker-compose.yml to reflect sillytavern name 2023-05-18 17:44:12 -05:00
RossAscends
db439be897 add black and white backgrounds 2023-05-18 14:48:31 +09:00
SillyLossy
a656783b15 Upgrade tensorflow in colab 2023-05-17 01:13:35 +03:00
RossAscends
fde5f7af84 Update readme.md with SD/TSS images 2023-05-17 04:00:05 +09:00
RossAscends
454994a7bd Update readme.md with SD/TTS info 2023-05-17 03:55:23 +09:00
Cohee
843e7a8363 Create build-and-publish-release-main.yml 2023-05-16 20:24:32 +03:00
SillyLossy
849c82b6f7 Fix Poe message sending 2023-05-16 11:19:38 +03:00
SillyLossy
a4aba352e7 Merge branch 'main' of https://github.com/SillyLossy/TavernAI 2023-05-16 10:38:00 +03:00
SillyLossy
1bfb5637b0 Check for crop arguments before applying it 2023-05-16 10:37:52 +03:00
Cohee
d72f3bb35e Merge pull request #319 from sanskar-mk2/swipe-cursor
Swipe cursor
2023-05-16 10:22:47 +03:00
Cohee
bd2bcf6e9d Update readme.md 2023-05-16 10:17:01 +03:00
Sanskar Tiwari
b823d40df6 ocd whitespace 2023-05-16 04:44:10 +05:30
Sanskar Tiwari
b1acf1532e make swipe button cursor pointer since it is a button 2023-05-16 04:42:50 +05:30
SillyLossy
1ec3352f39 Revert pygmalion formatting of substitution parameters #317 2023-05-16 01:17:37 +03:00
SillyLossy
6bb44b95b0 Fix OAI key usage 2023-05-16 00:53:33 +03:00
Cohee
2b54d21617 Merge pull request #315 from sanskar-mk2/main
add llama-precise settings
2023-05-15 21:28:06 +03:00
Sanskar Tiwari
08a25d2fbf add llama-precise settings 2023-05-15 23:23:53 +05:30
Cohee
d01bee97ad Merge pull request #308 from BlueprintCoding/Blueprint 2023-05-15 10:03:42 +03:00
bcp-hayden
ee2ecd6d4b Update start.sh to dynamically select directory for start.sh 2023-05-14 17:10:09 -06:00
Cohee
33042f6dea Update bug_report.md 2023-05-15 00:59:32 +03:00
17 changed files with 147 additions and 22 deletions

.github/ISSUE_TEMPLATE/bug_report.md

@@ -30,6 +30,7 @@ Providing the logs from the browser DevTools console (opened by pressing the F12
 **Desktop (please complete the following information):**
  - OS/Device: [e.g. Windows 11]
  - Environment: [cloud, local]
+ - Node.js version (if applicable): [run `node --version` in cmd]
  - Browser [e.g. chrome, safari]
  - Generation API [e.g. KoboldAI, OpenAI]
  - Branch [main, dev]

.github/workflows/build-and-publish-release-main.yml

@@ -0,0 +1,46 @@
name: Build and Publish Release (Main)

on:
  push:
    branches:
      - main

jobs:
  build_and_publish:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Node.js
        uses: actions/setup-node@v2
        with:
          node-version: 18

      - name: Install dependencies
        run: npm ci

      - name: Build and package with pkg
        run: |
          npm install -g pkg
          npm run pkg

      - name: Create or update release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: continuous-release-main
          release_name: Continuous Release (Main)
          draft: false
          prerelease: true

      - name: Upload binaries to release
        uses: softprops/action-gh-release@v1
        with:
          files: dist/*
          release_id: ${{ steps.create_release.outputs.id }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -98,7 +98,7 @@
"!git clone https://github.com/Cohee1207/tts_samples\n", "!git clone https://github.com/Cohee1207/tts_samples\n",
"!npm install -g localtunnel\n", "!npm install -g localtunnel\n",
"!pip install -r requirements-complete.txt\n", "!pip install -r requirements-complete.txt\n",
"!pip install tensorflow==2.11\n", "!pip install tensorflow==2.12\n",
"\n", "\n",
"\n", "\n",
"cmd = f\"python server.py {' '.join(params)}\"\n", "cmd = f\"python server.py {' '.join(params)}\"\n",

docker-compose.yml

@@ -1,12 +1,13 @@
version: "3" version: "3"
services: services:
tavernai: sillytavern:
build: .. build: ..
container_name: tavernai container_name: sillytavern
hostname: tavernai hostname: sillytavern
image: tavernai/tavernai:latest image: cohee1207/sillytavern:latest
ports: ports:
- "8000:8000" - "8000:8000"
volumes: volumes:
- "./config:/home/node/app/config" - "./config:/home/node/app/config"
- "./config.conf:/home/node/app/config.conf"
restart: unless-stopped restart: unless-stopped

package-lock.json (generated, 4 changed lines)

@@ -1,12 +1,12 @@
 {
   "name": "sillytavern",
-  "version": "1.5.3",
+  "version": "1.5.4",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "sillytavern",
-      "version": "1.5.3",
+      "version": "1.5.4",
       "license": "AGPL-3.0",
       "dependencies": {
         "@dqbd/tiktoken": "^1.0.2",

package.json

@@ -40,7 +40,7 @@
"type": "git", "type": "git",
"url": "https://github.com/Cohee1207/SillyTavern.git" "url": "https://github.com/Cohee1207/SillyTavern.git"
}, },
"version": "1.5.3", "version": "1.5.4",
"scripts": { "scripts": {
"start": "node server.js" "start": "node server.js"
}, },

poe-client.js

@@ -521,7 +521,7 @@ class Client {
         console.log(`Sending message to ${chatbot}: ${message}`);
-        const messageData = await this.send_query("AddHumanMessageMutation", {
+        const messageData = await this.send_query("SendMessageMutation", {
             "bot": chatbot,
             "query": message,
             "chatId": this.bots[chatbot]["chatId"],
@@ -531,14 +531,14 @@ class Client {
         delete this.active_messages["pending"];

-        if (!messageData["data"]["messageCreateWithStatus"]["messageLimit"]["canSend"]) {
+        if (!messageData["data"]["messageEdgeCreate"]["message"]) {
             throw new Error(`Daily limit reached for ${chatbot}.`);
         }

         let humanMessageId;
         try {
-            const humanMessage = messageData["data"]["messageCreateWithStatus"];
-            humanMessageId = humanMessage["message"]["messageId"];
+            const humanMessage = messageData["data"]["messageEdgeCreate"]["message"];
+            humanMessageId = humanMessage["node"]["messageId"];
         } catch (error) {
             throw new Error(`An unknown error occured. Raw response data: ${messageData}`);
         }


@@ -0,0 +1,40 @@
mutation chatHelpers_sendMessageMutation_Mutation(
  $chatId: BigInt!
  $bot: String!
  $query: String!
  $source: MessageSource
  $withChatBreak: Boolean!
) {
  messageEdgeCreate(chatId: $chatId, bot: $bot, query: $query, source: $source, withChatBreak: $withChatBreak) {
    chatBreak {
      cursor
      node {
        id
        messageId
        text
        author
        suggestedReplies
        creationTime
        state
      }
      id
    }
    message {
      cursor
      node {
        id
        messageId
        text
        author
        suggestedReplies
        creationTime
        state
        chat {
          shouldShowDisclaimer
          id
        }
      }
      id
    }
  }
}
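
To tie this mutation to the client change above: the Poe client now reads the sent message from `data.messageEdgeCreate.message.node` instead of `data.messageCreateWithStatus`. A minimal sketch of that traversal, using illustrative sample data shaped like the mutation's response:

```js
// Illustrative response shaped like the messageEdgeCreate mutation above
// (field values are made up for this sketch).
const messageData = {
    data: {
        messageEdgeCreate: {
            chatBreak: null,
            message: {
                cursor: "0",
                node: { id: "TWVzc2FnZTox", messageId: 1, text: "Hello", author: "human" },
                id: "TWVzc2FnZUVkZ2U6MQ==",
            },
        },
    },
};

// Mirrors the new extraction logic in the poe-client hunk above.
const humanMessage = messageData?.data?.messageEdgeCreate?.message;
if (!humanMessage) {
    throw new Error("Daily limit reached (or the request was rejected).");
}
const humanMessageId = humanMessage.node.messageId;
console.log(humanMessageId); // -> 1
```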


@@ -0,0 +1,15 @@
{
    "temp": 0.7,
    "top_p": 0.1,
    "top_k": 40,
    "typical_p": 1,
    "rep_pen": 1.18,
    "no_repeat_ngram_size": 0,
    "penalty_alpha": 0,
    "num_beams": 1,
    "length_penalty": 1,
    "min_length": 200,
    "encoder_rep_pen": 1,
    "do_sample": true,
    "early_stopping": false
}
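
For illustration only, a sketch of how a sampler preset like this might be consumed; the file name and the request fields other than the preset keys are assumptions, not taken from the repository:

```js
const fs = require("fs");

// Hypothetical path; the preset above is plain JSON on disk.
const preset = JSON.parse(fs.readFileSync("llama-precise.settings", "utf8"));

// Spread the sampler settings into a text-generation request body
// (prompt and max_new_tokens are illustrative fields).
const requestBody = {
    prompt: "You are a helpful assistant.\nUser: Hello!\nAssistant:",
    max_new_tokens: 200,
    ...preset, // temp, top_p, top_k, rep_pen, etc.
};

console.log(requestBody.temp, requestBody.rep_pen); // -> 0.7 1.18
```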

Binary file not shown (new background image added, 7.9 KiB).

Binary file not shown (new background image added, 7.5 KiB).

public/script.js

@@ -1369,7 +1369,12 @@ function getExtensionPrompt(position = 0, depth = undefined, separator = "\n") {
 function baseChatReplace(value, name1, name2) {
     if (value !== undefined && value.length > 0) {
-        value = substituteParams(value, is_pygmalion ? "You" : name1, name2);
+        if (is_pygmalion) {
+            value = value.replace(/{{user}}:/gi, 'You:');
+            value = value.replace(/<USER>:/gi, 'You:');
+        }
+
+        value = substituteParams(value, name1, name2);

         if (power_user.collapse_newlines) {
             value = collapseNewlines(value);
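
A self-contained sketch of the new substitution order, with substituteParams stubbed purely for illustration:

```js
// Stand-in for the real substituteParams(): swap user/char placeholders (illustration only).
function substituteParams(value, name1, name2) {
    return value
        .replace(/{{user}}|<USER>/gi, name1)
        .replace(/{{char}}|<BOT>/gi, name2);
}

function baseChatReplaceSketch(value, name1, name2, isPygmalion) {
    if (isPygmalion) {
        // Pygmalion-style models expect the literal speaker label "You:",
        // so "{{user}}:" / "<USER>:" are rewritten before the generic substitution runs.
        value = value.replace(/{{user}}:/gi, 'You:');
        value = value.replace(/<USER>:/gi, 'You:');
    }
    return substituteParams(value, name1, name2);
}

console.log(baseChatReplaceSketch('{{user}}: hi {{char}}', 'Alice', 'Bob', true));  // -> "You: hi Bob"
console.log(baseChatReplaceSketch('{{user}}: hi {{char}}', 'Alice', 'Bob', false)); // -> "Alice: hi Bob"
```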

public/scripts/openai.js

@@ -555,13 +555,19 @@ async function sendOpenAIRequest(type, openai_msgs_tosend, signal) {
     const decoder = new TextDecoder();
     const reader = response.body.getReader();
     let getMessage = "";
+    let messageBuffer = "";
     while (true) {
         const { done, value } = await reader.read();
         let response = decoder.decode(value);

         tryParseStreamingError(response);

-        let eventList = response.split("\n");
+        // ReadableStream's buffer is not guaranteed to contain full SSE messages as they arrive in chunks
+        // We need to buffer chunks until we have one or more full messages (separated by double newlines)
+        messageBuffer += response;
+        let eventList = messageBuffer.split("\n\n");
+        // Last element will be an empty string or a leftover partial message
+        messageBuffer = eventList.pop();

         for (let event of eventList) {
             if (!event.startsWith("data"))
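
The fix above boils down to buffering raw chunks until complete, double-newline-delimited SSE events are available. A minimal standalone sketch of that pattern (function and variable names are illustrative):

```js
// Minimal sketch of buffering partial SSE chunks until complete
// "\n\n"-delimited events are available (illustrative names only).
function createSseBuffer() {
    let messageBuffer = "";
    return function feed(chunk) {
        // Append the raw chunk; it may end mid-event.
        messageBuffer += chunk;
        const events = messageBuffer.split("\n\n");
        // The last element is either "" (the chunk ended exactly on a boundary)
        // or a partial event that must wait for the next chunk.
        messageBuffer = events.pop();
        return events;
    };
}

// Usage: the second chunk completes the event started by the first one.
const feed = createSseBuffer();
console.log(feed('data: {"choices":[{"delta":{"content":"Hel')); // -> []
console.log(feed('lo"}}]}\n\n'));                                // -> ['data: {"choices":[{"delta":{"content":"Hello"}}]}']
```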

public/style.css

@@ -527,6 +527,7 @@ code {
     grid-column-start: 4;
     flex-flow: column;
     font-size: 30px;
+    cursor: pointer;
 }

 .swipe_right img,

readme.md

@@ -65,6 +65,8 @@ Get in touch with the developers directly:
 * Character emotional expressions
 * Auto-Summary of the chat history
 * Sending images to chat, and the AI interpreting the content.
+* Stable Diffusion image generation (5 chat-related presets plus 'free mode')
+* Text-to-speech for AI response messages (via ElevenLabs, Silero, or the OS's System TTS)

 ## UI Extensions 🚀
@@ -76,6 +78,8 @@ Get in touch with the developers directly:
| D&D Dice | A set of 7 classic D&D dice for all your dice rolling needs.<br><br>*I used to roll the dice.<br>Feel the fear in my enemies' eyes* | None | <img style="max-width:200px" alt="image" src="https://user-images.githubusercontent.com/18619528/226199925-a066c6fc-745e-4a2b-9203-1cbffa481b14.png"> | | D&D Dice | A set of 7 classic D&D dice for all your dice rolling needs.<br><br>*I used to roll the dice.<br>Feel the fear in my enemies' eyes* | None | <img style="max-width:200px" alt="image" src="https://user-images.githubusercontent.com/18619528/226199925-a066c6fc-745e-4a2b-9203-1cbffa481b14.png"> |
| Author's Note | Built-in extension that allows you to append notes that will be added to the context and steer the story and character in a specific direction. Because it's sent after the character description, it has a lot of weight. Thanks Ali#2222 for pitching the idea! | None | ![image](https://user-images.githubusercontent.com/128647114/230311637-d809cd9b-af66-4dd1-a310-7a27e847c011.png) | | Author's Note | Built-in extension that allows you to append notes that will be added to the context and steer the story and character in a specific direction. Because it's sent after the character description, it has a lot of weight. Thanks Ali#2222 for pitching the idea! | None | ![image](https://user-images.githubusercontent.com/128647114/230311637-d809cd9b-af66-4dd1-a310-7a27e847c011.png) |
| Character Backgrounds | Built-in extension to assign unique backgrounds to specific chats or groups. | None | <img style="max-width:200px" alt="image" src="https://user-images.githubusercontent.com/18619528/233494454-bfa7c9c7-4faa-4d97-9c69-628fd96edd92.png"> | | Character Backgrounds | Built-in extension to assign unique backgrounds to specific chats or groups. | None | <img style="max-width:200px" alt="image" src="https://user-images.githubusercontent.com/18619528/233494454-bfa7c9c7-4faa-4d97-9c69-628fd96edd92.png"> |
| Stable Diffusion | Use local of cloud-based Stable Diffusion webUI API to generate images. 5 presets included ('you', 'your face', 'me', 'the story', and 'the last message'. Free mode also supported via `/sd (anything_here_)` command in the chat input bar. Most common StableDiffusion generation settings are customizable within the SillyTavern UI. | None | <img style="max-width:200px" alt="image" src="https://files.catbox.moe/ppata8.png"> |
| Text-to-Speech | AI-generated voice will read back character messages on demand, or automatically read new messages they arrive. Supports ElevenLabs, Silero, and your device's TTS service. | None | <img style="max-width:200px" alt="image" src="https://files.catbox.moe/o3wxkk.png"> |
## UI/CSS/Quality of Life tweaks by RossAscends ## UI/CSS/Quality of Life tweaks by RossAscends
@@ -136,8 +140,8 @@ Easy to follow guide with pretty pictures:
 5. Open a Command Prompt inside that folder by clicking in the 'Address Bar' at the top, typing `cmd`, and pressing Enter.
 6. Once the black box (Command Prompt) pops up, type ONE of the following into it and press Enter:
-* for Main Branch: `git clone <https://github.com/Cohee1207/SillyTavern> -b main`
-* for Dev Branch: `git clone <https://github.com/Cohee1207/SillyTavern> -b dev`
+* for Main Branch: `git clone https://github.com/Cohee1207/SillyTavern -b main`
+* for Dev Branch: `git clone https://github.com/Cohee1207/SillyTavern -b dev`
 7. Once everything is cloned, double click `Start.bat` to make NodeJS install its requirements.
 8. The server will then start, and SillyTavern will popup in your browser.

server.js

@@ -860,7 +860,7 @@ async function charaWrite(img_url, data, target_img, response = undefined, mes =
     let rawImg = await jimp.read(img_url);

     // Apply crop if defined
-    if (typeof crop == 'object') {
+    if (typeof crop == 'object' && [crop.x, crop.y, crop.width, crop.height].every(x => typeof x === 'number')) {
         rawImg = rawImg.crop(crop.x, crop.y, crop.width, crop.height);
     }
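
A tiny standalone sketch of the guard added above, showing which crop objects now pass validation (isValidCrop is an illustrative helper, not a function from the repository):

```js
// Only call crop() when every field is a number; jimp expects numeric arguments,
// and partial or undefined crop data previously slipped through the plain typeof check.
function isValidCrop(crop) {
    return typeof crop === 'object'
        && crop !== null
        && [crop.x, crop.y, crop.width, crop.height].every(v => typeof v === 'number');
}

console.log(isValidCrop({ x: 0, y: 0, width: 400, height: 600 })); // true
console.log(isValidCrop({ x: 0, y: 0 }));                          // false (width/height missing)
console.log(isValidCrop(null));                                    // false (note: typeof null === 'object')
```
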
@@ -2437,7 +2437,13 @@ app.post("/openai_bias", jsonParser, async function (request, response) {
 // Shamelessly stolen from Agnai
 app.post("/openai_usage", jsonParser, async function (request, response) {
     if (!request.body) return response.sendStatus(400);
-    const key = request.body.key;
+    const key = readSecret(SECRET_KEYS.OPENAI);
+
+    if (!key) {
+        console.warn('Get key usage failed: Missing OpenAI API key.');
+        return response.sendStatus(401);
+    }
+
     const api_url = new URL(request.body.reverse_proxy || api_openai).toString();

     const headers = {
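
A hedged Express-style sketch of the pattern the /openai_usage change adopts: resolve the OpenAI key from server-side secret storage instead of trusting the request body. readSecret and SECRET_KEYS are the names used in the hunk above, stubbed here so the snippet runs on its own:

```js
const express = require('express');

// Stand-ins for the server's secret storage (stubbed for this sketch only).
const SECRET_KEYS = { OPENAI: 'api_key_openai' };
function readSecret(key) {
    // The real implementation reads persisted secrets on the server;
    // this stub just falls back to an environment variable.
    return process.env[key.toUpperCase()] || '';
}

const app = express();
app.use(express.json());

app.post('/openai_usage', async (request, response) => {
    if (!request.body) return response.sendStatus(400);

    // Never use a key supplied by the client; resolve it server-side.
    const key = readSecret(SECRET_KEYS.OPENAI);
    if (!key) {
        console.warn('Get key usage failed: Missing OpenAI API key.');
        return response.sendStatus(401);
    }

    // ...call the usage endpoint with `key` in the Authorization header...
    return response.send({ ok: true });
});

app.listen(3000);
```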

start.sh

@@ -30,4 +30,4 @@ echo "Installing Node Modules..."
 npm i

 echo "Entering SillyTavern..."
-node server.js
+node "$(dirname "$0")/server.js"