diff --git a/.github/readme.md b/.github/readme.md
index 885f25461..3fa3826b4 100644
--- a/.github/readme.md
+++ b/.github/readme.md
@@ -223,7 +223,7 @@ If the ST-hosting device is on the same wifi network, you will use the ST-host's
If you (or someone else) want to connect to your hosted ST while not being on the same network, you will need the public IP of your ST-hosting device.
-* While using the ST-hosting device, access [this page](https://whatismyipaddress.com/) and look for for `IPv4`. This is what you would use to connect from the remote device.
+* While using the ST-hosting device, access [this page](https://whatismyipaddress.com/) and look for `IPv4`. This is what you would use to connect from the remote device.
### 3. Connect the remote device to the ST host machine
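An aside on the IPv4 bullet above: the same public address can also be fetched from a terminal on the ST-hosting machine, which is handy if you are already in a shell. A sketch using a public echo service (ipify here; any equivalent service works):

    curl https://api.ipify.org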
diff --git a/colab/GPU.ipynb b/colab/GPU.ipynb
index 630ea43ca..4adf35e6c 100644
--- a/colab/GPU.ipynb
+++ b/colab/GPU.ipynb
@@ -40,20 +40,20 @@
"use_sd_cpu = False #@param {type:\"boolean\"}\n",
"#@markdown ***\n",
"#@markdown Loads the image captioning module\n",
- "extras_enable_captioning = True #@param {type:\"boolean\"}\n",
- "Captions_Model = \"Salesforce/blip-image-captioning-large\" #@param [ \"Salesforce/blip-image-captioning-large\", \"Salesforce/blip-image-captioning-base\" ]\n",
+ "extras_enable_caption = True #@param {type:\"boolean\"}\n",
+ "captioning_model = \"Salesforce/blip-image-captioning-large\" #@param [ \"Salesforce/blip-image-captioning-large\", \"Salesforce/blip-image-captioning-base\" ]\n",
"#@markdown * Salesforce/blip-image-captioning-large - good base model\n",
"#@markdown * Salesforce/blip-image-captioning-base - slightly faster but less accurate\n",
"#@markdown ***\n",
"#@markdown Loads the sentiment classification model\n",
- "extras_enable_emotions = True #@param {type:\"boolean\"}\n",
- "Emotions_Model = \"nateraw/bert-base-uncased-emotion\" #@param [\"nateraw/bert-base-uncased-emotion\", \"joeddav/distilbert-base-uncased-go-emotions-student\"]\n",
+ "extras_enable_classify = True #@param {type:\"boolean\"}\n",
+ "classification_model = \"nateraw/bert-base-uncased-emotion\" #@param [\"nateraw/bert-base-uncased-emotion\", \"joeddav/distilbert-base-uncased-go-emotions-student\"]\n",
"#@markdown * nateraw/bert-base-uncased-emotion = 6 supported emotions
\n",
"#@markdown * joeddav/distilbert-base-uncased-go-emotions-student = 28 supported emotions\n",
"#@markdown ***\n",
"#@markdown Loads the story summarization module\n",
- "extras_enable_memory = True #@param {type:\"boolean\"}\n",
- "Memory_Model = \"slauw87/bart_summarisation\" #@param [ \"slauw87/bart_summarisation\", \"Qiliang/bart-large-cnn-samsum-ChatGPT_v3\", \"Qiliang/bart-large-cnn-samsum-ElectrifAi_v10\", \"distilbart-xsum-12-3\" ]\n",
+ "extras_enable_summarize = True #@param {type:\"boolean\"}\n",
+ "summarization_model = \"slauw87/bart_summarisation\" #@param [ \"slauw87/bart_summarisation\", \"Qiliang/bart-large-cnn-samsum-ChatGPT_v3\", \"Qiliang/bart-large-cnn-samsum-ElectrifAi_v10\", \"distilbart-xsum-12-3\" ]\n",
"#@markdown * slauw87/bart_summarisation - general purpose summarization model\n",
"#@markdown * Qiliang/bart-large-cnn-samsum-ChatGPT_v3 - summarization model optimized for chats\n",
"#@markdown * Qiliang/bart-large-cnn-samsum-ElectrifAi_v10 - nice results so far, but still being evaluated\n",
@@ -63,10 +63,18 @@
"extras_enable_silero_tts = True #@param {type:\"boolean\"}\n",
"#@markdown Enables Microsoft Edge text-to-speech module\n",
"extras_enable_edge_tts = True #@param {type:\"boolean\"}\n",
+ "#@markdown Enables RVC module\n",
+ "extras_enable_rvc = False #@param {type:\"boolean\"}\n",
+ "#@markdown ***\n",
+ "#@markdown Enables Whisper speech recognition module\n",
+ "extras_enable_whisper_stt = True #@param {type:\"boolean\"}\n",
+ "whisper_model = \"base.en\" #@param [ \"tiny.en\", \"base.en\", \"small.en\", \"medium.en\", \"tiny\", \"base\", \"small\", \"medium\", \"large\" ]\n",
+ "#@markdown There are five model sizes, four with English-only versions, offering speed and accuracy tradeoffs.\n",
+ "#@markdown The .en models for English-only applications tend to perform better, especially for the tiny.en and base.en models.\n",
"#@markdown ***\n",
"#@markdown Enables SD picture generation\n",
"extras_enable_sd = True #@param {type:\"boolean\"}\n",
- "SD_Model = \"ckpt/anything-v4.5-vae-swapped\" #@param [ \"ckpt/anything-v4.5-vae-swapped\", \"hakurei/waifu-diffusion\", \"philz1337/clarity\", \"prompthero/openjourney\", \"ckpt/sd15\", \"stabilityai/stable-diffusion-2-1-base\" ]\n",
+ "sd_model = \"ckpt/anything-v4.5-vae-swapped\" #@param [ \"ckpt/anything-v4.5-vae-swapped\", \"hakurei/waifu-diffusion\", \"philz1337/clarity\", \"prompthero/openjourney\", \"ckpt/sd15\", \"stabilityai/stable-diffusion-2-1-base\" ]\n",
"#@markdown * ckpt/anything-v4.5-vae-swapped - anime style model\n",
"#@markdown * hakurei/waifu-diffusion - anime style model\n",
"#@markdown * philz1337/clarity - realistic style model\n",
@@ -91,28 +99,36 @@
"if secure:\n",
" params.append('--secure')\n",
"params.append('--share')\n",
- "ExtrasModules = []\n",
+ "modules = []\n",
"\n",
- "if (extras_enable_captioning):\n",
- " ExtrasModules.append('caption')\n",
- "if (extras_enable_memory):\n",
- " ExtrasModules.append('summarize')\n",
- "if (extras_enable_emotions):\n",
- " ExtrasModules.append('classify')\n",
- "if (extras_enable_sd):\n",
- " ExtrasModules.append('sd')\n",
- "if (extras_enable_silero_tts):\n",
- " ExtrasModules.append('silero-tts')\n",
+ "if extras_enable_caption:\n",
+ " modules.append('caption')\n",
+ "if extras_enable_summarize:\n",
+ " modules.append('summarize')\n",
+ "if extras_enable_classify:\n",
+ " modules.append('classify')\n",
+ "if extras_enable_sd:\n",
+ " modules.append('sd')\n",
+ "if extras_enable_silero_tts:\n",
+ " modules.append('silero-tts')\n",
"if extras_enable_edge_tts:\n",
- " ExtrasModules.append('edge-tts')\n",
- "if (extras_enable_chromadb):\n",
- " ExtrasModules.append('chromadb')\n",
+ " modules.append('edge-tts')\n",
+ "if extras_enable_chromadb:\n",
+ " modules.append('chromadb')\n",
+ "if extras_enable_whisper_stt:\n",
+ " modules.append('whisper-stt')\n",
+ " params.append(f'--stt-whisper-model-path={whisper_model}')\n",
+ "if extras_enable_rvc:\n",
+ " modules.append('rvc')\n",
+ " params.append('--max-content-length=2000')\n",
+ " params.append('--rvc-save-file')\n",
"\n",
- "params.append(f'--classification-model={Emotions_Model}')\n",
- "params.append(f'--summarization-model={Memory_Model}')\n",
- "params.append(f'--captioning-model={Captions_Model}')\n",
- "params.append(f'--sd-model={SD_Model}')\n",
- "params.append(f'--enable-modules={\",\".join(ExtrasModules)}')\n",
+ "\n",
+ "params.append(f'--classification-model={classification_model}')\n",
+ "params.append(f'--summarization-model={summarization_model}')\n",
+ "params.append(f'--captioning-model={captioning_model}')\n",
+ "params.append(f'--sd-model={sd_model}')\n",
+ "params.append(f'--enable-modules={\",\".join(modules)}')\n",
"\n",
"\n",
"%cd /\n",
@@ -121,22 +137,13 @@
"!git clone https://github.com/Cohee1207/tts_samples\n",
"!npm install -g localtunnel\n",
"%pip install -r requirements.txt\n",
- "#!pip install tensorflow==2.14\n",
- "#!pip install colorama\n",
- "#!pip install Flask-Cors\n",
- "#!pip install Flask-Compress\n",
- "#!pip install transformers\n",
- "#!pip install Flask_Cloudflared\n",
- "#!pip install webuiapi\n",
- "#!pip install diffusers\n",
- "#!pip install accelerate\n",
- "#!pip install silero_api_server\n",
- "#!pip install edge_tts\n",
- "#!pip install chromadb\n",
- "#!pip install sentence_transformers\n",
"!wget https://github.com/cloudflare/cloudflared/releases/download/2023.5.0/cloudflared-linux-amd64 -O /tmp/cloudflared-linux-amd64\n",
"!chmod +x /tmp/cloudflared-linux-amd64\n",
"\n",
+ "if extras_enable_rvc:\n",
+ " print(\"Installing RVC requirements\")\n",
+ " !pip install -r requirements-rvc.txt\n",
+ "\n",
"# Generate a random API key\n",
"api_key = secrets.token_hex(5)\n",
"\n",
diff --git a/public/index.html b/public/index.html
index 6ac83f389..78c1dfdf8 100644
--- a/public/index.html
+++ b/public/index.html
@@ -8,6 +8,7 @@
+    <link rel="manifest" href="manifest.json">
diff --git a/public/manifest.json b/public/manifest.json
new file mode 100644
index 000000000..28df3de4c
--- /dev/null
+++ b/public/manifest.json
@@ -0,0 +1,30 @@
+{
+ "name": "SillyTavern",
+ "short_name": "SillyTavern",
+ "start_url": "/",
+ "display": "standalone",
+ "theme_color": "#202124",
+ "background_color": "#202124",
+ "icons": [
+ {
+ "src": "img/apple-icon-57x57.png",
+ "sizes": "57x57",
+ "type": "image/png"
+ },
+ {
+ "src": "img/apple-icon-72x72.png",
+ "sizes": "72x72",
+ "type": "image/png"
+ },
+ {
+ "src": "img/apple-icon-114x114.png",
+ "sizes": "114x114",
+ "type": "image/png"
+ },
+ {
+ "src": "img/apple-icon-144x144.png",
+ "sizes": "144x144",
+ "type": "image/png"
+ }
+ ]
+}
diff --git a/public/scripts/extensions/shared.js b/public/scripts/extensions/shared.js
index 37dbec96a..26164d8d0 100644
--- a/public/scripts/extensions/shared.js
+++ b/public/scripts/extensions/shared.js
@@ -23,7 +23,7 @@ export async function getMultimodalCaption(base64Img, prompt) {
const compressionLimit = 2 * 1024 * 1024;
if (extension_settings.caption.multimodal_api === 'openrouter' && base64Bytes > compressionLimit) {
const maxSide = 1024;
- base64Img = await createThumbnail(base64Img, maxSide, maxSide);
+ base64Img = await createThumbnail(base64Img, maxSide, maxSide, 'image/jpeg');
}
const apiResult = await fetch('/api/openai/caption-image', {
diff --git a/public/scripts/utils.js b/public/scripts/utils.js
index d8b99956f..571583240 100644
--- a/public/scripts/utils.js
+++ b/public/scripts/utils.js
@@ -973,9 +973,10 @@ export function loadFileToDocument(url, type) {
* @param {string} dataUrl The data URL encoded data of the image.
* @param {number} maxWidth The maximum width of the thumbnail.
* @param {number} maxHeight The maximum height of the thumbnail.
+ * @param {string} [type='image/jpeg'] The type of the thumbnail.
* @returns {Promise<string>} A promise that resolves to the thumbnail data URL.
*/
-export function createThumbnail(dataUrl, maxWidth, maxHeight) {
+export function createThumbnail(dataUrl, maxWidth, maxHeight, type = 'image/jpeg') {
return new Promise((resolve, reject) => {
const img = new Image();
img.src = dataUrl;
@@ -1000,7 +1001,7 @@ export function createThumbnail(dataUrl, maxWidth, maxHeight) {
ctx.drawImage(img, 0, 0, thumbnailWidth, thumbnailHeight);
// Convert the canvas to a data URL and resolve the promise
- const thumbnailDataUrl = canvas.toDataURL('image/jpeg');
+ const thumbnailDataUrl = canvas.toDataURL(type);
resolve(thumbnailDataUrl);
};