Merge branch 'release' into staging

This commit is contained in:
Cohee 2023-11-23 20:55:13 +02:00
commit 58eae43cb0
2 changed files with 47 additions and 40 deletions

2
.github/readme.md vendored
View File

@@ -223,7 +223,7 @@ If the ST-hosting device is on the same wifi network, you will use the ST-host's
If you (or someone else) want to connect to your hosted ST while not being on the same network, you will need the public IP of your ST-hosting device.
* While using the ST-hosting device, access [this page](https://whatismyipaddress.com/) and look for for `IPv4`. This is what you would use to connect from the remote device.
* While using the ST-hosting device, access [this page](https://whatismyipaddress.com/) and look for `IPv4`. This is what you would use to connect from the remote device.
### 3. Connect the remote device to the ST host machine

View File

@@ -40,20 +40,20 @@
"use_sd_cpu = False #@param {type:\"boolean\"}\n",
"#@markdown ***\n",
"#@markdown Loads the image captioning module\n",
"extras_enable_captioning = True #@param {type:\"boolean\"}\n",
"Captions_Model = \"Salesforce/blip-image-captioning-large\" #@param [ \"Salesforce/blip-image-captioning-large\", \"Salesforce/blip-image-captioning-base\" ]\n",
"extras_enable_caption = True #@param {type:\"boolean\"}\n",
"captioning_model = \"Salesforce/blip-image-captioning-large\" #@param [ \"Salesforce/blip-image-captioning-large\", \"Salesforce/blip-image-captioning-base\" ]\n",
"#@markdown * Salesforce/blip-image-captioning-large - good base model\n",
"#@markdown * Salesforce/blip-image-captioning-base - slightly faster but less accurate\n",
"#@markdown ***\n",
"#@markdown Loads the sentiment classification model\n",
"extras_enable_emotions = True #@param {type:\"boolean\"}\n",
"Emotions_Model = \"nateraw/bert-base-uncased-emotion\" #@param [\"nateraw/bert-base-uncased-emotion\", \"joeddav/distilbert-base-uncased-go-emotions-student\"]\n",
"extras_enable_classify = True #@param {type:\"boolean\"}\n",
"classification_model = \"nateraw/bert-base-uncased-emotion\" #@param [\"nateraw/bert-base-uncased-emotion\", \"joeddav/distilbert-base-uncased-go-emotions-student\"]\n",
"#@markdown * nateraw/bert-base-uncased-emotion = 6 supported emotions<br>\n",
"#@markdown * joeddav/distilbert-base-uncased-go-emotions-student = 28 supported emotions\n",
"#@markdown ***\n",
"#@markdown Loads the story summarization module\n",
"extras_enable_memory = True #@param {type:\"boolean\"}\n",
"Memory_Model = \"slauw87/bart_summarisation\" #@param [ \"slauw87/bart_summarisation\", \"Qiliang/bart-large-cnn-samsum-ChatGPT_v3\", \"Qiliang/bart-large-cnn-samsum-ElectrifAi_v10\", \"distilbart-xsum-12-3\" ]\n",
"extras_enable_summarize = True #@param {type:\"boolean\"}\n",
"summarization_model = \"slauw87/bart_summarisation\" #@param [ \"slauw87/bart_summarisation\", \"Qiliang/bart-large-cnn-samsum-ChatGPT_v3\", \"Qiliang/bart-large-cnn-samsum-ElectrifAi_v10\", \"distilbart-xsum-12-3\" ]\n",
"#@markdown * slauw87/bart_summarisation - general purpose summarization model\n",
"#@markdown * Qiliang/bart-large-cnn-samsum-ChatGPT_v3 - summarization model optimized for chats\n",
"#@markdown * Qiliang/bart-large-cnn-samsum-ElectrifAi_v10 - nice results so far, but still being evaluated\n",
@@ -63,10 +63,18 @@
"extras_enable_silero_tts = True #@param {type:\"boolean\"}\n",
"#@markdown Enables Microsoft Edge text-to-speech module\n",
"extras_enable_edge_tts = True #@param {type:\"boolean\"}\n",
"#@markdown Enables RVC module\n",
"extras_enable_rvc = False #@param {type:\"boolean\"}\n",
"#@markdown ***\n",
"#@markdown Enables Whisper speech recognition module\n",
"extras_enable_whisper_stt = True #@param {type:\"boolean\"}\n",
"whisper_model = \"base.en\" #@param [ \"tiny.en\", \"base.en\", \"small.en\", \"medium.en\", \"tiny\", \"base\", \"small\", \"medium\", \"large\" ]\n",
"#@markdown There are five model sizes, four with English-only versions, offering speed and accuracy tradeoffs.\n",
"#@markdown The .en models for English-only applications tend to perform better, especially for the tiny.en and base.en models.\n",
"#@markdown ***\n",
"#@markdown Enables SD picture generation\n",
"extras_enable_sd = True #@param {type:\"boolean\"}\n",
"SD_Model = \"ckpt/anything-v4.5-vae-swapped\" #@param [ \"ckpt/anything-v4.5-vae-swapped\", \"hakurei/waifu-diffusion\", \"philz1337/clarity\", \"prompthero/openjourney\", \"ckpt/sd15\", \"stabilityai/stable-diffusion-2-1-base\" ]\n",
"sd_model = \"ckpt/anything-v4.5-vae-swapped\" #@param [ \"ckpt/anything-v4.5-vae-swapped\", \"hakurei/waifu-diffusion\", \"philz1337/clarity\", \"prompthero/openjourney\", \"ckpt/sd15\", \"stabilityai/stable-diffusion-2-1-base\" ]\n",
"#@markdown * ckpt/anything-v4.5-vae-swapped - anime style model\n",
"#@markdown * hakurei/waifu-diffusion - anime style model\n",
"#@markdown * philz1337/clarity - realistic style model\n",
@ -91,28 +99,36 @@
"if secure:\n",
" params.append('--secure')\n",
"params.append('--share')\n",
"ExtrasModules = []\n",
"modules = []\n",
"\n",
"if (extras_enable_captioning):\n",
" ExtrasModules.append('caption')\n",
"if (extras_enable_memory):\n",
" ExtrasModules.append('summarize')\n",
"if (extras_enable_emotions):\n",
" ExtrasModules.append('classify')\n",
"if (extras_enable_sd):\n",
" ExtrasModules.append('sd')\n",
"if (extras_enable_silero_tts):\n",
" ExtrasModules.append('silero-tts')\n",
"if extras_enable_caption:\n",
" modules.append('caption')\n",
"if extras_enable_summarize:\n",
" modules.append('summarize')\n",
"if extras_enable_classify:\n",
" modules.append('classify')\n",
"if extras_enable_sd:\n",
" modules.append('sd')\n",
"if extras_enable_silero_tts:\n",
" modules.append('silero-tts')\n",
"if extras_enable_edge_tts:\n",
" ExtrasModules.append('edge-tts')\n",
"if (extras_enable_chromadb):\n",
" ExtrasModules.append('chromadb')\n",
" modules.append('edge-tts')\n",
"if extras_enable_chromadb:\n",
" modules.append('chromadb')\n",
"if extras_enable_whisper_stt:\n",
" modules.append('whisper-stt')\n",
" params.append(f'--stt-whisper-model-path={whisper_model}')\n",
"if extras_enable_rvc:\n",
" modules.append('rvc')\n",
" params.append('--max-content-length=2000')\n",
" params.append('--rvc-save-file')\n",
"\n",
"params.append(f'--classification-model={Emotions_Model}')\n",
"params.append(f'--summarization-model={Memory_Model}')\n",
"params.append(f'--captioning-model={Captions_Model}')\n",
"params.append(f'--sd-model={SD_Model}')\n",
"params.append(f'--enable-modules={\",\".join(ExtrasModules)}')\n",
"\n",
"params.append(f'--classification-model={classification_model}')\n",
"params.append(f'--summarization-model={summarization_model}')\n",
"params.append(f'--captioning-model={captioning_model}')\n",
"params.append(f'--sd-model={sd_model}')\n",
"params.append(f'--enable-modules={\",\".join(modules)}')\n",
"\n",
"\n",
"%cd /\n",
@ -121,22 +137,13 @@
"!git clone https://github.com/Cohee1207/tts_samples\n",
"!npm install -g localtunnel\n",
"%pip install -r requirements.txt\n",
"#!pip install tensorflow==2.14\n",
"#!pip install colorama\n",
"#!pip install Flask-Cors\n",
"#!pip install Flask-Compress\n",
"#!pip install transformers\n",
"#!pip install Flask_Cloudflared\n",
"#!pip install webuiapi\n",
"#!pip install diffusers\n",
"#!pip install accelerate\n",
"#!pip install silero_api_server\n",
"#!pip install edge_tts\n",
"#!pip install chromadb\n",
"#!pip install sentence_transformers\n",
"!wget https://github.com/cloudflare/cloudflared/releases/download/2023.5.0/cloudflared-linux-amd64 -O /tmp/cloudflared-linux-amd64\n",
"!chmod +x /tmp/cloudflared-linux-amd64\n",
"\n",
"if extras_enable_rvc:\n",
" print(\"Installing RVC requirements\")\n",
" !pip install -r requirements-rvc.txt\n",
"\n",
"# Generate a random API key\n",
"api_key = secrets.token_hex(5)\n",
"\n",