Update GPU colab
170 colab/GPU.ipynb
@@ -1,24 +1,25 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"private_outputs": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU",
"gpuClass": "standard"
},
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"**Links**<br>\n",
"Cohee's TavernAI fork Github https://github.com/Cohee1207/SillyTavern<br>\n",
"Cohee's TavernAI Extras Github https://github.com/Cohee1207/TavernAI-extras/<br>\n",
"TavernAI Discord https://discord.gg/zmK2gmr45t<br>\n",
"Questions? Hit me up on Discord: Cohee#1207"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "_1gpebrnlp5-"
},
"outputs": [],
"source": [
"#@title <b><-- Convert TavernAI characters to SillyTavern format</b>\n",
"\n",
@@ -53,34 +54,30 @@
"\n",
"%cd /\n",
"!rm -rf /convert"
],
"metadata": {
"cellView": "form",
"id": "_1gpebrnlp5-"
},
"execution_count": null,
"outputs": []
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ewkXkyiFP2Hq"
},
"outputs": [],
"source": [
"#@title <-- Tap this if you play on Mobile { display-mode: \"form\" }\n",
"%%html\n",
"<b>Press play on the music player to keep the tab alive, then start KoboldAI below (Uses only 13MB of data)</b><br/>\n",
"<audio src=\"https://raw.githubusercontent.com/KoboldAI/KoboldAI-Client/main/colab/silence.m4a\" controls>"
],
"execution_count": null,
"outputs": []
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "lVftocpwCoYw",
"cellView": "form"
"cellView": "form",
"id": "lVftocpwCoYw"
},
"outputs": [],
"source": [
"#@title <b><-- Select your model below and then click this to start KoboldAI</b>\n",
"\n",
@@ -91,6 +88,25 @@
"UseGoogleDrive = True #@param {type:\"boolean\"}\n",
"StartKoboldAI = True #@param {type:\"boolean\"}\n",
"ModelsFromDrive = False #@param {type:\"boolean\"}\n",
"UseExtrasExtensions = True #@param {type:\"boolean\"}\n",
"#@markdown Enables hosting of extensions backend for TavernAI Extras\n",
"extras_enable_captioning = True #@param {type:\"boolean\"}\n",
"#@markdown Loads the image captioning module\n",
"Captions_Model = \"Salesforce/blip-image-captioning-large\" #@param [ \"Salesforce/blip-image-captioning-large\", \"Salesforce/blip-image-captioning-base\" ]\n",
"#@markdown * Salesforce/blip-image-captioning-large - good base model\n",
"#@markdown * Salesforce/blip-image-captioning-base - slightly faster but less accurate\n",
"extras_enable_emotions = True #@param {type:\"boolean\"}\n",
"#@markdown Loads the sentiment classification model\n",
"Emotions_Model = \"bhadresh-savani/distilbert-base-uncased-emotion\" #@param [\"bhadresh-savani/distilbert-base-uncased-emotion\", \"joeddav/distilbert-base-uncased-go-emotions-student\"]\n",
"#@markdown * bhadresh-savani/distilbert-base-uncased-emotion = 6 supported emotions<br>\n",
"#@markdown * joeddav/distilbert-base-uncased-go-emotions-student = 28 supported emotions\n",
"extras_enable_memory = True #@param {type:\"boolean\"}\n",
"#@markdown Loads the story summarization module\n",
"Memory_Model = \"Qiliang/bart-large-cnn-samsum-ChatGPT_v3\" #@param [ \"Qiliang/bart-large-cnn-samsum-ChatGPT_v3\", \"Qiliang/bart-large-cnn-samsum-ElectrifAi_v10\", \"distilbart-xsum-12-3\" ]\n",
"#@markdown * Qiliang/bart-large-cnn-samsum-ChatGPT_v3 - summarization model optimized for chats\n",
"#@markdown * Qiliang/bart-large-cnn-samsum-ElectrifAi_v10 - nice results so far, but still being evaluated\n",
"#@markdown * distilbart-xsum-12-3 - faster, but pretty basic alternative\n",
"\n",
"\n",
"%cd /content\n",
"\n",
@@ -268,8 +284,77 @@
" !nvm use 19.1.0\n",
"ii.addTask(\"Install node\", installNode)\n",
"\n",
"\n",
"# TavernAI extras\n",
"params = []\n",
"params.append('--cpu')\n",
"ExtrasModules = []\n",
"\n",
"if (extras_enable_captioning):\n",
" ExtrasModules.append('caption')\n",
"if (extras_enable_memory):\n",
" ExtrasModules.append('summarize')\n",
"if (extras_enable_emotions):\n",
" ExtrasModules.append('classify')\n",
"\n",
"params.append(f'--classification-model={Emotions_Model}')\n",
"params.append(f'--summarization-model={Memory_Model}')\n",
"params.append(f'--captioning-model={Captions_Model}')\n",
"params.append(f'--enable-modules={\",\".join(ExtrasModules)}')\n",
"\n",
"extras_url = '(disabled)'\n",
"\n",
"if UseExtrasExtensions:\n",
" def cloneExtras():\n",
" %cd /\n",
" !git clone https://github.com/Cohee1207/TavernAI-extras\n",
" ii.addTask('clone extras', cloneExtras)\n",
"\n",
" def installRequirements():\n",
" %cd /TavernAI-extras\n",
" !npm install -g localtunnel\n",
" !pip install -r requirements.txt\n",
" !pip install tensorflow==2.11\n",
" ii.addTask('install requirements', installRequirements)\n",
"\n",
" def runServer():\n",
" cmd = f\"python server.py {' '.join(params)}\"\n",
" print(cmd)\n",
" extras_process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd='/TavernAI-extras', shell=True)\n",
" print('processId:', extras_process.pid)\n",
" while True:\n",
" line = extras_process.stdout.readline().decode().strip()\n",
" if \"Running on \" in line:\n",
" break\n",
" if not line:\n",
" print('breaking on line')\n",
" break\n",
" print(line)\n",
" ii.addTask('run server', runServer)\n",
"\n",
" def extractUrl():\n",
" subprocess.call('nohup lt --port 5100 > ./extras.out 2> ./extras.err &', shell=True)\n",
" print('Waiting for lt init...')\n",
" time.sleep(5)\n",
"\n",
" while True:\n",
" if (os.path.getsize('./extras.out') > 0):\n",
" with open('./extras.out', 'r') as f:\n",
" lines = f.readlines()\n",
" for x in range(len(lines)):\n",
" if ('your url is: ' in lines[x]):\n",
" print('TavernAI Extensions URL:')\n",
" extras_url = lines[x].split('your url is: ')[1]\n",
" print(extras_url)\n",
" break\n",
" if (os.path.getsize('./extras.err') > 0):\n",
" with open('./extras.err', 'r') as f:\n",
" print(f.readlines())\n",
" break\n",
" ii.addTask('extract extras URL', extractUrl)\n",
"\n",
"def cloneTavern():\n",
" !git clone https://github.com/EnergoStalin/SillyTavern\n",
" !git clone https://github.com/Cohee1207/SillyTavern\n",
"ii.addTask(\"Clone SillyTavern\", cloneTavern)\n",
"\n",
"ii.run()\n",
@@ -306,13 +391,28 @@
"\n",
"%env colaburl=$url\n",
"%env SILLY_TAVERN_PORT=5001\n",
"print(\"KoboldAI LINK:\", url, \"###SillyTavern LINK###\", sep=\"\\n\")\n",
"print(\"KoboldAI LINK:\", url, '###Extensions API LINK###', extras_url, \"###SillyTavern LINK###\", sep=\"\\n\")\n",
"p = subprocess.Popen([\"lt\", \"--port\", \"5001\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n",
"print(p.stdout.readline().decode().strip())\n",
"!node server.js"
],
"execution_count": null,
"outputs": []
}
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"private_outputs": true,
"provenance": []
},
"gpuClass": "standard",
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
},
"nbformat": 4,
"nbformat_minor": 0
}
}
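Note: the updated KoboldAI cell builds the TavernAI-extras command line from the form toggles, launches server.py, and then polls the localtunnel log until a public URL appears. The sketch below restates that pattern as standalone Python for readers of the diff; it mirrors the flags and the "your url is: " parsing used above, but the helper names (build_extras_params, wait_for_tunnel_url) are illustrative and not part of the notebook.

# Illustrative sketch only; mirrors the logic of the notebook cell shown in the diff.
import os
import subprocess
import time

def build_extras_params(enable_captioning, enable_memory, enable_emotions,
                        captions_model, memory_model, emotions_model):
    # Assemble the server.py flags the same way the cell above does.
    modules = []
    if enable_captioning:
        modules.append('caption')
    if enable_memory:
        modules.append('summarize')
    if enable_emotions:
        modules.append('classify')
    return [
        '--cpu',
        f'--classification-model={emotions_model}',
        f'--summarization-model={memory_model}',
        f'--captioning-model={captions_model}',
        f'--enable-modules={",".join(modules)}',
    ]

def wait_for_tunnel_url(port=5100, out_path='./extras.out', err_path='./extras.err'):
    # Start localtunnel in the background and poll its log files until the
    # "your url is: " line shows up, as extractUrl does in the notebook.
    subprocess.call(f'nohup lt --port {port} > {out_path} 2> {err_path} &', shell=True)
    time.sleep(5)
    while True:
        if os.path.getsize(out_path) > 0:
            with open(out_path) as f:
                for line in f:
                    if 'your url is: ' in line:
                        return line.split('your url is: ')[1].strip()
        if os.path.getsize(err_path) > 0:
            with open(err_path) as f:
                raise RuntimeError(f.read())
        time.sleep(1)

In the notebook itself these flags are joined into "python server.py ..." and run via subprocess.Popen, and the extracted tunnel URL is printed alongside the KoboldAI and SillyTavern links in the final cell.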