From 72a7aac2c7f089b74c8c782e57d6a98ec0e2139b Mon Sep 17 00:00:00 2001
From: Gnome Ann <>
Date: Thu, 20 Jan 2022 15:14:55 -0500
Subject: [PATCH 1/5] Sync memory properly after random game request

---
 aiserver.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/aiserver.py b/aiserver.py
index 7c9598ce..0d32a014 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -4549,6 +4549,8 @@ def newGameRequest():
 def randomGameRequest(topic, memory=""):
     if(vars.noai):
         newGameRequest()
+        vars.memory = memory
+        emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True)
         return
     vars.recentrng = topic
     vars.recentrngm = memory
@@ -4561,6 +4563,7 @@ def randomGameRequest(topic, memory=""):
     vars.lua_koboldbridge.feedback = None
     actionsubmit("", force_submit=True, force_prompt_gen=True)
     vars.memory = memory
+    emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True)
 
 # Load desired settings from both the model and the users config file
 if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly", "TPUMeshTransformerGPTJ"]):

From 17f284b837e69988f3cd099655333f9e127b98be Mon Sep 17 00:00:00 2001
From: henk717
Date: Fri, 21 Jan 2022 11:10:44 +0100
Subject: [PATCH 2/5] Descriptions

---
 colab/TPU.ipynb | 44 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 43 insertions(+), 1 deletion(-)

diff --git a/colab/TPU.ipynb b/colab/TPU.ipynb
index 1ebef2be..2ff11cc3 100644
--- a/colab/TPU.ipynb
+++ b/colab/TPU.ipynb
@@ -102,7 +102,49 @@
 {
 "cell_type": "markdown",
 "source": [
- "# MODEL LIST PENDING (WIP COLAB)"
+ "# TPU Edition Model Descriptions\n",
+ "\n",
+ "| Model | Size | Style | Drive Space | Description |\n",
+ "| ------------------------------ | ------ | --------- | ----------- | ------------------------------------------------------------ |\n",
+ "| Skein 6B by VE_FORBRYDERNE | 6B TPU | Hybrid | 0 GB | Skein is our flagship NSFW 6B model; it is a hybrid between an Adventure model and a Novel model. Best used with either Adventure mode or the You Bias userscript enabled. Skein has been trained on high-quality novels along with CYOA adventure stories and is not as wacky as the Adventure model. It also has tagging support. |\n",
+ "| Adventure 6B by VE_FORBRYDERNE | 6B TPU | Adventure | 0 GB | Adventure is a 6B model designed to mimic the behavior of AI Dungeon. It is exclusively for Adventure Mode and can take you on the epic and wacky adventures that AI Dungeon players love. It also features the many tropes of AI Dungeon as it has been trained on very similar data. It must be used in second person (You). |\n",
+ "| Lit 6B by Haru | 6B TPU | NSFW | 8 GB / 12 GB | Lit is a great NSFW model trained by Haru on both a large set of Literotica stories and high-quality novels, along with tagging support, making it a high-quality model for your NSFW stories. This model is exclusively a novel model and is best used in third person. |\n",
+ "| Generic 6B by EleutherAI | 6B TPU | Generic | 10 GB / 12 GB | GPT-J-6B is the model that all the other models are based on. If you need something with no specific bias towards any particular subject, this is the model for you. Best used when the other models are not suitable for what you wish to do, such as homework assistance, blog writing, coding and more. It needs more hand-holding than the other models and is more prone to undesirable formatting changes. |\n",
+ "| C1 6B by Haru | 6B TPU | Chatbot | 8 GB / 12 GB | C1 has been trained on various internet chatrooms; it forms the basis for an interesting chatbot model and has been optimized to be used in the Chatmode. |\n",
+ "\n",
+ "\n",
+ "# [GPU Edition Model Descriptions](https://colab.research.google.com/github/KoboldAI/KoboldAI-Client/blob/united/colab/GPU.ipynb)\n",
+ "\n",
+ "| Model | Size | Style | Description |\n",
+ "| ------------------------------------------------------------ | -------- | ---------- | ------------------------------------------------------------ |\n",
+ "| [GPT-Neo-2.7B-Picard](https://huggingface.co/KoboldAI/GPT-Neo-2.7B-Picard) by Mr Seeker | 2.7B GPU | Novel | Picard is a model trained for SFW Novels based on GPT-Neo-2.7B. It is focused on Novel-style writing without the NSFW bias. While the name suggests a sci-fi model, this model is designed for novels of a variety of genres. It is meant to be used in KoboldAI's regular mode. |\n",
+ "| [GPT-Neo-2.7B-AID](https://huggingface.co/KoboldAI/GPT-Neo-2.7B-AID) by melastacho | 2.7B GPU | Adventure | Also known as Adventure 2.7B, this is a clone of the AI Dungeon Classic model and is best known for the epic and wacky adventures that AI Dungeon Classic players love. |\n",
+ "| [GPT-Neo-2.7B-Horni-LN](https://huggingface.co/KoboldAI/GPT-Neo-2.7B-Horni-LN) by finetune | 2.7B GPU | Novel | This model is based on GPT-Neo-2.7B-Horni and retains its NSFW knowledge, but was then further biased towards SFW novel stories. If you seek a balance between an SFW Novel model and an NSFW model, this model should be a good choice. |\n",
+ "| [GPT-Neo-2.7B-Horni](https://huggingface.co/KoboldAI/GPT-Neo-2.7B-Horni) by finetune | 2.7B GPU | NSFW | This model is tuned on Literotica to produce a Novel-style model biased towards NSFW content. It can still be used for SFW stories but will have a bias towards NSFW content. It is meant to be used in KoboldAI's regular mode. |\n",
+ "| [GPT-Neo-2.7B-Shinen](https://huggingface.co/KoboldAI/GPT-Neo-2.7B-Shinen) by Mr Seeker | 2.7B GPU | NSFW | Shinen is an alternative to the Horni model designed to be more explicit. If Horni is too tame for you, Shinen might produce better results. While it is a Novel model, it is unsuitable for SFW stories due to its heavy NSFW bias. Shinen will not hold back. It is meant to be used in KoboldAI's regular mode. |\n",
+ "| [GPT-Neo-2.7B](https://huggingface.co/EleutherAI/gpt-neo-2.7B) by EleutherAI | 2.7B GPU | Generic | This is the base model for all the other 2.7B models; it is best used when you have a use case that we have no other models available for, such as writing blog articles or programming. It can also be a good basis for the experience of some of the softprompts if your softprompt is not about a subject the other models cover. |\n",
+ "\n",
+ "| Style | Description |\n",
+ "| --------- | ------------------------------------------------------------ |\n",
+ "| Novel | For regular story writing, not compatible with Adventure mode or other specialty modes. |\n",
+ "| NSFW | Indicates that the model is strongly biased towards NSFW content and is not suitable for children, work environments or livestreaming. Most NSFW models are also Novel models in nature. |\n",
+ "| Adventure | These models are excellent for people willing to play KoboldAI like a Text Adventure game and are meant to be used with Adventure mode enabled. Even if you wish to use one as a Novel-style model, you should always have Adventure mode on and set it to story. These models typically have a strong bias towards the use of the word You, and without Adventure mode enabled they break the story flow and write actions on your behalf. |\n",
+ "| Chatbot | These models are specifically trained for chatting and are best used with the Chatmode enabled. They are typically trained on either public chatrooms or private chats. |\n",
+ "| Hybrid | Hybrid models are a blend of different styles; for example, they are trained on both Novel stories and Adventure stories. These models are great variety models that you can use for multiple different playstyles and modes, but depending on your usage you may need to enable Adventure Mode or the You bias (in userscripts). |\n",
+ "| Generic | Generic models are not trained towards anything specific and are typically used as a basis for other tasks and models. They can do everything the other models can do, but require much more hand-holding to work properly. Generic models are an ideal basis for tasks that we have no specific model for, or for experiencing a softprompt in its raw form. |\n",
+ "\n",
+ "## How to start KoboldAI in 7 simple steps\n",
+ "Using KoboldAI on Google Colab is easy! Simply follow these steps to get started:\n",
+ "1. Mobile phone? Tap the play button below next to \"<--- Tap this if you play on mobile\" to reveal an audio player; play the silent audio to keep the tab alive so Google will not shut you down while you're using KoboldAI. If no audio player is revealed, your phone browser does not support Google Colab in the mobile view; go to your browser menu and enable Desktop mode before you continue.\n",
+ "2. Select the model that best describes what you would like to do; by default we have selected the model we most recommend for people trying out KoboldAI.\n",
+ "3. Click the play button next to \"<--- Click this to start KoboldAI\".\n",
+ "4. Allow Google Drive access; this typically happens through a popup, but sometimes Google Drive access may be requested through the older method of asking you to click on a link and copy a code. This is normal behavior for Colab and only you will get access to your files; nothing is shared with us.\n",
+ "5. Now the automatic installation and download process starts; for most models in the TPU edition expect the loading to take between 15 and 30 minutes on average, depending on the current Colab download speeds and the model you selected. These downloads happen through Google's internet connection; you will not be billed by your internet provider and they will not count towards any download limits.\n",
+ "6. After waiting, a Trycloudflare link appears; click the link to enjoy KoboldAI. If you get a 1033 error, Cloudflare is not done loading; in that case keep refreshing until it goes away. (If it keeps happening after 2 minutes, Cloudflare has an issue; in that case you can use Runtime -> Restart and Run All to get a new link.)\n",
+ "7. As you play KoboldAI, keep this Colab tab open in the background and check occasionally for Captchas so they do not shut your instance down. If you do get shut down, you can always download a copy of your game save in the Save menu inside KoboldAI. Stories are never lost as long as you keep KoboldAI open in your browser.\n",
+ "\n",
+ "Get an error message saying you do not have access to a GPU/TPU instance? Do not continue and try again later; KoboldAI will not run correctly without one.\n",
+ "\n"
 ],
 "metadata": {
 "id": "i0-9ARA3c4Fx"

From 67ea0810331b633125618f13d5dfab1b0549ef28 Mon Sep 17 00:00:00 2001
From: henk717
Date: Fri, 21 Jan 2022 11:12:43 +0100
Subject: [PATCH 3/5] Link Fix

---
 colab/TPU.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/colab/TPU.ipynb b/colab/TPU.ipynb
index 2ff11cc3..2da651e1 100644
--- a/colab/TPU.ipynb
+++ b/colab/TPU.ipynb
@@ -113,7 +113,7 @@
 "| C1 6B by Haru | 6B TPU | Chatbot | 8 GB / 12 GB | C1 has been trained on various internet chatrooms; it forms the basis for an interesting chatbot model and has been optimized to be used in the Chatmode. |\n",
 "\n",
 "\n",
- "# [GPU Edition Model Descriptions](https://colab.research.google.com/github/KoboldAI/KoboldAI-Client/blob/united/colab/GPU.ipynb)\n",
+ "# [GPU Edition Model Descriptions](https://colab.research.google.com/github/KoboldAI/KoboldAI-Client/blob/main/colab/GPU.ipynb)\n",
 "\n",
 "| Model | Size | Style | Description |\n",
 "| ------------------------------------------------------------ | -------- | ---------- | ------------------------------------------------------------ |\n",

From 8e00c4a1902def18eda0048a66c0d964d2031ed8 Mon Sep 17 00:00:00 2001
From: henk717
Date: Fri, 21 Jan 2022 11:21:24 +0100
Subject: [PATCH 4/5] KoboldAI launcher for Jupyter Notebook platforms

---
 play.ipynb | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)
 create mode 100644 play.ipynb

diff --git a/play.ipynb b/play.ipynb
new file mode 100644
index 00000000..af94064b
--- /dev/null
+++ b/play.ipynb
@@ -0,0 +1,45 @@
+{
+  "nbformat": 4,
+  "nbformat_minor": 0,
+  "metadata": {
+    "colab": {
+      "name": "KoboldAI Jupyter",
+      "provenance": [],
+      "authorship_tag": "ABX9TyMDTbAhtDnKJa+aIEaQjpsL"
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    },
+    "language_info": {
+      "name": "python"
+    },
+    "accelerator": "TPU"
+  },
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "source": [
+        "# KoboldAI Launcher for generic Jupyter Notebooks\n",
+        "This notebook is meant as a way to easily launch KoboldAI on existing Jupyter instances that already have KoboldAI installed (for example, a custom Saturn Cloud or Paperspace instance).\n",
+        "\n",
+        "For Google Colab please check out our Google Colab edition available at: https://colab.research.google.com/github/KoboldAI/KoboldAI-Client/blob/main/colab/TPU.ipynb"
+      ],
+      "metadata": {
+        "id": "hMRnGz42Xsy3"
+      }
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "40B1QvI3Xv02"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install -r requirements.txt\n",
+        "!python3 aiserver.py --remote"
+      ]
+    }
+  ]
+}
\ No newline at end of file

From 9cddaa80412e24c639442effca2fedec0e3c5bce Mon Sep 17 00:00:00 2001
From: henk717
Date: Fri, 21 Jan 2022 11:23:37 +0100
Subject: [PATCH 5/5] Official branch is now default

---
 colab/TPU.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/colab/TPU.ipynb b/colab/TPU.ipynb
index 2da651e1..57c4b5cc 100644
--- a/colab/TPU.ipynb
+++ b/colab/TPU.ipynb
@@ -49,7 +49,7 @@
 "source": [
 "#@title <-- Click this to start KoboldAI\n",
 "Model = \"Skein 6B\" #@param [\"Skein 6B\", \"Adventure 6B\", \"Lit 6B\", \"Generic 6B\", \"C1 6B\"]\n",
- "Version = \"United\" #@param [\"Official\", \"United\"] {allow-input: true}\n",
+ "Version = \"Official\" #@param [\"Official\", \"United\"] {allow-input: true}\n",
 "Drive = \"Unextracted (Less Space)\" #@param [\"Unextracted (Less Space)\", \"Extracted (Faster Loading)\"]\n",
 "#@markdown Extracted models take up more space but load faster the next time you use them, not all models use your Google Drive. See the Model list below for descriptions and space requirements. If your extracted model does not load the next time you try to launch KoboldAI delete the folder from your Google Drive and ensure enough space is available.\n",
 "\n",