#==================================================================#
# KoboldAI Client
# Version: 1.15.0
# By: KoboldAIDev
#==================================================================#
2021-05-11 01:17:10 +02:00
# External packages
2021-05-03 00:46:45 +02:00
from os import path , getcwd
2021-08-19 13:18:01 +02:00
import re
2021-05-05 17:18:24 +02:00
import tkinter as tk
2021-05-07 20:32:10 +02:00
from tkinter import messagebox
2021-05-03 00:46:45 +02:00
import json
2021-06-15 06:59:08 +02:00
from typing import Literal , Union
2021-05-16 11:37:38 +02:00
import requests
2021-06-02 15:01:13 +02:00
import html
2021-08-20 00:37:59 +02:00
import argparse
2021-08-20 16:25:03 +02:00
import sys
import gc
2021-05-03 00:46:45 +02:00
2021-05-11 01:17:10 +02:00
# KoboldAI
2021-05-07 20:32:10 +02:00
import fileops
import gensettings
from utils import debounce
2021-05-11 01:17:10 +02:00
import utils
2021-08-20 16:25:03 +02:00
import breakmodel
2021-05-07 20:32:10 +02:00
2021-05-03 00:46:45 +02:00
#==================================================================#
# Variables & Storage
#==================================================================#
2021-05-07 20:32:10 +02:00
2021-05-03 00:46:45 +02:00
# Terminal tags for colored text
class colors:
    """ANSI escape sequences used to colorize console output."""
    PURPLE    = '\033[95m'
    BLUE      = '\033[94m'
    CYAN      = '\033[96m'
    GREEN     = '\033[92m'
    YELLOW    = '\033[93m'
    RED       = '\033[91m'
    END       = '\033[0m'   # Reset all attributes
    UNDERLINE = '\033[4m'
2021-05-07 20:32:10 +02:00
# AI models: each entry is [menu label, model identifier, approximate V/RAM needed]
modellist = [
    ["GPT Neo 1.3B", "EleutherAI/gpt-neo-1.3B", "8GB"],
    ["GPT Neo 2.7B", "EleutherAI/gpt-neo-2.7B", "16GB"],
    ["GPT-2", "gpt2", "1.2GB"],
    ["GPT-2 Med", "gpt2-medium", "2GB"],
    ["GPT-2 Large", "gpt2-large", "16GB"],
    ["GPT-2 XL", "gpt2-xl", "16GB"],
    ["InferKit API (requires API key)", "InferKit", ""],
    ["Custom Neo   (eg Neo-horni)", "NeoCustom", ""],
    ["Custom GPT-2 (eg CloverEdition)", "GPT2Custom", ""],
    ["Google Colab", "Colab", ""],
    ["OpenAI API (requires API key)", "OAI", ""],
    ["Read Only (No AI)", "ReadOnly", ""],
]
# Variables — global mutable application state shared by the whole server.
class vars:
    """Namespace-style container for runtime state and settings."""
    lastact     = ""     # The last action received from the user
    lastctx     = ""     # The last context submitted to the generator
    model       = ""     # Model ID string chosen at startup
    noai        = False  # Runs the script without starting up the transformers pipeline
    aibusy      = False  # Stops submissions while the AI is working
    max_length  = 1024   # Maximum number of tokens to submit per action
    ikmax       = 3000   # Maximum number of characters to submit to InferKit
    genamt      = 80     # Amount of text for each action to generate
    ikgen       = 200    # Number of characters for InferKit to generate
    rep_pen     = 1.1    # Default generator repetition_penalty
    temp        = 0.5    # Default generator temperature
    top_p       = 0.9    # Default generator top_p
    top_k       = 0      # Default generator top_k
    tfs         = 1.0    # Default generator tfs (tail-free sampling)
    numseqs     = 1      # Number of sequences to ask the generator to create
    gamestarted = False  # Whether the game has started (disables UI elements)
    prompt      = ""     # Prompt
    memory      = ""     # Text submitted to memory field
    authornote  = ""     # Text submitted to Author's Note field
    andepth     = 3      # How far back in history to append author's note
    actions     = []     # Array of actions submitted by user and AI
    worldinfo   = []     # Array of World Info key/value objects
    badwords    = []     # Array of str/chr values that should be removed from output
    badwordsids = []     # Tokenized array of badwords
    deletewi    = -1     # Temporary storage for index to delete
    wirmvwhtsp  = False  # Whether to remove leading whitespace from WI entries
    widepth     = 3      # How many historical actions to scan for WI hits
    mode        = "play" # Whether the interface is in play, memory, or edit mode
    editln      = 0      # Which line was last selected in Edit Mode
    url         = "https://api.inferkit.com/v1/models/standard/generate"  # InferKit API URL
    oaiurl      = ""     # OpenAI API URL
    oaiengines  = "https://api.openai.com/v1/engines"  # OpenAI engine-list endpoint
    colaburl    = ""     # Ngrok url for Google Colab mode
    apikey      = ""     # API key to use for InferKit API calls
    oaiapikey   = ""     # API key to use for OpenAI API calls
    savedir     = getcwd() + "\\stories"  # Default save location (Windows-style separator)
    hascuda     = False  # Whether torch has detected CUDA on the system
    usegpu      = False  # Whether to launch pipeline with GPU support
    custmodpth  = ""     # Filesystem location of custom model to run
    formatoptns = {}     # Container for state of formatting options
    importnum   = -1     # Selection on import popup list
    importjs    = {}     # Temporary storage for import data
    loadselect  = ""     # Temporary storage for filename to load
    svowname    = ""     # Filename that was flagged for overwrite confirm
    saveow      = False  # Whether or not overwrite confirm has been displayed
    genseqs     = []     # Temporary storage for generated sequences
    useprompt   = True   # Whether to send the full prompt with every submit action
    breakmodel  = False  # For GPU users, whether to use both system RAM and VRAM to conserve VRAM while offering speedup compared to CPU-only
    bmsupported = False  # Whether the breakmodel option is supported (GPT-Neo/GPT-J only, currently)
    # Pattern for matching adventure actions from the AI so we can remove them
    acregex_ai  = re.compile(r'\n* *>(.|\n)*')
    # Pattern for matching actions in the HTML-escaped story so we can apply
    # colouring, etc (make sure to encase part to format in parentheses)
    acregex_ui  = re.compile(r'^ *(>.*)$', re.MULTILINE)
    actionmode  = 1      # 0 = story mode, 1 = adventure ("> do X") mode
    adventure   = False  # Whether adventure formatting is enabled
    remote      = False  # Whether the server was launched with --remote
2021-05-04 07:47:23 +02:00
#==================================================================#
# Function to get model selection at startup
#==================================================================#
def getModelSelection():
    """Prompt the user on the console to pick an AI model.

    Sets vars.model to the chosen model identifier and, for the custom
    NeoCustom/GPT2Custom models, sets vars.custmodpth to the folder the
    user picked.  Loops (instead of recursing, which the original did)
    until a model — and, where required, a model folder — is chosen.
    """
    while True:
        print("#   Model                           V/RAM\n=========================================")
        # Menu is 1-indexed for the user
        for num, m in enumerate(modellist, start=1):
            print("{0} - {1}\t\t{2}".format("{:<2}".format(num), m[0].ljust(15), m[2]))
        print("")

        vars.model = ''
        while vars.model == '':
            modelsel = input("Model #> ")
            if modelsel.isnumeric() and 0 < int(modelsel) <= len(modellist):
                vars.model = modellist[int(modelsel) - 1][1]
            else:
                print("{0}Please enter a valid selection.{1}".format(colors.RED, colors.END))

        # Non-custom models need no further input
        if vars.model not in ("NeoCustom", "GPT2Custom"):
            return

        # Custom model: ask for the folder containing pytorch_model.bin
        print("{0}Please choose the folder where pytorch_model.bin is located:{1}\n".format(colors.CYAN, colors.END))
        modpath = fileops.getdirpath(getcwd(), "Select Model Folder")
        if modpath:
            # Save directory to vars
            vars.custmodpth = modpath
            return

        # Folder selection cancelled: report and restart model selection
        print("{0}Model select cancelled!{1}".format(colors.RED, colors.END))
        print("{0}Select an AI model to continue:{1}\n".format(colors.CYAN, colors.END))
2021-05-03 00:46:45 +02:00
2021-05-18 02:28:18 +02:00
#==================================================================#
# Return all keys in tokenizer dictionary containing char
#==================================================================#
def gettokenids(char, vocab=None):
    """Return every tokenizer vocabulary key that contains *char*.

    Args:
        char:  Substring to look for in each vocabulary key.
        vocab: Optional iterable of keys to search; defaults to the
               module-level ``vocab_keys`` populated at model load time.

    Returns:
        List of matching keys, in iteration order.
    """
    if vocab is None:
        vocab = vocab_keys
    # `char in key` replaces the original `key.find(char) != -1` idiom
    return [key for key in vocab if char in key]
2021-05-03 00:46:45 +02:00
#==================================================================#
# Startup
#==================================================================#

# Parse command-line parameters
parser = argparse.ArgumentParser(description="KoboldAI Server")
parser.add_argument("--remote", action='store_true', help="Optimizes KoboldAI for Remote Play")
parser.add_argument("--model", help="Specify the Model Type to skip the Menu")
parser.add_argument("--path", help="Specify the Path for local models (For model NeoCustom or GPT2Custom)")
parser.add_argument("--cpu", action='store_true', help="By default unattended launches are on the GPU use this option to force CPU usage.")
parser.add_argument("--breakmodel", action='store_true', help="For models that support GPU-CPU hybrid generation, use this feature instead of GPU or CPU generation")
parser.add_argument("--breakmodel_layers", type=int, help="Specify the number of layers to commit to system RAM if --breakmodel is used")
args = parser.parse_args()

# --model pre-selects the model and skips the interactive menu below
vars.model = args.model

if args.remote:
    vars.remote = True
2021-05-03 00:46:45 +02:00
# Select a model to run: honour --model/--path if given, otherwise show the menu
if args.model:
    print("Welcome to KoboldAI!\nYou have selected the following Model:", vars.model)
    if args.path:
        print("You have selected the following path for your Model :", args.path)
        vars.custmodpth = args.path
        vars.colaburl = args.path + "/request"  # Lets just use the same parameter to keep it simple
else:
    print("{0}Welcome to the KoboldAI Client!\nSelect an AI model to continue:{1}\n".format(colors.CYAN, colors.END))
    getModelSelection()
2021-05-03 00:46:45 +02:00
2021-05-03 21:19:03 +02:00
# If transformers model was selected & GPU available, ask to use CPU or GPU
if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
    # Test for GPU support
    import torch
    print("{0}Looking for GPU support...{1}".format(colors.PURPLE, colors.END), end="")
    vars.hascuda = torch.cuda.is_available()
    # Hybrid RAM/VRAM ("breakmodel") generation is only wired up for GPT-Neo
    # class models
    vars.bmsupported = vars.model in ("EleutherAI/gpt-neo-1.3B", "EleutherAI/gpt-neo-2.7B", "NeoCustom")
    if(vars.hascuda):
        print("{0}FOUND!{1}".format(colors.GREEN, colors.END))
    else:
        print("{0}NOT FOUND!{1}".format(colors.YELLOW, colors.END))

    if args.model:
        # Unattended launch (--model supplied): default to GPU generation,
        # then let --cpu and --breakmodel override in that order.
        if(vars.hascuda):
            genselected = True
            vars.usegpu = True
            vars.breakmodel = False
        if(args.cpu):
            vars.usegpu = False
            vars.breakmodel = False
        if(vars.bmsupported and args.breakmodel):
            vars.usegpu = False
            vars.breakmodel = True
    elif(vars.hascuda):
        # Interactive launch: show the device menu (option 3 appears only for
        # breakmodel-capable models)
        if(vars.bmsupported):
            print(colors.YELLOW + "You're using a model that supports GPU-CPU hybrid generation!\nCurrently only GPT-Neo models and GPT-J-6B support this feature.")
        print("{0}Use GPU or CPU for generation?:  (Default GPU){1}".format(colors.CYAN, colors.END))
        if(vars.bmsupported):
            print(f"1 - GPU\n2 - CPU\n3 - Both (slower than GPU-only but uses less VRAM)\n")
        else:
            print("1 - GPU\n2 - CPU\n")
        genselected = False

    if(vars.hascuda):
        # Prompt loop; empty input defaults to GPU-only generation.
        # NOTE(review): genselected is already True on the --model path, so in
        # practice this loop only runs for the interactive branch above.
        while(genselected == False):
            genselect = input("Mode> ")
            if(genselect == ""):
                vars.breakmodel = False
                vars.usegpu = True
                genselected = True
            elif(genselect.isnumeric() and int(genselect) == 1):
                vars.breakmodel = False
                vars.usegpu = True
                genselected = True
            elif(genselect.isnumeric() and int(genselect) == 2):
                vars.breakmodel = False
                vars.usegpu = False
                genselected = True
            elif(vars.bmsupported and genselect.isnumeric() and int(genselect) == 3):
                # Hybrid: layers split between system RAM and VRAM
                vars.breakmodel = True
                vars.usegpu = False
                genselected = True
            else:
                print("{0}Please enter a valid selection.{1}".format(colors.RED, colors.END))
2021-05-03 00:46:45 +02:00
# Ask for API key if InferKit was selected.
# File handles are managed with `with` blocks; the original leaked the read
# handle when it reopened client.settings for writing on the missing-key path.
if(vars.model == "InferKit"):
    if(not path.exists("client.settings")):
        # If the client settings file doesn't exist, create it
        print("{0}Please enter your InferKit API key:{1}\n".format(colors.CYAN, colors.END))
        vars.apikey = input("Key> ")
        # Write API key to file
        with open("client.settings", "w") as file:
            js = {"apikey": vars.apikey}
            file.write(json.dumps(js, indent=3))
    else:
        # Otherwise open it up and check whether an API key is already stored
        with open("client.settings", "r") as file:
            js = json.load(file)
        if("apikey" in js and js["apikey"] != ""):
            # API key exists, grab it
            vars.apikey = js["apikey"]
        else:
            # Get API key, add it to settings object, and write it to disk
            print("{0}Please enter your InferKit API key:{1}\n".format(colors.CYAN, colors.END))
            vars.apikey = input("Key> ")
            js["apikey"] = vars.apikey
            # Write API key to file
            with open("client.settings", "w") as file:
                file.write(json.dumps(js, indent=3))
2021-05-22 11:28:40 +02:00
# Ask for API key if OpenAI was selected
if(vars.model == "OAI"):
    if(not path.exists("client.settings")):
        # If the client settings file doesn't exist, create it
        print("{0}Please enter your OpenAI API key:{1}\n".format(colors.CYAN, colors.END))
        vars.oaiapikey = input("Key> ")
        # Write API key to file
        file = open("client.settings", "w")
        try:
            js = {"oaiapikey": vars.oaiapikey}
            file.write(json.dumps(js, indent=3))
        finally:
            file.close()
    else:
        # Otherwise open it up
        file = open("client.settings", "r")
        # Check if API key exists
        js = json.load(file)
        if("oaiapikey" in js and js["oaiapikey"] != ""):
            # API key exists, grab it and close the file
            vars.oaiapikey = js["oaiapikey"]
            file.close()
        else:
            # Get API key, add it to settings object, and write it to disk
            # NOTE(review): the read handle opened above is never closed on
            # this path before the file is reopened for writing — consider a
            # `with` block here.
            print("{0}Please enter your OpenAI API key:{1}\n".format(colors.CYAN, colors.END))
            vars.oaiapikey = input("Key> ")
            js["oaiapikey"] = vars.oaiapikey
            # Write API key to file
            file = open("client.settings", "w")
            try:
                file.write(json.dumps(js, indent=3))
            finally:
                file.close()

    # Get list of models from OAI
    print("{0}Retrieving engine list...{1}".format(colors.PURPLE, colors.END), end="")
    req = requests.get(
        vars.oaiengines,
        headers={
            'Authorization': 'Bearer ' + vars.oaiapikey
        }
    )
    if(req.status_code == 200):
        print("{0}OK!{1}".format(colors.GREEN, colors.END))
        print("{0}Please select an engine to use:{1}\n".format(colors.CYAN, colors.END))
        engines = req.json()["data"]
        # Print list of engines (0-indexed) with their readiness state
        i = 0
        for en in engines:
            print("{0} - {1} ({2})".format(i, en["id"], "\033[92mready\033[0m" if en["ready"] == True else "\033[91mnot ready\033[0m"))
            i += 1
        # Get engine to use
        print("")
        engselected = False
        while(engselected == False):
            engine = input("Engine #> ")
            if(engine.isnumeric() and int(engine) < len(engines)):
                # Build the completions endpoint URL for the chosen engine
                vars.oaiurl = "https://api.openai.com/v1/engines/{0}/completions".format(engines[int(engine)]["id"])
                engselected = True
            else:
                print("{0}Please enter a valid selection.{1}".format(colors.RED, colors.END))
    else:
        # Something went wrong, print the message and quit since we can't initialize an engine
        print("{0}ERROR!{1}".format(colors.RED, colors.END))
        print(req.json())
        quit()
2021-05-14 00:58:52 +02:00
# Ask for ngrok url if Google Colab was selected (skipped when --path already
# provided one)
if(vars.model == "Colab"):
    if(vars.colaburl == ""):
        print("{0}Please enter the ngrok.io or trycloudflare.com URL displayed in Google Colab:{1}\n".format(colors.CYAN, colors.END))
        vars.colaburl = input("URL> ") + "/request"

# Read Only mode disables the AI entirely
if(vars.model == "ReadOnly"):
    vars.noai = True
2021-05-03 00:46:45 +02:00
# Set logging level to reduce chatter from Flask
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

# Start flask & SocketIO
print("{0}Initializing Flask...{1}".format(colors.PURPLE, colors.END), end="")
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
app = Flask(__name__)
# NOTE(review): Flask's config key is normally 'SECRET_KEY' (underscore);
# the space here is preserved from the original — confirm before changing.
app.config['SECRET KEY'] = 'secret!'
socketio = SocketIO(app)
print("{0}OK!{1}".format(colors.GREEN, colors.END))
2021-05-03 00:46:45 +02:00
# Start transformers and create pipeline
if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
    if(not vars.noai):
        print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END))
        from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModel

        # If custom GPT Neo model was chosen
        if(vars.model == "NeoCustom"):
            model = GPTNeoForCausalLM.from_pretrained(vars.custmodpth)
            tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth)
            # Is CUDA available? If so, use GPU, otherwise fall back to CPU
            if(vars.hascuda):
                if(vars.usegpu):
                    generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0)
                elif(vars.breakmodel):  # Use both RAM and VRAM (breakmodel)
                    # Split the transformer's layers between system RAM and the
                    # GPU: halve to fp16 on CPU first, then move the embedding,
                    # final layer-norm and (if present) LM head to the GPU.
                    n_layers = model.config.num_layers
                    breakmodel.total_blocks = n_layers
                    model.half().to('cpu')
                    gc.collect()
                    model.transformer.wte.to(breakmodel.gpu_device)
                    model.transformer.ln_f.to(breakmodel.gpu_device)
                    if(hasattr(model, 'lm_head')):
                        model.lm_head.to(breakmodel.gpu_device)
                    # Models with rotary embeddings have no learned positional
                    # embedding table (wpe) to move
                    if(not hasattr(model.config, 'rotary') or not model.config.rotary):
                        model.transformer.wpe.to(breakmodel.gpu_device)
                    gc.collect()
                    if(args.breakmodel_layers is not None):
                        # Clamp the CLI-provided layer count into [0, n_layers]
                        breakmodel.ram_blocks = max(0, min(n_layers, args.breakmodel_layers))
                    else:
                        print(colors.CYAN + "\nHow many layers would you like to put into system RAM?")
                        print("The more of them you put into system RAM, the slower it will run,")
                        print("but it will require less VRAM")
                        print("(roughly proportional to number of layers).")
                        print(f"This model has{colors.YELLOW} {n_layers} {colors.CYAN}layers.{colors.END}\n")
                        while(True):
                            layerselect = input("# of layers> ")
                            if(layerselect.isnumeric() and 0 <= int(layerselect) <= n_layers):
                                breakmodel.ram_blocks = int(layerselect)
                                break
                            else:
                                print(f"{colors.RED}Please enter an integer between 0 and {n_layers}.{colors.END}")
                    print(f"{colors.PURPLE}Will commit{colors.YELLOW} {breakmodel.ram_blocks} {colors.PURPLE}of{colors.YELLOW} {n_layers} {colors.PURPLE}layers to system RAM.{colors.END}")
                    # Patch the forward pass so generation routes layers
                    # between RAM and VRAM, and call model.generate directly
                    # instead of the pipeline
                    GPTNeoModel.forward = breakmodel.new_forward
                    generator = model.generate
                else:
                    generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
            else:
                generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
        # If custom GPT2 model was chosen
        elif(vars.model == "GPT2Custom"):
            model = GPT2LMHeadModel.from_pretrained(vars.custmodpth)
            tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth)
            # Is CUDA available? If so, use GPU, otherwise fall back to CPU
            if(vars.hascuda and vars.usegpu):
                generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0)
            else:
                generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
        # If base HuggingFace model was chosen
        else:
            # Is CUDA available? If so, use GPU, otherwise fall back to CPU
            tokenizer = GPT2Tokenizer.from_pretrained(vars.model)
            if(vars.hascuda):
                if(vars.usegpu):
                    generator = pipeline('text-generation', model=vars.model, device=0)
                elif(vars.breakmodel):  # Use both RAM and VRAM (breakmodel)
                    # Same hybrid RAM/VRAM setup as the NeoCustom branch above,
                    # but loading the stock model by name first
                    model = AutoModel.from_pretrained(vars.model)
                    n_layers = model.config.num_layers
                    breakmodel.total_blocks = n_layers
                    model.half().to('cpu')
                    gc.collect()
                    model.transformer.wte.to(breakmodel.gpu_device)
                    model.transformer.ln_f.to(breakmodel.gpu_device)
                    if(hasattr(model, 'lm_head')):
                        model.lm_head.to(breakmodel.gpu_device)
                    if(not hasattr(model.config, 'rotary') or not model.config.rotary):
                        model.transformer.wpe.to(breakmodel.gpu_device)
                    gc.collect()
                    if(args.breakmodel_layers is not None):
                        breakmodel.ram_blocks = max(0, min(n_layers, args.breakmodel_layers))
                    else:
                        print(colors.CYAN + "\nHow many layers would you like to put into system RAM?")
                        print("The more of them you put into system RAM, the slower it will run,")
                        print("but it will require less VRAM")
                        print("(roughly proportional to number of layers).")
                        print(f"This model has{colors.YELLOW} {n_layers} {colors.CYAN}layers.{colors.END}\n")
                        while(True):
                            layerselect = input("# of layers> ")
                            if(layerselect.isnumeric() and 0 <= int(layerselect) <= n_layers):
                                breakmodel.ram_blocks = int(layerselect)
                                break
                            else:
                                print(f"{colors.RED}Please enter an integer between 0 and {n_layers}.{colors.END}")
                    print(f"{colors.PURPLE}Will commit{colors.YELLOW} {breakmodel.ram_blocks} {colors.PURPLE}of{colors.YELLOW} {n_layers} {colors.PURPLE}layers to system RAM.{colors.END}")
                    GPTNeoModel.forward = breakmodel.new_forward
                    generator = model.generate
                else:
                    generator = pipeline('text-generation', model=vars.model)
            else:
                generator = pipeline('text-generation', model=vars.model)

        # Suppress Author's Note by flagging square brackets: every vocab key
        # containing "[" is added to the bad-words list for generation
        vocab = tokenizer.get_vocab()
        vocab_keys = vocab.keys()
        vars.badwords = gettokenids("[")
        for key in vars.badwords:
            vars.badwordsids.append([vocab[key]])

        print("{0}OK! {1} pipeline created!{2}".format(colors.GREEN, vars.model, colors.END))
    else:
        # If we're running Colab or OAI, we still need a tokenizer.
        if(vars.model == "Colab"):
            from transformers import GPT2Tokenizer
            tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
        elif(vars.model == "OAI"):
            from transformers import GPT2Tokenizer
            tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
2021-05-03 00:46:45 +02:00
# Set up Flask routes
@app.route('/')
@app.route('/index')
def index():
    """Serve the single-page client UI."""
    return render_template('index.html')
#============================ METHODS =============================#

#==================================================================#
# Event triggered when browser SocketIO is loaded and connects to server
#==================================================================#
@socketio.on('connect')
def do_connect():
    """Sync a newly connected browser with the current server state."""
    print("{0}Client connected!{1}".format(colors.GREEN, colors.END))
    emit('from_server', {'cmd': 'connected'})
    if(vars.remote):
        emit('from_server', {'cmd': 'runs_remotely'})

    if(not vars.gamestarted):
        # No game yet: show the start state and reset to play mode
        setStartState()
        sendsettings()
        refresh_settings()
        sendwi()
        vars.mode = "play"
    else:
        # Game in session, send current game data and ready state to browser
        refresh_story()
        sendsettings()
        refresh_settings()
        sendwi()
        # Re-broadcast the UI mode so every connected client stays in sync
        if(vars.mode == "play"):
            if(not vars.aibusy):
                emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True)
            else:
                emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, broadcast=True)
        elif(vars.mode == "edit"):
            emit('from_server', {'cmd': 'editmode', 'data': 'true'}, broadcast=True)
        elif(vars.mode == "memory"):
            emit('from_server', {'cmd': 'memmode', 'data': 'true'}, broadcast=True)
        elif(vars.mode == "wi"):
            emit('from_server', {'cmd': 'wimode', 'data': 'true'}, broadcast=True)
2021-05-03 00:46:45 +02:00
#==================================================================#
# Event triggered when browser SocketIO sends data to the server
#==================================================================#
@socketio.on('message')
def get_message(msg):
    """Dispatch a command message sent by the browser UI over SocketIO.

    ``msg`` is a dict with a ``cmd`` key naming the requested action and,
    for most commands, a ``data`` payload.  Each branch either mutates
    server state in ``vars``, kicks off story processing, or emits a UI
    update back to the client(s).
    """
    print("{0}Data recieved:{1}{2}".format(colors.GREEN, msg, colors.END))
    # Submit action
    if(msg['cmd'] == 'submit'):
        if(vars.mode == "play"):
            actionsubmit(msg['data'], actionmode=msg['actionmode'])
        elif(vars.mode == "edit"):
            editsubmit(msg['data'])
        elif(vars.mode == "memory"):
            memsubmit(msg['data'])
    # Retry Action
    elif(msg['cmd'] == 'retry'):
        actionretry(msg['data'])
    # Back/Undo Action
    elif(msg['cmd'] == 'back'):
        actionback()
    # EditMode Action (old)
    elif(msg['cmd'] == 'edit'):
        # Toggles between play and edit modes
        if(vars.mode == "play"):
            vars.mode = "edit"
            emit('from_server', {'cmd': 'editmode', 'data': 'true'}, broadcast=True)
        elif(vars.mode == "edit"):
            vars.mode = "play"
            emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True)
    # EditLine Action (old)
    elif(msg['cmd'] == 'editline'):
        editrequest(int(msg['data']))
    # Inline edit
    elif(msg['cmd'] == 'inlineedit'):
        inlineedit(msg['chunk'], msg['data'])
    elif(msg['cmd'] == 'inlinedelete'):
        inlinedelete(msg['data'])
    # DeleteLine Action (old)
    elif(msg['cmd'] == 'delete'):
        deleterequest()
    elif(msg['cmd'] == 'memory'):
        togglememorymode()
    elif(msg['cmd'] == 'savetofile'):
        savetofile()
    elif(msg['cmd'] == 'loadfromfile'):
        loadfromfile()
    elif(msg['cmd'] == 'import'):
        importRequest()
    elif(msg['cmd'] == 'newgame'):
        newGameRequest()
    elif(msg['cmd'] == 'rndgame'):
        randomGameRequest(msg['data'])
    # Sampler/generation settings: update vars, mirror the new value to the
    # UI label, then debounce-save and refresh the settings panel
    elif(msg['cmd'] == 'settemp'):
        vars.temp = float(msg['data'])
        emit('from_server', {'cmd': 'setlabeltemp', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'settopp'):
        vars.top_p = float(msg['data'])
        emit('from_server', {'cmd': 'setlabeltopp', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'settopk'):
        vars.top_k = int(msg['data'])
        emit('from_server', {'cmd': 'setlabeltopk', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'settfs'):
        vars.tfs = float(msg['data'])
        emit('from_server', {'cmd': 'setlabeltfs', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'setreppen'):
        vars.rep_pen = float(msg['data'])
        emit('from_server', {'cmd': 'setlabelreppen', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'setoutput'):
        vars.genamt = int(msg['data'])
        emit('from_server', {'cmd': 'setlabeloutput', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'settknmax'):
        vars.max_length = int(msg['data'])
        emit('from_server', {'cmd': 'setlabeltknmax', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'setikgen'):
        vars.ikgen = int(msg['data'])
        emit('from_server', {'cmd': 'setlabelikgen', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    # Author's Note field update
    elif(msg['cmd'] == 'anote'):
        anotesubmit(msg['data'])
    # Author's Note depth update
    elif(msg['cmd'] == 'anotedepth'):
        vars.andepth = int(msg['data'])
        emit('from_server', {'cmd': 'setlabelanotedepth', 'data': msg['data']}, broadcast=True)
        settingschanged()
        refresh_settings()
    # Format - Trim incomplete sentences
    elif(msg['cmd'] == 'frmttriminc'):
        # Only keys already registered by sendsettings() are accepted
        if('frmttriminc' in vars.formatoptns):
            vars.formatoptns["frmttriminc"] = msg['data']
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'frmtrmblln'):
        if('frmtrmblln' in vars.formatoptns):
            vars.formatoptns["frmtrmblln"] = msg['data']
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'frmtrmspch'):
        if('frmtrmspch' in vars.formatoptns):
            vars.formatoptns["frmtrmspch"] = msg['data']
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'frmtadsnsp'):
        if('frmtadsnsp' in vars.formatoptns):
            vars.formatoptns["frmtadsnsp"] = msg['data']
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'importselect'):
        # UI element ids look like "importN"; strip the prefix to get the index
        vars.importnum = int(msg["data"].replace("import", ""))
    elif(msg['cmd'] == 'importcancel'):
        emit('from_server', {'cmd': 'popupshow', 'data': False})
        vars.importjs = {}
    elif(msg['cmd'] == 'importaccept'):
        emit('from_server', {'cmd': 'popupshow', 'data': False})
        importgame()
    elif(msg['cmd'] == 'wi'):
        togglewimode()
    elif(msg['cmd'] == 'wiinit'):
        if(int(msg['data']) < len(vars.worldinfo)):
            # NOTE(review): the bounds check casts to int but the index does
            # not -- presumably msg['data'] already arrives as an int; confirm
            vars.worldinfo[msg['data']]["init"] = True
            addwiitem()
    elif(msg['cmd'] == 'widelete'):
        deletewi(msg['data'])
    elif(msg['cmd'] == 'wiselon'):
        vars.worldinfo[msg['data']]["selective"] = True
    elif(msg['cmd'] == 'wiseloff'):
        vars.worldinfo[msg['data']]["selective"] = False
    elif(msg['cmd'] == 'sendwilist'):
        commitwi(msg['data'])
    elif(msg['cmd'] == 'aidgimport'):
        importAidgRequest(msg['data'])
    elif(msg['cmd'] == 'saveasrequest'):
        saveas(msg['data'])
    elif(msg['cmd'] == 'saverequest'):
        save()
    elif(msg['cmd'] == 'loadlistrequest'):
        getloadlist()
    elif(msg['cmd'] == 'loadselect'):
        # Remember the story the user highlighted; 'loadrequest' acts on it
        vars.loadselect = msg["data"]
    elif(msg['cmd'] == 'loadrequest'):
        loadRequest(getcwd()+"/stories/"+vars.loadselect+".json")
    elif(msg['cmd'] == 'clearoverwrite'):
        vars.svowname = ""
        vars.saveow = False
    elif(msg['cmd'] == 'seqsel'):
        selectsequence(msg['data'])
    elif(msg['cmd'] == 'setnumseq'):
        vars.numseqs = int(msg['data'])
        emit('from_server', {'cmd': 'setlabelnumseq', 'data': msg['data']})
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'setwidepth'):
        vars.widepth = int(msg['data'])
        emit('from_server', {'cmd': 'setlabelwidepth', 'data': msg['data']})
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'setuseprompt'):
        vars.useprompt = msg['data']
        settingschanged()
        refresh_settings()
    elif(msg['cmd'] == 'setadventure'):
        vars.adventure = msg['data']
        settingschanged()
        refresh_settings()
        # Redraw the story so adventure-mode action markup takes effect
        refresh_story()
    elif(msg['cmd'] == 'importwi'):
        wiimportrequest()
2021-05-13 07:26:42 +02:00
2021-05-03 00:46:45 +02:00
#==================================================================#
2021-05-22 11:28:40 +02:00
# Send start message and tell Javascript to set UI state
2021-05-03 00:46:45 +02:00
#==================================================================#
def setStartState():
    """Send the welcome message to the UI and put it in the 'ready to start' state."""
    txt = "<span>Welcome to <span class=\"color_cyan\">KoboldAI Client</span>! You are running <span class=\"color_green\">"+vars.model+"</span>.<br/>"
    if(not vars.noai):
        txt = txt + "Please load a game or enter a prompt below to begin!</span>"
    else:
        # Fix: close the outer <span> here too; the original left it
        # unbalanced in Read Only mode
        txt = txt + "Please load or import a story to read. There is no AI in this mode.</span>"
    emit('from_server', {'cmd': 'updatescreen', 'gamestarted': vars.gamestarted, 'data': txt}, broadcast=True)
    emit('from_server', {'cmd': 'setgamestate', 'data': 'start'}, broadcast=True)
2021-05-03 00:46:45 +02:00
2021-05-07 20:32:10 +02:00
#==================================================================#
2021-05-22 11:28:40 +02:00
# Transmit applicable settings to SocketIO to build UI sliders/toggles
2021-05-07 20:32:10 +02:00
#==================================================================#
def sendsettings():
    """Transmit applicable settings to SocketIO to build UI sliders/toggles."""
    # Send settings for selected AI type (renamed loop var: 'set' shadowed
    # the builtin)
    if(vars.model != "InferKit"):
        for setting in gensettings.gensettingstf:
            emit('from_server', {'cmd': 'addsetting', 'data': setting})
    else:
        for setting in gensettings.gensettingsik:
            emit('from_server', {'cmd': 'addsetting', 'data': setting})

    # Send formatting options
    for frm in gensettings.formatcontrols:
        emit('from_server', {'cmd': 'addformat', 'data': frm})
        # Add format key to vars if it wasn't loaded with client.settings
        if(not frm["id"] in vars.formatoptns):
            vars.formatoptns[frm["id"]] = False
2021-05-07 20:32:10 +02:00
#==================================================================#
2021-05-22 11:28:40 +02:00
# Take settings from vars and write them to client settings file
2021-05-07 20:32:10 +02:00
#==================================================================#
def savesettings():
    """Take settings from vars and write them to the client settings file.

    Serializes the user-tunable generation and UI settings to
    ``client.settings`` as indented JSON.
    """
    # Build json to write
    js = {}
    js["apikey"]      = vars.apikey
    js["andepth"]     = vars.andepth
    js["temp"]        = vars.temp
    js["top_p"]       = vars.top_p
    js["top_k"]       = vars.top_k
    js["tfs"]         = vars.tfs
    js["rep_pen"]     = vars.rep_pen
    js["genamt"]      = vars.genamt
    js["max_length"]  = vars.max_length
    js["ikgen"]       = vars.ikgen
    js["formatoptns"] = vars.formatoptns
    js["numseqs"]     = vars.numseqs
    js["widepth"]     = vars.widepth
    js["useprompt"]   = vars.useprompt
    js["adventure"]   = vars.adventure

    # Write it; the context manager closes the file even if dumping fails
    # (replaces the manual open/try/finally)
    with open("client.settings", "w") as file:
        file.write(json.dumps(js, indent=3))
#==================================================================#
2021-05-22 11:28:40 +02:00
# Read settings from client file JSON and send to vars
2021-05-07 20:32:10 +02:00
#==================================================================#
def loadsettings():
    """Read settings from the client settings file (if present) into vars.

    Every key is treated as optional so older settings files remain
    loadable without error.
    """
    if(path.exists("client.settings")):
        # Read file contents into JSON object; 'with' guarantees the file is
        # closed even if json.load raises (the original leaked it on error)
        with open("client.settings", "r") as file:
            js = json.load(file)

        # Copy file contents to vars
        if("apikey" in js):
            vars.apikey = js["apikey"]
        if("andepth" in js):
            vars.andepth = js["andepth"]
        if("temp" in js):
            vars.temp = js["temp"]
        if("top_p" in js):
            vars.top_p = js["top_p"]
        if("top_k" in js):
            vars.top_k = js["top_k"]
        if("tfs" in js):
            vars.tfs = js["tfs"]
        if("rep_pen" in js):
            vars.rep_pen = js["rep_pen"]
        if("genamt" in js):
            vars.genamt = js["genamt"]
        if("max_length" in js):
            vars.max_length = js["max_length"]
        if("ikgen" in js):
            vars.ikgen = js["ikgen"]
        if("formatoptns" in js):
            vars.formatoptns = js["formatoptns"]
        if("numseqs" in js):
            vars.numseqs = js["numseqs"]
        if("widepth" in js):
            vars.widepth = js["widepth"]
        if("useprompt" in js):
            vars.useprompt = js["useprompt"]
        if("adventure" in js):
            vars.adventure = js["adventure"]
#==================================================================#
2021-05-22 11:28:40 +02:00
# Don't save settings unless 2 seconds have passed without modification
2021-05-07 20:32:10 +02:00
#==================================================================#
@debounce(2)
def settingschanged():
    """Persist settings to disk, debounced so rapid UI changes save only once per 2s."""
    print("{0}Saving settings!{1}".format(colors.GREEN, colors.END))
    savesettings()
2021-05-03 00:46:45 +02:00
#==================================================================#
2021-05-22 11:28:40 +02:00
# Take input text from SocketIO and decide what to do with it
2021-05-03 00:46:45 +02:00
#==================================================================#
2021-08-19 13:18:01 +02:00
def actionsubmit(data, actionmode=0):
    """Take input text from SocketIO and decide what to do with it.

    data: raw text submitted by the player; "" means a blank/continue action.
    actionmode: 0 = story mode (input-formatted and appended as-is),
                1 = adventure "Action" mode (wrapped as "\\n\\n> ...\\n").
    """
    # Ignore new submissions if the AI is currently busy
    if(vars.aibusy):
        return
    set_aibusy(1)

    vars.actionmode = actionmode

    # "Action" mode: strip the prompt marker, collapse newlines, and re-wrap
    # as a quoted adventure action
    if(actionmode == 1):
        data = data.strip().lstrip('>')
        data = re.sub(r'\n+', ' ', data)
        data = f"\n\n> {data}\n"

    # If we're not continuing, store a copy of the raw input
    if(data != ""):
        vars.lastact = data

    if(not vars.gamestarted):
        # Start the game
        vars.gamestarted = True
        # Save this first action as the prompt
        vars.prompt = data
        if(not vars.noai):
            # Clear the startup text from game screen
            emit('from_server', {'cmd': 'updatescreen', 'gamestarted': vars.gamestarted, 'data': 'Please wait, generating story...'}, broadcast=True)
            calcsubmit(data) # Run the first action through the generator
            emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True)
        else:
            # Read Only mode: just show the prompt, no generation
            refresh_story()
            set_aibusy(0)
            emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True)
    else:
        # Dont append submission if it's a blank/continue action
        if(data != ""):
            # Apply input formatting & scripts before sending to tokenizer
            # (adventure actions were already formatted above)
            if(vars.actionmode == 0):
                data = applyinputformatting(data)
            # Store the result in the Action log
            vars.actions.append(data)
            update_story_chunk('last')
        if(not vars.noai):
            # Off to the tokenizer!
            calcsubmit(data)
            emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True)
        else:
            set_aibusy(0)
            emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True)
2021-05-29 11:46:03 +02:00
#==================================================================#
#
#==================================================================#
def actionretry(data):
    """Discard the most recent action (if any) and regenerate from the remaining context."""
    # Retrying makes no sense without a generator attached
    if(vars.noai):
        emit('from_server', {'cmd': 'errmsg', 'data': "Retry function unavailable in Read Only mode."})
        return
    if(vars.aibusy):
        return
    set_aibusy(1)
    # Drop the latest action (when one exists) so it can be regenerated
    if(vars.actions):
        vars.actions.pop()
        remove_story_chunk(len(vars.actions) + 1)
    calcsubmit('')
#==================================================================#
#
#==================================================================#
def actionback():
    """Undo the last action and refresh the game screen."""
    if(vars.aibusy):
        return
    # Remove last index of actions and refresh game screen
    # (dropped the unused local 'action_index' the original assigned here)
    if(len(vars.actions) > 0):
        vars.actions.pop()
        remove_story_chunk(len(vars.actions) + 1)
2021-05-03 00:46:45 +02:00
#==================================================================#
# Take submitted text and build the text to be given to generator
#==================================================================#
def calcsubmit(txt):
    """Take submitted text and build the full context to be given to the generator.

    Assembles, within the budget (tokens for transformers models, characters
    for InferKit): memory, triggered World Info entries, the Author's Note
    (injected vars.andepth actions deep), the prompt (always, or only when
    budget remains if vars.useprompt is off), and as many recent actions as
    fit.  Dispatches the result to the local generator, the Colab/OAI remote
    endpoints, or the InferKit API depending on vars.model.
    """
    anotetxt     = ""     # Placeholder for Author's Note text
    lnanote      = 0      # Placeholder for Author's Note length
    forceanote   = False  # In case we don't have enough actions to hit A.N. depth
    anoteadded   = False  # In case our budget runs out before we hit A.N. depth
    actionlen    = len(vars.actions)

    # Scan for WorldInfo matches
    winfo = checkworldinfo(txt)

    # Add a newline to the end of memory
    if(vars.memory != "" and vars.memory[-1] != "\n"):
        mem = vars.memory + "\n"
    else:
        mem = vars.memory

    # Build Author's Note if set
    if(vars.authornote != ""):
        anotetxt = "\n[Author's note: "+vars.authornote+"]\n"

    # For all transformers models
    if(vars.model != "InferKit"):
        anotetkns = []    # Placeholder for Author's Note tokens

        # Calculate token budget
        prompttkns = tokenizer.encode(vars.prompt)
        lnprompt   = len(prompttkns)

        memtokens = tokenizer.encode(mem)
        lnmem     = len(memtokens)

        witokens  = tokenizer.encode(winfo)
        lnwi      = len(witokens)

        if(anotetxt != ""):
            anotetkns = tokenizer.encode(anotetxt)
            lnanote   = len(anotetkns)

        # Prompt tokens only count against the budget when the prompt is
        # unconditionally included
        if(vars.useprompt):
            budget = vars.max_length - lnprompt - lnmem - lnanote - lnwi - vars.genamt
        else:
            budget = vars.max_length - lnmem - lnanote - lnwi - vars.genamt

        if(actionlen == 0):
            # First/Prompt action
            subtxt = vars.memory + winfo + anotetxt + vars.prompt
            lnsub  = lnmem + lnwi + lnprompt + lnanote

            if(not vars.model in ["Colab", "OAI"]):
                generate(subtxt, lnsub+1, lnsub+vars.genamt)
            elif(vars.model == "Colab"):
                sendtocolab(subtxt, lnsub+1, lnsub+vars.genamt)
            elif(vars.model == "OAI"):
                oairequest(subtxt, lnsub+1, lnsub+vars.genamt)
        else:
            tokens = []

            # Check if we have the action depth to hit our A.N. depth
            if(anotetxt != "" and actionlen < vars.andepth):
                forceanote = True

            # Get most recent action tokens up to our budget, walking the
            # action log newest-to-oldest and prepending as we go
            for n in range(actionlen):
                if(budget <= 0):
                    break
                acttkns = tokenizer.encode(vars.actions[(-1-n)])
                tknlen = len(acttkns)
                if(tknlen < budget):
                    tokens = acttkns + tokens
                    budget -= tknlen
                else:
                    # Action is bigger than the remaining budget; keep only
                    # its tail and stop
                    count = budget * -1
                    tokens = acttkns[count:] + tokens
                    budget = 0
                    break

                # Inject Author's Note if we've reached the desired depth
                if(n == vars.andepth-1):
                    if(anotetxt != ""):
                        tokens = anotetkns + tokens # A.N. len already taken from bdgt
                        anoteadded = True

            # If we're not using the prompt every time and there's still budget left,
            # add some prompt.
            if(not vars.useprompt):
                if(budget > 0):
                    prompttkns = prompttkns[-budget:]
                else:
                    prompttkns = []

            # Did we get to add the A.N.? If not, do it here
            if(anotetxt != ""):
                if((not anoteadded) or forceanote):
                    tokens = memtokens + witokens + anotetkns + prompttkns + tokens
                else:
                    tokens = memtokens + witokens + prompttkns + tokens
            else:
                # Prepend Memory, WI, and Prompt before action tokens
                tokens = memtokens + witokens + prompttkns + tokens

            # Send completed bundle to generator
            ln = len(tokens)

            if(not vars.model in ["Colab", "OAI"]):
                generate(
                    tokenizer.decode(tokens),
                    ln+1,
                    ln+vars.genamt
                    )
            elif(vars.model == "Colab"):
                sendtocolab(
                    tokenizer.decode(tokens),
                    ln+1,
                    ln+vars.genamt
                    )
            elif(vars.model == "OAI"):
                oairequest(
                    tokenizer.decode(tokens),
                    ln+1,
                    ln+vars.genamt
                    )
    # For InferKit web API
    else:
        # Same assembly as above, but the budget is measured in characters
        # (vars.ikmax) rather than tokens

        # Check if we have the action depth to hit our A.N. depth
        if(anotetxt != "" and actionlen < vars.andepth):
            forceanote = True

        if(vars.useprompt):
            budget = vars.ikmax - len(vars.prompt) - len(anotetxt) - len(mem) - len(winfo) - 1
        else:
            budget = vars.ikmax - len(anotetxt) - len(mem) - len(winfo) - 1

        subtxt = ""
        prompt = vars.prompt
        for n in range(actionlen):
            if(budget <= 0):
                break
            actlen = len(vars.actions[(-1-n)])
            if(actlen < budget):
                subtxt = vars.actions[(-1-n)] + subtxt
                budget -= actlen
            else:
                count = budget * -1
                subtxt = vars.actions[(-1-n)][count:] + subtxt
                budget = 0
                break

            # If we're not using the prompt every time and there's still budget left,
            # add some prompt.
            if(not vars.useprompt):
                if(budget > 0):
                    prompt = vars.prompt[-budget:]
                else:
                    prompt = ""

            # Inject Author's Note if we've reached the desired depth
            if(n == vars.andepth-1):
                if(anotetxt != ""):
                    subtxt = anotetxt + subtxt # A.N. len already taken from bdgt
                    anoteadded = True

        # Did we get to add the A.N.? If not, do it here
        if(anotetxt != ""):
            if((not anoteadded) or forceanote):
                subtxt = mem + winfo + anotetxt + prompt + subtxt
            else:
                subtxt = mem + winfo + prompt + subtxt
        else:
            subtxt = mem + winfo + prompt + subtxt

        # Send it!
        ikrequest(subtxt)
#==================================================================#
# Send text to generator and deal with output
#==================================================================#
def generate(txt, min, max):
    """Send assembled context to the local transformers generator and handle output.

    txt: the full context string (re-encoded to a token tensor here when
         breakmodel is active)
    min/max: minimum/maximum total sequence length passed to the generator.
    NOTE(review): these parameter names shadow the builtins; kept for
    interface stability.
    """
    print("{0}Min:{1}, Max:{2}, Txt:{3}{4}".format(colors.YELLOW, min, max, txt, colors.END))

    # Store context in memory to use it for comparison with generated content
    vars.lastctx = txt

    # Clear CUDA cache if using GPU
    if(vars.hascuda and (vars.usegpu or vars.breakmodel)):
        gc.collect()
        torch.cuda.empty_cache()

    # Submit input text to generator
    try:
        # A value <= 0 disables the corresponding sampler
        top_p = vars.top_p if vars.top_p > 0.0 else None
        top_k = vars.top_k if vars.top_k > 0 else None
        tfs = vars.tfs if vars.tfs > 0.0 else None

        # generator() only accepts a torch tensor of tokens (long datatype) as
        # its first argument if we're using breakmodel, otherwise a string
        # is fine
        if(vars.hascuda and vars.breakmodel):
            gen_in = tokenizer.encode(txt, return_tensors="pt", truncation=True).long().to(breakmodel.gpu_device)
        else:
            gen_in = txt

        # no_grad: inference only, skip autograd bookkeeping
        with torch.no_grad():
            genout = generator(
                gen_in,
                do_sample=True,
                min_length=min,
                max_length=max,
                repetition_penalty=vars.rep_pen,
                top_p=top_p,
                top_k=top_k,
                tfs=tfs,
                temperature=vars.temp,
                bad_words_ids=vars.badwordsids,
                use_cache=True,
                return_full_text=False,
                num_return_sequences=vars.numseqs
                )
    except Exception as e:
        # Surface the failure to the UI and release the busy flag so the
        # client isn't stuck waiting
        emit('from_server', {'cmd': 'errmsg', 'data': 'Error occured during generator call, please check console.'}, broadcast=True)
        print("{0}{1}{2}".format(colors.RED, e, colors.END))
        set_aibusy(0)
        return

    # Need to manually strip and decode tokens if we're not using a pipeline
    if(vars.hascuda and vars.breakmodel):
        genout = [{"generated_text": tokenizer.decode(tokens[len(gen_in[0])-len(tokens):])} for tokens in genout]

    if(len(genout) == 1):
        genresult(genout[0]["generated_text"])
    else:
        genselect(genout)

    # Clear CUDA cache again if using GPU
    if(vars.hascuda and (vars.usegpu or vars.breakmodel)):
        del genout
        gc.collect()
        torch.cuda.empty_cache()

    set_aibusy(0)
#==================================================================#
# Deal with a single return sequence from generate()
#==================================================================#
def genresult(genout):
    """Post-process a single generated sequence and append it to the story."""
    # Echo the raw generator output to the console
    print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))

    # Apply output formatting rules, then commit the result to the action log
    formatted = applyoutputformatting(genout)
    vars.actions.append(formatted)

    # Redraw the newest chunk and trigger the highlight effect in the UI
    update_story_chunk('last')
    emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}, broadcast=True)
2021-05-29 11:46:03 +02:00
#==================================================================#
# Send generator sequences to the UI for selection
#==================================================================#
def genselect(genout):
    """Send multiple generator sequences to the UI for the player to choose from.

    genout: list of {"generated_text": str} dicts; each entry is formatted
    in place before being shown.
    """
    # enumerate replaces the original hand-maintained counter
    for i, result in enumerate(genout):
        # Apply output formatting rules to sequences
        result["generated_text"] = applyoutputformatting(result["generated_text"])
        print("{0}[Result {1}]\n{2}{3}".format(colors.CYAN, i, result["generated_text"], colors.END))

    # Store sequences in memory until selection is made
    vars.genseqs = genout

    # Send sequences to UI for selection
    emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True)
2021-05-29 11:46:03 +02:00
#==================================================================#
# Send selected sequence to action log and refresh UI
#==================================================================#
def selectsequence(n):
    """Commit the player's chosen sequence (index n) to the story and dismiss the picker."""
    # Nothing to select if no sequences are pending
    if(not vars.genseqs):
        return
    chosen = vars.genseqs[int(n)]["generated_text"]
    vars.actions.append(chosen)
    update_story_chunk('last')
    emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}, broadcast=True)
    emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True)
    # Discard the unused candidates
    vars.genseqs = []
2021-05-03 00:46:45 +02:00
2021-05-14 00:58:52 +02:00
#==================================================================#
# Send transformers-style request to ngrok/colab host
#==================================================================#
def sendtocolab(txt, min, max):
    """Send a transformers-style generation request to a ngrok/colab host.

    txt: fully assembled context; min/max: sequence length bounds forwarded
    to the remote generator (names shadow builtins; kept for interface
    stability).  Handles both the current multi-sequence response format
    and the outdated single-text colab format.
    """
    # Log request to console
    print("{0}Tokens:{1}, Txt:{2}{3}".format(colors.YELLOW, min-1, txt, colors.END))

    # Store context in memory to use it for comparison with generated content
    vars.lastctx = txt

    # Build request JSON data
    reqdata = {
        'text': txt,
        'min': min,
        'max': max,
        'rep_pen': vars.rep_pen,
        'temperature': vars.temp,
        'top_p': vars.top_p,
        'top_k': vars.top_k,
        'tfs': vars.tfs,
        'numseqs': vars.numseqs,
        'retfultxt': False
    }

    # Create request
    req = requests.post(
        vars.colaburl,
        json = reqdata
        )

    # Deal with the response
    if(req.status_code == 200):
        js = req.json()["data"]

        # Try to be backwards compatible with outdated colab
        if("text" in js):
            genout = [getnewcontent(js["text"])]
        else:
            genout = js["seqs"]

        if(len(genout) == 1):
            genresult(genout[0])
        else:
            # Convert torch output format to transformers
            seqs = []
            for seq in genout:
                seqs.append({"generated_text": seq})
            genselect(seqs)

        # Format output before continuing
        #genout = applyoutputformatting(getnewcontent(genout))

        # Add formatted text to Actions array and refresh the game screen
        #vars.actions.append(genout)
        #refresh_story()
        #emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)})

        set_aibusy(0)
    else:
        # Non-200: report the failure and release the busy flag
        errmsg = "Colab API Error: Failed to get a reply from the server. Please check the colab console."
        print("{0}{1}{2}".format(colors.RED, errmsg, colors.END))
        emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
        set_aibusy(0)
2021-05-03 00:46:45 +02:00
#==================================================================#
# Replaces returns and newlines with HTML breaks
#==================================================================#
def formatforhtml(txt):
    """Replace line breaks (escaped or literal) with HTML <br/> tags.

    Handles both backslash-escaped sequences ("\\r", "\\n") that can arrive
    in serialized text and real newline/carriage-return characters.  CRLF
    pairs are collapsed first so a Windows line ending yields one <br/>
    instead of two (the old chained replaces doubled them).
    """
    return (txt.replace("\\r\\n", "<br/>")
               .replace("\\r", "<br/>")
               .replace("\\n", "<br/>")
               .replace("\r\n", "<br/>")
               .replace("\n", "<br/>")
               .replace("\r", "<br/>"))
#==================================================================#
2021-05-11 01:17:10 +02:00
# Strips submitted text from the text returned by the AI
2021-05-03 00:46:45 +02:00
#==================================================================#
def getnewcontent(txt):
    """Strip the submitted context from the text returned by the AI.

    txt -- full generator output (context followed by the continuation)
    Returns only the newly generated portion, re-decoded from tokens.
    """
    # If the submitted context was blank, then everything is new
    if(vars.lastctx == ""):
        return txt
    
    # Tokenize the last context and the generated content
    ctxtokens = tokenizer.encode(vars.lastctx)
    txttokens = tokenizer.encode(txt)
    
    # Drop the leading context tokens.  Slicing by the context length (rather
    # than the old negative-offset arithmetic) also returns "" correctly when
    # the generator produced no new tokens; the previous code computed an
    # offset of 0 in that case and returned the entire context.
    newtokens = txttokens[len(ctxtokens):]
    
    return tokenizer.decode(newtokens)
2021-05-03 00:46:45 +02:00
2021-05-11 01:17:10 +02:00
#==================================================================#
# Applies chosen formatting options to text submitted to AI
#==================================================================#
def applyinputformatting(txt):
    """Apply the user's chosen formatting options to text submitted to the AI."""
    # Only one input option exists today: optional sentence spacing
    return utils.addsentencespacing(txt, vars) if vars.formatoptns["frmtadsnsp"] else txt
#==================================================================#
# Applies chosen formatting options to text returned from AI
#==================================================================#
def applyoutputformatting(txt):
    """Apply the user's chosen formatting options to text returned from the AI."""
    # Normalize curly quotes and apostrophes to standard characters
    txt = utils.fixquotes(txt)

    # Adventure mode: clip all characters after a '>' action marker
    if vars.adventure:
        txt = vars.acregex_ai.sub('', txt)

    # The remaining options are simple flag -> transform pairs
    opts = vars.formatoptns
    if opts["frmttriminc"]:
        # Trim incomplete sentences
        txt = utils.trimincompletesentence(txt)
    if opts["frmtrmblln"]:
        # Replace blank lines
        txt = utils.replaceblanklines(txt)
    if opts["frmtrmspch"]:
        # Remove special characters
        txt = utils.removespecialchars(txt, vars)

    return txt
2021-05-03 00:46:45 +02:00
#==================================================================#
# Sends the current story content to the Game Screen
#==================================================================#
def refresh_story():
    """Send the full story (prompt plus every action) to the game screen."""
    # Chunk 0 is always the prompt
    chunks = ['<chunk n="0" id="n0" tabindex="-1">', html.escape(vars.prompt), '</chunk>']
    for num, action in enumerate(vars.actions, start=1):
        escaped = html.escape(action)
        if vars.adventure:
            # Wrap player actions in <action> tags for special styling
            escaped = vars.acregex_ui.sub('<action>\\1</action>', escaped)
        chunks.append(f'<chunk n="{num}" id="n{num}" tabindex="-1">{escaped}</chunk>')
    emit('from_server', {'cmd': 'updatescreen', 'gamestarted': vars.gamestarted, 'data': formatforhtml(''.join(chunks))}, broadcast=True)
2021-05-03 00:46:45 +02:00
2021-06-15 06:59:08 +02:00
#==================================================================#
# Signals the Game Screen to update one of the chunks
#==================================================================#
def update_story_chunk(idx: Union[int, Literal['last']]):
    """Signal the game screen to re-render one story chunk.

    idx -- chunk index (0 is the prompt), or 'last' for the newest action.
    """
    if idx == 'last':
        if len(vars.actions) <= 1:
            # In this case, we are better off just refreshing the whole thing as the
            # prompt might not have been shown yet (with a "Generating story..."
            # message instead).
            refresh_story()
            return
        idx = len(vars.actions)

    # Chunk 0 is the prompt; chunk n (n >= 1) corresponds to action n-1
    text = vars.prompt if idx == 0 else vars.actions[idx - 1]

    item = html.escape(text)
    if vars.adventure:
        # Add special formatting to adventure actions
        item = vars.acregex_ui.sub('<action>\\1</action>', item)

    chunk_html = f'<chunk n="{idx}" id="n{idx}" tabindex="-1">{formatforhtml(item)}</chunk>'
    emit('from_server', {'cmd': 'updatechunk', 'data': {'index': idx, 'html': chunk_html, 'last': (idx == len(vars.actions))}}, broadcast=True)
2021-06-15 06:59:08 +02:00
#==================================================================#
# Signals the Game Screen to remove one of the chunks
#==================================================================#
def remove_story_chunk(idx: int):
    """Signal the game screen to remove chunk *idx* from the story display."""
    emit('from_server', {'cmd': 'removechunk', 'data': idx}, broadcast=True)
2021-06-15 06:59:08 +02:00
2021-05-04 15:56:48 +02:00
#==================================================================#
# Sends the current generator settings to the Game Menu
#==================================================================#
def refresh_settings():
    """Push every current generator setting to the client settings menu."""
    # Suppress toggle change events while loading state
    emit('from_server', {'cmd': 'allowtoggle', 'data': False}, broadcast=True)

    if vars.model != "InferKit":
        # Local / remote transformers backends expose the full knob set
        updates = [
            ('updatetemp',   vars.temp),
            ('updatetopp',   vars.top_p),
            ('updatetopk',   vars.top_k),
            ('updatetfs',    vars.tfs),
            ('updatereppen', vars.rep_pen),
            ('updateoutlen', vars.genamt),
            ('updatetknmax', vars.max_length),
            ('updatenumseq', vars.numseqs),
        ]
    else:
        # InferKit exposes only a reduced set of settings
        updates = [
            ('updatetemp',  vars.temp),
            ('updatetopp',  vars.top_p),
            ('updateikgen', vars.ikgen),
        ]

    # Settings shared by every backend
    updates += [
        ('updateanotedepth',  vars.andepth),
        ('updatewidepth',     vars.widepth),
        ('updateuseprompt',   vars.useprompt),
        ('updateadventure',   vars.adventure),
        ('updatefrmttriminc', vars.formatoptns["frmttriminc"]),
        ('updatefrmtrmblln',  vars.formatoptns["frmtrmblln"]),
        ('updatefrmtrmspch',  vars.formatoptns["frmtrmspch"]),
        ('updatefrmtadsnsp',  vars.formatoptns["frmtadsnsp"]),
    ]

    for cmd, value in updates:
        emit('from_server', {'cmd': cmd, 'data': value}, broadcast=True)

    # Allow toggle events again
    emit('from_server', {'cmd': 'allowtoggle', 'data': True}, broadcast=True)
2021-05-04 15:56:48 +02:00
2021-05-03 00:46:45 +02:00
#==================================================================#
# Sets the logical and display states for the AI Busy condition
#==================================================================#
def set_aibusy(state):
    """Set the logical AI-busy flag and mirror it in the client game state."""
    vars.aibusy = bool(state)
    emit('from_server', {'cmd': 'setgamestate', 'data': 'wait' if state else 'ready'}, broadcast=True)
2021-05-03 00:46:45 +02:00
#==================================================================#
#
#==================================================================#
def editrequest(n):
    """Enter edit mode for chunk *n*: load its text into the input box."""
    # Chunk 0 is the prompt; any other chunk maps to an action
    txt = vars.prompt if n == 0 else vars.actions[n - 1]
    vars.editln = n
    emit('from_server', {'cmd': 'setinputtext', 'data': txt}, broadcast=True)
    emit('from_server', {'cmd': 'enablesubmit', 'data': ''}, broadcast=True)
2021-05-03 00:46:45 +02:00
#==================================================================#
#
#==================================================================#
def editsubmit(data):
    """Commit edited text for the chunk previously selected via editrequest()."""
    if vars.editln == 0:
        # Chunk 0 is the prompt itself
        vars.prompt = data
    else:
        vars.actions[vars.editln - 1] = data
    vars.mode = "play"
    update_story_chunk(vars.editln)
    emit('from_server', {'cmd': 'texteffect', 'data': vars.editln}, broadcast=True)
    emit('from_server', {'cmd': 'editmode', 'data': 'false'})
2021-05-03 00:46:45 +02:00
#==================================================================#
#
#==================================================================#
def deleterequest():
    """Delete the chunk selected via editrequest(); the prompt is protected."""
    if vars.editln == 0:
        # The prompt (chunk 0) cannot be deleted.  The old code had a
        # "Send error message" TODO with a bare pass; report it the same
        # way inlinedelete() does so the user gets feedback.
        emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."})
    else:
        del vars.actions[vars.editln - 1]
        vars.mode = "play"
        remove_story_chunk(vars.editln)
        emit('from_server', {'cmd': 'editmode', 'data': 'false'})
#==================================================================#
#
#==================================================================#
def inlineedit(chunk, data):
    """Apply an edit made in-place on the game screen to chunk *chunk*."""
    index = int(chunk)
    if index == 0:
        # Chunk 0 is the prompt
        vars.prompt = data
    else:
        vars.actions[index - 1] = data
    update_story_chunk(index)
    emit('from_server', {'cmd': 'texteffect', 'data': index}, broadcast=True)
    emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True)
#==================================================================#
#
#==================================================================#
def inlinedelete(chunk):
    """Delete chunk *chunk* via the in-place editor; the prompt is protected."""
    index = int(chunk)
    if index == 0:
        # Can't delete the prompt: redraw its chunk and report the error
        update_story_chunk(index)
        emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."})
    else:
        del vars.actions[index - 1]
        remove_story_chunk(index)
    # Both paths leave edit mode
    emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True)
2021-05-03 00:46:45 +02:00
#==================================================================#
# Toggles the game mode for memory editing and sends UI commands
#==================================================================#
def togglememorymode():
    """Toggle the UI between play mode and memory editing mode."""
    if vars.mode == "memory":
        # Leaving memory mode: back to play
        vars.mode = "play"
        emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True)
    elif vars.mode == "play":
        # Entering memory mode: preload the stored memory and author's note
        vars.mode = "memory"
        emit('from_server', {'cmd': 'memmode', 'data': 'true'}, broadcast=True)
        emit('from_server', {'cmd': 'setinputtext', 'data': vars.memory}, broadcast=True)
        emit('from_server', {'cmd': 'setanote', 'data': vars.authornote})
2021-05-03 00:46:45 +02:00
2021-05-13 07:26:42 +02:00
#==================================================================#
# Toggles the game mode for WI editing and sends UI commands
#==================================================================#
def togglewimode():
    """Toggle the UI between play mode and world info (WI) editing mode."""
    if vars.mode == "wi":
        # Commit WI fields first
        requestwi()
        # Then set UI state back to Play
        vars.mode = "play"
        emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True)
        sendwi()
    elif vars.mode == "play":
        vars.mode = "wi"
        emit('from_server', {'cmd': 'wimode', 'data': 'true'}, broadcast=True)
2021-05-13 07:26:42 +02:00
#==================================================================#
#
#==================================================================#
def addwiitem():
    """Append a fresh, uninitialized world info entry and notify the client."""
    ob = {
        "key": "",
        "keysecondary": "",
        "content": "",
        "num": len(vars.worldinfo),
        "init": False,
        "selective": False,
    }
    vars.worldinfo.append(ob)
    emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True)
2021-05-13 07:26:42 +02:00
#==================================================================#
#
#==================================================================#
def sendwi():
    """Rebuild the client-side world info list from the server's WI store."""
    # Clear contents of the client WI container first
    emit('from_server', {'cmd': 'clearwi', 'data': ''}, broadcast=True)

    if not vars.worldinfo:
        # No entries yet: seed the UI with one empty item
        addwiitem()
    else:
        # Send every existing entry...
        for entry in vars.worldinfo:
            emit('from_server', {'cmd': 'addwiitem', 'data': entry}, broadcast=True)
        # ...and keep the list terminated by an uninitialized item
        if vars.worldinfo[-1]["init"]:
            addwiitem()
#==================================================================#
# Request current contents of all WI HTML elements
#==================================================================#
def requestwi():
    """Request the current contents of all WI HTML inputs from the client.

    Sends the list of entry numbers; the client replies (via commitwi)
    with the matching field values.
    """
    # The old code built this in a local named `list`, shadowing the builtin;
    # a comprehension with a proper name is the idiomatic form.
    nums = [wi["num"] for wi in vars.worldinfo]
    emit('from_server', {'cmd': 'requestwiitem', 'data': nums}, broadcast=True)
2021-05-13 07:26:42 +02:00
#==================================================================#
# Renumber WI items consecutively
#==================================================================#
def organizewi():
    """Renumber world info entries consecutively from zero."""
    # enumerate replaces the manual counter; a no-op on an empty list
    for position, entry in enumerate(vars.worldinfo):
        entry["num"] = position
#==================================================================#
# Extract object from server and send it to WI objects
#==================================================================#
def commitwi(ar):
    """Copy field values received from the client into the WI store.

    ar -- list of objects, each carrying an entry number and its fields.
    Also honors a pending deletion request (vars.deletewi) if one is set.
    """
    for ob in ar:
        entry = vars.worldinfo[ob["num"]]
        entry["key"]          = ob["key"]
        entry["keysecondary"] = ob["keysecondary"]
        entry["content"]      = ob["content"]
        entry["selective"]    = ob["selective"]

    # Was this a deletion request? If so, remove the requested index,
    # renumber, resend the array, and clear the pending-delete marker.
    if vars.deletewi >= 0:
        del vars.worldinfo[vars.deletewi]
        organizewi()
        sendwi()
        vars.deletewi = -1
#==================================================================#
#
#==================================================================#
def deletewi(num):
    """Request deletion of WI entry *num*; the removal happens in commitwi()."""
    if num < len(vars.worldinfo):
        # Remember which index to drop, then pull the current field values
        # from the client; commitwi() performs the actual delete
        vars.deletewi = num
        requestwi()
#==================================================================#
2021-05-18 23:59:59 +02:00
# Look for WI keys in text to generator
2021-05-13 07:26:42 +02:00
#==================================================================#
def checkworldinfo(txt):
    """Scan the submission (and recent action history) for WI key matches.

    txt -- the text about to be sent to the generator
    Returns the concatenated "content" of every matching world info entry,
    one per line.  NOTE(review): returns None (bare return) when the WI
    list is empty but "" when nothing matched — callers must tolerate both.
    """
    # Dont go any further if WI is empty
    if(len(vars.worldinfo) == 0):
        return
    
    # Cache actions length
    ln = len(vars.actions)
    
    # Don't bother calculating action history if widepth is 0
    if(vars.widepth > 0):
        depth = vars.widepth
        # If this is not a continue, add 1 to widepth since submitted
        # text is already in action history @ -1
        if(txt != "" and vars.prompt != txt):
            txt    = ""
            depth += 1
        
        # Build the text to scan from the last `depth` actions, prepending
        # the prompt when history is shorter than the requested depth
        if(ln >= depth):
            txt = "".join(vars.actions[(depth*-1):])
        elif(ln > 0):
            txt = vars.prompt + "".join(vars.actions[(depth*-1):])
        elif(ln == 0):
            txt = vars.prompt
    
    # Scan text for matches on WI keys
    wimem = ""
    for wi in vars.worldinfo:
        if(wi["key"] != ""):
            # Split comma-separated keys; .get() tolerates entries created
            # before secondary/selective keys existed
            keys = wi["key"].split(",")
            keys_secondary = wi.get("keysecondary", "").split(",")
            
            for k in keys:
                ky = k
                # Remove leading/trailing spaces if the option is enabled
                if(vars.wirmvwhtsp):
                    ky = k.strip()
                if ky in txt:
                    if wi.get("selective", False) and len(keys_secondary):
                        # Selective entry: a secondary key must also match
                        # before the content is included
                        found = False
                        for ks in keys_secondary:
                            ksy = ks
                            if(vars.wirmvwhtsp):
                                ksy = ks.strip()
                            if ksy in txt:
                                wimem = wimem + wi["content"] + "\n"
                                found = True
                                break
                        if found:
                            break
                    else:
                        wimem = wimem + wi["content"] + "\n"
                        break
    
    return wimem
2021-05-03 00:46:45 +02:00
#==================================================================#
# Commit changes to Memory storage
#==================================================================#
def memsubmit(data):
    """Store submitted memory text and return the UI to play mode."""
    # Maybe check for length at some point; for now just send it to storage
    vars.memory = data
    vars.mode = "play"
    emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True)
    # Ask for contents of Author's Note field
    emit('from_server', {'cmd': 'getanote', 'data': ''}, broadcast=True)
2021-05-05 09:04:06 +02:00
#==================================================================#
# Commit changes to Author's Note
#==================================================================#
def anotesubmit(data):
    """Commit the submitted Author's Note text to storage.

    Length validation may be added later; for now the text is stored as-is.
    """
    vars.authornote = data
2021-05-03 00:46:45 +02:00
#==================================================================#
# Assembles game data into a request to InferKit API
#==================================================================#
def ikrequest(txt):
    """Assemble game data into a request to the InferKit API and handle the reply.

    txt -- the fully assembled context to send
    On success the generated text is appended to the story and the screen is
    updated; on failure an error message is emitted to the client.  The
    AI-busy flag is cleared on both paths.
    """
    # Log request to console
    print("{0}Len:{1}, Txt:{2}{3}".format(colors.YELLOW, len(txt), txt, colors.END))
    
    # Build request JSON data
    reqdata = {
        'forceNoEnd': True,
        'length': vars.ikgen,
        'prompt': {
            'isContinuation': False,
            'text': txt
        },
        'startFromBeginning': False,
        'streamResponse': False,
        'temperature': vars.temp,
        'topP': vars.top_p
    }
    
    # Create request
    req = requests.post(
        vars.url,
        json    = reqdata,
        headers = {
            'Authorization': 'Bearer '+vars.apikey
            }
        )
    
    # Deal with the response
    if(req.status_code == 200):
        genout = req.json()["data"]["text"]
        print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))
        vars.actions.append(genout)
        update_story_chunk('last')
        emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}, broadcast=True)
        set_aibusy(0)
    else:
        # Send error message to web client.  InferKit has used both "error"
        # and "errors" envelope shapes; default `code` so we don't raise
        # NameError if neither key is present (the old code left it unbound).
        er = req.json()
        code = "unknown"
        if("error" in er):
            code = er["error"]["extensions"]["code"]
        elif("errors" in er):
            code = er["errors"][0]["extensions"]["code"]
        
        errmsg = "InferKit API Error: {0} - {1}".format(req.status_code, code)
        emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
        set_aibusy(0)
2021-05-22 11:28:40 +02:00
#==================================================================#
# Assembles game data into a request to OpenAI API
#==================================================================#
def oairequest(txt, min, max):
    """Assemble game data into a request to the OpenAI API and handle the reply.

    txt -- prompt text to send
    min/max -- token bounds (names shadow the builtins but are part of the
               existing interface; only max is forwarded to the API)
    """
    # Log request to console
    print("{0}Len:{1}, Txt:{2}{3}".format(colors.YELLOW, len(txt), txt, colors.END))
    
    # Store context in memory to use it for comparison with generated content
    vars.lastctx = txt
    
    # Build request JSON data
    reqdata = {
        'prompt': txt,
        'max_tokens': max,
        'temperature': vars.temp,
        'top_p': vars.top_p,
        'n': 1,
        'stream': False
    }
    
    req = requests.post(
        vars.oaiurl,
        json    = reqdata,
        headers = {
            'Authorization': 'Bearer '+vars.oaiapikey,
            'Content-Type': 'application/json'
            }
        )
    
    # Deal with the response
    if(req.status_code == 200):
        genout = req.json()["choices"][0]["text"]
        print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))
        vars.actions.append(genout)
        update_story_chunk('last')
        emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}, broadcast=True)
        set_aibusy(0)
    else:
        # Send error message to web client.  Default the fields so a response
        # without an "error" object can't raise NameError (the old code left
        # them unbound and also shadowed the builtin `type`).
        er = req.json()
        err_type = "unknown"
        message  = ""
        if("error" in er):
            err_type = er["error"]["type"]
            message  = er["error"]["message"]
        
        errmsg = "OpenAI API Error: {0} - {1}".format(err_type, message)
        emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True)
        set_aibusy(0)
2021-05-03 00:46:45 +02:00
#==================================================================#
# Forces UI to Play mode
#==================================================================#
def exitModes():
    """Force the UI back to play mode, closing any edit/memory/WI overlay."""
    # Map each modal mode to the UI command that dismisses it
    close_cmds = {"edit": "editmode", "memory": "memmode", "wi": "wimode"}
    cmd = close_cmds.get(vars.mode)
    if cmd is not None:
        emit('from_server', {'cmd': cmd, 'data': 'false'}, broadcast=True)
    vars.mode = "play"
#==================================================================#
2021-05-22 11:28:40 +02:00
# Launch in-browser save prompt
#==================================================================#
def saveas(name):
    """Save the story as *name*, asking for confirmation before overwriting."""
    name = utils.cleanfilename(name)
    # An overwrite is allowed only when the user already confirmed this name
    overwrite_ok = vars.saveow and vars.svowname == name
    if fileops.saveexists(name) and not overwrite_ok:
        # File exists and not yet confirmed: prompt for overwrite
        vars.saveow   = True
        vars.svowname = name
        emit('from_server', {'cmd': 'askforoverwrite', 'data': ''})
    else:
        # All clear to save
        saveRequest(getcwd()+"/stories/"+name+".json")
        emit('from_server', {'cmd': 'hidesaveas', 'data': ''})
        vars.saveow   = False
        vars.svowname = ""
2021-05-03 00:46:45 +02:00
#==================================================================#
2021-05-22 11:28:40 +02:00
# Save the currently running story
#==================================================================#
def save():
    """Save to the currently open story file, or open the save-as prompt."""
    # A .json path in savedir means a story file is currently open
    if ".json" not in vars.savedir:
        emit('from_server', {'cmd': 'saveas', 'data': ''})
    else:
        saveRequest(vars.savedir)
#==================================================================#
# Save the story via file browser
#==================================================================#
def savetofile():
    """Save the story to a location chosen via the native file browser."""
    target = fileops.getsavepath(vars.savedir, "Save Story As", [("Json", "*.json")])
    # saveRequest is a no-op when the user cancels the dialog
    saveRequest(target)
#==================================================================#
# Save the story to specified path
#==================================================================#
def saveRequest(savpath):
    """Save the current story state to *savpath* as JSON.

    Does nothing when savpath is falsy (e.g. the user cancelled a dialog).
    """
    if(savpath):
        # Leave Edit/Memory mode before continuing
        exitModes()
        
        # Save path for future saves
        vars.savedir = savpath
        
        # Build json to write
        js = {}
        js["gamestarted"] = vars.gamestarted
        js["prompt"]      = vars.prompt
        js["memory"]      = vars.memory
        js["authorsnote"] = vars.authornote
        js["actions"]     = vars.actions
        js["worldinfo"]   = []
        
        # Extract only the important bits of WI
        for wi in vars.worldinfo:
            if(wi["key"] != ""):
                js["worldinfo"].append({
                    "key": wi["key"],
                    "keysecondary": wi["keysecondary"],
                    "content": wi["content"],
                    "selective": wi["selective"]
                })
        
        # Write it; the context manager replaces the old try/finally and
        # guarantees the handle is closed even if serialization fails
        with open(savpath, "w") as file:
            file.write(json.dumps(js, indent=3))
        
        print("{0}Story saved to {1}!{2}".format(colors.GREEN, path.basename(savpath), colors.END))
2021-05-03 00:46:45 +02:00
#==================================================================#
2021-05-22 11:28:40 +02:00
# Load a saved story via file browser
#==================================================================#
def getloadlist():
    # Build the in-browser load menu from the story files on disk
    emit('from_server', {'cmd': 'buildload', 'data': fileops.getstoryfiles()})
#==================================================================#
# Load a saved story via file browser
2021-05-03 00:46:45 +02:00
#==================================================================#
2021-05-22 11:28:40 +02:00
def loadfromfile():
    """Load a saved story chosen via the native file browser."""
    chosen = fileops.getloadpath(vars.savedir, "Select Story File", [("Json", "*.json")])
    # loadRequest is a no-op when the user cancels the dialog
    loadRequest(chosen)
#==================================================================#
# Load a stored story from a file
#==================================================================#
def loadRequest(loadpath):
    """Load a stored story from the JSON file at *loadpath*.

    Does nothing when loadpath is falsy.  Restores prompt, memory, actions,
    author's note and world info (tolerating older save formats), then
    refreshes the client.
    """
    if(loadpath):
        # Leave Edit/Memory mode before continuing
        exitModes()
        
        # Read file contents into JSON object; the context manager closes the
        # handle even if parsing raises (the old code leaked it on error)
        with open(loadpath, "r") as file:
            js = json.load(file)
        
        # Copy file contents to vars
        vars.gamestarted = js["gamestarted"]
        vars.prompt      = js["prompt"]
        vars.memory      = js["memory"]
        vars.actions     = js["actions"]
        vars.worldinfo   = []
        vars.lastact     = ""
        vars.lastctx     = ""
        
        # Try not to break older save files
        if("authorsnote" in js):
            vars.authornote = js["authorsnote"]
        else:
            vars.authornote = ""
        
        if("worldinfo" in js):
            num = 0
            for wi in js["worldinfo"]:
                vars.worldinfo.append({
                    "key": wi["key"],
                    # Older saves predate secondary/selective keys
                    "keysecondary": wi.get("keysecondary", ""),
                    "content": wi["content"],
                    "num": num,
                    "init": True,
                    "selective": wi.get("selective", False)
                })
                num += 1
        
        # Save path for save button
        vars.savedir = loadpath
        
        # Clear loadselect var
        vars.loadselect = ""
        
        # Refresh game screen
        sendwi()
        refresh_story()
        emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True)
        emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True)
        print("{0}Story loaded from {1}!{2}".format(colors.GREEN, path.basename(loadpath), colors.END))
2021-05-03 00:46:45 +02:00
2021-05-11 06:27:34 +02:00
#==================================================================#
# Import an AIDungon game exported with Mimi's tool
#==================================================================#
def importRequest():
    """Import an AI Dungeon adventure exported with Mimi's AID CAT tool.

    Prompts the user for a JSON export file, loads it into vars.importjs,
    and fills the client-side popup with one line per story so the user
    can pick which adventure to import (completed later by importgame()).
    """
    importpath = fileops.getloadpath(vars.savedir, "Select AID CAT File", [("Json", "*.json")])
    if(importpath):
        # Leave Edit/Memory mode before continuing
        exitModes()
        # Read file contents into JSON object; context manager guarantees
        # the handle is closed even if json.load raises (original leaked it)
        with open(importpath, "rb") as file:
            vars.importjs = json.load(file)
        
        # If a bundle file is being imported, select just the Adventures object
        if type(vars.importjs) is dict and "stories" in vars.importjs:
            vars.importjs = vars.importjs["stories"]
        
        # Clear Popup Contents
        emit('from_server', {'cmd': 'clearpopup', 'data': ''}, broadcast=True)
        
        # Initialize vars
        num = 0
        vars.importnum = -1
        # Get list of stories; substitute placeholders for missing metadata
        for story in vars.importjs:
            ob = {}
            ob["num"] = num
            if(story["title"] != "" and story["title"] != None):
                ob["title"] = story["title"]
            else:
                ob["title"] = "(No Title)"
            if(story["description"] != "" and story["description"] != None):
                ob["descr"] = story["description"]
            else:
                ob["descr"] = "(No Description)"
            # Different export-script versions name the action list differently
            if("actions" in story):
                ob["acts"] = len(story["actions"])
            elif("actionWindow" in story):
                ob["acts"] = len(story["actionWindow"])
            emit('from_server', {'cmd': 'addimportline', 'data': ob})
            num += 1
        
        # Show Popup
        emit('from_server', {'cmd': 'popupshow', 'data': True})
#==================================================================#
# Import an AI Dungeon game selected in popup
#==================================================================#
def importgame():
    """Finish importing the AID CAT adventure selected in the popup.

    Copies the story at vars.importnum out of vars.importjs into the game
    state (prompt, actions, memory, author's note, world info), resets the
    save path, and refreshes the client. No-op if nothing is selected.
    """
    if(vars.importnum >= 0):
        # Cache reference to selected game
        ref = vars.importjs[vars.importnum]
        # Copy game contents to vars
        vars.gamestarted = True
        
        # Support for different versions of export script:
        # entry 0 of the action list is the prompt itself
        if("actions" in ref):
            if(len(ref["actions"]) > 0):
                vars.prompt = ref["actions"][0]["text"]
            else:
                vars.prompt = ""
        elif("actionWindow" in ref):
            if(len(ref["actionWindow"]) > 0):
                vars.prompt = ref["actionWindow"][0]["text"]
            else:
                vars.prompt = ""
        else:
            vars.prompt = ""
        vars.memory     = ref["memory"]
        # Older exports may omit authorsNote entirely or store null;
        # .get() avoids a KeyError the original could raise
        vars.authornote = ref["authorsNote"] if type(ref.get("authorsNote")) is str else ""
        vars.actions    = []
        vars.worldinfo  = []
        vars.lastact    = ""
        vars.lastctx    = ""
        
        # Get all actions except for prompt (index 0)
        if("actions" in ref):
            if(len(ref["actions"]) > 1):
                for act in ref["actions"][1:]:
                    vars.actions.append(act["text"])
        elif("actionWindow" in ref):
            if(len(ref["actionWindow"]) > 1):
                for act in ref["actionWindow"][1:]:
                    vars.actions.append(act["text"])
        
        # Get just the important parts of world info
        # (.get() so exports without a worldInfo key don't crash)
        if(ref.get("worldInfo") != None):
            # Original tested "> 1", silently dropping single-entry world
            # info; unlike actions there is no prompt offset here
            if(len(ref["worldInfo"]) > 0):
                num = 0
                for wi in ref["worldInfo"]:
                    vars.worldinfo.append({
                        "key": wi["keys"],
                        "keysecondary": wi.get("keysecondary", ""),
                        "content": wi["entry"],
                        "num": num,
                        "init": True,
                        "selective": wi.get("selective", False)
                    })
                    num += 1
        
        # Clear import data
        vars.importjs = {}
        
        # Reset current save path; path.join replaces the original's
        # Windows-only hard-coded "\stories" suffix
        vars.savedir = path.join(getcwd(), "stories")
        
        # Refresh game screen
        sendwi()
        refresh_story()
        emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True)
        emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True)
2021-05-11 06:27:34 +02:00
2021-05-16 11:29:39 +02:00
#==================================================================#
# Import an aidg.club prompt and start a new game with it.
#==================================================================#
2021-05-16 20:53:19 +02:00
def importAidgRequest(id):
    """Import an aidg.club prompt by id and start a new game with it.

    Fetches the prompt JSON from the aidg.club API; on HTTP 200 the current
    game state is replaced with it. Any other status is silently ignored
    (best-effort, matching the original behavior).
    """
    exitModes()
    
    urlformat = "https://prompts.aidg.club/api/"
    # Timeout so a stalled remote request cannot hang the server forever
    req = requests.get(urlformat+id, timeout=30)
    
    if(req.status_code == 200):
        js = req.json()
        
        # Import game state
        vars.gamestarted = True
        vars.prompt      = js["promptContent"]
        vars.memory      = js["memory"]
        vars.authornote  = js["authorsNote"]
        vars.actions     = []
        vars.worldinfo   = []
        vars.lastact     = ""
        vars.lastctx     = ""
        
        # Map the API's world info fields onto the internal format
        num = 0
        for wi in js["worldInfos"]:
            vars.worldinfo.append({
                "key": wi["keys"],
                "keysecondary": wi.get("keysecondary", ""),
                "content": wi["entry"],
                "num": num,
                "init": True,
                "selective": wi.get("selective", False)
            })
            num += 1
        
        # Reset current save path; path.join replaces the original's
        # Windows-only hard-coded "\stories" suffix
        vars.savedir = path.join(getcwd(), "stories")
        
        # Refresh game screen
        sendwi()
        refresh_story()
        emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True)
2021-05-16 11:29:39 +02:00
2021-05-29 11:46:03 +02:00
#==================================================================#
# Import World Info JSON file
#==================================================================#
def wiimportrequest():
    """Import world info entries from a JSON file and append them.

    Prompts for a file; each entry's "keys"/"entry" fields are mapped to
    the internal "key"/"content" format. A trailing blank (uninitialized)
    WI row is removed first so the imported entries land cleanly.
    """
    importpath = fileops.getloadpath(vars.savedir, "Select World Info File", [("Json", "*.json")])
    if(importpath):
        # Context manager closes the file even if json.load raises
        # (the original never closed the handle)
        with open(importpath, "rb") as file:
            js = json.load(file)
        if(len(js) > 0):
            # If the most recent WI entry is blank, remove it.
            # Guard the index so an empty worldinfo list doesn't crash.
            if(len(vars.worldinfo) > 0 and not vars.worldinfo[-1]["init"]):
                del vars.worldinfo[-1]
            # Now grab the new stuff
            num = len(vars.worldinfo)
            for wi in js:
                vars.worldinfo.append({
                    "key": wi["keys"],
                    "keysecondary": wi.get("keysecondary", ""),
                    "content": wi["entry"],
                    "num": num,
                    "init": True,
                    "selective": wi.get("selective", False)
                })
                num += 1
        # (dropped leftover debug print of vars.worldinfo[0])
        # Refresh game screen
        sendwi()
2021-05-03 00:46:45 +02:00
#==================================================================#
# Starts a new story
#==================================================================#
def newGameRequest():
    """Start a new story: clear all game state and reset the UI."""
    # Leave Edit/Memory mode before continuing
    exitModes()
    
    # Clear vars values
    vars.gamestarted = False
    vars.prompt      = ""
    vars.memory      = ""
    vars.actions     = []
    vars.authornote  = ""
    vars.worldinfo   = []
    vars.lastact     = ""
    vars.lastctx     = ""
    
    # Reset current save path; path.join replaces the original's
    # Windows-only hard-coded "\stories" suffix
    vars.savedir = path.join(getcwd(), "stories")
    
    # Refresh game screen
    sendwi()
    setStartState()
2021-05-03 00:46:45 +02:00
2021-08-19 12:54:44 +02:00
def randomGameRequest(topic):
    """Start a fresh story seeded from a topic hint placed in memory."""
    # Begin from a completely clean game state
    newGameRequest()
    # Temporarily steer generation toward the requested topic...
    vars.memory = "You generate the following {} story concept :".format(topic)
    actionsubmit("")
    # ...then clear the hint so it doesn't linger in story memory
    vars.memory = ""
2021-05-03 00:46:45 +02:00
#==================================================================#
2021-05-07 20:32:10 +02:00
# Final startup commands to launch Flask app
2021-05-03 00:46:45 +02:00
#==================================================================#
if __name__ == "__main__":
    # Load settings from client.settings
    loadsettings()
    # Start Flask/SocketIO (Blocking, so this must be last method!)
    print("{0}Server started!\rYou may now connect with a browser at http://127.0.0.1:5000/{1}".format(colors.GREEN, colors.END))
    #socketio.run(app, host='0.0.0.0', port=5000)
    if(vars.remote):
        # Remote mode: expose the server through a Cloudflare tunnel and
        # listen on all interfaces so the tunnel can reach it
        from flask_cloudflared import start_cloudflared
        start_cloudflared(5000)
        socketio.run(app, host='0.0.0.0', port=5000)
    else:
        # Local mode: open the user's browser, then serve with defaults
        # (localhost only). NOTE(review): the browser is launched before
        # the blocking server starts — presumably relies on fast startup.
        import webbrowser
        webbrowser.open_new('http://localhost:5000')
        socketio.run(app)