diff --git a/.gitignore b/.gitignore
index ff83d6e7..dd922f16 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,4 +3,16 @@ client.settings
 
 # Ignore stories file except for test_story
 stories/*
-!stories/sample_story.json
\ No newline at end of file
+settings/*
+!stories/sample_story.json
+/.project
+*.bak
+miniconda3/*
+*.settings
+__pycache__
+
+# Ignore PyCharm project files.
+.idea
+
+# Ignore compiled Python files.
+*.pyc
diff --git a/UPDATE YOUR COLAB NOTEBOOK.txt b/UPDATE YOUR COLAB NOTEBOOK.txt
deleted file mode 100644
index 67a821f4..00000000
--- a/UPDATE YOUR COLAB NOTEBOOK.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-If you use Google Colab to run your models, and you made a local copy of the Colab notebook in Google Drive instead of using the community notebook, you MUST make a new copy of the community notebook to use the new multiple-sequence generation feature. The link is below:
-
-https://colab.research.google.com/drive/1uGe9f4ruIQog3RLxfUsoThakvLpHjIkX?usp=sharing
\ No newline at end of file
diff --git a/aiserver.py b/aiserver.py
index 8dcec2af..ccc765e1 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -1,22 +1,31 @@
 #==================================================================#
-# KoboldAI Client
-# Version: 1.15.0
-# By: KoboldAIDev
+# KoboldAI
+# Version: 1.16.0
+# By: KoboldAIDev and the KoboldAI Community
 #==================================================================#
 
 # External packages
+import os
 from os import path, getcwd
+import re
 import tkinter as tk
 from tkinter import messagebox
 import json
+import collections
+from typing import Union
+
 import requests
 import html
+import argparse
+import sys
+import gc
 
 # KoboldAI
 import fileops
 import gensettings
 from utils import debounce
 import utils
+import structures
 
 #==================================================================#
 # Variables & Storage
@@ -35,15 +44,15 @@ class colors:
 
 # AI models
 modellist = [
+    ["Custom Neo (GPT-Neo / Converted GPT-J)", "NeoCustom", ""],
+    ["Custom GPT-2 (eg CloverEdition)", "GPT2Custom", ""],
     ["GPT Neo 1.3B", "EleutherAI/gpt-neo-1.3B", "8GB"],
     ["GPT Neo 2.7B", "EleutherAI/gpt-neo-2.7B", "16GB"],
-    ["GPT-2", "gpt2", "1.2GB"],
+    ["GPT-2", "gpt2", "1GB"],
     ["GPT-2 Med", "gpt2-medium", "2GB"],
-    ["GPT-2 Large", "gpt2-large", "16GB"],
-    ["GPT-2 XL", "gpt2-xl", "16GB"],
+    ["GPT-2 Large", "gpt2-large", "4GB"],
+    ["GPT-2 XL", "gpt2-xl", "8GB"],
     ["InferKit API (requires API key)", "InferKit", ""],
-    ["Custom Neo (eg Neo-horni)", "NeoCustom", ""],
-    ["Custom GPT-2 (eg CloverEdition)", "GPT2Custom", ""],
     ["Google Colab", "Colab", ""],
     ["OpenAI API (requires API key)", "OAI", ""],
     ["Read Only (No AI)", "ReadOnly", ""]
@@ -56,26 +65,28 @@ class vars:
     model = "" # Model ID string chosen at startup
     noai = False # Runs the script without starting up the transformers pipeline
     aibusy = False # Stops submissions while the AI is working
-    max_length = 512 # Maximum number of tokens to submit per action
+    max_length = 1024 # Maximum number of tokens to submit per action
     ikmax = 3000 # Maximum number of characters to submit to InferKit
-    genamt = 60 # Amount of text for each action to generate
+    genamt = 80 # Amount of text for each action to generate
     ikgen = 200 # Number of characters for InferKit to generate
-    rep_pen = 1.0 # Default generator repetition_penalty
-    temp = 1.0 # Default generator temperature
-    top_p = 1.0 # Default generator top_p
+    rep_pen = 1.1 # Default generator repetition_penalty
+    temp = 0.5 # Default generator temperature
+    top_p = 0.9 # Default generator top_p
+    top_k = 0 # Default 
generator top_k + tfs = 1.0 # Default generator tfs (tail-free sampling) numseqs = 1 # Number of sequences to ask the generator to create gamestarted = False # Whether the game has started (disables UI elements) prompt = "" # Prompt memory = "" # Text submitted to memory field authornote = "" # Text submitted to Author's Note field andepth = 3 # How far back in history to append author's note - actions = [] # Array of actions submitted by user and AI + actions = structures.KoboldStoryRegister() # Actions submitted by user and AI worldinfo = [] # Array of World Info key/value objects - badwords = [] # Array of str/chr values that should be removed from output - badwordsids = [] # Tokenized array of badwords + # badwords = [] # Array of str/chr values that should be removed from output + badwordsids = [[13460], [6880], [50256], [42496], [4613], [17414], [22039], [16410], [27], [29], [38430], [37922], [15913], [24618], [28725], [58], [47175], [36937], [26700], [12878], [16471], [37981], [5218], [29795], [13412], [45160], [3693], [49778], [4211], [20598], [36475], [33409], [44167], [32406], [29847], [29342], [42669], [685], [25787], [7359], [3784], [5320], [33994], [33490], [34516], [43734], [17635], [24293], [9959], [23785], [21737], [28401], [18161], [26358], [32509], [1279], [38155], [18189], [26894], [6927], [14610], [23834], [11037], [14631], [26933], [46904], [22330], [25915], [47934], [38214], [1875], [14692], [41832], [13163], [25970], [29565], [44926], [19841], [37250], [49029], [9609], [44438], [16791], [17816], [30109], [41888], [47527], [42924], [23984], [49074], [33717], [31161], [49082], [30138], [31175], [12240], [14804], [7131], [26076], [33250], [3556], [38381], [36338], [32756], [46581], [17912], [49146]] # Tokenized array of badwords used to prevent AI artifacting deletewi = -1 # Temporary storage for index to delete wirmvwhtsp = False # Whether to remove leading whitespace from WI entries - widepth = 1 # How many historical actions to scan for WI hits + widepth = 3 # How many historical actions to scan for WI hits mode = "play" # Whether the interface is in play, memory, or edit mode editln = 0 # Which line was last selected in Edit Mode url = "https://api.inferkit.com/v1/models/standard/generate" # InferKit API URL @@ -88,14 +99,25 @@ class vars: hascuda = False # Whether torch has detected CUDA on the system usegpu = False # Whether to launch pipeline with GPU support custmodpth = "" # Filesystem location of custom model to run - formatoptns = {} # Container for state of formatting options + formatoptns = {'frmttriminc': True, 'frmtrmblln': False, 'frmtrmspch': False, 'frmtadsnsp': False} # Container for state of formatting options importnum = -1 # Selection on import popup list importjs = {} # Temporary storage for import data loadselect = "" # Temporary storage for filename to load svowname = "" # Filename that was flagged for overwrite confirm saveow = False # Whether or not overwrite confirm has been displayed genseqs = [] # Temporary storage for generated sequences + recentback = False # Whether Back button was recently used without Submitting or Retrying after useprompt = True # Whether to send the full prompt with every submit action + breakmodel = False # For GPU users, whether to use both system RAM and VRAM to conserve VRAM while offering speedup compared to CPU-only + bmsupported = False # Whether the breakmodel option is supported (GPT-Neo/GPT-J only, currently) + smandelete = False # Whether stories can be deleted from inside the browser + smanrename = False 
# Whether stories can be renamed from inside the browser + laststory = None # Filename (without extension) of most recent story JSON file we loaded + acregex_ai = re.compile(r'\n* *>(.|\n)*') # Pattern for matching adventure actions from the AI so we can remove them + acregex_ui = re.compile(r'^ *(>.*)$', re.MULTILINE) # Pattern for matching actions in the HTML-escaped story so we can apply colouring, etc (make sure to encase part to format in parentheses) + actionmode = 1 + adventure = False + remote = False #==================================================================# # Function to get model selection at startup @@ -141,13 +163,56 @@ def gettokenids(char): keys.append(key) return keys +#==================================================================# +# Return Model Name +#==================================================================# +def getmodelname(): + if(args.configname): + modelname = args.configname + return modelname + if(vars.model == "NeoCustom" or vars.model == "GPT2Custom"): + modelname = os.path.basename(os.path.normpath(vars.custmodpth)) + return modelname + else: + modelname = vars.model + return modelname + #==================================================================# # Startup #==================================================================# +# Parsing Parameters +parser = argparse.ArgumentParser(description="KoboldAI Server") +parser.add_argument("--remote", action='store_true', help="Optimizes KoboldAI for Remote Play") +parser.add_argument("--model", help="Specify the Model Type to skip the Menu") +parser.add_argument("--path", help="Specify the Path for local models (For model NeoCustom or GPT2Custom)") +parser.add_argument("--cpu", action='store_true', help="By default unattended launches are on the GPU use this option to force CPU usage.") +parser.add_argument("--breakmodel", action='store_true', help="For models that support GPU-CPU hybrid generation, use this feature instead of GPU or CPU generation") +parser.add_argument("--breakmodel_layers", type=int, help="Specify the number of layers to commit to system RAM if --breakmodel is used") +parser.add_argument("--override_delete", action='store_true', help="Deleting stories from inside the browser is disabled if you are using --remote and enabled otherwise. Using this option will instead allow deleting stories if using --remote and prevent deleting stories otherwise.") +parser.add_argument("--override_rename", action='store_true', help="Renaming stories from inside the browser is disabled if you are using --remote and enabled otherwise. 
Using this option will instead allow renaming stories if using --remote and prevent renaming stories otherwise.") +parser.add_argument("--configname", help="Force a fixed configuration name to aid with config management.") + +args = parser.parse_args() +vars.model = args.model; + +if args.remote: + vars.remote = True; + +vars.smandelete = vars.remote == args.override_delete +vars.smanrename = vars.remote == args.override_rename + # Select a model to run -print("{0}Welcome to the KoboldAI Client!\nSelect an AI model to continue:{1}\n".format(colors.CYAN, colors.END)) -getModelSelection() +if args.model: + print("Welcome to KoboldAI!\nYou have selected the following Model:", vars.model) + if args.path: + print("You have selected the following path for your Model :", args.path) + vars.custmodpth = args.path; + vars.colaburl = args.path + "/request"; # Lets just use the same parameter to keep it simple + +else: + print("{0}Welcome to the KoboldAI Server!\nSelect an AI model to continue:{1}\n".format(colors.CYAN, colors.END)) + getModelSelection() # If transformers model was selected & GPU available, ask to use CPU or GPU if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]): @@ -155,24 +220,50 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]): import torch print("{0}Looking for GPU support...{1}".format(colors.PURPLE, colors.END), end="") vars.hascuda = torch.cuda.is_available() + vars.bmsupported = vars.model in ("EleutherAI/gpt-neo-1.3B", "EleutherAI/gpt-neo-2.7B", "NeoCustom") if(vars.hascuda): print("{0}FOUND!{1}".format(colors.GREEN, colors.END)) else: print("{0}NOT FOUND!{1}".format(colors.YELLOW, colors.END)) - if(vars.hascuda): - print("{0}Use GPU or CPU for generation?: (Default GPU){1}\n".format(colors.CYAN, colors.END)) - print(" 1 - GPU\n 2 - CPU\n") + if args.model: + if(vars.hascuda): + genselected = True + vars.usegpu = True + vars.breakmodel = False + if(args.cpu): + vars.usegpu = False + vars.breakmodel = False + if(vars.bmsupported and args.breakmodel): + vars.usegpu = False + vars.breakmodel = True + elif(vars.hascuda): + if(vars.bmsupported): + print(colors.YELLOW + "You're using a model that supports GPU-CPU hybrid generation!\nCurrently only GPT-Neo models and GPT-J-6B support this feature.") + print("{0}Use GPU or CPU for generation?: (Default GPU){1}".format(colors.CYAN, colors.END)) + if(vars.bmsupported): + print(f" 1 - GPU\n 2 - CPU\n 3 - Both (slower than GPU-only but uses less VRAM)\n") + else: + print(" 1 - GPU\n 2 - CPU\n") genselected = False + + if(vars.hascuda): while(genselected == False): genselect = input("Mode> ") if(genselect == ""): + vars.breakmodel = False vars.usegpu = True genselected = True elif(genselect.isnumeric() and int(genselect) == 1): + vars.breakmodel = False vars.usegpu = True genselected = True elif(genselect.isnumeric() and int(genselect) == 2): + vars.breakmodel = False + vars.usegpu = False + genselected = True + elif(vars.bmsupported and genselect.isnumeric() and int(genselect) == 3): + vars.breakmodel = True vars.usegpu = False genselected = True else: @@ -180,12 +271,12 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]): # Ask for API key if InferKit was selected if(vars.model == "InferKit"): - if(not path.exists("client.settings")): + if(not path.exists("settings/" + getmodelname() + ".settings")): # If the client settings file doesn't exist, create it print("{0}Please enter your InferKit API key:{1}\n".format(colors.CYAN, colors.END)) vars.apikey = input("Key> ") # Write API key to file - 
file = open("client.settings", "w") + file = open("settings/" + getmodelname() + ".settings", "w") try: js = {"apikey": vars.apikey} file.write(json.dumps(js, indent=3)) @@ -193,7 +284,7 @@ if(vars.model == "InferKit"): file.close() else: # Otherwise open it up - file = open("client.settings", "r") + file = open("settings/" + getmodelname() + ".settings", "r") # Check if API key exists js = json.load(file) if("apikey" in js and js["apikey"] != ""): @@ -206,7 +297,7 @@ if(vars.model == "InferKit"): vars.apikey = input("Key> ") js["apikey"] = vars.apikey # Write API key to file - file = open("client.settings", "w") + file = open("settings/" + getmodelname() + ".settings", "w") try: file.write(json.dumps(js, indent=3)) finally: @@ -214,12 +305,12 @@ if(vars.model == "InferKit"): # Ask for API key if OpenAI was selected if(vars.model == "OAI"): - if(not path.exists("client.settings")): + if(not path.exists("settings/" + getmodelname() + ".settings")): # If the client settings file doesn't exist, create it print("{0}Please enter your OpenAI API key:{1}\n".format(colors.CYAN, colors.END)) vars.oaiapikey = input("Key> ") # Write API key to file - file = open("client.settings", "w") + file = open("settings/" + getmodelname() + ".settings", "w") try: js = {"oaiapikey": vars.oaiapikey} file.write(json.dumps(js, indent=3)) @@ -227,7 +318,7 @@ if(vars.model == "OAI"): file.close() else: # Otherwise open it up - file = open("client.settings", "r") + file = open("settings/" + getmodelname() + ".settings", "r") # Check if API key exists js = json.load(file) if("oaiapikey" in js and js["oaiapikey"] != ""): @@ -240,7 +331,7 @@ if(vars.model == "OAI"): vars.oaiapikey = input("Key> ") js["oaiapikey"] = vars.oaiapikey # Write API key to file - file = open("client.settings", "w") + file = open("settings/" + getmodelname() + ".settings", "w") try: file.write(json.dumps(js, indent=3)) finally: @@ -281,8 +372,9 @@ if(vars.model == "OAI"): # Ask for ngrok url if Google Colab was selected if(vars.model == "Colab"): - print("{0}Please enter the ngrok.io or trycloudflare.com URL displayed in Google Colab:{1}\n".format(colors.CYAN, colors.END)) - vars.colaburl = input("URL> ") + "/request" + if(vars.colaburl == ""): + print("{0}Please enter the ngrok.io or trycloudflare.com URL displayed in Google Colab:{1}\n".format(colors.CYAN, colors.END)) + vars.colaburl = input("URL> ") + "/request" if(vars.model == "ReadOnly"): vars.noai = True @@ -294,7 +386,7 @@ log.setLevel(logging.ERROR) # Start flask & SocketIO print("{0}Initializing Flask... {1}".format(colors.PURPLE, colors.END), end="") -from flask import Flask, render_template +from flask import Flask, render_template, Response, request from flask_socketio import SocketIO, emit app = Flask(__name__) app.config['SECRET KEY'] = 'secret!' @@ -305,15 +397,49 @@ print("{0}OK!{1}".format(colors.GREEN, colors.END)) if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]): if(not vars.noai): print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END)) - from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM + from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModel # If custom GPT Neo model was chosen if(vars.model == "NeoCustom"): model = GPTNeoForCausalLM.from_pretrained(vars.custmodpth) tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth) # Is CUDA available? 
If so, use GPU, otherwise fall back to CPU - if(vars.hascuda and vars.usegpu): - generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0) + if(vars.hascuda): + if(vars.usegpu): + generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0) + elif(vars.breakmodel): # Use both RAM and VRAM (breakmodel) + import breakmodel + n_layers = model.config.num_layers + breakmodel.total_blocks = n_layers + model.half().to('cpu') + gc.collect() + model.transformer.wte.to(breakmodel.gpu_device) + model.transformer.ln_f.to(breakmodel.gpu_device) + if(hasattr(model, 'lm_head')): + model.lm_head.to(breakmodel.gpu_device) + if(not hasattr(model.config, 'rotary') or not model.config.rotary): + model.transformer.wpe.to(breakmodel.gpu_device) + gc.collect() + if(args.breakmodel_layers is not None): + breakmodel.ram_blocks = max(0, min(n_layers, args.breakmodel_layers)) + else: + print(colors.CYAN + "\nHow many layers would you like to put into system RAM?") + print("The more of them you put into system RAM, the slower it will run,") + print("but it will require less VRAM") + print("(roughly proportional to number of layers).") + print(f"This model has{colors.YELLOW} {n_layers} {colors.CYAN}layers.{colors.END}\n") + while(True): + layerselect = input("# of layers> ") + if(layerselect.isnumeric() and 0 <= int(layerselect) <= n_layers): + breakmodel.ram_blocks = int(layerselect) + break + else: + print(f"{colors.RED}Please enter an integer between 0 and {n_layers}.{colors.END}") + print(f"{colors.PURPLE}Will commit{colors.YELLOW} {breakmodel.ram_blocks} {colors.PURPLE}of{colors.YELLOW} {n_layers} {colors.PURPLE}layers to system RAM.{colors.END}") + GPTNeoModel.forward = breakmodel.new_forward + generator = model.generate + else: + generator = pipeline('text-generation', model=model, tokenizer=tokenizer) else: generator = pipeline('text-generation', model=model, tokenizer=tokenizer) # If custom GPT2 model was chosen @@ -329,18 +455,53 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]): else: # Is CUDA available? 
If so, use GPU, otherwise fall back to CPU tokenizer = GPT2Tokenizer.from_pretrained(vars.model) - if(vars.hascuda and vars.usegpu): - generator = pipeline('text-generation', model=vars.model, device=0) + if(vars.hascuda): + if(vars.usegpu): + generator = pipeline('text-generation', model=vars.model, device=0) + elif(vars.breakmodel): # Use both RAM and VRAM (breakmodel) + import breakmodel + model = AutoModel.from_pretrained(vars.model) + n_layers = model.config.num_layers + breakmodel.total_blocks = n_layers + model.half().to('cpu') + gc.collect() + model.transformer.wte.to(breakmodel.gpu_device) + model.transformer.ln_f.to(breakmodel.gpu_device) + if(hasattr(model, 'lm_head')): + model.lm_head.to(breakmodel.gpu_device) + if(not hasattr(model.config, 'rotary') or not model.config.rotary): + model.transformer.wpe.to(breakmodel.gpu_device) + gc.collect() + if(args.breakmodel_layers is not None): + breakmodel.ram_blocks = max(0, min(n_layers, args.breakmodel_layers)) + else: + print(colors.CYAN + "\nHow many layers would you like to put into system RAM?") + print("The more of them you put into system RAM, the slower it will run,") + print("but it will require less VRAM") + print("(roughly proportional to number of layers).") + print(f"This model has{colors.YELLOW} {n_layers} {colors.CYAN}layers.{colors.END}\n") + while(True): + layerselect = input("# of layers> ") + if(layerselect.isnumeric() and 0 <= int(layerselect) <= n_layers): + breakmodel.ram_blocks = int(layerselect) + break + else: + print(f"{colors.RED}Please enter an integer between 0 and {n_layers}.{colors.END}") + print(f"{colors.PURPLE}Will commit{colors.YELLOW} {breakmodel.ram_blocks} {colors.PURPLE}of{colors.YELLOW} {n_layers} {colors.PURPLE}layers to system RAM.{colors.END}") + GPTNeoModel.forward = breakmodel.new_forward + generator = model.generate + else: + generator = pipeline('text-generation', model=vars.model) else: generator = pipeline('text-generation', model=vars.model) - # Suppress Author's Note by flagging square brackets - vocab = tokenizer.get_vocab() - vocab_keys = vocab.keys() - vars.badwords = gettokenids("[") - for key in vars.badwords: - vars.badwordsids.append([vocab[key]]) - + # Suppress Author's Note by flagging square brackets (Old implementation) + #vocab = tokenizer.get_vocab() + #vocab_keys = vocab.keys() + #vars.badwords = gettokenids("[") + #for key in vars.badwords: + # vars.badwordsids.append([vocab[key]]) + print("{0}OK! {1} pipeline created!{2}".format(colors.GREEN, vars.model, colors.END)) else: # If we're running Colab or OAI, we still need a tokenizer. 
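Note on the GPU/CPU hybrid ("breakmodel") path added in the hunks above: the token embeddings, final layer norm and (when present) the LM head are moved to the GPU, breakmodel.ram_blocks transformer layers are kept in system RAM, and GPTNeoModel.forward is swapped for breakmodel.new_forward so that activations are shuttled between devices during generation. The breakmodel module itself is not part of this diff, so the following is only a minimal sketch of the layer-splitting idea; SplitBlockStack and its toy blocks are hypothetical names used for illustration, not the actual breakmodel implementation.

# Illustrative sketch only: split a stack of layers between system RAM (CPU)
# and VRAM (GPU), moving the hidden state to whichever device holds each layer.
import torch
import torch.nn as nn

gpu_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class SplitBlockStack(nn.Module):
    """Toy stand-in for a transformer block stack split across CPU and GPU."""
    def __init__(self, blocks, ram_blocks):
        super().__init__()
        self.blocks = nn.ModuleList(blocks)
        self.ram_blocks = ram_blocks
        # The first ram_blocks layers stay in system RAM, the rest go to VRAM.
        for i, block in enumerate(self.blocks):
            block.to("cpu" if i < ram_blocks else gpu_device)

    def forward(self, hidden):
        for i, block in enumerate(self.blocks):
            # Move activations to the device that holds the next layer.
            hidden = hidden.to("cpu" if i < self.ram_blocks else gpu_device)
            hidden = block(hidden)
        return hidden

if __name__ == "__main__":
    # Smoke test with linear layers standing in for transformer blocks.
    stack = SplitBlockStack([nn.Linear(8, 8) for _ in range(4)], ram_blocks=2)
    print(stack(torch.zeros(1, 8)).shape)

The trade-off, as the interactive prompt in the diff explains, is speed: the more layers committed to system RAM, the slower generation runs, but the less VRAM it needs.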
@@ -356,6 +517,45 @@ else: @app.route('/index') def index(): return render_template('index.html') +@app.route('/download') +def download(): + save_format = request.args.get("format", "json").strip().lower() + + if(save_format == "plaintext"): + txt = vars.prompt + "".join(vars.actions.values()) + save = Response(txt) + filename = path.basename(vars.savedir) + if filename[-5:] == ".json": + filename = filename[:-5] + save.headers.set('Content-Disposition', 'attachment', filename='%s.txt' % filename) + return(save) + + # Build json to write + js = {} + js["gamestarted"] = vars.gamestarted + js["prompt"] = vars.prompt + js["memory"] = vars.memory + js["authorsnote"] = vars.authornote + js["actions"] = tuple(vars.actions.values()) + js["worldinfo"] = [] + + # Extract only the important bits of WI + for wi in vars.worldinfo: + if(wi["constant"] or wi["key"] != ""): + js["worldinfo"].append({ + "key": wi["key"], + "keysecondary": wi["keysecondary"], + "content": wi["content"], + "selective": wi["selective"], + "constant": wi["constant"] + }) + + save = Response(json.dumps(js, indent=3)) + filename = path.basename(vars.savedir) + if filename[-5:] == ".json": + filename = filename[:-5] + save.headers.set('Content-Disposition', 'attachment', filename='%s.json' % filename) + return(save) #============================ METHODS =============================# @@ -365,41 +565,51 @@ def index(): @socketio.on('connect') def do_connect(): print("{0}Client connected!{1}".format(colors.GREEN, colors.END)) - emit('from_server', {'cmd': 'connected'}) + emit('from_server', {'cmd': 'connected', 'smandelete': vars.smandelete, 'smanrename': vars.smanrename}) + if(vars.remote): + emit('from_server', {'cmd': 'runs_remotely'}) + if(not vars.gamestarted): setStartState() sendsettings() refresh_settings() + vars.laststory = None + emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) sendwi() + emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) vars.mode = "play" else: # Game in session, send current game data and ready state to browser refresh_story() sendsettings() refresh_settings() + emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) sendwi() + emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) if(vars.mode == "play"): if(not vars.aibusy): - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) else: - emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, broadcast=True) elif(vars.mode == "edit"): - emit('from_server', {'cmd': 'editmode', 'data': 'true'}) + emit('from_server', {'cmd': 'editmode', 'data': 'true'}, broadcast=True) elif(vars.mode == "memory"): - emit('from_server', {'cmd': 'memmode', 'data': 'true'}) + emit('from_server', {'cmd': 'memmode', 'data': 'true'}, broadcast=True) elif(vars.mode == "wi"): - emit('from_server', {'cmd': 'wimode', 'data': 'true'}) + emit('from_server', {'cmd': 'wimode', 'data': 'true'}, broadcast=True) #==================================================================# # Event triggered when browser SocketIO sends data to the server #==================================================================# @socketio.on('message') def 
get_message(msg): - print("{0}Data recieved:{1}{2}".format(colors.GREEN, msg, colors.END)) + print("{0}Data received:{1}{2}".format(colors.GREEN, msg, colors.END)) # Submit action if(msg['cmd'] == 'submit'): if(vars.mode == "play"): - actionsubmit(msg['data']) + actionsubmit(msg['data'], actionmode=msg['actionmode']) elif(vars.mode == "edit"): editsubmit(msg['data']) elif(vars.mode == "memory"): @@ -410,79 +620,107 @@ def get_message(msg): # Back/Undo Action elif(msg['cmd'] == 'back'): actionback() - # EditMode Action + # EditMode Action (old) elif(msg['cmd'] == 'edit'): if(vars.mode == "play"): vars.mode = "edit" - emit('from_server', {'cmd': 'editmode', 'data': 'true'}) + emit('from_server', {'cmd': 'editmode', 'data': 'true'}, broadcast=True) elif(vars.mode == "edit"): vars.mode = "play" - emit('from_server', {'cmd': 'editmode', 'data': 'false'}) - # EditLine Action + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + # EditLine Action (old) elif(msg['cmd'] == 'editline'): editrequest(int(msg['data'])) - # DeleteLine Action + # Inline edit + elif(msg['cmd'] == 'inlineedit'): + inlineedit(msg['chunk'], msg['data']) + elif(msg['cmd'] == 'inlinedelete'): + inlinedelete(msg['data']) + # DeleteLine Action (old) elif(msg['cmd'] == 'delete'): deleterequest() elif(msg['cmd'] == 'memory'): togglememorymode() - elif(msg['cmd'] == 'savetofile'): + elif(not vars.remote and msg['cmd'] == 'savetofile'): savetofile() - elif(msg['cmd'] == 'loadfromfile'): + elif(not vars.remote and msg['cmd'] == 'loadfromfile'): loadfromfile() - elif(msg['cmd'] == 'import'): + elif(not vars.remote and msg['cmd'] == 'import'): importRequest() elif(msg['cmd'] == 'newgame'): newGameRequest() + elif(msg['cmd'] == 'rndgame'): + randomGameRequest(msg['data']) elif(msg['cmd'] == 'settemp'): vars.temp = float(msg['data']) - emit('from_server', {'cmd': 'setlabeltemp', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabeltemp', 'data': msg['data']}, broadcast=True) settingschanged() + refresh_settings() elif(msg['cmd'] == 'settopp'): vars.top_p = float(msg['data']) - emit('from_server', {'cmd': 'setlabeltopp', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabeltopp', 'data': msg['data']}, broadcast=True) settingschanged() + refresh_settings() + elif(msg['cmd'] == 'settopk'): + vars.top_k = int(msg['data']) + emit('from_server', {'cmd': 'setlabeltopk', 'data': msg['data']}, broadcast=True) + settingschanged() + refresh_settings() + elif(msg['cmd'] == 'settfs'): + vars.tfs = float(msg['data']) + emit('from_server', {'cmd': 'setlabeltfs', 'data': msg['data']}, broadcast=True) + settingschanged() + refresh_settings() elif(msg['cmd'] == 'setreppen'): vars.rep_pen = float(msg['data']) - emit('from_server', {'cmd': 'setlabelreppen', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabelreppen', 'data': msg['data']}, broadcast=True) settingschanged() + refresh_settings() elif(msg['cmd'] == 'setoutput'): vars.genamt = int(msg['data']) - emit('from_server', {'cmd': 'setlabeloutput', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabeloutput', 'data': msg['data']}, broadcast=True) settingschanged() + refresh_settings() elif(msg['cmd'] == 'settknmax'): vars.max_length = int(msg['data']) - emit('from_server', {'cmd': 'setlabeltknmax', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabeltknmax', 'data': msg['data']}, broadcast=True) settingschanged() + refresh_settings() elif(msg['cmd'] == 'setikgen'): vars.ikgen = int(msg['data']) - emit('from_server', {'cmd': 
'setlabelikgen', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabelikgen', 'data': msg['data']}, broadcast=True) settingschanged() + refresh_settings() # Author's Note field update elif(msg['cmd'] == 'anote'): anotesubmit(msg['data']) # Author's Note depth update elif(msg['cmd'] == 'anotedepth'): vars.andepth = int(msg['data']) - emit('from_server', {'cmd': 'setlabelanotedepth', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabelanotedepth', 'data': msg['data']}, broadcast=True) settingschanged() + refresh_settings() # Format - Trim incomplete sentences elif(msg['cmd'] == 'frmttriminc'): if('frmttriminc' in vars.formatoptns): vars.formatoptns["frmttriminc"] = msg['data'] settingschanged() + refresh_settings() elif(msg['cmd'] == 'frmtrmblln'): if('frmtrmblln' in vars.formatoptns): vars.formatoptns["frmtrmblln"] = msg['data'] settingschanged() + refresh_settings() elif(msg['cmd'] == 'frmtrmspch'): if('frmtrmspch' in vars.formatoptns): vars.formatoptns["frmtrmspch"] = msg['data'] settingschanged() + refresh_settings() elif(msg['cmd'] == 'frmtadsnsp'): if('frmtadsnsp' in vars.formatoptns): vars.formatoptns["frmtadsnsp"] = msg['data'] settingschanged() + refresh_settings() elif(msg['cmd'] == 'importselect'): vars.importnum = int(msg["data"].replace("import", "")) elif(msg['cmd'] == 'importcancel'): @@ -499,6 +737,14 @@ def get_message(msg): addwiitem() elif(msg['cmd'] == 'widelete'): deletewi(msg['data']) + elif(msg['cmd'] == 'wiselon'): + vars.worldinfo[msg['data']]["selective"] = True + elif(msg['cmd'] == 'wiseloff'): + vars.worldinfo[msg['data']]["selective"] = False + elif(msg['cmd'] == 'wiconstanton'): + vars.worldinfo[msg['data']]["constant"] = True + elif(msg['cmd'] == 'wiconstantoff'): + vars.worldinfo[msg['data']]["constant"] = False elif(msg['cmd'] == 'sendwilist'): commitwi(msg['data']) elif(msg['cmd'] == 'aidgimport'): @@ -512,7 +758,11 @@ def get_message(msg): elif(msg['cmd'] == 'loadselect'): vars.loadselect = msg["data"] elif(msg['cmd'] == 'loadrequest'): - loadRequest(getcwd()+"/stories/"+vars.loadselect+".json") + loadRequest(fileops.storypath(vars.loadselect)) + elif(msg['cmd'] == 'deletestory'): + deletesave(msg['data']) + elif(msg['cmd'] == 'renamestory'): + renamesave(msg['data'], msg['newname']) elif(msg['cmd'] == 'clearoverwrite'): vars.svowname = "" vars.saveow = False @@ -522,27 +772,34 @@ def get_message(msg): vars.numseqs = int(msg['data']) emit('from_server', {'cmd': 'setlabelnumseq', 'data': msg['data']}) settingschanged() + refresh_settings() elif(msg['cmd'] == 'setwidepth'): vars.widepth = int(msg['data']) emit('from_server', {'cmd': 'setlabelwidepth', 'data': msg['data']}) settingschanged() + refresh_settings() elif(msg['cmd'] == 'setuseprompt'): vars.useprompt = msg['data'] settingschanged() - elif(msg['cmd'] == 'importwi'): + refresh_settings() + elif(msg['cmd'] == 'setadventure'): + vars.adventure = msg['data'] + settingschanged() + refresh_settings() + elif(not vars.remote and msg['cmd'] == 'importwi'): wiimportrequest() #==================================================================# # Send start message and tell Javascript to set UI state #==================================================================# def setStartState(): - txt = "Welcome to KoboldAI Client! You are running "+vars.model+".
" + txt = "Welcome to KoboldAI! You are running "+getmodelname()+".
" if(not vars.noai): txt = txt + "Please load a game or enter a prompt below to begin!
" else: txt = txt + "Please load or import a story to read. There is no AI in this mode." - emit('from_server', {'cmd': 'updatescreen', 'data': txt}) - emit('from_server', {'cmd': 'setgamestate', 'data': 'start'}) + emit('from_server', {'cmd': 'updatescreen', 'gamestarted': vars.gamestarted, 'data': txt}, broadcast=True) + emit('from_server', {'cmd': 'setgamestate', 'data': 'start'}, broadcast=True) #==================================================================# # Transmit applicable settings to SocketIO to build UI sliders/toggles @@ -573,6 +830,8 @@ def savesettings(): js["andepth"] = vars.andepth js["temp"] = vars.temp js["top_p"] = vars.top_p + js["top_k"] = vars.top_k + js["tfs"] = vars.tfs js["rep_pen"] = vars.rep_pen js["genamt"] = vars.genamt js["max_length"] = vars.max_length @@ -581,9 +840,12 @@ def savesettings(): js["numseqs"] = vars.numseqs js["widepth"] = vars.widepth js["useprompt"] = vars.useprompt - + js["adventure"] = vars.adventure + # Write it - file = open("client.settings", "w") + if not os.path.exists('settings'): + os.mkdir('settings') + file = open("settings/" + getmodelname() + ".settings", "w") try: file.write(json.dumps(js, indent=3)) finally: @@ -593,9 +855,9 @@ def savesettings(): # Read settings from client file JSON and send to vars #==================================================================# def loadsettings(): - if(path.exists("client.settings")): + if(path.exists("settings/" + getmodelname() + ".settings")): # Read file contents into JSON object - file = open("client.settings", "r") + file = open("settings/" + getmodelname() + ".settings", "r") js = json.load(file) # Copy file contents to vars @@ -607,6 +869,10 @@ def loadsettings(): vars.temp = js["temp"] if("top_p" in js): vars.top_p = js["top_p"] + if("top_k" in js): + vars.top_k = js["top_k"] + if("tfs" in js): + vars.tfs = js["tfs"] if("rep_pen" in js): vars.rep_pen = js["rep_pen"] if("genamt" in js): @@ -623,9 +889,36 @@ def loadsettings(): vars.widepth = js["widepth"] if("useprompt" in js): vars.useprompt = js["useprompt"] + if("adventure" in js): + vars.adventure = js["adventure"] file.close() +#==================================================================# +# Allow the models to override some settings +#==================================================================# +def loadmodelsettings(): + if(path.exists(vars.custmodpth + "/config.json")): + model_config = open(vars.custmodpth + "/config.json", "r") + js = json.load(model_config) + if("badwordsids" in js): + vars.badwordsids = js["badwordsids"] + if("temp" in js): + vars.temp = js["temp"] + if("top_p" in js): + vars.top_p = js["top_p"] + if("top_k" in js): + vars.top_k = js["top_k"] + if("tfs" in js): + vars.tfs = js["tfs"] + if("rep_pen" in js): + vars.rep_pen = js["rep_pen"] + if("adventure" in js): + vars.adventure = js["adventure"] + if("formatoptns" in js): + vars.formatoptns = js["formatoptns"] + model_config.close() + #==================================================================# # Don't save settings unless 2 seconds have passed without modification #==================================================================# @@ -637,11 +930,20 @@ def settingschanged(): #==================================================================# # Take input text from SocketIO and decide what to do with it #==================================================================# -def actionsubmit(data): +def actionsubmit(data, actionmode=0): # Ignore new submissions if the AI is currently busy if(vars.aibusy): return 
set_aibusy(1) + + vars.recentback = False + vars.actionmode = actionmode + + # "Action" mode + if(actionmode == 1): + data = data.strip().lstrip('>') + data = re.sub(r'\n+', ' ', data) + data = f"\n\n> {data}\n" # If we're not continuing, store a copy of the raw input if(data != ""): @@ -654,25 +956,30 @@ def actionsubmit(data): vars.prompt = data if(not vars.noai): # Clear the startup text from game screen - emit('from_server', {'cmd': 'updatescreen', 'data': 'Please wait, generating story...'}) + emit('from_server', {'cmd': 'updatescreen', 'gamestarted': vars.gamestarted, 'data': 'Please wait, generating story...'}, broadcast=True) calcsubmit(data) # Run the first action through the generator + emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) else: refresh_story() set_aibusy(0) + emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) else: # Dont append submission if it's a blank/continue action if(data != ""): # Apply input formatting & scripts before sending to tokenizer - data = applyinputformatting(data) + if(vars.actionmode == 0): + data = applyinputformatting(data) # Store the result in the Action log vars.actions.append(data) - + update_story_chunk('last') + if(not vars.noai): # Off to the tokenizer! calcsubmit(data) + emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) else: - refresh_story() set_aibusy(0) + emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) #==================================================================# # @@ -683,12 +990,18 @@ def actionretry(data): return if(vars.aibusy): return - set_aibusy(1) # Remove last action if possible and resubmit - if(len(vars.actions) > 0): - vars.actions.pop() - refresh_story() + if(vars.gamestarted if vars.useprompt else len(vars.actions) > 0): + set_aibusy(1) + if(not vars.recentback and len(vars.actions) != 0 and len(vars.genseqs) == 0): # Don't pop if we're in the "Select sequence to keep" menu or if there are no non-prompt actions + last_key = vars.actions.get_last_key() + vars.actions.pop() + remove_story_chunk(last_key + 1) + vars.genseqs = [] calcsubmit('') + vars.recentback = False + elif(not vars.useprompt): + emit('from_server', {'cmd': 'errmsg', 'data': "Please enable \"Always Add Prompt\" to retry with your prompt."}) #==================================================================# # @@ -697,9 +1010,15 @@ def actionback(): if(vars.aibusy): return # Remove last index of actions and refresh game screen - if(len(vars.actions) > 0): + if(len(vars.genseqs) == 0 and len(vars.actions) > 0): + last_key = vars.actions.get_last_key() vars.actions.pop() - refresh_story() + vars.recentback = True + remove_story_chunk(last_key + 1) + elif(len(vars.genseqs) == 0): + emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}) + else: + vars.genseqs = [] #==================================================================# # Take submitted text and build the text to be given to generator @@ -766,11 +1085,13 @@ def calcsubmit(txt): forceanote = True # Get most recent action tokens up to our budget - for n in range(actionlen): + n = 0 + for key in reversed(vars.actions): + chunk = vars.actions[key] if(budget <= 0): break - acttkns = tokenizer.encode(vars.actions[(-1-n)]) + acttkns = tokenizer.encode(chunk) tknlen = len(acttkns) if(tknlen < budget): tokens = acttkns + tokens @@ -786,6 +1107,7 @@ def calcsubmit(txt): if(anotetxt != ""): tokens = anotetkns + tokens # A.N. 
len already taken from bdgt anoteadded = True + n += 1 # If we're not using the prompt every time and there's still budget left, # add some prompt. @@ -840,17 +1162,19 @@ def calcsubmit(txt): subtxt = "" prompt = vars.prompt - for n in range(actionlen): + n = 0 + for key in reversed(vars.actions): + chunk = vars.actions[key] if(budget <= 0): break - actlen = len(vars.actions[(-1-n)]) + actlen = len(chunk) if(actlen < budget): - subtxt = vars.actions[(-1-n)] + subtxt + subtxt = chunk + subtxt budget -= actlen else: count = budget * -1 - subtxt = vars.actions[(-1-n)][count:] + subtxt + subtxt = chunk[count:] + subtxt budget = 0 break @@ -867,6 +1191,7 @@ def calcsubmit(txt): if(anotetxt != ""): subtxt = anotetxt + subtxt # A.N. len already taken from bdgt anoteadded = True + n += 1 # Did we get to add the A.N.? If not, do it here if(anotetxt != ""): @@ -890,37 +1215,59 @@ def generate(txt, min, max): vars.lastctx = txt # Clear CUDA cache if using GPU - if(vars.hascuda and vars.usegpu): + if(vars.hascuda and (vars.usegpu or vars.breakmodel)): + gc.collect() torch.cuda.empty_cache() # Submit input text to generator try: - genout = generator( - txt, - do_sample=True, - min_length=min, - max_length=max, - repetition_penalty=vars.rep_pen, - top_p=vars.top_p, - temperature=vars.temp, - bad_words_ids=vars.badwordsids, - use_cache=True, - return_full_text=False, - num_return_sequences=vars.numseqs - ) + top_p = vars.top_p if vars.top_p > 0.0 else None + top_k = vars.top_k if vars.top_k > 0 else None + tfs = vars.tfs if vars.tfs > 0.0 else None + + # generator() only accepts a torch tensor of tokens (long datatype) as + # its first argument if we're using breakmodel, otherwise a string + # is fine + if(vars.hascuda and vars.breakmodel): + gen_in = tokenizer.encode(txt, return_tensors="pt", truncation=True).long().to(breakmodel.gpu_device) + else: + gen_in = txt + + with torch.no_grad(): + genout = generator( + gen_in, + do_sample=True, + min_length=min, + max_length=max, + repetition_penalty=vars.rep_pen, + top_p=top_p, + top_k=top_k, + tfs=tfs, + temperature=vars.temp, + bad_words_ids=vars.badwordsids, + use_cache=True, + return_full_text=False, + num_return_sequences=vars.numseqs + ) except Exception as e: - emit('from_server', {'cmd': 'errmsg', 'data': 'Error occured during generator call, please check console.'}) + emit('from_server', {'cmd': 'errmsg', 'data': 'Error occured during generator call, please check console.'}, broadcast=True) print("{0}{1}{2}".format(colors.RED, e, colors.END)) set_aibusy(0) return + # Need to manually strip and decode tokens if we're not using a pipeline + if(vars.hascuda and vars.breakmodel): + genout = [{"generated_text": tokenizer.decode(tokens[len(gen_in[0])-len(tokens):])} for tokens in genout] + if(len(genout) == 1): genresult(genout[0]["generated_text"]) else: genselect(genout) # Clear CUDA cache again if using GPU - if(vars.hascuda and vars.usegpu): + if(vars.hascuda and (vars.usegpu or vars.breakmodel)): + del genout + gc.collect() torch.cuda.empty_cache() set_aibusy(0) @@ -936,8 +1283,8 @@ def genresult(genout): # Add formatted text to Actions array and refresh the game screen vars.actions.append(genout) - refresh_story() - emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}) + update_story_chunk('last') + emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() if len(vars.actions) else 0}, broadcast=True) #==================================================================# # Send generator sequences to the UI for 
selection @@ -954,10 +1301,7 @@ def genselect(genout): vars.genseqs = genout # Send sequences to UI for selection - emit('from_server', {'cmd': 'genseqs', 'data': genout}) - - # Refresh story for any input text - refresh_story() + emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True) #==================================================================# # Send selected sequence to action log and refresh UI @@ -966,9 +1310,9 @@ def selectsequence(n): if(len(vars.genseqs) == 0): return vars.actions.append(vars.genseqs[int(n)]["generated_text"]) - refresh_story() - emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}) - emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}) + update_story_chunk('last') + emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() if len(vars.actions) else 0}, broadcast=True) + emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) vars.genseqs = [] #==================================================================# @@ -989,6 +1333,8 @@ def sendtocolab(txt, min, max): 'rep_pen': vars.rep_pen, 'temperature': vars.temp, 'top_p': vars.top_p, + 'top_k': vars.top_k, + 'tfs': vars.tfs, 'numseqs': vars.numseqs, 'retfultxt': False } @@ -1024,13 +1370,13 @@ def sendtocolab(txt, min, max): # Add formatted text to Actions array and refresh the game screen #vars.actions.append(genout) #refresh_story() - #emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}) + #emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() if len(vars.actions) else 0}) set_aibusy(0) else: errmsg = "Colab API Error: Failed to get a reply from the server. Please check the colab console." print("{0}{1}{2}".format(colors.RED, errmsg, colors.END)) - emit('from_server', {'cmd': 'errmsg', 'data': errmsg}) + emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True) set_aibusy(0) @@ -1074,6 +1420,10 @@ def applyinputformatting(txt): def applyoutputformatting(txt): # Use standard quotes and apostrophes txt = utils.fixquotes(txt) + + # Adventure mode clipping of all characters after '>' + if(vars.adventure): + txt = vars.acregex_ai.sub('', txt) # Trim incomplete sentences if(vars.formatoptns["frmttriminc"]): @@ -1083,7 +1433,7 @@ def applyoutputformatting(txt): txt = utils.replaceblanklines(txt) # Remove special characters if(vars.formatoptns["frmtrmspch"]): - txt = utils.removespecialchars(txt) + txt = utils.removespecialchars(txt, vars) return txt @@ -1091,41 +1441,84 @@ def applyoutputformatting(txt): # Sends the current story content to the Game Screen #==================================================================# def refresh_story(): - text_parts = ['', html.escape(vars.prompt), ''] - for idx, item in enumerate(vars.actions, start=1): - text_parts.extend(('', html.escape(item), '')) - emit('from_server', {'cmd': 'updatescreen', 'data': formatforhtml(''.join(text_parts))}) + text_parts = ['', html.escape(vars.prompt), ''] + for idx in vars.actions: + item = vars.actions[idx] + idx += 1 + item = html.escape(item) + item = vars.acregex_ui.sub('\\1', item) # Add special formatting to adventure actions + text_parts.extend(('', item, '')) + emit('from_server', {'cmd': 'updatescreen', 'gamestarted': vars.gamestarted, 'data': formatforhtml(''.join(text_parts))}, broadcast=True) + + +#==================================================================# +# Signals the Game Screen to update one of the chunks +#==================================================================# +def 
update_story_chunk(idx: Union[int, str]): + if idx == 'last': + if len(vars.actions) <= 1: + # In this case, we are better off just refreshing the whole thing as the + # prompt might not have been shown yet (with a "Generating story..." + # message instead). + refresh_story() + return + + idx = (vars.actions.get_last_key() if len(vars.actions) else 0) + 1 + + if idx == 0: + text = vars.prompt + else: + # Actions are 0 based, but in chunks 0 is the prompt. + # So the chunk index is one more than the corresponding action index. + text = vars.actions[idx - 1] + + item = html.escape(text) + item = vars.acregex_ui.sub('\\1', item) # Add special formatting to adventure actions + + chunk_text = f'{formatforhtml(item)}' + emit('from_server', {'cmd': 'updatechunk', 'data': {'index': idx, 'html': chunk_text, 'last': (idx == (vars.actions.get_last_key() if len(vars.actions) else 0))}}, broadcast=True) + + +#==================================================================# +# Signals the Game Screen to remove one of the chunks +#==================================================================# +def remove_story_chunk(idx: int): + emit('from_server', {'cmd': 'removechunk', 'data': idx}, broadcast=True) + #==================================================================# # Sends the current generator settings to the Game Menu #==================================================================# def refresh_settings(): # Suppress toggle change events while loading state - emit('from_server', {'cmd': 'allowtoggle', 'data': False}) + emit('from_server', {'cmd': 'allowtoggle', 'data': False}, broadcast=True) if(vars.model != "InferKit"): - emit('from_server', {'cmd': 'updatetemp', 'data': vars.temp}) - emit('from_server', {'cmd': 'updatetopp', 'data': vars.top_p}) - emit('from_server', {'cmd': 'updatereppen', 'data': vars.rep_pen}) - emit('from_server', {'cmd': 'updateoutlen', 'data': vars.genamt}) - emit('from_server', {'cmd': 'updatetknmax', 'data': vars.max_length}) - emit('from_server', {'cmd': 'updatenumseq', 'data': vars.numseqs}) + emit('from_server', {'cmd': 'updatetemp', 'data': vars.temp}, broadcast=True) + emit('from_server', {'cmd': 'updatetopp', 'data': vars.top_p}, broadcast=True) + emit('from_server', {'cmd': 'updatetopk', 'data': vars.top_k}, broadcast=True) + emit('from_server', {'cmd': 'updatetfs', 'data': vars.tfs}, broadcast=True) + emit('from_server', {'cmd': 'updatereppen', 'data': vars.rep_pen}, broadcast=True) + emit('from_server', {'cmd': 'updateoutlen', 'data': vars.genamt}, broadcast=True) + emit('from_server', {'cmd': 'updatetknmax', 'data': vars.max_length}, broadcast=True) + emit('from_server', {'cmd': 'updatenumseq', 'data': vars.numseqs}, broadcast=True) else: - emit('from_server', {'cmd': 'updatetemp', 'data': vars.temp}) - emit('from_server', {'cmd': 'updatetopp', 'data': vars.top_p}) - emit('from_server', {'cmd': 'updateikgen', 'data': vars.ikgen}) + emit('from_server', {'cmd': 'updatetemp', 'data': vars.temp}, broadcast=True) + emit('from_server', {'cmd': 'updatetopp', 'data': vars.top_p}, broadcast=True) + emit('from_server', {'cmd': 'updateikgen', 'data': vars.ikgen}, broadcast=True) - emit('from_server', {'cmd': 'updateanotedepth', 'data': vars.andepth}) - emit('from_server', {'cmd': 'updatewidepth', 'data': vars.widepth}) - emit('from_server', {'cmd': 'updateuseprompt', 'data': vars.useprompt}) + emit('from_server', {'cmd': 'updateanotedepth', 'data': vars.andepth}, broadcast=True) + emit('from_server', {'cmd': 'updatewidepth', 'data': vars.widepth}, 
broadcast=True) + emit('from_server', {'cmd': 'updateuseprompt', 'data': vars.useprompt}, broadcast=True) + emit('from_server', {'cmd': 'updateadventure', 'data': vars.adventure}, broadcast=True) - emit('from_server', {'cmd': 'updatefrmttriminc', 'data': vars.formatoptns["frmttriminc"]}) - emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': vars.formatoptns["frmtrmblln"]}) - emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': vars.formatoptns["frmtrmspch"]}) - emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': vars.formatoptns["frmtadsnsp"]}) + emit('from_server', {'cmd': 'updatefrmttriminc', 'data': vars.formatoptns["frmttriminc"]}, broadcast=True) + emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': vars.formatoptns["frmtrmblln"]}, broadcast=True) + emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': vars.formatoptns["frmtrmspch"]}, broadcast=True) + emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': vars.formatoptns["frmtadsnsp"]}, broadcast=True) # Allow toggle events again - emit('from_server', {'cmd': 'allowtoggle', 'data': True}) + emit('from_server', {'cmd': 'allowtoggle', 'data': True}, broadcast=True) #==================================================================# # Sets the logical and display states for the AI Busy condition @@ -1133,10 +1526,10 @@ def refresh_settings(): def set_aibusy(state): if(state): vars.aibusy = True - emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, broadcast=True) else: vars.aibusy = False - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) #==================================================================# # @@ -1148,8 +1541,8 @@ def editrequest(n): txt = vars.actions[n-1] vars.editln = n - emit('from_server', {'cmd': 'setinputtext', 'data': txt}) - emit('from_server', {'cmd': 'enablesubmit', 'data': ''}) + emit('from_server', {'cmd': 'setinputtext', 'data': txt}, broadcast=True) + emit('from_server', {'cmd': 'enablesubmit', 'data': ''}, broadcast=True) #==================================================================# # @@ -1161,8 +1554,8 @@ def editsubmit(data): vars.actions[vars.editln-1] = data vars.mode = "play" - refresh_story() - emit('from_server', {'cmd': 'texteffect', 'data': vars.editln}) + update_story_chunk(vars.editln) + emit('from_server', {'cmd': 'texteffect', 'data': vars.editln}, broadcast=True) emit('from_server', {'cmd': 'editmode', 'data': 'false'}) #==================================================================# @@ -1176,21 +1569,51 @@ def deleterequest(): else: del vars.actions[vars.editln-1] vars.mode = "play" - refresh_story() + remove_story_chunk(vars.editln) emit('from_server', {'cmd': 'editmode', 'data': 'false'}) +#==================================================================# +# +#==================================================================# +def inlineedit(chunk, data): + chunk = int(chunk) + if(chunk == 0): + vars.prompt = data + else: + vars.actions[chunk-1] = data + + update_story_chunk(chunk) + emit('from_server', {'cmd': 'texteffect', 'data': chunk}, broadcast=True) + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + +#==================================================================# +# +#==================================================================# +def inlinedelete(chunk): + chunk = int(chunk) + # Don't delete prompt + if(chunk == 0): + # Send error message + 
update_story_chunk(chunk) + emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}) + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + else: + del vars.actions[chunk-1] + remove_story_chunk(chunk) + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + #==================================================================# # Toggles the game mode for memory editing and sends UI commands #==================================================================# def togglememorymode(): if(vars.mode == "play"): vars.mode = "memory" - emit('from_server', {'cmd': 'memmode', 'data': 'true'}) - emit('from_server', {'cmd': 'setinputtext', 'data': vars.memory}) - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}) + emit('from_server', {'cmd': 'memmode', 'data': 'true'}, broadcast=True) + emit('from_server', {'cmd': 'setinputtext', 'data': vars.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) elif(vars.mode == "memory"): vars.mode = "play" - emit('from_server', {'cmd': 'memmode', 'data': 'false'}) + emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) #==================================================================# # Toggles the game mode for WI editing and sends UI commands @@ -1198,21 +1621,22 @@ def togglememorymode(): def togglewimode(): if(vars.mode == "play"): vars.mode = "wi" - emit('from_server', {'cmd': 'wimode', 'data': 'true'}) + emit('from_server', {'cmd': 'wimode', 'data': 'true'}, broadcast=True) elif(vars.mode == "wi"): # Commit WI fields first requestwi() # Then set UI state back to Play vars.mode = "play" - emit('from_server', {'cmd': 'wimode', 'data': 'false'}) + emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True) + sendwi() #==================================================================# # #==================================================================# def addwiitem(): - ob = {"key": "", "content": "", "num": len(vars.worldinfo), "init": False} + ob = {"key": "", "keysecondary": "", "content": "", "num": len(vars.worldinfo), "init": False, "selective": False, "constant": False} vars.worldinfo.append(ob); - emit('from_server', {'cmd': 'addwiitem', 'data': ob}) + emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True) #==================================================================# # @@ -1222,7 +1646,7 @@ def sendwi(): ln = len(vars.worldinfo) # Clear contents of WI container - emit('from_server', {'cmd': 'clearwi', 'data': ''}) + emit('from_server', {'cmd': 'clearwi', 'data': ''}, broadcast=True) # If there are no WI entries, send an empty WI object if(ln == 0): @@ -1231,7 +1655,7 @@ def sendwi(): # Send contents of WI array for wi in vars.worldinfo: ob = wi - emit('from_server', {'cmd': 'addwiitem', 'data': ob}) + emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True) # Make sure last WI item is uninitialized if(vars.worldinfo[-1]["init"]): addwiitem() @@ -1261,8 +1685,11 @@ def organizewi(): #==================================================================# def commitwi(ar): for ob in ar: - vars.worldinfo[ob["num"]]["key"] = ob["key"] - vars.worldinfo[ob["num"]]["content"] = ob["content"] + vars.worldinfo[ob["num"]]["key"] = ob["key"] + vars.worldinfo[ob["num"]]["keysecondary"] = ob["keysecondary"] + vars.worldinfo[ob["num"]]["content"] = ob["content"] + vars.worldinfo[ob["num"]]["selective"] = ob["selective"] + vars.worldinfo[ob["num"]]["constant"] = 
ob.get("constant", False) # Was this a deletion request? If so, remove the requested index if(vars.deletewi >= 0): del vars.worldinfo[vars.deletewi] @@ -1302,27 +1729,56 @@ def checkworldinfo(txt): txt = "" depth += 1 + if(ln > 0): + chunks = collections.deque() + i = 0 + for key in reversed(vars.actions): + chunk = vars.actions[key] + chunks.appendleft(chunk) + i += 1 + if(i == depth): + break + if(ln >= depth): - txt = "".join(vars.actions[(depth*-1):]) + txt = "".join(chunks) elif(ln > 0): - txt = vars.prompt + "".join(vars.actions[(depth*-1):]) + txt = vars.prompt + "".join(chunks) elif(ln == 0): txt = vars.prompt # Scan text for matches on WI keys wimem = "" for wi in vars.worldinfo: + if(wi.get("constant", False)): + wimem = wimem + wi["content"] + "\n" + continue + if(wi["key"] != ""): # Split comma-separated keys keys = wi["key"].split(",") + keys_secondary = wi.get("keysecondary", "").split(",") + for k in keys: ky = k # Remove leading/trailing spaces if the option is enabled if(vars.wirmvwhtsp): ky = k.strip() if ky in txt: - wimem = wimem + wi["content"] + "\n" - break + if wi.get("selective", False) and len(keys_secondary): + found = False + for ks in keys_secondary: + ksy = ks + if(vars.wirmvwhtsp): + ksy = ks.strip() + if ksy in txt: + wimem = wimem + wi["content"] + "\n" + found = True + break + if found: + break + else: + wimem = wimem + wi["content"] + "\n" + break return wimem @@ -1334,7 +1790,7 @@ def memsubmit(data): # For now just send it to storage vars.memory = data vars.mode = "play" - emit('from_server', {'cmd': 'memmode', 'data': 'false'}) + emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) # Ask for contents of Author's Note field emit('from_server', {'cmd': 'getanote', 'data': ''}) @@ -1382,8 +1838,8 @@ def ikrequest(txt): genout = req.json()["data"]["text"] print("{0}{1}{2}".format(colors.CYAN, genout, colors.END)) vars.actions.append(genout) - refresh_story() - emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}) + update_story_chunk('last') + emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() if len(vars.actions) else 0}, broadcast=True) set_aibusy(0) else: @@ -1395,7 +1851,7 @@ def ikrequest(txt): code = er["errors"][0]["extensions"]["code"] errmsg = "InferKit API Error: {0} - {1}".format(req.status_code, code) - emit('from_server', {'cmd': 'errmsg', 'data': errmsg}) + emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True) set_aibusy(0) #==================================================================# @@ -1432,8 +1888,8 @@ def oairequest(txt, min, max): genout = req.json()["choices"][0]["text"] print("{0}{1}{2}".format(colors.CYAN, genout, colors.END)) vars.actions.append(genout) - refresh_story() - emit('from_server', {'cmd': 'texteffect', 'data': len(vars.actions)}) + update_story_chunk('last') + emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() if len(vars.actions) else 0}, broadcast=True) set_aibusy(0) else: @@ -1444,7 +1900,7 @@ def oairequest(txt, min, max): message = er["error"]["message"] errmsg = "OpenAI API Error: {0} - {1}".format(type, message) - emit('from_server', {'cmd': 'errmsg', 'data': errmsg}) + emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True) set_aibusy(0) #==================================================================# @@ -1452,11 +1908,11 @@ def oairequest(txt, min, max): #==================================================================# def exitModes(): if(vars.mode == "edit"): - 
emit('from_server', {'cmd': 'editmode', 'data': 'false'}) + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) elif(vars.mode == "memory"): - emit('from_server', {'cmd': 'memmode', 'data': 'false'}) + emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) elif(vars.mode == "wi"): - emit('from_server', {'cmd': 'wimode', 'data': 'false'}) + emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True) vars.mode = "play" #==================================================================# @@ -1467,16 +1923,62 @@ def saveas(name): name = utils.cleanfilename(name) if(not fileops.saveexists(name) or (vars.saveow and vars.svowname == name)): # All clear to save - saveRequest(getcwd()+"/stories/"+name+".json") - emit('from_server', {'cmd': 'hidesaveas', 'data': ''}) + e = saveRequest(fileops.storypath(name)) vars.saveow = False vars.svowname = "" + if(e is None): + emit('from_server', {'cmd': 'hidesaveas', 'data': ''}) + else: + print("{0}{1}{2}".format(colors.RED, str(e), colors.END)) + emit('from_server', {'cmd': 'popuperror', 'data': str(e)}) else: # File exists, prompt for overwrite vars.saveow = True vars.svowname = name emit('from_server', {'cmd': 'askforoverwrite', 'data': ''}) +#==================================================================# +# Launch in-browser story-delete prompt +#==================================================================# +def deletesave(name): + name = utils.cleanfilename(name) + e = fileops.deletesave(name) + if(e is None): + if(vars.smandelete): + emit('from_server', {'cmd': 'hidepopupdelete', 'data': ''}) + getloadlist() + else: + emit('from_server', {'cmd': 'popuperror', 'data': "The server denied your request to delete this story"}) + else: + print("{0}{1}{2}".format(colors.RED, str(e), colors.END)) + emit('from_server', {'cmd': 'popuperror', 'data': str(e)}) + +#==================================================================# +# Launch in-browser story-rename prompt +#==================================================================# +def renamesave(name, newname): + # Check if filename exists already + name = utils.cleanfilename(name) + newname = utils.cleanfilename(newname) + if(not fileops.saveexists(newname) or name == newname or (vars.saveow and vars.svowname == newname)): + e = fileops.renamesave(name, newname) + vars.saveow = False + vars.svowname = "" + if(e is None): + if(vars.smanrename): + emit('from_server', {'cmd': 'hidepopuprename', 'data': ''}) + getloadlist() + else: + emit('from_server', {'cmd': 'popuperror', 'data': "The server denied your request to rename this story"}) + else: + print("{0}{1}{2}".format(colors.RED, str(e), colors.END)) + emit('from_server', {'cmd': 'popuperror', 'data': str(e)}) + else: + # File exists, prompt for overwrite + vars.saveow = True + vars.svowname = newname + emit('from_server', {'cmd': 'askforoverwrite', 'data': ''}) + #==================================================================# # Save the currently running story #==================================================================# @@ -1504,31 +2006,57 @@ def saveRequest(savpath): # Save path for future saves vars.savedir = savpath - + txtpath = os.path.splitext(savpath)[0] + ".txt" # Build json to write js = {} js["gamestarted"] = vars.gamestarted js["prompt"] = vars.prompt js["memory"] = vars.memory js["authorsnote"] = vars.authornote - js["actions"] = vars.actions + js["actions"] = tuple(vars.actions.values()) js["worldinfo"] = [] - + # Extract only the important bits of WI for wi in 
vars.worldinfo: - if(wi["key"] != ""): + if(wi["constant"] or wi["key"] != ""): js["worldinfo"].append({ "key": wi["key"], - "content": wi["content"] + "keysecondary": wi["keysecondary"], + "content": wi["content"], + "selective": wi["selective"], + "constant": wi["constant"] }) - + + txt = vars.prompt + "".join(vars.actions.values()) + # Write it - file = open(savpath, "w") + try: + file = open(savpath, "w") + except Exception as e: + return e try: file.write(json.dumps(js, indent=3)) - finally: + except Exception as e: file.close() + return e + file.close() + try: + file = open(txtpath, "w") + except Exception as e: + return e + try: + file.write(txt) + except Exception as e: + file.close() + return e + file.close() + + filename = path.basename(savpath) + if(filename.endswith('.json')): + filename = filename[:-5] + vars.laststory = filename + emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) print("{0}Story saved to {1}!{2}".format(colors.GREEN, path.basename(savpath), colors.END)) #==================================================================# @@ -1560,10 +2088,14 @@ def loadRequest(loadpath): vars.gamestarted = js["gamestarted"] vars.prompt = js["prompt"] vars.memory = js["memory"] - vars.actions = js["actions"] vars.worldinfo = [] vars.lastact = "" vars.lastctx = "" + + del vars.actions + vars.actions = structures.KoboldStoryRegister() + for s in js["actions"]: + vars.actions.append(s) # Try not to break older save files if("authorsnote" in js): @@ -1576,9 +2108,12 @@ def loadRequest(loadpath): for wi in js["worldinfo"]: vars.worldinfo.append({ "key": wi["key"], + "keysecondary": wi.get("keysecondary", ""), "content": wi["content"], "num": num, - "init": True + "init": True, + "selective": wi.get("selective", False), + "constant": wi.get("constant", False) }) num += 1 @@ -1591,10 +2126,17 @@ def loadRequest(loadpath): vars.loadselect = "" # Refresh game screen + filename = path.basename(loadpath) + if(filename.endswith('.json')): + filename = filename[:-5] + vars.laststory = filename + emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) sendwi() + emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) refresh_story() - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}) - emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) + emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) print("{0}Story loaded from {1}!{2}".format(colors.GREEN, path.basename(loadpath), colors.END)) #==================================================================# @@ -1616,7 +2158,7 @@ def importRequest(): vars.importjs = vars.importjs["stories"] # Clear Popup Contents - emit('from_server', {'cmd': 'clearpopup', 'data': ''}) + emit('from_server', {'cmd': 'clearpopup', 'data': ''}, broadcast=True) # Initialize vars num = 0 @@ -1670,7 +2212,7 @@ def importgame(): vars.prompt = "" vars.memory = ref["memory"] vars.authornote = ref["authorsNote"] if type(ref["authorsNote"]) is str else "" - vars.actions = [] + vars.actions = structures.KoboldStoryRegister() vars.worldinfo = [] vars.lastact = "" vars.lastctx = "" @@ -1692,9 +2234,12 @@ def importgame(): for wi in ref["worldInfo"]: vars.worldinfo.append({ "key": wi["keys"], + "keysecondary": wi.get("keysecondary", ""), "content": wi["entry"], "num": num, - "init": True + "init": 
True, + "selective": wi.get("selective", False), + "constant": wi.get("constant", False) }) num += 1 @@ -1705,10 +2250,14 @@ def importgame(): vars.savedir = getcwd()+"\stories" # Refresh game screen + vars.laststory = None + emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) sendwi() + emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) refresh_story() - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}) - emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) + emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) #==================================================================# # Import an aidg.club prompt and start a new game with it. @@ -1727,7 +2276,7 @@ def importAidgRequest(id): vars.prompt = js["promptContent"] vars.memory = js["memory"] vars.authornote = js["authorsNote"] - vars.actions = [] + vars.actions = structures.KoboldStoryRegister() vars.worldinfo = [] vars.lastact = "" vars.lastctx = "" @@ -1736,9 +2285,12 @@ def importAidgRequest(id): for wi in js["worldInfos"]: vars.worldinfo.append({ "key": wi["keys"], + "keysecondary": wi.get("keysecondary", ""), "content": wi["entry"], "num": num, - "init": True + "init": True, + "selective": wi.get("selective", False), + "constant": wi.get("constant", False) }) num += 1 @@ -1746,9 +2298,13 @@ def importAidgRequest(id): vars.savedir = getcwd()+"\stories" # Refresh game screen + vars.laststory = None + emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) sendwi() + emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) refresh_story() - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) #==================================================================# # Import World Info JSON file @@ -1767,9 +2323,12 @@ def wiimportrequest(): for wi in js: vars.worldinfo.append({ "key": wi["keys"], + "keysecondary": wi.get("keysecondary", ""), "content": wi["entry"], "num": num, - "init": True + "init": True, + "selective": wi.get("selective", False), + "constant": wi.get("constant", False) }) num += 1 @@ -1789,7 +2348,7 @@ def newGameRequest(): vars.gamestarted = False vars.prompt = "" vars.memory = "" - vars.actions = [] + vars.actions = structures.KoboldStoryRegister() vars.authornote = "" vars.worldinfo = [] @@ -1800,18 +2359,40 @@ def newGameRequest(): vars.savedir = getcwd()+"\stories" # Refresh game screen + vars.laststory = None + emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) sendwi() + emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) setStartState() +def randomGameRequest(topic): + newGameRequest() + vars.memory = "You generate the following " + topic + " story concept :" + actionsubmit("") + vars.memory = "" #==================================================================# # Final startup commands to launch Flask app #==================================================================# if __name__ == "__main__": + # Load settings from client.settings + loadmodelsettings() loadsettings() - + # Start 
Flask/SocketIO (Blocking, so this must be last method!) - print("{0}Server started!\rYou may now connect with a browser at http://127.0.0.1:5000/{1}".format(colors.GREEN, colors.END)) + #socketio.run(app, host='0.0.0.0', port=5000) - socketio.run(app) \ No newline at end of file + if(vars.remote): + from flask_cloudflared import _run_cloudflared + cloudflare = _run_cloudflared(5000) + with open('cloudflare.log', 'w') as cloudflarelog: + cloudflarelog.write("KoboldAI has finished loading and is available in the following link : " + cloudflare) + print(format(colors.GREEN) + "KoboldAI has finished loading and is available in the following link : " + cloudflare + format(colors.END)) + socketio.run(app, host='0.0.0.0', port=5000) + else: + import webbrowser + webbrowser.open_new('http://localhost:5000') + print("{0}Server started!\rYou may now connect with a browser at http://127.0.0.1:5000/{1}".format(colors.GREEN, colors.END)) + socketio.run(app) diff --git a/breakmodel.py b/breakmodel.py new file mode 100644 index 00000000..c5bdde28 --- /dev/null +++ b/breakmodel.py @@ -0,0 +1,489 @@ +''' +This is a MODIFIED version of arrmansa's low VRAM patch. +https://github.com/arrmansa/Basic-UI-for-GPT-J-6B-with-low-vram/blob/main/GPT-J-6B-Low-Vram-UI.ipynb +Copyright 2021 arrmansa +Copyright 2021 finetuneanon +Copyright 2018 The Hugging Face team +Released under the Apache License 2.0 + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+''' + + +import torch +import torch.cuda.comm +import copy +import gc + +from transformers.modeling_outputs import BaseModelOutputWithPast + +from transformers.utils import logging +logger = logging.get_logger(__name__) + + +class MaxSharedRamBlocksException(Exception): + def __init__(self, i: int): + self.corrected_max_shared_ram_blocks = i + super().__init__('max_shared_ram_blocks is set too high, please set it to '+str(i)) + + +breakmodel = True +gpu_device = 'cuda' +total_blocks = 24 +ram_blocks = 7 +max_shared_ram_blocks = None + + +def new_forward( + self, + input_ids=None, + past_key_values=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + embs=None, + ): + global max_shared_ram_blocks + + if breakmodel: + if max_shared_ram_blocks is None: + max_shared_ram_blocks = total_blocks + + if not hasattr(self, 'extrastorage'): + setattr(self,"extrastorage",{}) + torch.cuda.empty_cache() + + for i in range(ram_blocks,len(self.h)): + self.h[i].to(gpu_device) + + for i in range(ram_blocks): + self.h[i].to("cpu") + self.extrastorage[i] = copy.deepcopy(self.h[i]) + smalltensor = torch.tensor(0).to(gpu_device) + for param1 in self.h[i].parameters(): + param1.data = smalltensor + self.h[i].to(gpu_device) + + for i in range(len(self.h)): + for param in self.h[i].parameters(): + param.requires_grad = False + param.data = param.data.detach() + gc.collect() + torch.cuda.empty_cache() + + for i in range(ram_blocks): + for param in self.extrastorage[i].parameters(): + param.requires_grad = False + if i < max_shared_ram_blocks: + try: + param.data = param.data.detach().pin_memory() + except: + raise MaxSharedRamBlocksException(i) + else: + param.data = param.data.detach() + gc.collect() + torch.cuda.empty_cache() + + if ram_blocks: + for param1,param2 in zip(self.h[0].parameters(),self.extrastorage[0].parameters()): + param1.data = param2.data.to(gpu_device, non_blocking=False).detach() + + for param1,param2 in zip(self.h[ram_blocks-1].parameters(),self.extrastorage[ram_blocks-1].parameters()): + param1.data = param2.data.to(gpu_device, non_blocking=False).detach() + #END MODEL BREAK EDITS + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + batch_size = input_ids.shape[0] + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size = inputs_embeds.shape[0] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + if position_ids is not None: + position_ids = position_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.h)) + else: + past_length = 
past_key_values[0][0].size(-2) + + device = input_ids.device if input_ids is not None else inputs_embeds.device + if position_ids is None: + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) + + # Attention mask. + if attention_mask is not None: + assert batch_size > 0, "batch_size has to be defined and > 0" + global_attention_mask = attention_mask.view(batch_size, -1) + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + global_attention_mask = global_attention_mask[:, None, None, :] + + # Since global_attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + global_attention_mask = global_attention_mask.to(dtype=self.dtype) # fp16 compatibility + global_attention_mask = (1.0 - global_attention_mask) * -10000.0 + else: + global_attention_mask = None + + # Local causal attention mask + batch_size, seq_length = input_shape + full_seq_length = seq_length + past_length + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x num_heads x N x N + # head_mask has shape n_layer x batch x num_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.num_layers) + + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids) + + if embs is not None and not (use_cache is not None and use_cache and past_key_values is not None and len(past_key_values) > 0 and past_key_values[0] is not None): + offset = 0 + for pos, emb in embs: + pos += offset + if len(emb.shape) == 2: + emb = emb.repeat(input_shape[0], 1, 1) + inputs_embeds[:, pos:pos+emb.shape[1]] = emb + offset += emb.shape[1] + + if hasattr(self, 'rotary') and self.rotary: + hidden_states = inputs_embeds + else: + position_embeds = self.wpe(position_ids) + hidden_states = inputs_embeds + position_embeds + + if token_type_ids is not None: + token_type_embeds = self.wte(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + hidden_states = self.drop(hidden_states) + + output_shape = input_shape + (hidden_states.size(-1),) + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + + if breakmodel: + copystream = torch.cuda.Stream(device=0,priority = -1) + + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + + if breakmodel: + if i in range(ram_blocks): + index1 = (i+1)%ram_blocks + for param1,param2 in zip(self.h[index1].parameters(),self.h[(i-1)%ram_blocks].parameters()): + param1.data = param2.data + for param1,param2 in zip(self.h[index1].parameters(),self.extrastorage[index1].parameters()): + with torch.cuda.stream(copystream): + torch.cuda.comm.broadcast(param2.data,out = [param1.data]) + + + attn_type = self.config.attention_layers[i] + attn_mask = global_attention_mask + + if output_hidden_states: + all_hidden_states = all_hidden_states + 
(hidden_states.cpu(),) + + if getattr(self.config, "gradient_checkpointing", False) and self.training: + + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " + "`use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, use_cache, output_attentions) + + return custom_forward + + outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), + hidden_states, + None, + attn_mask, + head_mask[i], + ) + else: + outputs = block( + hidden_states, + layer_past=layer_past, + attention_mask=attn_mask, + head_mask=head_mask[i], + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + + + if breakmodel: + if i in range(ram_blocks): + torch.cuda.synchronize() + torch.cuda.empty_cache() + + if breakmodel: + del copystream + + torch.cuda.empty_cache() + + + hidden_states = self.ln_f(hidden_states) + + hidden_states = hidden_states.view(*output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) diff --git a/commandline.bat b/commandline.bat new file mode 100644 index 00000000..f60f61d9 --- /dev/null +++ b/commandline.bat @@ -0,0 +1,15 @@ +@echo off +cd %~dp0 +TITLE CMD for KoboldAI Runtime +SET /P M=nul +call K:\python\condabin\activate +cmd /k \ No newline at end of file diff --git a/environments/finetuneanon.yml b/environments/finetuneanon.yml index 2ac0cc55..aefbdc87 100644 --- a/environments/finetuneanon.yml +++ b/environments/finetuneanon.yml @@ -7,9 +7,11 @@ dependencies: - colorama - flask-socketio - pytorch + - cudatoolkit=11.1 - tensorflow-gpu - python=3.8.* - pip - git - pip: - - git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3-rp-b \ No newline at end of file + - git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3-rp-b + - flask-cloudflared \ No newline at end of file diff --git a/environments/huggingface.yml b/environments/huggingface.yml index bfbd33e5..d3fbd268 100644 --- a/environments/huggingface.yml +++ b/environments/huggingface.yml @@ -8,5 +8,10 @@ dependencies: - colorama - flask-socketio - pytorch + - python=3.8.* + - cudatoolkit=11.1 - tensorflow-gpu - - transformers \ No newline at end of file + - transformers + - pip + - pip: + - flask-cloudflared \ No newline at end of file diff --git a/fileops.py b/fileops.py index dc5cb66e..7fcc44cb 100644 --- a/fileops.py +++ b/fileops.py @@ -1,6 +1,7 @@ import tkinter as tk from tkinter import filedialog from os import getcwd, listdir, path +import os import json #==================================================================# @@ -54,19 +55,34 @@ def getdirpath(dir, title): else: return None +#==================================================================# +# Returns the path (as a string) to the given story by its name +#==================================================================# +def storypath(name): + return 
path.join(path.dirname(path.realpath(__file__)), "stories", name + ".json") + #==================================================================# # Returns an array of dicts containing story files in /stories #==================================================================# def getstoryfiles(): list = [] - for file in listdir(getcwd()+"/stories"): + for file in listdir(path.dirname(path.realpath(__file__))+"/stories"): if file.endswith(".json"): ob = {} ob["name"] = file.replace(".json", "") - f = open(getcwd()+"/stories/"+file, "r") - js = json.load(f) + f = open(path.dirname(path.realpath(__file__))+"/stories/"+file, "r") + try: + js = json.load(f) + except: + print(f"Browser loading error: {file} is malformed or not a JSON file.") + f.close() + continue f.close() - ob["actions"] = len(js["actions"]) + try: + ob["actions"] = len(js["actions"]) + except TypeError: + print(f"Browser loading error: {file} has incorrect format.") + continue list.append(ob) return list @@ -74,4 +90,22 @@ def getstoryfiles(): # Returns True if json file exists with requested save name #==================================================================# def saveexists(name): - return path.exists(getcwd()+"/stories/"+name+".json") \ No newline at end of file + return path.exists(storypath(name)) + +#==================================================================# +# Delete save file by name; returns None if successful, or the exception if not +#==================================================================# +def deletesave(name): + try: + os.remove(storypath(name)) + except Exception as e: + return e + +#==================================================================# +# Rename save file; returns None if successful, or the exception if not +#==================================================================# +def renamesave(name, new_name): + try: + os.replace(storypath(name), storypath(new_name)) + except Exception as e: + return e diff --git a/gensettings.py b/gensettings.py index 8f6a67a0..eb50476e 100644 --- a/gensettings.py +++ b/gensettings.py @@ -6,7 +6,7 @@ gensettingstf = [{ "min": 0.1, "max": 2.0, "step": 0.05, - "default": 1.0, + "default": 0.5, "tooltip": "Randomness of sampling. High values can increase creativity but may make text less sensible. Lower values will make text more predictable but can become repetitious." }, { @@ -14,11 +14,33 @@ gensettingstf = [{ "unit": "float", "label": "Top p Sampling", "id": "settopp", - "min": 0.1, + "min": 0.0, "max": 1.0, "step": 0.05, - "default": 1.0, - "tooltip": "Used to discard unlikely text in the sampling process. Lower values will make text more predictable but can become repetitious." + "default": 0.9, + "tooltip": "Used to discard unlikely text in the sampling process. Lower values will make text more predictable but can become repetitious. (Put this value on 1 to disable its effect)" + }, + { + "uitype": "slider", + "unit": "int", + "label": "Top k Sampling", + "id": "settopk", + "min": 0, + "max": 100, + "step": 1, + "default": 0, + "tooltip": "Alternative sampling method, can be combined with top_p. (Put this value on 0 to disable its effect)" + }, + { + "uitype": "slider", + "unit": "float", + "label": "Tail-free Sampling", + "id": "settfs", + "min": 0.0, + "max": 1.0, + "step": 0.05, + "default": 0.0, + "tooltip": "Alternative sampling method; it is recommended to disable top_p and top_k (set top_p to 1 and top_k to 0) if using this. 0.95 is thought to be a good value. 
(Put this value on 1 to disable its effect)" }, { "uitype": "slider", @@ -28,7 +50,7 @@ gensettingstf = [{ "min": 1.0, "max": 2.0, "step": 0.05, - "default": 1.0, + "default": 1.1, "tooltip": "Used to penalize words that were already generated or belong to the context." }, { @@ -39,7 +61,7 @@ gensettingstf = [{ "min": 16, "max": 512, "step": 2, - "default": 60, + "default": 80, "tooltip": "Number of tokens the AI should generate. Higher numbers will take longer to generate." }, { @@ -50,7 +72,7 @@ gensettingstf = [{ "min": 512, "max": 2048, "step": 8, - "default": 512, + "default": 1024, "tooltip": "Max number of tokens of context to submit to the AI for sampling. Make sure this is higher than Amount to Generate. Higher values increase VRAM/RAM usage." }, { @@ -72,7 +94,7 @@ gensettingstf = [{ "min": 1, "max": 5, "step": 1, - "default": 1, + "default": 3, "tooltip": "Number of historic actions to scan for W Info keys." }, { @@ -85,6 +107,17 @@ gensettingstf = [{ "step": 1, "default": 1, "tooltip": "Whether the prompt should be sent in the context of every action." + }, + { + "uitype": "toggle", + "unit": "bool", + "label": "Adventure Mode", + "id": "setadventure", + "min": 0, + "max": 1, + "step": 1, + "default": 0, + "tooltip": "Turn this on if you are playing a Choose your Adventure model." }] gensettingsik =[{ @@ -95,7 +128,7 @@ gensettingsik =[{ "min": 0.1, "max": 2.0, "step": 0.05, - "default": 1.0, + "default": 0.5, "tooltip": "Randomness of sampling. High values can increase creativity but may make text less sensible. Lower values will make text more predictable but can become repetitious." }, { @@ -103,12 +136,34 @@ gensettingsik =[{ "unit": "float", "label": "Top p Sampling", "id": "settopp", - "min": 0.1, + "min": 0.0, "max": 1.0, "step": 0.05, - "default": 1.0, + "default": 1.1, "tooltip": "Used to discard unlikely text in the sampling process. Lower values will make text more predictable but can become repetitious." }, + { + "uitype": "slider", + "unit": "int", + "label": "Top k Sampling", + "id": "settopk", + "min": 0, + "max": 100, + "step": 1, + "default": 0, + "tooltip": "Alternative sampling method, can be combined with top_p." + }, + { + "uitype": "slider", + "unit": "float", + "label": "Tail-free Sampling", + "id": "settfs", + "min": 0.0, + "max": 1.0, + "step": 0.05, + "default": 0.0, + "tooltip": "Alternative sampling method; it is recommended to disable (set to 0) top_p and top_k if using this. 0.95 is thought to be a good value." + }, { "uitype": "slider", "unit": "int", @@ -128,7 +183,7 @@ gensettingsik =[{ "min": 1, "max": 5, "step": 1, - "default": 1, + "default": 3, "tooltip": "Number of historic actions to scan for W Info keys." }, { @@ -141,6 +196,17 @@ gensettingsik =[{ "step": 1, "default": 1, "tooltip": "Whether the prompt should be sent in the context of every action." + }, + { + "uitype": "toggle", + "unit": "bool", + "label": "Adventure Mode", + "id": "setadventure", + "min": 0, + "max": 1, + "step": 1, + "default": 0, + "tooltip": "Turn this on if you are playing a Choose your Adventure model." }] formatcontrols = [{ diff --git a/install_requirements.bat b/install_requirements.bat index f368446b..756bba76 100644 --- a/install_requirements.bat +++ b/install_requirements.bat @@ -12,6 +12,7 @@ echo. 
SET /P B=Type the number of the desired option and then press ENTER: Reg add "HKLM\SYSTEM\CurrentControlSet\Control\FileSystem" /v "LongPathsEnabled" /t REG_DWORD /d "1" /f 2>nul +%~d0 cd %~dp0 if exist miniconda3\ ( diff --git a/notebook.bat b/notebook.bat new file mode 100644 index 00000000..b5b44948 --- /dev/null +++ b/notebook.bat @@ -0,0 +1,20 @@ +@echo off +cd %~dp0 +TITLE Jupyter for KoboldAI Runtime +SET /P M=nul +umamba.exe install --no-shortcuts -r K:\python\ -n base -c conda-forge jupyter +call K:\python\condabin\activate +jupyter notebook +subst K: /D +cmd /k \ No newline at end of file diff --git a/play.bat b/play.bat index 2d723fd9..67f615c9 100644 --- a/play.bat +++ b/play.bat @@ -1,18 +1,18 @@ @echo off cd %~dp0 -TITLE KoboldAI - Client +TITLE KoboldAI - Server SET /P M=nul call K:\python\condabin\activate -python aiserver.py +python aiserver.py %* subst K: /D cmd /k \ No newline at end of file diff --git a/readme.md b/readme.md new file mode 100644 index 00000000..6a22aa3d --- /dev/null +++ b/readme.md @@ -0,0 +1,156 @@ +# KoboldAI - Your gateway to GPT writing + +This is a browser-based front-end for AI-assisted writing with multiple local & remote AI models. It offers the standard array of tools, including Memory, Author's Note, World Info, Save & Load, adjustable AI settings, formatting options, and the ability to import existing AI Dungeon adventures. You can also turn on Adventure mode and play the game like AI Dungeon Unleashed. + +## Multiple ways to play + +Stories can be played like a Novel, or played like a text adventure game with an easy toggle to change between the two gameplay styles. This makes KoboldAI both a writing assistant and a game. The way you play and how good the AI will be depends on the model or service you decide to use. Whether you want to use the free, fast power of Google Colab, your own high-end graphics card, an online service you have an API key for (like OpenAI or InferKit), or would rather just run it more slowly on your CPU, you will be able to find a way to use KoboldAI that works for you. + +### Adventure mode + +By default KoboldAI will run in a generic mode optimized for writing, but with the right model you can play this like AI Dungeon without any issues. You can enable this in the settings and bring your own prompt, try generating a random prompt, or download one of the prompts available at [prompts.aidg.club](https://prompts.aidg.club). + +The gameplay will be slightly different from the gameplay in AI Dungeon because we adopted the style of the Unleashed fork, giving you full control over all the characters, since we do not automatically adapt your sentences behind the scenes. This means you can more reliably control characters that are not you. + +As a result, what you need to type is slightly different: in AI Dungeon you would type ***take the sword***, while in KoboldAI you would type it as a sentence such as ***You take the sword***, and this is best done with the word You instead of I. + +To speak, simply type: *You say "We should probably gather some supplies first"* +Just typing the quote might work, but the AI is at its best when you specify who does what in your commands. + +If you want to do this with your friends we advise using the main character as You and referring to the other characters by their name if you are playing on a model trained for Adventures. These models assume there is a You in the story.
This mode usually does not perform well on Novel models, because they do not know how to handle this kind of input; those are best used for regular story writing, where you take turns with the AI. + +### Writing assistant + +If you want to use KoboldAI as a writing assistant, this is best done in the regular mode with a model optimized for Novels. These models do not make the assumption that there is a You character and focus on Novel-like writing. For writing these will often give you better results than Adventure or Generic models. That said, if you give it a good introduction to the story, large generic models like 6B can be used if a more specific model is not available for what you wish to write. You can also try to use models that are not specific to what you wish to do, for example an NSFW Novel model for an SFW story if an SFW model is unavailable. This will mean you will have to correct the model more often because of its bias, but it can still produce good enough results if it is familiar enough with your topic. + +## Play KoboldAI online for free on Google Colab (The easiest way to play) + +We provide multiple ready-made versions to get you going; click on the name for a link to the specific version. These run entirely on Google's servers and will automatically upload saves to your Google Drive if you choose to manually save a story. Each version has slightly different instructions on how to use them (many need some space on your Google Drive to run, others may need some manual steps) that are listed on the page. + +TPU editions work on any configuration of TPU Google gives out at the time of writing. GPU editions are subject to a GPU lottery and may crash on launch if you are unlucky (especially if a lot of users are using up the good GPUs or you have been using Colab often). + +[Click here to open the Recommended version](https://henk.tech/colabkobold) + +| Version | Model | Size | Style | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | -------- | --------------- | ------------------------------------------------------------ | +| [Adventure 6B](https://colab.research.google.com/drive/1vdAsD0xCc_YsAXqBUxb_QAwPOXkFJtxm?usp=sharing#sandboxMode=true) | [gpt-j-6b-adventure-jax](https://wandb.ai/ve-forbryderne/adventure/runs/carol-data/files/models) by ve_forbryderne (Download the -hf version if you plan to run this locally) | 6B TPU | Adventure | This is the Recommended version for AI Dungeon players; it is effectively a Free Griffin but with more control. This Colab edition provides better memory than Griffin would have given you, allowing for a more coherent experience. And while it will still generate characters like The Great Litch Lord that AI Dungeon players are familiar with, it was trained on stories beyond AI Dungeon and is more balanced in its approaches. This is a TPU edition so it can fit a lot in memory. | +| [Skein](https://colab.research.google.com/drive/1ZAKgkSyyfiZN87npKYaRM8vL4OF2Btfg?usp=sharing#sandboxMode=true) | gpt-j-6b-skein-jax by ve_forbryderne (Download the -hf version if you plan to run this locally) | 6B TPU | Novel/Adventure | Skein is a hybrid between a Novel model and the Adventure model. Because of this it needs a bit more context about the writing style (needing a few retries in the random story generator if you use this). It was trained on both Light Novels and choose your own adventure stories alongside extra information to help it understand story themes better.
It is recommended to play this with Adventure mode enabled to prevent it from doing "Actions", even if you wish to use it for Novel writing. If you wish to use it for Novel writing, you can do this by toggling the input to Story. | +| [Generic 6B TPU](https://colab.research.google.com/drive/1pG9Gz9PrqklNBESPNaXvfctMVnvwf_Q8#forceEdit=true&sandboxMode=true&scrollTo=jcxnaOk5Th4x) | [Original GPT-6-JAX Slim](https://the-eye.eu/public/AI/GPT-J-6B/step_383500_slim.tar.gz) (Requires a TPU and does not work locally) | 6B TPU | Novel | The recommended model if you want a generic experience. This model is not optimized for anything in particular and works best when you give it a longer introduction. Make sure to include examples for the AI to learn from and write the first part of the story entirely yourself. Then it should be able to learn from your style and continue from there. Very sensitive to a high temp because it knows webpages and code, so when configured incorrectly it will easily end a story with 'Rate my blogpost, follow me on twitter' and the like. | +| [Horni](https://colab.research.google.com/drive/1QwjkK_JeK9aYEkyM_6nrJXQARFMnBDmG?usp=sharing#sandboxMode=true) (Formerly Novel/NSFW) | [GPT-Neo-2.7B-Horni](https://storage.henk.tech/KoboldAI/gpt-neo-2.7B-horni.tar) by finetune | 2.7B GPU | Novel | One of the oldest models in our collection, tuned on Literotica to produce a Novel-style model optimized towards NSFW content. Can still be used for SFW stories but will have a bias towards NSFW content. Because this is an older 2.7B model it is only compatible as a GPU instance. Most GPUs in Colab are powerful enough to run this well but it will crash if you get something weak like an Nvidia P7. | +| [Picard](https://colab.research.google.com/drive/1VNVKtbPaTcmkQzy8bEQkd9SUiUJBdbEL?usp=sharing#sandboxMode=true) | [Picard](https://storage.henk.tech/KoboldAI/gpt-neo-2.7B-picard.7z) by Mr Seeker | 2.7B GPU | Novel | Picard is a model trained for SFW Novels based on GPT-Neo-2.7B. It is focused on Novel-style writing without the NSFW bias. While the name suggests a sci-fi model, this model is designed for Novels of a variety of genres. Most GPUs in Colab are powerful enough to run this well but it will crash if you get something weak like an Nvidia P7. | +| [Shinen](https://colab.research.google.com/drive/1-7Lkj-np2DaSnmq1OdPYkel6W2rh4E-0?usp=sharing#sandboxMode=true) | [Shinen](https://storage.henk.tech/KoboldAI/gpt-neo-2.7B-shinen.7z) by Mr Seeker | 2.7B GPU | Novel | Shinen is an alternative to the Horni model designed to be more explicit. If Horni is too tame for you, Shinen might produce better results. While it is a Novel model, it is unsuitable for SFW stories due to its heavy NSFW bias. Shinen will not hold back. Most GPUs in Colab are powerful enough to run this well but it will crash if you get something weak like an Nvidia P7. | + +## Install KoboldAI on your own computer + +KoboldAI has a large number of dependencies you will need to install on your computer; unfortunately, Python does not make it easy for us to provide instructions that work for everyone. The instructions below will work on most computers, but if you have multiple versions of Python installed, conflicts can occur. + +### Downloading the latest version of KoboldAI + +KoboldAI is a rolling release on our GitHub; the code you see is also the game. The easiest way to download the game is by clicking on the green Code button at the top of the page and clicking Download ZIP.
+ +### Installing KoboldAI on Windows 10 or higher using the KoboldAI Runtime Installer + +1. Extract the .zip to the location where you wish to install KoboldAI; you will need roughly 20GB of free space for the installation (this does not include the models). +2. Open install_requirements.bat as administrator. +3. Choose either the Finetuneanon or the Regular version of transformers (Finetuneanon works better for GPU players but breaks CPU mode; only use this version if you have a modern Nvidia GPU with enough VRAM for the model you wish to run). +4. You will now be asked to choose the installation mode; we **strongly** recommend the Temporary K: drive option for anyone who does not already have a K: drive on their computer. This option eliminates most installation issues and also makes KoboldAI portable. The K: drive will be gone after a reboot and will automatically be recreated each time you play KoboldAI. +5. The installation will now automatically install its requirements; some stages may appear to freeze, so do not close the installer until it asks you to press a key. Before pressing a key to exit the installer, please check if errors occurred. Most problems with the game crashing are related to installation/download errors. Disabling your antivirus can help if you get errors. +6. Use play.bat to play the game. + +### Manual installation / Linux / Mac + +We cannot provide a step-by-step guide for manual installation due to the vast differences between the existing software configurations on our users' systems. + +If you would like to manually install KoboldAI you will need some Python/conda package management knowledge to do one of the following: + +1. Use our bundled environment files to install your own conda environment; this should also automatically install CUDA. +2. If you do not want to use conda, install the requirements listed in requirements.txt and make sure that CUDA is properly installed. +3. Adapt and use our bundled docker files to create your own KoboldAI docker instance. + +### Using an AMD GPU on Linux + +AMD GPUs have terrible compute support; this will currently not work on Windows and will only work for a select few Linux GPUs. [You can find a list of the compatible GPUs here](https://github.com/RadeonOpenCompute/ROCm#Hardware-and-Software-Support). Any GPU that is not listed is guaranteed not to work with KoboldAI, and we will not be able to provide proper support on GPUs that are not compatible with the versions of ROCm we require. This guide requires that you have already followed the appropriate steps to configure both [ROCm](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html) and [Docker](https://docs.docker.com/engine/install/) and is for advanced users only. + +1. Make sure you have installed both the latest version of [Docker](https://docs.docker.com/engine/install/), docker-compose and [ROCm](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html) on your system and have configured your user to have access to the Docker group (sudo can interfere with the dialogues). +2. Assign our play-rocm.sh file execute permissions (chmod +x play-rocm.sh). +3. Run our play-rocm.sh file; it should now automatically install and create a suitable runtime for KoboldAI with AMD support and directly run the game afterwards. For X11 forwarding support you will need to run this as sudo at least once on the local machine.
Otherwise use the command line options to load KoboldAI if you are playing this remotely. +4. Currently, models automatically downloaded by the game are discarded on exit in the Docker version; it is strongly recommended that you manually download a model and load it using the custom model features to prevent unnecessary downloads. + +If you hit strange errors with the ROCm version where it fails on the installation, be sure you are running the latest version of Docker and docker-compose. Some versions will fail on the root elevation or lack the appropriate formats. + +### Troubleshooting + +There are multiple things that can go wrong with the way Python handles its dependencies; unfortunately, we do not have direct step-by-step solutions for every scenario, but there are a few common solutions you can try. + +#### ModuleNotFoundError + +This is ALWAYS either a download/installation failure or a conflict with other versions of Python. This is very common if users chose the subfolder option during the installation while putting KoboldAI in a location that has spaces in the path. It also happens when an antivirus sandboxes the installation or otherwise interferes with the downloads, on systems with low disk space, or when your operating system was not configured for Long File Paths (the installer will configure this on Windows 10 and higher if you run it as administrator; anything other than Windows 10 is not supported by our installers). + +Another reason the installation may have failed is if you have conflicting installations of Python on your machine. If you press the Windows Key + R and enter %appdata% in the Run dialog, it will open the folder where Python installs dependencies on some systems. If you have a Python folder in this location, rename this folder and try to run the installer again. It should now no longer get stuck on existing dependencies. Try the game and see if it works well. If it does, you can try renaming the folder back to see if it remains functional. + +The third reason the installation may have failed is if you have conda/mamba on your system for other reasons; in that case we recommend either removing your existing installations of Python/conda if you do not need them and testing our installer again, or using conda itself with our bundled environment files to let it create its runtime manually. **Keep in mind that if you go the manual route you should NEVER use play.bat but should instead run aiserver.py directly**. + +In general, the fewer versions of Python you have on your system, the higher your chances of it installing correctly. We are consistently trying to mitigate these installation conflicts in our installers, but for some users we cannot yet avoid all conflicts. + +#### GPU not found errors + +GPU not found errors can be caused by a few things: you do not have a suitable Nvidia GPU (it needs Compute Capability 5.0 or higher to be able to play KoboldAI), your Nvidia GPU is supported by KoboldAI but is not yet supported by the latest version of CUDA, or you have a dependency conflict like the ones mentioned above. + +As with Python version conflicts, we recommend uninstalling CUDA from your system if you have manually installed it and do not need it for anything else, and trying again. If your GPU needs CUDA10 to function, open environments\finetuneanon.yml and add a line that says - cudatoolkit=10.2 underneath dependencies:, as in the sketch below.
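As a rough sketch only, the edited dependencies section of environments\finetuneanon.yml could then look something like this (the package list is taken from the bundled file shown earlier in this diff; your copy may differ, and the existing cudatoolkit=11.1 pin would likely need to be replaced rather than kept alongside the new one):

```yaml
# Sketch of environments/finetuneanon.yml for older GPUs that need CUDA10.
# Only the dependencies section is shown here.
dependencies:
  - colorama
  - flask-socketio
  - pytorch
  - cudatoolkit=10.2   # swapped in for the default cudatoolkit=11.1 pin
  - tensorflow-gpu
  - python=3.8.*
  - pip
  - git
  - pip:
    - git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3-rp-b
    - flask-cloudflared
```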
After this you can run the installer again (pick the option to delete the existing files) and it will download a CUDA 10 compatible version.
+
+If you do not have a suitable Nvidia GPU that can run on CUDA 10 or higher and that supports Compute Capability 5.0 or higher, we cannot help you get the game to detect your GPU, unless you are following our ROCm guide with a compatible AMD GPU.
+
+#### "LayerNormKernelImpl" not implemented for 'Half'
+
+This error only occurs when you are trying to run a model in CPU mode while Finetuneanon's version of Transformers is installed. If you want or need to use CPU mode, use the install_requirements.bat file with the Official Transformers option and choose to delete all existing files.
+
+#### vocab.json / config.json is not found error
+
+If you get these errors you either did not select the correct folder for your custom model, or the model you have downloaded is not (yet) compatible with KoboldAI. A few models out there provide a compatible pytorch_model.bin file but do not ship all the other required files. In this case, try downloading a compatible model of the same kind (for example another GPT-Neo if you downloaded a GPT-Neo model) and replace its pytorch_model.bin file with the one you are trying to run. Chances are this will work fine.
+
+## KoboldAI Compatible Models
+
+The models listed in the KoboldAI menu are generic models meant to easily get you going, based on the Hugging Face service. For higher quality models and fully offline use you will need to manually download a suitable model for your style. These are some of the models the community has made available for you, all tested to be compatible with KoboldAI; they will be the brain of the AI.
+
+| **Model** | Type | **(V)RAM** | Repetition Penalty | Description |
+| --- | --- | --- | --- | --- |
+| [gpt-j-6b-adventure-jax-hf](https://api.wandb.ai/files/ve-forbryderne/adventure/carol-data/models/gpt-j-6b-adventure-hf.7z) | Adventure / 6B / Neo Custom | 16GB | 1.2 | This model has been trained on the AI Dungeon set with additional stories thrown in. It is the most well-rounded AI Dungeon-like model and can be seen as an improved Griffin. If you wish to play KoboldAI like AI Dungeon this is the one to pick. It works great with the random story generator if your temp is 0.5. |
+| [gpt-j-6b-skein-jax-hf](https://api.wandb.ai/files/ve-forbryderne/skein/files/gpt-j-6b-skein-hf.7z) | Adventure Novel / 6B / Neo Custom | 16GB | 1.1 | A hybrid of a few different datasets aimed at creating a balanced story-driven experience. If the adventure model is too focused on its own adventures and you want something a bit more generic, this is the one for you. This model understands tags and adventure mode but can also be used as a writing assistant for your novel. It's a good middle ground between a finetuned model and a generic model. It needs more guidance than some of the other models do, making it less suitable for random story generation, but it still focuses on writing rather than websites or code. If you want to use a model for existing story ideas this is a great choice. |
+| [gpt-neo-2.7B-aid](https://storage.henk.tech/KoboldAI/gpt-neo-2.7B-aid.7z) | Adventure / 2.7B / Neo Custom | 8GB | 2.0 | This is one of the closest replications of the original AI Dungeon Classic model. It was tuned on the same data that got uploaded alongside AI Dungeon. In KoboldAI we noticed this model performs better than the conversions of the original AI Dungeon model. It has all the traits you expect of AI Dungeon Classic while not having as many artifacts, as this model was trained specifically for KoboldAI. It must be played with Adventure mode enabled to prevent it from doing actions on your behalf. |
+| [gpt-neo-2.7B-horni](https://storage.henk.tech/KoboldAI/gpt-neo-2.7B-horni.tar) | Novel / 2.7B / Neo Custom | 8GB | 2.0 | One of the best novel models available for 2.7B, focused on NSFW content. This model trains the AI to write in a story-like fashion using a very large collection of Literotica stories. It is one of the original finetuned models for 2.7B. |
+| [gpt-neo-2.7B-horni-ln](https://storage.henk.tech/KoboldAI/gpt-neo-2.7B-horni-ln.7z) | Novel / 2.7B / Neo Custom | 8GB | 2.0 | This model is much like the one above, but has been additionally trained on regular light novels. It is more likely to go SFW and is more focused on themes found in these light novels than on general cultural references. This is a good model for novel writing, especially if you want to add erotica to the mix. |
+| [gpt-neo-2.7B-picard](https://storage.henk.tech/KoboldAI/gpt-neo-2.7B-picard.7z) | Novel / 2.7B / Neo Custom | 8GB | 2.0 | Picard is another novel model, this time exclusively focused on SFW content of various genres. Despite what the name suggests, it goes far beyond Star Trek stories and is not exclusively sci-fi. |
+| [gpt-neo-2.7B-shinen](https://storage.henk.tech/KoboldAI/gpt-neo-2.7B-shinen.7z) | Novel / 2.7B / Neo Custom | 8GB | 2.0 | The most NSFW of them all, Shinen WILL make things sexual. This model will assume that whatever you are doing is meant to be a sex story and will sexualize constantly. It is designed for people who find Horni too tame. It was trained on SexStories instead of Literotica, and was trained on tags, making it easier to guide the AI to the right context. |
+| [GPT-J-6B (Converted)](https://storage.henk.tech/KoboldAI/gpt-j-6b.7z) | Generic / 6B / Neo Custom | 16GB | 1.1 | This is the basis for all the other GPT-J-6B models; it has been trained on The Pile and is an open alternative to GPT Curie. Because it is a generic model it is not particularly good at anything and needs a long introduction to understand what you want to do. It is, however, the most flexible because it has no bias. If you want to do something that has no specific model available, such as writing a webpage article or coding, this can be a good one to try. This specific version was converted by our community to be able to run as a GPT-Neo model on your GPU. |
+| [AID-16Bit](https://storage.henk.tech/KoboldAI/aid-16bit.zip) | Adventure / 1.5B / GPT-2 Custom | 4GB | 2.0 | The original AI Dungeon Classic model converted to PyTorch and then converted to a 16-bit model, making it half the size. |
+| [model_v5_pytorch](https://storage.henk.tech/KoboldAI/model_v5_pytorch.zip) (AI Dungeon's Original Model) | Adventure / 1.5B / GPT-2 Custom | 8GB | 2.0 | This is the original AI Dungeon Classic model converted to the PyTorch format, compatible with AI Dungeon Clover and KoboldAI. We consider this model inferior to the GPT-Neo version because it has more artifacting due to its conversion. It is, however, the most authentic you can get to AI Dungeon Classic. |
+| [Novel 774M](https://storage.henk.tech/KoboldAI/Novel%20model%20774M.rar) | Novel / 774M / GPT-2 Custom | 4GB | 2.0 | Novel 774M is made by the AI Dungeon Clover community. Because of its small size and novel bias it is more suitable for CPU players who want to play with speed over substance, or players who want to test a GPU with a low amount of VRAM. These performance savings come at the cost of story quality, and you should not expect the kind of in-depth story capabilities that the larger models offer. It was trained for SFW stories. |
+| [Smut 774M](https://storage.henk.tech/KoboldAI/Smut%20model%20774M%2030K.rar) | Novel / 774M / GPT-2 Custom | 4GB | 2.0 | The NSFW version of the above; it's a smaller GPT-2 based model made by the AI Dungeon Clover community. It gives decent speed on a CPU at the cost of story quality, like the other 774M models. |
+| [Mia](https://storage.henk.tech/KoboldAI/Mia.7z) | Adventure / 125M / Neo Custom | 1GB | 2.0 | Mia is the smallest Adventure model; it runs at very fast speeds on the CPU, which makes it a good testing model for developers who do not have GPU access. Because of its small size it will constantly attempt to do actions on behalf of the player, and it will not produce high quality stories. If you just need a small model for a quick test, or if you want to take on the challenge of trying to run KoboldAI entirely on your phone, this would be an easy model to use due to its small RAM requirements and fast (loading) speeds. |
+
+## Contributors
+
+This project contains work from the following contributors:
+
+- The Gantian - Creator of KoboldAI, has created most features such as the interface, the different AI model / API integrations and in general the largest part of the project.
+- VE FORBRYDERNE - Contributed many features such as the Editing overhaul, Adventure Mode, expansions to the world info section, breakmodel integration and much more.
+- Henk717 - Contributed the installation scripts, this readme, the random story generator, the docker scripts, the foundation for the commandline interface and other smaller changes, as well as integrating multiple parts of the code of different forks to unite it all. Not all code GitHub attributes to Henk717 is by Henk717, as some of it has been integrations of other people's work. We try to clarify this in the contributors list as much as we can.
+- Frogging101 - top_k / tfs support
+- UWUplus (Ralf) - Contributed storage systems for community colabs, as well as cleaning up and integrating the website dependencies/code better. He is also the maintainer of flask-cloudflared which we use to generate the cloudflare links.
+- Javalar - Initial performance increases on the story_refresh
+- LexSong - Initial environment file adaptation for conda that served as a basis for the install_requirements.bat overhaul.
+- Arrmansa - Breakmodel support for other projects that served as a basis for VE FORBRYDERNE's integration.
+
+As well as various model creators who will be listed near their models, and all the testers who helped make this possible!
+
+Did we miss your contribution? Feel free to issue a commit adding your name to this list.
+
+## License
+
+KoboldAI is licensed under the AGPL license. In short, this means that it can be used by anyone for any purpose.
However, if you decide to make a publicly available instance, your users are entitled to a copy of the source code including all modifications that you have made (which needs to be available through an interface such as a button on your website). You may also not distribute this project in a form that does not contain the source code (such as compiling / encrypting the code and distributing this version without also distributing the source code that includes the changes that you made; you are allowed to distribute this in a closed form if you also provide a separate archive with the source code).
+
+umamba.exe is bundled for convenience because we observed that many of our users had trouble with command line download methods; it is not part of our project and does not fall under the AGPL license. It is licensed under the BSD-3-Clause license.
diff --git a/readme.txt b/readme.txt deleted file mode 100644 index 5e67a05d..00000000 --- a/readme.txt +++ /dev/null @@ -1,80 +0,0 @@ -Thanks for checking out the KoboldAI Client! Get support and updates on the subreddit: -https://www.reddit.com/r/KoboldAI/ - -[ABOUT] - -This is a browser-based front-end for AI-assisted writing with multiple local & remote AI models. -It offers the standard array of tools, including Memory, Author's Note, World Info, Save & Load, -adjustable AI settings, formatting options, and the ability to import exising AI Dungeon adventures. -Current UI Snapshot: https://imgur.com/mjk5Yre - -For local generation, KoboldAI uses Transformers (https://huggingface.co/transformers/) to interact -with the AI models. This can be done either on CPU, or GPU with sufficient hardware. If you have a -high-end GPU with sufficient VRAM to run your model of choice, see -(https://www.tensorflow.org/install/gpu) for instructions on enabling GPU support. - -Transformers/Tensorflow can still be used on CPU if you do not have high-end hardware, but generation -times will be much longer. Alternatively, KoboldAI also supports utilizing remotely-hosted models. -The currently supported remote APIs are InferKit and Google Colab, see the dedicated sections below -for more info on these. - -[SETUP] - -1. Install a 64-bit version of Python. - (Development was done on 3.7, I have not tested newer versions) - Windows download link: https://www.python.org/ftp/python/3.7.9/python-3.7.9-amd64.exe -2. When installing Python make sure "Add Python to PATH" is selected. - (If pip isn't working, run the installer again and choose Modify to choose Optional features.) -3. Run install_requirements.bat. - (This will install the necessary python packages via pip) -4. Run play.bat -5. Select a model from the list. Flask will start and give you a message that it's ready to connect. -6. Open a web browser and enter http://127.0.0.1:5000/ - -[ENABLE COLORS IN WINDOWS 10 COMMAND LINE] - -If you see strange numeric tags in the console output, then your console of choice does not have -color support enabled. On Windows 10, you can enable color support by lanching the registry editor -and adding the REG_DWORD key VirtualTerminalLevel to Computer\HKEY_CURRENT_USER\Console and setting -its value to 1. - -[ENABLE GPU FOR SUPPORTED VIDEO CARDS] - -1. Install NVidia CUDA toolkit from https://developer.nvidia.com/cuda-10.2-download-archive -2. Visit PyTorch's website(https://pytorch.org/get-started/locally/) and select Pip under "Package" -and your version of CUDA under "Compute Platform" (I linked 10.2) to get the pip3 command. -3.
Copy and paste pip3 command into command prompt to install torch with GPU support - -Be aware that when using GPU mode, inference will be MUCH faster but if your GPU doesn't have enough -VRAM to load the model it will crash the application. - -[IMPORT AI DUNGEON GAMES] - -To import your games from AI Dungeon, first grab CuriousNekomimi's AI Dungeon Content Archive Toolkit: -https://github.com/CuriousNekomimi/AIDCAT -Follow the video instructions for getting your access_token, and run aidcat.py in command prompt. -Choose option [1] Download your saved content. -Choose option [2] Download your adventures. -Save the JSON file to your computer using the prompt. -Run KoboldAI, and after connecting to the web GUI, press the Import button at the top. -Navigate to the JSON file exported from AIDCAT and select it. A prompt will appear in the GUI -presenting you with all Adventures scraped from your AI Dungeon account. -Select an Adventure and click the Accept button. - -[HOST GPT-NEO ON GOOGLE COLAB] - -If your computer does not have an 8GB GPU to run GPT-Neo locally, you can now run a Google Colab -notebook hosting a GPT-Neo-2.7B model remotely and connect to it using the KoboldAI client. -See the instructions on the Colab at the link below: -https://colab.research.google.com/drive/1uGe9f4ruIQog3RLxfUsoThakvLpHjIkX?usp=sharing - -[FOR INFERKIT INTEGRATION] - -If you would like to use InferKit's Megatron-11b model, sign up for a free account on their website. -https://inferkit.com/ -After verifying your email address, sign in and click on your profile picture in the top right. -In the drop down menu, click "API Key". -On the API Key page, click "Reveal API Key" and copy it. When starting KoboldAI and selecting the -InferKit API model, you will be asked to paste your API key into the terminal. After entering, -the API key will be stored in the client.settings file for future use. -You can see your remaining budget for generated characters on their website under "Billing & Usage". 
\ No newline at end of file diff --git a/remote-play.bat b/remote-play.bat new file mode 100644 index 00000000..5e429279 --- /dev/null +++ b/remote-play.bat @@ -0,0 +1 @@ +play --remote %* \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 043a9d4b..40f66af4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,7 @@ -transformers == 4.5.1 +git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3-rp-b tensorflow-gpu Flask == 1.1.2 Flask-SocketIO == 5.0.1 requests == 2.25.1 -torch == 1.8.1 \ No newline at end of file +torch == 1.8.1 +flask-cloudflared \ No newline at end of file diff --git a/static/application.js b/static/application.js index e5ebc806..2431db49 100644 --- a/static/application.js +++ b/static/application.js @@ -8,6 +8,7 @@ var socket; // UI references for jQuery var connect_status; var button_newgame; +var button_rndgame; var button_save; var button_saveas; var button_savetofile; @@ -17,12 +18,12 @@ var button_importwi; var button_impaidg; var button_settings; var button_format; +var button_mode; +var button_mode_label; var button_send; -var button_actedit; var button_actmem; var button_actback; var button_actretry; -var button_delete; var button_actwi; var game_text; var input_text; @@ -45,9 +46,9 @@ var aidg_accept; var aidg_close; var saveaspopup; var saveasinput; +var topic; var saveas_accept; var saveas_close; -var saveasoverwrite; var loadpopup; var loadcontent; var load_accept; @@ -55,9 +56,24 @@ var load_close; var nspopup; var ns_accept; var ns_close; +var rspopup; +var rs_accept; +var rs_close; var seqselmenu; var seqselcontents; +var storyname = null; +var memorymode = false; +var memorytext = ""; +var gamestarted = false; +var editmode = false; +var connected = false; +var newly_loaded = true; +var current_editing_chunk = null; +var chunk_conflict = false; +var sman_allow_delete = false; +var sman_allow_rename = false; + // Key states var shift_down = false; var do_clear_ent = false; @@ -65,6 +81,11 @@ var do_clear_ent = false; // Display vars var allowtoggle = false; var formatcount = 0; +var allowedit = true; // Whether clicking on chunks will edit them + +// Adventure +var action_mode = 0; // 0: story, 1: action +var adventure = false; //=================================================================// // METHODS @@ -115,6 +136,9 @@ function addSetting(ob) { if(allowtoggle) { socket.send({'cmd': $(this).attr('id'), 'data': $(this).prop('checked')}); } + if(ob.id == "setadventure"){ + setadventure($(this).prop('checked')); + } }); } } @@ -161,21 +185,53 @@ function addImportLine(ob) { function addWiLine(ob) { if(ob.init) { - wi_menu.append("
\ -
\ - \ - \ - \ -
\ -
\ - \ -
\ -
\ - \ -
\ -
"); + if(ob.selective){ + wi_menu.append("
\ +
\ + \ + \ + \ +
\ +
\ + \ + \ + \ + \ +
\ +
\ + \ +
\ +
\ + \ + \ +
\ +
"); + } else { + wi_menu.append("
\ +
\ + \ + \ + \ +
\ +
\ + \ + \ + \ + \ +
\ +
\ + \ +
\ +
\ + \ + \ +
\ +
"); + } // Send key value to text input $("#wikey"+ob.num).val(ob.key); + $("#wikeyprimary"+ob.num).val(ob.key); + $("#wikeysecondary"+ob.num).val(ob.keysecondary); // Assign delete event to button $("#btn_wi"+ob.num).on("click", function () { showWiDeleteConfirm(ob.num); @@ -188,11 +244,18 @@ function addWiLine(ob) { \ \ \ -
\ - \ +
\ + \ + \ + \ + \
\
\ - \ + \ +
\ +
\ + \ + \
\
"); // Assign function to expand WI item to button @@ -201,16 +264,48 @@ function addWiLine(ob) { }); } // Assign actions to other elements + wientry_onfocus = function () { + $("#constant-key-"+ob.num).addClass("constant-key-icon-clickthrough"); + } + wientry_onfocusout = function () { + $("#constant-key-"+ob.num).removeClass("constant-key-icon-clickthrough"); + } + $("#wikey"+ob.num).on("focus", wientry_onfocus); + $("#wikeyprimary"+ob.num).on("focus", wientry_onfocus); + $("#wikeysecondary"+ob.num).on("focus", wientry_onfocus); + $("#wikey"+ob.num).on("focusout", wientry_onfocusout); + $("#wikeyprimary"+ob.num).on("focusout", wientry_onfocusout); + $("#wikeysecondary"+ob.num).on("focusout", wientry_onfocusout); $("#btn_wican"+ob.num).on("click", function () { hideWiDeleteConfirm(ob.num); }); $("#btn_widel"+ob.num).on("click", function () { socket.send({'cmd': 'widelete', 'data': ob.num}); }); + $("#btn_wiselon"+ob.num).on("click", function () { + enableWiSelective(ob.num); + $("#wikey"+ob.num).addClass("wilistitem-selective"); + }); + $("#btn_wiseloff"+ob.num).on("click", function () { + disableWiSelective(ob.num); + $("#wikey"+ob.num).removeClass("wilistitem-selective"); + }); + $("#constant-key-"+ob.num).on("click", function () { + var element = $("#constant-key-"+ob.num); + if(element.hasClass("constant-key-icon-enabled")) { + socket.send({'cmd': 'wiconstantoff', 'data': ob.num}); + element.removeClass("constant-key-icon-enabled"); + $("#wikey"+ob.num).removeClass("wilistitem-constant"); + } else { + socket.send({'cmd': 'wiconstanton', 'data': ob.num}); + element.addClass("constant-key-icon-enabled"); + $("#wikey"+ob.num).addClass("wilistitem-constant"); + } + }); } function expandWiLine(num) { - show([$("#wikey"+num), $("#wientry"+num)]); + show([$("#wikey"+num), $("#wientry"+num), $("#constant-key-"+num), $("#btn_wiselon"+num)]); $("#btn_wi"+num).html("X"); $("#btn_wi"+num).off(); // Tell server the WI entry was initialized @@ -230,6 +325,22 @@ function hideWiDeleteConfirm(num) { hide([$("#btn_widel"+num), $("#btn_wican"+num)]); } +function enableWiSelective(num) { + hide([$("#btn_wiselon"+num), $("#wikey"+num)]); + // Tell server the WI entry is now selective + socket.send({'cmd': 'wiselon', 'data': num}); + $("#wikeyprimary"+num).val($("#wikey"+num).val()); + show([$("#wikeyprimary"+num), $("#wikeysecondary"+num), $("#btn_wiseloff"+num)]); +} + +function disableWiSelective(num) { + hide([$("#btn_wiseloff"+num), $("#wikeyprimary"+num), $("#wikeysecondary"+num)]); + // Tell server the WI entry is now non-selective + socket.send({'cmd': 'wiseloff', 'data': num}); + $("#wikey"+num).val($("#wikeyprimary"+num).val()); + show([$("#btn_wiselon"+num), $("#wikey"+num)]); +} + function highlightImportLine(ref) { $("#popupcontent > div").removeClass("popuplistselected"); ref.addClass("popuplistselected"); @@ -287,6 +398,12 @@ function hideWaitAnimation() { $('#waitanim').remove(); } +function scrollToBottom() { + setTimeout(function () { + $('#gamescreen').animate({scrollTop: $('#gamescreen').prop('scrollHeight')}, 500); + }, 5); +} + function hide(refs) { for(i=0; i *', function() { - editModeSelect($(this).attr("n")); - }); - disableSendBtn(); - hide([button_actback, button_actmem, button_actretry, button_actwi]); - show([button_delete]); + editmode = true; } function exitEditMode() { - // Remove class to each story chunk - hideMessage(); - button_actedit.html("Edit"); - game_text.children('chunk').removeClass("chunkhov"); - game_text.off('click', '> *'); - enableSendBtn(); - 
show([button_actback, button_actmem, button_actretry, button_actwi]); - hide([button_delete]); - input_text.val(""); -} - -function editModeSelect(n) { - socket.send({'cmd': 'editline', 'data': n}); + editmode = false; } function enterMemoryMode() { + memorymode = true; + setmodevisibility(false); showMessage("Edit the memory to be sent with each request to the AI."); button_actmem.html("Cancel"); - hide([button_actback, button_actretry, button_actedit, button_delete, button_actwi]); + hide([button_actback, button_actretry, button_actwi]); // Display Author's Note field anote_menu.slideDown("fast"); } function exitMemoryMode() { + memorymode = false; + setmodevisibility(adventure); hideMessage(); button_actmem.html("Memory"); - show([button_actback, button_actretry, button_actedit, button_actwi]); + show([button_actback, button_actretry, button_actwi]); input_text.val(""); // Hide Author's Note field anote_menu.slideUp("fast"); @@ -359,7 +459,7 @@ function exitMemoryMode() { function enterWiMode() { showMessage("World Info will be added to memory only when the key appears in submitted text or the last action."); button_actwi.html("Accept"); - hide([button_actedit, button_actback, button_actmem, button_actretry, game_text]); + hide([button_actback, button_actmem, button_actretry, game_text]); show([wi_menu]); disableSendBtn(); } @@ -368,17 +468,21 @@ function exitWiMode() { hideMessage(); button_actwi.html("W Info"); hide([wi_menu]); - show([button_actedit, button_actback, button_actmem, button_actretry, game_text]); + show([button_actback, button_actmem, button_actretry, game_text]); enableSendBtn(); + scrollToBottom(); } function returnWiList(ar) { var list = []; var i; for(i=0; i\ -
"+ar[i].name+"
\ -
"+ar[i].actions+"
\ + loadcontent.append("
\ +
\ + \ +
\ + \ +
\ +
\ +
"+ar[i].name+"
\ +
"+ar[i].actions+"
\ +
\
"); $("#load"+i).on("click", function () { enableButtons([load_accept]); socket.send({'cmd': 'loadselect', 'data': $(this).attr("name")}); highlightLoadLine($(this)); }); + + $("#loaddelete"+i).off("click").on("click", (function (name) { + return function () { + if(!sman_allow_delete) { + return; + } + $("#loadcontainerdelete-storyname").text(name); + $("#btn_dsaccept").off("click").on("click", (function (name) { + return function () { + hide([$(".saveasoverwrite"), $(".popuperror")]); + socket.send({'cmd': 'deletestory', 'data': name}); + } + })(name)); + $("#loadcontainerdelete").removeClass("hidden").addClass("flex"); + } + })(ar[i].name)); + + $("#loadrename"+i).off("click").on("click", (function (name) { + return function () { + if(!sman_allow_rename) { + return; + } + $("#newsavename").val("") + $("#loadcontainerrename-storyname").text(name); + var submit = (function (name) { + return function () { + hide([$(".saveasoverwrite"), $(".popuperror")]); + socket.send({'cmd': 'renamestory', 'data': name, 'newname': $("#newsavename").val()}); + } + })(name); + $("#btn_rensaccept").off("click").on("click", submit); + $("#newsavename").off("keydown").on("keydown", function (ev) { + if (ev.which == 13 && $(this).val() != "") { + submit(); + } + }); + $("#loadcontainerrename").removeClass("hidden").addClass("flex"); + $("#newsavename").val(name).select(); + } + })(ar[i].name)); } } function highlightLoadLine(ref) { - $("#loadlistcontent > div").removeClass("popuplistselected"); + $("#loadlistcontent > div > div.popuplistselected").removeClass("popuplistselected"); ref.addClass("popuplistselected"); } @@ -482,15 +650,24 @@ function hideNewStoryPopup() { nspopup.addClass("hidden"); } +function showRandomStoryPopup() { + rspopup.removeClass("hidden"); + rspopup.addClass("flex"); +} + +function hideRandomStoryPopup() { + rspopup.removeClass("flex"); + rspopup.addClass("hidden"); +} + function setStartState() { enableSendBtn(); enableButtons([button_actmem, button_actwi]); - disableButtons([button_actedit, button_actback, button_actretry]); - hide([wi_menu, button_delete]); - show([game_text, button_actedit, button_actmem, button_actwi, button_actback, button_actretry]); + disableButtons([button_actback, button_actretry]); + hide([wi_menu]); + show([game_text, button_actmem, button_actwi, button_actback, button_actretry]); hideMessage(); hideWaitAnimation(); - button_actedit.html("Edit"); button_actmem.html("Memory"); button_actwi.html("W Info"); hideAidgPopup(); @@ -516,6 +693,201 @@ function hidegenseqs() { $('#seqselmenu').slideUp("slow", function() { seqselcontents.html(""); }); + scrollToBottom(); +} + +function setmodevisibility(state) { + if(state){ // Enabling + show([button_mode]); + $("#inputrow").addClass("show_mode"); + } else{ // Disabling + hide([button_mode]); + $("#inputrow").removeClass("show_mode"); + } +} + +function setadventure(state) { + adventure = state; + if(state) { + game_text.addClass("adventure"); + } else { + game_text.removeClass("adventure"); + } + if(!memorymode){ + setmodevisibility(state); + } +} + +function autofocus(event) { + if(connected) { + if(event.target.tagName == "CHUNK") { + current_editing_chunk = event.target; + } + event.target.focus(); + } else { + event.preventDefault(); + } +} + +function chunkOnKeyDown(event) { + // Make escape commit the changes (Originally we had Enter here to but its not required and nicer for users if we let them type freely + // You can add the following after 27 if you want it back to committing on enter : || 
(!event.shiftKey && event.keyCode == 13) + if(event.keyCode == 27) { + setTimeout(function () { + event.target.blur(); + }, 5); + event.preventDefault(); + return; + } + + // Allow left and right arrow keys (and backspace) to move between chunks + switch(event.keyCode) { + case 37: // left + case 39: // right + var old_range = getSelection().getRangeAt(0); + var old_range_start = old_range.startOffset; + var old_range_end = old_range.endOffset; + var old_range_ancestor = old_range.commonAncestorContainer; + var old_range_start_container = old_range.startContainer; + var old_range_end_container = old_range.endContainer; + setTimeout(function () { + // Wait a few milliseconds and check if the caret has moved + var new_selection = getSelection(); + var new_range = new_selection.getRangeAt(0); + if(old_range_start != new_range.startOffset || old_range_end != new_range.endOffset || old_range_ancestor != new_range.commonAncestorContainer || old_range_start_container != new_range.startContainer || old_range_end_container != new_range.endContainer) { + return; + } + // If it hasn't moved, we're at the beginning or end of a chunk + // and the caret must be moved to a different chunk + var chunk = document.activeElement; + switch(event.keyCode) { + case 37: // left + if((chunk = chunk.previousSibling) && chunk.tagName == "CHUNK") { + var range = document.createRange(); + range.selectNodeContents(chunk); + range.collapse(false); + new_selection.removeAllRanges(); + new_selection.addRange(range); + } + break; + + case 39: // right + if((chunk = chunk.nextSibling) && chunk.tagName == "CHUNK") { + chunk.focus(); + } + } + }, 2); + return; + + case 8: // backspace + var old_length = document.activeElement.innerText.length; + setTimeout(function () { + // Wait a few milliseconds and compare the chunk's length + if(old_length != document.activeElement.innerText.length) { + return; + } + // If it's the same, we're at the beginning of a chunk + if((chunk = document.activeElement.previousSibling) && chunk.tagName == "CHUNK") { + var range = document.createRange(); + var selection = getSelection(); + range.selectNodeContents(chunk); + range.collapse(false); + selection.removeAllRanges(); + selection.addRange(range); + } + }, 2); + return + } + + // Don't allow any edits if not connected to server + if(!connected) { + event.preventDefault(); + return; + } + + // Prevent CTRL+B, CTRL+I and CTRL+U when editing chunks + if(event.ctrlKey || event.metaKey) { // metaKey is macOS's command key + switch(event.keyCode) { + case 66: + case 98: + case 73: + case 105: + case 85: + case 117: + event.preventDefault(); + return; + } + } +} + +function submitEditedChunk(event) { + // Don't do anything if the current chunk hasn't been edited or if someone + // else overwrote it while you were busy lollygagging + if(current_editing_chunk === null || chunk_conflict) { + chunk_conflict = false; + return; + } + + var chunk = current_editing_chunk; + current_editing_chunk = null; + + // Submit the edited chunk if it's not empty, otherwise delete it + if(chunk.innerText.length) { + socket.send({'cmd': 'inlineedit', 'chunk': chunk.getAttribute("n"), 'data': chunk.innerText.replace(/\u00a0/g, " ")}); + } else { + socket.send({'cmd': 'inlinedelete', 'data': chunk.getAttribute("n")}); + } +} + +function downloadStory(format) { + var filename_without_extension = storyname !== null ? 
storyname : "untitled"; + + var anchor = document.createElement('a'); + + var actionlist = $("chunk"); + var actionlist_compiled = []; + for(var i = 0; i < actionlist.length; i++) { + actionlist_compiled.push(actionlist[i].innerText.replace(/\u00a0/g, " ")); + } + + if(format == "plaintext") { + var objectURL = URL.createObjectURL(new Blob(actionlist_compiled)); + anchor.setAttribute('href', objectURL); + anchor.setAttribute('download', filename_without_extension + ".txt"); + anchor.click(); + URL.revokeObjectURL(objectURL); + return; + } + + var wilist = $(".wilistitem"); + var wilist_compiled = []; + for(var i = 0; i < wilist.length-1; i++) { + var selective = wilist[i].classList.contains("wilistitem-selective"); + wilist_compiled.push({ + key: selective ? $("#wikeyprimary"+i).val() : $("#wikey"+i).val(), + keysecondary: $("#wikeysecondary"+i).val(), + content: $("#wientry"+i).val(), + selective: selective, + constant: wilist[i].classList.contains("wilistitem-constant"), + }); + } + + var prompt = actionlist_compiled.shift(); + if(prompt === undefined) { + prompt = ""; + } + var objectURL = URL.createObjectURL(new Blob([JSON.stringify({ + gamestarted: gamestarted, + prompt: prompt, + memory: memorytext, + authorsnote: $("#anoteinput").val(), + actions: actionlist_compiled, + worldinfo: wilist_compiled, + }, null, 3)])); + anchor.setAttribute('href', objectURL); + anchor.setAttribute('download', filename_without_extension + ".json"); + anchor.click(); + URL.revokeObjectURL(objectURL); } //=================================================================// @@ -527,9 +899,12 @@ $(document).ready(function(){ // Bind UI references connect_status = $('#connectstatus'); button_newgame = $('#btn_newgame'); + button_rndgame = $('#btn_rndgame'); button_save = $('#btn_save'); button_saveas = $('#btn_saveas'); button_savetofile = $('#btn_savetofile'); + button_download = $('#btn_download'); + button_downloadtxt= $('#btn_downloadtxt'); button_load = $('#btn_load'); button_loadfrfile = $('#btn_loadfromfile'); button_import = $("#btn_import"); @@ -537,12 +912,12 @@ $(document).ready(function(){ button_impaidg = $("#btn_impaidg"); button_settings = $('#btn_settings'); button_format = $('#btn_format'); + button_mode = $('#btnmode') + button_mode_label = $('#btnmode_label') button_send = $('#btnsend'); - button_actedit = $('#btn_actedit'); button_actmem = $('#btn_actmem'); button_actback = $('#btn_actundo'); button_actretry = $('#btn_actretry'); - button_delete = $('#btn_delete'); button_actwi = $('#btn_actwi'); game_text = $('#gametext'); input_text = $('#input_text'); @@ -565,9 +940,9 @@ $(document).ready(function(){ aidg_close = $("#btn_aidgpopupclose"); saveaspopup = $("#saveascontainer"); saveasinput = $("#savename"); + topic = $("#topic"); saveas_accept = $("#btn_saveasaccept"); saveas_close = $("#btn_saveasclose"); - saveasoverwrite = $("#saveasoverwrite"); loadpopup = $("#loadcontainer"); loadcontent = $("#loadlistcontent"); load_accept = $("#btn_loadaccept"); @@ -575,16 +950,21 @@ $(document).ready(function(){ nspopup = $("#newgamecontainer"); ns_accept = $("#btn_nsaccept"); ns_close = $("#btn_nsclose"); + rspopup = $("#rndgamecontainer"); + rs_accept = $("#btn_rsaccept"); + rs_close = $("#btn_rsclose"); seqselmenu = $("#seqselmenu"); seqselcontents = $("#seqselcontents"); - // Connect to SocketIO server - loc = window.document.location; - socket = io.connect(loc.href); + // Connect to SocketIO server + socket = io.connect(window.document.origin); socket.on('from_server', function(msg) { - 
if(msg.cmd == "connected") { + if(msg.cmd == "connected") { // Connected to Server Actions + sman_allow_delete = msg.hasOwnProperty("smandelete") && msg.smandelete; + sman_allow_rename = msg.hasOwnProperty("smanrename") && msg.smanrename; + connected = true; connect_status.html("Connected to KoboldAI Process!"); connect_status.removeClass("color_orange"); connect_status.addClass("color_green"); @@ -592,26 +972,76 @@ $(document).ready(function(){ settings_menu.html(""); format_menu.html(""); wi_menu.html(""); + // Set up "Allow Editing" + $('body').on('input', autofocus).on('keydown', 'chunk', chunkOnKeyDown).on('focusout', 'chunk', submitEditedChunk); + $('#allowediting').prop('checked', allowedit).prop('disabled', false).change().on('change', function () { + if(allowtoggle) { + allowedit = $(this).prop('checked') + $("chunk").attr('contenteditable', allowedit) + } + }); } else if(msg.cmd == "updatescreen") { + var _gamestarted = gamestarted; + gamestarted = msg.gamestarted; + if(_gamestarted != gamestarted) { + action_mode = 0; + changemode(); + } // Send game content to Game Screen + if(allowedit && document.activeElement.tagName == "CHUNK") { + chunk_conflict = true; + } game_text.html(msg.data); + // Make content editable if need be + $('chunk').attr('contenteditable', allowedit); // Scroll to bottom of text - setTimeout(function () { - $('#gamescreen').animate({scrollTop: $('#gamescreen').prop('scrollHeight')}, 1000); - }, 5); + if(newly_loaded) { + scrollToBottom(); + } + newly_loaded = false; + hideMessage(); + } else if(msg.cmd == "scrolldown") { + scrollToBottom(); + } else if(msg.cmd == "updatechunk") { + hideMessage(); + const {index, html, last} = msg.data; + const existingChunk = game_text.children(`#n${index}`) + const newChunk = $(html); + if (existingChunk.length > 0) { + // Update existing chunk + existingChunk.before(newChunk); + existingChunk.remove(); + } else { + // Append at the end + game_text.append(newChunk); + } + newChunk.attr('contenteditable', allowedit); + hide([$('#curtain')]); + if(last) { + // Scroll to bottom of text if it's the last element + scrollToBottom(); + } + } else if(msg.cmd == "removechunk") { + hideMessage(); + let index = msg.data; + // Remove the chunk + game_text.children(`#n${index}`).remove() + hide([$('#curtain')]); } else if(msg.cmd == "setgamestate") { // Enable or Disable buttons if(msg.data == "ready") { enableSendBtn(); - enableButtons([button_actedit, button_actmem, button_actwi, button_actback, button_actretry]); + enableButtons([button_actmem, button_actwi, button_actback, button_actretry]); hideWaitAnimation(); } else if(msg.data == "wait") { disableSendBtn(); - disableButtons([button_actedit, button_actmem, button_actwi, button_actback, button_actretry]); + disableButtons([button_actmem, button_actwi, button_actback, button_actretry]); showWaitAnimation(); } else if(msg.data == "start") { setStartState(); } + } else if(msg.cmd == "setstoryname") { + storyname = msg.data; } else if(msg.cmd == "editmode") { // Enable or Disable edit mode if(msg.data == "true") { @@ -620,11 +1050,16 @@ $(document).ready(function(){ exitEditMode(); } } else if(msg.cmd == "setinputtext") { - // Set input box text for edit mode - input_text.val(msg.data); - } else if(msg.cmd == "enablesubmit") { - // Enables the submit button - enableSendBtn(); + // Set input box text for memory mode + if(memorymode) { + memorytext = msg.data; + input_text.val(msg.data); + } + } else if(msg.cmd == "setmemory") { + memorytext = msg.data; + if(memorymode) { + 
input_text.val(msg.data); + } } else if(msg.cmd == "memmode") { // Enable or Disable memory edit mode if(msg.data == "true") { @@ -646,6 +1081,14 @@ $(document).ready(function(){ // Send current top p value to input $("#settopp").val(parseFloat(msg.data)); $("#settoppcur").html(msg.data); + } else if(msg.cmd == "updatetopk") { + // Send current top k value to input + $("#settopk").val(parseFloat(msg.data)); + $("#settopkcur").html(msg.data); + } else if(msg.cmd == "updatetfs") { + // Send current tfs value to input + $("#settfs").val(parseFloat(msg.data)); + $("#settfscur").html(msg.data); } else if(msg.cmd == "updatereppen") { // Send current rep pen value to input $("#setreppen").val(parseFloat(msg.data)); @@ -668,6 +1111,12 @@ $(document).ready(function(){ } else if(msg.cmd == "setlabeltopp") { // Update setting label with value from server $("#settoppcur").html(msg.data); + } else if(msg.cmd == "setlabeltopk") { + // Update setting label with value from server + $("#settopkcur").html(msg.data); + } else if(msg.cmd == "setlabeltfs") { + // Update setting label with value from server + $("#settfscur").html(msg.data); } else if(msg.cmd == "setlabelreppen") { // Update setting label with value from server $("#setreppencur").html(msg.data); @@ -718,6 +1167,14 @@ $(document).ready(function(){ } else if(msg.cmd == "popupshow") { // Show/Hide Popup popupShow(msg.data); + } else if(msg.cmd == "hidepopupdelete") { + // Hide the dialog box that asks you to confirm deletion of a story + $("#loadcontainerdelete").removeClass("flex").addClass("hidden"); + hide([$(".saveasoverwrite"), $(".popuperror")]); + } else if(msg.cmd == "hidepopuprename") { + // Hide the story renaming dialog box + $("#loadcontainerrename").removeClass("flex").addClass("hidden"); + hide([$(".saveasoverwrite"), $(".popuperror")]); } else if(msg.cmd == "addimportline") { // Add import popup entry addImportLine(msg.data); @@ -751,7 +1208,11 @@ $(document).ready(function(){ buildLoadList(msg.data); } else if(msg.cmd == "askforoverwrite") { // Show overwrite warning - show([saveasoverwrite]); + show([$(".saveasoverwrite")]); + } else if(msg.cmd == "popuperror") { + // Show error in the current dialog box + $(".popuperror").text(msg.data); + show([$(".popuperror")]); } else if(msg.cmd == "genseqs") { // Parse generator sequences to UI parsegenseqs(msg.data); @@ -775,10 +1236,18 @@ $(document).ready(function(){ } else if(msg.cmd == "updateuseprompt") { // Update toggle state $("#setuseprompt").prop('checked', msg.data).change(); + } else if(msg.cmd == "updateadventure") { + // Update toggle state + $("#setadventure").prop('checked', msg.data).change(); + // Update adventure state + setadventure(msg.data); + } else if(msg.cmd == "runs_remotely") { + hide([button_loadfrfile, button_savetofile, button_import, button_importwi]); } - }); + }); socket.on('disconnect', function() { + connected = false; connect_status.html("Lost connection..."); connect_status.removeClass("color_green"); connect_status.addClass("color_orange"); @@ -788,25 +1257,23 @@ $(document).ready(function(){ button_send.on("click", function(ev) { dosubmit(); }); + + button_mode.on("click", function(ev) { + changemode(); + }); button_actretry.on("click", function(ev) { + hideMessage(); socket.send({'cmd': 'retry', 'data': ''}); hidegenseqs(); }); button_actback.on("click", function(ev) { + hideMessage(); socket.send({'cmd': 'back', 'data': ''}); hidegenseqs(); }); - button_actedit.on("click", function(ev) { - socket.send({'cmd': 'edit', 'data': ''}); - }); - - 
button_delete.on("click", function(ev) { - socket.send({'cmd': 'delete', 'data': ''}); - }); - button_actmem.on("click", function(ev) { socket.send({'cmd': 'memory', 'data': ''}); }); @@ -875,6 +1342,14 @@ $(document).ready(function(){ saveas_accept.on("click", function(ev) { sendSaveAsRequest(); }); + + button_download.on("click", function(ev) { + downloadStory('json'); + }); + + button_downloadtxt.on("click", function(ev) { + downloadStory('plaintext'); + }); button_load.on("click", function(ev) { socket.send({'cmd': 'loadlistrequest', 'data': ''}); @@ -885,6 +1360,7 @@ $(document).ready(function(){ }); load_accept.on("click", function(ev) { + newly_loaded = true; socket.send({'cmd': 'loadrequest', 'data': ''}); hideLoadPopup(); }); @@ -901,6 +1377,38 @@ $(document).ready(function(){ ns_close.on("click", function(ev) { hideNewStoryPopup(); }); + + $("#btn_dsclose").on("click", function () { + $("#loadcontainerdelete").removeClass("flex").addClass("hidden"); + hide([$(".saveasoverwrite"), $(".popuperror")]); + }); + + $("#newsavename").on("input", function (ev) { + if($(this).val() == "") { + disableButtons([$("#btn_rensaccept")]); + } else { + enableButtons([$("#btn_rensaccept")]); + } + hide([$(".saveasoverwrite"), $(".popuperror")]); + }); + + $("#btn_rensclose").on("click", function () { + $("#loadcontainerrename").removeClass("flex").addClass("hidden"); + hide([$(".saveasoverwrite"), $(".popuperror")]); + }); + + button_rndgame.on("click", function(ev) { + showRandomStoryPopup(); + }); + + rs_accept.on("click", function(ev) { + socket.send({'cmd': 'rndgame', 'data': topic.val()}); + hideRandomStoryPopup(); + }); + + rs_close.on("click", function(ev) { + hideRandomStoryPopup(); + }); anote_slider.on("input", function () { socket.send({'cmd': 'anotedepth', 'data': $(this).val()}); @@ -912,7 +1420,7 @@ $(document).ready(function(){ } else { enableButtons([saveas_accept]); } - hide([saveasoverwrite]); + hide([$(".saveasoverwrite"), $(".popuperror")]); }); // Bind Enter button to submit diff --git a/static/custom.css b/static/custom.css index 0fa720ca..da040295 100644 --- a/static/custom.css +++ b/static/custom.css @@ -6,6 +6,20 @@ chunk { color: #ffffff; } +#gametext.adventure action { + color: #9ff7fa; + font-weight: bold; +} + +chunk[contenteditable="true"]:focus, chunk[contenteditable="true"]:focus * { + color: #cdf !important; + font-weight: normal !important; +} + +chunk, chunk * { + outline: 0px solid transparent; +} + #topmenu { background-color: #337ab7; padding: 10px; @@ -53,7 +67,7 @@ chunk { } #gamescreen { - height: 500px; + height: 490px; margin-top: 10px; padding: 10px; display: flex; @@ -72,6 +86,7 @@ chunk { #gametext { max-height: 100%; width: 100%; + word-wrap: break-word; } #seqselmenu { @@ -97,12 +112,21 @@ chunk { margin-left: 20px; } +#inputrow.show_mode { + grid-template-columns: 7% 83% 10%; +} + #inputrow { margin-top: 10px; padding: 0px; width: 100%; display: grid; - grid-template-columns: 90% 10%; + grid-template-columns: 0% 90% 10%; +} + +#inputrowmode { + position: relative; + padding-right: 0px; } #inputrowleft { @@ -121,6 +145,13 @@ chunk { color: #ffffff; } +#btnmode { + width: 100%; + height: 100%; + overflow: auto; + overflow-x: hidden; +} + #btnsend { width: 100%; height: 100%; @@ -163,7 +194,7 @@ chunk { position: absolute; top: 0px; left: 0px; - z-index: 1; + z-index: 3; width: 100%; height: 100%; background-color: rgba(0,0,0,0.5); @@ -240,12 +271,6 @@ chunk { margin-top: 200px; } -#saveasoverwrite { - color: #ff9900; - font-weight: bold; - text-align: 
center; -} - #loadpopup { width: 500px; background-color: #262626; @@ -260,6 +285,18 @@ chunk { } } +#loadpopupdelete { + width: 350px; + background-color: #262626; + margin-top: 200px; +} + +#loadpopuprename { + width: 350px; + background-color: #262626; + margin-top: 200px; +} + #loadlistcontent { height: 325px; overflow-y: scroll; @@ -271,6 +308,12 @@ chunk { margin-top: 200px; } +#rspopup { + width: 800px; + background-color: #262626; + margin-top: 200px; +} + /*================= Classes =================*/ .aidgpopupcontent { @@ -282,6 +325,12 @@ chunk { text-align: center; } +.dialogheader { + padding: 10px 40px 10px 40px; + color: #737373; + text-align: center; +} + .anotelabel { font-size: 10pt; color: #ffffff; @@ -291,15 +340,36 @@ chunk { width: 100px; } +.box { + border-radius: 5px; + border: 1px solid #646464; + padding: 4px; + background: #373737; +} + +.box-label { + color: #ffffff; + padding-left: 10px; + padding-right: 10px; + padding-bottom: 5px; + padding-top: 5px; + display: inline-block; +} + .chunkhov:hover { color: #c0fc51; cursor: pointer; } -.colorfade { +.chunkhov:hover > action { + color: #00fa00; +} + +.colorfade, .colorfade * { -moz-transition:color 1s ease-in; - -o-transition:color 1s ease-in; - -webkit-transition:color 1s ease-in; + -o-transition:color 1s ease-in; + -webkit-transition:color 1s ease-in; + transition:color 1s ease-in; } .color_orange { @@ -339,8 +409,17 @@ chunk { text-decoration: none; } +.edit-flash, .edit-flash * { + color: #3bf723 !important; +} + .flex { display: flex; + align-items: center; +} + +.flex-push-right { + margin-left: auto; } .formatcolumn { @@ -376,21 +455,21 @@ chunk { } .helpicon { - display: inline-block; - font-family: sans-serif; - font-weight: bold; - text-align: center; - width: 2.2ex; - height: 2.4ex; - font-size: 1.4ex; - line-height: 1.8ex; - border-radius: 1.2ex; - margin-right: 4px; - padding: 1px; - color: #295071; - background: #ffffff; - border: 1px solid white; - text-decoration: none; + display: inline-block; + font-family: sans-serif; + font-weight: bold; + text-align: center; + width: 2.2ex; + height: 2.4ex; + font-size: 1.4ex; + line-height: 1.8ex; + border-radius: 1.2ex; + margin-right: 4px; + padding: 1px; + color: #295071; + background: #ffffff; + border: 1px solid white; + text-decoration: none; } .helpicon:hover { @@ -426,22 +505,85 @@ chunk { text-align: right; } -.loadlistheader { - padding-left: 10px; +.layer-container { display: grid; - grid-template-columns: 80% 20%; +} + +.layer-bottom { + grid-area: 1/1; + z-index: 0; +} + +.layer-top { + grid-area: 1/1; + z-index: 2; +} + +.icon-container { + position: relative; +} + +.constant-key-icon { + position: absolute !important; + top: 5px !important; + right: 5px !important; + z-index: 1; + transform: rotate(20deg); + -moz-transform: rotate(20deg); + -webkit-transform: rotate(20deg); + -ms-transform: rotate(20deg); + -o-transform: rotate(20deg); + opacity: 20%; +} + +*:hover > .constant-key-icon { + opacity: 40%; +} + +.constant-key-icon:hover { + opacity: 65%; + cursor: pointer; +} + +.constant-key-icon-enabled { + color: #3bf723; + opacity: 65% +} + +*:hover > .constant-key-icon-enabled { + opacity: 65%; +} + +.constant-key-icon-enabled:hover { + opacity: 100% +} + +.constant-key-icon-clickthrough { + opacity: 0% !important; + pointer-events: none; +} + +.constant-key-icon-clickthrough.constant-key-icon-enabled { + opacity: 35% !important; +} + +.loadlistheader { + padding-left: 68px; + padding-right: 20px; + display: flex; color: #737373; } 
.loadlistitem { padding: 5px 10px 5px 10px; - display: grid; - grid-template-columns: 80% 20%; + display: flex; + flex-grow: 1; color: #ffffff; -moz-transition: background-color 0.25s ease-in; - -o-transition: background-color 0.25s ease-in; - -webkit-transition: background-color 0.25s ease-in; + -o-transition: background-color 0.25s ease-in; + -webkit-transition: background-color 0.25s ease-in; + transition: background-color 0.25s ease-in; } .loadlistitem:hover { @@ -449,13 +591,37 @@ chunk { background-color: #688f1f; } +.loadlistpadding { + padding-right: 10px; +} + +.loadlisticon { + color: #333 +} + +.loadlisticon.allowed { + color: #ddd +} + +.loadlisticon.allowed:hover { + cursor: pointer; +} + +.loadlisticon-delete.allowed:hover { + color: #ef2929 +} + +.loadlisticon-rename.allowed:hover { + color: #fce94f +} + .navbar .navbar-nav .nav-link:hover { - border-radius: 5px; + border-radius: 5px; background-color: #98bcdb; } .navbar .navbar-nav .nav-link:focus { - border-radius: 5px; + border-radius: 5px; background-color: #98bcdb; } @@ -498,11 +664,15 @@ chunk { } +.nowrap { + white-space: nowrap; +} + .popupcontainer { position: absolute; top: 0px; left: 0px; - z-index: 1; + z-index: 3; width: 100%; height: 100%; background-color: rgba(0,0,0,0.5); @@ -517,8 +687,9 @@ chunk { color: #ffffff; -moz-transition: background-color 0.25s ease-in; - -o-transition: background-color 0.25s ease-in; - -webkit-transition: background-color 0.25s ease-in; + -o-transition: background-color 0.25s ease-in; + -webkit-transition: background-color 0.25s ease-in; + transition: background-color 0.25s ease-in; } .popuplistitem:hover { @@ -543,6 +714,11 @@ chunk { font-size: 12pt; } +.popuperror { + color: #ef2929; + text-align: center; +} + .popupfooter { width: 100%; padding: 10px; @@ -557,6 +733,12 @@ chunk { margin-right: 10px; } +.saveasoverwrite { + color: #ff9900; + font-weight: bold; + text-align: center; +} + .seqselheader { color: #737373; } @@ -567,8 +749,9 @@ chunk { padding: 5px; color: #ffffff; -moz-transition: all 0.15s ease-in; - -o-transition: all 0.15s ease-in; - -webkit-transition: all 0.15s ease-in; + -o-transition: all 0.15s ease-in; + -webkit-transition: all 0.15s ease-in; + transition: all 0.15s ease-in; } .seqselitem:hover { @@ -617,15 +800,20 @@ chunk { width: 50px; } +.width-auto { + width: auto; +} + .wilistitem { height: 80px; display: grid; - grid-template-columns: 4% 30% 66%; + grid-template-columns: 4% 30% 58% 8%; margin-bottom: 10px; } .wientry { padding-left: 10px; + padding-right: 10px; background-color: #212122; } @@ -642,7 +830,6 @@ chunk { } .wikey > input { - height: 100%; background-color: #404040; color: #ffffff; } @@ -651,4 +838,8 @@ chunk { width: 80%; overflow: hidden; font-size: 12pt; -} \ No newline at end of file +} + +.wiselective > button { + white-space: normal; +} diff --git a/static/open-iconic-bootstrap.min.css b/static/open-iconic-bootstrap.min.css new file mode 100644 index 00000000..dd5b1613 --- /dev/null +++ b/static/open-iconic-bootstrap.min.css @@ -0,0 +1,118 @@ +/*! + * Open Iconic's icons are released under the MIT license and its fonts are + * released under the SIL Open Font License. 
+ * + * ----------------------------------------------------------------------------- + * + * The MIT License (MIT) + * + * Copyright (c) 2014 Waybury + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * ----------------------------------------------------------------------------- + * + * SIL OPEN FONT LICENSE Version 1.1 + * + * Copyright (c) 2014 Waybury + * + * PREAMBLE + * The goals of the Open Font License (OFL) are to stimulate worldwide + * development of collaborative font projects, to support the font creation + * efforts of academic and linguistic communities, and to provide a free and + * open framework in which fonts may be shared and improved in partnership + * with others. + * + * The OFL allows the licensed fonts to be used, studied, modified and + * redistributed freely as long as they are not sold by themselves. The + * fonts, including any derivative works, can be bundled, embedded, + * redistributed and/or sold with any software provided that any reserved + * names are not used by derivative works. The fonts and derivatives, + * however, cannot be released under any other type of license. The + * requirement for fonts to remain under this license does not apply + * to any document created using the fonts or their derivatives. + * + * DEFINITIONS + * "Font Software" refers to the set of files released by the Copyright + * Holder(s) under this license and clearly marked as such. This may + * include source files, build scripts and documentation. + * + * "Reserved Font Name" refers to any names specified as such after the + * copyright statement(s). + * + * "Original Version" refers to the collection of Font Software components as + * distributed by the Copyright Holder(s). + * + * "Modified Version" refers to any derivative made by adding to, deleting, + * or substituting -- in part or in whole -- any of the components of the + * Original Version, by changing formats or by porting the Font Software to a + * new environment. + * + * "Author" refers to any designer, engineer, programmer, technical + * writer or other person who contributed to the Font Software. 
+ * + * PERMISSION & CONDITIONS + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of the Font Software, to use, study, copy, merge, embed, modify, + * redistribute, and sell modified and unmodified copies of the Font + * Software, subject to the following conditions: + * + * 1) Neither the Font Software nor any of its individual components, + * in Original or Modified Versions, may be sold by itself. + * + * 2) Original or Modified Versions of the Font Software may be bundled, + * redistributed and/or sold with any software, provided that each copy + * contains the above copyright notice and this license. These can be + * included either as stand-alone text files, human-readable headers or + * in the appropriate machine-readable metadata fields within text or + * binary files as long as those fields can be easily viewed by the user. + * + * 3) No Modified Version of the Font Software may use the Reserved Font + * Name(s) unless explicit written permission is granted by the corresponding + * Copyright Holder. This restriction only applies to the primary font name as + * presented to the users. + * + * 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font + * Software shall not be used to promote, endorse or advertise any + * Modified Version, except to acknowledge the contribution(s) of the + * Copyright Holder(s) and the Author(s) or with their explicit written + * permission. + * + * 5) The Font Software, modified or unmodified, in part or in whole, + * must be distributed entirely under this license, and must not be + * distributed under any other license. The requirement for fonts to + * remain under this license does not apply to any document created + * using the Font Software. + * + * TERMINATION + * This license becomes null and void if any of the above conditions are + * not met. + * + * DISCLAIMER + * THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT + * OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL + * DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM + * OTHER DEALINGS IN THE FONT SOFTWARE. 
+ */ +@font-face{font-family:Icons;src:url(open-iconic.eot);src:url(open-iconic.eot?#iconic-sm) format('embedded-opentype'),url(open-iconic.woff) format('woff'),url(open-iconic.ttf) format('truetype'),url(open-iconic.otf) format('opentype'),url(open-iconic.svg#iconic-sm) format('svg');font-weight:400;font-style:normal}.oi{position:relative;top:1px;display:inline-block;speak:none;font-family:Icons;font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.oi:empty:before{width:1em;text-align:center;box-sizing:content-box}.oi.oi-align-center:before{text-align:center}.oi.oi-align-left:before{text-align:left}.oi.oi-align-right:before{text-align:right}.oi.oi-flip-horizontal:before{-webkit-transform:scale(-1,1);-ms-transform:scale(-1,1);transform:scale(-1,1)}.oi.oi-flip-vertical:before{-webkit-transform:scale(1,-1);-ms-transform:scale(-1,1);transform:scale(1,-1)}.oi.oi-flip-horizontal-vertical:before{-webkit-transform:scale(-1,-1);-ms-transform:scale(-1,1);transform:scale(-1,-1)}.oi-account-login:before{content:'\e000'}.oi-account-logout:before{content:'\e001'}.oi-action-redo:before{content:'\e002'}.oi-action-undo:before{content:'\e003'}.oi-align-center:before{content:'\e004'}.oi-align-left:before{content:'\e005'}.oi-align-right:before{content:'\e006'}.oi-aperture:before{content:'\e007'}.oi-arrow-bottom:before{content:'\e008'}.oi-arrow-circle-bottom:before{content:'\e009'}.oi-arrow-circle-left:before{content:'\e00a'}.oi-arrow-circle-right:before{content:'\e00b'}.oi-arrow-circle-top:before{content:'\e00c'}.oi-arrow-left:before{content:'\e00d'}.oi-arrow-right:before{content:'\e00e'}.oi-arrow-thick-bottom:before{content:'\e00f'}.oi-arrow-thick-left:before{content:'\e010'}.oi-arrow-thick-right:before{content:'\e011'}.oi-arrow-thick-top:before{content:'\e012'}.oi-arrow-top:before{content:'\e013'}.oi-audio-spectrum:before{content:'\e014'}.oi-audio:before{content:'\e015'}.oi-badge:before{content:'\e016'}.oi-ban:before{content:'\e017'}.oi-bar-chart:before{content:'\e018'}.oi-basket:before{content:'\e019'}.oi-battery-empty:before{content:'\e01a'}.oi-battery-full:before{content:'\e01b'}.oi-beaker:before{content:'\e01c'}.oi-bell:before{content:'\e01d'}.oi-bluetooth:before{content:'\e01e'}.oi-bold:before{content:'\e01f'}.oi-bolt:before{content:'\e020'}.oi-book:before{content:'\e021'}.oi-bookmark:before{content:'\e022'}.oi-box:before{content:'\e023'}.oi-briefcase:before{content:'\e024'}.oi-british-pound:before{content:'\e025'}.oi-browser:before{content:'\e026'}.oi-brush:before{content:'\e027'}.oi-bug:before{content:'\e028'}.oi-bullhorn:before{content:'\e029'}.oi-calculator:before{content:'\e02a'}.oi-calendar:before{content:'\e02b'}.oi-camera-slr:before{content:'\e02c'}.oi-caret-bottom:before{content:'\e02d'}.oi-caret-left:before{content:'\e02e'}.oi-caret-right:before{content:'\e02f'}.oi-caret-top:before{content:'\e030'}.oi-cart:before{content:'\e031'}.oi-chat:before{content:'\e032'}.oi-check:before{content:'\e033'}.oi-chevron-bottom:before{content:'\e034'}.oi-chevron-left:before{content:'\e035'}.oi-chevron-right:before{content:'\e036'}.oi-chevron-top:before{content:'\e037'}.oi-circle-check:before{content:'\e038'}.oi-circle-x:before{content:'\e039'}.oi-clipboard:before{content:'\e03a'}.oi-clock:before{content:'\e03b'}.oi-cloud-download:before{content:'\e03c'}.oi-cloud-upload:before{content:'\e03d'}.oi-cloud:before{content:'\e03e'}.oi-cloudy:before{content:'\e03f'}.oi-code:before{content:'\e040'}.oi-cog:before{content:'\e041'}.oi-collapse-down:before{co
ntent:'\e042'}.oi-collapse-left:before{content:'\e043'}.oi-collapse-right:before{content:'\e044'}.oi-collapse-up:before{content:'\e045'}.oi-command:before{content:'\e046'}.oi-comment-square:before{content:'\e047'}.oi-compass:before{content:'\e048'}.oi-contrast:before{content:'\e049'}.oi-copywriting:before{content:'\e04a'}.oi-credit-card:before{content:'\e04b'}.oi-crop:before{content:'\e04c'}.oi-dashboard:before{content:'\e04d'}.oi-data-transfer-download:before{content:'\e04e'}.oi-data-transfer-upload:before{content:'\e04f'}.oi-delete:before{content:'\e050'}.oi-dial:before{content:'\e051'}.oi-document:before{content:'\e052'}.oi-dollar:before{content:'\e053'}.oi-double-quote-sans-left:before{content:'\e054'}.oi-double-quote-sans-right:before{content:'\e055'}.oi-double-quote-serif-left:before{content:'\e056'}.oi-double-quote-serif-right:before{content:'\e057'}.oi-droplet:before{content:'\e058'}.oi-eject:before{content:'\e059'}.oi-elevator:before{content:'\e05a'}.oi-ellipses:before{content:'\e05b'}.oi-envelope-closed:before{content:'\e05c'}.oi-envelope-open:before{content:'\e05d'}.oi-euro:before{content:'\e05e'}.oi-excerpt:before{content:'\e05f'}.oi-expand-down:before{content:'\e060'}.oi-expand-left:before{content:'\e061'}.oi-expand-right:before{content:'\e062'}.oi-expand-up:before{content:'\e063'}.oi-external-link:before{content:'\e064'}.oi-eye:before{content:'\e065'}.oi-eyedropper:before{content:'\e066'}.oi-file:before{content:'\e067'}.oi-fire:before{content:'\e068'}.oi-flag:before{content:'\e069'}.oi-flash:before{content:'\e06a'}.oi-folder:before{content:'\e06b'}.oi-fork:before{content:'\e06c'}.oi-fullscreen-enter:before{content:'\e06d'}.oi-fullscreen-exit:before{content:'\e06e'}.oi-globe:before{content:'\e06f'}.oi-graph:before{content:'\e070'}.oi-grid-four-up:before{content:'\e071'}.oi-grid-three-up:before{content:'\e072'}.oi-grid-two-up:before{content:'\e073'}.oi-hard-drive:before{content:'\e074'}.oi-header:before{content:'\e075'}.oi-headphones:before{content:'\e076'}.oi-heart:before{content:'\e077'}.oi-home:before{content:'\e078'}.oi-image:before{content:'\e079'}.oi-inbox:before{content:'\e07a'}.oi-infinity:before{content:'\e07b'}.oi-info:before{content:'\e07c'}.oi-italic:before{content:'\e07d'}.oi-justify-center:before{content:'\e07e'}.oi-justify-left:before{content:'\e07f'}.oi-justify-right:before{content:'\e080'}.oi-key:before{content:'\e081'}.oi-laptop:before{content:'\e082'}.oi-layers:before{content:'\e083'}.oi-lightbulb:before{content:'\e084'}.oi-link-broken:before{content:'\e085'}.oi-link-intact:before{content:'\e086'}.oi-list-rich:before{content:'\e087'}.oi-list:before{content:'\e088'}.oi-location:before{content:'\e089'}.oi-lock-locked:before{content:'\e08a'}.oi-lock-unlocked:before{content:'\e08b'}.oi-loop-circular:before{content:'\e08c'}.oi-loop-square:before{content:'\e08d'}.oi-loop:before{content:'\e08e'}.oi-magnifying-glass:before{content:'\e08f'}.oi-map-marker:before{content:'\e090'}.oi-map:before{content:'\e091'}.oi-media-pause:before{content:'\e092'}.oi-media-play:before{content:'\e093'}.oi-media-record:before{content:'\e094'}.oi-media-skip-backward:before{content:'\e095'}.oi-media-skip-forward:before{content:'\e096'}.oi-media-step-backward:before{content:'\e097'}.oi-media-step-forward:before{content:'\e098'}.oi-media-stop:before{content:'\e099'}.oi-medical-cross:before{content:'\e09a'}.oi-menu:before{content:'\e09b'}.oi-microphone:before{content:'\e09c'}.oi-minus:before{content:'\e09d'}.oi-monitor:before{content:'\e09e'}.oi-moon:before{content:'\e09f'}.oi-move:before{cont
ent:'\e0a0'}.oi-musical-note:before{content:'\e0a1'}.oi-paperclip:before{content:'\e0a2'}.oi-pencil:before{content:'\e0a3'}.oi-people:before{content:'\e0a4'}.oi-person:before{content:'\e0a5'}.oi-phone:before{content:'\e0a6'}.oi-pie-chart:before{content:'\e0a7'}.oi-pin:before{content:'\e0a8'}.oi-play-circle:before{content:'\e0a9'}.oi-plus:before{content:'\e0aa'}.oi-power-standby:before{content:'\e0ab'}.oi-print:before{content:'\e0ac'}.oi-project:before{content:'\e0ad'}.oi-pulse:before{content:'\e0ae'}.oi-puzzle-piece:before{content:'\e0af'}.oi-question-mark:before{content:'\e0b0'}.oi-rain:before{content:'\e0b1'}.oi-random:before{content:'\e0b2'}.oi-reload:before{content:'\e0b3'}.oi-resize-both:before{content:'\e0b4'}.oi-resize-height:before{content:'\e0b5'}.oi-resize-width:before{content:'\e0b6'}.oi-rss-alt:before{content:'\e0b7'}.oi-rss:before{content:'\e0b8'}.oi-script:before{content:'\e0b9'}.oi-share-boxed:before{content:'\e0ba'}.oi-share:before{content:'\e0bb'}.oi-shield:before{content:'\e0bc'}.oi-signal:before{content:'\e0bd'}.oi-signpost:before{content:'\e0be'}.oi-sort-ascending:before{content:'\e0bf'}.oi-sort-descending:before{content:'\e0c0'}.oi-spreadsheet:before{content:'\e0c1'}.oi-star:before{content:'\e0c2'}.oi-sun:before{content:'\e0c3'}.oi-tablet:before{content:'\e0c4'}.oi-tag:before{content:'\e0c5'}.oi-tags:before{content:'\e0c6'}.oi-target:before{content:'\e0c7'}.oi-task:before{content:'\e0c8'}.oi-terminal:before{content:'\e0c9'}.oi-text:before{content:'\e0ca'}.oi-thumb-down:before{content:'\e0cb'}.oi-thumb-up:before{content:'\e0cc'}.oi-timer:before{content:'\e0cd'}.oi-transfer:before{content:'\e0ce'}.oi-trash:before{content:'\e0cf'}.oi-underline:before{content:'\e0d0'}.oi-vertical-align-bottom:before{content:'\e0d1'}.oi-vertical-align-center:before{content:'\e0d2'}.oi-vertical-align-top:before{content:'\e0d3'}.oi-video:before{content:'\e0d4'}.oi-volume-high:before{content:'\e0d5'}.oi-volume-low:before{content:'\e0d6'}.oi-volume-off:before{content:'\e0d7'}.oi-warning:before{content:'\e0d8'}.oi-wifi:before{content:'\e0d9'}.oi-wrench:before{content:'\e0da'}.oi-x:before{content:'\e0db'}.oi-yen:before{content:'\e0dc'}.oi-zoom-in:before{content:'\e0dd'}.oi-zoom-out:before{content:'\e0de'} \ No newline at end of file diff --git a/static/open-iconic.woff b/static/open-iconic.woff new file mode 100644 index 00000000..f9309988 Binary files /dev/null and b/static/open-iconic.woff differ diff --git a/structures.py b/structures.py new file mode 100644 index 00000000..287f92c1 --- /dev/null +++ b/structures.py @@ -0,0 +1,40 @@ +import collections +from typing import Iterable, Tuple + + +class KoboldStoryRegister(collections.OrderedDict): + ''' + Complexity-optimized class for keeping track of story chunks + ''' + + def __init__(self, sequence: Iterable[Tuple[int, str]] = ()): + super().__init__(sequence) + self.__next_id: int = len(sequence) + + def append(self, v: str) -> None: + self[self.__next_id] = v + self.increment_id() + + def pop(self) -> str: + return self.popitem()[1] + + def get_first_key(self) -> int: + return next(iter(self)) + + def get_last_key(self) -> int: + return next(reversed(self)) + + def __getitem__(self, k: int) -> str: + return super().__getitem__(k) + + def __setitem__(self, k: int, v: str) -> None: + return super().__setitem__(k, v) + + def increment_id(self) -> None: + self.__next_id += 1 + + def get_next_id(self) -> int: + return self.__next_id + + def set_next_id(self, x: int) -> None: + self.__next_id = x diff --git a/templates/index.html 
b/templates/index.html index 6efac6c5..e9bf94e4 100644 --- a/templates/index.html +++ b/templates/index.html @@ -13,6 +13,7 @@ +
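The structures.py file added above introduces KoboldStoryRegister, an OrderedDict subclass that hands out a monotonically increasing integer key for each story chunk. Below is a minimal usage sketch, assuming the module is importable as structures; the chunk texts and the variable name actions are illustrative placeholders, not taken from the repository. Two behaviours worth noting from the code itself: __init__ calls len(sequence), so it expects a sized sequence of (id, text) pairs rather than an arbitrary iterator, and pop() removes the most recent chunk without decrementing the id counter, so keys are never reused.

# Hedged usage sketch of the KoboldStoryRegister class defined in structures.py above.
# Chunk texts are made-up examples; only the class behaviour shown comes from that code.
from structures import KoboldStoryRegister

actions = KoboldStoryRegister()           # empty register; ids start at 0
actions.append("You enter the cave.")     # stored under key 0
actions.append("A dragon stirs nearby.")  # stored under key 1

print(actions.get_first_key())            # 0
print(actions.get_last_key())             # 1
print(actions.pop())                      # "A dragon stirs nearby." (removes the newest chunk)

# pop() leaves the internal counter untouched, so the next chunk gets a fresh key.
actions.append("You light a torch.")      # stored under key 2
print(actions.get_next_id())              # 3

# Rebuilding from saved (id, text) pairs; a sized sequence is required because
# __init__ seeds the id counter with len(sequence).
restored = KoboldStoryRegister([(0, "You enter the cave."), (1, "You light a torch.")])
print(restored.get_last_key())            # 1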
@@ -27,8 +28,12 @@
-
- ... -