From 13fcf462e958d635415591edf01640449f54a0a5 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 22 Jun 2022 11:14:37 -0400 Subject: [PATCH 0001/1297] Moved VARS to koboldai_settings and broken into model, story, user, system variables. story class also re-written to include options (actions_metadata). actions_metadata will be removed in UI2. --- aiserver.py | 3063 ++++++++++++++++++++---------------------- bridge.lua | 46 +- koboldai_settings.py | 492 +++++++ test_aiserver.py | 2 +- tpu_mtj_backend.py | 64 +- utils.py | 30 +- 6 files changed, 2040 insertions(+), 1657 deletions(-) create mode 100644 koboldai_settings.py diff --git a/aiserver.py b/aiserver.py index 4bbd89b0..02e010c1 100644 --- a/aiserver.py +++ b/aiserver.py @@ -53,7 +53,7 @@ import fileops import gensettings from utils import debounce import utils -import structures +import koboldai_settings import torch from transformers import StoppingCriteria, GPT2TokenizerFast, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModelForCausalLM, AutoTokenizer, PreTrainedModel, modeling_utils from transformers import __version__ as transformers_version @@ -109,7 +109,7 @@ class colors: # AI models Menu # This is a dict of lists where they key is the menu name, and the list is the menu items. 
-# Each item takes the 4 elements, 1: Text to display, 2: Model Name (var.model) or menu name (Key name for another menu), +# Each item takes the 4 elements, 1: Text to display, 2: Model Name (model_settings.model) or menu name (Key name for another menu), # 3: the memory requirement for the model, 4: if the item is a menu or not (True/False) model_menu = { 'mainmenu': [ @@ -212,141 +212,16 @@ model_menu = { ["Return to Main Menu", "mainmenu", "", True], ] } -# Variables -class vars: - lastact = "" # The last action received from the user - submission = "" # Same as above, but after applying input formatting - lastctx = "" # The last context submitted to the generator - model = "" # Model ID string chosen at startup - model_type = "" # Model Type (Automatically taken from the model config) - noai = False # Runs the script without starting up the transformers pipeline - aibusy = False # Stops submissions while the AI is working - max_length = 2048 # Maximum number of tokens to submit per action - ikmax = 3000 # Maximum number of characters to submit to InferKit - genamt = 80 # Amount of text for each action to generate - ikgen = 200 # Number of characters for InferKit to generate - rep_pen = 1.1 # Default generator repetition_penalty - rep_pen_slope = 0.7 # Default generator repetition penalty slope - rep_pen_range = 1024 # Default generator repetition penalty range - temp = 0.5 # Default generator temperature - top_p = 0.9 # Default generator top_p - top_k = 0 # Default generator top_k - top_a = 0.0 # Default generator top-a - tfs = 1.0 # Default generator tfs (tail-free sampling) - typical = 1.0 # Default generator typical sampling threshold - numseqs = 1 # Number of sequences to ask the generator to create - gamestarted = False # Whether the game has started (disables UI elements) - gamesaved = True # Whether or not current game is saved - serverstarted = False # Whether or not the Flask server has started - prompt = "" # Prompt - memory = "" # Text submitted to 
memory field - authornote = "" # Text submitted to Author's Note field - authornotetemplate = "[Author's note: <|>]" # Author's note template - setauthornotetemplate = authornotetemplate # Saved author's note template in settings - andepth = 3 # How far back in history to append author's note - actions = structures.KoboldStoryRegister() # Actions submitted by user and AI - actions_metadata = {} # List of dictonaries, one dictonary for every action that contains information about the action like alternative options. - # Contains at least the same number of items as actions. Back action will remove an item from actions, but not actions_metadata - # Dictonary keys are: - # Selected Text: (text the user had selected. None when this is a newly generated action) - # Alternative Generated Text: {Text, Pinned, Previous Selection, Edited} - # - worldinfo = [] # List of World Info key/value objects - worldinfo_i = [] # List of World Info key/value objects sans uninitialized entries - worldinfo_u = {} # Dictionary of World Info UID - key/value pairs - wifolders_d = {} # Dictionary of World Info folder UID-info pairs - wifolders_l = [] # List of World Info folder UIDs - wifolders_u = {} # Dictionary of pairs of folder UID - list of WI UID - modelconfig = {} # Raw contents of the model's config.json, or empty dictionary if none found - lua_state = None # Lua state of the Lua scripting system - lua_koboldbridge = None # `koboldbridge` from bridge.lua - lua_kobold = None # `kobold` from` bridge.lua - lua_koboldcore = None # `koboldcore` from bridge.lua - lua_logname = ... # Name of previous userscript that logged to terminal - lua_running = False # Whether or not Lua is running (i.e. 
wasn't stopped due to an error) - lua_edited = set() # Set of chunk numbers that were edited from a Lua generation modifier - lua_deleted = set() # Set of chunk numbers that were deleted from a Lua generation modifier - generated_tkns = 0 # If using a backend that supports Lua generation modifiers, how many tokens have already been generated, otherwise 0 - abort = False # Whether or not generation was aborted by clicking on the submit button during generation - compiling = False # If using a TPU Colab, this will be set to True when the TPU backend starts compiling and then set to False again - checking = False # Whether or not we are actively checking to see if TPU backend is compiling or not - sp_changed = False # This gets set to True whenever a userscript changes the soft prompt so that check_for_sp_change() can alert the browser that the soft prompt has changed - spfilename = "" # Filename of soft prompt to load, or an empty string if not using a soft prompt - userscripts = [] # List of userscripts to load - last_userscripts = [] # List of previous userscript filenames from the previous time userscripts were send via usstatitems - corescript = "default.lua" # Filename of corescript to load - # badwords = [] # Array of str/chr values that should be removed from output - badwordsids = [] - badwordsids_default = [[13460], [6880], [50256], [42496], [4613], [17414], [22039], [16410], [27], [29], [38430], [37922], [15913], [24618], [28725], [58], [47175], [36937], [26700], [12878], [16471], [37981], [5218], [29795], [13412], [45160], [3693], [49778], [4211], [20598], [36475], [33409], [44167], [32406], [29847], [29342], [42669], [685], [25787], [7359], [3784], [5320], [33994], [33490], [34516], [43734], [17635], [24293], [9959], [23785], [21737], [28401], [18161], [26358], [32509], [1279], [38155], [18189], [26894], [6927], [14610], [23834], [11037], [14631], [26933], [46904], [22330], [25915], [47934], [38214], [1875], [14692], [41832], [13163], [25970], [29565], 
[44926], [19841], [37250], [49029], [9609], [44438], [16791], [17816], [30109], [41888], [47527], [42924], [23984], [49074], [33717], [31161], [49082], [30138], [31175], [12240], [14804], [7131], [26076], [33250], [3556], [38381], [36338], [32756], [46581], [17912], [49146]] # Tokenized array of badwords used to prevent AI artifacting - badwordsids_neox = [[0], [1], [44162], [9502], [12520], [31841], [36320], [49824], [34417], [6038], [34494], [24815], [26635], [24345], [3455], [28905], [44270], [17278], [32666], [46880], [7086], [43189], [37322], [17778], [20879], [49821], [3138], [14490], [4681], [21391], [26786], [43134], [9336], [683], [48074], [41256], [19181], [29650], [28532], [36487], [45114], [46275], [16445], [15104], [11337], [1168], [5647], [29], [27482], [44965], [43782], [31011], [42944], [47389], [6334], [17548], [38329], [32044], [35487], [2239], [34761], [7444], [1084], [12399], [18990], [17636], [39083], [1184], [35830], [28365], [16731], [43467], [47744], [1138], [16079], [40116], [45564], [18297], [42368], [5456], [18022], [42696], [34476], [23505], [23741], [39334], [37944], [45382], [38709], [33440], [26077], [43600], [34418], [36033], [6660], [48167], [48471], [15775], [19884], [41533], [1008], [31053], [36692], [46576], [20095], [20629], [31759], [46410], [41000], [13488], [30952], [39258], [16160], [27655], [22367], [42767], [43736], [49694], [13811], [12004], [46768], [6257], [37471], [5264], [44153], [33805], [20977], [21083], [25416], [14277], [31096], [42041], [18331], [33376], [22372], [46294], [28379], [38475], [1656], [5204], [27075], [50001], [16616], [11396], [7748], [48744], [35402], [28120], [41512], [4207], [43144], [14767], [15640], [16595], [41305], [44479], [38958], [18474], [22734], [30522], [46267], [60], [13976], [31830], [48701], [39822], [9014], [21966], [31422], [28052], [34607], [2479], [3851], [32214], [44082], [45507], [3001], [34368], [34758], [13380], [38363], [4299], [46802], [30996], [12630], [49236], [7082], 
[8795], [5218], [44740], [9686], [9983], [45301], [27114], [40125], [1570], [26997], [544], [5290], [49193], [23781], [14193], [40000], [2947], [43781], [9102], [48064], [42274], [18772], [49384], [9884], [45635], [43521], [31258], [32056], [47686], [21760], [13143], [10148], [26119], [44308], [31379], [36399], [23983], [46694], [36134], [8562], [12977], [35117], [28591], [49021], [47093], [28653], [29013], [46468], [8605], [7254], [25896], [5032], [8168], [36893], [38270], [20499], [27501], [34419], [29547], [28571], [36586], [20871], [30537], [26842], [21375], [31148], [27618], [33094], [3291], [31789], [28391], [870], [9793], [41361], [47916], [27468], [43856], [8850], [35237], [15707], [47552], [2730], [41449], [45488], [3073], [49806], [21938], [24430], [22747], [20924], [46145], [20481], [20197], [8239], [28231], [17987], [42804], [47269], [29972], [49884], [21382], [46295], [36676], [34616], [3921], [26991], [27720], [46265], [654], [9855], [40354], [5291], [34904], [44342], [2470], [14598], [880], [19282], [2498], [24237], [21431], [16369], [8994], [44524], [45662], [13663], [37077], [1447], [37786], [30863], [42854], [1019], [20322], [4398], [12159], [44072], [48664], [31547], [18736], [9259], [31], [16354], [21810], [4357], [37982], [5064], [2033], [32871], [47446], [62], [22158], [37387], [8743], [47007], [17981], [11049], [4622], [37916], [36786], [35138], [29925], [14157], [18095], [27829], [1181], [22226], [5709], [4725], [30189], [37014], [1254], [11380], [42989], [696], [24576], [39487], [30119], [1092], [8088], [2194], [9899], [14412], [21828], [3725], [13544], [5180], [44679], [34398], [3891], [28739], [14219], [37594], [49550], [11326], [6904], [17266], [5749], [10174], [23405], [9955], [38271], [41018], [13011], [48392], [36784], [24254], [21687], [23734], [5413], [41447], [45472], [10122], [17555], [15830], [47384], [12084], [31350], [47940], [11661], [27988], [45443], [905], [49651], [16614], [34993], [6781], [30803], [35869], [8001], [41604], 
[28118], [46462], [46762], [16262], [17281], [5774], [10943], [5013], [18257], [6750], [4713], [3951], [11899], [38791], [16943], [37596], [9318], [18413], [40473], [13208], [16375]] - badwordsids_opt = [[44717], [46613], [48513], [49923], [50185], [48755], [8488], [43303], [49659], [48601], [49817], [45405], [48742], [49925], [47720], [11227], [48937], [48784], [50017], [42248], [49310], [48082], [49895], [50025], [49092], [49007], [8061], [44226], [0], [742], [28578], [15698], [49784], [46679], [39365], [49281], [49609], [48081], [48906], [46161], [48554], [49670], [48677], [49721], [49632], [48610], [48462], [47457], [10975], [46077], [28696], [48709], [43839], [49798], [49154], [48203], [49625], [48395], [50155], [47161], [49095], [48833], [49420], [49666], [48443], [22176], [49242], [48651], [49138], [49750], [40389], [48021], [21838], [49070], [45333], [40862], [1], [49915], [33525], [49858], [50254], [44403], [48992], [48872], [46117], [49853], [47567], [50206], [41552], [50068], [48999], [49703], [49940], [49329], [47620], [49868], [49962], [2], [44082], [50236], [31274], [50260], [47052], [42645], [49177], [17523], [48691], [49900], [49069], [49358], [48794], [47529], [46479], [48457], [646], [49910], [48077], [48935], [46386], [48902], [49151], [48759], [49803], [45587], [48392], [47789], [48654], [49836], [49230], [48188], [50264], [46844], [44690], [48505], [50161], [27779], [49995], [41833], [50154], [49097], [48520], [50018], [8174], [50084], [49366], [49526], [50193], [7479], [49982], [3]] - fp32_model = False # Whether or not the most recently loaded HF model was in fp32 format - deletewi = None # Temporary storage for UID to delete - wirmvwhtsp = False # Whether to remove leading whitespace from WI entries - widepth = 3 # How many historical actions to scan for WI hits - mode = "play" # Whether the interface is in play, memory, or edit mode - editln = 0 # Which line was last selected in Edit Mode - gpu_device = 0 # Which PyTorch device to use when 
using pure GPU generation - url = "https://api.inferkit.com/v1/models/standard/generate" # InferKit API URL - oaiurl = "" # OpenAI API URL - oaiengines = "https://api.openai.com/v1/engines" - colaburl = "" # Ngrok url for Google Colab mode - apikey = "" # API key to use for InferKit API calls - oaiapikey = "" # API key to use for OpenAI API calls - savedir = getcwd()+"\\stories" - hascuda = False # Whether torch has detected CUDA on the system - usegpu = False # Whether to launch pipeline with GPU support - custmodpth = "" # Filesystem location of custom model to run - formatoptns = {'frmttriminc': True, 'frmtrmblln': False, 'frmtrmspch': False, 'frmtadsnsp': False, 'singleline': False} # Container for state of formatting options - importnum = -1 # Selection on import popup list - importjs = {} # Temporary storage for import data - loadselect = "" # Temporary storage for story filename to load - spselect = "" # Temporary storage for soft prompt filename to load - spmeta = None # Metadata of current soft prompt, or None if not using a soft prompt - sp = None # Current soft prompt tensor (as a NumPy array) - sp_length = 0 # Length of current soft prompt in tokens, or 0 if not using a soft prompt - has_genmod = False # Whether or not at least one loaded Lua userscript has a generation modifier - svowname = "" # Filename that was flagged for overwrite confirm - saveow = False # Whether or not overwrite confirm has been displayed - autosave = False # Whether or not to automatically save after each action - genseqs = [] # Temporary storage for generated sequences - recentback = False # Whether Back button was recently used without Submitting or Retrying after - recentrng = None # If a new random game was recently generated without Submitting after, this is the topic used (as a string), otherwise this is None - recentrngm = None # If a new random game was recently generated without Submitting after, this is the memory used (as a string), otherwise this is None - useprompt 
= False # Whether to send the full prompt with every submit action - breakmodel = False # For GPU users, whether to use both system RAM and VRAM to conserve VRAM while offering speedup compared to CPU-only - bmsupported = False # Whether the breakmodel option is supported (GPT-Neo/GPT-J/XGLM/OPT only, currently) - nobreakmodel = False # Something specifically requested Breakmodel to be disabled (For example a models config) - smandelete = False # Whether stories can be deleted from inside the browser - smanrename = False # Whether stories can be renamed from inside the browser - allowsp = False # Whether we are allowed to use soft prompts (by default enabled if we're using GPT-2, GPT-Neo or GPT-J) - modeldim = -1 # Embedding dimension of your model (e.g. it's 4096 for GPT-J-6B and 2560 for GPT-Neo-2.7B) - laststory = None # Filename (without extension) of most recent story JSON file we loaded - regex_sl = re.compile(r'\n*(?<=.) *\n(.|\n)*') # Pattern for limiting the output to a single line - acregex_ai = re.compile(r'\n* *>(.|\n)*') # Pattern for matching adventure actions from the AI so we can remove them - acregex_ui = re.compile(r'^ *(>.*)$', re.MULTILINE) # Pattern for matching actions in the HTML-escaped story so we can apply colouring, etc (make sure to encase part to format in parentheses) - comregex_ai = re.compile(r'(?:\n<\|(?:.|\n)*?\|>(?=\n|$))|(?:<\|(?:.|\n)*?\|>\n?)') # Pattern for matching comments to remove them before sending them to the AI - comregex_ui = re.compile(r'(<\|(?:.|\n)*?\|>)') # Pattern for matching comments in the editor - sampler_order = utils.default_sampler_order.copy() - chatmode = False - chatname = "You" - adventure = False - actionmode = 1 - dynamicscan = False - host = False - flaskwebgui = False - nopromptgen = False - rngpersist = False - nogenmod = False - welcome = False # Custom Welcome Text (False is default) - newlinemode = "ns" - quiet = False # If set will suppress any story text from being printed to the console 
(will only be seen on the client web page) - debug = False # If set to true, will send debug information to the client for display - lazy_load = True # Whether or not to use torch_lazy_loader.py for transformers models in order to reduce CPU memory usage - use_colab_tpu = os.environ.get("COLAB_TPU_ADDR", "") != "" or os.environ.get("TPU_NAME", "") != "" # Whether or not we're in a Colab TPU instance or Kaggle TPU instance and are going to use the TPU rather than the CPU - revision = None -utils.vars = vars +model_settings = koboldai_settings.model_settings() +story_settings = koboldai_settings.story_settings() +user_settings = koboldai_settings.user_settings() +system_settings = koboldai_settings.system_settings() + +utils.model_settings = model_settings +utils.story_settings = story_settings +utils.user_settings = user_settings +utils.system_settings = system_settings class Send_to_socketio(object): def write(self, bar): @@ -370,6 +245,7 @@ app = Flask(__name__, root_path=os.getcwd()) app.config['SECRET KEY'] = 'secret!' 
app.config['TEMPLATES_AUTO_RELOAD'] = True socketio = SocketIO(app, async_method="eventlet") +koboldai_settings.socketio = socketio print("{0}OK!{1}".format(colors.GREEN, colors.END)) #==================================================================# @@ -379,7 +255,7 @@ def sendModelSelection(menu="mainmenu", folder="./models"): #If we send one of the manual load options, send back the list of model directories, otherwise send the menu if menu in ('NeoCustom', 'GPT2Custom'): (paths, breadcrumbs) = get_folder_path_info(folder) - if vars.host: + if system_settings.host: breadcrumbs = [] menu_list = [[folder, menu, "", False] for folder in paths] menu_list.append(["Return to Main Menu", "mainmenu", "", True]) @@ -425,29 +301,29 @@ def getModelSelection(modellist): i += 1 print(" "); modelsel = 0 - vars.model = '' - while(vars.model == ''): + model_settings.model = '' + while(model_settings.model == ''): modelsel = input("Model #> ") if(modelsel.isnumeric() and int(modelsel) > 0 and int(modelsel) <= len(modellist)): - vars.model = modellist[int(modelsel)-1][1] + model_settings.model = modellist[int(modelsel)-1][1] else: print("{0}Please enter a valid selection.{1}".format(colors.RED, colors.END)) # Model Lists try: - getModelSelection(eval(vars.model)) + getModelSelection(eval(model_settings.model)) except Exception as e: - if(vars.model == "Return"): + if(model_settings.model == "Return"): getModelSelection(mainmenu) # If custom model was selected, get the filesystem location and store it - if(vars.model == "NeoCustom" or vars.model == "GPT2Custom"): + if(model_settings.model == "NeoCustom" or model_settings.model == "GPT2Custom"): print("{0}Please choose the folder where pytorch_model.bin is located:{1}\n".format(colors.CYAN, colors.END)) modpath = fileops.getdirpath(getcwd() + "/models", "Select Model Folder") if(modpath): # Save directory to vars - vars.custmodpth = modpath + model_settings.custmodpth = modpath else: # Print error and retry model selection 
print("{0}Model select cancelled!{1}".format(colors.RED, colors.END)) @@ -458,7 +334,7 @@ def check_if_dir_is_model(path): if os.path.exists(path): try: from transformers import AutoConfig - model_config = AutoConfig.from_pretrained(path, revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(path, revision=model_settings.revision, cache_dir="cache") except: return False return True @@ -482,11 +358,11 @@ def getmodelname(): if(args.configname): modelname = args.configname return modelname - if(vars.model in ("NeoCustom", "GPT2Custom", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): - modelname = os.path.basename(os.path.normpath(vars.custmodpth)) + if(model_settings.model in ("NeoCustom", "GPT2Custom", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): + modelname = os.path.basename(os.path.normpath(model_settings.custmodpth)) return modelname else: - modelname = vars.model + modelname = model_settings.model return modelname #==================================================================# @@ -609,25 +485,25 @@ def device_config(config): while(len(breakmodel.gpu_blocks) and breakmodel.gpu_blocks[-1] == 0): breakmodel.gpu_blocks.pop() if(len(breakmodel.gpu_blocks) and breakmodel.gpu_blocks[-1] in (-1, utils.num_layers(config))): - vars.breakmodel = False - vars.usegpu = True - vars.gpu_device = len(breakmodel.gpu_blocks)-1 + system_settings.breakmodel = False + system_settings.usegpu = True + system_settings.gpu_device = len(breakmodel.gpu_blocks)-1 return if(not breakmodel.gpu_blocks): print("Nothing assigned to a GPU, reverting to CPU only mode") import breakmodel breakmodel.primary_device = "cpu" - vars.breakmodel = False - vars.usegpu = False + system_settings.breakmodel = False + system_settings.usegpu = False return def move_model_to_devices(model): global generator - if(not utils.HAS_ACCELERATE and not vars.breakmodel): - if(vars.usegpu): - model = model.half().to(vars.gpu_device) + if(not utils.HAS_ACCELERATE 
and not system_settings.breakmodel): + if(system_settings.usegpu): + model = model.half().to(system_settings.gpu_device) else: model = model.to('cpu').float() generator = model.generate @@ -699,56 +575,56 @@ def loadmodelsettings(): except Exception as e: try: try: - js = json.load(open(vars.custmodpth + "/config.json", "r")) + js = json.load(open(model_settings.custmodpth + "/config.json", "r")) except Exception as e: - js = json.load(open(vars.custmodpth.replace('/', '_') + "/config.json", "r")) + js = json.load(open(model_settings.custmodpth.replace('/', '_') + "/config.json", "r")) except Exception as e: js = {} - if vars.model_type == "xglm" or js.get("compat", "j") == "fairseq_lm": - vars.newlinemode = "s" # Default to newline mode if using XGLM - if vars.model_type == "opt" or vars.model_type == "bloom": - vars.newlinemode = "ns" # Handle but don't convert newlines if using Fairseq models that have newlines trained in them - vars.modelconfig = js + if model_settings.model_type == "xglm" or js.get("compat", "j") == "fairseq_lm": + model_settings.newlinemode = "s" # Default to newline mode if using XGLM + if model_settings.model_type == "opt" or model_settings.model_type == "bloom": + model_settings.newlinemode = "ns" # Handle but don't convert newlines if using Fairseq models that have newlines trained in them + model_settings.modelconfig = js if("badwordsids" in js): - vars.badwordsids = js["badwordsids"] + model_settings.badwordsids = js["badwordsids"] if("nobreakmodel" in js): - vars.nobreakmodel = js["nobreakmodel"] + system_settings.nobreakmodel = js["nobreakmodel"] if("sampler_order" in js): - vars.sampler_order = js["sampler_order"] + model_settings.sampler_order = js["sampler_order"] if("temp" in js): - vars.temp = js["temp"] + model_settings.temp = js["temp"] if("top_p" in js): - vars.top_p = js["top_p"] + model_settings.top_p = js["top_p"] if("top_k" in js): - vars.top_k = js["top_k"] + model_settings.top_k = js["top_k"] if("tfs" in js): - vars.tfs 
= js["tfs"] + model_settings.tfs = js["tfs"] if("typical" in js): - vars.typical = js["typical"] + model_settings.typical = js["typical"] if("top_a" in js): - vars.top_a = js["top_a"] + model_settings.top_a = js["top_a"] if("rep_pen" in js): - vars.rep_pen = js["rep_pen"] + model_settings.rep_pen = js["rep_pen"] if("rep_pen_slope" in js): - vars.rep_pen_slope = js["rep_pen_slope"] + model_settings.rep_pen_slope = js["rep_pen_slope"] if("rep_pen_range" in js): - vars.rep_pen_range = js["rep_pen_range"] + model_settings.rep_pen_range = js["rep_pen_range"] if("adventure" in js): - vars.adventure = js["adventure"] + story_settings.adventure = js["adventure"] if("chatmode" in js): - vars.chatmode = js["chatmode"] + story_settings.chatmode = js["chatmode"] if("dynamicscan" in js): - vars.dynamicscan = js["dynamicscan"] + story_settings.dynamicscan = js["dynamicscan"] if("formatoptns" in js): - vars.formatoptns = js["formatoptns"] + user_settings.formatoptns = js["formatoptns"] if("welcome" in js): - vars.welcome = js["welcome"] + system_settings.welcome = js["welcome"] if("newlinemode" in js): - vars.newlinemode = js["newlinemode"] + model_settings.newlinemode = js["newlinemode"] if("antemplate" in js): - vars.setauthornotetemplate = js["antemplate"] - if(not vars.gamestarted): - vars.authornotetemplate = vars.setauthornotetemplate + story_settings.setauthornotetemplate = js["antemplate"] + if(not story_settings.gamestarted): + story_settings.authornotetemplate = story_settings.setauthornotetemplate #==================================================================# # Take settings from vars and write them to client settings file @@ -756,41 +632,41 @@ def loadmodelsettings(): def savesettings(): # Build json to write js = {} - js["apikey"] = vars.apikey - js["andepth"] = vars.andepth - js["sampler_order"] = vars.sampler_order - js["temp"] = vars.temp - js["top_p"] = vars.top_p - js["top_k"] = vars.top_k - js["tfs"] = vars.tfs - js["typical"] = vars.typical - js["top_a"] 
= vars.top_a - js["rep_pen"] = vars.rep_pen - js["rep_pen_slope"] = vars.rep_pen_slope - js["rep_pen_range"] = vars.rep_pen_range - js["genamt"] = vars.genamt - js["max_length"] = vars.max_length - js["ikgen"] = vars.ikgen - js["formatoptns"] = vars.formatoptns - js["numseqs"] = vars.numseqs - js["widepth"] = vars.widepth - js["useprompt"] = vars.useprompt - js["adventure"] = vars.adventure - js["chatmode"] = vars.chatmode - js["chatname"] = vars.chatname - js["dynamicscan"] = vars.dynamicscan - js["nopromptgen"] = vars.nopromptgen - js["rngpersist"] = vars.rngpersist - js["nogenmod"] = vars.nogenmod - js["autosave"] = vars.autosave - js["welcome"] = vars.welcome - js["newlinemode"] = vars.newlinemode + js["apikey"] = model_settings.apikey + js["andepth"] = story_settings.andepth + js["sampler_order"] = model_settings.sampler_order + js["temp"] = model_settings.temp + js["top_p"] = model_settings.top_p + js["top_k"] = model_settings.top_k + js["tfs"] = model_settings.tfs + js["typical"] = model_settings.typical + js["top_a"] = model_settings.top_a + js["rep_pen"] = model_settings.rep_pen + js["rep_pen_slope"] = model_settings.rep_pen_slope + js["rep_pen_range"] = model_settings.rep_pen_range + js["genamt"] = model_settings.genamt + js["max_length"] = model_settings.max_length + js["ikgen"] = model_settings.ikgen + js["formatoptns"] = user_settings.formatoptns + js["numseqs"] = model_settings.numseqs + js["widepth"] = user_settings.widepth + js["useprompt"] = story_settings.useprompt + js["adventure"] = story_settings.adventure + js["chatmode"] = story_settings.chatmode + js["chatname"] = story_settings.chatname + js["dynamicscan"] = story_settings.dynamicscan + js["nopromptgen"] = user_settings.nopromptgen + js["rngpersist"] = user_settings.rngpersist + js["nogenmod"] = user_settings.nogenmod + js["autosave"] = user_settings.autosave + js["welcome"] = system_settings.welcome + js["newlinemode"] = model_settings.newlinemode - js["antemplate"] = 
vars.setauthornotetemplate + js["antemplate"] = story_settings.setauthornotetemplate - js["userscripts"] = vars.userscripts - js["corescript"] = vars.corescript - js["softprompt"] = vars.spfilename + js["userscripts"] = system_settings.userscripts + js["corescript"] = system_settings.corescript + js["softprompt"] = system_settings.spfilename # Write it if not os.path.exists('settings'): @@ -832,82 +708,82 @@ def loadsettings(): def processsettings(js): # Copy file contents to vars if("apikey" in js): - vars.apikey = js["apikey"] + model_settings.apikey = js["apikey"] if("andepth" in js): - vars.andepth = js["andepth"] + story_settings.andepth = js["andepth"] if("sampler_order" in js): - vars.sampler_order = js["sampler_order"] + model_settings.sampler_order = js["sampler_order"] if("temp" in js): - vars.temp = js["temp"] + model_settings.temp = js["temp"] if("top_p" in js): - vars.top_p = js["top_p"] + model_settings.top_p = js["top_p"] if("top_k" in js): - vars.top_k = js["top_k"] + model_settings.top_k = js["top_k"] if("tfs" in js): - vars.tfs = js["tfs"] + model_settings.tfs = js["tfs"] if("typical" in js): - vars.typical = js["typical"] + model_settings.typical = js["typical"] if("top_a" in js): - vars.top_a = js["top_a"] + model_settings.top_a = js["top_a"] if("rep_pen" in js): - vars.rep_pen = js["rep_pen"] + model_settings.rep_pen = js["rep_pen"] if("rep_pen_slope" in js): - vars.rep_pen_slope = js["rep_pen_slope"] + model_settings.rep_pen_slope = js["rep_pen_slope"] if("rep_pen_range" in js): - vars.rep_pen_range = js["rep_pen_range"] + model_settings.rep_pen_range = js["rep_pen_range"] if("genamt" in js): - vars.genamt = js["genamt"] + model_settings.genamt = js["genamt"] if("max_length" in js): - vars.max_length = js["max_length"] + model_settings.max_length = js["max_length"] if("ikgen" in js): - vars.ikgen = js["ikgen"] + model_settings.ikgen = js["ikgen"] if("formatoptns" in js): - vars.formatoptns = js["formatoptns"] + user_settings.formatoptns = 
js["formatoptns"] if("numseqs" in js): - vars.numseqs = js["numseqs"] + model_settings.numseqs = js["numseqs"] if("widepth" in js): - vars.widepth = js["widepth"] + user_settings.widepth = js["widepth"] if("useprompt" in js): - vars.useprompt = js["useprompt"] + story_settings.useprompt = js["useprompt"] if("adventure" in js): - vars.adventure = js["adventure"] + story_settings.adventure = js["adventure"] if("chatmode" in js): - vars.chatmode = js["chatmode"] + story_settings.chatmode = js["chatmode"] if("chatname" in js): - vars.chatname = js["chatname"] + story_settings.chatname = js["chatname"] if("dynamicscan" in js): - vars.dynamicscan = js["dynamicscan"] + story_settings.dynamicscan = js["dynamicscan"] if("nopromptgen" in js): - vars.nopromptgen = js["nopromptgen"] + user_settings.nopromptgen = js["nopromptgen"] if("rngpersist" in js): - vars.rngpersist = js["rngpersist"] + user_settings.rngpersist = js["rngpersist"] if("nogenmod" in js): - vars.nogenmod = js["nogenmod"] + user_settings.nogenmod = js["nogenmod"] if("autosave" in js): - vars.autosave = js["autosave"] + user_settings.autosave = js["autosave"] if("newlinemode" in js): - vars.newlinemode = js["newlinemode"] + model_settings.newlinemode = js["newlinemode"] if("welcome" in js): - vars.welcome = js["welcome"] + system_settings.welcome = js["welcome"] if("antemplate" in js): - vars.setauthornotetemplate = js["antemplate"] - if(not vars.gamestarted): - vars.authornotetemplate = vars.setauthornotetemplate + story_settings.setauthornotetemplate = js["antemplate"] + if(not story_settings.gamestarted): + story_settings.authornotetemplate = story_settings.setauthornotetemplate if("userscripts" in js): - vars.userscripts = [] + system_settings.userscripts = [] for userscript in js["userscripts"]: if type(userscript) is not str: continue userscript = userscript.strip() if len(userscript) != 0 and all(q not in userscript for q in ("..", ":")) and all(userscript[0] not in q for q in ("/", "\\")) and 
os.path.exists(fileops.uspath(userscript)): - vars.userscripts.append(userscript) + system_settings.userscripts.append(userscript) if("corescript" in js and type(js["corescript"]) is str and all(q not in js["corescript"] for q in ("..", ":")) and all(js["corescript"][0] not in q for q in ("/", "\\"))): - vars.corescript = js["corescript"] + system_settings.corescript = js["corescript"] else: - vars.corescript = "default.lua" + system_settings.corescript = "default.lua" #==================================================================# # Load a soft prompt from a file @@ -916,38 +792,38 @@ def processsettings(js): def check_for_sp_change(): while(True): time.sleep(0.1) - if(vars.sp_changed): + if(system_settings.sp_changed): with app.app_context(): - emit('from_server', {'cmd': 'spstatitems', 'data': {vars.spfilename: vars.spmeta} if vars.allowsp and len(vars.spfilename) else {}}, namespace=None, broadcast=True) - vars.sp_changed = False + emit('from_server', {'cmd': 'spstatitems', 'data': {system_settings.spfilename: system_settings.spmeta} if system_settings.allowsp and len(system_settings.spfilename) else {}}, namespace=None, broadcast=True) + system_settings.sp_changed = False socketio.start_background_task(check_for_sp_change) def spRequest(filename): - if(not vars.allowsp): + if(not system_settings.allowsp): raise RuntimeError("Soft prompts are not supported by your current model/backend") - old_filename = vars.spfilename + old_filename = system_settings.spfilename - vars.spfilename = "" + system_settings.spfilename = "" settingschanged() if(len(filename) == 0): - vars.sp = None - vars.sp_length = 0 + system_settings.sp = None + system_settings.sp_length = 0 if(old_filename != filename): - vars.sp_changed = True + system_settings.sp_changed = True return global np if 'np' not in globals(): import numpy as np - z, version, shape, fortran_order, dtype = fileops.checksp(filename, vars.modeldim) + z, version, shape, fortran_order, dtype = 
fileops.checksp(filename, model_settings.modeldim) if not isinstance(z, zipfile.ZipFile): raise RuntimeError(f"{repr(filename)} is not a valid soft prompt file") with z.open('meta.json') as f: - vars.spmeta = json.load(f) + system_settings.spmeta = json.load(f) z.close() with np.load(fileops.sppath(filename), allow_pickle=False) as f: @@ -963,10 +839,10 @@ def spRequest(filename): tensor = np.float32(tensor) assert not np.isinf(tensor).any() and not np.isnan(tensor).any() - vars.sp_length = tensor.shape[-2] - vars.spmeta["n_tokens"] = vars.sp_length + system_settings.sp_length = tensor.shape[-2] + system_settings.spmeta["n_tokens"] = system_settings.sp_length - if(vars.use_colab_tpu or vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): + if(system_settings.use_colab_tpu or model_settings.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): rows = tensor.shape[0] padding_amount = tpu_mtj_backend.params["seq"] - (tpu_mtj_backend.params["seq"] % -tpu_mtj_backend.params["cores_per_replica"]) - rows tensor = np.pad(tensor, ((0, padding_amount), (0, 0))) @@ -975,14 +851,14 @@ def spRequest(filename): -1, tpu_mtj_backend.params.get("d_embed", tpu_mtj_backend.params["d_model"]), ) - vars.sp = tpu_mtj_backend.shard_xmap(np.float32(tensor)) + system_settings.sp = tpu_mtj_backend.shard_xmap(np.float32(tensor)) else: - vars.sp = torch.from_numpy(tensor) + system_settings.sp = torch.from_numpy(tensor) - vars.spfilename = filename + system_settings.spfilename = filename settingschanged() if(old_filename != filename): - vars.sp_changed = True + system_settings.sp_changed = True #==================================================================# # Startup @@ -1029,8 +905,8 @@ def general_startup(override_args=None): else: args = parser.parse_args() - vars.model = args.model; - vars.revision = args.revision + model_settings.model = args.model; + model_settings.revision = args.revision if args.colab: args.remote = True; @@ -1042,53 +918,53 @@ def 
general_startup(override_args=None): args.noaimenu = True; if args.quiet: - vars.quiet = True + system_settings.quiet = True if args.nobreakmodel: - vars.nobreakmodel = True; + system_settings.nobreakmodel = True; if args.remote: - vars.host = True; + system_settings.host = True; if args.ngrok: - vars.host = True; + system_settings.host = True; if args.localtunnel: - vars.host = True; + system_settings.host = True; if args.host: - vars.host = True; + system_settings.host = True; if args.cpu: - vars.use_colab_tpu = False + system_settings.use_colab_tpu = False - vars.smandelete = vars.host == args.override_delete - vars.smanrename = vars.host == args.override_rename + system_settings.smandelete = system_settings.host == args.override_delete + system_settings.smanrename = system_settings.host == args.override_rename - vars.aria2_port = args.aria2_port or 6799 + system_settings.aria2_port = args.aria2_port or 6799 #Now let's look to see if we are going to force a load of a model from a user selected folder - if(vars.model == "selectfolder"): + if(model_settings.model == "selectfolder"): print("{0}Please choose the folder where pytorch_model.bin is located:{1}\n".format(colors.CYAN, colors.END)) modpath = fileops.getdirpath(getcwd() + "/models", "Select Model Folder") if(modpath): # Save directory to vars - vars.model = "NeoCustom" - vars.custmodpth = modpath + model_settings.model = "NeoCustom" + model_settings.custmodpth = modpath elif args.model: - print("Welcome to KoboldAI!\nYou have selected the following Model:", vars.model) + print("Welcome to KoboldAI!\nYou have selected the following Model:", model_settings.model) if args.path: print("You have selected the following path for your Model :", args.path) - vars.custmodpth = args.path; - vars.colaburl = args.path + "/request"; # Lets just use the same parameter to keep it simple + model_settings.custmodpth = args.path; + model_settings.colaburl = args.path + "/request"; # Lets just use the same parameter to keep 
it simple #==================================================================# # Load Model #==================================================================# def tpumtjgetsofttokens(): soft_tokens = None - if(vars.sp is None): + if(system_settings.sp is None): global np if 'np' not in globals(): import numpy as np @@ -1101,10 +977,10 @@ def tpumtjgetsofttokens(): -1, tpu_mtj_backend.params.get("d_embed", tpu_mtj_backend.params["d_model"]), ) - vars.sp = tpu_mtj_backend.shard_xmap(tensor) + system_settings.sp = tpu_mtj_backend.shard_xmap(tensor) soft_tokens = np.arange( tpu_mtj_backend.params["n_vocab"] + tpu_mtj_backend.params["n_vocab_padding"], - tpu_mtj_backend.params["n_vocab"] + tpu_mtj_backend.params["n_vocab_padding"] + vars.sp_length, + tpu_mtj_backend.params["n_vocab"] + tpu_mtj_backend.params["n_vocab_padding"] + system_settings.sp_length, dtype=np.uint32 ) return soft_tokens @@ -1171,19 +1047,19 @@ def get_model_info(model, directory=""): def get_layer_count(model, directory=""): if(model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]): - if(vars.model == "GPT2Custom"): - model_config = open(vars.custmodpth + "/config.json", "r") + if(model_settings.model == "GPT2Custom"): + model_config = open(model_settings.custmodpth + "/config.json", "r") # Get the model_type from the config or assume a model type if it isn't present else: from transformers import AutoConfig if directory == "": - model_config = AutoConfig.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") - elif(os.path.isdir(vars.custmodpth.replace('/', '_'))): - model_config = AutoConfig.from_pretrained(vars.custmodpth.replace('/', '_'), revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache") + elif(os.path.isdir(model_settings.custmodpth.replace('/', '_'))): + model_config = 
AutoConfig.from_pretrained(model_settings.custmodpth.replace('/', '_'), revision=model_settings.revision, cache_dir="cache") elif(os.path.isdir(directory)): - model_config = AutoConfig.from_pretrained(directory, revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(directory, revision=model_settings.revision, cache_dir="cache") else: - model_config = AutoConfig.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") @@ -1193,10 +1069,10 @@ def get_layer_count(model, directory=""): def get_oai_models(key): - vars.oaiapikey = key - if vars.model == 'OAI': + model_settings.oaiapikey = key + if model_settings.model == 'OAI': url = "https://api.openai.com/v1/engines" - elif vars.model == 'GooseAI': + elif model_settings.model == 'GooseAI': url = "https://api.goose.ai/v1/engines" else: return @@ -1225,8 +1101,8 @@ def get_oai_models(key): # If the client settings file doesn't exist, create it # Write API key to file os.makedirs('settings', exist_ok=True) - if path.exists("settings/{}.settings".format(vars.model)): - with open("settings/{}.settings".format(vars.model), "r") as file: + if path.exists("settings/{}.settings".format(model_settings.model)): + with open("settings/{}.settings".format(model_settings.model), "r") as file: js = json.load(file) if 'online_model' in js: online_model = js['online_model'] @@ -1234,7 +1110,7 @@ def get_oai_models(key): if js['apikey'] != key: changed=True if changed: - with open("settings/{}.settings".format(vars.model), "w") as file: + with open("settings/{}.settings".format(model_settings.model), "w") as file: js["apikey"] = key file.write(json.dumps(js, indent=3)) @@ -1257,15 +1133,15 @@ def patch_causallm(model): if(Embedding._koboldai_patch_causallm_model.get_input_embeddings() is not self): return old_embedding_call(self, input_ids, *args, **kwargs) assert 
input_ids is not None - if(vars.sp is not None): + if(system_settings.sp is not None): shifted_input_ids = input_ids - model.config.vocab_size input_ids.clamp_(max=model.config.vocab_size-1) inputs_embeds = old_embedding_call(self, input_ids, *args, **kwargs) - if(vars.sp is not None): - vars.sp = vars.sp.to(inputs_embeds.dtype).to(inputs_embeds.device) + if(system_settings.sp is not None): + system_settings.sp = system_settings.sp.to(inputs_embeds.dtype).to(inputs_embeds.device) inputs_embeds = torch.where( (shifted_input_ids >= 0)[..., None], - vars.sp[shifted_input_ids.clamp(min=0)], + system_settings.sp[shifted_input_ids.clamp(min=0)], inputs_embeds, ) return inputs_embeds @@ -1279,7 +1155,7 @@ def patch_transformers(): old_from_pretrained = PreTrainedModel.from_pretrained.__func__ @classmethod def new_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - vars.fp32_model = False + model_settings.fp32_model = False utils.num_shards = None utils.current_shard = 0 utils.from_pretrained_model_name = pretrained_model_name_or_path @@ -1351,10 +1227,10 @@ def patch_transformers(): if(not isinstance(field_name, str) and isinstance(field_name, Iterable)): conds = [] for f, v in zip(field_name, var_name): - conds.append(getattr(vars, v)) + conds.append(getattr(model_settings, v)) setattr(self, f, conds[-1]) else: - conds = getattr(vars, var_name) + conds = getattr(model_settings, var_name) setattr(self, field_name, conds) assert len(args) == 2 if(cond is None or cond(conds)): @@ -1384,15 +1260,15 @@ def patch_transformers(): scores_shape = scores.shape scores_list = scores.tolist() - vars.lua_koboldbridge.logits = vars.lua_state.table() + system_settings.lua_koboldbridge.logits = system_settings.lua_state.table() for r, row in enumerate(scores_list): - vars.lua_koboldbridge.logits[r+1] = vars.lua_state.table(*row) - vars.lua_koboldbridge.vocab_size = scores_shape[-1] + system_settings.lua_koboldbridge.logits[r+1] = 
system_settings.lua_state.table(*row) + system_settings.lua_koboldbridge.vocab_size = scores_shape[-1] execute_genmod() scores = torch.tensor( - tuple(tuple(row.values()) for row in vars.lua_koboldbridge.logits.values()), + tuple(tuple(row.values()) for row in system_settings.lua_koboldbridge.logits.values()), device=scores.device, dtype=scores.dtype, ) @@ -1418,7 +1294,7 @@ def patch_transformers(): self.__warper_list.append(TemperatureLogitsWarper(temperature=0.5)) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, *args, **kwargs): - for k in vars.sampler_order: + for k in model_settings.sampler_order: scores = self.__warper_list[k](input_ids, scores, *args, **kwargs) return scores @@ -1430,7 +1306,7 @@ def patch_transformers(): kwargs["logits_warper"] = new_get_logits_warper( beams=1, ) - if(vars.newlinemode == "s") or (vars.newlinemode == "ns"): + if(model_settings.newlinemode == "s") or (model_settings.newlinemode == "ns"): kwargs["eos_token_id"] = -1 kwargs.setdefault("pad_token_id", 2) return new_sample.old_sample(self, *args, **kwargs) @@ -1463,29 +1339,29 @@ def patch_transformers(): scores: torch.FloatTensor, **kwargs, ) -> bool: - vars.generated_tkns += 1 - if(vars.lua_koboldbridge.generated_cols and vars.generated_tkns != vars.lua_koboldbridge.generated_cols): - raise RuntimeError(f"Inconsistency detected between KoboldAI Python and Lua backends ({vars.generated_tkns} != {vars.lua_koboldbridge.generated_cols})") - if(vars.abort or vars.generated_tkns >= vars.genamt): + story_settings.generated_tkns += 1 + if(system_settings.lua_koboldbridge.generated_cols and story_settings.generated_tkns != system_settings.lua_koboldbridge.generated_cols): + raise RuntimeError(f"Inconsistency detected between KoboldAI Python and Lua backends ({story_settings.generated_tkns} != {system_settings.lua_koboldbridge.generated_cols})") + if(system_settings.abort or story_settings.generated_tkns >= model_settings.genamt): self.regeneration_required = 
False self.halt = False return True assert input_ids.ndim == 2 assert len(self.excluded_world_info) == input_ids.shape[0] - self.regeneration_required = vars.lua_koboldbridge.regeneration_required - self.halt = not vars.lua_koboldbridge.generating - vars.lua_koboldbridge.regeneration_required = False + self.regeneration_required = system_settings.lua_koboldbridge.regeneration_required + self.halt = not system_settings.lua_koboldbridge.generating + system_settings.lua_koboldbridge.regeneration_required = False - for i in range(vars.numseqs): - vars.lua_koboldbridge.generated[i+1][vars.generated_tkns] = int(input_ids[i, -1].item()) + for i in range(model_settings.numseqs): + system_settings.lua_koboldbridge.generated[i+1][story_settings.generated_tkns] = int(input_ids[i, -1].item()) - if(not vars.dynamicscan): + if(not story_settings.dynamicscan): return self.regeneration_required or self.halt - tail = input_ids[..., -vars.generated_tkns:] + tail = input_ids[..., -story_settings.generated_tkns:] for i, t in enumerate(tail): decoded = utils.decodenewlines(tokenizer.decode(t)) - _, found = checkworldinfo(decoded, force_use_txt=True, actions=vars._actions) + _, found = checkworldinfo(decoded, force_use_txt=True, actions=story_settings._actions) found -= self.excluded_world_info[i] if(len(found) != 0): self.regeneration_required = True @@ -1512,11 +1388,11 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal global tokenizer if not utils.HAS_ACCELERATE: disk_layers = None - vars.noai = False + system_settings.noai = False if not initial_load: set_aibusy(True) - if vars.model != 'ReadOnly': - emit('from_server', {'cmd': 'model_load_status', 'data': "Loading {}".format(vars.model)}, broadcast=True) + if model_settings.model != 'ReadOnly': + emit('from_server', {'cmd': 'model_load_status', 'data': "Loading {}".format(model_settings.model)}, broadcast=True) #Have to add a sleep so the server will send the emit for some reason time.sleep(0.1) if 
gpu_layers is not None: @@ -1525,22 +1401,34 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal args.breakmodel_disklayers = int(disk_layers) #We need to wipe out the existing model and refresh the cuda cache + #Show what's in VRAM + import gc model = None generator = None model_config = None try: - torch.cuda.empty_cache() + with torch.no_grad(): + torch.cuda.empty_cache() except: pass + + for obj in gc.get_objects(): + try: + if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)): + del obj + gc.collect() + torch.cuda.empty_cache() + except: + pass #Reload our badwords - vars.badwordsids = vars.badwordsids_default + model_settings.badwordsids = koboldai_settings.badwordsids_default #Let's set the GooseAI or OpenAI server URLs if that's applicable if online_model != "": - if path.exists("settings/{}.settings".format(vars.model)): + if path.exists("settings/{}.settings".format(model_settings.model)): changed=False - with open("settings/{}.settings".format(vars.model), "r") as file: + with open("settings/{}.settings".format(model_settings.model), "r") as file: # Check if API key exists js = json.load(file) if 'online_model' in js: @@ -1551,135 +1439,135 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal changed=True js['online_model'] = online_model if changed: - with open("settings/{}.settings".format(vars.model), "w") as file: + with open("settings/{}.settings".format(model_settings.model), "w") as file: file.write(json.dumps(js, indent=3)) # Swap OAI Server if GooseAI was selected - if(vars.model == "GooseAI"): - vars.oaiengines = "https://api.goose.ai/v1/engines" - vars.model = "OAI" + if(model_settings.model == "GooseAI"): + model_settings.oaiengines = "https://api.goose.ai/v1/engines" + model_settings.model = "OAI" args.configname = "GooseAI" + "/" + online_model else: - args.configname = vars.model + "/" + online_model - vars.oaiurl = vars.oaiengines + 
"/{0}/completions".format(online_model) + args.configname = model_settings.model + "/" + online_model + model_settings.oaiurl = model_settings.oaiengines + "/{0}/completions".format(online_model) # If transformers model was selected & GPU available, ask to use CPU or GPU - if(vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): - vars.allowsp = True + if(model_settings.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): + system_settings.allowsp = True # Test for GPU support # Make model path the same as the model name to make this consistent with the other loading method if it isn't a known model type # This code is not just a workaround for below, it is also used to make the behavior consistent with other loading methods - Henk717 - if(not vars.model in ["NeoCustom", "GPT2Custom"]): - vars.custmodpth = vars.model - elif(vars.model == "NeoCustom"): - vars.model = os.path.basename(os.path.normpath(vars.custmodpth)) + if(not model_settings.model in ["NeoCustom", "GPT2Custom"]): + model_settings.custmodpth = model_settings.model + elif(model_settings.model == "NeoCustom"): + model_settings.model = os.path.basename(os.path.normpath(model_settings.custmodpth)) # Get the model_type from the config or assume a model type if it isn't present from transformers import AutoConfig - if(os.path.isdir(vars.custmodpth.replace('/', '_'))): + if(os.path.isdir(model_settings.custmodpth.replace('/', '_'))): try: - model_config = AutoConfig.from_pretrained(vars.custmodpth.replace('/', '_'), revision=vars.revision, cache_dir="cache") - vars.model_type = model_config.model_type + model_config = AutoConfig.from_pretrained(model_settings.custmodpth.replace('/', '_'), revision=model_settings.revision, cache_dir="cache") + model_settings.model_type = model_config.model_type except ValueError as e: - vars.model_type = "not_found" - 
elif(os.path.isdir("models/{}".format(vars.custmodpth.replace('/', '_')))): + model_settings.model_type = "not_found" + elif(os.path.isdir("models/{}".format(model_settings.custmodpth.replace('/', '_')))): try: - model_config = AutoConfig.from_pretrained("models/{}".format(vars.custmodpth.replace('/', '_')), revision=vars.revision, cache_dir="cache") - vars.model_type = model_config.model_type + model_config = AutoConfig.from_pretrained("models/{}".format(model_settings.custmodpth.replace('/', '_')), revision=model_settings.revision, cache_dir="cache") + model_settings.model_type = model_config.model_type except ValueError as e: - vars.model_type = "not_found" + model_settings.model_type = "not_found" else: try: - model_config = AutoConfig.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") - vars.model_type = model_config.model_type + model_config = AutoConfig.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") + model_settings.model_type = model_config.model_type except ValueError as e: - vars.model_type = "not_found" - if(vars.model_type == "not_found" and vars.model == "NeoCustom"): - vars.model_type = "gpt_neo" - elif(vars.model_type == "not_found" and vars.model == "GPT2Custom"): - vars.model_type = "gpt2" - elif(vars.model_type == "not_found"): + model_settings.model_type = "not_found" + if(model_settings.model_type == "not_found" and model_settings.model == "NeoCustom"): + model_settings.model_type = "gpt_neo" + elif(model_settings.model_type == "not_found" and model_settings.model == "GPT2Custom"): + model_settings.model_type = "gpt2" + elif(model_settings.model_type == "not_found"): print("WARNING: No model type detected, assuming Neo (If this is a GPT2 model use the other menu option or --model GPT2Custom)") - vars.model_type = "gpt_neo" + model_settings.model_type = "gpt_neo" - if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", 
"TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): + if(not system_settings.use_colab_tpu and model_settings.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): loadmodelsettings() loadsettings() print("{0}Looking for GPU support...{1}".format(colors.PURPLE, colors.END), end="") - vars.hascuda = torch.cuda.is_available() - vars.bmsupported = (utils.HAS_ACCELERATE or vars.model_type in ("gpt_neo", "gptj", "xglm", "opt")) and not vars.nobreakmodel + system_settings.hascuda = torch.cuda.is_available() + system_settings.bmsupported = (utils.HAS_ACCELERATE or model_settings.model_type in ("gpt_neo", "gptj", "xglm", "opt")) and not system_settings.nobreakmodel if(args.breakmodel is not None and args.breakmodel): print("WARNING: --breakmodel is no longer supported. Breakmodel mode is now automatically enabled when --breakmodel_gpulayers is used (see --help for details).", file=sys.stderr) if(args.breakmodel_layers is not None): print("WARNING: --breakmodel_layers is deprecated. 
Use --breakmodel_gpulayers instead (see --help for details).", file=sys.stderr) - if(args.model and vars.bmsupported and not args.breakmodel_gpulayers and not args.breakmodel_layers and (not utils.HAS_ACCELERATE or not args.breakmodel_disklayers)): + if(args.model and system_settings.bmsupported and not args.breakmodel_gpulayers and not args.breakmodel_layers and (not utils.HAS_ACCELERATE or not args.breakmodel_disklayers)): print("WARNING: Model launched without the --breakmodel_gpulayers argument, defaulting to GPU only mode.", file=sys.stderr) - vars.bmsupported = False - if(not vars.bmsupported and (args.breakmodel_gpulayers is not None or args.breakmodel_layers is not None or args.breakmodel_disklayers is not None)): + system_settings.bmsupported = False + if(not system_settings.bmsupported and (args.breakmodel_gpulayers is not None or args.breakmodel_layers is not None or args.breakmodel_disklayers is not None)): print("WARNING: This model does not support hybrid generation. --breakmodel_gpulayers will be ignored.", file=sys.stderr) - if(vars.hascuda): + if(system_settings.hascuda): print("{0}FOUND!{1}".format(colors.GREEN, colors.END)) else: print("{0}NOT FOUND!{1}".format(colors.YELLOW, colors.END)) if args.model: - if(vars.hascuda): + if(system_settings.hascuda): genselected = True - vars.usegpu = True - vars.breakmodel = utils.HAS_ACCELERATE - if(vars.bmsupported): - vars.usegpu = False - vars.breakmodel = True + system_settings.usegpu = True + system_settings.breakmodel = utils.HAS_ACCELERATE + if(system_settings.bmsupported): + system_settings.usegpu = False + system_settings.breakmodel = True if(args.cpu): - vars.usegpu = False - vars.breakmodel = utils.HAS_ACCELERATE - elif(vars.hascuda): - if(vars.bmsupported): + system_settings.usegpu = False + system_settings.breakmodel = utils.HAS_ACCELERATE + elif(system_settings.hascuda): + if(system_settings.bmsupported): genselected = True - vars.usegpu = False - vars.breakmodel = True + system_settings.usegpu 
= False + system_settings.breakmodel = True else: genselected = False else: genselected = False - if(vars.hascuda): + if(system_settings.hascuda): if(use_gpu): - if(vars.bmsupported): - vars.breakmodel = True - vars.usegpu = False + if(system_settings.bmsupported): + system_settings.breakmodel = True + system_settings.usegpu = False genselected = True else: - vars.breakmodel = False - vars.usegpu = True + system_settings.breakmodel = False + system_settings.usegpu = True genselected = True else: - vars.breakmodel = utils.HAS_ACCELERATE - vars.usegpu = False + system_settings.breakmodel = utils.HAS_ACCELERATE + system_settings.usegpu = False genselected = True # Ask for API key if InferKit was selected - if(vars.model == "InferKit"): - vars.apikey = vars.oaiapikey + if(model_settings.model == "InferKit"): + model_settings.apikey = model_settings.oaiapikey # Swap OAI Server if GooseAI was selected - if(vars.model == "GooseAI"): - vars.oaiengines = "https://api.goose.ai/v1/engines" - vars.model = "OAI" + if(model_settings.model == "GooseAI"): + model_settings.oaiengines = "https://api.goose.ai/v1/engines" + model_settings.model = "OAI" args.configname = "GooseAI" # Ask for API key if OpenAI was selected - if(vars.model == "OAI"): + if(model_settings.model == "OAI"): if not args.configname: args.configname = "OAI" - if(vars.model == "ReadOnly"): - vars.noai = True + if(model_settings.model == "ReadOnly"): + system_settings.noai = True # Start transformers and create pipeline - if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): - if(not vars.noai): + if(not system_settings.use_colab_tpu and model_settings.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): + if(not system_settings.noai): print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END)) for m in 
("GPTJModel", "XGLMModel"): try: @@ -1690,7 +1578,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal # Lazy loader import torch_lazy_loader def get_lazy_load_callback(n_layers, convert_to_float16=True): - if not vars.lazy_load: + if not model_settings.lazy_load: return from tqdm.auto import tqdm @@ -1723,10 +1611,10 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal for key, value in model_dict.items(): original_key = get_original_key(key) if isinstance(value, torch_lazy_loader.LazyTensor) and not any(original_key.startswith(n) for n in utils.layers_module_names): - device_map[key] = vars.gpu_device if vars.hascuda and vars.usegpu else "cpu" if not vars.hascuda or not vars.breakmodel else breakmodel.primary_device + device_map[key] = system_settings.gpu_device if system_settings.hascuda and system_settings.usegpu else "cpu" if not system_settings.hascuda or not system_settings.breakmodel else breakmodel.primary_device else: layer = int(max((n for n in utils.layers_module_names if original_key.startswith(n)), key=len).rsplit(".", 1)[1]) - device = vars.gpu_device if vars.hascuda and vars.usegpu else "disk" if layer < disk_blocks and layer < ram_blocks else "cpu" if not vars.hascuda or not vars.breakmodel else "shared" if layer < ram_blocks else bisect.bisect_right(cumulative_gpu_blocks, layer - ram_blocks) + device = system_settings.gpu_device if system_settings.hascuda and system_settings.usegpu else "disk" if layer < disk_blocks and layer < ram_blocks else "cpu" if not system_settings.hascuda or not system_settings.breakmodel else "shared" if layer < ram_blocks else bisect.bisect_right(cumulative_gpu_blocks, layer - ram_blocks) device_map[key] = device if utils.num_shards is None or utils.current_shard == 0: @@ -1774,10 +1662,10 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal #print(f"Transferring <{key}> to {f'({device.upper()})' if isinstance(device, str) else 
'[device ' + str(device) + ']'} ... ", end="", flush=True) model_dict[key] = model_dict[key].materialize(f, map_location="cpu") if model_dict[key].dtype is torch.float32: - vars.fp32_model = True - if convert_to_float16 and breakmodel.primary_device != "cpu" and vars.hascuda and (vars.breakmodel or vars.usegpu) and model_dict[key].dtype is torch.float32: + model_settings.fp32_model = True + if convert_to_float16 and breakmodel.primary_device != "cpu" and system_settings.hascuda and (system_settings.breakmodel or system_settings.usegpu) and model_dict[key].dtype is torch.float32: model_dict[key] = model_dict[key].to(torch.float16) - if breakmodel.primary_device == "cpu" or (not vars.usegpu and not vars.breakmodel and model_dict[key].dtype is torch.float16): + if breakmodel.primary_device == "cpu" or (not system_settings.usegpu and not system_settings.breakmodel and model_dict[key].dtype is torch.float16): model_dict[key] = model_dict[key].to(torch.float32) if device == "shared": model_dict[key] = model_dict[key].to("cpu").detach_() @@ -1834,7 +1722,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal @contextlib.contextmanager def maybe_use_float16(always_use=False): - if(always_use or (vars.hascuda and args.lowmem and (vars.usegpu or vars.breakmodel))): + if(always_use or (system_settings.hascuda and args.lowmem and (system_settings.usegpu or system_settings.breakmodel))): original_dtype = torch.get_default_dtype() torch.set_default_dtype(torch.float16) yield True @@ -1843,22 +1731,22 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal yield False # If custom GPT2 model was chosen - if(vars.model == "GPT2Custom"): - vars.lazy_load = False - model_config = open(vars.custmodpth + "/config.json", "r") + if(model_settings.model == "GPT2Custom"): + model_settings.lazy_load = False + model_config = open(model_settings.custmodpth + "/config.json", "r") js = json.load(model_config) with(maybe_use_float16()): 
try: - model = GPT2LMHeadModel.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") raise e - tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") - vars.modeldim = get_hidden_size_from_model(model) + tokenizer = GPT2TokenizerFast.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") + model_settings.modeldim = get_hidden_size_from_model(model) # Is CUDA available? If so, use GPU, otherwise fall back to CPU - if(vars.hascuda and vars.usegpu): - model = model.half().to(vars.gpu_device) + if(system_settings.hascuda and system_settings.usegpu): + model = model.half().to(system_settings.gpu_device) generator = model.generate else: model = model.to('cpu').float() @@ -1870,23 +1758,23 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal # We must disable low_cpu_mem_usage (by setting lowmem to {}) if # using a GPT-2 model because GPT-2 is not compatible with this # feature yet - if(vars.model_type == "gpt2"): + if(model_settings.model_type == "gpt2"): lowmem = {} - vars.lazy_load = False # Also, lazy loader doesn't support GPT-2 models + model_settings.lazy_load = False # Also, lazy loader doesn't support GPT-2 models # If we're using torch_lazy_loader, we need to get breakmodel config # early so that it knows where to load the individual model tensors - if(utils.HAS_ACCELERATE or vars.lazy_load and vars.hascuda and vars.breakmodel): + if(utils.HAS_ACCELERATE or model_settings.lazy_load and system_settings.hascuda and system_settings.breakmodel): device_config(model_config) # Download model from Huggingface if it does not exist, 
otherwise load locally #If we specify a model and it's in the root directory, we need to move it to the models directory (legacy folder structure to new) - if os.path.isdir(vars.model.replace('/', '_')): + if os.path.isdir(model_settings.model.replace('/', '_')): import shutil - shutil.move(vars.model.replace('/', '_'), "models/{}".format(vars.model.replace('/', '_'))) + shutil.move(model_settings.model.replace('/', '_'), "models/{}".format(model_settings.model.replace('/', '_'))) print("\n", flush=True) - if(vars.lazy_load): # If we're using lazy loader, we need to figure out what the model's hidden layers are called + if(model_settings.lazy_load): # If we're using lazy loader, we need to figure out what the model's hidden layers are called with torch_lazy_loader.use_lazy_torch_load(dematerialized_modules=True, use_accelerate_init_empty_weights=True): try: metamodel = AutoModelForCausalLM.from_config(model_config) @@ -1895,45 +1783,45 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal utils.layers_module_names = utils.get_layers_module_names(metamodel) utils.module_names = list(metamodel.state_dict().keys()) utils.named_buffers = list(metamodel.named_buffers(recurse=True)) - with maybe_use_float16(), torch_lazy_loader.use_lazy_torch_load(enable=vars.lazy_load, callback=get_lazy_load_callback(utils.num_layers(model_config)) if vars.lazy_load else None, dematerialized_modules=True): - if(vars.lazy_load): # torch_lazy_loader.py and low_cpu_mem_usage can't be used at the same time + with maybe_use_float16(), torch_lazy_loader.use_lazy_torch_load(enable=model_settings.lazy_load, callback=get_lazy_load_callback(utils.num_layers(model_config)) if model_settings.lazy_load else None, dematerialized_modules=True): + if(model_settings.lazy_load): # torch_lazy_loader.py and low_cpu_mem_usage can't be used at the same time lowmem = {} - if(os.path.isdir(vars.custmodpth)): + if(os.path.isdir(model_settings.custmodpth)): try: - tokenizer = 
AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") except Exception as e: pass try: - tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", **lowmem) - elif(os.path.isdir("models/{}".format(vars.model.replace('/', '_')))): + model = GPTNeoForCausalLM.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache", **lowmem) + elif(os.path.isdir("models/{}".format(model_settings.model.replace('/', '_')))): try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + tokenizer = 
AutoTokenizer.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache") except Exception as e: pass try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache", **lowmem) else: old_rebuild_tensor = torch._utils._rebuild_tensor def new_rebuild_tensor(storage: 
Union[torch_lazy_loader.LazyTensor, torch.Storage], storage_offset, shape, stride): @@ -1944,86 +1832,86 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal if(not isinstance(dtype, torch.dtype)): dtype = storage.storage_type(0).dtype if(dtype is torch.float32 and len(shape) >= 2): - vars.fp32_model = True + model_settings.fp32_model = True return old_rebuild_tensor(storage, storage_offset, shape, stride) torch._utils._rebuild_tensor = new_rebuild_tensor try: - tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache") except Exception as e: pass try: - tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained(vars.model, revision=vars.revision, 
cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache", **lowmem) torch._utils._rebuild_tensor = old_rebuild_tensor if not args.colab or args.savemodel: import shutil - tokenizer.save_pretrained("models/{}".format(vars.model.replace('/', '_'))) - if(vars.fp32_model): # Use save_pretrained to convert fp32 models to fp16 + tokenizer.save_pretrained("models/{}".format(model_settings.model.replace('/', '_'))) + if(model_settings.fp32_model): # Use save_pretrained to convert fp32 models to fp16 model = model.half() - model.save_pretrained("models/{}".format(vars.model.replace('/', '_')), max_shard_size="500MiB") + model.save_pretrained("models/{}".format(model_settings.model.replace('/', '_')), max_shard_size="500MiB") else: # For fp16 models, we can just copy the model files directly import transformers.configuration_utils import transformers.modeling_utils import transformers.file_utils # Save the config.json - shutil.move(transformers.file_utils.get_from_cache(transformers.file_utils.hf_bucket_url(vars.model, transformers.configuration_utils.CONFIG_NAME, revision=vars.revision), cache_dir="cache", local_files_only=True), os.path.join("models/{}".format(vars.model.replace('/', '_')), transformers.configuration_utils.CONFIG_NAME)) + shutil.move(transformers.file_utils.get_from_cache(transformers.file_utils.hf_bucket_url(model_settings.model, transformers.configuration_utils.CONFIG_NAME, revision=model_settings.revision), cache_dir="cache", local_files_only=True), os.path.join("models/{}".format(model_settings.model.replace('/', '_')), transformers.configuration_utils.CONFIG_NAME)) if(utils.num_shards is None): # Save the pytorch_model.bin of an unsharded model - shutil.move(transformers.file_utils.get_from_cache(transformers.file_utils.hf_bucket_url(vars.model, transformers.modeling_utils.WEIGHTS_NAME, revision=vars.revision), cache_dir="cache", local_files_only=True), 
os.path.join("models/{}".format(vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_NAME)) + shutil.move(transformers.file_utils.get_from_cache(transformers.file_utils.hf_bucket_url(model_settings.model, transformers.modeling_utils.WEIGHTS_NAME, revision=model_settings.revision), cache_dir="cache", local_files_only=True), os.path.join("models/{}".format(model_settings.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_NAME)) else: with open(utils.from_pretrained_index_filename) as f: map_data = json.load(f) filenames = set(map_data["weight_map"].values()) # Save the pytorch_model.bin.index.json of a sharded model - shutil.move(utils.from_pretrained_index_filename, os.path.join("models/{}".format(vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_INDEX_NAME)) + shutil.move(utils.from_pretrained_index_filename, os.path.join("models/{}".format(model_settings.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_INDEX_NAME)) # Then save the pytorch_model-#####-of-#####.bin files for filename in filenames: - shutil.move(transformers.file_utils.get_from_cache(transformers.file_utils.hf_bucket_url(vars.model, filename, revision=vars.revision), cache_dir="cache", local_files_only=True), os.path.join("models/{}".format(vars.model.replace('/', '_')), filename)) + shutil.move(transformers.file_utils.get_from_cache(transformers.file_utils.hf_bucket_url(model_settings.model, filename, revision=model_settings.revision), cache_dir="cache", local_files_only=True), os.path.join("models/{}".format(model_settings.model.replace('/', '_')), filename)) shutil.rmtree("cache/") - if(vars.badwordsids is vars.badwordsids_default and vars.model_type not in ("gpt2", "gpt_neo", "gptj")): - vars.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] + if(model_settings.badwordsids is koboldai_settings.badwordsids_default and model_settings.model_type not in ("gpt2", "gpt_neo", "gptj")): + 
model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] patch_causallm(model) - if(vars.hascuda): - if(vars.usegpu): - vars.modeldim = get_hidden_size_from_model(model) - model = model.half().to(vars.gpu_device) + if(system_settings.hascuda): + if(system_settings.usegpu): + model_settings.modeldim = get_hidden_size_from_model(model) + model = model.half().to(system_settings.gpu_device) generator = model.generate - elif(vars.breakmodel): # Use both RAM and VRAM (breakmodel) - vars.modeldim = get_hidden_size_from_model(model) - if(not vars.lazy_load): + elif(system_settings.breakmodel): # Use both RAM and VRAM (breakmodel) + model_settings.modeldim = get_hidden_size_from_model(model) + if(not model_settings.lazy_load): device_config(model.config) move_model_to_devices(model) elif(utils.HAS_ACCELERATE and __import__("breakmodel").disk_blocks > 0): move_model_to_devices(model) - vars.modeldim = get_hidden_size_from_model(model) + model_settings.modeldim = get_hidden_size_from_model(model) generator = model.generate else: model = model.to('cpu').float() - vars.modeldim = get_hidden_size_from_model(model) + model_settings.modeldim = get_hidden_size_from_model(model) generator = model.generate elif(utils.HAS_ACCELERATE and __import__("breakmodel").disk_blocks > 0): move_model_to_devices(model) - vars.modeldim = get_hidden_size_from_model(model) + model_settings.modeldim = get_hidden_size_from_model(model) generator = model.generate else: model.to('cpu').float() - vars.modeldim = get_hidden_size_from_model(model) + model_settings.modeldim = get_hidden_size_from_model(model) generator = model.generate # Suppress Author's Note by flagging square brackets (Old implementation) @@ -2031,20 +1919,20 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal #vocab_keys = vocab.keys() #vars.badwords = gettokenids("[") #for key in vars.badwords: - # vars.badwordsids.append([vocab[key]]) + # 
model_settings.badwordsids.append([vocab[key]]) - print("{0}OK! {1} pipeline created!{2}".format(colors.GREEN, vars.model, colors.END)) + print("{0}OK! {1} pipeline created!{2}".format(colors.GREEN, model_settings.model, colors.END)) else: from transformers import GPT2TokenizerFast - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") else: from transformers import PreTrainedModel from transformers import modeling_utils old_from_pretrained = PreTrainedModel.from_pretrained.__func__ @classmethod def new_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - vars.fp32_model = False + model_settings.fp32_model = False utils.num_shards = None utils.current_shard = 0 utils.from_pretrained_model_name = pretrained_model_name_or_path @@ -2067,15 +1955,15 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal def tpumtjgenerate_warper_callback(scores) -> "np.array": scores_shape = scores.shape scores_list = scores.tolist() - vars.lua_koboldbridge.logits = vars.lua_state.table() + system_settings.lua_koboldbridge.logits = system_settings.lua_state.table() for r, row in enumerate(scores_list): - vars.lua_koboldbridge.logits[r+1] = vars.lua_state.table(*row) - vars.lua_koboldbridge.vocab_size = scores_shape[-1] + system_settings.lua_koboldbridge.logits[r+1] = system_settings.lua_state.table(*row) + system_settings.lua_koboldbridge.vocab_size = scores_shape[-1] execute_genmod() scores = np.array( - tuple(tuple(row.values()) for row in vars.lua_koboldbridge.logits.values()), + tuple(tuple(row.values()) for row in system_settings.lua_koboldbridge.logits.values()), dtype=scores.dtype, ) assert scores.shape == scores_shape @@ -2083,24 +1971,24 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal return scores def 
tpumtjgenerate_stopping_callback(generated, n_generated, excluded_world_info) -> Tuple[List[set], bool, bool]: - vars.generated_tkns += 1 + story_settings.generated_tkns += 1 assert len(excluded_world_info) == len(generated) - regeneration_required = vars.lua_koboldbridge.regeneration_required - halt = vars.abort or not vars.lua_koboldbridge.generating or vars.generated_tkns >= vars.genamt - vars.lua_koboldbridge.regeneration_required = False + regeneration_required = system_settings.lua_koboldbridge.regeneration_required + halt = system_settings.abort or not system_settings.lua_koboldbridge.generating or story_settings.generated_tkns >= model_settings.genamt + system_settings.lua_koboldbridge.regeneration_required = False global past - for i in range(vars.numseqs): - vars.lua_koboldbridge.generated[i+1][vars.generated_tkns] = int(generated[i, tpu_mtj_backend.params["seq"] + n_generated - 1].item()) + for i in range(model_settings.numseqs): + system_settings.lua_koboldbridge.generated[i+1][story_settings.generated_tkns] = int(generated[i, tpu_mtj_backend.params["seq"] + n_generated - 1].item()) - if(not vars.dynamicscan or halt): + if(not story_settings.dynamicscan or halt): return excluded_world_info, regeneration_required, halt for i, t in enumerate(generated): decoded = utils.decodenewlines(tokenizer.decode(past[i])) + utils.decodenewlines(tokenizer.decode(t[tpu_mtj_backend.params["seq"] : tpu_mtj_backend.params["seq"] + n_generated])) - _, found = checkworldinfo(decoded, force_use_txt=True, actions=vars._actions) + _, found = checkworldinfo(decoded, force_use_txt=True, actions=story_settings._actions) found -= excluded_world_info[i] if(len(found) != 0): regeneration_required = True @@ -2109,60 +1997,60 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal def tpumtjgenerate_compiling_callback() -> None: print(colors.GREEN + "TPU backend compilation triggered" + colors.END) - vars.compiling = True + system_settings.compiling = True 
def tpumtjgenerate_stopped_compiling_callback() -> None: - vars.compiling = False + system_settings.compiling = False def tpumtjgenerate_settings_callback() -> dict: return { - "sampler_order": vars.sampler_order, - "top_p": float(vars.top_p), - "temp": float(vars.temp), - "top_k": int(vars.top_k), - "tfs": float(vars.tfs), - "typical": float(vars.typical), - "top_a": float(vars.top_a), - "repetition_penalty": float(vars.rep_pen), - "rpslope": float(vars.rep_pen_slope), - "rprange": int(vars.rep_pen_range), + "sampler_order": model_settings.sampler_order, + "top_p": float(model_settings.top_p), + "temp": float(model_settings.temp), + "top_k": int(model_settings.top_k), + "tfs": float(model_settings.tfs), + "typical": float(model_settings.typical), + "top_a": float(model_settings.top_a), + "repetition_penalty": float(model_settings.rep_pen), + "rpslope": float(model_settings.rep_pen_slope), + "rprange": int(model_settings.rep_pen_range), } # If we're running Colab or OAI, we still need a tokenizer. 
- if(vars.model == "Colab"): + if(model_settings.model == "Colab"): from transformers import GPT2TokenizerFast - tokenizer = GPT2TokenizerFast.from_pretrained("EleutherAI/gpt-neo-2.7B", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("EleutherAI/gpt-neo-2.7B", revision=model_settings.revision, cache_dir="cache") loadsettings() - elif(vars.model == "OAI"): + elif(model_settings.model == "OAI"): from transformers import GPT2TokenizerFast - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") loadsettings() # Load the TPU backend if requested - elif(vars.use_colab_tpu or vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): + elif(system_settings.use_colab_tpu or model_settings.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): global tpu_mtj_backend import tpu_mtj_backend - if(vars.model == "TPUMeshTransformerGPTNeoX"): - vars.badwordsids = vars.badwordsids_neox + if(model_settings.model == "TPUMeshTransformerGPTNeoX"): + model_settings.badwordsids = koboldai_settings.badwordsids_neox print("{0}Initializing Mesh Transformer JAX, please wait...{1}".format(colors.PURPLE, colors.END)) - if vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX") and (not vars.custmodpth or not os.path.isdir(vars.custmodpth)): - raise FileNotFoundError(f"The specified model path {repr(vars.custmodpth)} is not the path to a valid folder") + if model_settings.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX") and (not model_settings.custmodpth or not os.path.isdir(model_settings.custmodpth)): + raise FileNotFoundError(f"The specified model path {repr(model_settings.custmodpth)} is not the path to a valid folder") import tpu_mtj_backend - if(vars.model == "TPUMeshTransformerGPTNeoX"): + if(model_settings.model == 
"TPUMeshTransformerGPTNeoX"): tpu_mtj_backend.pad_token_id = 2 - tpu_mtj_backend.vars = vars + tpu_mtj_backend.model_settings = model_settings tpu_mtj_backend.warper_callback = tpumtjgenerate_warper_callback tpu_mtj_backend.stopping_callback = tpumtjgenerate_stopping_callback tpu_mtj_backend.compiling_callback = tpumtjgenerate_compiling_callback tpu_mtj_backend.stopped_compiling_callback = tpumtjgenerate_stopped_compiling_callback tpu_mtj_backend.settings_callback = tpumtjgenerate_settings_callback - vars.allowsp = True + system_settings.allowsp = True loadmodelsettings() loadsettings() - tpu_mtj_backend.load_model(vars.custmodpth, hf_checkpoint=vars.model not in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX") and vars.use_colab_tpu, **vars.modelconfig) - vars.modeldim = int(tpu_mtj_backend.params.get("d_embed", tpu_mtj_backend.params["d_model"])) + tpu_mtj_backend.load_model(model_settings.custmodpth, hf_checkpoint=model_settings.model not in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX") and system_settings.use_colab_tpu, **model_settings.modelconfig) + model_settings.modeldim = int(tpu_mtj_backend.params.get("d_embed", tpu_mtj_backend.params["d_model"])) tokenizer = tpu_mtj_backend.tokenizer - if(vars.badwordsids is vars.badwordsids_default and vars.model_type not in ("gpt2", "gpt_neo", "gptj")): - vars.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] + if(model_settings.badwordsids is koboldai_settings.badwordsids_default and model_settings.model_type not in ("gpt2", "gpt_neo", "gptj")): + model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] else: loadsettings() @@ -2176,7 +2064,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal emit('from_server', {'cmd': 'hide_model_name'}, broadcast=True) time.sleep(0.1) - if not vars.gamestarted: + if not story_settings.gamestarted: setStartState() sendsettings() 
refresh_settings() @@ -2189,7 +2077,7 @@ def index(): if 'new_ui' in request.args: return render_template('index_new.html', hide_ai_menu=args.noaimenu) else: - return render_template('index.html', hide_ai_menu=args.noaimenu, flaskwebgui=vars.flaskwebgui) + return render_template('index.html', hide_ai_menu=args.noaimenu, flaskwebgui=system_settings.flaskwebgui) @app.route('/favicon.ico') def favicon(): return send_from_directory(app.root_path, @@ -2199,9 +2087,9 @@ def download(): save_format = request.args.get("format", "json").strip().lower() if(save_format == "plaintext"): - txt = vars.prompt + "".join(vars.actions.values()) + txt = story_settings.prompt + "".join(story_settings.actions.values()) save = Response(txt) - filename = path.basename(vars.savedir) + filename = path.basename(system_settings.savedir) if filename[-5:] == ".json": filename = filename[:-5] save.headers.set('Content-Disposition', 'attachment', filename='%s.txt' % filename) @@ -2209,17 +2097,17 @@ def download(): # Build json to write js = {} - js["gamestarted"] = vars.gamestarted - js["prompt"] = vars.prompt - js["memory"] = vars.memory - js["authorsnote"] = vars.authornote - js["anotetemplate"] = vars.authornotetemplate - js["actions"] = tuple(vars.actions.values()) - js["actions_metadata"] = vars.actions_metadata + js["gamestarted"] = story_settings.gamestarted + js["prompt"] = story_settings.prompt + js["memory"] = story_settings.memory + js["authorsnote"] = story_settings.authornote + js["anotetemplate"] = story_settings.authornotetemplate + js["actions"] = tuple(story_settings.actions.values()) + js["actions_metadata"] = story_settings.actions_metadata js["worldinfo"] = [] # Extract only the important bits of WI - for wi in vars.worldinfo: + for wi in story_settings.worldinfo: if(wi["constant"] or wi["key"] != ""): js["worldinfo"].append({ "key": wi["key"], @@ -2232,7 +2120,7 @@ def download(): }) save = Response(json.dumps(js, indent=3)) - filename = path.basename(vars.savedir) + 
filename = path.basename(system_settings.savedir) if filename[-5:] == ".json": filename = filename[:-5] save.headers.set('Content-Disposition', 'attachment', filename='%s.json' % filename) @@ -2250,17 +2138,17 @@ def lua_startup(): file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r") js = json.load(file) if("userscripts" in js): - vars.userscripts = [] + system_settings.userscripts = [] for userscript in js["userscripts"]: if type(userscript) is not str: continue userscript = userscript.strip() if len(userscript) != 0 and all(q not in userscript for q in ("..", ":")) and all(userscript[0] not in q for q in ("/", "\\")) and os.path.exists(fileops.uspath(userscript)): - vars.userscripts.append(userscript) + system_settings.userscripts.append(userscript) if("corescript" in js and type(js["corescript"]) is str and all(q not in js["corescript"] for q in ("..", ":")) and all(js["corescript"][0] not in q for q in ("/", "\\"))): - vars.corescript = js["corescript"] + system_settings.corescript = js["corescript"] else: - vars.corescript = "default.lua" + system_settings.corescript = "default.lua" file.close() #==================================================================# @@ -2271,26 +2159,29 @@ def lua_startup(): print(colors.PURPLE + "Initializing Lua Bridge... 
" + colors.END, end="", flush=True) # Set up Lua state - vars.lua_state = lupa.LuaRuntime(unpack_returned_tuples=True) + system_settings.lua_state = lupa.LuaRuntime(unpack_returned_tuples=True) # Load bridge.lua bridged = { "corescript_path": "cores", "userscript_path": "userscripts", "config_path": "userscripts", - "lib_paths": vars.lua_state.table("lualibs", os.path.join("extern", "lualibs")), - "vars": vars, + "lib_paths": system_settings.lua_state.table("lualibs", os.path.join("extern", "lualibs")), + "model_settings": model_settings, + "story_settings": story_settings, + "user_settings": user_settings, + "system_settings": system_settings, } for kwarg in _bridged: bridged[kwarg] = _bridged[kwarg] try: - vars.lua_kobold, vars.lua_koboldcore, vars.lua_koboldbridge = vars.lua_state.globals().dofile("bridge.lua")( - vars.lua_state.globals().python, + system_settings.lua_kobold, system_settings.lua_koboldcore, system_settings.lua_koboldbridge = system_settings.lua_state.globals().dofile("bridge.lua")( + system_settings.lua_state.globals().python, bridged, ) except lupa.LuaError as e: print(colors.RED + "ERROR!" 
+ colors.END) - vars.lua_koboldbridge.obliterate_multiverse() + system_settings.lua_koboldbridge.obliterate_multiverse() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr) exit(1) @@ -2327,33 +2218,33 @@ def load_lua_scripts(): lst = fileops.getusfiles(long_desc=True) filenames_dict = {ob["filename"]: i for i, ob in enumerate(lst)} - for filename in vars.userscripts: + for filename in system_settings.userscripts: if filename in filenames_dict: i = filenames_dict[filename] filenames.append(filename) modulenames.append(lst[i]["modulename"]) descriptions.append(lst[i]["description"]) - vars.has_genmod = False + system_settings.has_genmod = False try: - vars.lua_koboldbridge.obliterate_multiverse() - tpool.execute(vars.lua_koboldbridge.load_corescript, vars.corescript) - vars.has_genmod = tpool.execute(vars.lua_koboldbridge.load_userscripts, filenames, modulenames, descriptions) - vars.lua_running = True + system_settings.lua_koboldbridge.obliterate_multiverse() + tpool.execute(system_settings.lua_koboldbridge.load_corescript, system_settings.corescript) + system_settings.has_genmod = tpool.execute(system_settings.lua_koboldbridge.load_userscripts, filenames, modulenames, descriptions) + system_settings.lua_running = True except lupa.LuaError as e: try: - vars.lua_koboldbridge.obliterate_multiverse() + system_settings.lua_koboldbridge.obliterate_multiverse() except: pass - vars.lua_running = False - if(vars.serverstarted): + system_settings.lua_running = False + if(system_settings.serverstarted): emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr) 
print("{0}{1}{2}".format(colors.YELLOW, "Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.", colors.END), file=sys.stderr) - if(vars.serverstarted): + if(system_settings.serverstarted): set_aibusy(0) #==================================================================# @@ -2361,9 +2252,9 @@ def load_lua_scripts(): #==================================================================# @bridged_kwarg() def lua_print(msg): - if(vars.lua_logname != vars.lua_koboldbridge.logging_name): - vars.lua_logname = vars.lua_koboldbridge.logging_name - print(colors.BLUE + lua_log_format_name(vars.lua_logname) + ":" + colors.END, file=sys.stderr) + if(system_settings.lua_logname != system_settings.lua_koboldbridge.logging_name): + system_settings.lua_logname = system_settings.lua_koboldbridge.logging_name + print(colors.BLUE + lua_log_format_name(system_settings.lua_logname) + ":" + colors.END, file=sys.stderr) print(colors.PURPLE + msg.replace("\033", "") + colors.END) #==================================================================# @@ -2371,9 +2262,9 @@ def lua_print(msg): #==================================================================# @bridged_kwarg() def lua_warn(msg): - if(vars.lua_logname != vars.lua_koboldbridge.logging_name): - vars.lua_logname = vars.lua_koboldbridge.logging_name - print(colors.BLUE + lua_log_format_name(vars.lua_logname) + ":" + colors.END, file=sys.stderr) + if(system_settings.lua_logname != system_settings.lua_koboldbridge.logging_name): + system_settings.lua_logname = system_settings.lua_koboldbridge.logging_name + print(colors.BLUE + lua_log_format_name(system_settings.lua_logname) + ":" + colors.END, file=sys.stderr) print(colors.YELLOW + msg.replace("\033", "") + colors.END) #==================================================================# @@ -2386,7 +2277,7 @@ def lua_decode(tokens): if("tokenizer" not in globals()): from transformers import GPT2TokenizerFast global tokenizer - tokenizer = 
GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") return utils.decodenewlines(tokenizer.decode(tokens)) #==================================================================# @@ -2398,7 +2289,7 @@ def lua_encode(string): if("tokenizer" not in globals()): from transformers import GPT2TokenizerFast global tokenizer - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") return tokenizer.encode(utils.encodenewlines(string), max_length=int(4e9), truncation=True) #==================================================================# @@ -2409,8 +2300,8 @@ def lua_encode(string): def lua_compute_context(submission, entries, folders, kwargs): assert type(submission) is str if(kwargs is None): - kwargs = vars.lua_state.table() - actions = vars._actions if vars.lua_koboldbridge.userstate == "genmod" else vars.actions + kwargs = system_settings.lua_state.table() + actions = story_settings._actions if system_settings.lua_koboldbridge.userstate == "genmod" else story_settings.actions allowed_entries = None allowed_folders = None if(entries is not None): @@ -2447,7 +2338,7 @@ def lua_compute_context(submission, entries, folders, kwargs): @bridged_kwarg() def lua_get_attr(uid, k): assert type(uid) is int and type(k) is str - if(uid in vars.worldinfo_u and k in ( + if(uid in story_settings.worldinfo_u and k in ( "key", "keysecondary", "content", @@ -2458,7 +2349,7 @@ def lua_get_attr(uid, k): "constant", "uid", )): - return vars.worldinfo_u[uid][k] + return story_settings.worldinfo_u[uid][k] #==================================================================# # Set property of a world info entry given its UID, property name and new value @@ -2466,7 +2357,7 @@ def lua_get_attr(uid, k): 
@bridged_kwarg() def lua_set_attr(uid, k, v): assert type(uid) is int and type(k) is str - assert uid in vars.worldinfo_u and k in ( + assert uid in story_settings.worldinfo_u and k in ( "key", "keysecondary", "content", @@ -2474,11 +2365,11 @@ def lua_set_attr(uid, k, v): "selective", "constant", ) - if(type(vars.worldinfo_u[uid][k]) is int and type(v) is float): + if(type(story_settings.worldinfo_u[uid][k]) is int and type(v) is float): v = int(v) - assert type(vars.worldinfo_u[uid][k]) is type(v) - vars.worldinfo_u[uid][k] = v - print(colors.GREEN + f"{lua_log_format_name(vars.lua_koboldbridge.logging_name)} set {k} of world info entry {uid} to {v}" + colors.END) + assert type(story_settings.worldinfo_u[uid][k]) is type(v) + story_settings.worldinfo_u[uid][k] = v + print(colors.GREEN + f"{lua_log_format_name(system_settings.lua_koboldbridge.logging_name)} set {k} of world info entry {uid} to {v}" + colors.END) #==================================================================# # Get property of a world info folder given its UID and property name @@ -2486,10 +2377,10 @@ def lua_set_attr(uid, k, v): @bridged_kwarg() def lua_folder_get_attr(uid, k): assert type(uid) is int and type(k) is str - if(uid in vars.wifolders_d and k in ( + if(uid in story_settings.wifolders_d and k in ( "name", )): - return vars.wifolders_d[uid][k] + return story_settings.wifolders_d[uid][k] #==================================================================# # Set property of a world info folder given its UID, property name and new value @@ -2497,37 +2388,37 @@ def lua_folder_get_attr(uid, k): @bridged_kwarg() def lua_folder_set_attr(uid, k, v): assert type(uid) is int and type(k) is str - assert uid in vars.wifolders_d and k in ( + assert uid in story_settings.wifolders_d and k in ( "name", ) - if(type(vars.wifolders_d[uid][k]) is int and type(v) is float): + if(type(story_settings.wifolders_d[uid][k]) is int and type(v) is float): v = int(v) - assert type(vars.wifolders_d[uid][k]) is 
type(v) - vars.wifolders_d[uid][k] = v - print(colors.GREEN + f"{lua_log_format_name(vars.lua_koboldbridge.logging_name)} set {k} of world info folder {uid} to {v}" + colors.END) + assert type(story_settings.wifolders_d[uid][k]) is type(v) + story_settings.wifolders_d[uid][k] = v + print(colors.GREEN + f"{lua_log_format_name(system_settings.lua_koboldbridge.logging_name)} set {k} of world info folder {uid} to {v}" + colors.END) #==================================================================# # Get the "Amount to Generate" #==================================================================# @bridged_kwarg() def lua_get_genamt(): - return vars.genamt + return model_settings.genamt #==================================================================# # Set the "Amount to Generate" #==================================================================# @bridged_kwarg() def lua_set_genamt(genamt): - assert vars.lua_koboldbridge.userstate != "genmod" and type(genamt) in (int, float) and genamt >= 0 - print(colors.GREEN + f"{lua_log_format_name(vars.lua_koboldbridge.logging_name)} set genamt to {int(genamt)}" + colors.END) - vars.genamt = int(genamt) + assert system_settings.lua_koboldbridge.userstate != "genmod" and type(genamt) in (int, float) and genamt >= 0 + print(colors.GREEN + f"{lua_log_format_name(system_settings.lua_koboldbridge.logging_name)} set genamt to {int(genamt)}" + colors.END) + model_settings.genamt = int(genamt) #==================================================================# # Get the "Gens Per Action" #==================================================================# @bridged_kwarg() def lua_get_numseqs(): - return vars.numseqs + return model_settings.numseqs #==================================================================# # Set the "Gens Per Action" @@ -2535,8 +2426,8 @@ def lua_get_numseqs(): @bridged_kwarg() def lua_set_numseqs(numseqs): assert type(numseqs) in (int, float) and numseqs >= 1 - print(colors.GREEN + 
f"{lua_log_format_name(vars.lua_koboldbridge.logging_name)} set numseqs to {int(numseqs)}" + colors.END) - vars.numseqs = int(numseqs) + print(colors.GREEN + f"{lua_log_format_name(system_settings.lua_koboldbridge.logging_name)} set numseqs to {int(numseqs)}" + colors.END) + model_settings.numseqs = int(numseqs) #==================================================================# # Check if a setting exists with the given name @@ -2600,30 +2491,30 @@ def lua_has_setting(setting): #==================================================================# @bridged_kwarg() def lua_get_setting(setting): - if(setting in ("settemp", "temp")): return vars.temp - if(setting in ("settopp", "topp", "top_p")): return vars.top_p - if(setting in ("settopk", "topk", "top_k")): return vars.top_k - if(setting in ("settfs", "tfs")): return vars.tfs - if(setting in ("settypical", "typical")): return vars.typical - if(setting in ("settopa", "topa")): return vars.top_a - if(setting in ("setreppen", "reppen")): return vars.rep_pen - if(setting in ("setreppenslope", "reppenslope")): return vars.rep_pen_slope - if(setting in ("setreppenrange", "reppenrange")): return vars.rep_pen_range - if(setting in ("settknmax", "tknmax")): return vars.max_length - if(setting == "anotedepth"): return vars.andepth - if(setting in ("setwidepth", "widepth")): return vars.widepth - if(setting in ("setuseprompt", "useprompt")): return vars.useprompt - if(setting in ("setadventure", "adventure")): return vars.adventure - if(setting in ("setchatmode", "chatmode")): return vars.chatmode - if(setting in ("setdynamicscan", "dynamicscan")): return vars.dynamicscan - if(setting in ("setnopromptgen", "nopromptgen")): return vars.nopromptgen - if(setting in ("autosave", "autosave")): return vars.autosave - if(setting in ("setrngpersist", "rngpersist")): return vars.rngpersist - if(setting in ("frmttriminc", "triminc")): return vars.formatoptns["frmttriminc"] - if(setting in ("frmtrmblln", "rmblln")): return 
vars.formatoptns["frmttrmblln"] - if(setting in ("frmtrmspch", "rmspch")): return vars.formatoptns["frmttrmspch"] - if(setting in ("frmtadsnsp", "adsnsp")): return vars.formatoptns["frmtadsnsp"] - if(setting in ("frmtsingleline", "singleline")): return vars.formatoptns["singleline"] + if(setting in ("settemp", "temp")): return model_settings.temp + if(setting in ("settopp", "topp", "top_p")): return model_settings.top_p + if(setting in ("settopk", "topk", "top_k")): return model_settings.top_k + if(setting in ("settfs", "tfs")): return model_settings.tfs + if(setting in ("settypical", "typical")): return model_settings.typical + if(setting in ("settopa", "topa")): return model_settings.top_a + if(setting in ("setreppen", "reppen")): return model_settings.rep_pen + if(setting in ("setreppenslope", "reppenslope")): return model_settings.rep_pen_slope + if(setting in ("setreppenrange", "reppenrange")): return model_settings.rep_pen_range + if(setting in ("settknmax", "tknmax")): return model_settings.max_length + if(setting == "anotedepth"): return story_settings.andepth + if(setting in ("setwidepth", "widepth")): return user_settings.widepth + if(setting in ("setuseprompt", "useprompt")): return story_settings.useprompt + if(setting in ("setadventure", "adventure")): return story_settings.adventure + if(setting in ("setchatmode", "chatmode")): return story_settings.chatmode + if(setting in ("setdynamicscan", "dynamicscan")): return story_settings.dynamicscan + if(setting in ("setnopromptgen", "nopromptgen")): return user_settings.nopromptgen + if(setting in ("autosave", "autosave")): return user_settings.autosave + if(setting in ("setrngpersist", "rngpersist")): return user_settings.rngpersist + if(setting in ("frmttriminc", "triminc")): return user_settings.formatoptns["frmttriminc"] + if(setting in ("frmtrmblln", "rmblln")): return user_settings.formatoptns["frmttrmblln"] + if(setting in ("frmtrmspch", "rmspch")): return user_settings.formatoptns["frmttrmspch"] + 
if(setting in ("frmtadsnsp", "adsnsp")): return user_settings.formatoptns["frmtadsnsp"] + if(setting in ("frmtsingleline", "singleline")): return user_settings.formatoptns["singleline"] #==================================================================# # Set the setting with the given name if it exists @@ -2633,40 +2524,40 @@ def lua_set_setting(setting, v): actual_type = type(lua_get_setting(setting)) assert v is not None and (actual_type is type(v) or (actual_type is int and type(v) is float)) v = actual_type(v) - print(colors.GREEN + f"{lua_log_format_name(vars.lua_koboldbridge.logging_name)} set {setting} to {v}" + colors.END) + print(colors.GREEN + f"{lua_log_format_name(system_settings.lua_koboldbridge.logging_name)} set {setting} to {v}" + colors.END) if(setting in ("setadventure", "adventure") and v): - vars.actionmode = 1 - if(setting in ("settemp", "temp")): vars.temp = v - if(setting in ("settopp", "topp")): vars.top_p = v - if(setting in ("settopk", "topk")): vars.top_k = v - if(setting in ("settfs", "tfs")): vars.tfs = v - if(setting in ("settypical", "typical")): vars.typical = v - if(setting in ("settopa", "topa")): vars.top_a = v - if(setting in ("setreppen", "reppen")): vars.rep_pen = v - if(setting in ("setreppenslope", "reppenslope")): vars.rep_pen_slope = v - if(setting in ("setreppenrange", "reppenrange")): vars.rep_pen_range = v - if(setting in ("settknmax", "tknmax")): vars.max_length = v; return True - if(setting == "anotedepth"): vars.andepth = v; return True - if(setting in ("setwidepth", "widepth")): vars.widepth = v; return True - if(setting in ("setuseprompt", "useprompt")): vars.useprompt = v; return True - if(setting in ("setadventure", "adventure")): vars.adventure = v - if(setting in ("setdynamicscan", "dynamicscan")): vars.dynamicscan = v - if(setting in ("setnopromptgen", "nopromptgen")): vars.nopromptgen = v - if(setting in ("autosave", "noautosave")): vars.autosave = v - if(setting in ("setrngpersist", "rngpersist")): 
vars.rngpersist = v - if(setting in ("setchatmode", "chatmode")): vars.chatmode = v - if(setting in ("frmttriminc", "triminc")): vars.formatoptns["frmttriminc"] = v - if(setting in ("frmtrmblln", "rmblln")): vars.formatoptns["frmttrmblln"] = v - if(setting in ("frmtrmspch", "rmspch")): vars.formatoptns["frmttrmspch"] = v - if(setting in ("frmtadsnsp", "adsnsp")): vars.formatoptns["frmtadsnsp"] = v - if(setting in ("frmtsingleline", "singleline")): vars.formatoptns["singleline"] = v + story_settings.actionmode = 1 + if(setting in ("settemp", "temp")): model_settings.temp = v + if(setting in ("settopp", "topp")): model_settings.top_p = v + if(setting in ("settopk", "topk")): model_settings.top_k = v + if(setting in ("settfs", "tfs")): model_settings.tfs = v + if(setting in ("settypical", "typical")): model_settings.typical = v + if(setting in ("settopa", "topa")): model_settings.top_a = v + if(setting in ("setreppen", "reppen")): model_settings.rep_pen = v + if(setting in ("setreppenslope", "reppenslope")): model_settings.rep_pen_slope = v + if(setting in ("setreppenrange", "reppenrange")): model_settings.rep_pen_range = v + if(setting in ("settknmax", "tknmax")): model_settings.max_length = v; return True + if(setting == "anotedepth"): story_settings.andepth = v; return True + if(setting in ("setwidepth", "widepth")): user_settings.widepth = v; return True + if(setting in ("setuseprompt", "useprompt")): story_settings.useprompt = v; return True + if(setting in ("setadventure", "adventure")): story_settings.adventure = v + if(setting in ("setdynamicscan", "dynamicscan")): story_settings.dynamicscan = v + if(setting in ("setnopromptgen", "nopromptgen")): user_settings.nopromptgen = v + if(setting in ("autosave", "noautosave")): user_settings.autosave = v + if(setting in ("setrngpersist", "rngpersist")): user_settings.rngpersist = v + if(setting in ("setchatmode", "chatmode")): story_settings.chatmode = v + if(setting in ("frmttriminc", "triminc")): 
user_settings.formatoptns["frmttriminc"] = v + if(setting in ("frmtrmblln", "rmblln")): user_settings.formatoptns["frmttrmblln"] = v + if(setting in ("frmtrmspch", "rmspch")): user_settings.formatoptns["frmttrmspch"] = v + if(setting in ("frmtadsnsp", "adsnsp")): user_settings.formatoptns["frmtadsnsp"] = v + if(setting in ("frmtsingleline", "singleline")): user_settings.formatoptns["singleline"] = v #==================================================================# # Get contents of memory #==================================================================# @bridged_kwarg() def lua_get_memory(): - return vars.memory + return story_settings.memory #==================================================================# # Set contents of memory @@ -2674,14 +2565,14 @@ def lua_get_memory(): @bridged_kwarg() def lua_set_memory(m): assert type(m) is str - vars.memory = m + story_settings.memory = m #==================================================================# # Get contents of author's note #==================================================================# @bridged_kwarg() def lua_get_authorsnote(): - return vars.authornote + return story_settings.authornote #==================================================================# # Set contents of author's note @@ -2689,14 +2580,14 @@ def lua_get_authorsnote(): @bridged_kwarg() def lua_set_authorsnote(m): assert type(m) is str - vars.authornote = m + story_settings.authornote = m #==================================================================# # Get contents of author's note template #==================================================================# @bridged_kwarg() def lua_get_authorsnotetemplate(): - return vars.authornotetemplate + return story_settings.authornotetemplate #==================================================================# # Set contents of author's note template @@ -2704,7 +2595,7 @@ def lua_get_authorsnotetemplate(): @bridged_kwarg() def lua_set_authorsnotetemplate(m): assert type(m) is 
str - vars.authornotetemplate = m + story_settings.authornotetemplate = m #==================================================================# # Save settings and send them to client @@ -2723,35 +2614,35 @@ def lua_set_chunk(k, v): assert k >= 0 assert k != 0 or len(v) != 0 if(len(v) == 0): - print(colors.GREEN + f"{lua_log_format_name(vars.lua_koboldbridge.logging_name)} deleted story chunk {k}" + colors.END) + print(colors.GREEN + f"{lua_log_format_name(system_settings.lua_koboldbridge.logging_name)} deleted story chunk {k}" + colors.END) chunk = int(k) - if(vars.lua_koboldbridge.userstate == "genmod"): - del vars._actions[chunk-1] - vars.lua_deleted.add(chunk) - if(not hasattr(vars, "_actions") or vars._actions is not vars.actions): + if(system_settings.lua_koboldbridge.userstate == "genmod"): + del story_settings._actions[chunk-1] + story_settings.lua_deleted.add(chunk) + if(not hasattr(story_settings, "_actions") or story_settings._actions is not story_settings.actions): #Instead of deleting we'll blank out the text. 
This way our actions and actions_metadata stay in sync and we can restore the chunk on an undo - vars.actions[chunk-1] = "" - vars.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": vars.actions_metadata[chunk-1]['Selected Text'], "Pinned": False, "Editted": True}] + vars.actions_metadata[chunk-1]['Alternative Text'] - vars.actions_metadata[chunk-1]['Selected Text'] = '' + story_settings.actions[chunk-1] = "" + story_settings.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": story_settings.actions_metadata[chunk-1]['Selected Text'], "Pinned": False, "Editted": True}] + story_settings.actions_metadata[chunk-1]['Alternative Text'] + story_settings.actions_metadata[chunk-1]['Selected Text'] = '' send_debug() else: if(k == 0): - print(colors.GREEN + f"{lua_log_format_name(vars.lua_koboldbridge.logging_name)} edited prompt chunk" + colors.END) + print(colors.GREEN + f"{lua_log_format_name(system_settings.lua_koboldbridge.logging_name)} edited prompt chunk" + colors.END) else: - print(colors.GREEN + f"{lua_log_format_name(vars.lua_koboldbridge.logging_name)} edited story chunk {k}" + colors.END) + print(colors.GREEN + f"{lua_log_format_name(system_settings.lua_koboldbridge.logging_name)} edited story chunk {k}" + colors.END) chunk = int(k) if(chunk == 0): - if(vars.lua_koboldbridge.userstate == "genmod"): - vars._prompt = v - vars.lua_edited.add(chunk) - vars.prompt = v + if(system_settings.lua_koboldbridge.userstate == "genmod"): + story_settings._prompt = v + story_settings.lua_edited.add(chunk) + story_settings.prompt = v else: - if(vars.lua_koboldbridge.userstate == "genmod"): - vars._actions[chunk-1] = v - vars.lua_edited.add(chunk) - vars.actions[chunk-1] = v - vars.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": vars.actions_metadata[chunk-1]['Selected Text'], "Pinned": False, "Editted": True}] + vars.actions_metadata[chunk-1]['Alternative Text'] - vars.actions_metadata[chunk-1]['Selected Text'] = v + 
if(system_settings.lua_koboldbridge.userstate == "genmod"): + story_settings._actions[chunk-1] = v + story_settings.lua_edited.add(chunk) + story_settings.actions[chunk-1] = v + story_settings.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": story_settings.actions_metadata[chunk-1]['Selected Text'], "Pinned": False, "Editted": True}] + story_settings.actions_metadata[chunk-1]['Alternative Text'] + story_settings.actions_metadata[chunk-1]['Selected Text'] = v send_debug() #==================================================================# @@ -2759,27 +2650,27 @@ def lua_set_chunk(k, v): #==================================================================# @bridged_kwarg() def lua_get_modeltype(): - if(vars.noai): + if(system_settings.noai): return "readonly" - if(vars.model in ("Colab", "OAI", "InferKit")): + if(model_settings.model in ("Colab", "OAI", "InferKit")): return "api" - if(not vars.use_colab_tpu and vars.model not in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX") and (vars.model in ("GPT2Custom", "NeoCustom") or vars.model_type in ("gpt2", "gpt_neo", "gptj"))): + if(not system_settings.use_colab_tpu and model_settings.model not in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX") and (model_settings.model in ("GPT2Custom", "NeoCustom") or model_settings.model_type in ("gpt2", "gpt_neo", "gptj"))): hidden_size = get_hidden_size_from_model(model) - if(vars.model in ("gpt2",) or (vars.model_type == "gpt2" and hidden_size == 768)): + if(model_settings.model in ("gpt2",) or (model_settings.model_type == "gpt2" and hidden_size == 768)): return "gpt2" - if(vars.model in ("gpt2-medium",) or (vars.model_type == "gpt2" and hidden_size == 1024)): + if(model_settings.model in ("gpt2-medium",) or (model_settings.model_type == "gpt2" and hidden_size == 1024)): return "gpt2-medium" - if(vars.model in ("gpt2-large",) or (vars.model_type == "gpt2" and hidden_size == 1280)): + if(model_settings.model in ("gpt2-large",) or (model_settings.model_type 
== "gpt2" and hidden_size == 1280)): return "gpt2-large" - if(vars.model in ("gpt2-xl",) or (vars.model_type == "gpt2" and hidden_size == 1600)): + if(model_settings.model in ("gpt2-xl",) or (model_settings.model_type == "gpt2" and hidden_size == 1600)): return "gpt2-xl" - if(vars.model_type == "gpt_neo" and hidden_size == 768): + if(model_settings.model_type == "gpt_neo" and hidden_size == 768): return "gpt-neo-125M" - if(vars.model in ("EleutherAI/gpt-neo-1.3B",) or (vars.model_type == "gpt_neo" and hidden_size == 2048)): + if(model_settings.model in ("EleutherAI/gpt-neo-1.3B",) or (model_settings.model_type == "gpt_neo" and hidden_size == 2048)): return "gpt-neo-1.3B" - if(vars.model in ("EleutherAI/gpt-neo-2.7B",) or (vars.model_type == "gpt_neo" and hidden_size == 2560)): + if(model_settings.model in ("EleutherAI/gpt-neo-2.7B",) or (model_settings.model_type == "gpt_neo" and hidden_size == 2560)): return "gpt-neo-2.7B" - if(vars.model in ("EleutherAI/gpt-j-6B",) or ((vars.use_colab_tpu or vars.model == "TPUMeshTransformerGPTJ") and tpu_mtj_backend.params["d_model"] == 4096) or (vars.model_type in ("gpt_neo", "gptj") and hidden_size == 4096)): + if(model_settings.model in ("EleutherAI/gpt-j-6B",) or ((system_settings.use_colab_tpu or model_settings.model == "TPUMeshTransformerGPTJ") and tpu_mtj_backend.params["d_model"] == 4096) or (model_settings.model_type in ("gpt_neo", "gptj") and hidden_size == 4096)): return "gpt-j-6B" return "unknown" @@ -2788,11 +2679,11 @@ def lua_get_modeltype(): #==================================================================# @bridged_kwarg() def lua_get_modelbackend(): - if(vars.noai): + if(system_settings.noai): return "readonly" - if(vars.model in ("Colab", "OAI", "InferKit")): + if(model_settings.model in ("Colab", "OAI", "InferKit")): return "api" - if(vars.use_colab_tpu or vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): + if(system_settings.use_colab_tpu or model_settings.model in 
("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): return "mtj" return "transformers" @@ -2801,7 +2692,7 @@ def lua_get_modelbackend(): #==================================================================# @bridged_kwarg() def lua_is_custommodel(): - return vars.model in ("GPT2Custom", "NeoCustom", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX") + return model_settings.model in ("GPT2Custom", "NeoCustom", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX") #==================================================================# # Return the filename (as a string) of the current soft prompt, or @@ -2809,7 +2700,7 @@ def lua_is_custommodel(): #==================================================================# @bridged_kwarg() def lua_get_spfilename(): - return vars.spfilename.strip() or None + return system_settings.spfilename.strip() or None #==================================================================# # When called with a string as argument, sets the current soft prompt; @@ -2831,14 +2722,14 @@ def lua_set_spfilename(filename: Union[str, None]): #==================================================================# def execute_inmod(): setgamesaved(False) - vars.lua_logname = ... - vars.lua_edited = set() - vars.lua_deleted = set() + system_settings.lua_logname = ... 
+ story_settings.lua_edited = set() + story_settings.lua_deleted = set() try: - tpool.execute(vars.lua_koboldbridge.execute_inmod) + tpool.execute(system_settings.lua_koboldbridge.execute_inmod) except lupa.LuaError as e: - vars.lua_koboldbridge.obliterate_multiverse() - vars.lua_running = False + system_settings.lua_koboldbridge.obliterate_multiverse() + system_settings.lua_running = False emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) @@ -2847,28 +2738,28 @@ def execute_inmod(): set_aibusy(0) def execute_genmod(): - vars.lua_koboldbridge.execute_genmod() + system_settings.lua_koboldbridge.execute_genmod() def execute_outmod(): setgamesaved(False) emit('from_server', {'cmd': 'hidemsg', 'data': ''}, broadcast=True) try: - tpool.execute(vars.lua_koboldbridge.execute_outmod) + tpool.execute(system_settings.lua_koboldbridge.execute_outmod) except lupa.LuaError as e: - vars.lua_koboldbridge.obliterate_multiverse() - vars.lua_running = False + system_settings.lua_koboldbridge.obliterate_multiverse() + system_settings.lua_running = False emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr) print("{0}{1}{2}".format(colors.YELLOW, "Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.", colors.END), file=sys.stderr) set_aibusy(0) - if(vars.lua_koboldbridge.resend_settings_required): - vars.lua_koboldbridge.resend_settings_required = False + if(system_settings.lua_koboldbridge.resend_settings_required): + system_settings.lua_koboldbridge.resend_settings_required = False lua_resend_settings() - for k in 
vars.lua_edited: - inlineedit(k, vars.actions[k]) - for k in vars.lua_deleted: + for k in story_settings.lua_edited: + inlineedit(k, story_settings.actions[k]) + for k in story_settings.lua_deleted: inlinedelete(k) @@ -2882,93 +2773,93 @@ def execute_outmod(): @socketio.on('connect') def do_connect(): print("{0}Client connected!{1}".format(colors.GREEN, colors.END)) - emit('from_server', {'cmd': 'setchatname', 'data': vars.chatname}) - emit('from_server', {'cmd': 'setanotetemplate', 'data': vars.authornotetemplate}) - emit('from_server', {'cmd': 'connected', 'smandelete': vars.smandelete, 'smanrename': vars.smanrename, 'modelname': getmodelname()}) - if(vars.host): + emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}) + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}) + emit('from_server', {'cmd': 'connected', 'smandelete': system_settings.smandelete, 'smanrename': system_settings.smanrename, 'modelname': getmodelname()}) + if(system_settings.host): emit('from_server', {'cmd': 'runs_remotely'}) - if(vars.flaskwebgui): + if(system_settings.flaskwebgui): emit('from_server', {'cmd': 'flaskwebgui'}) - if(vars.allowsp): - emit('from_server', {'cmd': 'allowsp', 'data': vars.allowsp}) + if(system_settings.allowsp): + emit('from_server', {'cmd': 'allowsp', 'data': system_settings.allowsp}) sendUSStatItems() - emit('from_server', {'cmd': 'spstatitems', 'data': {vars.spfilename: vars.spmeta} if vars.allowsp and len(vars.spfilename) else {}}, broadcast=True) + emit('from_server', {'cmd': 'spstatitems', 'data': {system_settings.spfilename: system_settings.spmeta} if system_settings.allowsp and len(system_settings.spfilename) else {}}, broadcast=True) - if(not vars.gamestarted): + if(not story_settings.gamestarted): setStartState() sendsettings() refresh_settings() - vars.laststory = None - emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}) + user_settings.laststory = None + 
emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}) - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}) - vars.mode = "play" + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}) + story_settings.mode = "play" else: # Game in session, send current game data and ready state to browser refresh_story() sendsettings() refresh_settings() - emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}) + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}) - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}) - if(vars.mode == "play"): - if(not vars.aibusy): + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}) + if(story_settings.mode == "play"): + if(not system_settings.aibusy): emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}) else: emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}) - elif(vars.mode == "edit"): + elif(story_settings.mode == "edit"): emit('from_server', {'cmd': 'editmode', 'data': 'true'}) - elif(vars.mode == "memory"): + elif(story_settings.mode == "memory"): emit('from_server', {'cmd': 'memmode', 'data': 'true'}) - elif(vars.mode == "wi"): + elif(story_settings.mode == "wi"): emit('from_server', {'cmd': 'wimode', 'data': 'true'}) - emit('from_server', {'cmd': 'gamesaved', 'data': vars.gamesaved}, broadcast=True) + emit('from_server', {'cmd': 'gamesaved', 'data': story_settings.gamesaved}, broadcast=True) #==================================================================# # Event triggered when browser SocketIO sends data to the server #==================================================================# 
@socketio.on('message') def get_message(msg): - if not vars.quiet: + if not system_settings.quiet: print("{0}Data received:{1}{2}".format(colors.GREEN, msg, colors.END)) # Submit action if(msg['cmd'] == 'submit'): - if(vars.mode == "play"): - if(vars.aibusy): + if(story_settings.mode == "play"): + if(system_settings.aibusy): if(msg.get('allowabort', False)): - vars.abort = True + system_settings.abort = True return - vars.abort = False - vars.lua_koboldbridge.feedback = None - if(vars.chatmode): + system_settings.abort = False + system_settings.lua_koboldbridge.feedback = None + if(story_settings.chatmode): if(type(msg['chatname']) is not str): raise ValueError("Chatname must be a string") - vars.chatname = msg['chatname'] + story_settings.chatname = msg['chatname'] settingschanged() - emit('from_server', {'cmd': 'setchatname', 'data': vars.chatname}) - vars.recentrng = vars.recentrngm = None + emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}) + story_settings.recentrng = story_settings.recentrngm = None actionsubmit(msg['data'], actionmode=msg['actionmode']) - elif(vars.mode == "edit"): + elif(story_settings.mode == "edit"): editsubmit(msg['data']) - elif(vars.mode == "memory"): + elif(story_settings.mode == "memory"): memsubmit(msg['data']) # Retry Action elif(msg['cmd'] == 'retry'): - if(vars.aibusy): + if(system_settings.aibusy): if(msg.get('allowabort', False)): - vars.abort = True + system_settings.abort = True return - vars.abort = False - if(vars.chatmode): + system_settings.abort = False + if(story_settings.chatmode): if(type(msg['chatname']) is not str): raise ValueError("Chatname must be a string") - vars.chatname = msg['chatname'] + story_settings.chatname = msg['chatname'] settingschanged() - emit('from_server', {'cmd': 'setchatname', 'data': vars.chatname}) + emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}) actionretry(msg['data']) # Back/Undo Action elif(msg['cmd'] == 'back'): @@ -2978,11 
+2869,11 @@ def get_message(msg): actionredo() # EditMode Action (old) elif(msg['cmd'] == 'edit'): - if(vars.mode == "play"): - vars.mode = "edit" + if(story_settings.mode == "play"): + story_settings.mode = "edit" emit('from_server', {'cmd': 'editmode', 'data': 'true'}, broadcast=True) - elif(vars.mode == "edit"): - vars.mode = "play" + elif(story_settings.mode == "edit"): + story_settings.mode = "play" emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) # EditLine Action (old) elif(msg['cmd'] == 'editline'): @@ -2997,75 +2888,75 @@ def get_message(msg): deleterequest() elif(msg['cmd'] == 'memory'): togglememorymode() - elif(not vars.host and msg['cmd'] == 'savetofile'): + elif(not system_settings.host and msg['cmd'] == 'savetofile'): savetofile() - elif(not vars.host and msg['cmd'] == 'loadfromfile'): + elif(not system_settings.host and msg['cmd'] == 'loadfromfile'): loadfromfile() elif(msg['cmd'] == 'loadfromstring'): loadRequest(json.loads(msg['data']), filename=msg['filename']) - elif(not vars.host and msg['cmd'] == 'import'): + elif(not system_settings.host and msg['cmd'] == 'import'): importRequest() elif(msg['cmd'] == 'newgame'): newGameRequest() elif(msg['cmd'] == 'rndgame'): randomGameRequest(msg['data'], memory=msg['memory']) elif(msg['cmd'] == 'settemp'): - vars.temp = float(msg['data']) + model_settings.temp = float(msg['data']) emit('from_server', {'cmd': 'setlabeltemp', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'settopp'): - vars.top_p = float(msg['data']) + model_settings.top_p = float(msg['data']) emit('from_server', {'cmd': 'setlabeltopp', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'settopk'): - vars.top_k = int(msg['data']) + model_settings.top_k = int(msg['data']) emit('from_server', {'cmd': 'setlabeltopk', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'settfs'): - vars.tfs = 
float(msg['data']) + model_settings.tfs = float(msg['data']) emit('from_server', {'cmd': 'setlabeltfs', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'settypical'): - vars.typical = float(msg['data']) + model_settings.typical = float(msg['data']) emit('from_server', {'cmd': 'setlabeltypical', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'settopa'): - vars.top_a = float(msg['data']) + model_settings.top_a = float(msg['data']) emit('from_server', {'cmd': 'setlabeltopa', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'setreppen'): - vars.rep_pen = float(msg['data']) + model_settings.rep_pen = float(msg['data']) emit('from_server', {'cmd': 'setlabelreppen', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'setreppenslope'): - vars.rep_pen_slope = float(msg['data']) + model_settings.rep_pen_slope = float(msg['data']) emit('from_server', {'cmd': 'setlabelreppenslope', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'setreppenrange'): - vars.rep_pen_range = float(msg['data']) + model_settings.rep_pen_range = float(msg['data']) emit('from_server', {'cmd': 'setlabelreppenrange', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'setoutput'): - vars.genamt = int(msg['data']) + model_settings.genamt = int(msg['data']) emit('from_server', {'cmd': 'setlabeloutput', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'settknmax'): - vars.max_length = int(msg['data']) + model_settings.max_length = int(msg['data']) emit('from_server', {'cmd': 'setlabeltknmax', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() elif(msg['cmd'] == 'setikgen'): - vars.ikgen = int(msg['data']) + model_settings.ikgen = int(msg['data']) emit('from_server', {'cmd': 
'setlabelikgen', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() @@ -3074,50 +2965,50 @@ def get_message(msg): anotesubmit(msg['data'], template=msg['template']) # Author's Note depth update elif(msg['cmd'] == 'anotedepth'): - vars.andepth = int(msg['data']) + story_settings.andepth = int(msg['data']) emit('from_server', {'cmd': 'setlabelanotedepth', 'data': msg['data']}, broadcast=True) settingschanged() refresh_settings() # Format - Trim incomplete sentences elif(msg['cmd'] == 'frmttriminc'): - if('frmttriminc' in vars.formatoptns): - vars.formatoptns["frmttriminc"] = msg['data'] + if('frmttriminc' in user_settings.formatoptns): + user_settings.formatoptns["frmttriminc"] = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'frmtrmblln'): - if('frmtrmblln' in vars.formatoptns): - vars.formatoptns["frmtrmblln"] = msg['data'] + if('frmtrmblln' in user_settings.formatoptns): + user_settings.formatoptns["frmtrmblln"] = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'frmtrmspch'): - if('frmtrmspch' in vars.formatoptns): - vars.formatoptns["frmtrmspch"] = msg['data'] + if('frmtrmspch' in user_settings.formatoptns): + user_settings.formatoptns["frmtrmspch"] = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'frmtadsnsp'): - if('frmtadsnsp' in vars.formatoptns): - vars.formatoptns["frmtadsnsp"] = msg['data'] + if('frmtadsnsp' in user_settings.formatoptns): + user_settings.formatoptns["frmtadsnsp"] = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'singleline'): - if('singleline' in vars.formatoptns): - vars.formatoptns["singleline"] = msg['data'] + if('singleline' in user_settings.formatoptns): + user_settings.formatoptns["singleline"] = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'importselect'): - vars.importnum = int(msg["data"].replace("import", "")) + user_settings.importnum = int(msg["data"].replace("import", "")) elif(msg['cmd'] == 
'importcancel'): emit('from_server', {'cmd': 'popupshow', 'data': False}) - vars.importjs = {} + user_settings.importjs = {} elif(msg['cmd'] == 'importaccept'): emit('from_server', {'cmd': 'popupshow', 'data': False}) importgame() elif(msg['cmd'] == 'wi'): togglewimode() elif(msg['cmd'] == 'wiinit'): - if(int(msg['data']) < len(vars.worldinfo)): + if(int(msg['data']) < len(story_settings.worldinfo)): setgamesaved(False) - vars.worldinfo[msg['data']]["init"] = True + story_settings.worldinfo[msg['data']]["init"] = True addwiitem(folder_uid=msg['folder']) elif(msg['cmd'] == 'wifolderinit'): addwifolder() @@ -3130,20 +3021,20 @@ def get_message(msg): elif(msg['cmd'] == 'wifolderdelete'): deletewifolder(msg['data']) elif(msg['cmd'] == 'wiexpand'): - assert 0 <= int(msg['data']) < len(vars.worldinfo) + assert 0 <= int(msg['data']) < len(story_settings.worldinfo) setgamesaved(False) emit('from_server', {'cmd': 'wiexpand', 'data': msg['data']}, broadcast=True) elif(msg['cmd'] == 'wiexpandfolder'): - assert 0 <= int(msg['data']) < len(vars.worldinfo) + assert 0 <= int(msg['data']) < len(story_settings.worldinfo) setgamesaved(False) emit('from_server', {'cmd': 'wiexpandfolder', 'data': msg['data']}, broadcast=True) elif(msg['cmd'] == 'wifoldercollapsecontent'): setgamesaved(False) - vars.wifolders_d[msg['data']]['collapsed'] = True + story_settings.wifolders_d[msg['data']]['collapsed'] = True emit('from_server', {'cmd': 'wifoldercollapsecontent', 'data': msg['data']}, broadcast=True) elif(msg['cmd'] == 'wifolderexpandcontent'): setgamesaved(False) - vars.wifolders_d[msg['data']]['collapsed'] = False + story_settings.wifolders_d[msg['data']]['collapsed'] = False emit('from_server', {'cmd': 'wifolderexpandcontent', 'data': msg['data']}, broadcast=True) elif(msg['cmd'] == 'wiupdate'): setgamesaved(False) @@ -3151,31 +3042,31 @@ def get_message(msg): fields = ("key", "keysecondary", "content", "comment") for field in fields: if(field in msg['data'] and type(msg['data'][field]) 
is str): - vars.worldinfo[num][field] = msg['data'][field] - emit('from_server', {'cmd': 'wiupdate', 'num': msg['num'], 'data': {field: vars.worldinfo[num][field] for field in fields}}, broadcast=True) + story_settings.worldinfo[num][field] = msg['data'][field] + emit('from_server', {'cmd': 'wiupdate', 'num': msg['num'], 'data': {field: story_settings.worldinfo[num][field] for field in fields}}, broadcast=True) elif(msg['cmd'] == 'wifolderupdate'): setgamesaved(False) uid = int(msg['uid']) fields = ("name", "collapsed") for field in fields: if(field in msg['data'] and type(msg['data'][field]) is (str if field != "collapsed" else bool)): - vars.wifolders_d[uid][field] = msg['data'][field] - emit('from_server', {'cmd': 'wifolderupdate', 'uid': msg['uid'], 'data': {field: vars.wifolders_d[uid][field] for field in fields}}, broadcast=True) + story_settings.wifolders_d[uid][field] = msg['data'][field] + emit('from_server', {'cmd': 'wifolderupdate', 'uid': msg['uid'], 'data': {field: story_settings.wifolders_d[uid][field] for field in fields}}, broadcast=True) elif(msg['cmd'] == 'wiselon'): setgamesaved(False) - vars.worldinfo[msg['data']]["selective"] = True + story_settings.worldinfo[msg['data']]["selective"] = True emit('from_server', {'cmd': 'wiselon', 'data': msg['data']}, broadcast=True) elif(msg['cmd'] == 'wiseloff'): setgamesaved(False) - vars.worldinfo[msg['data']]["selective"] = False + story_settings.worldinfo[msg['data']]["selective"] = False emit('from_server', {'cmd': 'wiseloff', 'data': msg['data']}, broadcast=True) elif(msg['cmd'] == 'wiconstanton'): setgamesaved(False) - vars.worldinfo[msg['data']]["constant"] = True + story_settings.worldinfo[msg['data']]["constant"] = True emit('from_server', {'cmd': 'wiconstanton', 'data': msg['data']}, broadcast=True) elif(msg['cmd'] == 'wiconstantoff'): setgamesaved(False) - vars.worldinfo[msg['data']]["constant"] = False + story_settings.worldinfo[msg['data']]["constant"] = False emit('from_server', {'cmd': 
'wiconstantoff', 'data': msg['data']}, broadcast=True) elif(msg['cmd'] == 'sendwilist'): commitwi(msg['data']) @@ -3193,15 +3084,15 @@ def get_message(msg): unloaded, loaded = getuslist() emit('from_server', {'cmd': 'buildus', 'data': {"unloaded": unloaded, "loaded": loaded}}) elif(msg['cmd'] == 'samplerlistrequest'): - emit('from_server', {'cmd': 'buildsamplers', 'data': vars.sampler_order}) + emit('from_server', {'cmd': 'buildsamplers', 'data': model_settings.sampler_order}) elif(msg['cmd'] == 'usloaded'): - vars.userscripts = [] + system_settings.userscripts = [] for userscript in msg['data']: if type(userscript) is not str: continue userscript = userscript.strip() if len(userscript) != 0 and all(q not in userscript for q in ("..", ":")) and all(userscript[0] not in q for q in ("/", "\\")) and os.path.exists(fileops.uspath(userscript)): - vars.userscripts.append(userscript) + system_settings.userscripts.append(userscript) settingschanged() elif(msg['cmd'] == 'usload'): load_lua_scripts() @@ -3211,11 +3102,11 @@ def get_message(msg): sampler_order = msg["data"] if(not isinstance(sampler_order, list)): raise ValueError(f"Sampler order must be a list, but got a {type(sampler_order)}") - if(len(sampler_order) != len(vars.sampler_order)): - raise ValueError(f"Sampler order must be a list of length {len(vars.sampler_order)}, but got a list of length {len(sampler_order)}") + if(len(sampler_order) != len(model_settings.sampler_order)): + raise ValueError(f"Sampler order must be a list of length {len(model_settings.sampler_order)}, but got a list of length {len(sampler_order)}") if(not all(isinstance(e, int) for e in sampler_order)): raise ValueError(f"Sampler order must be a list of ints, but got a list with at least one non-int element") - vars.sampler_order = sampler_order + model_settings.sampler_order = sampler_order settingschanged() elif(msg['cmd'] == 'list_model'): sendModelSelection(menu=msg['data']) @@ -3225,8 +3116,8 @@ def get_message(msg): changed = True if 
not utils.HAS_ACCELERATE: msg['disk_layers'] = "0" - if os.path.exists("settings/" + vars.model.replace('/', '_') + ".breakmodel"): - with open("settings/" + vars.model.replace('/', '_') + ".breakmodel", "r") as file: + if os.path.exists("settings/" + model_settings.model.replace('/', '_') + ".breakmodel"): + with open("settings/" + model_settings.model.replace('/', '_') + ".breakmodel", "r") as file: data = file.read().split('\n')[:2] if len(data) < 2: data.append("0") @@ -3234,10 +3125,10 @@ def get_message(msg): if gpu_layers == msg['gpu_layers'] and disk_layers == msg['disk_layers']: changed = False if changed: - f = open("settings/" + vars.model.replace('/', '_') + ".breakmodel", "w") + f = open("settings/" + model_settings.model.replace('/', '_') + ".breakmodel", "w") f.write(msg['gpu_layers'] + '\n' + msg['disk_layers']) f.close() - vars.colaburl = msg['url'] + "/request" + model_settings.colaburl = msg['url'] + "/request" load_model(use_gpu=msg['use_gpu'], gpu_layers=msg['gpu_layers'], disk_layers=msg['disk_layers'], online_model=msg['online_model']) elif(msg['cmd'] == 'show_model'): print("Model Name: {}".format(getmodelname())) @@ -3254,7 +3145,7 @@ def get_message(msg): # If we're on a custom line that we have selected a model for, the path variable will be in msg # so if that's missing we need to run the menu to show the model folders in the models folder if msg['data'] in ('NeoCustom', 'GPT2Custom') and 'path' not in msg and 'path_modelname' not in msg: - if 'folder' not in msg or vars.host: + if 'folder' not in msg or system_settings.host: folder = "./models" else: folder = msg['folder'] @@ -3262,32 +3153,32 @@ def get_message(msg): elif msg['data'] in ('NeoCustom', 'GPT2Custom') and 'path_modelname' in msg: #Here the user entered custom text in the text box. This could be either a model name or a path. 
if check_if_dir_is_model(msg['path_modelname']): - vars.model = msg['data'] - vars.custmodpth = msg['path_modelname'] + model_settings.model = msg['data'] + model_settings.custmodpth = msg['path_modelname'] get_model_info(msg['data'], directory=msg['path']) else: - vars.model = msg['path_modelname'] + model_settings.model = msg['path_modelname'] try: - get_model_info(vars.model) + get_model_info(model_settings.model) except: emit('from_server', {'cmd': 'errmsg', 'data': "The model entered doesn't exist."}) elif msg['data'] in ('NeoCustom', 'GPT2Custom'): if check_if_dir_is_model(msg['path']): - vars.model = msg['data'] - vars.custmodpth = msg['path'] + model_settings.model = msg['data'] + model_settings.custmodpth = msg['path'] get_model_info(msg['data'], directory=msg['path']) else: - if vars.host: + if system_settings.host: sendModelSelection(menu=msg['data'], folder="./models") else: sendModelSelection(menu=msg['data'], folder=msg['path']) else: - vars.model = msg['data'] + model_settings.model = msg['data'] if 'path' in msg: - vars.custmodpth = msg['path'] + model_settings.custmodpth = msg['path'] get_model_info(msg['data'], directory=msg['path']) else: - get_model_info(vars.model) + get_model_info(model_settings.model) elif(msg['cmd'] == 'delete_model'): if "{}/models".format(os.getcwd()) in os.path.abspath(msg['data']) or "{}\\models".format(os.getcwd()) in os.path.abspath(msg['data']): if check_if_dir_is_model(msg['data']): @@ -3302,74 +3193,74 @@ def get_message(msg): elif(msg['cmd'] == 'OAI_Key_Update'): get_oai_models(msg['key']) elif(msg['cmd'] == 'loadselect'): - vars.loadselect = msg["data"] + user_settings.loadselect = msg["data"] elif(msg['cmd'] == 'spselect'): - vars.spselect = msg["data"] + user_settings.spselect = msg["data"] elif(msg['cmd'] == 'loadrequest'): - loadRequest(fileops.storypath(vars.loadselect)) + loadRequest(fileops.storypath(user_settings.loadselect)) elif(msg['cmd'] == 'sprequest'): - spRequest(vars.spselect) + 
spRequest(user_settings.spselect) elif(msg['cmd'] == 'deletestory'): deletesave(msg['data']) elif(msg['cmd'] == 'renamestory'): renamesave(msg['data'], msg['newname']) elif(msg['cmd'] == 'clearoverwrite'): - vars.svowname = "" - vars.saveow = False + user_settings.svowname = "" + user_settings.saveow = False elif(msg['cmd'] == 'seqsel'): selectsequence(msg['data']) elif(msg['cmd'] == 'seqpin'): pinsequence(msg['data']) elif(msg['cmd'] == 'setnumseq'): - vars.numseqs = int(msg['data']) + model_settings.numseqs = int(msg['data']) emit('from_server', {'cmd': 'setlabelnumseq', 'data': msg['data']}) settingschanged() refresh_settings() elif(msg['cmd'] == 'setwidepth'): - vars.widepth = int(msg['data']) + user_settings.widepth = int(msg['data']) emit('from_server', {'cmd': 'setlabelwidepth', 'data': msg['data']}) settingschanged() refresh_settings() elif(msg['cmd'] == 'setuseprompt'): - vars.useprompt = msg['data'] + story_settings.useprompt = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'setadventure'): - vars.adventure = msg['data'] - vars.chatmode = False + story_settings.adventure = msg['data'] + story_settings.chatmode = False settingschanged() refresh_settings() elif(msg['cmd'] == 'autosave'): - vars.autosave = msg['data'] + user_settings.autosave = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'setchatmode'): - vars.chatmode = msg['data'] - vars.adventure = False + story_settings.chatmode = msg['data'] + story_settings.adventure = False settingschanged() refresh_settings() elif(msg['cmd'] == 'setdynamicscan'): - vars.dynamicscan = msg['data'] + story_settings.dynamicscan = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'setnopromptgen'): - vars.nopromptgen = msg['data'] + user_settings.nopromptgen = msg['data'] settingschanged() refresh_settings() elif(msg['cmd'] == 'setrngpersist'): - vars.rngpersist = msg['data'] + user_settings.rngpersist = msg['data'] settingschanged() refresh_settings() 
elif(msg['cmd'] == 'setnogenmod'): - vars.nogenmod = msg['data'] + user_settings.nogenmod = msg['data'] settingschanged() refresh_settings() - elif(not vars.host and msg['cmd'] == 'importwi'): + elif(not system_settings.host and msg['cmd'] == 'importwi'): wiimportrequest() elif(msg['cmd'] == 'debug'): - vars.debug = msg['data'] + user_settings.debug = msg['data'] emit('from_server', {'cmd': 'set_debug', 'data': msg['data']}, broadcast=True) - if vars.debug: + if user_settings.debug: send_debug() #==================================================================# @@ -3377,10 +3268,10 @@ def get_message(msg): #==================================================================# def sendUSStatItems(): _, loaded = getuslist() - loaded = loaded if vars.lua_running else [] + loaded = loaded if system_settings.lua_running else [] last_userscripts = [e["filename"] for e in loaded] - emit('from_server', {'cmd': 'usstatitems', 'data': loaded, 'flash': last_userscripts != vars.last_userscripts}, broadcast=True) - vars.last_userscripts = last_userscripts + emit('from_server', {'cmd': 'usstatitems', 'data': loaded, 'flash': last_userscripts != system_settings.last_userscripts}, broadcast=True) + system_settings.last_userscripts = last_userscripts #==================================================================# # KoboldAI Markup Formatting (Mixture of Markdown and sanitized html) @@ -3394,15 +3285,15 @@ def kml(txt): # Send start message and tell Javascript to set UI state #==================================================================# def setStartState(): - if(vars.welcome): - txt = kml(vars.welcome) + "
" + if(system_settings.welcome): + txt = kml(system_settings.welcome) + "
" else: txt = "Welcome to KoboldAI! You are running "+getmodelname()+".
" - if(not vars.noai and not vars.welcome): + if(not system_settings.noai and not system_settings.welcome): txt = txt + "Please load a game or enter a prompt below to begin!
" - if(vars.noai): + if(system_settings.noai): txt = txt + "Please load or import a story to read. There is no AI in this mode." - emit('from_server', {'cmd': 'updatescreen', 'gamestarted': vars.gamestarted, 'data': txt}, broadcast=True) + emit('from_server', {'cmd': 'updatescreen', 'gamestarted': story_settings.gamestarted, 'data': txt}, broadcast=True) emit('from_server', {'cmd': 'setgamestate', 'data': 'start'}, broadcast=True) #==================================================================# @@ -3411,7 +3302,7 @@ def setStartState(): def sendsettings(): # Send settings for selected AI type emit('from_server', {'cmd': 'reset_menus'}) - if(vars.model != "InferKit"): + if(model_settings.model != "InferKit"): for set in gensettings.gensettingstf: emit('from_server', {'cmd': 'addsetting', 'data': set}) else: @@ -3422,47 +3313,47 @@ def sendsettings(): for frm in gensettings.formatcontrols: emit('from_server', {'cmd': 'addformat', 'data': frm}) # Add format key to vars if it wasn't loaded with client.settings - if(not frm["id"] in vars.formatoptns): - vars.formatoptns[frm["id"]] = False; + if(not frm["id"] in user_settings.formatoptns): + user_settings.formatoptns[frm["id"]] = False; #==================================================================# # Set value of gamesaved #==================================================================# def setgamesaved(gamesaved): assert type(gamesaved) is bool - if(gamesaved != vars.gamesaved): + if(gamesaved != story_settings.gamesaved): emit('from_server', {'cmd': 'gamesaved', 'data': gamesaved}, broadcast=True) - vars.gamesaved = gamesaved + story_settings.gamesaved = gamesaved #==================================================================# # Take input text from SocketIO and decide what to do with it #==================================================================# def check_for_backend_compilation(): - if(vars.checking): + if(system_settings.checking): return - vars.checking = True + system_settings.checking 
= True for _ in range(31): time.sleep(0.06276680299820175) - if(vars.compiling): + if(system_settings.compiling): emit('from_server', {'cmd': 'warnmsg', 'data': 'Compiling TPU backend—this usually takes 1–2 minutes...'}, broadcast=True) break - vars.checking = False + system_settings.checking = False def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, disable_recentrng=False): # Ignore new submissions if the AI is currently busy - if(vars.aibusy): + if(system_settings.aibusy): return while(True): set_aibusy(1) if(disable_recentrng): - vars.recentrng = vars.recentrngm = None + story_settings.recentrng = story_settings.recentrngm = None - vars.recentback = False - vars.recentedit = False - vars.actionmode = actionmode + story_settings.recentback = False + story_settings.recentedit = False + story_settings.actionmode = actionmode # "Action" mode if(actionmode == 1): @@ -3472,30 +3363,30 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, data = f"\n\n> {data}\n" # "Chat" mode - if(vars.chatmode and vars.gamestarted): + if(story_settings.chatmode and story_settings.gamestarted): data = re.sub(r'\n+', ' ', data) if(len(data)): - data = f"\n{vars.chatname}: {data}\n" + data = f"\n{story_settings.chatname}: {data}\n" # If we're not continuing, store a copy of the raw input if(data != ""): - vars.lastact = data + story_settings.lastact = data - if(not vars.gamestarted): - vars.submission = data + if(not story_settings.gamestarted): + story_settings.submission = data execute_inmod() - data = vars.submission + data = story_settings.submission if(not force_submit and len(data.strip()) == 0): assert False # Start the game - vars.gamestarted = True - if(not vars.noai and vars.lua_koboldbridge.generating and (not vars.nopromptgen or force_prompt_gen)): + story_settings.gamestarted = True + if(not system_settings.noai and system_settings.lua_koboldbridge.generating and (not user_settings.nopromptgen or 
force_prompt_gen)): # Save this first action as the prompt - vars.prompt = data + story_settings.prompt = data # Clear the startup text from game screen emit('from_server', {'cmd': 'updatescreen', 'gamestarted': False, 'data': 'Please wait, generating story...'}, broadcast=True) calcsubmit(data) # Run the first action through the generator - if(not vars.abort and vars.lua_koboldbridge.restart_sequence is not None and len(vars.genseqs) == 0): + if(not system_settings.abort and system_settings.lua_koboldbridge.restart_sequence is not None and len(story_settings.genseqs) == 0): data = "" force_submit = True disable_recentrng = True @@ -3504,28 +3395,28 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, break else: # Save this first action as the prompt - vars.prompt = data if len(data) > 0 else '"' - for i in range(vars.numseqs): - vars.lua_koboldbridge.outputs[i+1] = "" + story_settings.prompt = data if len(data) > 0 else '"' + for i in range(model_settings.numseqs): + system_settings.lua_koboldbridge.outputs[i+1] = "" execute_outmod() - vars.lua_koboldbridge.regeneration_required = False + system_settings.lua_koboldbridge.regeneration_required = False genout = [] - for i in range(vars.numseqs): - genout.append({"generated_text": vars.lua_koboldbridge.outputs[i+1]}) + for i in range(model_settings.numseqs): + genout.append({"generated_text": system_settings.lua_koboldbridge.outputs[i+1]}) assert type(genout[-1]["generated_text"]) is str if(len(genout) == 1): genresult(genout[0]["generated_text"], flash=False) refresh_story() - if(len(vars.actions) > 0): - emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() + 1}, broadcast=True) - if(not vars.abort and vars.lua_koboldbridge.restart_sequence is not None): + if(len(story_settings.actions) > 0): + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1}, broadcast=True) + if(not system_settings.abort and 
system_settings.lua_koboldbridge.restart_sequence is not None): data = "" force_submit = True disable_recentrng = True continue else: - if(not vars.abort and vars.lua_koboldbridge.restart_sequence is not None and vars.lua_koboldbridge.restart_sequence > 0): - genresult(genout[vars.lua_koboldbridge.restart_sequence-1]["generated_text"], flash=False) + if(not system_settings.abort and system_settings.lua_koboldbridge.restart_sequence is not None and system_settings.lua_koboldbridge.restart_sequence > 0): + genresult(genout[system_settings.lua_koboldbridge.restart_sequence-1]["generated_text"], flash=False) refresh_story() data = "" force_submit = True @@ -3538,42 +3429,42 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, break else: # Apply input formatting & scripts before sending to tokenizer - if(vars.actionmode == 0): + if(story_settings.actionmode == 0): data = applyinputformatting(data) - vars.submission = data + story_settings.submission = data execute_inmod() - data = vars.submission + data = story_settings.submission # Dont append submission if it's a blank/continue action if(data != ""): # Store the result in the Action log - if(len(vars.prompt.strip()) == 0): - vars.prompt = data + if(len(story_settings.prompt.strip()) == 0): + story_settings.prompt = data else: - vars.actions.append(data) + story_settings.actions.append(data) # we now need to update the actions_metadata # we'll have two conditions. # 1. This is totally new (user entered) - if vars.actions.get_last_key() not in vars.actions_metadata: - vars.actions_metadata[vars.actions.get_last_key()] = {"Selected Text": data, "Alternative Text": []} + if story_settings.actions.get_last_key() not in story_settings.actions_metadata: + story_settings.actions_metadata[story_settings.actions.get_last_key()] = {"Selected Text": data, "Alternative Text": []} else: # 2. 
We've selected a chunk of text that is was presented previously try: - alternatives = [item['Text'] for item in vars.actions_metadata[len(vars.actions)-1]["Alternative Text"]] + alternatives = [item['Text'] for item in story_settings.actions_metadata[len(story_settings.actions)-1]["Alternative Text"]] except: - print(len(vars.actions)) - print(vars.actions_metadata) + print(len(story_settings.actions)) + print(story_settings.actions_metadata) raise if data in alternatives: - alternatives = [item for item in vars.actions_metadata[vars.actions.get_last_key() ]["Alternative Text"] if item['Text'] != data] - vars.actions_metadata[vars.actions.get_last_key()]["Alternative Text"] = alternatives - vars.actions_metadata[vars.actions.get_last_key()]["Selected Text"] = data + alternatives = [item for item in story_settings.actions_metadata[story_settings.actions.get_last_key() ]["Alternative Text"] if item['Text'] != data] + story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"] = alternatives + story_settings.actions_metadata[story_settings.actions.get_last_key()]["Selected Text"] = data update_story_chunk('last') send_debug() - if(not vars.noai and vars.lua_koboldbridge.generating): + if(not system_settings.noai and system_settings.lua_koboldbridge.generating): # Off to the tokenizer! 
calcsubmit(data) - if(not vars.abort and vars.lua_koboldbridge.restart_sequence is not None and len(vars.genseqs) == 0): + if(not system_settings.abort and system_settings.lua_koboldbridge.restart_sequence is not None and len(story_settings.genseqs) == 0): data = "" force_submit = True disable_recentrng = True @@ -3581,24 +3472,24 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) break else: - for i in range(vars.numseqs): - vars.lua_koboldbridge.outputs[i+1] = "" + for i in range(model_settings.numseqs): + system_settings.lua_koboldbridge.outputs[i+1] = "" execute_outmod() - vars.lua_koboldbridge.regeneration_required = False + system_settings.lua_koboldbridge.regeneration_required = False genout = [] - for i in range(vars.numseqs): - genout.append({"generated_text": vars.lua_koboldbridge.outputs[i+1]}) + for i in range(model_settings.numseqs): + genout.append({"generated_text": system_settings.lua_koboldbridge.outputs[i+1]}) assert type(genout[-1]["generated_text"]) is str if(len(genout) == 1): genresult(genout[0]["generated_text"]) - if(not vars.abort and vars.lua_koboldbridge.restart_sequence is not None): + if(not system_settings.abort and system_settings.lua_koboldbridge.restart_sequence is not None): data = "" force_submit = True disable_recentrng = True continue else: - if(not vars.abort and vars.lua_koboldbridge.restart_sequence is not None and vars.lua_koboldbridge.restart_sequence > 0): - genresult(genout[vars.lua_koboldbridge.restart_sequence-1]["generated_text"]) + if(not system_settings.abort and system_settings.lua_koboldbridge.restart_sequence is not None and system_settings.lua_koboldbridge.restart_sequence > 0): + genresult(genout[system_settings.lua_koboldbridge.restart_sequence-1]["generated_text"]) data = "" force_submit = True disable_recentrng = True @@ -3612,46 +3503,46 @@ def actionsubmit(data, actionmode=0, force_submit=False, 
force_prompt_gen=False, # #==================================================================# def actionretry(data): - if(vars.noai): + if(system_settings.noai): emit('from_server', {'cmd': 'errmsg', 'data': "Retry function unavailable in Read Only mode."}) return - if(vars.recentrng is not None): - if(not vars.aibusy): - randomGameRequest(vars.recentrng, memory=vars.recentrngm) + if(story_settings.recentrng is not None): + if(not system_settings.aibusy): + randomGameRequest(story_settings.recentrng, memory=story_settings.recentrngm) return if actionback(): - actionsubmit("", actionmode=vars.actionmode, force_submit=True) + actionsubmit("", actionmode=story_settings.actionmode, force_submit=True) send_debug() - elif(not vars.useprompt): + elif(not story_settings.useprompt): emit('from_server', {'cmd': 'errmsg', 'data': "Please enable \"Always Add Prompt\" to retry with your prompt."}) #==================================================================# # #==================================================================# def actionback(): - if(vars.aibusy): + if(system_settings.aibusy): return # Remove last index of actions and refresh game screen - if(len(vars.genseqs) == 0 and len(vars.actions) > 0): + if(len(story_settings.genseqs) == 0 and len(story_settings.actions) > 0): # We are going to move the selected text to alternative text in the actions_metadata variable so we can redo this action - vars.actions_metadata[vars.actions.get_last_key() ]['Alternative Text'] = [{'Text': vars.actions_metadata[vars.actions.get_last_key() ]['Selected Text'], + story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Alternative Text'] = [{'Text': story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Selected Text'], 'Pinned': False, "Previous Selection": True, - "Edited": False}] + vars.actions_metadata[vars.actions.get_last_key() ]['Alternative Text'] - vars.actions_metadata[vars.actions.get_last_key() ]['Selected Text'] = "" + "Edited": 
False}] + story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Alternative Text'] + story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Selected Text'] = "" - last_key = vars.actions.get_last_key() - vars.actions.pop() - vars.recentback = True + last_key = story_settings.actions.get_last_key() + story_settings.actions.pop() + story_settings.recentback = True remove_story_chunk(last_key + 1) #for the redo to not get out of whack, need to reset the max # in the actions sequence - vars.actions.set_next_id(last_key) + story_settings.actions.set_next_id(last_key) success = True - elif(len(vars.genseqs) == 0): + elif(len(story_settings.genseqs) == 0): emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}) success = False else: - vars.genseqs = [] + story_settings.genseqs = [] success = True send_debug() return success @@ -3661,35 +3552,35 @@ def actionredo(): #First we need to find the next valid key #We might have deleted text so we don't want to show a redo for that blank chunk - restore_id = vars.actions.get_last_key()+1 - if restore_id in vars.actions_metadata: + restore_id = story_settings.actions.get_last_key()+1 + if restore_id in story_settings.actions_metadata: ok_to_use = False while not ok_to_use: - for item in vars.actions_metadata[restore_id]['Alternative Text']: + for item in story_settings.actions_metadata[restore_id]['Alternative Text']: if item['Previous Selection'] and item['Text'] != "": ok_to_use = True if not ok_to_use: restore_id+=1 - if restore_id not in vars.actions_metadata: + if restore_id not in story_settings.actions_metadata: return else: - vars.actions.set_next_id(restore_id) + story_settings.actions.set_next_id(restore_id) - if restore_id in vars.actions_metadata: - genout = [{"generated_text": item['Text']} for item in vars.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]==True)] + if restore_id in story_settings.actions_metadata: + genout = 
[{"generated_text": item['Text']} for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]==True)] if len(genout) > 0: - genout = genout + [{"generated_text": item['Text']} for item in vars.actions_metadata[restore_id]['Alternative Text'] if (item["Pinned"]==True) and (item["Previous Selection"]==False)] + genout = genout + [{"generated_text": item['Text']} for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Pinned"]==True) and (item["Previous Selection"]==False)] if len(genout) == 1: - vars.actions_metadata[restore_id]['Alternative Text'] = [item for item in vars.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]!=True)] + story_settings.actions_metadata[restore_id]['Alternative Text'] = [item for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]!=True)] genresult(genout[0]['generated_text'], flash=True, ignore_formatting=True) else: # Store sequences in memory until selection is made - vars.genseqs = genout + story_settings.genseqs = genout # Send sequences to UI for selection - genout = [[item['Text'], "redo"] for item in vars.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]==True)] + genout = [[item['Text'], "redo"] for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]==True)] emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True) else: @@ -3704,14 +3595,14 @@ def calcsubmitbudgetheader(txt, **kwargs): winfo, found_entries = checkworldinfo(txt, **kwargs) # Add a newline to the end of memory - if(vars.memory != "" and vars.memory[-1] != "\n"): - mem = vars.memory + "\n" + if(story_settings.memory != "" and story_settings.memory[-1] != "\n"): + mem = story_settings.memory + "\n" else: - mem = vars.memory + mem = story_settings.memory # Build Author's Note if set - if(vars.authornote != ""): - anotetxt 
= ("\n" + vars.authornotetemplate + "\n").replace("<|>", vars.authornote) + if(story_settings.authornote != ""): + anotetxt = ("\n" + story_settings.authornotetemplate + "\n").replace("<|>", story_settings.authornote) else: anotetxt = "" @@ -3723,44 +3614,44 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, anotetkns = [] # Placeholder for Author's Note tokens lnanote = 0 # Placeholder for Author's Note length - lnsp = vars.sp_length + lnsp = system_settings.sp_length if("tokenizer" not in globals()): from transformers import GPT2TokenizerFast global tokenizer - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") lnheader = len(tokenizer._koboldai_header) # Calculate token budget - prompttkns = tokenizer.encode(utils.encodenewlines(vars.comregex_ai.sub('', vars.prompt)), max_length=int(2e9), truncation=True) + prompttkns = tokenizer.encode(utils.encodenewlines(system_settings.comregex_ai.sub('', story_settings.prompt)), max_length=int(2e9), truncation=True) lnprompt = len(prompttkns) memtokens = tokenizer.encode(utils.encodenewlines(mem), max_length=int(2e9), truncation=True) lnmem = len(memtokens) - if(lnmem > vars.max_length - lnheader - lnsp - vars.genamt - budget_deduction): + if(lnmem > model_settings.max_length - lnheader - lnsp - model_settings.genamt - budget_deduction): raise OverflowError("The memory in your story is too long. Please either write a shorter memory text or increase the Max Tokens setting. 
If you are using a soft prompt, additionally consider using a smaller soft prompt.") witokens = tokenizer.encode(utils.encodenewlines(winfo), max_length=int(2e9), truncation=True) lnwi = len(witokens) - if(lnmem + lnwi > vars.max_length - lnheader - lnsp - vars.genamt - budget_deduction): + if(lnmem + lnwi > model_settings.max_length - lnheader - lnsp - model_settings.genamt - budget_deduction): raise OverflowError("The current active world info keys take up too many tokens. Please either write shorter world info, decrease World Info Depth or increase the Max Tokens setting. If you are using a soft prompt, additionally consider using a smaller soft prompt.") if(anotetxt != ""): anotetkns = tokenizer.encode(utils.encodenewlines(anotetxt), max_length=int(2e9), truncation=True) lnanote = len(anotetkns) - if(lnmem + lnwi + lnanote > vars.max_length - lnheader - lnsp - vars.genamt - budget_deduction): + if(lnmem + lnwi + lnanote > model_settings.max_length - lnheader - lnsp - model_settings.genamt - budget_deduction): raise OverflowError("The author's note in your story is too long. Please either write a shorter author's note or increase the Max Tokens setting. 
If you are using a soft prompt, additionally consider using a smaller soft prompt.") - if(vars.useprompt): - budget = vars.max_length - lnsp - lnprompt - lnmem - lnanote - lnwi - vars.genamt - budget_deduction + if(story_settings.useprompt): + budget = model_settings.max_length - lnsp - lnprompt - lnmem - lnanote - lnwi - model_settings.genamt - budget_deduction else: - budget = vars.max_length - lnsp - lnmem - lnanote - lnwi - vars.genamt - budget_deduction + budget = model_settings.max_length - lnsp - lnmem - lnanote - lnwi - model_settings.genamt - budget_deduction - lnsubmission = len(tokenizer.encode(utils.encodenewlines(vars.comregex_ai.sub('', submission)), max_length=int(2e9), truncation=True)) if submission is not None else 0 - maybe_lnprompt = lnprompt if vars.useprompt and actionlen > 0 else 0 + lnsubmission = len(tokenizer.encode(utils.encodenewlines(system_settings.comregex_ai.sub('', submission)), max_length=int(2e9), truncation=True)) if submission is not None else 0 + maybe_lnprompt = lnprompt if story_settings.useprompt and actionlen > 0 else 0 - if(lnmem + lnwi + lnanote + maybe_lnprompt + lnsubmission > vars.max_length - lnheader - lnsp - vars.genamt - budget_deduction): + if(lnmem + lnwi + lnanote + maybe_lnprompt + lnsubmission > model_settings.max_length - lnheader - lnsp - model_settings.genamt - budget_deduction): raise OverflowError("Your submission is too long. Please either write a shorter submission or increase the Max Tokens setting. If you are using a soft prompt, additionally consider using a smaller soft prompt. 
If you are using the Always Add Prompt setting, turning it off may help.") assert budget >= 0 @@ -3768,20 +3659,20 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, if(actionlen == 0): # First/Prompt action tokens = tokenizer._koboldai_header + memtokens + witokens + anotetkns + prompttkns - assert len(tokens) <= vars.max_length - lnsp - vars.genamt - budget_deduction + assert len(tokens) <= model_settings.max_length - lnsp - model_settings.genamt - budget_deduction ln = len(tokens) + lnsp - return tokens, ln+1, ln+vars.genamt + return tokens, ln+1, ln+model_settings.genamt else: tokens = [] # Check if we have the action depth to hit our A.N. depth - if(anotetxt != "" and actionlen < vars.andepth): + if(anotetxt != "" and actionlen < story_settings.andepth): forceanote = True # Get most recent action tokens up to our budget n = 0 for key in reversed(actions): - chunk = vars.comregex_ai.sub('', actions[key]) + chunk = system_settings.comregex_ai.sub('', actions[key]) assert budget >= 0 if(budget <= 0): @@ -3798,7 +3689,7 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, break # Inject Author's Note if we've reached the desired depth - if(n == vars.andepth-1): + if(n == story_settings.andepth-1): if(anotetxt != ""): tokens = anotetkns + tokens # A.N. len already taken from bdgt anoteadded = True @@ -3806,7 +3697,7 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, # If we're not using the prompt every time and there's still budget left, # add some prompt. 
- if(not vars.useprompt): + if(not story_settings.useprompt): if(budget > 0): prompttkns = prompttkns[-budget:] else: @@ -3823,9 +3714,9 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, tokens = tokenizer._koboldai_header + memtokens + witokens + prompttkns + tokens # Send completed bundle to generator - assert len(tokens) <= vars.max_length - lnsp - vars.genamt - budget_deduction + assert len(tokens) <= model_settings.max_length - lnsp - model_settings.genamt - budget_deduction ln = len(tokens) + lnsp - return tokens, ln+1, ln+vars.genamt + return tokens, ln+1, ln+model_settings.genamt #==================================================================# # Take submitted text and build the text to be given to generator @@ -3834,48 +3725,48 @@ def calcsubmit(txt): anotetxt = "" # Placeholder for Author's Note text forceanote = False # In case we don't have enough actions to hit A.N. depth anoteadded = False # In case our budget runs out before we hit A.N. 
depth - actionlen = len(vars.actions) + actionlen = len(story_settings.actions) winfo, mem, anotetxt, found_entries = calcsubmitbudgetheader(txt) # For all transformers models - if(vars.model != "InferKit"): - subtxt, min, max = calcsubmitbudget(actionlen, winfo, mem, anotetxt, vars.actions, submission=txt) + if(model_settings.model != "InferKit"): + subtxt, min, max = calcsubmitbudget(actionlen, winfo, mem, anotetxt, story_settings.actions, submission=txt) if(actionlen == 0): - if(not vars.use_colab_tpu and vars.model not in ["Colab", "OAI", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): + if(not system_settings.use_colab_tpu and model_settings.model not in ["Colab", "OAI", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): generate(subtxt, min, max, found_entries=found_entries) - elif(vars.model == "Colab"): + elif(model_settings.model == "Colab"): sendtocolab(utils.decodenewlines(tokenizer.decode(subtxt)), min, max) - elif(vars.model == "OAI"): + elif(model_settings.model == "OAI"): oairequest(utils.decodenewlines(tokenizer.decode(subtxt)), min, max) - elif(vars.use_colab_tpu or vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): + elif(system_settings.use_colab_tpu or model_settings.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): tpumtjgenerate(subtxt, min, max, found_entries=found_entries) else: - if(not vars.use_colab_tpu and vars.model not in ["Colab", "OAI", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): + if(not system_settings.use_colab_tpu and model_settings.model not in ["Colab", "OAI", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]): generate(subtxt, min, max, found_entries=found_entries) - elif(vars.model == "Colab"): + elif(model_settings.model == "Colab"): sendtocolab(utils.decodenewlines(tokenizer.decode(subtxt)), min, max) - elif(vars.model == "OAI"): + elif(model_settings.model == "OAI"): oairequest(utils.decodenewlines(tokenizer.decode(subtxt)), min, max) - 
elif(vars.use_colab_tpu or vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): + elif(system_settings.use_colab_tpu or model_settings.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): tpumtjgenerate(subtxt, min, max, found_entries=found_entries) # For InferKit web API else: # Check if we have the action depth to hit our A.N. depth - if(anotetxt != "" and actionlen < vars.andepth): + if(anotetxt != "" and actionlen < story_settings.andepth): forceanote = True - if(vars.useprompt): - budget = vars.ikmax - len(vars.comregex_ai.sub('', vars.prompt)) - len(anotetxt) - len(mem) - len(winfo) - 1 + if(story_settings.useprompt): + budget = model_settings.ikmax - len(system_settings.comregex_ai.sub('', story_settings.prompt)) - len(anotetxt) - len(mem) - len(winfo) - 1 else: - budget = vars.ikmax - len(anotetxt) - len(mem) - len(winfo) - 1 + budget = model_settings.ikmax - len(anotetxt) - len(mem) - len(winfo) - 1 subtxt = "" - prompt = vars.comregex_ai.sub('', vars.prompt) + prompt = system_settings.comregex_ai.sub('', story_settings.prompt) n = 0 - for key in reversed(vars.actions): - chunk = vars.actions[key] + for key in reversed(story_settings.actions): + chunk = story_settings.actions[key] if(budget <= 0): break @@ -3891,14 +3782,14 @@ def calcsubmit(txt): # If we're not using the prompt every time and there's still budget left, # add some prompt. - if(not vars.useprompt): + if(not story_settings.useprompt): if(budget > 0): - prompt = vars.comregex_ai.sub('', vars.prompt)[-budget:] + prompt = system_settings.comregex_ai.sub('', story_settings.prompt)[-budget:] else: prompt = "" # Inject Author's Note if we've reached the desired depth - if(n == vars.andepth-1): + if(n == story_settings.andepth-1): if(anotetxt != ""): subtxt = anotetxt + subtxt # A.N. 
len already taken from bdgt anoteadded = True @@ -3922,61 +3813,61 @@ def calcsubmit(txt): def _generate(txt, minimum, maximum, found_entries): gen_in = torch.tensor(txt, dtype=torch.long)[None] - if(vars.sp is not None): + if(system_settings.sp is not None): soft_tokens = torch.arange( model.config.vocab_size, - model.config.vocab_size + vars.sp.shape[0], + model.config.vocab_size + system_settings.sp.shape[0], ) gen_in = torch.cat((soft_tokens[None], gen_in), dim=-1) - assert gen_in.shape[-1] + vars.genamt <= vars.max_length + assert gen_in.shape[-1] + model_settings.genamt <= model_settings.max_length - if(vars.hascuda and vars.usegpu): - gen_in = gen_in.to(vars.gpu_device) - elif(vars.hascuda and vars.breakmodel): + if(system_settings.hascuda and system_settings.usegpu): + gen_in = gen_in.to(system_settings.gpu_device) + elif(system_settings.hascuda and system_settings.breakmodel): gen_in = gen_in.to(breakmodel.primary_device) else: gen_in = gen_in.to('cpu') model.kai_scanner_excluded_world_info = found_entries - vars._actions = vars.actions - vars._prompt = vars.prompt - if(vars.dynamicscan): - vars._actions = vars._actions.copy() + story_settings._actions = story_settings.actions + story_settings._prompt = story_settings.prompt + if(story_settings.dynamicscan): + story_settings._actions = story_settings._actions.copy() with torch.no_grad(): already_generated = 0 - numseqs = vars.numseqs + numseqs = model_settings.numseqs while True: genout = generator( gen_in, do_sample=True, max_length=int(2e9), repetition_penalty=1.1, - bad_words_ids=vars.badwordsids, + bad_words_ids=model_settings.badwordsids, use_cache=True, num_return_sequences=numseqs ) already_generated += len(genout[0]) - len(gen_in[0]) - assert already_generated <= vars.genamt + assert already_generated <= model_settings.genamt if(model.kai_scanner.halt or not model.kai_scanner.regeneration_required): break assert genout.ndim >= 2 - assert genout.shape[0] == vars.numseqs - 
if(vars.lua_koboldbridge.generated_cols and vars.generated_tkns != vars.lua_koboldbridge.generated_cols): + assert genout.shape[0] == model_settings.numseqs + if(system_settings.lua_koboldbridge.generated_cols and story_settings.generated_tkns != system_settings.lua_koboldbridge.generated_cols): raise RuntimeError("Inconsistency detected between KoboldAI Python and Lua backends") - if(already_generated != vars.generated_tkns): + if(already_generated != story_settings.generated_tkns): raise RuntimeError("WI scanning error") - for r in range(vars.numseqs): + for r in range(model_settings.numseqs): for c in range(already_generated): - assert vars.lua_koboldbridge.generated[r+1][c+1] is not None - genout[r][genout.shape[-1] - already_generated + c] = vars.lua_koboldbridge.generated[r+1][c+1] + assert system_settings.lua_koboldbridge.generated[r+1][c+1] is not None + genout[r][genout.shape[-1] - already_generated + c] = system_settings.lua_koboldbridge.generated[r+1][c+1] encoded = [] - for i in range(vars.numseqs): + for i in range(model_settings.numseqs): txt = utils.decodenewlines(tokenizer.decode(genout[i, -already_generated:])) - winfo, mem, anotetxt, _found_entries = calcsubmitbudgetheader(txt, force_use_txt=True, actions=vars._actions) + winfo, mem, anotetxt, _found_entries = calcsubmitbudgetheader(txt, force_use_txt=True, actions=story_settings._actions) found_entries[i].update(_found_entries) - txt, _, _ = calcsubmitbudget(len(vars._actions), winfo, mem, anotetxt, vars._actions, submission=txt) + txt, _, _ = calcsubmitbudget(len(story_settings._actions), winfo, mem, anotetxt, story_settings._actions, submission=txt) encoded.append(torch.tensor(txt, dtype=torch.long, device=genout.device)) max_length = len(max(encoded, key=len)) encoded = torch.stack(tuple(torch.nn.functional.pad(e, (max_length - len(e), 0), value=model.config.pad_token_id or model.config.eos_token_id) for e in encoded)) @@ -3987,14 +3878,14 @@ def _generate(txt, minimum, maximum, 
found_entries): ), dim=-1 ) - if(vars.sp is not None): + if(system_settings.sp is not None): soft_tokens = torch.arange( model.config.vocab_size, - model.config.vocab_size + vars.sp.shape[0], + model.config.vocab_size + system_settings.sp.shape[0], device=genout.device, ) - genout = torch.cat((soft_tokens.tile(vars.numseqs, 1), genout), dim=-1) - assert genout.shape[-1] + vars.genamt - already_generated <= vars.max_length + genout = torch.cat((soft_tokens.tile(model_settings.numseqs, 1), genout), dim=-1) + assert genout.shape[-1] + model_settings.genamt - already_generated <= model_settings.max_length diff = genout.shape[-1] - gen_in.shape[-1] minimum += diff maximum += diff @@ -4005,20 +3896,20 @@ def _generate(txt, minimum, maximum, found_entries): def generate(txt, minimum, maximum, found_entries=None): - vars.generated_tkns = 0 + story_settings.generated_tkns = 0 if(found_entries is None): found_entries = set() - found_entries = tuple(found_entries.copy() for _ in range(vars.numseqs)) + found_entries = tuple(found_entries.copy() for _ in range(model_settings.numseqs)) - if not vars.quiet: + if not system_settings.quiet: print("{0}Min:{1}, Max:{2}, Txt:{3}{4}".format(colors.YELLOW, minimum, maximum, utils.decodenewlines(tokenizer.decode(txt)), colors.END)) # Store context in memory to use it for comparison with generated content - vars.lastctx = utils.decodenewlines(tokenizer.decode(txt)) + story_settings.lastctx = utils.decodenewlines(tokenizer.decode(txt)) # Clear CUDA cache if using GPU - if(vars.hascuda and (vars.usegpu or vars.breakmodel)): + if(system_settings.hascuda and (system_settings.usegpu or system_settings.breakmodel)): gc.collect() torch.cuda.empty_cache() @@ -4027,8 +3918,8 @@ def generate(txt, minimum, maximum, found_entries=None): genout, already_generated = tpool.execute(_generate, txt, minimum, maximum, found_entries) except Exception as e: if(issubclass(type(e), lupa.LuaError)): - vars.lua_koboldbridge.obliterate_multiverse() - 
vars.lua_running = False + system_settings.lua_koboldbridge.obliterate_multiverse() + system_settings.lua_running = False emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) @@ -4040,16 +3931,16 @@ def generate(txt, minimum, maximum, found_entries=None): set_aibusy(0) return - for i in range(vars.numseqs): - vars.lua_koboldbridge.generated[i+1][vars.generated_tkns] = int(genout[i, -1].item()) - vars.lua_koboldbridge.outputs[i+1] = utils.decodenewlines(tokenizer.decode(genout[i, -already_generated:])) + for i in range(model_settings.numseqs): + system_settings.lua_koboldbridge.generated[i+1][story_settings.generated_tkns] = int(genout[i, -1].item()) + system_settings.lua_koboldbridge.outputs[i+1] = utils.decodenewlines(tokenizer.decode(genout[i, -already_generated:])) execute_outmod() - if(vars.lua_koboldbridge.regeneration_required): - vars.lua_koboldbridge.regeneration_required = False + if(system_settings.lua_koboldbridge.regeneration_required): + system_settings.lua_koboldbridge.regeneration_required = False genout = [] - for i in range(vars.numseqs): - genout.append({"generated_text": vars.lua_koboldbridge.outputs[i+1]}) + for i in range(model_settings.numseqs): + genout.append({"generated_text": system_settings.lua_koboldbridge.outputs[i+1]}) assert type(genout[-1]["generated_text"]) is str else: genout = [{"generated_text": utils.decodenewlines(tokenizer.decode(tokens[-already_generated:]))} for tokens in genout] @@ -4057,13 +3948,13 @@ def generate(txt, minimum, maximum, found_entries=None): if(len(genout) == 1): genresult(genout[0]["generated_text"]) else: - if(vars.lua_koboldbridge.restart_sequence is not None and vars.lua_koboldbridge.restart_sequence > 0): - genresult(genout[vars.lua_koboldbridge.restart_sequence-1]["generated_text"]) + if(system_settings.lua_koboldbridge.restart_sequence 
is not None and system_settings.lua_koboldbridge.restart_sequence > 0): + genresult(genout[system_settings.lua_koboldbridge.restart_sequence-1]["generated_text"]) else: genselect(genout) # Clear CUDA cache again if using GPU - if(vars.hascuda and (vars.usegpu or vars.breakmodel)): + if(system_settings.hascuda and (system_settings.usegpu or system_settings.breakmodel)): del genout gc.collect() torch.cuda.empty_cache() @@ -4074,30 +3965,30 @@ def generate(txt, minimum, maximum, found_entries=None): # Deal with a single return sequence from generate() #==================================================================# def genresult(genout, flash=True, ignore_formatting=False): - if not vars.quiet: + if not system_settings.quiet: print("{0}{1}{2}".format(colors.CYAN, genout, colors.END)) # Format output before continuing if not ignore_formatting: genout = applyoutputformatting(genout) - vars.lua_koboldbridge.feedback = genout + system_settings.lua_koboldbridge.feedback = genout if(len(genout) == 0): return # Add formatted text to Actions array and refresh the game screen - if(len(vars.prompt.strip()) == 0): - vars.prompt = genout + if(len(story_settings.prompt.strip()) == 0): + story_settings.prompt = genout else: - vars.actions.append(genout) - if vars.actions.get_last_key() not in vars.actions_metadata: - vars.actions_metadata[vars.actions.get_last_key()] = {'Selected Text': genout, 'Alternative Text': []} + story_settings.actions.append(genout) + if story_settings.actions.get_last_key() not in story_settings.actions_metadata: + story_settings.actions_metadata[story_settings.actions.get_last_key()] = {'Selected Text': genout, 'Alternative Text': []} else: - vars.actions_metadata[vars.actions.get_last_key()]['Selected Text'] = genout + story_settings.actions_metadata[story_settings.actions.get_last_key()]['Selected Text'] = genout update_story_chunk('last') if(flash): - emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() + 1 if 
len(vars.actions) else 0}, broadcast=True) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) send_debug() #==================================================================# @@ -4108,30 +3999,30 @@ def genselect(genout): for result in genout: # Apply output formatting rules to sequences result["generated_text"] = applyoutputformatting(result["generated_text"]) - if not vars.quiet: + if not system_settings.quiet: print("{0}[Result {1}]\n{2}{3}".format(colors.CYAN, i, result["generated_text"], colors.END)) i += 1 # Add the options to the actions metadata # If we've already generated text for this action but haven't selected one we'll want to kill all non-pinned, non-previous selection, and non-edited options then add the new ones - if vars.actions.get_next_id() in vars.actions_metadata: - if (vars.actions_metadata[vars.actions.get_next_id()]['Selected Text'] == ""): - vars.actions_metadata[vars.actions.get_next_id()]['Alternative Text'] = [{"Text": item['Text'], "Pinned": item['Pinned'], + if story_settings.actions.get_next_id() in story_settings.actions_metadata: + if (story_settings.actions_metadata[story_settings.actions.get_next_id()]['Selected Text'] == ""): + story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] = [{"Text": item['Text'], "Pinned": item['Pinned'], "Previous Selection": item["Previous Selection"], - "Edited": item["Edited"]} for item in vars.actions_metadata[vars.actions.get_next_id()]['Alternative Text'] + "Edited": item["Edited"]} for item in story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] if item['Pinned'] or item["Previous Selection"] or item["Edited"]] + [{"Text": text["generated_text"], "Pinned": False, "Previous Selection": False, "Edited": False} for text in genout] else: - vars.actions_metadata[vars.actions.get_next_id()] = {'Selected Text': '', 'Alternative 
Text': [{"Text": text["generated_text"], "Pinned": False, "Previous Selection": False, "Edited": False} for text in genout]} + story_settings.actions_metadata[story_settings.actions.get_next_id()] = {'Selected Text': '', 'Alternative Text': [{"Text": text["generated_text"], "Pinned": False, "Previous Selection": False, "Edited": False} for text in genout]} else: - vars.actions_metadata[vars.actions.get_next_id()] = {'Selected Text': '', 'Alternative Text': [{"Text": text["generated_text"], "Pinned": False, "Previous Selection": False, "Edited": False} for text in genout]} + story_settings.actions_metadata[story_settings.actions.get_next_id()] = {'Selected Text': '', 'Alternative Text': [{"Text": text["generated_text"], "Pinned": False, "Previous Selection": False, "Edited": False} for text in genout]} - genout = [{"generated_text": item['Text']} for item in vars.actions_metadata[vars.actions.get_next_id()]['Alternative Text'] if (item["Previous Selection"]==False) and (item["Edited"]==False)] + genout = [{"generated_text": item['Text']} for item in story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] if (item["Previous Selection"]==False) and (item["Edited"]==False)] # Store sequences in memory until selection is made - vars.genseqs = genout + story_settings.genseqs = genout - genout = [[item['Text'], "pinned" if item['Pinned'] else "normal"] for item in vars.actions_metadata[vars.actions.get_next_id()]['Alternative Text'] if (item["Previous Selection"]==False) and (item["Edited"]==False)] + genout = [[item['Text'], "pinned" if item['Pinned'] else "normal"] for item in story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] if (item["Previous Selection"]==False) and (item["Edited"]==False)] # Send sequences to UI for selection emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True) @@ -4141,21 +4032,21 @@ def genselect(genout): # Send selected sequence to action log and refresh 
UI #==================================================================# def selectsequence(n): - if(len(vars.genseqs) == 0): + if(len(story_settings.genseqs) == 0): return - vars.lua_koboldbridge.feedback = vars.genseqs[int(n)]["generated_text"] - if(len(vars.lua_koboldbridge.feedback) != 0): - vars.actions.append(vars.lua_koboldbridge.feedback) + system_settings.lua_koboldbridge.feedback = story_settings.genseqs[int(n)]["generated_text"] + if(len(system_settings.lua_koboldbridge.feedback) != 0): + story_settings.actions.append(system_settings.lua_koboldbridge.feedback) #We'll want to remove the option from the alternative text and put it in selected text - vars.actions_metadata[vars.actions.get_last_key() ]['Alternative Text'] = [item for item in vars.actions_metadata[vars.actions.get_last_key()]['Alternative Text'] if item['Text'] != vars.lua_koboldbridge.feedback] - vars.actions_metadata[vars.actions.get_last_key() ]['Selected Text'] = vars.lua_koboldbridge.feedback + story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Alternative Text'] = [item for item in story_settings.actions_metadata[story_settings.actions.get_last_key()]['Alternative Text'] if item['Text'] != system_settings.lua_koboldbridge.feedback] + story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Selected Text'] = system_settings.lua_koboldbridge.feedback update_story_chunk('last') - emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() + 1 if len(vars.actions) else 0}, broadcast=True) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) - vars.genseqs = [] + story_settings.genseqs = [] - if(vars.lua_koboldbridge.restart_sequence is not None): - actionsubmit("", actionmode=vars.actionmode, force_submit=True, disable_recentrng=True) + 
if(system_settings.lua_koboldbridge.restart_sequence is not None): + actionsubmit("", actionmode=story_settings.actionmode, force_submit=True, disable_recentrng=True) send_debug() #==================================================================# @@ -4163,14 +4054,14 @@ def selectsequence(n): #==================================================================# def pinsequence(n): if n.isnumeric(): - text = vars.genseqs[int(n)]['generated_text'] - if text in [item['Text'] for item in vars.actions_metadata[vars.actions.get_next_id()]['Alternative Text']]: - alternatives = vars.actions_metadata[vars.actions.get_next_id()]['Alternative Text'] + text = story_settings.genseqs[int(n)]['generated_text'] + if text in [item['Text'] for item in story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text']]: + alternatives = story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] for i in range(len(alternatives)): if alternatives[i]['Text'] == text: alternatives[i]['Pinned'] = not alternatives[i]['Pinned'] break - vars.actions_metadata[vars.actions.get_next_id()]['Alternative Text'] = alternatives + story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] = alternatives send_debug() @@ -4179,33 +4070,33 @@ def pinsequence(n): #==================================================================# def sendtocolab(txt, min, max): # Log request to console - if not vars.quiet: + if not system_settings.quiet: print("{0}Tokens:{1}, Txt:{2}{3}".format(colors.YELLOW, min-1, txt, colors.END)) # Store context in memory to use it for comparison with generated content - vars.lastctx = txt + story_settings.lastctx = txt # Build request JSON data reqdata = { 'text': txt, 'min': min, 'max': max, - 'rep_pen': vars.rep_pen, - 'rep_pen_slope': vars.rep_pen_slope, - 'rep_pen_range': vars.rep_pen_range, - 'temperature': vars.temp, - 'top_p': vars.top_p, - 'top_k': vars.top_k, - 'tfs': vars.tfs, - 
'typical': vars.typical, - 'topa': vars.top_a, - 'numseqs': vars.numseqs, + 'rep_pen': model_settings.rep_pen, + 'rep_pen_slope': model_settings.rep_pen_slope, + 'rep_pen_range': model_settings.rep_pen_range, + 'temperature': model_settings.temp, + 'top_p': model_settings.top_p, + 'top_k': model_settings.top_k, + 'tfs': model_settings.tfs, + 'typical': model_settings.typical, + 'topa': model_settings.top_a, + 'numseqs': model_settings.numseqs, 'retfultxt': False } # Create request req = requests.post( - vars.colaburl, + model_settings.colaburl, json = reqdata ) @@ -4219,15 +4110,15 @@ def sendtocolab(txt, min, max): else: genout = js["seqs"] - for i in range(vars.numseqs): - vars.lua_koboldbridge.outputs[i+1] = genout[i] + for i in range(model_settings.numseqs): + system_settings.lua_koboldbridge.outputs[i+1] = genout[i] execute_outmod() - if(vars.lua_koboldbridge.regeneration_required): - vars.lua_koboldbridge.regeneration_required = False + if(system_settings.lua_koboldbridge.regeneration_required): + system_settings.lua_koboldbridge.regeneration_required = False genout = [] - for i in range(vars.numseqs): - genout.append(vars.lua_koboldbridge.outputs[i+1]) + for i in range(model_settings.numseqs): + genout.append(system_settings.lua_koboldbridge.outputs[i+1]) assert type(genout[-1]) is str if(len(genout) == 1): @@ -4237,8 +4128,8 @@ def sendtocolab(txt, min, max): seqs = [] for seq in genout: seqs.append({"generated_text": seq}) - if(vars.lua_koboldbridge.restart_sequence is not None and vars.lua_koboldbridge.restart_sequence > 0): - genresult(genout[vars.lua_koboldbridge.restart_sequence-1]["generated_text"]) + if(system_settings.lua_koboldbridge.restart_sequence is not None and system_settings.lua_koboldbridge.restart_sequence > 0): + genresult(genout[system_settings.lua_koboldbridge.restart_sequence-1]["generated_text"]) else: genselect(genout) @@ -4246,9 +4137,9 @@ def sendtocolab(txt, min, max): #genout = applyoutputformatting(getnewcontent(genout)) # Add 
formatted text to Actions array and refresh the game screen - #vars.actions.append(genout) + #story_settings.actions.append(genout) #refresh_story() - #emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() + 1 if len(vars.actions) else 0}) + #emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}) set_aibusy(0) else: @@ -4261,19 +4152,19 @@ def sendtocolab(txt, min, max): # Send text to TPU mesh transformer backend #==================================================================# def tpumtjgenerate(txt, minimum, maximum, found_entries=None): - vars.generated_tkns = 0 + story_settings.generated_tkns = 0 if(found_entries is None): found_entries = set() - found_entries = tuple(found_entries.copy() for _ in range(vars.numseqs)) + found_entries = tuple(found_entries.copy() for _ in range(model_settings.numseqs)) - if not vars.quiet: + if not system_settings.quiet: print("{0}Min:{1}, Max:{2}, Txt:{3}{4}".format(colors.YELLOW, minimum, maximum, utils.decodenewlines(tokenizer.decode(txt)), colors.END)) - vars._actions = vars.actions - vars._prompt = vars.prompt - if(vars.dynamicscan): - vars._actions = vars._actions.copy() + story_settings._actions = story_settings.actions + story_settings._prompt = story_settings.prompt + if(story_settings.dynamicscan): + story_settings._actions = story_settings._actions.copy() # Submit input text to generator try: @@ -4283,38 +4174,38 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): socketio.start_background_task(copy_current_request_context(check_for_backend_compilation)) - if(vars.dynamicscan or (not vars.nogenmod and vars.has_genmod)): + if(story_settings.dynamicscan or (not user_settings.nogenmod and system_settings.has_genmod)): - context = np.tile(np.uint32(txt), (vars.numseqs, 1)) - past = np.empty((vars.numseqs, 0), dtype=np.uint32) + context = np.tile(np.uint32(txt), (model_settings.numseqs, 1)) + past = 
np.empty((model_settings.numseqs, 0), dtype=np.uint32) while(True): genout, n_generated, regeneration_required, halt = tpool.execute( tpu_mtj_backend.infer_dynamic, context, gen_len = maximum-minimum+1, - numseqs=vars.numseqs, - soft_embeddings=vars.sp, + numseqs=model_settings.numseqs, + soft_embeddings=system_settings.sp, soft_tokens=soft_tokens, excluded_world_info=found_entries, ) past = np.pad(past, ((0, 0), (0, n_generated))) - for r in range(vars.numseqs): - for c in range(vars.lua_koboldbridge.generated_cols): - assert vars.lua_koboldbridge.generated[r+1][c+1] is not None - past[r, c] = vars.lua_koboldbridge.generated[r+1][c+1] + for r in range(model_settings.numseqs): + for c in range(system_settings.lua_koboldbridge.generated_cols): + assert system_settings.lua_koboldbridge.generated[r+1][c+1] is not None + past[r, c] = system_settings.lua_koboldbridge.generated[r+1][c+1] - if(vars.abort or halt or not regeneration_required): + if(system_settings.abort or halt or not regeneration_required): break print("(regeneration triggered)") encoded = [] - for i in range(vars.numseqs): + for i in range(model_settings.numseqs): txt = utils.decodenewlines(tokenizer.decode(past[i])) - winfo, mem, anotetxt, _found_entries = calcsubmitbudgetheader(txt, force_use_txt=True, actions=vars._actions) + winfo, mem, anotetxt, _found_entries = calcsubmitbudgetheader(txt, force_use_txt=True, actions=story_settings._actions) found_entries[i].update(_found_entries) - txt, _, _ = calcsubmitbudget(len(vars._actions), winfo, mem, anotetxt, vars._actions, submission=txt) + txt, _, _ = calcsubmitbudget(len(story_settings._actions), winfo, mem, anotetxt, story_settings._actions, submission=txt) encoded.append(np.array(txt, dtype=np.uint32)) max_length = len(max(encoded, key=len)) encoded = np.stack(tuple(np.pad(e, (max_length - len(e), 0), constant_values=tpu_mtj_backend.pad_token_id) for e in encoded)) @@ -4331,29 +4222,29 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): 
tpu_mtj_backend.infer_static, np.uint32(txt), gen_len = maximum-minimum+1, - temp=vars.temp, - top_p=vars.top_p, - top_k=vars.top_k, - tfs=vars.tfs, - typical=vars.typical, - top_a=vars.top_a, - numseqs=vars.numseqs, - repetition_penalty=vars.rep_pen, - rpslope=vars.rep_pen_slope, - rprange=vars.rep_pen_range, - soft_embeddings=vars.sp, + temp=model_settings.temp, + top_p=model_settings.top_p, + top_k=model_settings.top_k, + tfs=model_settings.tfs, + typical=model_settings.typical, + top_a=model_settings.top_a, + numseqs=model_settings.numseqs, + repetition_penalty=model_settings.rep_pen, + rpslope=model_settings.rep_pen_slope, + rprange=model_settings.rep_pen_range, + soft_embeddings=system_settings.sp, soft_tokens=soft_tokens, - sampler_order=vars.sampler_order, + sampler_order=model_settings.sampler_order, ) past = genout - for i in range(vars.numseqs): - vars.lua_koboldbridge.generated[i+1] = vars.lua_state.table(*genout[i].tolist()) - vars.lua_koboldbridge.generated_cols = vars.generated_tkns = genout[0].shape[-1] + for i in range(model_settings.numseqs): + system_settings.lua_koboldbridge.generated[i+1] = system_settings.lua_state.table(*genout[i].tolist()) + system_settings.lua_koboldbridge.generated_cols = story_settings.generated_tkns = genout[0].shape[-1] except Exception as e: if(issubclass(type(e), lupa.LuaError)): - vars.lua_koboldbridge.obliterate_multiverse() - vars.lua_running = False + system_settings.lua_koboldbridge.obliterate_multiverse() + system_settings.lua_running = False emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) @@ -4365,16 +4256,16 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): set_aibusy(0) return - for i in range(vars.numseqs): - vars.lua_koboldbridge.outputs[i+1] = utils.decodenewlines(tokenizer.decode(past[i])) + for i in 
range(model_settings.numseqs): + system_settings.lua_koboldbridge.outputs[i+1] = utils.decodenewlines(tokenizer.decode(past[i])) genout = past execute_outmod() - if(vars.lua_koboldbridge.regeneration_required): - vars.lua_koboldbridge.regeneration_required = False + if(system_settings.lua_koboldbridge.regeneration_required): + system_settings.lua_koboldbridge.regeneration_required = False genout = [] - for i in range(vars.numseqs): - genout.append({"generated_text": vars.lua_koboldbridge.outputs[i+1]}) + for i in range(model_settings.numseqs): + genout.append({"generated_text": system_settings.lua_koboldbridge.outputs[i+1]}) assert type(genout[-1]["generated_text"]) is str else: genout = [{"generated_text": utils.decodenewlines(tokenizer.decode(txt))} for txt in genout] @@ -4382,8 +4273,8 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): if(len(genout) == 1): genresult(genout[0]["generated_text"]) else: - if(vars.lua_koboldbridge.restart_sequence is not None and vars.lua_koboldbridge.restart_sequence > 0): - genresult(genout[vars.lua_koboldbridge.restart_sequence-1]["generated_text"]) + if(system_settings.lua_koboldbridge.restart_sequence is not None and system_settings.lua_koboldbridge.restart_sequence > 0): + genresult(genout[system_settings.lua_koboldbridge.restart_sequence-1]["generated_text"]) else: genselect(genout) @@ -4401,11 +4292,11 @@ def formatforhtml(txt): #==================================================================# def getnewcontent(txt): # If the submitted context was blank, then everything is new - if(vars.lastctx == ""): + if(story_settings.lastctx == ""): return txt # Tokenize the last context and the generated content - ctxtokens = tokenizer.encode(utils.encodenewlines(vars.lastctx), max_length=int(2e9), truncation=True) + ctxtokens = tokenizer.encode(utils.encodenewlines(story_settings.lastctx), max_length=int(2e9), truncation=True) txttokens = tokenizer.encode(utils.encodenewlines(txt), max_length=int(2e9), 
truncation=True) dif = (len(txttokens) - len(ctxtokens)) * -1 @@ -4419,8 +4310,8 @@ def getnewcontent(txt): #==================================================================# def applyinputformatting(txt): # Add sentence spacing - if(vars.formatoptns["frmtadsnsp"]): - txt = utils.addsentencespacing(txt, vars) + if(user_settings.formatoptns["frmtadsnsp"]): + txt = utils.addsentencespacing(txt, story_settings) return txt @@ -4432,21 +4323,21 @@ def applyoutputformatting(txt): txt = utils.fixquotes(txt) # Adventure mode clipping of all characters after '>' - if(vars.adventure): - txt = vars.acregex_ai.sub('', txt) + if(story_settings.adventure): + txt = system_settings.acregex_ai.sub('', txt) # Trim incomplete sentences - if(vars.formatoptns["frmttriminc"] and not vars.chatmode): + if(user_settings.formatoptns["frmttriminc"] and not story_settings.chatmode): txt = utils.trimincompletesentence(txt) # Replace blank lines - if(vars.formatoptns["frmtrmblln"] or vars.chatmode): + if(user_settings.formatoptns["frmtrmblln"] or story_settings.chatmode): txt = utils.replaceblanklines(txt) # Remove special characters - if(vars.formatoptns["frmtrmspch"]): - txt = utils.removespecialchars(txt, vars) + if(user_settings.formatoptns["frmtrmspch"]): + txt = utils.removespecialchars(txt, story_settings) # Single Line Mode - if(vars.formatoptns["singleline"] or vars.chatmode): - txt = utils.singlelineprocessing(txt, vars) + if(user_settings.formatoptns["singleline"] or story_settings.chatmode): + txt = utils.singlelineprocessing(txt, story_settings) return txt @@ -4454,15 +4345,15 @@ def applyoutputformatting(txt): # Sends the current story content to the Game Screen #==================================================================# def refresh_story(): - text_parts = ['', vars.comregex_ui.sub(lambda m: '\n'.join('' + l + '' for l in m.group().split('\n')), html.escape(vars.prompt)), ''] - for idx in vars.actions: - item = vars.actions[idx] + text_parts = ['', 
system_settings.comregex_ui.sub(lambda m: '\n'.join('' + l + '' for l in m.group().split('\n')), html.escape(story_settings.prompt)), ''] + for idx in story_settings.actions: + item = story_settings.actions[idx] idx += 1 item = html.escape(item) - item = vars.comregex_ui.sub(lambda m: '\n'.join('' + l + '' for l in m.group().split('\n')), item) # Add special formatting to comments - item = vars.acregex_ui.sub('\\1', item) # Add special formatting to adventure actions + item = system_settings.comregex_ui.sub(lambda m: '\n'.join('' + l + '' for l in m.group().split('\n')), item) # Add special formatting to comments + item = system_settings.acregex_ui.sub('\\1', item) # Add special formatting to adventure actions text_parts.extend(('', item, '')) - emit('from_server', {'cmd': 'updatescreen', 'gamestarted': vars.gamestarted, 'data': formatforhtml(''.join(text_parts))}, broadcast=True) + emit('from_server', {'cmd': 'updatescreen', 'gamestarted': story_settings.gamestarted, 'data': formatforhtml(''.join(text_parts))}, broadcast=True) #==================================================================# @@ -4470,7 +4361,7 @@ def refresh_story(): #==================================================================# def update_story_chunk(idx: Union[int, str]): if idx == 'last': - if len(vars.actions) <= 1: + if len(story_settings.actions) <= 1: # In this case, we are better off just refreshing the whole thing as the # prompt might not have been shown yet (with a "Generating story..." # message instead). @@ -4478,20 +4369,20 @@ def update_story_chunk(idx: Union[int, str]): setgamesaved(False) return - idx = (vars.actions.get_last_key() if len(vars.actions) else 0) + 1 + idx = (story_settings.actions.get_last_key() if len(story_settings.actions) else 0) + 1 if idx == 0: - text = vars.prompt + text = story_settings.prompt else: # Actions are 0 based, but in chunks 0 is the prompt. # So the chunk index is one more than the corresponding action index. 
- if(idx - 1 not in vars.actions): + if(idx - 1 not in story_settings.actions): return - text = vars.actions[idx - 1] + text = story_settings.actions[idx - 1] item = html.escape(text) - item = vars.comregex_ui.sub(lambda m: '\n'.join('' + l + '' for l in m.group().split('\n')), item) # Add special formatting to comments - item = vars.acregex_ui.sub('\\1', item) # Add special formatting to adventure actions + item = system_settings.comregex_ui.sub(lambda m: '\n'.join('' + l + '' for l in m.group().split('\n')), item) # Add special formatting to comments + item = system_settings.acregex_ui.sub('\\1', item) # Add special formatting to adventure actions chunk_text = f'{formatforhtml(item)}' emit('from_server', {'cmd': 'updatechunk', 'data': {'index': idx, 'html': chunk_text}}, broadcast=True) @@ -4499,7 +4390,7 @@ def update_story_chunk(idx: Union[int, str]): setgamesaved(False) #If we've set the auto save flag, we'll now save the file - if vars.autosave and (".json" in vars.savedir): + if user_settings.autosave and (".json" in system_settings.savedir): save() @@ -4518,40 +4409,40 @@ def refresh_settings(): # Suppress toggle change events while loading state emit('from_server', {'cmd': 'allowtoggle', 'data': False}, broadcast=True) - if(vars.model != "InferKit"): - emit('from_server', {'cmd': 'updatetemp', 'data': vars.temp}, broadcast=True) - emit('from_server', {'cmd': 'updatetopp', 'data': vars.top_p}, broadcast=True) - emit('from_server', {'cmd': 'updatetopk', 'data': vars.top_k}, broadcast=True) - emit('from_server', {'cmd': 'updatetfs', 'data': vars.tfs}, broadcast=True) - emit('from_server', {'cmd': 'updatetypical', 'data': vars.typical}, broadcast=True) - emit('from_server', {'cmd': 'updatetopa', 'data': vars.top_a}, broadcast=True) - emit('from_server', {'cmd': 'updatereppen', 'data': vars.rep_pen}, broadcast=True) - emit('from_server', {'cmd': 'updatereppenslope', 'data': vars.rep_pen_slope}, broadcast=True) - emit('from_server', {'cmd': 'updatereppenrange', 
'data': vars.rep_pen_range}, broadcast=True) - emit('from_server', {'cmd': 'updateoutlen', 'data': vars.genamt}, broadcast=True) - emit('from_server', {'cmd': 'updatetknmax', 'data': vars.max_length}, broadcast=True) - emit('from_server', {'cmd': 'updatenumseq', 'data': vars.numseqs}, broadcast=True) + if(model_settings.model != "InferKit"): + emit('from_server', {'cmd': 'updatetemp', 'data': model_settings.temp}, broadcast=True) + emit('from_server', {'cmd': 'updatetopp', 'data': model_settings.top_p}, broadcast=True) + emit('from_server', {'cmd': 'updatetopk', 'data': model_settings.top_k}, broadcast=True) + emit('from_server', {'cmd': 'updatetfs', 'data': model_settings.tfs}, broadcast=True) + emit('from_server', {'cmd': 'updatetypical', 'data': model_settings.typical}, broadcast=True) + emit('from_server', {'cmd': 'updatetopa', 'data': model_settings.top_a}, broadcast=True) + emit('from_server', {'cmd': 'updatereppen', 'data': model_settings.rep_pen}, broadcast=True) + emit('from_server', {'cmd': 'updatereppenslope', 'data': model_settings.rep_pen_slope}, broadcast=True) + emit('from_server', {'cmd': 'updatereppenrange', 'data': model_settings.rep_pen_range}, broadcast=True) + emit('from_server', {'cmd': 'updateoutlen', 'data': model_settings.genamt}, broadcast=True) + emit('from_server', {'cmd': 'updatetknmax', 'data': model_settings.max_length}, broadcast=True) + emit('from_server', {'cmd': 'updatenumseq', 'data': model_settings.numseqs}, broadcast=True) else: - emit('from_server', {'cmd': 'updatetemp', 'data': vars.temp}, broadcast=True) - emit('from_server', {'cmd': 'updatetopp', 'data': vars.top_p}, broadcast=True) - emit('from_server', {'cmd': 'updateikgen', 'data': vars.ikgen}, broadcast=True) + emit('from_server', {'cmd': 'updatetemp', 'data': model_settings.temp}, broadcast=True) + emit('from_server', {'cmd': 'updatetopp', 'data': model_settings.top_p}, broadcast=True) + emit('from_server', {'cmd': 'updateikgen', 'data': model_settings.ikgen}, 
broadcast=True) - emit('from_server', {'cmd': 'updateanotedepth', 'data': vars.andepth}, broadcast=True) - emit('from_server', {'cmd': 'updatewidepth', 'data': vars.widepth}, broadcast=True) - emit('from_server', {'cmd': 'updateuseprompt', 'data': vars.useprompt}, broadcast=True) - emit('from_server', {'cmd': 'updateadventure', 'data': vars.adventure}, broadcast=True) - emit('from_server', {'cmd': 'updatechatmode', 'data': vars.chatmode}, broadcast=True) - emit('from_server', {'cmd': 'updatedynamicscan', 'data': vars.dynamicscan}, broadcast=True) - emit('from_server', {'cmd': 'updateautosave', 'data': vars.autosave}, broadcast=True) - emit('from_server', {'cmd': 'updatenopromptgen', 'data': vars.nopromptgen}, broadcast=True) - emit('from_server', {'cmd': 'updaterngpersist', 'data': vars.rngpersist}, broadcast=True) - emit('from_server', {'cmd': 'updatenogenmod', 'data': vars.nogenmod}, broadcast=True) + emit('from_server', {'cmd': 'updateanotedepth', 'data': story_settings.andepth}, broadcast=True) + emit('from_server', {'cmd': 'updatewidepth', 'data': user_settings.widepth}, broadcast=True) + emit('from_server', {'cmd': 'updateuseprompt', 'data': story_settings.useprompt}, broadcast=True) + emit('from_server', {'cmd': 'updateadventure', 'data': story_settings.adventure}, broadcast=True) + emit('from_server', {'cmd': 'updatechatmode', 'data': story_settings.chatmode}, broadcast=True) + emit('from_server', {'cmd': 'updatedynamicscan', 'data': story_settings.dynamicscan}, broadcast=True) + emit('from_server', {'cmd': 'updateautosave', 'data': user_settings.autosave}, broadcast=True) + emit('from_server', {'cmd': 'updatenopromptgen', 'data': user_settings.nopromptgen}, broadcast=True) + emit('from_server', {'cmd': 'updaterngpersist', 'data': user_settings.rngpersist}, broadcast=True) + emit('from_server', {'cmd': 'updatenogenmod', 'data': user_settings.nogenmod}, broadcast=True) - emit('from_server', {'cmd': 'updatefrmttriminc', 'data': 
vars.formatoptns["frmttriminc"]}, broadcast=True) - emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': vars.formatoptns["frmtrmblln"]}, broadcast=True) - emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': vars.formatoptns["frmtrmspch"]}, broadcast=True) - emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': vars.formatoptns["frmtadsnsp"]}, broadcast=True) - emit('from_server', {'cmd': 'updatesingleline', 'data': vars.formatoptns["singleline"]}, broadcast=True) + emit('from_server', {'cmd': 'updatefrmttriminc', 'data': user_settings.formatoptns["frmttriminc"]}, broadcast=True) + emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': user_settings.formatoptns["frmtrmblln"]}, broadcast=True) + emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': user_settings.formatoptns["frmtrmspch"]}, broadcast=True) + emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': user_settings.formatoptns["frmtadsnsp"]}, broadcast=True) + emit('from_server', {'cmd': 'updatesingleline', 'data': user_settings.formatoptns["singleline"]}, broadcast=True) # Allow toggle events again emit('from_server', {'cmd': 'allowtoggle', 'data': True}, broadcast=True) @@ -4561,10 +4452,10 @@ def refresh_settings(): #==================================================================# def set_aibusy(state): if(state): - vars.aibusy = True + system_settings.aibusy = True emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, broadcast=True) else: - vars.aibusy = False + system_settings.aibusy = False emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) #==================================================================# @@ -4572,11 +4463,11 @@ def set_aibusy(state): #==================================================================# def editrequest(n): if(n == 0): - txt = vars.prompt + txt = story_settings.prompt else: - txt = vars.actions[n-1] + txt = story_settings.actions[n-1] - vars.editln = n + story_settings.editln = n emit('from_server', {'cmd': 
'setinputtext', 'data': txt}, broadcast=True) emit('from_server', {'cmd': 'enablesubmit', 'data': ''}, broadcast=True) @@ -4584,19 +4475,19 @@ def editrequest(n): # #==================================================================# def editsubmit(data): - vars.recentedit = True - if(vars.editln == 0): - vars.prompt = data + story_settings.recentedit = True + if(story_settings.editln == 0): + story_settings.prompt = data else: - vars.actions_metadata[vars.editln-1]['Alternative Text'] = vars.actions_metadata[vars.editln-1]['Alternative Text'] + [{"Text": vars.actions[vars.editln-1], "Pinned": False, + story_settings.actions_metadata[story_settings.editln-1]['Alternative Text'] = story_settings.actions_metadata[story_settings.editln-1]['Alternative Text'] + [{"Text": story_settings.actions[story_settings.editln-1], "Pinned": False, "Previous Selection": False, "Edited": True}] - vars.actions_metadata[vars.editln-1]['Selected Text'] = data - vars.actions[vars.editln-1] = data + story_settings.actions_metadata[story_settings.editln-1]['Selected Text'] = data + story_settings.actions[story_settings.editln-1] = data - vars.mode = "play" - update_story_chunk(vars.editln) - emit('from_server', {'cmd': 'texteffect', 'data': vars.editln}, broadcast=True) + story_settings.mode = "play" + update_story_chunk(story_settings.editln) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.editln}, broadcast=True) emit('from_server', {'cmd': 'editmode', 'data': 'false'}) send_debug() @@ -4604,18 +4495,18 @@ def editsubmit(data): # #==================================================================# def deleterequest(): - vars.recentedit = True + story_settings.recentedit = True # Don't delete prompt - if(vars.editln == 0): + if(story_settings.editln == 0): # Send error message pass else: - vars.actions_metadata[vars.editln-1]['Alternative Text'] = [{"Text": vars.actions[vars.editln-1], "Pinned": False, - "Previous Selection": True, "Edited": False}] + 
vars.actions_metadata[vars.editln-1]['Alternative Text'] - vars.actions_metadata[vars.editln-1]['Selected Text'] = '' - vars.actions[vars.editln-1] = '' - vars.mode = "play" - remove_story_chunk(vars.editln) + story_settings.actions_metadata[story_settings.editln-1]['Alternative Text'] = [{"Text": story_settings.actions[story_settings.editln-1], "Pinned": False, + "Previous Selection": True, "Edited": False}] + story_settings.actions_metadata[story_settings.editln-1]['Alternative Text'] + story_settings.actions_metadata[story_settings.editln-1]['Selected Text'] = '' + story_settings.actions[story_settings.editln-1] = '' + story_settings.mode = "play" + remove_story_chunk(story_settings.editln) emit('from_server', {'cmd': 'editmode', 'data': 'false'}) send_debug() @@ -4623,19 +4514,19 @@ def deleterequest(): # #==================================================================# def inlineedit(chunk, data): - vars.recentedit = True + story_settings.recentedit = True chunk = int(chunk) if(chunk == 0): if(len(data.strip()) == 0): return - vars.prompt = data + story_settings.prompt = data else: - if(chunk-1 in vars.actions): - vars.actions_metadata[chunk-1]['Alternative Text'] = vars.actions_metadata[chunk-1]['Alternative Text'] + [{"Text": vars.actions[chunk-1], "Pinned": False, + if(chunk-1 in story_settings.actions): + story_settings.actions_metadata[chunk-1]['Alternative Text'] = story_settings.actions_metadata[chunk-1]['Alternative Text'] + [{"Text": story_settings.actions[chunk-1], "Pinned": False, "Previous Selection": False, "Edited": True}] - vars.actions_metadata[chunk-1]['Selected Text'] = data - vars.actions[chunk-1] = data + story_settings.actions_metadata[chunk-1]['Selected Text'] = data + story_settings.actions[chunk-1] = data else: print(f"WARNING: Attempted to edit non-existent chunk {chunk}") @@ -4649,7 +4540,7 @@ def inlineedit(chunk, data): # #==================================================================# def inlinedelete(chunk): - 
vars.recentedit = True + story_settings.recentedit = True chunk = int(chunk) # Don't delete prompt if(chunk == 0): @@ -4658,12 +4549,12 @@ def inlinedelete(chunk): emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}) emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) else: - if(chunk-1 in vars.actions): - vars.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": vars.actions[chunk-1], "Pinned": False, + if(chunk-1 in story_settings.actions): + story_settings.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": story_settings.actions[chunk-1], "Pinned": False, "Previous Selection": True, - "Edited": False}] + vars.actions_metadata[chunk-1]['Alternative Text'] - vars.actions_metadata[chunk-1]['Selected Text'] = '' - vars.actions[chunk-1] = '' + "Edited": False}] + story_settings.actions_metadata[chunk-1]['Alternative Text'] + story_settings.actions_metadata[chunk-1]['Selected Text'] = '' + story_settings.actions[chunk-1] = '' else: print(f"WARNING: Attempted to delete non-existent chunk {chunk}") setgamesaved(False) @@ -4675,28 +4566,28 @@ def inlinedelete(chunk): # Toggles the game mode for memory editing and sends UI commands #==================================================================# def togglememorymode(): - if(vars.mode == "play"): - vars.mode = "memory" + if(story_settings.mode == "play"): + story_settings.mode = "memory" emit('from_server', {'cmd': 'memmode', 'data': 'true'}, broadcast=True) - emit('from_server', {'cmd': 'setinputtext', 'data': vars.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': vars.authornotetemplate}, broadcast=True) - elif(vars.mode == "memory"): - vars.mode = "play" + emit('from_server', {'cmd': 'setinputtext', 'data': story_settings.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) + 
emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) + elif(story_settings.mode == "memory"): + story_settings.mode = "play" emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) #==================================================================# # Toggles the game mode for WI editing and sends UI commands #==================================================================# def togglewimode(): - if(vars.mode == "play"): - vars.mode = "wi" + if(story_settings.mode == "play"): + story_settings.mode = "wi" emit('from_server', {'cmd': 'wimode', 'data': 'true'}, broadcast=True) - elif(vars.mode == "wi"): + elif(story_settings.mode == "wi"): # Commit WI fields first requestwi() # Then set UI state back to Play - vars.mode = "play" + story_settings.mode = "play" emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True) sendwi() @@ -4704,17 +4595,17 @@ def togglewimode(): # #==================================================================# def addwiitem(folder_uid=None): - assert folder_uid is None or folder_uid in vars.wifolders_d - ob = {"key": "", "keysecondary": "", "content": "", "comment": "", "folder": folder_uid, "num": len(vars.worldinfo), "init": False, "selective": False, "constant": False} - vars.worldinfo.append(ob) + assert folder_uid is None or folder_uid in story_settings.wifolders_d + ob = {"key": "", "keysecondary": "", "content": "", "comment": "", "folder": folder_uid, "num": len(story_settings.worldinfo), "init": False, "selective": False, "constant": False} + story_settings.worldinfo.append(ob) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid if(folder_uid is not None): 
- vars.wifolders_u[folder_uid].append(vars.worldinfo[-1]) + story_settings.wifolders_u[folder_uid].append(story_settings.worldinfo[-1]) emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True) #==================================================================# @@ -4723,12 +4614,12 @@ def addwiitem(folder_uid=None): def addwifolder(): while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.wifolders_d): + if(uid not in story_settings.wifolders_d): break ob = {"name": "", "collapsed": False} - vars.wifolders_d[uid] = ob - vars.wifolders_l.append(uid) - vars.wifolders_u[uid] = [] + story_settings.wifolders_d[uid] = ob + story_settings.wifolders_l.append(uid) + story_settings.wifolders_u[uid] = [] emit('from_server', {'cmd': 'addwifolder', 'uid': uid, 'data': ob}, broadcast=True) addwiitem(folder_uid=uid) @@ -4738,20 +4629,20 @@ def addwifolder(): #==================================================================# def movewiitem(dst, src): setgamesaved(False) - if(vars.worldinfo_u[src]["folder"] is not None): - for i, e in enumerate(vars.wifolders_u[vars.worldinfo_u[src]["folder"]]): - if(e is vars.worldinfo_u[src]): - vars.wifolders_u[vars.worldinfo_u[src]["folder"]].pop(i) + if(story_settings.worldinfo_u[src]["folder"] is not None): + for i, e in enumerate(story_settings.wifolders_u[story_settings.worldinfo_u[src]["folder"]]): + if(e is story_settings.worldinfo_u[src]): + story_settings.wifolders_u[story_settings.worldinfo_u[src]["folder"]].pop(i) break - if(vars.worldinfo_u[dst]["folder"] is not None): - vars.wifolders_u[vars.worldinfo_u[dst]["folder"]].append(vars.worldinfo_u[src]) - vars.worldinfo_u[src]["folder"] = vars.worldinfo_u[dst]["folder"] - for i, e in enumerate(vars.worldinfo): - if(e is vars.worldinfo_u[src]): + if(story_settings.worldinfo_u[dst]["folder"] is not None): + story_settings.wifolders_u[story_settings.worldinfo_u[dst]["folder"]].append(story_settings.worldinfo_u[src]) + 
story_settings.worldinfo_u[src]["folder"] = story_settings.worldinfo_u[dst]["folder"] + for i, e in enumerate(story_settings.worldinfo): + if(e is story_settings.worldinfo_u[src]): _src = i - elif(e is vars.worldinfo_u[dst]): + elif(e is story_settings.worldinfo_u[dst]): _dst = i - vars.worldinfo.insert(_dst - (_dst >= _src), vars.worldinfo.pop(_src)) + story_settings.worldinfo.insert(_dst - (_dst >= _src), story_settings.worldinfo.pop(_src)) sendwi() #==================================================================# @@ -4760,12 +4651,12 @@ def movewiitem(dst, src): #==================================================================# def movewifolder(dst, src): setgamesaved(False) - vars.wifolders_l.remove(src) + story_settings.wifolders_l.remove(src) if(dst is None): # If dst is None, that means we should move src to be the last folder - vars.wifolders_l.append(src) + story_settings.wifolders_l.append(src) else: - vars.wifolders_l.insert(vars.wifolders_l.index(dst), src) + story_settings.wifolders_l.insert(story_settings.wifolders_l.index(dst), src) sendwi() #==================================================================# @@ -4773,15 +4664,15 @@ def movewifolder(dst, src): #==================================================================# def sendwi(): # Cache len of WI - ln = len(vars.worldinfo) + ln = len(story_settings.worldinfo) # Clear contents of WI container - emit('from_server', {'cmd': 'wistart', 'wifolders_d': vars.wifolders_d, 'wifolders_l': vars.wifolders_l, 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'wistart', 'wifolders_d': story_settings.wifolders_d, 'wifolders_l': story_settings.wifolders_l, 'data': ''}, broadcast=True) # Stable-sort WI entries in order of folder stablesortwi() - vars.worldinfo_i = [wi for wi in vars.worldinfo if wi["init"]] + story_settings.worldinfo_i = [wi for wi in story_settings.worldinfo if wi["init"]] # If there are no WI entries, send an empty WI object if(ln == 0): @@ -4789,9 +4680,9 @@ def 
sendwi(): else: # Send contents of WI array last_folder = ... - for wi in vars.worldinfo: + for wi in story_settings.worldinfo: if(wi["folder"] != last_folder): - emit('from_server', {'cmd': 'addwifolder', 'uid': wi["folder"], 'data': vars.wifolders_d[wi["folder"]] if wi["folder"] is not None else None}, broadcast=True) + emit('from_server', {'cmd': 'addwifolder', 'uid': wi["folder"], 'data': story_settings.wifolders_d[wi["folder"]] if wi["folder"] is not None else None}, broadcast=True) last_folder = wi["folder"] ob = wi emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True) @@ -4803,7 +4694,7 @@ def sendwi(): #==================================================================# def requestwi(): list = [] - for wi in vars.worldinfo: + for wi in story_settings.worldinfo: list.append(wi["num"]) emit('from_server', {'cmd': 'requestwiitem', 'data': list}) @@ -4812,11 +4703,11 @@ def requestwi(): # and items in different folders are sorted based on the order of the folders #==================================================================# def stablesortwi(): - mapping = {uid: index for index, uid in enumerate(vars.wifolders_l)} - vars.worldinfo.sort(key=lambda x: mapping[x["folder"]] if x["folder"] is not None else float("inf")) + mapping = {uid: index for index, uid in enumerate(story_settings.wifolders_l)} + story_settings.worldinfo.sort(key=lambda x: mapping[x["folder"]] if x["folder"] is not None else float("inf")) last_folder = ... 
last_wi = None - for i, wi in enumerate(vars.worldinfo): + for i, wi in enumerate(story_settings.worldinfo): wi["num"] = i wi["init"] = True if(wi["folder"] != last_folder): @@ -4826,8 +4717,8 @@ def stablesortwi(): last_wi = wi if(last_wi is not None): last_wi["init"] = False - for folder in vars.wifolders_u: - vars.wifolders_u[folder].sort(key=lambda x: x["num"]) + for folder in story_settings.wifolders_u: + story_settings.wifolders_u[folder].sort(key=lambda x: x["num"]) #==================================================================# # Extract object from server and send it to WI objects @@ -4835,54 +4726,54 @@ def stablesortwi(): def commitwi(ar): for ob in ar: ob["uid"] = int(ob["uid"]) - vars.worldinfo_u[ob["uid"]]["key"] = ob["key"] - vars.worldinfo_u[ob["uid"]]["keysecondary"] = ob["keysecondary"] - vars.worldinfo_u[ob["uid"]]["content"] = ob["content"] - vars.worldinfo_u[ob["uid"]]["comment"] = ob.get("comment", "") - vars.worldinfo_u[ob["uid"]]["folder"] = ob.get("folder", None) - vars.worldinfo_u[ob["uid"]]["selective"] = ob["selective"] - vars.worldinfo_u[ob["uid"]]["constant"] = ob.get("constant", False) + story_settings.worldinfo_u[ob["uid"]]["key"] = ob["key"] + story_settings.worldinfo_u[ob["uid"]]["keysecondary"] = ob["keysecondary"] + story_settings.worldinfo_u[ob["uid"]]["content"] = ob["content"] + story_settings.worldinfo_u[ob["uid"]]["comment"] = ob.get("comment", "") + story_settings.worldinfo_u[ob["uid"]]["folder"] = ob.get("folder", None) + story_settings.worldinfo_u[ob["uid"]]["selective"] = ob["selective"] + story_settings.worldinfo_u[ob["uid"]]["constant"] = ob.get("constant", False) stablesortwi() - vars.worldinfo_i = [wi for wi in vars.worldinfo if wi["init"]] + story_settings.worldinfo_i = [wi for wi in story_settings.worldinfo if wi["init"]] #==================================================================# # #==================================================================# def deletewi(uid): - if(uid in vars.worldinfo_u): + 
if(uid in story_settings.worldinfo_u): setgamesaved(False) # Store UID of deletion request - vars.deletewi = uid - if(vars.deletewi is not None): - if(vars.worldinfo_u[vars.deletewi]["folder"] is not None): - for i, e in enumerate(vars.wifolders_u[vars.worldinfo_u[vars.deletewi]["folder"]]): - if(e is vars.worldinfo_u[vars.deletewi]): - vars.wifolders_u[vars.worldinfo_u[vars.deletewi]["folder"]].pop(i) - for i, e in enumerate(vars.worldinfo): - if(e is vars.worldinfo_u[vars.deletewi]): - del vars.worldinfo[i] + story_settings.deletewi = uid + if(story_settings.deletewi is not None): + if(story_settings.worldinfo_u[story_settings.deletewi]["folder"] is not None): + for i, e in enumerate(story_settings.wifolders_u[story_settings.worldinfo_u[story_settings.deletewi]["folder"]]): + if(e is story_settings.worldinfo_u[story_settings.deletewi]): + story_settings.wifolders_u[story_settings.worldinfo_u[story_settings.deletewi]["folder"]].pop(i) + for i, e in enumerate(story_settings.worldinfo): + if(e is story_settings.worldinfo_u[story_settings.deletewi]): + del story_settings.worldinfo[i] break - del vars.worldinfo_u[vars.deletewi] + del story_settings.worldinfo_u[story_settings.deletewi] # Send the new WI array structure sendwi() # And reset deletewi - vars.deletewi = None + story_settings.deletewi = None #==================================================================# # #==================================================================# def deletewifolder(uid): uid = int(uid) - del vars.wifolders_u[uid] - del vars.wifolders_d[uid] - del vars.wifolders_l[vars.wifolders_l.index(uid)] + del story_settings.wifolders_u[uid] + del story_settings.wifolders_d[uid] + del story_settings.wifolders_l[story_settings.wifolders_l.index(uid)] setgamesaved(False) # Delete uninitialized entries in the folder we're going to delete - vars.worldinfo = [wi for wi in vars.worldinfo if wi["folder"] != uid or wi["init"]] - vars.worldinfo_i = [wi for wi in vars.worldinfo if wi["init"]] + 
story_settings.worldinfo = [wi for wi in story_settings.worldinfo if wi["folder"] != uid or wi["init"]] + story_settings.worldinfo_i = [wi for wi in story_settings.worldinfo if wi["init"]] # Move WI entries that are inside of the folder we're going to delete # so that they're outside of all folders - for wi in vars.worldinfo: + for wi in story_settings.worldinfo: if(wi["folder"] == uid): wi["folder"] = None @@ -4895,21 +4786,21 @@ def checkworldinfo(txt, allowed_entries=None, allowed_folders=None, force_use_tx original_txt = txt if(actions is None): - actions = vars.actions + actions = story_settings.actions # Dont go any further if WI is empty - if(len(vars.worldinfo) == 0): + if(len(story_settings.worldinfo) == 0): return "", set() # Cache actions length ln = len(actions) # Don't bother calculating action history if widepth is 0 - if(vars.widepth > 0 and scan_story): - depth = vars.widepth + if(user_settings.widepth > 0 and scan_story): + depth = user_settings.widepth # If this is not a continue, add 1 to widepth since submitted # text is already in action history @ -1 - if(not force_use_txt and (txt != "" and vars.prompt != txt)): + if(not force_use_txt and (txt != "" and story_settings.prompt != txt)): txt = "" depth += 1 @@ -4926,9 +4817,9 @@ def checkworldinfo(txt, allowed_entries=None, allowed_folders=None, force_use_tx if(ln >= depth): txt = "".join(chunks) elif(ln > 0): - txt = vars.comregex_ai.sub('', vars.prompt) + "".join(chunks) + txt = system_settings.comregex_ai.sub('', story_settings.prompt) + "".join(chunks) elif(ln == 0): - txt = vars.comregex_ai.sub('', vars.prompt) + txt = system_settings.comregex_ai.sub('', story_settings.prompt) if(force_use_txt): txt += original_txt @@ -4936,7 +4827,7 @@ def checkworldinfo(txt, allowed_entries=None, allowed_folders=None, force_use_tx # Scan text for matches on WI keys wimem = "" found_entries = set() - for wi in vars.worldinfo: + for wi in story_settings.worldinfo: if(allowed_entries is not None and wi["uid"] 
not in allowed_entries): continue if(allowed_folders is not None and wi["folder"] not in allowed_folders): @@ -4955,14 +4846,14 @@ def checkworldinfo(txt, allowed_entries=None, allowed_folders=None, force_use_tx for k in keys: ky = k # Remove leading/trailing spaces if the option is enabled - if(vars.wirmvwhtsp): + if(user_settings.wirmvwhtsp): ky = k.strip() if ky in txt: if wi.get("selective", False) and len(keys_secondary): found = False for ks in keys_secondary: ksy = ks - if(vars.wirmvwhtsp): + if(user_settings.wirmvwhtsp): ksy = ks.strip() if ksy in txt: wimem = wimem + wi["content"] + "\n" @@ -4985,10 +4876,10 @@ def memsubmit(data): emit('from_server', {'cmd': 'setinputtext', 'data': data}, broadcast=True) # Maybe check for length at some point # For now just send it to storage - if(data != vars.memory): + if(data != story_settings.memory): setgamesaved(False) - vars.memory = data - vars.mode = "play" + story_settings.memory = data + story_settings.mode = "play" emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) # Ask for contents of Author's Note field @@ -5001,46 +4892,46 @@ def anotesubmit(data, template=""): assert type(data) is str and type(template) is str # Maybe check for length at some point # For now just send it to storage - if(data != vars.authornote): + if(data != story_settings.authornote): setgamesaved(False) - vars.authornote = data + story_settings.authornote = data - if(vars.authornotetemplate != template): - vars.setauthornotetemplate = template + if(story_settings.authornotetemplate != template): + story_settings.setauthornotetemplate = template settingschanged() - vars.authornotetemplate = template + story_settings.authornotetemplate = template - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': vars.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, 
broadcast=True) + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) #==================================================================# # Assembles game data into a request to InferKit API #==================================================================# def ikrequest(txt): # Log request to console - if not vars.quiet: + if not system_settings.quiet: print("{0}Len:{1}, Txt:{2}{3}".format(colors.YELLOW, len(txt), txt, colors.END)) # Build request JSON data reqdata = { 'forceNoEnd': True, - 'length': vars.ikgen, + 'length': model_settings.ikgen, 'prompt': { 'isContinuation': False, 'text': txt }, 'startFromBeginning': False, 'streamResponse': False, - 'temperature': vars.temp, - 'topP': vars.top_p + 'temperature': model_settings.temp, + 'topP': model_settings.top_p } # Create request req = requests.post( - vars.url, + model_settings.url, json = reqdata, headers = { - 'Authorization': 'Bearer '+vars.apikey + 'Authorization': 'Bearer '+model_settings.apikey } ) @@ -5048,28 +4939,28 @@ def ikrequest(txt): if(req.status_code == 200): genout = req.json()["data"]["text"] - vars.lua_koboldbridge.outputs[1] = genout + system_settings.lua_koboldbridge.outputs[1] = genout execute_outmod() - if(vars.lua_koboldbridge.regeneration_required): - vars.lua_koboldbridge.regeneration_required = False - genout = vars.lua_koboldbridge.outputs[1] + if(system_settings.lua_koboldbridge.regeneration_required): + system_settings.lua_koboldbridge.regeneration_required = False + genout = system_settings.lua_koboldbridge.outputs[1] assert genout is str - if not vars.quiet: + if not system_settings.quiet: print("{0}{1}{2}".format(colors.CYAN, genout, colors.END)) - vars.actions.append(genout) - if vars.actions.get_last_key() in vars.actions_metadata: - vars.actions_metadata[vars.actions.get_last_key()] = {"Selected Text": genout, "Alternative Text": []} + story_settings.actions.append(genout) + if story_settings.actions.get_last_key() 
in story_settings.actions_metadata: + story_settings.actions_metadata[story_settings.actions.get_last_key()] = {"Selected Text": genout, "Alternative Text": []} else: # 2. We've selected a chunk of text that is was presented previously - alternatives = [item['Text'] for item in vars.actions_metadata[vars.actions.get_last_key()]["Alternative Text"]] + alternatives = [item['Text'] for item in story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"]] if genout in alternatives: - alternatives = [item for item in vars.actions_metadata[vars.actions.get_last_key()]["Alternative Text"] if item['Text'] != genout] - vars.actions_metadata[vars.actions.get_last_key()]["Alternative Text"] = alternatives - vars.actions_metadata[vars.actions.get_last_key()]["Selected Text"] = genout + alternatives = [item for item in story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"] if item['Text'] != genout] + story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"] = alternatives + story_settings.actions_metadata[story_settings.actions.get_last_key()]["Selected Text"] = genout update_story_chunk('last') - emit('from_server', {'cmd': 'texteffect', 'data': vars.actions.get_last_key() + 1 if len(vars.actions) else 0}, broadcast=True) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) send_debug() set_aibusy(0) else: @@ -5089,44 +4980,44 @@ def ikrequest(txt): #==================================================================# def oairequest(txt, min, max): # Log request to console - if not vars.quiet: + if not system_settings.quiet: print("{0}Len:{1}, Txt:{2}{3}".format(colors.YELLOW, len(txt), txt, colors.END)) # Store context in memory to use it for comparison with generated content - vars.lastctx = txt + story_settings.lastctx = txt # Build request JSON data if 'GooseAI' in 
args.configname: reqdata = { 'prompt': txt, - 'max_tokens': vars.genamt, - 'temperature': vars.temp, - 'top_a': vars.top_a, - 'top_p': vars.top_p, - 'top_k': vars.top_k, - 'tfs': vars.tfs, - 'typical_p': vars.typical, - 'repetition_penalty': vars.rep_pen, - 'repetition_penalty_slope': vars.rep_pen_slope, - 'repetition_penalty_range': vars.rep_pen_range, - 'n': vars.numseqs, + 'max_tokens': model_settings.genamt, + 'temperature': model_settings.temp, + 'top_a': model_settings.top_a, + 'top_p': model_settings.top_p, + 'top_k': model_settings.top_k, + 'tfs': model_settings.tfs, + 'typical_p': model_settings.typical, + 'repetition_penalty': model_settings.rep_pen, + 'repetition_penalty_slope': model_settings.rep_pen_slope, + 'repetition_penalty_range': model_settings.rep_pen_range, + 'n': model_settings.numseqs, 'stream': False } else: reqdata = { 'prompt': txt, - 'max_tokens': vars.genamt, - 'temperature': vars.temp, - 'top_p': vars.top_p, - 'n': vars.numseqs, + 'max_tokens': model_settings.genamt, + 'temperature': model_settings.temp, + 'top_p': model_settings.top_p, + 'n': model_settings.numseqs, 'stream': False } req = requests.post( - vars.oaiurl, + model_settings.oaiurl, json = reqdata, headers = { - 'Authorization': 'Bearer '+vars.oaiapikey, + 'Authorization': 'Bearer '+model_settings.oaiapikey, 'Content-Type': 'application/json' } ) @@ -5136,48 +5027,48 @@ def oairequest(txt, min, max): outputs = [out["text"] for out in req.json()["choices"]] for idx in range(len(outputs)): - vars.lua_koboldbridge.outputs[idx+1] = outputs[idx] + system_settings.lua_koboldbridge.outputs[idx+1] = outputs[idx] execute_outmod() - if (vars.lua_koboldbridge.regeneration_required): - vars.lua_koboldbridge.regeneration_required = False + if (system_settings.lua_koboldbridge.regeneration_required): + system_settings.lua_koboldbridge.regeneration_required = False genout = [] for i in range(len(outputs)): genout.append( - {"generated_text": vars.lua_koboldbridge.outputs[i + 1]}) + 
{"generated_text": system_settings.lua_koboldbridge.outputs[i + 1]}) assert type(genout[-1]["generated_text"]) is str else: genout = [ {"generated_text": utils.decodenewlines(txt)} for txt in outputs] - if vars.actions.get_last_key() not in vars.actions_metadata: - vars.actions_metadata[vars.actions.get_last_key()] = { + if story_settings.actions.get_last_key() not in story_settings.actions_metadata: + story_settings.actions_metadata[story_settings.actions.get_last_key()] = { "Selected Text": genout[0], "Alternative Text": []} else: # 2. We've selected a chunk of text that is was presented previously try: - alternatives = [item['Text'] for item in vars.actions_metadata[len(vars.actions)-1]["Alternative Text"]] + alternatives = [item['Text'] for item in story_settings.actions_metadata[len(story_settings.actions)-1]["Alternative Text"]] except: - print(len(vars.actions)) - print(vars.actions_metadata) + print(len(story_settings.actions)) + print(story_settings.actions_metadata) raise if genout in alternatives: - alternatives = [item for item in vars.actions_metadata[vars.actions.get_last_key() ]["Alternative Text"] if item['Text'] != genout] - vars.actions_metadata[vars.actions.get_last_key()]["Alternative Text"] = alternatives - vars.actions_metadata[vars.actions.get_last_key()]["Selected Text"] = genout + alternatives = [item for item in story_settings.actions_metadata[story_settings.actions.get_last_key() ]["Alternative Text"] if item['Text'] != genout] + story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"] = alternatives + story_settings.actions_metadata[story_settings.actions.get_last_key()]["Selected Text"] = genout if (len(genout) == 1): genresult(genout[0]["generated_text"]) else: - if (vars.lua_koboldbridge.restart_sequence is not None and - vars.lua_koboldbridge.restart_sequence > 0): - genresult(genout[vars.lua_koboldbridge.restart_sequence - 1][ + if (system_settings.lua_koboldbridge.restart_sequence is not None and + 
system_settings.lua_koboldbridge.restart_sequence > 0): + genresult(genout[system_settings.lua_koboldbridge.restart_sequence - 1][ "generated_text"]) else: genselect(genout) - if not vars.quiet: + if not system_settings.quiet: print("{0}{1}{2}".format(colors.CYAN, genout, colors.END)) set_aibusy(0) @@ -5196,13 +5087,13 @@ def oairequest(txt, min, max): # Forces UI to Play mode #==================================================================# def exitModes(): - if(vars.mode == "edit"): + if(story_settings.mode == "edit"): emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) - elif(vars.mode == "memory"): + elif(story_settings.mode == "memory"): emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) - elif(vars.mode == "wi"): + elif(story_settings.mode == "wi"): emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True) - vars.mode = "play" + story_settings.mode = "play" #==================================================================# # Launch in-browser save prompt @@ -5213,11 +5104,11 @@ def saveas(data): savepins = data['pins'] # Check if filename exists already name = utils.cleanfilename(name) - if(not fileops.saveexists(name) or (vars.saveow and vars.svowname == name)): + if(not fileops.saveexists(name) or (user_settings.saveow and user_settings.svowname == name)): # All clear to save e = saveRequest(fileops.storypath(name), savepins=savepins) - vars.saveow = False - vars.svowname = "" + user_settings.saveow = False + user_settings.svowname = "" if(e is None): emit('from_server', {'cmd': 'hidesaveas', 'data': ''}) else: @@ -5225,8 +5116,8 @@ def saveas(data): emit('from_server', {'cmd': 'popuperror', 'data': str(e)}) else: # File exists, prompt for overwrite - vars.saveow = True - vars.svowname = name + user_settings.saveow = True + user_settings.svowname = name emit('from_server', {'cmd': 'askforoverwrite', 'data': ''}) #==================================================================# @@ -5236,7 
+5127,7 @@ def deletesave(name): name = utils.cleanfilename(name) e = fileops.deletesave(name) if(e is None): - if(vars.smandelete): + if(system_settings.smandelete): emit('from_server', {'cmd': 'hidepopupdelete', 'data': ''}) getloadlist() else: @@ -5252,12 +5143,12 @@ def renamesave(name, newname): # Check if filename exists already name = utils.cleanfilename(name) newname = utils.cleanfilename(newname) - if(not fileops.saveexists(newname) or name == newname or (vars.saveow and vars.svowname == newname)): + if(not fileops.saveexists(newname) or name == newname or (user_settings.saveow and user_settings.svowname == newname)): e = fileops.renamesave(name, newname) - vars.saveow = False - vars.svowname = "" + user_settings.saveow = False + user_settings.svowname = "" if(e is None): - if(vars.smanrename): + if(system_settings.smanrename): emit('from_server', {'cmd': 'hidepopuprename', 'data': ''}) getloadlist() else: @@ -5267,8 +5158,8 @@ def renamesave(name, newname): emit('from_server', {'cmd': 'popuperror', 'data': str(e)}) else: # File exists, prompt for overwrite - vars.saveow = True - vars.svowname = newname + user_settings.saveow = True + user_settings.svowname = newname emit('from_server', {'cmd': 'askforoverwrite', 'data': ''}) #==================================================================# @@ -5276,8 +5167,8 @@ def renamesave(name, newname): #==================================================================# def save(): # Check if a file is currently open - if(".json" in vars.savedir): - saveRequest(vars.savedir) + if(".json" in system_settings.savedir): + saveRequest(system_settings.savedir) else: emit('from_server', {'cmd': 'saveas', 'data': ''}) @@ -5285,7 +5176,7 @@ def save(): # Save the story via file browser #==================================================================# def savetofile(): - savpath = fileops.getsavepath(vars.savedir, "Save Story As", [("Json", "*.json")]) + savpath = fileops.getsavepath(system_settings.savedir, "Save Story 
As", [("Json", "*.json")]) saveRequest(savpath) #==================================================================# @@ -5297,24 +5188,24 @@ def saveRequest(savpath, savepins=True): exitModes() # Save path for future saves - vars.savedir = savpath + system_settings.savedir = savpath txtpath = os.path.splitext(savpath)[0] + ".txt" # Build json to write js = {} - js["gamestarted"] = vars.gamestarted - js["prompt"] = vars.prompt - js["memory"] = vars.memory - js["authorsnote"] = vars.authornote - js["anotetemplate"] = vars.authornotetemplate - js["actions"] = tuple(vars.actions.values()) + js["gamestarted"] = story_settings.gamestarted + js["prompt"] = story_settings.prompt + js["memory"] = story_settings.memory + js["authorsnote"] = story_settings.authornote + js["anotetemplate"] = story_settings.authornotetemplate + js["actions"] = tuple(story_settings.actions.values()) if savepins: - js["actions_metadata"] = vars.actions_metadata + js["actions_metadata"] = story_settings.actions_metadata js["worldinfo"] = [] - js["wifolders_d"] = vars.wifolders_d - js["wifolders_l"] = vars.wifolders_l + js["wifolders_d"] = story_settings.wifolders_d + js["wifolders_l"] = story_settings.wifolders_l # Extract only the important bits of WI - for wi in vars.worldinfo_i: + for wi in story_settings.worldinfo_i: if(True): js["worldinfo"].append({ "key": wi["key"], @@ -5326,7 +5217,7 @@ def saveRequest(savpath, savepins=True): "constant": wi["constant"] }) - txt = vars.prompt + "".join(vars.actions.values()) + txt = story_settings.prompt + "".join(story_settings.actions.values()) # Write it try: @@ -5354,8 +5245,8 @@ def saveRequest(savpath, savepins=True): filename = path.basename(savpath) if(filename.endswith('.json')): filename = filename[:-5] - vars.laststory = filename - emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) + user_settings.laststory = filename + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) 
setgamesaved(True) print("{0}Story saved to {1}!{2}".format(colors.GREEN, path.basename(savpath), colors.END)) @@ -5369,8 +5260,8 @@ def getloadlist(): # Show list of soft prompts #==================================================================# def getsplist(): - if(vars.allowsp): - emit('from_server', {'cmd': 'buildsp', 'data': fileops.getspfiles(vars.modeldim)}) + if(system_settings.allowsp): + emit('from_server', {'cmd': 'buildsp', 'data': fileops.getspfiles(model_settings.modeldim)}) #==================================================================# # Get list of userscripts @@ -5379,13 +5270,13 @@ def getuslist(): files = {i: v for i, v in enumerate(fileops.getusfiles())} loaded = [] unloaded = [] - userscripts = set(vars.userscripts) + userscripts = set(system_settings.userscripts) for i in range(len(files)): if files[i]["filename"] not in userscripts: unloaded.append(files[i]) files = {files[k]["filename"]: files[k] for k in files} userscripts = set(files.keys()) - for filename in vars.userscripts: + for filename in system_settings.userscripts: if filename in userscripts: loaded.append(files[filename]) return unloaded, loaded @@ -5394,7 +5285,7 @@ def getuslist(): # Load a saved story via file browser #==================================================================# def loadfromfile(): - loadpath = fileops.getloadpath(vars.savedir, "Select Story File", [("Json", "*.json")]) + loadpath = fileops.getloadpath(system_settings.savedir, "Select Story File", [("Json", "*.json")]) loadRequest(loadpath) #==================================================================# @@ -5417,22 +5308,22 @@ def loadRequest(loadpath, filename=None): filename = "untitled.json" # Copy file contents to vars - vars.gamestarted = js["gamestarted"] - vars.prompt = js["prompt"] - vars.memory = js["memory"] - vars.worldinfo = [] - vars.worldinfo = [] - vars.worldinfo_u = {} - vars.wifolders_d = {int(k): v for k, v in js.get("wifolders_d", {}).items()} - vars.wifolders_l = 
js.get("wifolders_l", []) - vars.wifolders_u = {uid: [] for uid in vars.wifolders_d} - vars.lastact = "" - vars.submission = "" - vars.lastctx = "" - vars.genseqs = [] + story_settings.gamestarted = js["gamestarted"] + story_settings.prompt = js["prompt"] + story_settings.memory = js["memory"] + story_settings.worldinfo = [] + story_settings.worldinfo = [] + story_settings.worldinfo_u = {} + story_settings.wifolders_d = {int(k): v for k, v in js.get("wifolders_d", {}).items()} + story_settings.wifolders_l = js.get("wifolders_l", []) + story_settings.wifolders_u = {uid: [] for uid in story_settings.wifolders_d} + story_settings.lastact = "" + story_settings.submission = "" + story_settings.lastctx = "" + story_settings.genseqs = [] - del vars.actions - vars.actions = structures.KoboldStoryRegister() + del story_settings.actions + story_settings.actions = koboldai_settings.KoboldStoryRegister() actions = collections.deque(js["actions"]) @@ -5440,59 +5331,59 @@ def loadRequest(loadpath, filename=None): if type(js["actions_metadata"]) == dict: temp = js["actions_metadata"] - vars.actions_metadata = {} + story_settings.actions_metadata = {} #we need to redo the numbering of the actions_metadata since the actions list doesn't preserve it's number on saving if len(temp) > 0: counter = 0 temp = {int(k):v for k,v in temp.items()} for i in range(max(temp)+1): if i in temp: - vars.actions_metadata[counter] = temp[i] + story_settings.actions_metadata[counter] = temp[i] counter += 1 del temp else: #fix if we're using the old metadata format - vars.actions_metadata = {} + story_settings.actions_metadata = {} i = 0 for text in js['actions']: - vars.actions_metadata[i] = {'Selected Text': text, 'Alternative Text': []} + story_settings.actions_metadata[i] = {'Selected Text': text, 'Alternative Text': []} i+=1 else: - vars.actions_metadata = {} + story_settings.actions_metadata = {} i = 0 for text in js['actions']: - vars.actions_metadata[i] = {'Selected Text': text, 'Alternative 
Text': []} + story_settings.actions_metadata[i] = {'Selected Text': text, 'Alternative Text': []} i+=1 - if(len(vars.prompt.strip()) == 0): + if(len(story_settings.prompt.strip()) == 0): while(len(actions)): action = actions.popleft() if(len(action.strip()) != 0): - vars.prompt = action + story_settings.prompt = action break else: - vars.gamestarted = False - if(vars.gamestarted): + story_settings.gamestarted = False + if(story_settings.gamestarted): for s in actions: - vars.actions.append(s) + story_settings.actions.append(s) # Try not to break older save files if("authorsnote" in js): - vars.authornote = js["authorsnote"] + story_settings.authornote = js["authorsnote"] else: - vars.authornote = "" + story_settings.authornote = "" if("anotetemplate" in js): - vars.authornotetemplate = js["anotetemplate"] + story_settings.authornotetemplate = js["anotetemplate"] else: - vars.authornotetemplate = "[Author's note: <|>]" + story_settings.authornotetemplate = "[Author's note: <|>]" if("worldinfo" in js): num = 0 for wi in js["worldinfo"]: - vars.worldinfo.append({ + story_settings.worldinfo.append({ "key": wi["key"], "keysecondary": wi.get("keysecondary", ""), "content": wi["content"], @@ -5506,44 +5397,44 @@ def loadRequest(loadpath, filename=None): }) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid - if(vars.worldinfo[-1]["folder"] is not None): - vars.wifolders_u[vars.worldinfo[-1]["folder"]].append(vars.worldinfo[-1]) + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"] is not None): + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) num += 1 - for uid in vars.wifolders_l + [None]: - vars.worldinfo.append({"key": "", 
"keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) + for uid in story_settings.wifolders_l + [None]: + story_settings.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid - if(vars.worldinfo[-1]["folder"] is not None): - vars.wifolders_u[vars.worldinfo[-1]["folder"]].append(vars.worldinfo[-1]) + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"] is not None): + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) stablesortwi() - vars.worldinfo_i = [wi for wi in vars.worldinfo if wi["init"]] + story_settings.worldinfo_i = [wi for wi in story_settings.worldinfo if wi["init"]] # Save path for save button - vars.savedir = loadpath + system_settings.savedir = loadpath # Clear loadselect var - vars.loadselect = "" + user_settings.loadselect = "" # Refresh game screen _filename = filename if(filename.endswith('.json')): _filename = filename[:-5] - vars.laststory = _filename - emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) + user_settings.laststory = _filename + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) setgamesaved(True) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': vars.authornotetemplate}, broadcast=True) 
+ emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) refresh_story() emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) @@ -5555,7 +5446,7 @@ def loadRequest(loadpath, filename=None): # Import an AIDungon game exported with Mimi's tool #==================================================================# def importRequest(): - importpath = fileops.getloadpath(vars.savedir, "Select AID CAT File", [("Json", "*.json")]) + importpath = fileops.getloadpath(system_settings.savedir, "Select AID CAT File", [("Json", "*.json")]) if(importpath): # Leave Edit/Memory mode before continuing @@ -5563,21 +5454,21 @@ def importRequest(): # Read file contents into JSON object file = open(importpath, "rb") - vars.importjs = json.load(file) + user_settings.importjs = json.load(file) # If a bundle file is being imported, select just the Adventures object - if type(vars.importjs) is dict and "stories" in vars.importjs: - vars.importjs = vars.importjs["stories"] + if type(user_settings.importjs) is dict and "stories" in user_settings.importjs: + user_settings.importjs = user_settings.importjs["stories"] # Clear Popup Contents emit('from_server', {'cmd': 'clearpopup', 'data': ''}, broadcast=True) # Initialize vars num = 0 - vars.importnum = -1 + user_settings.importnum = -1 # Get list of stories - for story in vars.importjs: + for story in user_settings.importjs: ob = {} ob["num"] = num if(story["title"] != "" and story["title"] != None): @@ -5602,57 +5493,57 @@ def importRequest(): # Import an AIDungon game selected in popup #==================================================================# def importgame(): - if(vars.importnum >= 0): + 
if(user_settings.importnum >= 0): # Cache reference to selected game - ref = vars.importjs[vars.importnum] + ref = user_settings.importjs[user_settings.importnum] # Copy game contents to vars - vars.gamestarted = True + story_settings.gamestarted = True # Support for different versions of export script if("actions" in ref): if(len(ref["actions"]) > 0): - vars.prompt = ref["actions"][0]["text"] + story_settings.prompt = ref["actions"][0]["text"] else: - vars.prompt = "" + story_settings.prompt = "" elif("actionWindow" in ref): if(len(ref["actionWindow"]) > 0): - vars.prompt = ref["actionWindow"][0]["text"] + story_settings.prompt = ref["actionWindow"][0]["text"] else: - vars.prompt = "" + story_settings.prompt = "" else: - vars.prompt = "" - vars.memory = ref["memory"] - vars.authornote = ref["authorsNote"] if type(ref["authorsNote"]) is str else "" - vars.authornotetemplate = "[Author's note: <|>]" - vars.actions = structures.KoboldStoryRegister() - vars.actions_metadata = {} - vars.worldinfo = [] - vars.worldinfo_i = [] - vars.worldinfo_u = {} - vars.wifolders_d = {} - vars.wifolders_l = [] - vars.wifolders_u = {uid: [] for uid in vars.wifolders_d} - vars.lastact = "" - vars.submission = "" - vars.lastctx = "" + story_settings.prompt = "" + story_settings.memory = ref["memory"] + story_settings.authornote = ref["authorsNote"] if type(ref["authorsNote"]) is str else "" + story_settings.authornotetemplate = "[Author's note: <|>]" + story_settings.actions = koboldai_settings.KoboldStoryRegister() + story_settings.actions_metadata = {} + story_settings.worldinfo = [] + story_settings.worldinfo_i = [] + story_settings.worldinfo_u = {} + story_settings.wifolders_d = {} + story_settings.wifolders_l = [] + story_settings.wifolders_u = {uid: [] for uid in story_settings.wifolders_d} + story_settings.lastact = "" + story_settings.submission = "" + story_settings.lastctx = "" # Get all actions except for prompt if("actions" in ref): if(len(ref["actions"]) > 1): for act in 
ref["actions"][1:]: - vars.actions.append(act["text"]) + story_settings.actions.append(act["text"]) elif("actionWindow" in ref): if(len(ref["actionWindow"]) > 1): for act in ref["actionWindow"][1:]: - vars.actions.append(act["text"]) + story_settings.actions.append(act["text"]) # Get just the important parts of world info if(ref["worldInfo"] != None): if(len(ref["worldInfo"]) > 1): num = 0 for wi in ref["worldInfo"]: - vars.worldinfo.append({ + story_settings.worldinfo.append({ "key": wi["keys"], "keysecondary": wi.get("keysecondary", ""), "content": wi["entry"], @@ -5666,41 +5557,41 @@ def importgame(): }) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid - if(vars.worldinfo[-1]["folder"]) is not None: - vars.wifolders_u[vars.worldinfo[-1]["folder"]].append(vars.worldinfo[-1]) + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"]) is not None: + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) num += 1 - for uid in vars.wifolders_l + [None]: - vars.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) + for uid in story_settings.wifolders_l + [None]: + story_settings.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid - if(vars.worldinfo[-1]["folder"] is not 
None): - vars.wifolders_u[vars.worldinfo[-1]["folder"]].append(vars.worldinfo[-1]) + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"] is not None): + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) stablesortwi() - vars.worldinfo_i = [wi for wi in vars.worldinfo if wi["init"]] + story_settings.worldinfo_i = [wi for wi in story_settings.worldinfo if wi["init"]] # Clear import data - vars.importjs = {} + user_settings.importjs = {} # Reset current save - vars.savedir = getcwd()+"\\stories" + system_settings.savedir = getcwd()+"\\stories" # Refresh game screen - vars.laststory = None - emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) + user_settings.laststory = None + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) setgamesaved(False) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': vars.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) refresh_story() emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) @@ -5718,26 +5609,26 @@ def importAidgRequest(id): js = req.json() # Import game state - vars.gamestarted = True - vars.prompt = js["promptContent"] - vars.memory = js["memory"] - vars.authornote = js["authorsNote"] - vars.authornotetemplate = "[Author's note: <|>]" - vars.actions 
= structures.KoboldStoryRegister() - vars.actions_metadata = {} - vars.worldinfo = [] - vars.worldinfo_i = [] - vars.worldinfo_u = {} - vars.wifolders_d = {} - vars.wifolders_l = [] - vars.wifolders_u = {uid: [] for uid in vars.wifolders_d} - vars.lastact = "" - vars.submission = "" - vars.lastctx = "" + story_settings.gamestarted = True + story_settings.prompt = js["promptContent"] + story_settings.memory = js["memory"] + story_settings.authornote = js["authorsNote"] + story_settings.authornotetemplate = "[Author's note: <|>]" + story_settings.actions = koboldai_settings.KoboldStoryRegister() + story_settings.actions_metadata = {} + story_settings.worldinfo = [] + story_settings.worldinfo_i = [] + story_settings.worldinfo_u = {} + story_settings.wifolders_d = {} + story_settings.wifolders_l = [] + story_settings.wifolders_u = {uid: [] for uid in story_settings.wifolders_d} + story_settings.lastact = "" + story_settings.submission = "" + story_settings.lastctx = "" num = 0 for wi in js["worldInfos"]: - vars.worldinfo.append({ + story_settings.worldinfo.append({ "key": wi["keys"], "keysecondary": wi.get("keysecondary", ""), "content": wi["entry"], @@ -5751,38 +5642,38 @@ def importAidgRequest(id): }) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid - if(vars.worldinfo[-1]["folder"]) is not None: - vars.wifolders_u[vars.worldinfo[-1]["folder"]].append(vars.worldinfo[-1]) + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"]) is not None: + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) num += 1 - for uid in vars.wifolders_l + [None]: - vars.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": 
uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) + for uid in story_settings.wifolders_l + [None]: + story_settings.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid - if(vars.worldinfo[-1]["folder"] is not None): - vars.wifolders_u[vars.worldinfo[-1]["folder"]].append(vars.worldinfo[-1]) + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"] is not None): + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) stablesortwi() - vars.worldinfo_i = [wi for wi in vars.worldinfo if wi["init"]] + story_settings.worldinfo_i = [wi for wi in story_settings.worldinfo if wi["init"]] # Reset current save - vars.savedir = getcwd()+"\\stories" + system_settings.savedir = getcwd()+"\\stories" # Refresh game screen - vars.laststory = None - emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) + user_settings.laststory = None + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) setgamesaved(False) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': vars.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) + emit('from_server', 
{'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) refresh_story() emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) @@ -5790,18 +5681,18 @@ def importAidgRequest(id): # Import World Info JSON file #==================================================================# def wiimportrequest(): - importpath = fileops.getloadpath(vars.savedir, "Select World Info File", [("Json", "*.json")]) + importpath = fileops.getloadpath(system_settings.savedir, "Select World Info File", [("Json", "*.json")]) if(importpath): file = open(importpath, "rb") js = json.load(file) if(len(js) > 0): # If the most recent WI entry is blank, remove it. - if(not vars.worldinfo[-1]["init"]): - del vars.worldinfo[-1] + if(not story_settings.worldinfo[-1]["init"]): + del story_settings.worldinfo[-1] # Now grab the new stuff - num = len(vars.worldinfo) + num = len(story_settings.worldinfo) for wi in js: - vars.worldinfo.append({ + story_settings.worldinfo.append({ "key": wi["keys"], "keysecondary": wi.get("keysecondary", ""), "content": wi["entry"], @@ -5815,26 +5706,26 @@ def wiimportrequest(): }) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid - if(vars.worldinfo[-1]["folder"]) is not None: - vars.wifolders_u[vars.worldinfo[-1]["folder"]].append(vars.worldinfo[-1]) + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"]) is not None: + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) num += 1 for uid in [None]: - vars.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) + 
story_settings.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in vars.worldinfo_u): + if(uid not in story_settings.worldinfo_u): break - vars.worldinfo_u[uid] = vars.worldinfo[-1] - vars.worldinfo[-1]["uid"] = uid - if(vars.worldinfo[-1]["folder"] is not None): - vars.wifolders_u[vars.worldinfo[-1]["folder"]].append(vars.worldinfo[-1]) + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"] is not None): + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) - if not vars.quiet: - print("{0}".format(vars.worldinfo[0])) + if not system_settings.quiet: + print("{0}".format(story_settings.worldinfo[0])) # Refresh game screen setgamesaved(False) @@ -5848,54 +5739,54 @@ def newGameRequest(): exitModes() # Clear vars values - vars.gamestarted = False - vars.prompt = "" - vars.memory = "" - vars.actions = structures.KoboldStoryRegister() - vars.actions_metadata = {} + story_settings.gamestarted = False + story_settings.prompt = "" + story_settings.memory = "" + story_settings.actions = koboldai_settings.KoboldStoryRegister() + story_settings.actions_metadata = {} - vars.authornote = "" - vars.authornotetemplate = vars.setauthornotetemplate - vars.worldinfo = [] - vars.worldinfo_i = [] - vars.worldinfo_u = {} - vars.wifolders_d = {} - vars.wifolders_l = [] - vars.lastact = "" - vars.submission = "" - vars.lastctx = "" + story_settings.authornote = "" + story_settings.authornotetemplate = story_settings.setauthornotetemplate + story_settings.worldinfo = [] + story_settings.worldinfo_i = [] + story_settings.worldinfo_u = {} + story_settings.wifolders_d = {} + story_settings.wifolders_l = [] + story_settings.lastact = "" 
+ story_settings.submission = "" + story_settings.lastctx = "" # Reset current save - vars.savedir = getcwd()+"\\stories" + system_settings.savedir = getcwd()+"\\stories" # Refresh game screen - vars.laststory = None - emit('from_server', {'cmd': 'setstoryname', 'data': vars.laststory}, broadcast=True) + user_settings.laststory = None + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) setgamesaved(True) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': vars.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': vars.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) setStartState() def randomGameRequest(topic, memory=""): - if(vars.noai): + if(system_settings.noai): newGameRequest() - vars.memory = memory - emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True) + story_settings.memory = memory + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) return - vars.recentrng = topic - vars.recentrngm = memory + story_settings.recentrng = topic + story_settings.recentrngm = memory newGameRequest() setgamesaved(False) _memory = memory if(len(memory) > 0): _memory = memory.rstrip() + "\n\n" - vars.memory = _memory + "You generate the following " + topic + " story concept :" - vars.lua_koboldbridge.feedback = None + story_settings.memory = _memory + "You generate the following " + topic + " story concept :" + system_settings.lua_koboldbridge.feedback = None actionsubmit("", force_submit=True, force_prompt_gen=True) - vars.memory = memory - emit('from_server', {'cmd': 
'setmemory', 'data': vars.memory}, broadcast=True) + story_settings.memory = memory + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) def final_startup(): # Prevent tokenizer from taking extra time the first time it's used @@ -5910,26 +5801,26 @@ def final_startup(): if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")): file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r") js = json.load(file) - if(vars.allowsp and "softprompt" in js and type(js["softprompt"]) is str and all(q not in js["softprompt"] for q in ("..", ":")) and (len(js["softprompt"]) == 0 or all(js["softprompt"][0] not in q for q in ("/", "\\")))): + if(system_settings.allowsp and "softprompt" in js and type(js["softprompt"]) is str and all(q not in js["softprompt"] for q in ("..", ":")) and (len(js["softprompt"]) == 0 or all(js["softprompt"][0] not in q for q in ("/", "\\")))): spRequest(js["softprompt"]) else: - vars.spfilename = "" + system_settings.spfilename = "" file.close() # Precompile TPU backend if required - if(vars.use_colab_tpu or vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): + if(system_settings.use_colab_tpu or model_settings.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): soft_tokens = tpumtjgetsofttokens() - if(vars.dynamicscan or (not vars.nogenmod and vars.has_genmod)): + if(story_settings.dynamicscan or (not user_settings.nogenmod and system_settings.has_genmod)): threading.Thread( target=tpu_mtj_backend.infer_dynamic, - args=(np.tile(np.uint32((23403, 727, 20185)), (vars.numseqs, 1)),), + args=(np.tile(np.uint32((23403, 727, 20185)), (model_settings.numseqs, 1)),), kwargs={ - "soft_embeddings": vars.sp, + "soft_embeddings": system_settings.sp, "soft_tokens": soft_tokens, "gen_len": 1, "use_callback": False, - "numseqs": vars.numseqs, - "excluded_world_info": list(set() for _ in range(vars.numseqs)), + "numseqs": model_settings.numseqs, + 
"excluded_world_info": list(set() for _ in range(model_settings.numseqs)), }, ).start() else: @@ -5937,42 +5828,42 @@ def final_startup(): target=tpu_mtj_backend.infer_static, args=(np.uint32((23403, 727, 20185)),), kwargs={ - "soft_embeddings": vars.sp, + "soft_embeddings": system_settings.sp, "soft_tokens": soft_tokens, "gen_len": 1, - "numseqs": vars.numseqs, + "numseqs": model_settings.numseqs, }, ).start() def send_debug(): - if vars.debug: + if user_settings.debug: debug_info = "" try: - debug_info = "{}Newline Mode: {}\n".format(debug_info, vars.newlinemode) + debug_info = "{}Newline Mode: {}\n".format(debug_info, model_settings.newlinemode) except: pass try: - debug_info = "{}Action Length: {}\n".format(debug_info, vars.actions.get_last_key()) + debug_info = "{}Action Length: {}\n".format(debug_info, story_settings.actions.get_last_key()) except: pass try: - debug_info = "{}Actions Metadata Length: {}\n".format(debug_info, max(vars.actions_metadata) if len(vars.actions_metadata) > 0 else 0) + debug_info = "{}Actions Metadata Length: {}\n".format(debug_info, max(story_settings.actions_metadata) if len(story_settings.actions_metadata) > 0 else 0) except: pass try: - debug_info = "{}Actions: {}\n".format(debug_info, [k for k in vars.actions]) + debug_info = "{}Actions: {}\n".format(debug_info, [k for k in story_settings.actions]) except: pass try: - debug_info = "{}Actions Metadata: {}\n".format(debug_info, [k for k in vars.actions_metadata]) + debug_info = "{}Actions Metadata: {}\n".format(debug_info, [k for k in story_settings.actions_metadata]) except: pass try: - debug_info = "{}Last Action: {}\n".format(debug_info, vars.actions[vars.actions.get_last_key()]) + debug_info = "{}Last Action: {}\n".format(debug_info, story_settings.actions[story_settings.actions.get_last_key()]) except: pass try: - debug_info = "{}Last Metadata: {}\n".format(debug_info, vars.actions_metadata[max(vars.actions_metadata)]) + debug_info = "{}Last Metadata: {}\n".format(debug_info, 
story_settings.actions_metadata[max(story_settings.actions_metadata)]) except: pass @@ -5988,15 +5879,15 @@ if __name__ == "__main__": general_startup() patch_transformers() #show_select_model_list() - if vars.model == "" or vars.model is None: - vars.model = "ReadOnly" + if model_settings.model == "" or model_settings.model is None: + model_settings.model = "ReadOnly" load_model(initial_load=True) # Start Flask/SocketIO (Blocking, so this must be last method!) port = args.port if "port" in args and args.port is not None else 5000 #socketio.run(app, host='0.0.0.0', port=port) - if(vars.host): + if(system_settings.host): if(args.localtunnel): import subprocess, shutil localtunnel = subprocess.Popen([shutil.which('lt'), '-p', str(port), 'http'], stdout=subprocess.PIPE) @@ -6027,7 +5918,7 @@ if __name__ == "__main__": else: print("{0}Webserver has started, you can now connect to this machine at port {1}{2}" .format(colors.GREEN, port, colors.END)) - vars.serverstarted = True + system_settings.serverstarted = True socketio.run(app, host='0.0.0.0', port=port) else: if args.unblock: @@ -6035,27 +5926,27 @@ if __name__ == "__main__": webbrowser.open_new('http://localhost:{0}'.format(port)) print("{0}Server started!\nYou may now connect with a browser at http://127.0.0.1:{1}/{2}" .format(colors.GREEN, port, colors.END)) - vars.serverstarted = True + system_settings.serverstarted = True socketio.run(app, port=port, host='0.0.0.0') else: try: from flaskwebgui import FlaskUI - vars.serverstarted = True - vars.flaskwebgui = True + system_settings.serverstarted = True + system_settings.flaskwebgui = True FlaskUI(app, socketio=socketio, start_server="flask-socketio", maximized=True, close_server_on_exit=True).run() except: import webbrowser webbrowser.open_new('http://localhost:{0}'.format(port)) print("{0}Server started!\nYou may now connect with a browser at http://127.0.0.1:{1}/{2}" .format(colors.GREEN, port, colors.END)) - vars.serverstarted = True + 
system_settings.serverstarted = True socketio.run(app, port=port) else: general_startup() patch_transformers() #show_select_model_list() - if vars.model == "" or vars.model is None: - vars.model = "ReadOnly" + if model_settings.model == "" or model_settings.model is None: + model_settings.model = "ReadOnly" load_model(initial_load=True) print("{0}\nServer started in WSGI mode!{1}".format(colors.GREEN, colors.END), flush=True) diff --git a/bridge.lua b/bridge.lua index fc6c8823..069ff4c2 100644 --- a/bridge.lua +++ b/bridge.lua @@ -380,7 +380,7 @@ return function(_python, _bridged) ---@return boolean function KoboldWorldInfoEntry:is_valid() - return _python.as_attrgetter(bridged.vars.worldinfo_u).get(rawget(self, "_uid")) ~= nil + return _python.as_attrgetter(bridged.story_settings.worldinfo_u).get(rawget(self, "_uid")) ~= nil end ---@param submission? string @@ -475,7 +475,7 @@ return function(_python, _bridged) if not check_validity(self) or type(u) ~= "number" then return end - local query = _python.as_attrgetter(bridged.vars.worldinfo_u).get(u) + local query = _python.as_attrgetter(bridged.story_settings.worldinfo_u).get(u) if query == nil or (rawget(self, "_name") == "KoboldWorldInfoFolder" and self.uid ~= _python.as_attrgetter(query).get("folder")) then return end @@ -521,7 +521,7 @@ return function(_python, _bridged) ---@return boolean function KoboldWorldInfoFolder:is_valid() - return _python.as_attrgetter(bridged.vars.wifolders_d).get(rawget(self, "_uid")) ~= nil + return _python.as_attrgetter(bridged.story_settings.wifolders_d).get(rawget(self, "_uid")) ~= nil end ---@param t KoboldWorldInfoFolder @@ -530,7 +530,7 @@ return function(_python, _bridged) if not check_validity(t) then return 0 end - return math.tointeger(_python.builtins.len(_python.as_attrgetter(bridged.vars.wifolders_u).get(t.uid))) - 1 + return math.tointeger(_python.builtins.len(_python.as_attrgetter(bridged.story_settings.wifolders_u).get(t.uid))) - 1 end 
KoboldWorldInfoFolder_mt._kobold_next = KoboldWorldInfoEntry_mt._kobold_next @@ -547,7 +547,7 @@ return function(_python, _bridged) elseif rawget(t, "_name") == "KoboldWorldInfoFolder" and k == "name" then return bridged.folder_get_attr(t.uid, k) elseif type(k) == "number" then - local query = rawget(t, "_name") == "KoboldWorldInfoFolder" and _python.as_attrgetter(bridged.vars.wifolders_u).get(t.uid) or bridged.vars.worldinfo_i + local query = rawget(t, "_name") == "KoboldWorldInfoFolder" and _python.as_attrgetter(bridged.story_settings.wifolders_u).get(t.uid) or bridged.story_settings.worldinfo_i k = math.tointeger(k) if k == nil or k < 1 or k > #t then return @@ -598,7 +598,7 @@ return function(_python, _bridged) if not check_validity(self) or type(u) ~= "number" then return end - local query = _python.as_attrgetter(bridged.vars.wifolders_d).get(u) + local query = _python.as_attrgetter(bridged.story_settings.wifolders_d).get(u) if query == nil then return end @@ -618,7 +618,7 @@ return function(_python, _bridged) if not check_validity(t) then return 0 end - return _python.builtins.len(bridged.vars.wifolders_l) + return _python.builtins.len(bridged.story_settings.wifolders_l) end KoboldWorldInfoFolderSelector_mt._kobold_next = KoboldWorldInfoEntry_mt._kobold_next @@ -632,7 +632,7 @@ return function(_python, _bridged) return end local folder = deepcopy(KoboldWorldInfoFolder) - rawset(folder, "_uid", math.tointeger(bridged.vars.wifolders_l[k-1])) + rawset(folder, "_uid", math.tointeger(bridged.story_settings.wifolders_l[k-1])) return folder end @@ -671,7 +671,7 @@ return function(_python, _bridged) if not check_validity(t) then return 0 end - return math.tointeger(_python.builtins.len(bridged.vars.worldinfo)) - math.tointeger(_python.builtins.len(bridged.vars.wifolders_l)) - 1 + return math.tointeger(_python.builtins.len(bridged.story_settings.worldinfo)) - math.tointeger(_python.builtins.len(bridged.story_settings.wifolders_l)) - 1 end 
KoboldWorldInfo_mt._kobold_next = KoboldWorldInfoEntry_mt._kobold_next @@ -724,12 +724,12 @@ return function(_python, _bridged) end if k == "content" then if rawget(t, "_num") == 0 then - if bridged.vars.gamestarted then - local prompt = koboldbridge.userstate == "genmod" and bridged.vars._prompt or bridged.vars.prompt + if bridged.story_settings.gamestarted then + local prompt = koboldbridge.userstate == "genmod" and bridged.vars._prompt or bridged.story_settings.prompt return prompt end end - local actions = koboldbridge.userstate == "genmod" and bridged.vars._actions or bridged.vars.actions + local actions = koboldbridge.userstate == "genmod" and bridged.vars._actions or bridged.story_settings.actions return _python.as_attrgetter(actions).get(math.tointeger(rawget(t, "_num")) - 1) end end @@ -751,7 +751,7 @@ return function(_python, _bridged) error("Attempted to set the prompt chunk's content to the empty string; this is not allowed") return end - local actions = koboldbridge.userstate == "genmod" and bridged.vars._actions or bridged.vars.actions + local actions = koboldbridge.userstate == "genmod" and bridged.vars._actions or bridged.story_settings.actions if _k ~= 0 and _python.as_attrgetter(actions).get(_k-1) == nil then return end @@ -776,11 +776,11 @@ return function(_python, _bridged) ---@return fun(): KoboldStoryChunk, table, nil function KoboldStory:forward_iter() - local actions = koboldbridge.userstate == "genmod" and bridged.vars._actions or bridged.vars.actions + local actions = koboldbridge.userstate == "genmod" and bridged.vars._actions or bridged.story_settings.actions local nxt, iterator = _python.iter(actions) local run_once = false local function f() - if not bridged.vars.gamestarted then + if not bridged.story_settings.gamestarted then return end local chunk = deepcopy(KoboldStoryChunk) @@ -804,11 +804,11 @@ return function(_python, _bridged) ---@return fun(): KoboldStoryChunk, table, nil function KoboldStory:reverse_iter() - local actions = 
koboldbridge.userstate == "genmod" and bridged.vars._actions or bridged.vars.actions + local actions = koboldbridge.userstate == "genmod" and bridged.vars._actions or bridged.story_settings.actions local nxt, iterator = _python.iter(_python.builtins.reversed(actions)) local last_run = false local function f() - if not bridged.vars.gamestarted or last_run then + if not bridged.story_settings.gamestarted or last_run then return end local chunk = deepcopy(KoboldStoryChunk) @@ -1038,7 +1038,7 @@ return function(_python, _bridged) ---@param t KoboldLib ---@return string function KoboldLib_getters.submission(t) - return bridged.vars.submission + return bridged.story_settings.submission end ---@param t KoboldLib @@ -1050,11 +1050,11 @@ return function(_python, _bridged) elseif type(v) ~= "string" then error("`KoboldLib.submission` must be a string; you attempted to set it to a " .. type(v)) return - elseif not bridged.vars.gamestarted and v == "" then + elseif not bridged.story_settings.gamestarted and v == "" then error("`KoboldLib.submission` must not be set to the empty string when the story is empty") return end - bridged.vars.submission = v + bridged.story_settings.submission = v end @@ -1099,7 +1099,7 @@ return function(_python, _bridged) ---@param t KoboldLib ---@return string function KoboldLib_getters.model(t) - return bridged.vars.model + return bridged.model_settings.model end ---@param t KoboldLib @@ -1135,7 +1135,7 @@ return function(_python, _bridged) ---@param t KoboldLib ---@return string function KoboldLib_getters.custmodpth(t) - return bridged.vars.custmodpth + return bridged.model_settings.custmodpth end ---@param t KoboldLib @@ -2012,7 +2012,7 @@ return function(_python, _bridged) koboldbridge.userstate = "genmod" if koboldbridge.genmod ~= nil then local _generated = deepcopy(koboldbridge.generated) - if not bridged.vars.nogenmod then + if not bridged.user_settings.nogenmod then r = koboldbridge.genmod() end setmetatable(koboldbridge.logits, nil) diff 
--git a/koboldai_settings.py b/koboldai_settings.py new file mode 100644 index 00000000..cb514786 --- /dev/null +++ b/koboldai_settings.py @@ -0,0 +1,492 @@ +from flask_socketio import emit, join_room, leave_room, rooms +import os +import re + +socketio = None + +def clean_var_for_emit(value): + if isinstance(value, KoboldStoryRegister): + return value.to_json() + elif isinstance(value, set): + return list(value) + else: + return value + +def process_variable_changes(classname, name, value, old_value): + #Special Case for KoboldStoryRegister + if isinstance(value, KoboldStoryRegister): + print("resetting") + socketio.emit("reset_story", {}, broadcast=True, room="UI_2") + for i in range(len(value.actions)): + socketio.emit("var_changed", {"classname": "actions", "name": "Selected Text", "old_value": None, "value": {"id": i, "text": value[i]}}, broadcast=True, room="UI_2") + socketio.emit("var_changed", {"classname": "actions", "name": "Options", "old_value": None, "value": {"id": i, "options": value.actions[i]['Options']}}, broadcast=True, room="UI_2") + else: + #print("{}: {} changed from {} to {}".format(classname, name, old_value, value)) + #if name == "Selected Text": + # print({"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}) + socketio.emit("var_changed", {"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}, broadcast=True, room="UI_2") + + +class settings(object): + def send_to_ui(self): + if socketio is not None: + for (name, value) in vars(self).items(): + if name not in self.local_only_variables and name[0] != "_": + process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, None) + + + +class model_settings(settings): + local_only_variables = ['badwordsids', 'apikey', '_class_init'] + settings_name = "model" + __class_initialized = False + def __init__(self): + self.model = "" # Model ID 
string chosen at startup + self.model_type = "" # Model Type (Automatically taken from the model config) + self.modelconfig = {} # Raw contents of the model's config.json, or empty dictionary if none found + self.custmodpth = "" # Filesystem location of custom model to run + self.max_length = 2048 # Maximum number of tokens to submit per action + self.ikmax = 3000 # Maximum number of characters to submit to InferKit + self.genamt = 80 # Amount of text for each action to generate + self.ikgen = 200 # Number of characters for InferKit to generate + self.rep_pen = 1.1 # Default generator repetition_penalty + self.rep_pen_slope = 0.7 # Default generator repetition penalty slope + self.rep_pen_range = 1024 # Default generator repetition penalty range + self.temp = 0.5 # Default generator temperature + self.top_p = 0.9 # Default generator top_p + self.top_k = 0 # Default generator top_k + self.top_a = 0.0 # Default generator top-a + self.tfs = 1.0 # Default generator tfs (tail-free sampling) + self.typical = 1.0 # Default generator typical sampling threshold + self.numseqs = 1 # Number of sequences to ask the generator to create + self.badwordsids = [] + self.fp32_model = False # Whether or not the most recently loaded HF model was in fp32 format + self.url = "https://api.inferkit.com/v1/models/standard/generate" # InferKit API URL + self.oaiurl = "" # OpenAI API URL + self.oaiengines = "https://api.openai.com/v1/engines" + self.colaburl = "" # Ngrok url for Google Colab mode + self.apikey = "" # API key to use for InferKit API calls + self.oaiapikey = "" # API key to use for OpenAI API calls + self.modeldim = -1 # Embedding dimension of your model (e.g. 
it's 4096 for GPT-J-6B and 2560 for GPT-Neo-2.7B) + self.sampler_order = [0, 1, 2, 3, 4, 5] + self.newlinemode = "n" + self.lazy_load = True # Whether or not to use torch_lazy_loader.py for transformers models in order to reduce CPU memory usage + self.revision = None + self.presets = [] # Holder for presets + self.selected_preset = "" + + #Must be at end of __init__ + self.__class_initialized = True + + def __setattr__(self, name, value): + old_value = getattr(self, name, None) + super().__setattr__(name, value) + if self.__class_initialized and name != '__class_initialized': + #Put variable change actions here + if name not in self.local_only_variables and name[0] != "_": + process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + + +class story_settings(settings): + local_only_variables = [] + settings_name = "story" + __class_initialized = False + def __init__(self): + self.lastact = "" # The last action received from the user + self.submission = "" # Same as above, but after applying input formatting + self.lastctx = "" # The last context submitted to the generator + self.gamestarted = False # Whether the game has started (disables UI elements) + self.gamesaved = True # Whether or not current game is saved + self.prompt = "" # Prompt + self.memory = "" # Text submitted to memory field + self.authornote = "" # Text submitted to Author's Note field + self.authornotetemplate = "[Author's note: <|>]" # Author's note template + self.setauthornotetemplate = self.authornotetemplate # Saved author's note template in settings + self.andepth = 3 # How far back in history to append author's note + self.actions = KoboldStoryRegister() # Actions submitted by user and AI + self.actions_metadata = {} # List of dictonaries, one dictonary for every action that contains information about the action like alternative options. + # Contains at least the same number of items as actions. 
Back action will remove an item from actions, but not actions_metadata + # Dictonary keys are: + # Selected Text: (text the user had selected. None when this is a newly generated action) + # Alternative Generated Text: {Text, Pinned, Previous Selection, Edited} + # + self.worldinfo = [] # List of World Info key/value objects + self.worldinfo_i = [] # List of World Info key/value objects sans uninitialized entries + self.worldinfo_u = {} # Dictionary of World Info UID - key/value pairs + self.wifolders_d = {} # Dictionary of World Info folder UID-info pairs + self.wifolders_l = [] # List of World Info folder UIDs + self.wifolders_u = {} # Dictionary of pairs of folder UID - list of WI UID + self.lua_edited = set() # Set of chunk numbers that were edited from a Lua generation modifier + self.lua_deleted = set() # Set of chunk numbers that were deleted from a Lua generation modifier + self.generated_tkns = 0 # If using a backend that supports Lua generation modifiers, how many tokens have already been generated, otherwise 0 + self.deletewi = None # Temporary storage for UID to delete + self.mode = "play" # Whether the interface is in play, memory, or edit mode + self.editln = 0 # Which line was last selected in Edit Mode + self.genseqs = [] # Temporary storage for generated sequences + self.recentback = False # Whether Back button was recently used without Submitting or Retrying after + self.recentrng = None # If a new random game was recently generated without Submitting after, this is the topic used (as a string), otherwise this is None + self.recentrngm = None # If a new random game was recently generated without Submitting after, this is the memory used (as a string), otherwise this is None + self.useprompt = False # Whether to send the full prompt with every submit action + self.chatmode = False + self.chatname = "You" + self.adventure = False + self.actionmode = 1 + self.dynamicscan = False + self.recentedit = False + + #Must be at end of __init__ + 
self.__class_initialized = True + + def __setattr__(self, name, value): + old_value = getattr(self, name, None) + super().__setattr__(name, value) + if self.__class_initialized and name != '__class_initialized': + #Put variable change actions here + if name not in self.local_only_variables and name[0] != "_": + process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + +class user_settings(settings): + local_only_variables = [] + settings_name = "user" + __class_initialized = False + def __init__(self): + self.wirmvwhtsp = False # Whether to remove leading whitespace from WI entries + self.widepth = 3 # How many historical actions to scan for WI hits + self.formatoptns = {'frmttriminc': True, 'frmtrmblln': False, 'frmtrmspch': False, 'frmtadsnsp': False, 'singleline': False} # Container for state of formatting options + self.importnum = -1 # Selection on import popup list + self.importjs = {} # Temporary storage for import data + self.loadselect = "" # Temporary storage for story filename to load + self.spselect = "" # Temporary storage for soft prompt filename to load + self.svowname = "" # Filename that was flagged for overwrite confirm + self.saveow = False # Whether or not overwrite confirm has been displayed + self.autosave = False # Whether or not to automatically save after each action + self.laststory = None # Filename (without extension) of most recent story JSON file we loaded + self.sid = "" # session id for the socketio client (request.sid) + self.username = "Default User" # Displayed Username + self.nopromptgen = False + self.rngpersist = False + self.nogenmod = False + self.debug = False # If set to true, will send debug information to the client for display + + #Must be at end of __init__ + self.__class_initialized = True + + def __setattr__(self, name, value): + old_value = getattr(self, name, None) + super().__setattr__(name, value) + if self.__class_initialized and name != '__class_initialized': + #Put 
variable change actions here + if name not in self.local_only_variables and name[0] != "_": + process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + + +class system_settings(settings): + local_only_variables = ['lua_state', 'lua_logname', 'lua_koboldbridge', 'lua_kobold', 'lua_koboldcore', 'regex_sl', 'acregex_ai', 'acregex_ui', 'comregex_ai', 'comregex_ui'] + settings_name = "system" + __class_initialized = False + def __init__(self): + self.noai = False # Runs the script without starting up the transformers pipeline + self.aibusy = False # Stops submissions while the AI is working + self.serverstarted = False # Whether or not the Flask server has started + self.lua_state = None # Lua state of the Lua scripting system + self.lua_koboldbridge = None # `koboldbridge` from bridge.lua + self.lua_kobold = None # `kobold` from` bridge.lua + self.lua_koboldcore = None # `koboldcore` from bridge.lua + self.lua_logname = ... # Name of previous userscript that logged to terminal + self.lua_running = False # Whether or not Lua is running (i.e. 
wasn't stopped due to an error) + self.abort = False # Whether or not generation was aborted by clicking on the submit button during generation + self.compiling = False # If using a TPU Colab, this will be set to True when the TPU backend starts compiling and then set to False again + self.checking = False # Whether or not we are actively checking to see if TPU backend is compiling or not + self.sp_changed = False # This gets set to True whenever a userscript changes the soft prompt so that check_for_sp_change() can alert the browser that the soft prompt has changed + self.spfilename = "" # Filename of soft prompt to load, or an empty string if not using a soft prompt + self.userscripts = [] # List of userscripts to load + self.last_userscripts = [] # List of previous userscript filenames from the previous time userscripts were send via usstatitems + self.corescript = "default.lua" # Filename of corescript to load + + self.gpu_device = 0 # Which PyTorch device to use when using pure GPU generation + self.savedir = os.getcwd()+"\\stories" + self.hascuda = False # Whether torch has detected CUDA on the system + self.usegpu = False # Whether to launch pipeline with GPU support + self.spselect = "" # Temporary storage for soft prompt filename to load + self.spmeta = None # Metadata of current soft prompt, or None if not using a soft prompt + self.sp = None # Current soft prompt tensor (as a NumPy array) + self.sp_length = 0 # Length of current soft prompt in tokens, or 0 if not using a soft prompt + self.has_genmod = False # Whether or not at least one loaded Lua userscript has a generation modifier + self.breakmodel = False # For GPU users, whether to use both system RAM and VRAM to conserve VRAM while offering speedup compared to CPU-only + self.bmsupported = False # Whether the breakmodel option is supported (GPT-Neo/GPT-J/XGLM/OPT only, currently) + self.nobreakmodel = False # Something specifically requested Breakmodel to be disabled (For example a models config) 
+ self.smandelete = False # Whether stories can be deleted from inside the browser + self.smanrename = False # Whether stories can be renamed from inside the browser + self.allowsp = False # Whether we are allowed to use soft prompts (by default enabled if we're using GPT-2, GPT-Neo or GPT-J) + self.regex_sl = re.compile(r'\n*(?<=.) *\n(.|\n)*') # Pattern for limiting the output to a single line + self.acregex_ai = re.compile(r'\n* *>(.|\n)*') # Pattern for matching adventure actions from the AI so we can remove them + self.acregex_ui = re.compile(r'^ *(>.*)$', re.MULTILINE) # Pattern for matching actions in the HTML-escaped story so we can apply colouring, etc (make sure to encase part to format in parentheses) + self.comregex_ai = re.compile(r'(?:\n<\|(?:.|\n)*?\|>(?=\n|$))|(?:<\|(?:.|\n)*?\|>\n?)') # Pattern for matching comments to remove them before sending them to the AI + self.comregex_ui = re.compile(r'(<\|(?:.|\n)*?\|>)') # Pattern for matching comments in the editor + self.host = False + self.flaskwebgui = False + self.welcome = False # Custom Welcome Text (False is default) + self.quiet = False # If set will suppress any story text from being printed to the console (will only be seen on the client web page) + self.use_colab_tpu = os.environ.get("COLAB_TPU_ADDR", "") != "" or os.environ.get("TPU_NAME", "") != "" # Whether or not we're in a Colab TPU instance or Kaggle TPU instance and are going to use the TPU rather than the CPU + self.aria2_port = 6799 #Specify the port on which aria2's RPC interface will be open if aria2 is installed (defaults to 6799) + + #Must be at end of __init__ + self.__class_initialized = True + + def __setattr__(self, name, value): + old_value = getattr(self, name, None) + super().__setattr__(name, value) + if self.__class_initialized and name != '__class_initialized': + #Put variable change actions here + if name not in self.local_only_variables and name[0] != "_": + 
process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + + +class KoboldStoryRegister(object): + def __init__(self, sequence=[]): + self.actions = {} + self.action_count = -1 + for item in sequence: + self.append(item) + + def __str__(self): + return "".join([x['Selected Text'] for ignore, x in sorted(self.actions.items())]) + + def __repr__(self): + return self.__str__() + + def __iter__(self): + self.itter = -1 + return self + + def __next__(self): + self.itter += 1 + if self.itter < len(self.actions): + return self.itter + else: + raise StopIteration + + def __getitem__(self, i): + return self.actions[i]["Selected Text"] + + def __setitem__(self, i, text): + if i in self.actions: + old_text = self.actions[i]["Selected Text"] + self.actions[i]["Selected Text"] = text + if "Options" in self.actions[i]: + for j in range(len(self.actions[i]["Options"])): + if self.actions[i]["Options"][j]["text"] == text: + del self.actions[i]["Options"][j] + if old_text != "": + self.actions[i]["Options"].append({"text": old_text, "Pinned": False, "Previous Selection": False, "Edited": True}) + else: + old_text = None + self.actions[i] = {"Selected Text": text, "Options": []} + process_variable_changes("actions", "Selected Text", {"id": i, "text": text}, {"id": i, "text": old_text}) + + def __len__(self): + return self.action_count if self.action_count >=0 else 0 + + def __reversed__(self): + return reversed(range(self.action_count+1)) + + def values(self): + return [self.actions[k]["Selected Text"] for k in self.actions] + + def to_json(self): + return {"action_count": self.action_count, "actions": self.actions} + + def load_json(self, json_data): + if type(json_data) == str: + import json + json_data = json.loads(json_data) + #JSON forces keys to be strings, so let's fix that + temp = {} + for item in json_data['actions']: + temp[int(item)] = json_data['actions'][item] + process_variable_changes("actions", "Selected Text", {"id": 
int(item), "text": json_data['actions'][item]["Selected Text"]}, None) + if "Options" in json_data['actions'][item]: + process_variable_changes("actions", "Options", {"id": int(item), "options": json_data['actions'][item]["Options"]}, None) + + self.action_count = json_data['action_count'] + self.actions = temp + + def get_action(self, action_id): + if action_id not in actions: + return None + if "Selected Text" not in self.actions[action_id]: + return None + return self.actions[action_id]["Selected Text"] + + def get_action_list(self): + return [x['Selected Text'] for ignore, x in sorted(self.actions.items()) if x['Selected Text'] is not None] + + def append(self, text): + self.clear_unused_options() + self.action_count+=1 + if self.action_count in self.actions: + self.actions[self.action_count]["Selected Text"] = text + print("looking for old option that matches") + for item in self.actions[self.action_count]["Options"]: + if item['text'] == text: + print("found it") + old_options = self.actions[self.action_count]["Options"] + del item + print("old: ") + print(old_options) + print() + print("New: ") + print(self.actions[self.action_count]["Options"]) + process_variable_changes("actions", "Options", {"id": self.action_count, "options": self.actions[self.action_count]["Options"]}, {"id": self.action_count, "options": old_options}) + + else: + self.actions[self.action_count] = {"Selected Text": text, "Options": []} + process_variable_changes("actions", "Selected Text", {"id": self.action_count, "text": text}, None) + + def append_options(self, option_list): + if self.action_count+1 in self.actions: + print("1") + old_options = self.actions[self.action_count+1]["Options"] + self.actions[self.action_count+1]['Options'].extend([{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False} for x in option_list]) + for item in option_list: + process_variable_changes("actions", "Options", {"id": self.action_count+1, "options": 
self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": old_options}) + else: + print("2") + old_options = None + self.actions[self.action_count+1] = {"Selected Text": "", "Options": [{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False} for x in option_list]} + process_variable_changes("actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": old_options}) + + def clear_unused_options(self, pointer=None): + new_options = [] + old_options = None + if pointer is None: + pointer = self.action_count+1 + if pointer in self.actions: + old_options = self.actions[pointer]["Options"] + self.actions[pointer]["Options"] = [x for x in self.actions[pointer]["Options"] if x["Pinned"] or x["Previous Selection"] or x["Edited"]] + new_options = self.actions[pointer]["Options"] + process_variable_changes("actions", "Options", {"id": pointer, "options": new_options}, {"id": pointer, "options": old_options}) + + def set_pin(self, action_step, option_number): + if action_step in self.actions: + if option_number < len(self.actions[action_step]['Options']): + old_options = self.actions[action_step]["Options"] + self.actions[action_step]['Options'][option_number]['Pinned'] = True + process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) + + def unset_pin(self, action_step, option_number): + if action_step in self.actions: + old_options = self.actions[action_step]["Options"] + if option_number < len(self.actions[action_step]['Options']): + self.actions[action_step]['Options'][option_number]['Pinned'] = False + process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) + + def use_option(self, action_step, option_number): + if action_step in 
self.actions: + old_options = self.actions[action_step]["Options"] + old_text = self.actions[action_step]["Selected Text"] + if option_number < len(self.actions[action_step]['Options']): + self.actions[action_step]["Selected Text"] = self.actions[action_step]['Options'][option_number]['text'] + del self.actions[action_step]['Options'][option_number] + process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) + process_variable_changes("actions", "Selected Text", {"id": action_step, "text": self.actions[action_step]["Selected Text"]}, {"id": action_step, "Selected Text": old_text}) + + def delete_action(self, action_id): + if action_id in self.actions: + old_options = self.actions[action_id]["Options"] + old_text = self.actions[action_id]["Selected Text"] + self.actions[action_id]["Options"].append({"text": self.actions[action_id]["Selected Text"], "Pinned": False, "Previous Selection": True, "Edited": False}) + self.actions[action_id]["Selected Text"] = "" + self.action_count -= 1 + process_variable_changes("actions", "Selected Text", {"id": action_id, "text": None}, {"id": action_id, "text": old_text}) + process_variable_changes("actions", "Options", {"id": action_id, "options": self.actions[action_id]["Options"]}, {"id": action_id, "options": old_options}) + + def pop(self): + if self.action_count >= 0: + text = self.actions[self.action_count] + self.delete_action(self.action_count) + process_variable_changes("actions", "Selected Text", {"id": self.action_count, "text": None}, {"id": self.action_count, "text": text}) + return text + else: + return None + + def get_first_key(self): + if self.action_count >= 0: + text = "" + i = 0 + while text == "" and i <= self.action_count: + if "selected Text" in self.actions[i]: + text = self.actions[i]["Selected Text"] + i+=1 + return text + + def get_last_key(self): + if self.action_count >= 0: + return self.action_count + 
else: + return 0 + + def get_last_item(self): + if self.action_count >= 0: + return self.actions[self.action_count] + + def increment_id(self): + self.action_count += 1 + + def get_next_id(self): + return self.action_count+1 + + def set_next_id(self, x: int): + self.action_count = x + + def get_options(self, action_id): + if action_id in self.actions: + return self.actions[action_id]["Options"] + else: + return [] + + def get_current_options(self): + if self.action_count+1 in self.actions: + return self.actions[self.action_count+1]["Options"] + else: + return [] + + def get_current_options_no_edits(self): + if self.action_count+1 in self.actions: + return [x for x in self.actions[self.action_count+1]["Options"] if x["Edited"] == False] + else: + return [] + + def get_pins(self, action_id): + if action_id in self.actions: + return [x for x in self.actions[action_id]["Options"] if x["Pinned"]] + else: + return [] + + def get_prev_selections(self, action_id): + if action_id in self.actions: + return [x for x in self.actions[action_id]["Options"] if x["Previous Selection"]] + else: + return [] + + def get_edits(self, action_id): + if action_id in self.actions: + return [x for x in self.actions[action_id]["Options"] if x["Edited"]] + else: + return [] + + def get_redo_options(self): + pointer = max(self.actions) + while pointer > self.action_count: + if pointer in self.actions: + for item in self.actions[pointer]["Options"]: + if item["Previous Selection"] or item["Pinned"]: + return self.actions[pointer]["Options"] + pointer-=1 + return [] + + + +badwordsids_default = [[13460], [6880], [50256], [42496], [4613], [17414], [22039], [16410], [27], [29], [38430], [37922], [15913], [24618], [28725], [58], [47175], [36937], [26700], [12878], [16471], [37981], [5218], [29795], [13412], [45160], [3693], [49778], [4211], [20598], [36475], [33409], [44167], [32406], [29847], [29342], [42669], [685], [25787], [7359], [3784], [5320], [33994], [33490], [34516], [43734], [17635], 
[24293], [9959], [23785], [21737], [28401], [18161], [26358], [32509], [1279], [38155], [18189], [26894], [6927], [14610], [23834], [11037], [14631], [26933], [46904], [22330], [25915], [47934], [38214], [1875], [14692], [41832], [13163], [25970], [29565], [44926], [19841], [37250], [49029], [9609], [44438], [16791], [17816], [30109], [41888], [47527], [42924], [23984], [49074], [33717], [31161], [49082], [30138], [31175], [12240], [14804], [7131], [26076], [33250], [3556], [38381], [36338], [32756], [46581], [17912], [49146]] # Tokenized array of badwords used to prevent AI artifacting +badwordsids_neox = [[0], [1], [44162], [9502], [12520], [31841], [36320], [49824], [34417], [6038], [34494], [24815], [26635], [24345], [3455], [28905], [44270], [17278], [32666], [46880], [7086], [43189], [37322], [17778], [20879], [49821], [3138], [14490], [4681], [21391], [26786], [43134], [9336], [683], [48074], [41256], [19181], [29650], [28532], [36487], [45114], [46275], [16445], [15104], [11337], [1168], [5647], [29], [27482], [44965], [43782], [31011], [42944], [47389], [6334], [17548], [38329], [32044], [35487], [2239], [34761], [7444], [1084], [12399], [18990], [17636], [39083], [1184], [35830], [28365], [16731], [43467], [47744], [1138], [16079], [40116], [45564], [18297], [42368], [5456], [18022], [42696], [34476], [23505], [23741], [39334], [37944], [45382], [38709], [33440], [26077], [43600], [34418], [36033], [6660], [48167], [48471], [15775], [19884], [41533], [1008], [31053], [36692], [46576], [20095], [20629], [31759], [46410], [41000], [13488], [30952], [39258], [16160], [27655], [22367], [42767], [43736], [49694], [13811], [12004], [46768], [6257], [37471], [5264], [44153], [33805], [20977], [21083], [25416], [14277], [31096], [42041], [18331], [33376], [22372], [46294], [28379], [38475], [1656], [5204], [27075], [50001], [16616], [11396], [7748], [48744], [35402], [28120], [41512], [4207], [43144], [14767], [15640], [16595], [41305], [44479], [38958], [18474], 
[22734], [30522], [46267], [60], [13976], [31830], [48701], [39822], [9014], [21966], [31422], [28052], [34607], [2479], [3851], [32214], [44082], [45507], [3001], [34368], [34758], [13380], [38363], [4299], [46802], [30996], [12630], [49236], [7082], [8795], [5218], [44740], [9686], [9983], [45301], [27114], [40125], [1570], [26997], [544], [5290], [49193], [23781], [14193], [40000], [2947], [43781], [9102], [48064], [42274], [18772], [49384], [9884], [45635], [43521], [31258], [32056], [47686], [21760], [13143], [10148], [26119], [44308], [31379], [36399], [23983], [46694], [36134], [8562], [12977], [35117], [28591], [49021], [47093], [28653], [29013], [46468], [8605], [7254], [25896], [5032], [8168], [36893], [38270], [20499], [27501], [34419], [29547], [28571], [36586], [20871], [30537], [26842], [21375], [31148], [27618], [33094], [3291], [31789], [28391], [870], [9793], [41361], [47916], [27468], [43856], [8850], [35237], [15707], [47552], [2730], [41449], [45488], [3073], [49806], [21938], [24430], [22747], [20924], [46145], [20481], [20197], [8239], [28231], [17987], [42804], [47269], [29972], [49884], [21382], [46295], [36676], [34616], [3921], [26991], [27720], [46265], [654], [9855], [40354], [5291], [34904], [44342], [2470], [14598], [880], [19282], [2498], [24237], [21431], [16369], [8994], [44524], [45662], [13663], [37077], [1447], [37786], [30863], [42854], [1019], [20322], [4398], [12159], [44072], [48664], [31547], [18736], [9259], [31], [16354], [21810], [4357], [37982], [5064], [2033], [32871], [47446], [62], [22158], [37387], [8743], [47007], [17981], [11049], [4622], [37916], [36786], [35138], [29925], [14157], [18095], [27829], [1181], [22226], [5709], [4725], [30189], [37014], [1254], [11380], [42989], [696], [24576], [39487], [30119], [1092], [8088], [2194], [9899], [14412], [21828], [3725], [13544], [5180], [44679], [34398], [3891], [28739], [14219], [37594], [49550], [11326], [6904], [17266], [5749], [10174], [23405], [9955], [38271], 
[41018], [13011], [48392], [36784], [24254], [21687], [23734], [5413], [41447], [45472], [10122], [17555], [15830], [47384], [12084], [31350], [47940], [11661], [27988], [45443], [905], [49651], [16614], [34993], [6781], [30803], [35869], [8001], [41604], [28118], [46462], [46762], [16262], [17281], [5774], [10943], [5013], [18257], [6750], [4713], [3951], [11899], [38791], [16943], [37596], [9318], [18413], [40473], [13208], [16375]] +badwordsids_opt = [[44717], [46613], [48513], [49923], [50185], [48755], [8488], [43303], [49659], [48601], [49817], [45405], [48742], [49925], [47720], [11227], [48937], [48784], [50017], [42248], [49310], [48082], [49895], [50025], [49092], [49007], [8061], [44226], [0], [742], [28578], [15698], [49784], [46679], [39365], [49281], [49609], [48081], [48906], [46161], [48554], [49670], [48677], [49721], [49632], [48610], [48462], [47457], [10975], [46077], [28696], [48709], [43839], [49798], [49154], [48203], [49625], [48395], [50155], [47161], [49095], [48833], [49420], [49666], [48443], [22176], [49242], [48651], [49138], [49750], [40389], [48021], [21838], [49070], [45333], [40862], [1], [49915], [33525], [49858], [50254], [44403], [48992], [48872], [46117], [49853], [47567], [50206], [41552], [50068], [48999], [49703], [49940], [49329], [47620], [49868], [49962], [2], [44082], [50236], [31274], [50260], [47052], [42645], [49177], [17523], [48691], [49900], [49069], [49358], [48794], [47529], [46479], [48457], [646], [49910], [48077], [48935], [46386], [48902], [49151], [48759], [49803], [45587], [48392], [47789], [48654], [49836], [49230], [48188], [50264], [46844], [44690], [48505], [50161], [27779], [49995], [41833], [50154], [49097], [48520], [50018], [8174], [50084], [49366], [49526], [50193], [7479], [49982], [3]] \ No newline at end of file diff --git a/test_aiserver.py b/test_aiserver.py index 855b71f5..49b91a7c 100644 --- a/test_aiserver.py +++ b/test_aiserver.py @@ -125,7 +125,7 @@ def 
test_load_model_from_web_ui(client_data, model, expected_load_options): assert response['url'] == expected_load_options['url'] #Now send the load - socketio_client.emit('message',{'cmd': 'load_model', 'use_gpu': True, 'key': '', 'gpu_layers': '', 'url': '', 'online_model': ''}) + socketio_client.emit('message',{'cmd': 'load_model', 'use_gpu': True, 'key': '', 'gpu_layers': str(expected_load_options['layer_count']), 'disk_layers': '0', 'url': '', 'online_model': ''}) #wait until the game state turns back to start state = 'wait' start_time = time.time() diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py index a0e017d3..5b799c04 100644 --- a/tpu_mtj_backend.py +++ b/tpu_mtj_backend.py @@ -547,7 +547,7 @@ class PenalizingCausalTransformer(CausalTransformer): compiling_callback() numseqs = numseqs_aux.shape[0] # These are the tokens that we don't want the AI to ever write - self.badwords = jnp.array(vars.badwordsids).squeeze() + self.badwords = jnp.array(model_settings.badwordsids).squeeze() @hk.transform def generate_sample(context, ctx_length): # Give the initial context to the transformer @@ -1025,8 +1025,8 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo elif "eos_token_id" in kwargs: pad_token_id = kwargs["eos_token_id"] - if not hasattr(vars, "sampler_order") or not vars.sampler_order: - vars.sampler_order = utils.default_sampler_order.copy() + if not hasattr(vars, "sampler_order") or not model_settings.sampler_order: + model_settings.sampler_order = utils.default_sampler_order.copy() default_params = { "compat": "j", @@ -1045,7 +1045,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo } params = kwargs - if vars.model == "TPUMeshTransformerGPTNeoX": + if model_settings.model == "TPUMeshTransformerGPTNeoX": default_params = { "compat": "neox", "layers": 44, @@ -1064,9 +1064,9 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo # Try to convert HF config.json to 
MTJ config if hf_checkpoint: - spec_path = os.path.join("maps", vars.model_type + ".json") + spec_path = os.path.join("maps", model_settings.model_type + ".json") if not os.path.isfile(spec_path): - raise NotImplementedError(f"Unsupported model type {repr(vars.model_type)}") + raise NotImplementedError(f"Unsupported model type {repr(model_settings.model_type)}") with open(spec_path) as f: lazy_load_spec = json.load(f) @@ -1117,7 +1117,7 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo params[param] = default_params[param] # Load tokenizer - if vars.model == "TPUMeshTransformerGPTNeoX": + if model_settings.model == "TPUMeshTransformerGPTNeoX": tokenizer = Tokenizer.from_file(os.path.join(path, "20B_tokenizer.json")) def new_encode(old_encode): def encode(s, *args, **kwargs): @@ -1165,19 +1165,19 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo global badwords # These are the tokens that we don't want the AI to ever write - badwords = jnp.array(vars.badwordsids).squeeze() + badwords = jnp.array(model_settings.badwordsids).squeeze() if not path.endswith("/"): path += "/" network = PenalizingCausalTransformer(params, dematerialized=True) - if not hf_checkpoint and vars.model != "TPUMeshTransformerGPTNeoX": + if not hf_checkpoint and model_settings.model != "TPUMeshTransformerGPTNeoX": network.state = read_ckpt_lowmem(network.state, path, devices.shape[1]) #network.state = network.move_xmap(network.state, np.zeros(cores_per_replica)) return - if vars.model == "TPUMeshTransformerGPTNeoX": + if model_settings.model == "TPUMeshTransformerGPTNeoX": print("\n\n\nThis model has ", f"{hk.data_structures.tree_size(network.state['params']):,d}".replace(",", " "), " parameters.\n") read_neox_checkpoint(network.state, path, params) return @@ -1322,58 +1322,58 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo f.close() callback.nested = False - if 
os.path.isdir(vars.model.replace('/', '_')): + if os.path.isdir(model_settings.model.replace('/', '_')): import shutil - shutil.move(vars.model.replace('/', '_'), "models/{}".format(vars.model.replace('/', '_'))) + shutil.move(model_settings.model.replace('/', '_'), "models/{}".format(model_settings.model.replace('/', '_'))) print("\n", flush=True) with torch_lazy_loader.use_lazy_torch_load(callback=callback, dematerialized_modules=True): - if(os.path.isdir(vars.custmodpth)): + if(os.path.isdir(model_settings.custmodpth)): try: - tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") except Exception as e: pass try: - tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + model = AutoModelForCausalLM.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") except Exception as e: - model = GPTNeoForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") - elif(os.path.isdir("models/{}".format(vars.model.replace('/', '_')))): + model = 
GPTNeoForCausalLM.from_pretrained(model_settings.custmodpth, revision=model_settings.revision, cache_dir="cache") + elif(os.path.isdir("models/{}".format(model_settings.model.replace('/', '_')))): try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache") except Exception as e: pass try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + model = AutoModelForCausalLM.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), revision=model_settings.revision, cache_dir="cache") except Exception as e: - model = GPTNeoForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + model = GPTNeoForCausalLM.from_pretrained("models/{}".format(model_settings.model.replace('/', '_')), 
revision=model_settings.revision, cache_dir="cache") else: try: - tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache") except Exception as e: pass try: - tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=model_settings.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") + model = AutoModelForCausalLM.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache") except Exception as e: - model = GPTNeoForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") + model = GPTNeoForCausalLM.from_pretrained(model_settings.model, revision=model_settings.revision, cache_dir="cache") #network.state = network.move_xmap(network.state, np.zeros(cores_per_replica)) diff --git a/utils.py b/utils.py index 430e729b..5cb199fa 100644 --- a/utils.py +++ b/utils.py @@ -94,7 +94,7 @@ def replaceblanklines(txt): # #==================================================================# def removespecialchars(txt, vars=None): - if vars is None or vars.actionmode == 0: + if vars is None or story_settings.actionmode == 0: txt = re.sub(r"[#/@%<>{}+=~|\^]", "", txt) else: txt 
= re.sub(r"[#/@%{}+=~|\^]", "", txt) @@ -105,33 +105,33 @@ def removespecialchars(txt, vars=None): #==================================================================# def addsentencespacing(txt, vars): # Get last character of last action - if(len(vars.actions) > 0): - if(len(vars.actions[vars.actions.get_last_key()]) > 0): - action = vars.actions[vars.actions.get_last_key()] + if(len(story_settings.actions) > 0): + if(len(story_settings.actions[story_settings.actions.get_last_key()]) > 0): + action = story_settings.actions[story_settings.actions.get_last_key()] lastchar = action[-1] if len(action) else "" else: # Last action is blank, this should never happen, but # since it did let's bail out. return txt else: - action = vars.prompt + action = story_settings.prompt lastchar = action[-1] if len(action) else "" if(lastchar == "." or lastchar == "!" or lastchar == "?" or lastchar == "," or lastchar == ";" or lastchar == ":"): txt = " " + txt return txt def singlelineprocessing(txt, vars): - txt = vars.regex_sl.sub('', txt) - if(len(vars.actions) > 0): - if(len(vars.actions[vars.actions.get_last_key()]) > 0): - action = vars.actions[vars.actions.get_last_key()] + txt = system_settings.regex_sl.sub('', txt) + if(len(story_settings.actions) > 0): + if(len(story_settings.actions[story_settings.actions.get_last_key()]) > 0): + action = story_settings.actions[story_settings.actions.get_last_key()] lastchar = action[-1] if len(action) else "" else: # Last action is blank, this should never happen, but # since it did let's bail out. 
return txt else: - action = vars.prompt + action = story_settings.prompt lastchar = action[-1] if len(action) else "" if(lastchar != "\n"): txt = txt + "\n" @@ -149,14 +149,14 @@ def cleanfilename(filename): # Newline substitution for fairseq models #==================================================================# def encodenewlines(txt): - if(vars.newlinemode == "s"): + if(model_settings.newlinemode == "s"): return txt.replace('\n', "") return txt def decodenewlines(txt): - if(vars.newlinemode == "s"): + if(model_settings.newlinemode == "s"): return txt.replace("", '\n') - if(vars.newlinemode == "ns"): + if(model_settings.newlinemode == "ns"): return txt.replace("", '') return txt @@ -253,9 +253,9 @@ def aria2_hook(pretrained_model_name_or_path: str, force_download=False, cache_d with tempfile.NamedTemporaryFile("w+b", delete=False) as f: f.write(aria2_config) f.flush() - p = subprocess.Popen(["aria2c", "-x", "10", "-s", "10", "-j", "10", "--enable-rpc=true", f"--rpc-secret={secret}", "--rpc-listen-port", str(vars.aria2_port), "--disable-ipv6", "--file-allocation=trunc", "--allow-overwrite", "--auto-file-renaming=false", "-d", _cache_dir, "-i", f.name, "-U", transformers.file_utils.http_user_agent(user_agent)] + (["-c"] if not force_download else []) + ([f"--header='Authorization: Bearer {token}'"] if use_auth_token else []), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + p = subprocess.Popen(["aria2c", "-x", "10", "-s", "10", "-j", "10", "--enable-rpc=true", f"--rpc-secret={secret}", "--rpc-listen-port", str(system_settings.aria2_port), "--disable-ipv6", "--file-allocation=trunc", "--allow-overwrite", "--auto-file-renaming=false", "-d", _cache_dir, "-i", f.name, "-U", transformers.file_utils.http_user_agent(user_agent)] + (["-c"] if not force_download else []) + ([f"--header='Authorization: Bearer {token}'"] if use_auth_token else []), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) while p.poll() is None: - r = 
s.post(f"http://localhost:{vars.aria2_port}/jsonrpc", json={"jsonrpc": "2.0", "id": "kai", "method": "aria2.tellActive", "params": [f"token:{secret}"]}).json()["result"] + r = s.post(f"http://localhost:{system_settings.aria2_port}/jsonrpc", json={"jsonrpc": "2.0", "id": "kai", "method": "aria2.tellActive", "params": [f"token:{secret}"]}).json()["result"] if not r: s.close() if bar is not None: From cd64c43f0e7e77b7db1c8bef1f4404983d397ca1 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 22 Jun 2022 12:12:52 -0400 Subject: [PATCH 0002/1297] Fix for action length --- koboldai_settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/koboldai_settings.py b/koboldai_settings.py index cb514786..b5c4e1b5 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -285,7 +285,7 @@ class KoboldStoryRegister(object): process_variable_changes("actions", "Selected Text", {"id": i, "text": text}, {"id": i, "text": old_text}) def __len__(self): - return self.action_count if self.action_count >=0 else 0 + return self.action_count+1 if self.action_count >=0 else 0 def __reversed__(self): return reversed(range(self.action_count+1)) From 83c0b9ee1ecd00faba58eb302be3d9886404d9af Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 22 Jun 2022 14:13:44 -0400 Subject: [PATCH 0003/1297] Vars Migration Fix for back/redo Fix for pytest for back/redo and model loading with disk caching --- aiserver.py | 27 ++++++++++++--------------- koboldai_settings.py | 7 ++----- pytest.ini | 2 +- test_aiserver.py | 12 +++++++++--- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/aiserver.py b/aiserver.py index 02e010c1..a732d371 100644 --- a/aiserver.py +++ b/aiserver.py @@ -1401,25 +1401,22 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal args.breakmodel_disklayers = int(disk_layers) #We need to wipe out the existing model and refresh the cuda cache - #Show what's in VRAM - import gc model = None generator = None model_config = 
None + for tensor in gc.get_objects(): + try: + if torch.is_tensor(tensor): + with torch.no_grad(): + tensor.set_(torch.tensor((), device=tensor.device, dtype=tensor.dtype)) + except: + pass + gc.collect() try: with torch.no_grad(): torch.cuda.empty_cache() except: pass - - for obj in gc.get_objects(): - try: - if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)): - del obj - gc.collect() - torch.cuda.empty_cache() - except: - pass #Reload our badwords model_settings.badwordsids = koboldai_settings.badwordsids_default @@ -3536,7 +3533,7 @@ def actionback(): story_settings.recentback = True remove_story_chunk(last_key + 1) #for the redo to not get out of whack, need to reset the max # in the actions sequence - story_settings.actions.set_next_id(last_key) + #story_settings.actions.set_next_id(last_key) success = True elif(len(story_settings.genseqs) == 0): emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}) @@ -3563,10 +3560,10 @@ def actionredo(): restore_id+=1 if restore_id not in story_settings.actions_metadata: return - else: - story_settings.actions.set_next_id(restore_id) + #else: + #print("???") + #story_settings.actions.set_next_id(restore_id) - if restore_id in story_settings.actions_metadata: genout = [{"generated_text": item['Text']} for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]==True)] if len(genout) > 0: diff --git a/koboldai_settings.py b/koboldai_settings.py index b5c4e1b5..c2553410 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -261,7 +261,7 @@ class KoboldStoryRegister(object): def __next__(self): self.itter += 1 - if self.itter < len(self.actions): + if self.itter <= self.action_count: return self.itter else: raise StopIteration @@ -421,10 +421,7 @@ class KoboldStoryRegister(object): return text def get_last_key(self): - if self.action_count >= 0: - return self.action_count - else: - return 0 + return self.action_count def 
get_last_item(self): if self.action_count >= 0: diff --git a/pytest.ini b/pytest.ini index c930ba37..f7264939 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,2 +1,2 @@ [pytest] -addopts = --ignore=miniconda3 --ignore=runtime --html=unit_test_report.html --self-contained-html -v \ No newline at end of file +addopts = --ignore=miniconda3 --ignore=runtime --html=unit_test_report.html --self-contained-html -vv \ No newline at end of file diff --git a/test_aiserver.py b/test_aiserver.py index 49b91a7c..b9bf4606 100644 --- a/test_aiserver.py +++ b/test_aiserver.py @@ -208,11 +208,17 @@ def test_back_redo(client_data): response = socketio_client.get_received()[0]['args'][0] assert response == {'cmd': 'errmsg', 'data': 'Cannot delete the prompt.'} socketio_client.emit('message',{'cmd': 'redo', 'data': ''}) - socketio_client.emit('message',{'cmd': 'redo', 'data': ''}) + response = socketio_client.get_received() + assert response == [{'name': 'from_server', 'args': [{'cmd': 'updatescreen', 'gamestarted': True, 'data': 'Niko the kobold stalked carefully down the alley, his small scaly figure obscured by a dusky cloak that fluttered lightly in the cold winter breeze. Holding up his tail to keep it from dragging in the dirty snow that covered the cobblestone, he waited patiently for the butcher to turn his attention from his stall so that he could pilfer his next meal: a tender-looking chicken. He crouched just slightly as he neared the stall to ensure that no one was watching, not that anyone would be dumb enough to hassle a small kobold. 
What else was there for a lowly kobold to'}], 'namespace': '/'}, + {'name': 'from_server', 'args': [{'cmd': 'texteffect', 'data': 1}], 'namespace': '/'}] socketio_client.emit('message',{'cmd': 'redo', 'data': ''}) response = socketio_client.get_received() - assert response == [{'name': 'from_server', 'args': [{'cmd': 'updatescreen', 'gamestarted': True, 'data': 'Niko the kobold stalked carefully down the alley, his small scaly figure obscured by a dusky cloak that fluttered lightly in the cold winter breeze. Holding up his tail to keep it from dragging in the dirty snow that covered the cobblestone, he waited patiently for the butcher to turn his attention from his stall so that he could pilfer his next meal: a tender-looking chicken. He crouched just slightly as he neared the stall to ensure that no one was watching, not that anyone would be dumb enough to hassle a small kobold. What else was there for a lowly kobold to'}], 'namespace': '/'}, {'name': 'from_server', 'args': [{'cmd': 'texteffect', 'data': 1}], 'namespace': '/'}, {'name': 'from_server', 'args': [{'cmd': 'updatechunk', 'data': {'index': 2, 'html': ' do in a city? All that Niko needed to know was'}}], 'namespace': '/'}, {'name': 'from_server', 'args': [{'cmd': 'texteffect', 'data': 2}], 'namespace': '/'}, {'name': 'from_server', 'args': [{'cmd': 'updatechunk', 'data': {'index': 3, 'html': ' where to find the chicken and then how to make off with it.

A soft thud caused Niko to quickly lift his head. Standing behind the stall where the butcher had been cutting his chicken,
'}}], 'namespace': '/'}, {'name': 'from_server', 'args': [{'cmd': 'texteffect', 'data': 3}], 'namespace': '/'}] - + assert response == [{'name': 'from_server', 'args': [{'cmd': 'updatechunk', 'data': {'index': 2, 'html': ' do in a city? All that Niko needed to know was'}}], 'namespace': '/'}, + {'name': 'from_server', 'args': [{'cmd': 'texteffect', 'data': 2}], 'namespace': '/'}] + socketio_client.emit('message',{'cmd': 'redo', 'data': ''}) + response = socketio_client.get_received() + assert response == [{'name': 'from_server', 'args': [{'cmd': 'updatechunk', 'data': {'index': 3, 'html': ' where to find the chicken and then how to make off with it.

A soft thud caused Niko to quickly lift his head. Standing behind the stall where the butcher had been cutting his chicken,
'}}], 'namespace': '/'}, + {'name': 'from_server', 'args': [{'cmd': 'texteffect', 'data': 3}], 'namespace': '/'}] From a4bed76d2e4171f6ae766d6503d0b2f85416e8a5 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 22 Jun 2022 14:26:37 -0400 Subject: [PATCH 0004/1297] Merge United --- aiserver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aiserver.py b/aiserver.py index a732d371..f8de58ff 100644 --- a/aiserver.py +++ b/aiserver.py @@ -1880,7 +1880,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal shutil.rmtree("cache/") if(model_settings.badwordsids is koboldai_settings.badwordsids_default and model_settings.model_type not in ("gpt2", "gpt_neo", "gptj")): - model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] + model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] if model_settings.newlinemode != "s" or str(k) != ""] patch_causallm(model) @@ -2047,7 +2047,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal model_settings.modeldim = int(tpu_mtj_backend.params.get("d_embed", tpu_mtj_backend.params["d_model"])) tokenizer = tpu_mtj_backend.tokenizer if(model_settings.badwordsids is koboldai_settings.badwordsids_default and model_settings.model_type not in ("gpt2", "gpt_neo", "gptj")): - model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] + model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] if model_settings.newlinemode != "s" or str(k) != ""] else: loadsettings() From 86553d329cdd6afb3d840db8bf9485cf2f66c214 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 22 Jun 2022 14:26:37 -0400 Subject: [PATCH 0005/1297] Merge United --- aiserver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aiserver.py b/aiserver.py index 
a732d371..fce0e9ab 100644 --- a/aiserver.py +++ b/aiserver.py @@ -1880,7 +1880,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal shutil.rmtree("cache/") if(model_settings.badwordsids is koboldai_settings.badwordsids_default and model_settings.model_type not in ("gpt2", "gpt_neo", "gptj")): - model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] + model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]") if model_settings.newlinemode != "s" or str(k) != ""] patch_causallm(model) @@ -2047,7 +2047,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal model_settings.modeldim = int(tpu_mtj_backend.params.get("d_embed", tpu_mtj_backend.params["d_model"])) tokenizer = tpu_mtj_backend.tokenizer if(model_settings.badwordsids is koboldai_settings.badwordsids_default and model_settings.model_type not in ("gpt2", "gpt_neo", "gptj")): - model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]")] + model_settings.badwordsids = [[v] for k, v in tokenizer.get_vocab().items() if any(c in str(k) for c in "<>[]") if model_settings.newlinemode != "s" or str(k) != ""] else: loadsettings() From b0ac4581de1c0f92b1b5e5d80a08a1c028dd6992 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 22 Jun 2022 18:39:09 -0400 Subject: [PATCH 0006/1297] UI v2 Initial Commit --- aiserver.py | 70 +- gensettings.py | 195 ++- static/koboldai.css | 529 +++++++ static/koboldai.js | 256 +++ .../open-iconic/css/open-iconic-bootstrap.css | 952 +++++++++++ .../css/open-iconic-bootstrap.less | 960 +++++++++++ .../css/open-iconic-bootstrap.min.css | 1 + .../css/open-iconic-bootstrap.scss | 958 +++++++++++ .../css/open-iconic-bootstrap.styl | 954 +++++++++++ .../css/open-iconic-foundation.css | 1395 ++++++++++++++++ .../css/open-iconic-foundation.less | 1397 ++++++++++++++++ 
.../css/open-iconic-foundation.min.css | 1 + .../css/open-iconic-foundation.scss | 1398 +++++++++++++++++ .../css/open-iconic-foundation.styl | 1392 ++++++++++++++++ static/open-iconic/css/open-iconic.css | 511 ++++++ static/open-iconic/css/open-iconic.less | 962 ++++++++++++ static/open-iconic/css/open-iconic.min.css | 1 + static/open-iconic/css/open-iconic.scss | 963 ++++++++++++ static/open-iconic/css/open-iconic.styl | 733 +++++++++ static/open-iconic/fonts/open-iconic.eot | Bin 0 -> 28196 bytes static/open-iconic/fonts/open-iconic.otf | Bin 0 -> 20996 bytes static/open-iconic/fonts/open-iconic.svg | 543 +++++++ static/open-iconic/fonts/open-iconic.ttf | Bin 0 -> 28028 bytes static/open-iconic/fonts/open-iconic.woff | Bin 0 -> 14984 bytes templates/index_new.html | 63 + templates/settings flyout.html | 41 + templates/settings item.html | 39 + 27 files changed, 14277 insertions(+), 37 deletions(-) create mode 100644 static/koboldai.css create mode 100644 static/koboldai.js create mode 100644 static/open-iconic/css/open-iconic-bootstrap.css create mode 100644 static/open-iconic/css/open-iconic-bootstrap.less create mode 100644 static/open-iconic/css/open-iconic-bootstrap.min.css create mode 100644 static/open-iconic/css/open-iconic-bootstrap.scss create mode 100644 static/open-iconic/css/open-iconic-bootstrap.styl create mode 100644 static/open-iconic/css/open-iconic-foundation.css create mode 100644 static/open-iconic/css/open-iconic-foundation.less create mode 100644 static/open-iconic/css/open-iconic-foundation.min.css create mode 100644 static/open-iconic/css/open-iconic-foundation.scss create mode 100644 static/open-iconic/css/open-iconic-foundation.styl create mode 100644 static/open-iconic/css/open-iconic.css create mode 100644 static/open-iconic/css/open-iconic.less create mode 100644 static/open-iconic/css/open-iconic.min.css create mode 100644 static/open-iconic/css/open-iconic.scss create mode 100644 static/open-iconic/css/open-iconic.styl create mode 
100644 static/open-iconic/fonts/open-iconic.eot create mode 100644 static/open-iconic/fonts/open-iconic.otf create mode 100644 static/open-iconic/fonts/open-iconic.svg create mode 100644 static/open-iconic/fonts/open-iconic.ttf create mode 100644 static/open-iconic/fonts/open-iconic.woff create mode 100644 templates/index_new.html create mode 100644 templates/settings flyout.html create mode 100644 templates/settings item.html diff --git a/aiserver.py b/aiserver.py index fce0e9ab..71279db1 100644 --- a/aiserver.py +++ b/aiserver.py @@ -240,7 +240,7 @@ log.setLevel(logging.ERROR) # Start flask & SocketIO print("{0}Initializing Flask... {1}".format(colors.PURPLE, colors.END), end="") from flask import Flask, render_template, Response, request, copy_current_request_context, send_from_directory -from flask_socketio import SocketIO, emit +from flask_socketio import SocketIO, emit, join_room, leave_room app = Flask(__name__, root_path=os.getcwd()) app.config['SECRET KEY'] = 'secret!' app.config['TEMPLATES_AUTO_RELOAD'] = True @@ -2769,6 +2769,13 @@ def execute_outmod(): #==================================================================# @socketio.on('connect') def do_connect(): + join_room("UI_{}".format(request.args.get('ui'))) + print("Joining Room UI_{}".format(request.args.get('ui'))) + #Send all variables to client + model_settings.send_to_ui() + story_settings.send_to_ui() + user_settings.send_to_ui() + system_settings.send_to_ui() print("{0}Client connected!{1}".format(colors.GREEN, colors.END)) emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}) emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}) @@ -5866,6 +5873,67 @@ def send_debug(): emit('from_server', {'cmd': 'debug_info', 'data': debug_info}, broadcast=True) +#==================================================================# +# Event triggered when browser SocketIO detects a variable change 
+#==================================================================# +@socketio.on('var_change') +def UI_2_var_change(data): + classname = data['ID'].split("_")[0] + name = data['ID'][len(classname)+1:] + classname += "_settings" + + #Need to fix the data type of value to match the module + if type(getattr(globals()[classname], name)) == int: + value = int(data['value']) + elif type(getattr(globals()[classname], name)) == float: + value = float(data['value']) + elif type(getattr(globals()[classname], name)) == bool: + value = bool(data['value']) + elif type(getattr(globals()[classname], name)) == str: + value = str(data['value']) + else: + print("Unknown Type {} = {}".format(name, type(getattr(globals()[classname], name)))) + + print("{} {} = {}".format(classname, name, value)) + + setattr(globals()[classname], name, value) + + +#==================================================================# +# UI V2 CODE +#==================================================================# +@app.route('/new_ui') +def new_ui_index(): + return render_template('index_new.html', settings=gensettings.gensettingstf if model_settings.model != "InferKit" else gensettings.gensettingsik ) + +#==================================================================# +# Event triggered when Selected Text is edited, Option is Selected, etc +#==================================================================# +@socketio.on('Set Selected Text') +def UI_2_Set_Selected_Text(data): + print("Updating Selected Text: {}".format(data)) + story_settings.actions.use_option(int(data['chunk']), int(data['option'])) + +#==================================================================# +# Event triggered when user clicks the submit button +#==================================================================# +@socketio.on('submit') +def UI_2_submit(data): + system_settings.lua_koboldbridge.feedback = None + story_settings.recentrng = story_settings.recentrngm = None + actionsubmit(data['data'], 
actionmode=story_settings.actionmode) + +#==================================================================# +# Event triggered when user clicks the pin button +#==================================================================# +@socketio.on('Pinning') +def UI_2_Pinning(data): + if data['set']: + story_settings.actions.set_pin(int(data['chunk']), int(data['option'])) + else: + story_settings.actions.unset_pin(int(data['chunk']), int(data['option'])) + + #==================================================================# # Final startup commands to launch Flask app #==================================================================# diff --git a/gensettings.py b/gensettings.py index b3007c91..14ba7970 100644 --- a/gensettings.py +++ b/gensettings.py @@ -8,7 +8,11 @@ gensettingstf = [ "max": 512, "step": 2, "default": 80, - "tooltip": "Number of tokens the AI should generate. Higher numbers will take longer to generate." + "tooltip": "Number of tokens the AI should generate. Higher numbers will take longer to generate.", + "menu_path": "Model", + "sub_path": "Generation", + "classname": "model", + "name": "genamt" }, { "uitype": "slider", @@ -19,7 +23,11 @@ gensettingstf = [ "max": 2.0, "step": 0.05, "default": 0.5, - "tooltip": "Randomness of sampling. High values can increase creativity but may make text less sensible. Lower values will make text more predictable but can become repetitious." + "tooltip": "Randomness of sampling. High values can increase creativity but may make text less sensible. Lower values will make text more predictable but can become repetitious.", + "menu_path": "Model", + "sub_path": "Generation", + "classname": "model", + "name": "temp" }, { "uitype": "slider", @@ -30,7 +38,12 @@ gensettingstf = [ "max": 1.0, "step": 0.05, "default": 0.9, - "tooltip": "Used to discard unlikely text in the sampling process. Lower values will make text more predictable but can become repetitious. 
(Put this value on 1 to disable its effect)" + "tooltip": "Used to discard unlikely text in the sampling process. Lower values will make text more predictable but can become repetitious. (Put this value on 1 to disable its effect)", + "menu_path": "Model", + "sub_path": "Sampling", + "classname": "model", + "name": "top_p" + }, { "uitype": "slider", @@ -41,7 +54,11 @@ gensettingstf = [ "max": 100, "step": 1, "default": 0, - "tooltip": "Alternative sampling method, can be combined with top_p. (Put this value on 0 to disable its effect)" + "tooltip": "Alternative sampling method, can be combined with top_p. (Put this value on 0 to disable its effect)", + "menu_path": "Model", + "sub_path": "Sampling", + "classname": "model", + "name": "top_k" }, { "uitype": "slider", @@ -52,7 +69,11 @@ gensettingstf = [ "max": 1.0, "step": 0.05, "default": 1.0, - "tooltip": "Alternative sampling method; it is recommended to disable top_p and top_k (set top_p to 1 and top_k to 0) if using this. 0.95 is thought to be a good value. (Put this value on 1 to disable its effect)" + "tooltip": "Alternative sampling method; it is recommended to disable top_p and top_k (set top_p to 1 and top_k to 0) if using this. 0.95 is thought to be a good value. (Put this value on 1 to disable its effect)", + "menu_path": "Model", + "sub_path": "Sampling", + "classname": "model", + "name": "tfs" }, { "uitype": "slider", @@ -63,7 +84,11 @@ gensettingstf = [ "max": 1.0, "step": 0.05, "default": 1.0, - "tooltip": "Alternative sampling method described in the paper \"Typical Decoding for Natural Language Generation\" (10.48550/ARXIV.2202.00666). The paper suggests 0.2 as a good value for this setting. Set this setting to 1 to disable its effect." + "tooltip": "Alternative sampling method described in the paper \"Typical Decoding for Natural Language Generation\" (10.48550/ARXIV.2202.00666). The paper suggests 0.2 as a good value for this setting. 
Set this setting to 1 to disable its effect.", + "menu_path": "Model", + "sub_path": "Sampling", + "classname": "model", + "name": "typical" }, { "uitype": "slider", @@ -74,7 +99,11 @@ gensettingstf = [ "max": 1.0, "step": 0.01, "default": 0.0, - "tooltip": "Alternative sampling method that reduces the randomness of the AI whenever the probability of one token is much higher than all the others. Higher values have a stronger effect. Set this setting to 0 to disable its effect." + "tooltip": "Alternative sampling method that reduces the randomness of the AI whenever the probability of one token is much higher than all the others. Higher values have a stronger effect. Set this setting to 0 to disable its effect.", + "menu_path": "Model", + "sub_path": "Sampling", + "classname": "model", + "name": "top_a" }, { "uitype": "slider", @@ -85,7 +114,11 @@ gensettingstf = [ "max": 3.0, "step": 0.01, "default": 1.1, - "tooltip": "Used to penalize words that were already generated or belong to the context (Going over 1.2 breaks 6B models)." + "tooltip": "Used to penalize words that were already generated or belong to the context (Going over 1.2 breaks 6B models).", + "menu_path": "Model", + "sub_path": "Repetition", + "classname": "model", + "name": "rep_pen" }, { "uitype": "slider", @@ -96,7 +129,11 @@ gensettingstf = [ "max": 4096, "step": 4, "default": 0, - "tooltip": "Repetition penalty range. If set higher than 0, only applies repetition penalty to the last few tokens of your story rather than applying it to the entire story. This slider controls the amount of tokens at the end of your story to apply it to." + "tooltip": "Repetition penalty range. If set higher than 0, only applies repetition penalty to the last few tokens of your story rather than applying it to the entire story. 
This slider controls the amount of tokens at the end of your story to apply it to.", + "menu_path": "Model", + "sub_path": "Repetition", + "classname": "model", + "name": "rep_pen_range" }, { "uitype": "slider", @@ -107,7 +144,11 @@ "max": 10.0, "step": 0.1, "default": 0.0, - "tooltip": "Repetition penalty slope. If BOTH this setting and Rep Penalty Range are set higher than 0, will use sigmoid interpolation to apply repetition penalty more strongly on tokens that are closer to the end of your story. This setting controls the tension of the sigmoid curve; higher settings will result in the repetition penalty difference between the start and end of your story being more apparent. Setting this to 1 uses linear interpolation; setting this to 0 disables interpolation." + "tooltip": "Repetition penalty slope. If BOTH this setting and Rep Penalty Range are set higher than 0, will use sigmoid interpolation to apply repetition penalty more strongly on tokens that are closer to the end of your story. This setting controls the tension of the sigmoid curve; higher settings will result in the repetition penalty difference between the start and end of your story being more apparent. Setting this to 1 uses linear interpolation; setting this to 0 disables interpolation.", + "menu_path": "Model", + "sub_path": "Repetition", + "classname": "model", + "name": "rep_pen_slope" }, { "uitype": "slider", @@ -118,7 +159,11 @@ "max": 2048, "step": 8, "default": 1024, - "tooltip": "Max number of tokens of context to submit to the AI for sampling. Make sure this is higher than Amount to Generate. Higher values increase VRAM/RAM usage." + "tooltip": "Max number of tokens of context to submit to the AI for sampling. Make sure this is higher than Amount to Generate.
Higher values increase VRAM/RAM usage.", + "menu_path": "Model", + "sub_path": "Generation", + "classname": "model", + "name": "max_length" }, { "uitype": "slider", @@ -129,7 +174,11 @@ gensettingstf = [ "max": 5, "step": 1, "default": 1, - "tooltip": "Number of results to generate per submission. Increases VRAM/RAM usage." + "tooltip": "Number of results to generate per submission. Increases VRAM/RAM usage.", + "menu_path": "Model", + "sub_path": "Generation", + "classname": "model", + "name": "numseqs" }, { "uitype": "slider", @@ -140,7 +189,10 @@ gensettingstf = [ "max": 5, "step": 1, "default": 3, - "tooltip": "Number of historic actions to scan for W Info keys." + "tooltip": "Number of historic actions to scan for W Info keys.", + "menu_path": "User", + "classname": "user", + "name": "widepth" }, { "uitype": "toggle", @@ -151,7 +203,10 @@ gensettingstf = [ "max": 1, "step": 1, "default": 0, - "tooltip": "Whether the game is saved after each action." + "tooltip": "Whether the game is saved after each action.", + "menu_path": "User", + "classname": "user", + "name": "autosave" }, { "uitype": "toggle", @@ -162,7 +217,10 @@ gensettingstf = [ "max": 1, "step": 1, "default": 1, - "tooltip": "Whether the prompt should be sent in the context of every action." + "tooltip": "Whether the prompt should be sent in the context of every action.", + "menu_path": "Story", + "classname": "story", + "name": "useprompt" }, { "uitype": "toggle", @@ -173,7 +231,10 @@ gensettingstf = [ "max": 1, "step": 1, "default": 0, - "tooltip": "Turn this on if you are playing a Choose your Adventure model." + "tooltip": "Turn this on if you are playing a Choose your Adventure model.", + "menu_path": "Story", + "classname": "story", + "name": "adventure" }, { "uitype": "toggle", @@ -184,7 +245,10 @@ gensettingstf = [ "max": 1, "step": 1, "default": 0, - "tooltip": "This mode optimizes KoboldAI for chatting." 
+ "tooltip": "This mode optimizes KoboldAI for chatting.", + "menu_path": "Story", + "classname": "story", + "name": "chatmode" }, { "uitype": "toggle", @@ -195,7 +259,10 @@ gensettingstf = [ "max": 1, "step": 1, "default": 0, - "tooltip": "Scan the AI's output for world info keys as it's generating the output." + "tooltip": "Scan the AI's output for world info keys as it's generating the output.", + "menu_path": "Story", + "classname": "story", + "name": "dynamicscan" }, { "uitype": "toggle", @@ -206,7 +273,10 @@ gensettingstf = [ "max": 1, "step": 1, "default": 0, - "tooltip": "When enabled the AI does not generate when you enter the prompt, instead you need to do an action first." + "tooltip": "When enabled the AI does not generate when you enter the prompt, instead you need to do an action first.", + "menu_path": "User", + "classname": "user", + "name": "nopromptgen" }, { "uitype": "toggle", @@ -217,7 +287,10 @@ gensettingstf = [ "max": 1, "step": 1, "default": 0, - "tooltip": "When enabled, the Memory text box in the Random Story dialog will be prefilled by default with your current story's memory instead of being empty." + "tooltip": "When enabled, the Memory text box in the Random Story dialog will be prefilled by default with your current story's memory instead of being empty.", + "menu_path": "User", + "classname": "user", + "name": "rngpersist" }, { "uitype": "toggle", @@ -228,7 +301,10 @@ gensettingstf = [ "max": 1, "step": 1, "default": 0, - "tooltip": "Disables userscript generation modifiers." 
+ "tooltip": "Disables userscript generation modifiers.", + "menu_path": "User", + "classname": "user", + "name": "nogenmod" }, { "uitype": "toggle", @@ -239,7 +315,10 @@ "max": 1, "step": 1, "default": 0, - "tooltip": "Show debug info" + "tooltip": "Show debug info", + "menu_path": "User", + "classname": "user", + "name": "debug" } ] @@ -252,7 +331,11 @@ gensettingsik =[{ "max": 2.0, "step": 0.05, "default": 0.5, - "tooltip": "Randomness of sampling. High values can increase creativity but may make text less sensible. Lower values will make text more predictable but can become repetitious." + "tooltip": "Randomness of sampling. High values can increase creativity but may make text less sensible. Lower values will make text more predictable but can become repetitious.", + "menu_path": "Model", + "sub_path": "Generation", + "classname": "model", + "name": "temp" }, { "uitype": "slider", @@ -263,7 +346,11 @@ gensettingsik =[{ "max": 1.0, "step": 0.05, "default": 1.1, - "tooltip": "Used to discard unlikely text in the sampling process. Lower values will make text more predictable but can become repetitious." + "tooltip": "Used to discard unlikely text in the sampling process. Lower values will make text more predictable but can become repetitious.", + "menu_path": "Model", + "sub_path": "Sampling", + "classname": "model", + "name": "top_p" }, { "uitype": "slider", @@ -274,7 +361,11 @@ gensettingsik =[{ "max": 100, "step": 1, "default": 0, - "tooltip": "Alternative sampling method, can be combined with top_p." + "tooltip": "Alternative sampling method, can be combined with top_p.", + "menu_path": "Model", + "sub_path": "Sampling", + "classname": "model", + "name": "top_k" }, { "uitype": "slider", @@ -285,7 +376,11 @@ gensettingsik =[{ "max": 1.0, "step": 0.05, "default": 0.0, - "tooltip": "Alternative sampling method; it is recommended to disable (set to 0) top_p and top_k if using this. 0.95 is thought to be a good value."
+ "tooltip": "Alternative sampling method; it is recommended to disable (set to 0) top_p and top_k if using this. 0.95 is thought to be a good value.", + "menu_path": "Model", + "sub_path": "Sampling", + "classname": "model", + "name": "tfs" }, { "uitype": "slider", @@ -296,7 +391,11 @@ gensettingsik =[{ "max": 3000, "step": 2, "default": 200, - "tooltip": "Number of characters the AI should generate." + "tooltip": "Number of characters the AI should generate.", + "menu_path": "Model", + "sub_path": "Generation", + "classname": "model", + "name": "max_length" }, { "uitype": "slider", @@ -307,7 +406,10 @@ gensettingsik =[{ "max": 5, "step": 1, "default": 3, - "tooltip": "Number of historic actions to scan for W Info keys." + "tooltip": "Number of historic actions to scan for W Info keys.", + "menu_path": "User", + "classname": "user", + "name": "widepth" }, { "uitype": "toggle", @@ -318,7 +420,10 @@ gensettingsik =[{ "max": 1, "step": 1, "default": 0, - "tooltip": "Whether the game is saved after each action." + "tooltip": "Whether the game is saved after each action.", + "menu_path": "User", + "classname": "user", + "name": "autosave" }, { "uitype": "toggle", @@ -329,7 +434,10 @@ gensettingsik =[{ "max": 1, "step": 1, "default": 1, - "tooltip": "Whether the prompt should be sent in the context of every action." + "tooltip": "Whether the prompt should be sent in the context of every action.", + "menu_path": "Story", + "classname": "story", + "name": "useprompt" }, { "uitype": "toggle", @@ -340,7 +448,10 @@ gensettingsik =[{ "max": 1, "step": 1, "default": 0, - "tooltip": "Turn this on if you are playing a Choose your Adventure model." + "tooltip": "Turn this on if you are playing a Choose your Adventure model.", + "menu_path": "Story", + "classname": "story", + "name": "adventure" }, { "uitype": "toggle", @@ -351,7 +462,10 @@ gensettingsik =[{ "max": 1, "step": 1, "default": 0, - "tooltip": "This mode optimizes KoboldAI for chatting." 
+ "tooltip": "This mode optimizes KoboldAI for chatting.", + "menu_path": "Story", + "classname": "story", + "name": "chatmode" }, { "uitype": "toggle", @@ -362,7 +476,10 @@ gensettingsik =[{ "max": 1, "step": 1, "default": 0, - "tooltip": "When enabled the AI does not generate when you enter the prompt, instead you need to do an action first." + "tooltip": "When enabled the AI does not generate when you enter the prompt, instead you need to do an action first.", + "menu_path": "User", + "classname": "user", + "name": "nopromptgen" }, { "uitype": "toggle", @@ -373,7 +490,10 @@ gensettingsik =[{ "max": 1, "step": 1, "default": 0, - "tooltip": "When enabled, the Memory text box in the Random Story dialog will be prefilled by default with your current story's memory instead of being empty." + "tooltip": "When enabled, the Memory text box in the Random Story dialog will be prefilled by default with your current story's memory instead of being empty.", + "menu_path": "User", + "classname": "user", + "name": "rngpersist" }, { "uitype": "toggle", @@ -384,7 +504,10 @@ gensettingsik =[{ "max": 1, "step": 1, "default": 0, - "tooltip": "Show debug info" + "tooltip": "Show debug info", + "menu_path": "User", + "classname": "user", + "name": "debug" } ] @@ -412,4 +535,4 @@ formatcontrols = [{ "label": "Single Line", "id": "singleline", "tooltip": "Only allows the AI to output anything before the enter" - }] + }] \ No newline at end of file diff --git a/static/koboldai.css b/static/koboldai.css new file mode 100644 index 00000000..5e8667ba --- /dev/null +++ b/static/koboldai.css @@ -0,0 +1,529 @@ +/*----------------Global Colors------------------*/ +:root { + --flyout_menu_width_base: 350px; + --flyout_menu_width: calc((var(--flyout_menu_width_base) * var(--screen_type_desktop)) + + (100% * var(--screen_type_mobile)) + + (var(--flyout_menu_width_base) * var(--screen_type_wide))); + --flyout_menu_closed_width: calc((0px * var(--screen_type_desktop)) + + (0px * 
var(--screen_type_mobile)) + + (var(--flyout_menu_width_base) * var(--screen_type_wide))); + --background: #474B4F; + --text: white; + --text_edit: #cdf; + --flyout_background: #6B6E70; + --setting_background: #285070; + --setting_text: white; + --tooltip_text: white; + --tooltip_background: #1f2931; + --gamescreen_background: #262626; + --textbox_background: #404040; + --options_background: #404040; + --enabled_button_text: #fff; + --enabled_button_background_color: #337ab7; + --enabled_button_border_color: #2e6da4; + --disabled_button_text: #303030; + --disabled_button_background_color: #686c68; + --disabled_button_border_color: #686c68; + --menu_button_level_1_bg_color: #4A654F; + --menu_button_level_1_border_color: #4A654F; + --menu_button_level_2_bg_color: #243127; + --menu_button_level_2_border_color: #243127; + /*--menu_button_level_2_bg_color: #6F8699;*/ + /*--menu_button_level_2_border_color: #495965;*/ + +} +@media only screen and (max-width: 768px) { + :root { + --screen_type_mobile: 1; + --screen_type_desktop: 0; + --screen_type_wide: 0; + --flyout_menu_width: 100%; + } +} +@media only screen and (min-width: 768px) and (max-width: 1920px) { + :root { + --screen_type_mobile: 0; + --screen_type_desktop: 1; + --screen_type_wide: 0; + --flyout_menu_width: 450px; + } +} +@media only screen and (min-width: 1921px) { + :root { + --screen_type_mobile: 0; + --screen_type_desktop: 0; + --screen_type_wide: 1; + + } +} + + + +/*----------------SETTINGS AREA------------------*/ +.settings_category { + width: 100%; +} + +.btn.menu_button_level_1 { + background-color: var(--menu_button_level_1_bg_color); + border-color: var(--menu_button_level_1_border_color); + text-align: left; +} + +.menu_background_level_1 { + background-color: var(--menu_button_level_1_bg_color); +} + +.btn.menu_button_level_2 { + background-color: var(--menu_button_level_2_bg_color); + border-color: var(--menu_button_level_2_border_color); + text-align: left; +} + +.menu_background_level_2 { + 
background-color: var(--menu_button_level_2_bg_color); +} + +.settings_category_area { + padding-left: 10px; + display: flex; + flex-direction: row; + flex-wrap: wrap; + width: 100%; +} + +.setting_container { + display: grid; + grid-template-areas: "label value" + "item item"; + grid-template-rows: 20px 25px; + grid-template-columns: 160px 40px; + row-gap: 0.2em; + background-color: var(--setting_background); + color: var(--text); + margin: 2px; +} + +.setting_label { + grid-area: label; + overflow: hidden; +} + +.setting_value { + text-align: right; + grid-area: value; + background-color: inherit; + color: inherit; + border: none; + outline: none; +} + +.setting_value:focus { + color: var(--text_edit); +} + +.setting_item { + grid-area: item; + overflow: hidden; +} + +.setting_item_input { + width:95%; +} + +.helpicon { + display: inline-block; + font-family: sans-serif; + font-weight: bold; + text-align: center; + width: 2ex; + height: 2ex; + font-size: 1.4ex; + line-height: 1.8ex; + border-radius: 1.2ex; + margin-right: 4px; + padding: 1px; + color: var(--setting_background); + background: var(--setting_text); + border: 1px solid white; + text-decoration: none; +} + +.helpicon .helptext { + display: none; + font-family: sans-serif; + position: absolute; + z-index: 100; + text-shadow: none !important; +} + +.helpicon:hover .helptext { + display: inline-block; + position: fixed; + width: 250px; + background-color: var(--tooltip_background); + color: var(--tooltip_text); + font-size: 11pt; + z-index: 100; + font-weight: normal; + line-height: normal; + border-radius: 6px; + padding: 5px; + margin-left:10px; + border: 1px solid #337ab7; +} + +/*----------------LEFT FLYOUT MENU------------------*/ +.menu_icon { + position: fixed; + top:10px; + left: 5px; + z-index:50; + display: inline-block; + cursor: pointer; +} + +.menu_pin { + position: fixed; + top:10px; + left: calc(var(--flyout_menu_width) - 20px); + z-index:50; + width: 25px; + height: 25px; + color: #999; + 
display: inline-block; + transition: left 0.5s; + cursor: pointer; +} + +.menu_pin.hidden { + left: 0px; + display: inline-block; + width: 0px; + overflow: hidden; + transition: left 0.5s; +} + +.SideMenu { + height: 100%; + width: var(--flyout_menu_closed_width); + position: fixed; + z-index: 1; + top: 0; + left: 0; + background-color: var(--flyout_background); + overflow-x: hidden; + transition: 0.5s; +} + +.SideMenu.pinned { + height: 100%; + width: var(--flyout_menu_width); + position: fixed; + z-index: 1; + top: 0; + left: 0; + background-color: var(--flyout_background); + overflow-x: hidden; + transition: 0.5s; +} + +.SideMenu.open { + width: var(--flyout_menu_width); +} + +.settings_menu { + padding-left: 30px; + padding-top: 7px; +} + +.settings_menu .menu_button{ + color: var(--enabled_button_text); + background-color: var(--enabled_button_background_color); + border-color: var(--enabled_button_border_color); +} + +.settings_menu .menu_button:hover { + filter: brightness(85%); +} + +.settings_menu select { + color: black; + margin-left: auto; + margin-right: 25px; +} + +.menubar1, .menubar2, .menubar3 { + width: 21px; + height: 3px; + background-color: #999; + margin: 3px 0; + transition: 0.4s; +} + +.change .menubar1 { + transform: translate(0px, 6px) rotate(-45deg); +} + +.change .menubar2 {opacity: 0;} + +.change .menubar3 { + transform: translate(0px, -6px) rotate(45deg); +} + +/*----------------RIGHT FLYOUT MENU------------------*/ +.right_menu_icon { + position: fixed; + top:10px; + right: 5px; + z-index:50; + display: inline-block; + cursor: pointer; + grid-area: lefticon; +} + +.rightSideMenu { + height: 100%; + width: 0; + position: fixed; + z-index: 1; + top: 0; + right: 0; + background-color: var(--flyout_background); + overflow-x: hidden; + transition: 0.5s; + padding-top: 20px; + padding-bottom: 10px; + +} + +.rightSideMenu.open { + width: var(--flyout_menu_width); +} + +table.server_vars { + border: 1px solid #959595; +} + +tr.server_vars { + 
margin-top: 0px; + margin-bottom: 0px; +} + +td.server_vars { + border: 1px solid #959595; + border-radius: 5px; + padding: 0px; + color: #ffffff; + -moz-transition: all 0.15s ease-in; + -o-transition: all 0.15s ease-in; + -webkit-transition: all 0.15s ease-in; + transition: all 0.15s ease-in; + white-space: pre-wrap; +} + + +/* ---------------------------- OVERALL PAGE CONFIG ------------------------------*/ +body { + background-color: var(--background); + color: var(--text); +} + +.main-grid { + transition: margin-left .5s; + display: grid; + min-height: 98vh; + margin-left: var(--flyout_menu_closed_width); +/* grid-template-areas: "menuicon gamescreen lefticon" + "menuicon actions lefticon" + "menuicon inputrow lefticon";*/ + grid-template-areas: "menuicon gamescreen lefticon" + "menuicon inputrow lefticon"; + grid-template-columns: 30px auto 20px; +/* grid-template-rows: auto 40px 100px;*/ + grid-template-rows: auto 100px; +} + +.main-grid.pinned { + margin-left: var(--flyout_menu_width); +} + +/* ---------------------------------- GAME SCREEN ----------------------------------*/ +.gamescreen { + background-color: var(--gamescreen_background); + color: white; + grid-area: gamescreen; + display: flex; + flex-direction: column; + overflow-x: hidden; + margin-top: 10px; + vertical-align: bottom; + font-family: "Helvetica Neue",Helvetica,Arial,sans-serif; + font-size: 14px; + line-height: 1.42857143; +} + +.gametext { + margin-top: auto; + padding-bottom: 1px; + vertical-align: bottom; +} + +[contenteditable="true"]:active, +[contenteditable="true"]:focus{ + border:none; + outline:none; + color: var(--text_edit); +} + + +table.sequence { + width: 100%; + border: 0px; + border-spacing: 0; +} + +tr.sequence { + margin-top: 0px; + margin-bottom: 0px; +} + +td.sequence { + border: 1px solid #959595; + border-radius: 5px; + padding: 0px; + background-color: var(--options_background); + -moz-transition: all 0.15s ease-in; + -o-transition: all 0.15s ease-in; + 
-webkit-transition: all 0.15s ease-in; + transition: all 0.15s ease-in; + white-space: pre-wrap; +} + +td.sequence:hover { + filter: brightness(70%); +} + + +.actions { + grid-area: actions; + display: flex; +} + +.actions button { + width: 80px; + margin-right: 2px; +} + +.inputrow { + grid-area: inputrow; + display: grid; + grid-template-areas: "textarea submit submit submit" + "textarea back redo retry"; + grid-template-columns: auto 30px 30px 30px; + grid-template-rows: auto 40px; + gap: 1px; + +} + +.inputrow textarea { + grid-area: textarea; + background-color: var(--textarea_background); + color: var(--text); +} + +.inputrow .submit[server_value=false] { + grid-area: submit; + height: 100%; + width: 100%; + text-align: center; + overflow: hidden; +} +.inputrow .submit[server_value=true] { + grid-area: submit; + height: 100%; + width: 100%; + text-align: center; + overflow: hidden; + display: none; +} + +.inputrow .submited[server_value=false] { + grid-area: submit; + height: 100%; + width: 100%; + text-align: center; + overflow: hidden; + display: none; +} +.inputrow .submited[server_value=true] { + grid-area: submit; + height: 100%; + width: 100%; + text-align: center; + overflow: hidden; + display: inline; +} + +.inputrow .back { + grid-area: back; + padding: 0px; + height: 100%; + width: 100%; + text-align: center; + overflow: hidden; +} + +.inputrow .redo { + grid-area: redo; + padding: 0px; + height: 100%; + width: 100%; + text-align: center; + overflow: hidden; +} + +.inputrow .retry { + grid-area: retry; + padding: 0px; + height: 100%; + width: 100%; + text-align: center; + overflow: hidden; +} + +/*---------------------------------- Global ------------------------------------------------*/ +.hidden { + display: none; +} + +.action_button { + color: var(--enabled_button_text); + background-color: var(--enabled_button_background_color); + border-color: var(--enabled_button_border_color); +} + +.action_button:hover { + filter: brightness(85%); +} + + 
+.action_button.disabled { + color: var(--disabled_button_text); + background-color: var(--disabled_button_background_color); + border-color: var(--disabled_button_border_color); + cursor: not-allowed; +} + +.force_center { + margin-left: 50%; + transform: translateX(-50%); +} + +.flex { + display: flex; +} + +.rawtext { + white-space: pre-wrap; +} \ No newline at end of file diff --git a/static/koboldai.js b/static/koboldai.js new file mode 100644 index 00000000..45527d58 --- /dev/null +++ b/static/koboldai.js @@ -0,0 +1,256 @@ +var socket; +socket = io.connect(window.location.origin, {transports: ['polling', 'websocket'], closeOnBeforeunload: false, query:{"ui": "2"}}); + +//Let's register our server communications +socket.on('connect', function(){connect();}); +socket.on('disconnect', function(){disconnect();}); +socket.on('reset_story', function(){reset_story();}); +socket.on('var_changed', function(data){var_changed(data);}); +//socket.onAny(function(event_name, data) {console.log({"event": event_name, "data": data});}); + +var backend_vars = {}; +var presets = {} +//-----------------------------------Server to UI Functions----------------------------------------------- +function connect() { + console.log("connected"); +} + +function disconnect() { + console.log("disconnected"); +} + +function reset_story() { + var story_area = document.getElementById('Selected Text'); + while (story_area.firstChild) { + story_area.removeChild(story_area.firstChild); + } + var option_area = document.getElementById("Select Options"); + while (option_area.firstChild) { + option_area.removeChild(option_area.firstChild); + } +} + +function fix_text(val) { + if (typeof val === 'string' || val instanceof String) { + if (val.includes("{")) { + return JSON.stringify(val); + } else { + return val; + } + } else { + return val; + } +} + +function create_options(data) { + if (document.getElementById("Select Options Chunk "+data.value.id)) { + var option_chunk = 
document.getElementById("Select Options Chunk "+data.value.id) + } else { + var option_area = document.getElementById("Select Options"); + var option_chunk = document.createElement("div"); + option_chunk.id = "Select Options Chunk "+data.value.id; + option_area.append(option_chunk); + } + //first, let's clear out our existing data + while (option_chunk.firstChild) { + option_chunk.removeChild(option_chunk.firstChild); + } + var table = document.createElement("table"); + table.classList.add("sequence"); + table.style = "border-spacing: 0;"; + //Add pins + i=0; + for (item of data.value.options) { + if (item.Pinned) { + var row = document.createElement("tr"); + row.classList.add("sequence"); + var textcell = document.createElement("td"); + textcell.textContent = item.text; + textcell.classList.add("sequence"); + textcell.setAttribute("option_id", i); + textcell.setAttribute("option_chunk", data.value.id); + var iconcell = document.createElement("td"); + iconcell.setAttribute("option_id", i); + iconcell.setAttribute("option_chunk", data.value.id); + var icon = document.createElement("span"); + icon.id = "Pin_"+i; + icon.classList.add("oi"); + icon.setAttribute('data-glyph', "pin"); + iconcell.append(icon); + textcell.onclick = function () { + socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); + }; + iconcell.onclick = function () { + socket.emit("Pinning", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id"), "set": false}); + }; + row.append(textcell); + row.append(iconcell); + table.append(row); + } + i+=1; + } + //Add general options + i=0; + for (item of data.value.options) { + if (!(item.Edited) && !(item.Pinned) && !(item['Previous Selection'])) { + var row = document.createElement("tr"); + row.classList.add("sequence"); + var textcell = document.createElement("td"); + textcell.textContent = item.text; + textcell.classList.add("sequence"); + 
textcell.setAttribute("option_id", i); + textcell.setAttribute("option_chunk", data.value.id); + var iconcell = document.createElement("td"); + iconcell.setAttribute("option_id", i); + iconcell.setAttribute("option_chunk", data.value.id); + var icon = document.createElement("span"); + icon.id = "Pin_"+i; + icon.classList.add("oi"); + icon.setAttribute('data-glyph', "pin"); + icon.setAttribute('style', "filter: brightness(50%);"); + iconcell.append(icon); + iconcell.onclick = function () { + socket.emit("Pinning", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id"), "set": true}); + }; + textcell.onclick = function () { + socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); + }; + row.append(textcell); + row.append(iconcell); + table.append(row); + } + i+=1; + } + option_chunk.append(table); +} + +function do_story_text_updates(data) { + story_area = document.getElementById('Selected Text'); + if (document.getElementById('Selected Text Chunk '+data.value.id)) { + document.getElementById('Selected Text Chunk '+data.value.id).textContent = data.value.text; + } else { + var span = document.createElement("span"); + span.id = 'Selected Text Chunk '+data.value.id; + span.chunk = data.value.id; + span.original_text = data.value.text; + span.setAttribute("contenteditable", true); + span.onblur = function () { + if (this.textContent != this.original_text) { + socket.emit("Set Selected Text", {"id": this.chunk, "text": this.textContent}); + } + } + span.textContent = data.value.text; + + story_area.append(span); + } +} + +function do_presets(data) { + var select = document.getElementById('presets'); + //clear out the preset list + while (select.firstChild) { + select.removeChild(select.firstChild); + } + //add our blank option + var option = document.createElement("option"); + option.value=""; + option.text="presets"; + select.append(option); + for (item of data.value) { + 
presets[item.preset] = item; + var option = document.createElement("option"); + option.value=item.preset; + option.text=item.preset; + select.append(option); + } +} + +function selected_preset(data) { + if ((data.value == undefined) || (presets[data.value] == undefined)) { + return; + } + for (const [key, value] of Object.entries(presets[data.value])) { + if (key.charAt(0) != '_') { + var elements_to_change = document.getElementsByClassName("var_sync_model_"+key); + for (item of elements_to_change) { + if (item.tagName.toLowerCase() === 'input') { + item.value = fix_text(value); + } else { + item.textContent = fix_text(value); + } + } + } + } +} + +function var_changed(data) { + //Special Case for Story Text + if ((data.classname == "actions") && (data.name == "Selected Text")) { + do_story_text_updates(data); + //Special Case for Story Options + } else if ((data.classname == "actions") && (data.name == "Options")) { + create_options(data); + //Special Case for Presets + } else if ((data.classname == 'model') && (data.name == 'presets')) { + do_presets(data); + } else if ((data.classname == "model") && (data.name == "selected_preset")) { + selected_preset(data); + //Basic Data Syncing + } else { + var elements_to_change = document.getElementsByClassName("var_sync_"+data.classname+"_"+data.name); + for (item of elements_to_change) { + if (item.tagName.toLowerCase() === 'input') { + item.value = fix_text(data.value); + } else { + item.textContent = fix_text(data.value); + } + } + var elements_to_change = document.getElementsByClassName("var_sync_alt_"+data.classname+"_"+data.name); + for (item of elements_to_change) { + item.setAttribute("server_value", fix_text(data.value)); + } + } +} + +//--------------------------------------------UI to Server Functions---------------------------------- + + +//--------------------------------------------General UI Functions------------------------------------ +function toggle_flyout(x) { + if 
(document.getElementById("SideMenu").classList.contains("open")) { + x.classList.remove("change"); + document.getElementById("SideMenu").classList.remove("open"); + document.getElementById("main-grid").classList.remove("menu-open"); + //if pinned + if (document.getElementById("SideMenu").classList.contains("pinned")) { + document.getElementById("menu_pin").classList.remove("hidden"); + } else { + document.getElementById("menu_pin").classList.add("hidden"); + } + } else { + x.classList.add("change"); + document.getElementById("SideMenu").classList.add("open"); + document.getElementById("main-grid").classList.add("menu-open"); + document.getElementById("menu_pin").classList.remove("hidden"); + } +} + +function toggle_flyout_right(x) { + if (document.getElementById("rightSideMenu").classList.contains("open")) { + document.getElementById("rightSideMenu").classList.remove("open"); + x.setAttribute("data-glyph", "chevron-left"); + } else { + document.getElementById("rightSideMenu").classList.add("open"); + x.setAttribute("data-glyph", "chevron-right"); + } +} + +function toggle_pin_flyout() { + if (document.getElementById("SideMenu").classList.contains("pinned")) { + document.getElementById("SideMenu").classList.remove("pinned"); + document.getElementById("main-grid").classList.remove("pinned"); + } else { + document.getElementById("SideMenu").classList.add("pinned"); + document.getElementById("main-grid").classList.add("pinned"); + } +} diff --git a/static/open-iconic/css/open-iconic-bootstrap.css b/static/open-iconic/css/open-iconic-bootstrap.css new file mode 100644 index 00000000..56c4e5f3 --- /dev/null +++ b/static/open-iconic/css/open-iconic-bootstrap.css @@ -0,0 +1,952 @@ +/* Bootstrap */ + +@font-face { + font-family: 'Icons'; + src: url('../fonts/open-iconic.eot'); + src: url('../fonts/open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('../fonts/open-iconic.woff') format('woff'), url('../fonts/open-iconic.ttf') format('truetype'), 
url('../fonts/open-iconic.otf') format('opentype'), url('../fonts/open-iconic.svg#iconic-sm') format('svg'); + font-weight: normal; + font-style: normal; +} + +.oi { + position: relative; + top: 1px; + display: inline-block; + speak:none; + font-family: 'Icons'; + font-style: normal; + font-weight: normal; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +.oi:empty:before { + width: 1em; + text-align: center; + box-sizing: content-box; +} + +.oi.oi-align-center:before { + text-align: center; +} + +.oi.oi-align-left:before { + text-align: left; +} + +.oi.oi-align-right:before { + text-align: right; +} + + +.oi.oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); +} + +.oi.oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); +} + +.oi.oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); +} + + +.oi-account-login:before { + content:'\e000'; +} + +.oi-account-logout:before { + content:'\e001'; +} + +.oi-action-redo:before { + content:'\e002'; +} + +.oi-action-undo:before { + content:'\e003'; +} + +.oi-align-center:before { + content:'\e004'; +} + +.oi-align-left:before { + content:'\e005'; +} + +.oi-align-right:before { + content:'\e006'; +} + +.oi-aperture:before { + content:'\e007'; +} + +.oi-arrow-bottom:before { + content:'\e008'; +} + +.oi-arrow-circle-bottom:before { + content:'\e009'; +} + +.oi-arrow-circle-left:before { + content:'\e00a'; +} + +.oi-arrow-circle-right:before { + content:'\e00b'; +} + +.oi-arrow-circle-top:before { + content:'\e00c'; +} + +.oi-arrow-left:before { + content:'\e00d'; +} + +.oi-arrow-right:before { + content:'\e00e'; +} + +.oi-arrow-thick-bottom:before { + content:'\e00f'; +} + +.oi-arrow-thick-left:before { + content:'\e010'; +} + +.oi-arrow-thick-right:before { + 
content:'\e011'; +} + +.oi-arrow-thick-top:before { + content:'\e012'; +} + +.oi-arrow-top:before { + content:'\e013'; +} + +.oi-audio-spectrum:before { + content:'\e014'; +} + +.oi-audio:before { + content:'\e015'; +} + +.oi-badge:before { + content:'\e016'; +} + +.oi-ban:before { + content:'\e017'; +} + +.oi-bar-chart:before { + content:'\e018'; +} + +.oi-basket:before { + content:'\e019'; +} + +.oi-battery-empty:before { + content:'\e01a'; +} + +.oi-battery-full:before { + content:'\e01b'; +} + +.oi-beaker:before { + content:'\e01c'; +} + +.oi-bell:before { + content:'\e01d'; +} + +.oi-bluetooth:before { + content:'\e01e'; +} + +.oi-bold:before { + content:'\e01f'; +} + +.oi-bolt:before { + content:'\e020'; +} + +.oi-book:before { + content:'\e021'; +} + +.oi-bookmark:before { + content:'\e022'; +} + +.oi-box:before { + content:'\e023'; +} + +.oi-briefcase:before { + content:'\e024'; +} + +.oi-british-pound:before { + content:'\e025'; +} + +.oi-browser:before { + content:'\e026'; +} + +.oi-brush:before { + content:'\e027'; +} + +.oi-bug:before { + content:'\e028'; +} + +.oi-bullhorn:before { + content:'\e029'; +} + +.oi-calculator:before { + content:'\e02a'; +} + +.oi-calendar:before { + content:'\e02b'; +} + +.oi-camera-slr:before { + content:'\e02c'; +} + +.oi-caret-bottom:before { + content:'\e02d'; +} + +.oi-caret-left:before { + content:'\e02e'; +} + +.oi-caret-right:before { + content:'\e02f'; +} + +.oi-caret-top:before { + content:'\e030'; +} + +.oi-cart:before { + content:'\e031'; +} + +.oi-chat:before { + content:'\e032'; +} + +.oi-check:before { + content:'\e033'; +} + +.oi-chevron-bottom:before { + content:'\e034'; +} + +.oi-chevron-left:before { + content:'\e035'; +} + +.oi-chevron-right:before { + content:'\e036'; +} + +.oi-chevron-top:before { + content:'\e037'; +} + +.oi-circle-check:before { + content:'\e038'; +} + +.oi-circle-x:before { + content:'\e039'; +} + +.oi-clipboard:before { + content:'\e03a'; +} + +.oi-clock:before { + content:'\e03b'; 
+} + +.oi-cloud-download:before { + content:'\e03c'; +} + +.oi-cloud-upload:before { + content:'\e03d'; +} + +.oi-cloud:before { + content:'\e03e'; +} + +.oi-cloudy:before { + content:'\e03f'; +} + +.oi-code:before { + content:'\e040'; +} + +.oi-cog:before { + content:'\e041'; +} + +.oi-collapse-down:before { + content:'\e042'; +} + +.oi-collapse-left:before { + content:'\e043'; +} + +.oi-collapse-right:before { + content:'\e044'; +} + +.oi-collapse-up:before { + content:'\e045'; +} + +.oi-command:before { + content:'\e046'; +} + +.oi-comment-square:before { + content:'\e047'; +} + +.oi-compass:before { + content:'\e048'; +} + +.oi-contrast:before { + content:'\e049'; +} + +.oi-copywriting:before { + content:'\e04a'; +} + +.oi-credit-card:before { + content:'\e04b'; +} + +.oi-crop:before { + content:'\e04c'; +} + +.oi-dashboard:before { + content:'\e04d'; +} + +.oi-data-transfer-download:before { + content:'\e04e'; +} + +.oi-data-transfer-upload:before { + content:'\e04f'; +} + +.oi-delete:before { + content:'\e050'; +} + +.oi-dial:before { + content:'\e051'; +} + +.oi-document:before { + content:'\e052'; +} + +.oi-dollar:before { + content:'\e053'; +} + +.oi-double-quote-sans-left:before { + content:'\e054'; +} + +.oi-double-quote-sans-right:before { + content:'\e055'; +} + +.oi-double-quote-serif-left:before { + content:'\e056'; +} + +.oi-double-quote-serif-right:before { + content:'\e057'; +} + +.oi-droplet:before { + content:'\e058'; +} + +.oi-eject:before { + content:'\e059'; +} + +.oi-elevator:before { + content:'\e05a'; +} + +.oi-ellipses:before { + content:'\e05b'; +} + +.oi-envelope-closed:before { + content:'\e05c'; +} + +.oi-envelope-open:before { + content:'\e05d'; +} + +.oi-euro:before { + content:'\e05e'; +} + +.oi-excerpt:before { + content:'\e05f'; +} + +.oi-expand-down:before { + content:'\e060'; +} + +.oi-expand-left:before { + content:'\e061'; +} + +.oi-expand-right:before { + content:'\e062'; +} + +.oi-expand-up:before { + content:'\e063'; +} + 
+.oi-external-link:before { + content:'\e064'; +} + +.oi-eye:before { + content:'\e065'; +} + +.oi-eyedropper:before { + content:'\e066'; +} + +.oi-file:before { + content:'\e067'; +} + +.oi-fire:before { + content:'\e068'; +} + +.oi-flag:before { + content:'\e069'; +} + +.oi-flash:before { + content:'\e06a'; +} + +.oi-folder:before { + content:'\e06b'; +} + +.oi-fork:before { + content:'\e06c'; +} + +.oi-fullscreen-enter:before { + content:'\e06d'; +} + +.oi-fullscreen-exit:before { + content:'\e06e'; +} + +.oi-globe:before { + content:'\e06f'; +} + +.oi-graph:before { + content:'\e070'; +} + +.oi-grid-four-up:before { + content:'\e071'; +} + +.oi-grid-three-up:before { + content:'\e072'; +} + +.oi-grid-two-up:before { + content:'\e073'; +} + +.oi-hard-drive:before { + content:'\e074'; +} + +.oi-header:before { + content:'\e075'; +} + +.oi-headphones:before { + content:'\e076'; +} + +.oi-heart:before { + content:'\e077'; +} + +.oi-home:before { + content:'\e078'; +} + +.oi-image:before { + content:'\e079'; +} + +.oi-inbox:before { + content:'\e07a'; +} + +.oi-infinity:before { + content:'\e07b'; +} + +.oi-info:before { + content:'\e07c'; +} + +.oi-italic:before { + content:'\e07d'; +} + +.oi-justify-center:before { + content:'\e07e'; +} + +.oi-justify-left:before { + content:'\e07f'; +} + +.oi-justify-right:before { + content:'\e080'; +} + +.oi-key:before { + content:'\e081'; +} + +.oi-laptop:before { + content:'\e082'; +} + +.oi-layers:before { + content:'\e083'; +} + +.oi-lightbulb:before { + content:'\e084'; +} + +.oi-link-broken:before { + content:'\e085'; +} + +.oi-link-intact:before { + content:'\e086'; +} + +.oi-list-rich:before { + content:'\e087'; +} + +.oi-list:before { + content:'\e088'; +} + +.oi-location:before { + content:'\e089'; +} + +.oi-lock-locked:before { + content:'\e08a'; +} + +.oi-lock-unlocked:before { + content:'\e08b'; +} + +.oi-loop-circular:before { + content:'\e08c'; +} + +.oi-loop-square:before { + content:'\e08d'; +} + 
+.oi-loop:before { + content:'\e08e'; +} + +.oi-magnifying-glass:before { + content:'\e08f'; +} + +.oi-map-marker:before { + content:'\e090'; +} + +.oi-map:before { + content:'\e091'; +} + +.oi-media-pause:before { + content:'\e092'; +} + +.oi-media-play:before { + content:'\e093'; +} + +.oi-media-record:before { + content:'\e094'; +} + +.oi-media-skip-backward:before { + content:'\e095'; +} + +.oi-media-skip-forward:before { + content:'\e096'; +} + +.oi-media-step-backward:before { + content:'\e097'; +} + +.oi-media-step-forward:before { + content:'\e098'; +} + +.oi-media-stop:before { + content:'\e099'; +} + +.oi-medical-cross:before { + content:'\e09a'; +} + +.oi-menu:before { + content:'\e09b'; +} + +.oi-microphone:before { + content:'\e09c'; +} + +.oi-minus:before { + content:'\e09d'; +} + +.oi-monitor:before { + content:'\e09e'; +} + +.oi-moon:before { + content:'\e09f'; +} + +.oi-move:before { + content:'\e0a0'; +} + +.oi-musical-note:before { + content:'\e0a1'; +} + +.oi-paperclip:before { + content:'\e0a2'; +} + +.oi-pencil:before { + content:'\e0a3'; +} + +.oi-people:before { + content:'\e0a4'; +} + +.oi-person:before { + content:'\e0a5'; +} + +.oi-phone:before { + content:'\e0a6'; +} + +.oi-pie-chart:before { + content:'\e0a7'; +} + +.oi-pin:before { + content:'\e0a8'; +} + +.oi-play-circle:before { + content:'\e0a9'; +} + +.oi-plus:before { + content:'\e0aa'; +} + +.oi-power-standby:before { + content:'\e0ab'; +} + +.oi-print:before { + content:'\e0ac'; +} + +.oi-project:before { + content:'\e0ad'; +} + +.oi-pulse:before { + content:'\e0ae'; +} + +.oi-puzzle-piece:before { + content:'\e0af'; +} + +.oi-question-mark:before { + content:'\e0b0'; +} + +.oi-rain:before { + content:'\e0b1'; +} + +.oi-random:before { + content:'\e0b2'; +} + +.oi-reload:before { + content:'\e0b3'; +} + +.oi-resize-both:before { + content:'\e0b4'; +} + +.oi-resize-height:before { + content:'\e0b5'; +} + +.oi-resize-width:before { + content:'\e0b6'; +} + +.oi-rss-alt:before { + 
content:'\e0b7'; +} + +.oi-rss:before { + content:'\e0b8'; +} + +.oi-script:before { + content:'\e0b9'; +} + +.oi-share-boxed:before { + content:'\e0ba'; +} + +.oi-share:before { + content:'\e0bb'; +} + +.oi-shield:before { + content:'\e0bc'; +} + +.oi-signal:before { + content:'\e0bd'; +} + +.oi-signpost:before { + content:'\e0be'; +} + +.oi-sort-ascending:before { + content:'\e0bf'; +} + +.oi-sort-descending:before { + content:'\e0c0'; +} + +.oi-spreadsheet:before { + content:'\e0c1'; +} + +.oi-star:before { + content:'\e0c2'; +} + +.oi-sun:before { + content:'\e0c3'; +} + +.oi-tablet:before { + content:'\e0c4'; +} + +.oi-tag:before { + content:'\e0c5'; +} + +.oi-tags:before { + content:'\e0c6'; +} + +.oi-target:before { + content:'\e0c7'; +} + +.oi-task:before { + content:'\e0c8'; +} + +.oi-terminal:before { + content:'\e0c9'; +} + +.oi-text:before { + content:'\e0ca'; +} + +.oi-thumb-down:before { + content:'\e0cb'; +} + +.oi-thumb-up:before { + content:'\e0cc'; +} + +.oi-timer:before { + content:'\e0cd'; +} + +.oi-transfer:before { + content:'\e0ce'; +} + +.oi-trash:before { + content:'\e0cf'; +} + +.oi-underline:before { + content:'\e0d0'; +} + +.oi-vertical-align-bottom:before { + content:'\e0d1'; +} + +.oi-vertical-align-center:before { + content:'\e0d2'; +} + +.oi-vertical-align-top:before { + content:'\e0d3'; +} + +.oi-video:before { + content:'\e0d4'; +} + +.oi-volume-high:before { + content:'\e0d5'; +} + +.oi-volume-low:before { + content:'\e0d6'; +} + +.oi-volume-off:before { + content:'\e0d7'; +} + +.oi-warning:before { + content:'\e0d8'; +} + +.oi-wifi:before { + content:'\e0d9'; +} + +.oi-wrench:before { + content:'\e0da'; +} + +.oi-x:before { + content:'\e0db'; +} + +.oi-yen:before { + content:'\e0dc'; +} + +.oi-zoom-in:before { + content:'\e0dd'; +} + +.oi-zoom-out:before { + content:'\e0de'; +} diff --git a/static/open-iconic/css/open-iconic-bootstrap.less b/static/open-iconic/css/open-iconic-bootstrap.less new file mode 100644 index 
00000000..fc3fe341 --- /dev/null +++ b/static/open-iconic/css/open-iconic-bootstrap.less @@ -0,0 +1,960 @@ +/* Bootstrap */ + +/* Override Bootstrap default variable */ +//@icon-font-path: "../fonts/"; + +@font-face { + font-family: 'Icons'; + src: ~"url('@{icon-font-path}open-iconic.eot')"; + src: ~"url('@{icon-font-path}open-iconic.eot?#iconic-sm') format('embedded-opentype')", + ~"url('@{icon-font-path}open-iconic.woff') format('woff')", + ~"url('@{icon-font-path}open-iconic.ttf') format('truetype')", + ~"url('@{icon-font-path}open-iconic.svg#iconic-sm') format('svg')"; + font-weight: normal; + font-style: normal; +} + +// Catchall baseclass +.oi { + position: relative; + top: 1px; + display: inline-block; + font-family: 'Icons'; + font-style: normal; + font-weight: normal; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + + &:empty:before { + width: 1em; + text-align: center; + box-sizing: content-box; + } + + &.oi-align-center:before { + text-align: center; + } + + &.oi-align-left:before { + text-align: left; + } + + &.oi-align-right:before { + text-align: right; + } + + + &.oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); + } + + &.oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); + } + + &.oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); + } +} + + + +.oi-account-login:before { + content:"\e000"; +} + +.oi-account-logout:before { + content:"\e001"; +} + +.oi-action-redo:before { + content:"\e002"; +} + +.oi-action-undo:before { + content:"\e003"; +} + +.oi-align-center:before { + content:"\e004"; +} + +.oi-align-left:before { + content:"\e005"; +} + +.oi-align-right:before { + content:"\e006"; +} + +.oi-aperture:before { + content:"\e007"; +} + +.oi-arrow-bottom:before { + content:"\e008"; +} 
+ +.oi-arrow-circle-bottom:before { + content:"\e009"; +} + +.oi-arrow-circle-left:before { + content:"\e00a"; +} + +.oi-arrow-circle-right:before { + content:"\e00b"; +} + +.oi-arrow-circle-top:before { + content:"\e00c"; +} + +.oi-arrow-left:before { + content:"\e00d"; +} + +.oi-arrow-right:before { + content:"\e00e"; +} + +.oi-arrow-thick-bottom:before { + content:"\e00f"; +} + +.oi-arrow-thick-left:before { + content:"\e010"; +} + +.oi-arrow-thick-right:before { + content:"\e011"; +} + +.oi-arrow-thick-top:before { + content:"\e012"; +} + +.oi-arrow-top:before { + content:"\e013"; +} + +.oi-audio-spectrum:before { + content:"\e014"; +} + +.oi-audio:before { + content:"\e015"; +} + +.oi-badge:before { + content:"\e016"; +} + +.oi-ban:before { + content:"\e017"; +} + +.oi-bar-chart:before { + content:"\e018"; +} + +.oi-basket:before { + content:"\e019"; +} + +.oi-battery-empty:before { + content:"\e01a"; +} + +.oi-battery-full:before { + content:"\e01b"; +} + +.oi-beaker:before { + content:"\e01c"; +} + +.oi-bell:before { + content:"\e01d"; +} + +.oi-bluetooth:before { + content:"\e01e"; +} + +.oi-bold:before { + content:"\e01f"; +} + +.oi-bolt:before { + content:"\e020"; +} + +.oi-book:before { + content:"\e021"; +} + +.oi-bookmark:before { + content:"\e022"; +} + +.oi-box:before { + content:"\e023"; +} + +.oi-briefcase:before { + content:"\e024"; +} + +.oi-british-pound:before { + content:"\e025"; +} + +.oi-browser:before { + content:"\e026"; +} + +.oi-brush:before { + content:"\e027"; +} + +.oi-bug:before { + content:"\e028"; +} + +.oi-bullhorn:before { + content:"\e029"; +} + +.oi-calculator:before { + content:"\e02a"; +} + +.oi-calendar:before { + content:"\e02b"; +} + +.oi-camera-slr:before { + content:"\e02c"; +} + +.oi-caret-bottom:before { + content:"\e02d"; +} + +.oi-caret-left:before { + content:"\e02e"; +} + +.oi-caret-right:before { + content:"\e02f"; +} + +.oi-caret-top:before { + content:"\e030"; +} + +.oi-cart:before { + content:"\e031"; +} + 
+.oi-chat:before { + content:"\e032"; +} + +.oi-check:before { + content:"\e033"; +} + +.oi-chevron-bottom:before { + content:"\e034"; +} + +.oi-chevron-left:before { + content:"\e035"; +} + +.oi-chevron-right:before { + content:"\e036"; +} + +.oi-chevron-top:before { + content:"\e037"; +} + +.oi-circle-check:before { + content:"\e038"; +} + +.oi-circle-x:before { + content:"\e039"; +} + +.oi-clipboard:before { + content:"\e03a"; +} + +.oi-clock:before { + content:"\e03b"; +} + +.oi-cloud-download:before { + content:"\e03c"; +} + +.oi-cloud-upload:before { + content:"\e03d"; +} + +.oi-cloud:before { + content:"\e03e"; +} + +.oi-cloudy:before { + content:"\e03f"; +} + +.oi-code:before { + content:"\e040"; +} + +.oi-cog:before { + content:"\e041"; +} + +.oi-collapse-down:before { + content:"\e042"; +} + +.oi-collapse-left:before { + content:"\e043"; +} + +.oi-collapse-right:before { + content:"\e044"; +} + +.oi-collapse-up:before { + content:"\e045"; +} + +.oi-command:before { + content:"\e046"; +} + +.oi-comment-square:before { + content:"\e047"; +} + +.oi-compass:before { + content:"\e048"; +} + +.oi-contrast:before { + content:"\e049"; +} + +.oi-copywriting:before { + content:"\e04a"; +} + +.oi-credit-card:before { + content:"\e04b"; +} + +.oi-crop:before { + content:"\e04c"; +} + +.oi-dashboard:before { + content:"\e04d"; +} + +.oi-data-transfer-download:before { + content:"\e04e"; +} + +.oi-data-transfer-upload:before { + content:"\e04f"; +} + +.oi-delete:before { + content:"\e050"; +} + +.oi-dial:before { + content:"\e051"; +} + +.oi-document:before { + content:"\e052"; +} + +.oi-dollar:before { + content:"\e053"; +} + +.oi-double-quote-sans-left:before { + content:"\e054"; +} + +.oi-double-quote-sans-right:before { + content:"\e055"; +} + +.oi-double-quote-serif-left:before { + content:"\e056"; +} + +.oi-double-quote-serif-right:before { + content:"\e057"; +} + +.oi-droplet:before { + content:"\e058"; +} + +.oi-eject:before { + content:"\e059"; +} + 
+.oi-elevator:before { + content:"\e05a"; +} + +.oi-ellipses:before { + content:"\e05b"; +} + +.oi-envelope-closed:before { + content:"\e05c"; +} + +.oi-envelope-open:before { + content:"\e05d"; +} + +.oi-euro:before { + content:"\e05e"; +} + +.oi-excerpt:before { + content:"\e05f"; +} + +.oi-expand-down:before { + content:"\e060"; +} + +.oi-expand-left:before { + content:"\e061"; +} + +.oi-expand-right:before { + content:"\e062"; +} + +.oi-expand-up:before { + content:"\e063"; +} + +.oi-external-link:before { + content:"\e064"; +} + +.oi-eye:before { + content:"\e065"; +} + +.oi-eyedropper:before { + content:"\e066"; +} + +.oi-file:before { + content:"\e067"; +} + +.oi-fire:before { + content:"\e068"; +} + +.oi-flag:before { + content:"\e069"; +} + +.oi-flash:before { + content:"\e06a"; +} + +.oi-folder:before { + content:"\e06b"; +} + +.oi-fork:before { + content:"\e06c"; +} + +.oi-fullscreen-enter:before { + content:"\e06d"; +} + +.oi-fullscreen-exit:before { + content:"\e06e"; +} + +.oi-globe:before { + content:"\e06f"; +} + +.oi-graph:before { + content:"\e070"; +} + +.oi-grid-four-up:before { + content:"\e071"; +} + +.oi-grid-three-up:before { + content:"\e072"; +} + +.oi-grid-two-up:before { + content:"\e073"; +} + +.oi-hard-drive:before { + content:"\e074"; +} + +.oi-header:before { + content:"\e075"; +} + +.oi-headphones:before { + content:"\e076"; +} + +.oi-heart:before { + content:"\e077"; +} + +.oi-home:before { + content:"\e078"; +} + +.oi-image:before { + content:"\e079"; +} + +.oi-inbox:before { + content:"\e07a"; +} + +.oi-infinity:before { + content:"\e07b"; +} + +.oi-info:before { + content:"\e07c"; +} + +.oi-italic:before { + content:"\e07d"; +} + +.oi-justify-center:before { + content:"\e07e"; +} + +.oi-justify-left:before { + content:"\e07f"; +} + +.oi-justify-right:before { + content:"\e080"; +} + +.oi-key:before { + content:"\e081"; +} + +.oi-laptop:before { + content:"\e082"; +} + +.oi-layers:before { + content:"\e083"; +} + 
+.oi-lightbulb:before { + content:"\e084"; +} + +.oi-link-broken:before { + content:"\e085"; +} + +.oi-link-intact:before { + content:"\e086"; +} + +.oi-list-rich:before { + content:"\e087"; +} + +.oi-list:before { + content:"\e088"; +} + +.oi-location:before { + content:"\e089"; +} + +.oi-lock-locked:before { + content:"\e08a"; +} + +.oi-lock-unlocked:before { + content:"\e08b"; +} + +.oi-loop-circular:before { + content:"\e08c"; +} + +.oi-loop-square:before { + content:"\e08d"; +} + +.oi-loop:before { + content:"\e08e"; +} + +.oi-magnifying-glass:before { + content:"\e08f"; +} + +.oi-map-marker:before { + content:"\e090"; +} + +.oi-map:before { + content:"\e091"; +} + +.oi-media-pause:before { + content:"\e092"; +} + +.oi-media-play:before { + content:"\e093"; +} + +.oi-media-record:before { + content:"\e094"; +} + +.oi-media-skip-backward:before { + content:"\e095"; +} + +.oi-media-skip-forward:before { + content:"\e096"; +} + +.oi-media-step-backward:before { + content:"\e097"; +} + +.oi-media-step-forward:before { + content:"\e098"; +} + +.oi-media-stop:before { + content:"\e099"; +} + +.oi-medical-cross:before { + content:"\e09a"; +} + +.oi-menu:before { + content:"\e09b"; +} + +.oi-microphone:before { + content:"\e09c"; +} + +.oi-minus:before { + content:"\e09d"; +} + +.oi-monitor:before { + content:"\e09e"; +} + +.oi-moon:before { + content:"\e09f"; +} + +.oi-move:before { + content:"\e0a0"; +} + +.oi-musical-note:before { + content:"\e0a1"; +} + +.oi-paperclip:before { + content:"\e0a2"; +} + +.oi-pencil:before { + content:"\e0a3"; +} + +.oi-people:before { + content:"\e0a4"; +} + +.oi-person:before { + content:"\e0a5"; +} + +.oi-phone:before { + content:"\e0a6"; +} + +.oi-pie-chart:before { + content:"\e0a7"; +} + +.oi-pin:before { + content:"\e0a8"; +} + +.oi-play-circle:before { + content:"\e0a9"; +} + +.oi-plus:before { + content:"\e0aa"; +} + +.oi-power-standby:before { + content:"\e0ab"; +} + +.oi-print:before { + content:"\e0ac"; +} + 
+.oi-project:before { + content:"\e0ad"; +} + +.oi-pulse:before { + content:"\e0ae"; +} + +.oi-puzzle-piece:before { + content:"\e0af"; +} + +.oi-question-mark:before { + content:"\e0b0"; +} + +.oi-rain:before { + content:"\e0b1"; +} + +.oi-random:before { + content:"\e0b2"; +} + +.oi-reload:before { + content:"\e0b3"; +} + +.oi-resize-both:before { + content:"\e0b4"; +} + +.oi-resize-height:before { + content:"\e0b5"; +} + +.oi-resize-width:before { + content:"\e0b6"; +} + +.oi-rss-alt:before { + content:"\e0b7"; +} + +.oi-rss:before { + content:"\e0b8"; +} + +.oi-script:before { + content:"\e0b9"; +} + +.oi-share-boxed:before { + content:"\e0ba"; +} + +.oi-share:before { + content:"\e0bb"; +} + +.oi-shield:before { + content:"\e0bc"; +} + +.oi-signal:before { + content:"\e0bd"; +} + +.oi-signpost:before { + content:"\e0be"; +} + +.oi-sort-ascending:before { + content:"\e0bf"; +} + +.oi-sort-descending:before { + content:"\e0c0"; +} + +.oi-spreadsheet:before { + content:"\e0c1"; +} + +.oi-star:before { + content:"\e0c2"; +} + +.oi-sun:before { + content:"\e0c3"; +} + +.oi-tablet:before { + content:"\e0c4"; +} + +.oi-tag:before { + content:"\e0c5"; +} + +.oi-tags:before { + content:"\e0c6"; +} + +.oi-target:before { + content:"\e0c7"; +} + +.oi-task:before { + content:"\e0c8"; +} + +.oi-terminal:before { + content:"\e0c9"; +} + +.oi-text:before { + content:"\e0ca"; +} + +.oi-thumb-down:before { + content:"\e0cb"; +} + +.oi-thumb-up:before { + content:"\e0cc"; +} + +.oi-timer:before { + content:"\e0cd"; +} + +.oi-transfer:before { + content:"\e0ce"; +} + +.oi-trash:before { + content:"\e0cf"; +} + +.oi-underline:before { + content:"\e0d0"; +} + +.oi-vertical-align-bottom:before { + content:"\e0d1"; +} + +.oi-vertical-align-center:before { + content:"\e0d2"; +} + +.oi-vertical-align-top:before { + content:"\e0d3"; +} + +.oi-video:before { + content:"\e0d4"; +} + +.oi-volume-high:before { + content:"\e0d5"; +} + +.oi-volume-low:before { + content:"\e0d6"; +} + 
+.oi-volume-off:before { + content:"\e0d7"; +} + +.oi-warning:before { + content:"\e0d8"; +} + +.oi-wifi:before { + content:"\e0d9"; +} + +.oi-wrench:before { + content:"\e0da"; +} + +.oi-x:before { + content:"\e0db"; +} + +.oi-yen:before { + content:"\e0dc"; +} + +.oi-zoom-in:before { + content:"\e0dd"; +} + +.oi-zoom-out:before { + content:"\e0de"; +} + diff --git a/static/open-iconic/css/open-iconic-bootstrap.min.css b/static/open-iconic/css/open-iconic-bootstrap.min.css new file mode 100644 index 00000000..4664f2e8 --- /dev/null +++ b/static/open-iconic/css/open-iconic-bootstrap.min.css @@ -0,0 +1 @@ +@font-face{font-family:Icons;src:url(../fonts/open-iconic.eot);src:url(../fonts/open-iconic.eot?#iconic-sm) format('embedded-opentype'),url(../fonts/open-iconic.woff) format('woff'),url(../fonts/open-iconic.ttf) format('truetype'),url(../fonts/open-iconic.otf) format('opentype'),url(../fonts/open-iconic.svg#iconic-sm) format('svg');font-weight:400;font-style:normal}.oi{position:relative;top:1px;display:inline-block;speak:none;font-family:Icons;font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.oi:empty:before{width:1em;text-align:center;box-sizing:content-box}.oi.oi-align-center:before{text-align:center}.oi.oi-align-left:before{text-align:left}.oi.oi-align-right:before{text-align:right}.oi.oi-flip-horizontal:before{-webkit-transform:scale(-1,1);-ms-transform:scale(-1,1);transform:scale(-1,1)}.oi.oi-flip-vertical:before{-webkit-transform:scale(1,-1);-ms-transform:scale(-1,1);transform:scale(1,-1)}.oi.oi-flip-horizontal-vertical:before{-webkit-transform:scale(-1,-1);-ms-transform:scale(-1,1);transform:scale(-1,-1)}.oi-account-login:before{content:'\e000'}.oi-account-logout:before{content:'\e001'}.oi-action-redo:before{content:'\e002'}.oi-action-undo:before{content:'\e003'}.oi-align-center:before{content:'\e004'}.oi-align-left:before{content:'\e005'}.oi-align-right:before{content:'\e006'}.oi-aperture:b
efore{content:'\e007'}.oi-arrow-bottom:before{content:'\e008'}.oi-arrow-circle-bottom:before{content:'\e009'}.oi-arrow-circle-left:before{content:'\e00a'}.oi-arrow-circle-right:before{content:'\e00b'}.oi-arrow-circle-top:before{content:'\e00c'}.oi-arrow-left:before{content:'\e00d'}.oi-arrow-right:before{content:'\e00e'}.oi-arrow-thick-bottom:before{content:'\e00f'}.oi-arrow-thick-left:before{content:'\e010'}.oi-arrow-thick-right:before{content:'\e011'}.oi-arrow-thick-top:before{content:'\e012'}.oi-arrow-top:before{content:'\e013'}.oi-audio-spectrum:before{content:'\e014'}.oi-audio:before{content:'\e015'}.oi-badge:before{content:'\e016'}.oi-ban:before{content:'\e017'}.oi-bar-chart:before{content:'\e018'}.oi-basket:before{content:'\e019'}.oi-battery-empty:before{content:'\e01a'}.oi-battery-full:before{content:'\e01b'}.oi-beaker:before{content:'\e01c'}.oi-bell:before{content:'\e01d'}.oi-bluetooth:before{content:'\e01e'}.oi-bold:before{content:'\e01f'}.oi-bolt:before{content:'\e020'}.oi-book:before{content:'\e021'}.oi-bookmark:before{content:'\e022'}.oi-box:before{content:'\e023'}.oi-briefcase:before{content:'\e024'}.oi-british-pound:before{content:'\e025'}.oi-browser:before{content:'\e026'}.oi-brush:before{content:'\e027'}.oi-bug:before{content:'\e028'}.oi-bullhorn:before{content:'\e029'}.oi-calculator:before{content:'\e02a'}.oi-calendar:before{content:'\e02b'}.oi-camera-slr:before{content:'\e02c'}.oi-caret-bottom:before{content:'\e02d'}.oi-caret-left:before{content:'\e02e'}.oi-caret-right:before{content:'\e02f'}.oi-caret-top:before{content:'\e030'}.oi-cart:before{content:'\e031'}.oi-chat:before{content:'\e032'}.oi-check:before{content:'\e033'}.oi-chevron-bottom:before{content:'\e034'}.oi-chevron-left:before{content:'\e035'}.oi-chevron-right:before{content:'\e036'}.oi-chevron-top:before{content:'\e037'}.oi-circle-check:before{content:'\e038'}.oi-circle-x:before{content:'\e039'}.oi-clipboard:before{content:'\e03a'}.oi-clock:before{content:'\e03b'}.oi-cloud-download:befo
re{content:'\e03c'}.oi-cloud-upload:before{content:'\e03d'}.oi-cloud:before{content:'\e03e'}.oi-cloudy:before{content:'\e03f'}.oi-code:before{content:'\e040'}.oi-cog:before{content:'\e041'}.oi-collapse-down:before{content:'\e042'}.oi-collapse-left:before{content:'\e043'}.oi-collapse-right:before{content:'\e044'}.oi-collapse-up:before{content:'\e045'}.oi-command:before{content:'\e046'}.oi-comment-square:before{content:'\e047'}.oi-compass:before{content:'\e048'}.oi-contrast:before{content:'\e049'}.oi-copywriting:before{content:'\e04a'}.oi-credit-card:before{content:'\e04b'}.oi-crop:before{content:'\e04c'}.oi-dashboard:before{content:'\e04d'}.oi-data-transfer-download:before{content:'\e04e'}.oi-data-transfer-upload:before{content:'\e04f'}.oi-delete:before{content:'\e050'}.oi-dial:before{content:'\e051'}.oi-document:before{content:'\e052'}.oi-dollar:before{content:'\e053'}.oi-double-quote-sans-left:before{content:'\e054'}.oi-double-quote-sans-right:before{content:'\e055'}.oi-double-quote-serif-left:before{content:'\e056'}.oi-double-quote-serif-right:before{content:'\e057'}.oi-droplet:before{content:'\e058'}.oi-eject:before{content:'\e059'}.oi-elevator:before{content:'\e05a'}.oi-ellipses:before{content:'\e05b'}.oi-envelope-closed:before{content:'\e05c'}.oi-envelope-open:before{content:'\e05d'}.oi-euro:before{content:'\e05e'}.oi-excerpt:before{content:'\e05f'}.oi-expand-down:before{content:'\e060'}.oi-expand-left:before{content:'\e061'}.oi-expand-right:before{content:'\e062'}.oi-expand-up:before{content:'\e063'}.oi-external-link:before{content:'\e064'}.oi-eye:before{content:'\e065'}.oi-eyedropper:before{content:'\e066'}.oi-file:before{content:'\e067'}.oi-fire:before{content:'\e068'}.oi-flag:before{content:'\e069'}.oi-flash:before{content:'\e06a'}.oi-folder:before{content:'\e06b'}.oi-fork:before{content:'\e06c'}.oi-fullscreen-enter:before{content:'\e06d'}.oi-fullscreen-exit:before{content:'\e06e'}.oi-globe:before{content:'\e06f'}.oi-graph:before{content:'\e070'}.oi-grid-fo
ur-up:before{content:'\e071'}.oi-grid-three-up:before{content:'\e072'}.oi-grid-two-up:before{content:'\e073'}.oi-hard-drive:before{content:'\e074'}.oi-header:before{content:'\e075'}.oi-headphones:before{content:'\e076'}.oi-heart:before{content:'\e077'}.oi-home:before{content:'\e078'}.oi-image:before{content:'\e079'}.oi-inbox:before{content:'\e07a'}.oi-infinity:before{content:'\e07b'}.oi-info:before{content:'\e07c'}.oi-italic:before{content:'\e07d'}.oi-justify-center:before{content:'\e07e'}.oi-justify-left:before{content:'\e07f'}.oi-justify-right:before{content:'\e080'}.oi-key:before{content:'\e081'}.oi-laptop:before{content:'\e082'}.oi-layers:before{content:'\e083'}.oi-lightbulb:before{content:'\e084'}.oi-link-broken:before{content:'\e085'}.oi-link-intact:before{content:'\e086'}.oi-list-rich:before{content:'\e087'}.oi-list:before{content:'\e088'}.oi-location:before{content:'\e089'}.oi-lock-locked:before{content:'\e08a'}.oi-lock-unlocked:before{content:'\e08b'}.oi-loop-circular:before{content:'\e08c'}.oi-loop-square:before{content:'\e08d'}.oi-loop:before{content:'\e08e'}.oi-magnifying-glass:before{content:'\e08f'}.oi-map-marker:before{content:'\e090'}.oi-map:before{content:'\e091'}.oi-media-pause:before{content:'\e092'}.oi-media-play:before{content:'\e093'}.oi-media-record:before{content:'\e094'}.oi-media-skip-backward:before{content:'\e095'}.oi-media-skip-forward:before{content:'\e096'}.oi-media-step-backward:before{content:'\e097'}.oi-media-step-forward:before{content:'\e098'}.oi-media-stop:before{content:'\e099'}.oi-medical-cross:before{content:'\e09a'}.oi-menu:before{content:'\e09b'}.oi-microphone:before{content:'\e09c'}.oi-minus:before{content:'\e09d'}.oi-monitor:before{content:'\e09e'}.oi-moon:before{content:'\e09f'}.oi-move:before{content:'\e0a0'}.oi-musical-note:before{content:'\e0a1'}.oi-paperclip:before{content:'\e0a2'}.oi-pencil:before{content:'\e0a3'}.oi-people:before{content:'\e0a4'}.oi-person:before{content:'\e0a5'}.oi-phone:before{content:'\e0a6'}.oi-p
ie-chart:before{content:'\e0a7'}.oi-pin:before{content:'\e0a8'}.oi-play-circle:before{content:'\e0a9'}.oi-plus:before{content:'\e0aa'}.oi-power-standby:before{content:'\e0ab'}.oi-print:before{content:'\e0ac'}.oi-project:before{content:'\e0ad'}.oi-pulse:before{content:'\e0ae'}.oi-puzzle-piece:before{content:'\e0af'}.oi-question-mark:before{content:'\e0b0'}.oi-rain:before{content:'\e0b1'}.oi-random:before{content:'\e0b2'}.oi-reload:before{content:'\e0b3'}.oi-resize-both:before{content:'\e0b4'}.oi-resize-height:before{content:'\e0b5'}.oi-resize-width:before{content:'\e0b6'}.oi-rss-alt:before{content:'\e0b7'}.oi-rss:before{content:'\e0b8'}.oi-script:before{content:'\e0b9'}.oi-share-boxed:before{content:'\e0ba'}.oi-share:before{content:'\e0bb'}.oi-shield:before{content:'\e0bc'}.oi-signal:before{content:'\e0bd'}.oi-signpost:before{content:'\e0be'}.oi-sort-ascending:before{content:'\e0bf'}.oi-sort-descending:before{content:'\e0c0'}.oi-spreadsheet:before{content:'\e0c1'}.oi-star:before{content:'\e0c2'}.oi-sun:before{content:'\e0c3'}.oi-tablet:before{content:'\e0c4'}.oi-tag:before{content:'\e0c5'}.oi-tags:before{content:'\e0c6'}.oi-target:before{content:'\e0c7'}.oi-task:before{content:'\e0c8'}.oi-terminal:before{content:'\e0c9'}.oi-text:before{content:'\e0ca'}.oi-thumb-down:before{content:'\e0cb'}.oi-thumb-up:before{content:'\e0cc'}.oi-timer:before{content:'\e0cd'}.oi-transfer:before{content:'\e0ce'}.oi-trash:before{content:'\e0cf'}.oi-underline:before{content:'\e0d0'}.oi-vertical-align-bottom:before{content:'\e0d1'}.oi-vertical-align-center:before{content:'\e0d2'}.oi-vertical-align-top:before{content:'\e0d3'}.oi-video:before{content:'\e0d4'}.oi-volume-high:before{content:'\e0d5'}.oi-volume-low:before{content:'\e0d6'}.oi-volume-off:before{content:'\e0d7'}.oi-warning:before{content:'\e0d8'}.oi-wifi:before{content:'\e0d9'}.oi-wrench:before{content:'\e0da'}.oi-x:before{content:'\e0db'}.oi-yen:before{content:'\e0dc'}.oi-zoom-in:before{content:'\e0dd'}.oi-zoom-out:before{content:
'\e0de'} \ No newline at end of file diff --git a/static/open-iconic/css/open-iconic-bootstrap.scss b/static/open-iconic/css/open-iconic-bootstrap.scss new file mode 100644 index 00000000..18f01e26 --- /dev/null +++ b/static/open-iconic/css/open-iconic-bootstrap.scss @@ -0,0 +1,958 @@ +/* Bootstrap */ + +/* Override Bootstrap default variable */ +$icon-font-path: '../fonts/' !default; + +@font-face { + font-family: 'Icons'; + src: url('#{$icon-font-path}open-iconic.eot'); + src: url('#{$icon-font-path}open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('#{$icon-font-path}open-iconic.woff') format('woff'), url('#{$icon-font-path}open-iconic.ttf') format('truetype'), url('#{$icon-font-path}open-iconic.svg#iconic-sm') format('svg'); + font-weight: normal; + font-style: normal; +} + +// Catchall baseclass +.oi { + position: relative; + top: 1px; + display: inline-block; + font-family: 'Icons'; + font-style: normal; + font-weight: normal; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + + + &:empty:before { + width: 1em; + text-align: center; + box-sizing: content-box; + } + + &.oi-align-center:before { + text-align: center; + } + + &.oi-align-left:before { + text-align: left; + } + + &.oi-align-right:before { + text-align: right; + } + + + &.oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); + } + + &.oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); + } + + &.oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); + } +} + + + +.oi-account-login:before { + content:'\e000'; +} + +.oi-account-logout:before { + content:'\e001'; +} + +.oi-action-redo:before { + content:'\e002'; +} + +.oi-action-undo:before { + content:'\e003'; +} + +.oi-align-center:before { + content:'\e004'; +} + +.oi-align-left:before 
{ + content:'\e005'; +} + +.oi-align-right:before { + content:'\e006'; +} + +.oi-aperture:before { + content:'\e007'; +} + +.oi-arrow-bottom:before { + content:'\e008'; +} + +.oi-arrow-circle-bottom:before { + content:'\e009'; +} + +.oi-arrow-circle-left:before { + content:'\e00a'; +} + +.oi-arrow-circle-right:before { + content:'\e00b'; +} + +.oi-arrow-circle-top:before { + content:'\e00c'; +} + +.oi-arrow-left:before { + content:'\e00d'; +} + +.oi-arrow-right:before { + content:'\e00e'; +} + +.oi-arrow-thick-bottom:before { + content:'\e00f'; +} + +.oi-arrow-thick-left:before { + content:'\e010'; +} + +.oi-arrow-thick-right:before { + content:'\e011'; +} + +.oi-arrow-thick-top:before { + content:'\e012'; +} + +.oi-arrow-top:before { + content:'\e013'; +} + +.oi-audio-spectrum:before { + content:'\e014'; +} + +.oi-audio:before { + content:'\e015'; +} + +.oi-badge:before { + content:'\e016'; +} + +.oi-ban:before { + content:'\e017'; +} + +.oi-bar-chart:before { + content:'\e018'; +} + +.oi-basket:before { + content:'\e019'; +} + +.oi-battery-empty:before { + content:'\e01a'; +} + +.oi-battery-full:before { + content:'\e01b'; +} + +.oi-beaker:before { + content:'\e01c'; +} + +.oi-bell:before { + content:'\e01d'; +} + +.oi-bluetooth:before { + content:'\e01e'; +} + +.oi-bold:before { + content:'\e01f'; +} + +.oi-bolt:before { + content:'\e020'; +} + +.oi-book:before { + content:'\e021'; +} + +.oi-bookmark:before { + content:'\e022'; +} + +.oi-box:before { + content:'\e023'; +} + +.oi-briefcase:before { + content:'\e024'; +} + +.oi-british-pound:before { + content:'\e025'; +} + +.oi-browser:before { + content:'\e026'; +} + +.oi-brush:before { + content:'\e027'; +} + +.oi-bug:before { + content:'\e028'; +} + +.oi-bullhorn:before { + content:'\e029'; +} + +.oi-calculator:before { + content:'\e02a'; +} + +.oi-calendar:before { + content:'\e02b'; +} + +.oi-camera-slr:before { + content:'\e02c'; +} + +.oi-caret-bottom:before { + content:'\e02d'; +} + +.oi-caret-left:before 
{ + content:'\e02e'; +} + +.oi-caret-right:before { + content:'\e02f'; +} + +.oi-caret-top:before { + content:'\e030'; +} + +.oi-cart:before { + content:'\e031'; +} + +.oi-chat:before { + content:'\e032'; +} + +.oi-check:before { + content:'\e033'; +} + +.oi-chevron-bottom:before { + content:'\e034'; +} + +.oi-chevron-left:before { + content:'\e035'; +} + +.oi-chevron-right:before { + content:'\e036'; +} + +.oi-chevron-top:before { + content:'\e037'; +} + +.oi-circle-check:before { + content:'\e038'; +} + +.oi-circle-x:before { + content:'\e039'; +} + +.oi-clipboard:before { + content:'\e03a'; +} + +.oi-clock:before { + content:'\e03b'; +} + +.oi-cloud-download:before { + content:'\e03c'; +} + +.oi-cloud-upload:before { + content:'\e03d'; +} + +.oi-cloud:before { + content:'\e03e'; +} + +.oi-cloudy:before { + content:'\e03f'; +} + +.oi-code:before { + content:'\e040'; +} + +.oi-cog:before { + content:'\e041'; +} + +.oi-collapse-down:before { + content:'\e042'; +} + +.oi-collapse-left:before { + content:'\e043'; +} + +.oi-collapse-right:before { + content:'\e044'; +} + +.oi-collapse-up:before { + content:'\e045'; +} + +.oi-command:before { + content:'\e046'; +} + +.oi-comment-square:before { + content:'\e047'; +} + +.oi-compass:before { + content:'\e048'; +} + +.oi-contrast:before { + content:'\e049'; +} + +.oi-copywriting:before { + content:'\e04a'; +} + +.oi-credit-card:before { + content:'\e04b'; +} + +.oi-crop:before { + content:'\e04c'; +} + +.oi-dashboard:before { + content:'\e04d'; +} + +.oi-data-transfer-download:before { + content:'\e04e'; +} + +.oi-data-transfer-upload:before { + content:'\e04f'; +} + +.oi-delete:before { + content:'\e050'; +} + +.oi-dial:before { + content:'\e051'; +} + +.oi-document:before { + content:'\e052'; +} + +.oi-dollar:before { + content:'\e053'; +} + +.oi-double-quote-sans-left:before { + content:'\e054'; +} + +.oi-double-quote-sans-right:before { + content:'\e055'; +} + +.oi-double-quote-serif-left:before { + content:'\e056'; 
+} + +.oi-double-quote-serif-right:before { + content:'\e057'; +} + +.oi-droplet:before { + content:'\e058'; +} + +.oi-eject:before { + content:'\e059'; +} + +.oi-elevator:before { + content:'\e05a'; +} + +.oi-ellipses:before { + content:'\e05b'; +} + +.oi-envelope-closed:before { + content:'\e05c'; +} + +.oi-envelope-open:before { + content:'\e05d'; +} + +.oi-euro:before { + content:'\e05e'; +} + +.oi-excerpt:before { + content:'\e05f'; +} + +.oi-expand-down:before { + content:'\e060'; +} + +.oi-expand-left:before { + content:'\e061'; +} + +.oi-expand-right:before { + content:'\e062'; +} + +.oi-expand-up:before { + content:'\e063'; +} + +.oi-external-link:before { + content:'\e064'; +} + +.oi-eye:before { + content:'\e065'; +} + +.oi-eyedropper:before { + content:'\e066'; +} + +.oi-file:before { + content:'\e067'; +} + +.oi-fire:before { + content:'\e068'; +} + +.oi-flag:before { + content:'\e069'; +} + +.oi-flash:before { + content:'\e06a'; +} + +.oi-folder:before { + content:'\e06b'; +} + +.oi-fork:before { + content:'\e06c'; +} + +.oi-fullscreen-enter:before { + content:'\e06d'; +} + +.oi-fullscreen-exit:before { + content:'\e06e'; +} + +.oi-globe:before { + content:'\e06f'; +} + +.oi-graph:before { + content:'\e070'; +} + +.oi-grid-four-up:before { + content:'\e071'; +} + +.oi-grid-three-up:before { + content:'\e072'; +} + +.oi-grid-two-up:before { + content:'\e073'; +} + +.oi-hard-drive:before { + content:'\e074'; +} + +.oi-header:before { + content:'\e075'; +} + +.oi-headphones:before { + content:'\e076'; +} + +.oi-heart:before { + content:'\e077'; +} + +.oi-home:before { + content:'\e078'; +} + +.oi-image:before { + content:'\e079'; +} + +.oi-inbox:before { + content:'\e07a'; +} + +.oi-infinity:before { + content:'\e07b'; +} + +.oi-info:before { + content:'\e07c'; +} + +.oi-italic:before { + content:'\e07d'; +} + +.oi-justify-center:before { + content:'\e07e'; +} + +.oi-justify-left:before { + content:'\e07f'; +} + +.oi-justify-right:before { + 
content:'\e080'; +} + +.oi-key:before { + content:'\e081'; +} + +.oi-laptop:before { + content:'\e082'; +} + +.oi-layers:before { + content:'\e083'; +} + +.oi-lightbulb:before { + content:'\e084'; +} + +.oi-link-broken:before { + content:'\e085'; +} + +.oi-link-intact:before { + content:'\e086'; +} + +.oi-list-rich:before { + content:'\e087'; +} + +.oi-list:before { + content:'\e088'; +} + +.oi-location:before { + content:'\e089'; +} + +.oi-lock-locked:before { + content:'\e08a'; +} + +.oi-lock-unlocked:before { + content:'\e08b'; +} + +.oi-loop-circular:before { + content:'\e08c'; +} + +.oi-loop-square:before { + content:'\e08d'; +} + +.oi-loop:before { + content:'\e08e'; +} + +.oi-magnifying-glass:before { + content:'\e08f'; +} + +.oi-map-marker:before { + content:'\e090'; +} + +.oi-map:before { + content:'\e091'; +} + +.oi-media-pause:before { + content:'\e092'; +} + +.oi-media-play:before { + content:'\e093'; +} + +.oi-media-record:before { + content:'\e094'; +} + +.oi-media-skip-backward:before { + content:'\e095'; +} + +.oi-media-skip-forward:before { + content:'\e096'; +} + +.oi-media-step-backward:before { + content:'\e097'; +} + +.oi-media-step-forward:before { + content:'\e098'; +} + +.oi-media-stop:before { + content:'\e099'; +} + +.oi-medical-cross:before { + content:'\e09a'; +} + +.oi-menu:before { + content:'\e09b'; +} + +.oi-microphone:before { + content:'\e09c'; +} + +.oi-minus:before { + content:'\e09d'; +} + +.oi-monitor:before { + content:'\e09e'; +} + +.oi-moon:before { + content:'\e09f'; +} + +.oi-move:before { + content:'\e0a0'; +} + +.oi-musical-note:before { + content:'\e0a1'; +} + +.oi-paperclip:before { + content:'\e0a2'; +} + +.oi-pencil:before { + content:'\e0a3'; +} + +.oi-people:before { + content:'\e0a4'; +} + +.oi-person:before { + content:'\e0a5'; +} + +.oi-phone:before { + content:'\e0a6'; +} + +.oi-pie-chart:before { + content:'\e0a7'; +} + +.oi-pin:before { + content:'\e0a8'; +} + +.oi-play-circle:before { + content:'\e0a9'; +} + 
+.oi-plus:before { + content:'\e0aa'; +} + +.oi-power-standby:before { + content:'\e0ab'; +} + +.oi-print:before { + content:'\e0ac'; +} + +.oi-project:before { + content:'\e0ad'; +} + +.oi-pulse:before { + content:'\e0ae'; +} + +.oi-puzzle-piece:before { + content:'\e0af'; +} + +.oi-question-mark:before { + content:'\e0b0'; +} + +.oi-rain:before { + content:'\e0b1'; +} + +.oi-random:before { + content:'\e0b2'; +} + +.oi-reload:before { + content:'\e0b3'; +} + +.oi-resize-both:before { + content:'\e0b4'; +} + +.oi-resize-height:before { + content:'\e0b5'; +} + +.oi-resize-width:before { + content:'\e0b6'; +} + +.oi-rss-alt:before { + content:'\e0b7'; +} + +.oi-rss:before { + content:'\e0b8'; +} + +.oi-script:before { + content:'\e0b9'; +} + +.oi-share-boxed:before { + content:'\e0ba'; +} + +.oi-share:before { + content:'\e0bb'; +} + +.oi-shield:before { + content:'\e0bc'; +} + +.oi-signal:before { + content:'\e0bd'; +} + +.oi-signpost:before { + content:'\e0be'; +} + +.oi-sort-ascending:before { + content:'\e0bf'; +} + +.oi-sort-descending:before { + content:'\e0c0'; +} + +.oi-spreadsheet:before { + content:'\e0c1'; +} + +.oi-star:before { + content:'\e0c2'; +} + +.oi-sun:before { + content:'\e0c3'; +} + +.oi-tablet:before { + content:'\e0c4'; +} + +.oi-tag:before { + content:'\e0c5'; +} + +.oi-tags:before { + content:'\e0c6'; +} + +.oi-target:before { + content:'\e0c7'; +} + +.oi-task:before { + content:'\e0c8'; +} + +.oi-terminal:before { + content:'\e0c9'; +} + +.oi-text:before { + content:'\e0ca'; +} + +.oi-thumb-down:before { + content:'\e0cb'; +} + +.oi-thumb-up:before { + content:'\e0cc'; +} + +.oi-timer:before { + content:'\e0cd'; +} + +.oi-transfer:before { + content:'\e0ce'; +} + +.oi-trash:before { + content:'\e0cf'; +} + +.oi-underline:before { + content:'\e0d0'; +} + +.oi-vertical-align-bottom:before { + content:'\e0d1'; +} + +.oi-vertical-align-center:before { + content:'\e0d2'; +} + +.oi-vertical-align-top:before { + content:'\e0d3'; +} + 
+.oi-video:before { + content:'\e0d4'; +} + +.oi-volume-high:before { + content:'\e0d5'; +} + +.oi-volume-low:before { + content:'\e0d6'; +} + +.oi-volume-off:before { + content:'\e0d7'; +} + +.oi-warning:before { + content:'\e0d8'; +} + +.oi-wifi:before { + content:'\e0d9'; +} + +.oi-wrench:before { + content:'\e0da'; +} + +.oi-x:before { + content:'\e0db'; +} + +.oi-yen:before { + content:'\e0dc'; +} + +.oi-zoom-in:before { + content:'\e0dd'; +} + +.oi-zoom-out:before { + content:'\e0de'; +} + diff --git a/static/open-iconic/css/open-iconic-bootstrap.styl b/static/open-iconic/css/open-iconic-bootstrap.styl new file mode 100644 index 00000000..0afa2548 --- /dev/null +++ b/static/open-iconic/css/open-iconic-bootstrap.styl @@ -0,0 +1,954 @@ +/* Bootstrap */ + +@font-face + font-family 'Icons' + src url('../fonts/open-iconic.eot') + src url('../fonts/open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('../fonts/open-iconic.woff') format('woff'), url('../fonts/open-iconic.ttf') format('truetype'), url('../fonts/open-iconic.svg#iconic-sm') format('svg') + font-weight normal + font-style normal + + +// Catchall baseclass +.oi + position relative + top 1px + display inline-block + font-family 'Icons' + font-style normal + font-weight normal + line-height 1 + -webkit-font-smoothing antialiased + -moz-osx-font-smoothing grayscale + + + &:empty:before + width 1em + text-align center + box-sizing content-box + + &.oi-align-center:before + text-align center + + + &.oi-align-left:before + text-align left + + + &.oi-align-right:before + text-align right + + + + &.oi-flip-horizontal:before + -webkit-transform scale(-1, 1) + -ms-transform scale(-1, 1) + transform scale(-1, 1) + + + &.oi-flip-vertical:before + -webkit-transform scale(1, -1) + -ms-transform scale(-1, 1) + transform scale(1, -1) + + + &.oi-flip-horizontal-vertical:before + -webkit-transform scale(-1, -1) + -ms-transform scale(-1, 1) + transform scale(-1, -1) + + + + + +.oi-account-login:before { + 
content'\e000' +} + +.oi-account-logout:before { + content'\e001' +} + +.oi-action-redo:before { + content'\e002' +} + +.oi-action-undo:before { + content'\e003' +} + +.oi-align-center:before { + content'\e004' +} + +.oi-align-left:before { + content'\e005' +} + +.oi-align-right:before { + content'\e006' +} + +.oi-aperture:before { + content'\e007' +} + +.oi-arrow-bottom:before { + content'\e008' +} + +.oi-arrow-circle-bottom:before { + content'\e009' +} + +.oi-arrow-circle-left:before { + content'\e00a' +} + +.oi-arrow-circle-right:before { + content'\e00b' +} + +.oi-arrow-circle-top:before { + content'\e00c' +} + +.oi-arrow-left:before { + content'\e00d' +} + +.oi-arrow-right:before { + content'\e00e' +} + +.oi-arrow-thick-bottom:before { + content'\e00f' +} + +.oi-arrow-thick-left:before { + content'\e010' +} + +.oi-arrow-thick-right:before { + content'\e011' +} + +.oi-arrow-thick-top:before { + content'\e012' +} + +.oi-arrow-top:before { + content'\e013' +} + +.oi-audio-spectrum:before { + content'\e014' +} + +.oi-audio:before { + content'\e015' +} + +.oi-badge:before { + content'\e016' +} + +.oi-ban:before { + content'\e017' +} + +.oi-bar-chart:before { + content'\e018' +} + +.oi-basket:before { + content'\e019' +} + +.oi-battery-empty:before { + content'\e01a' +} + +.oi-battery-full:before { + content'\e01b' +} + +.oi-beaker:before { + content'\e01c' +} + +.oi-bell:before { + content'\e01d' +} + +.oi-bluetooth:before { + content'\e01e' +} + +.oi-bold:before { + content'\e01f' +} + +.oi-bolt:before { + content'\e020' +} + +.oi-book:before { + content'\e021' +} + +.oi-bookmark:before { + content'\e022' +} + +.oi-box:before { + content'\e023' +} + +.oi-briefcase:before { + content'\e024' +} + +.oi-british-pound:before { + content'\e025' +} + +.oi-browser:before { + content'\e026' +} + +.oi-brush:before { + content'\e027' +} + +.oi-bug:before { + content'\e028' +} + +.oi-bullhorn:before { + content'\e029' +} + +.oi-calculator:before { + content'\e02a' +} + 
+.oi-calendar:before { + content'\e02b' +} + +.oi-camera-slr:before { + content'\e02c' +} + +.oi-caret-bottom:before { + content'\e02d' +} + +.oi-caret-left:before { + content'\e02e' +} + +.oi-caret-right:before { + content'\e02f' +} + +.oi-caret-top:before { + content'\e030' +} + +.oi-cart:before { + content'\e031' +} + +.oi-chat:before { + content'\e032' +} + +.oi-check:before { + content'\e033' +} + +.oi-chevron-bottom:before { + content'\e034' +} + +.oi-chevron-left:before { + content'\e035' +} + +.oi-chevron-right:before { + content'\e036' +} + +.oi-chevron-top:before { + content'\e037' +} + +.oi-circle-check:before { + content'\e038' +} + +.oi-circle-x:before { + content'\e039' +} + +.oi-clipboard:before { + content'\e03a' +} + +.oi-clock:before { + content'\e03b' +} + +.oi-cloud-download:before { + content'\e03c' +} + +.oi-cloud-upload:before { + content'\e03d' +} + +.oi-cloud:before { + content'\e03e' +} + +.oi-cloudy:before { + content'\e03f' +} + +.oi-code:before { + content'\e040' +} + +.oi-cog:before { + content'\e041' +} + +.oi-collapse-down:before { + content'\e042' +} + +.oi-collapse-left:before { + content'\e043' +} + +.oi-collapse-right:before { + content'\e044' +} + +.oi-collapse-up:before { + content'\e045' +} + +.oi-command:before { + content'\e046' +} + +.oi-comment-square:before { + content'\e047' +} + +.oi-compass:before { + content'\e048' +} + +.oi-contrast:before { + content'\e049' +} + +.oi-copywriting:before { + content'\e04a' +} + +.oi-credit-card:before { + content'\e04b' +} + +.oi-crop:before { + content'\e04c' +} + +.oi-dashboard:before { + content'\e04d' +} + +.oi-data-transfer-download:before { + content'\e04e' +} + +.oi-data-transfer-upload:before { + content'\e04f' +} + +.oi-delete:before { + content'\e050' +} + +.oi-dial:before { + content'\e051' +} + +.oi-document:before { + content'\e052' +} + +.oi-dollar:before { + content'\e053' +} + +.oi-double-quote-sans-left:before { + content'\e054' +} + 
+.oi-double-quote-sans-right:before { + content'\e055' +} + +.oi-double-quote-serif-left:before { + content'\e056' +} + +.oi-double-quote-serif-right:before { + content'\e057' +} + +.oi-droplet:before { + content'\e058' +} + +.oi-eject:before { + content'\e059' +} + +.oi-elevator:before { + content'\e05a' +} + +.oi-ellipses:before { + content'\e05b' +} + +.oi-envelope-closed:before { + content'\e05c' +} + +.oi-envelope-open:before { + content'\e05d' +} + +.oi-euro:before { + content'\e05e' +} + +.oi-excerpt:before { + content'\e05f' +} + +.oi-expand-down:before { + content'\e060' +} + +.oi-expand-left:before { + content'\e061' +} + +.oi-expand-right:before { + content'\e062' +} + +.oi-expand-up:before { + content'\e063' +} + +.oi-external-link:before { + content'\e064' +} + +.oi-eye:before { + content'\e065' +} + +.oi-eyedropper:before { + content'\e066' +} + +.oi-file:before { + content'\e067' +} + +.oi-fire:before { + content'\e068' +} + +.oi-flag:before { + content'\e069' +} + +.oi-flash:before { + content'\e06a' +} + +.oi-folder:before { + content'\e06b' +} + +.oi-fork:before { + content'\e06c' +} + +.oi-fullscreen-enter:before { + content'\e06d' +} + +.oi-fullscreen-exit:before { + content'\e06e' +} + +.oi-globe:before { + content'\e06f' +} + +.oi-graph:before { + content'\e070' +} + +.oi-grid-four-up:before { + content'\e071' +} + +.oi-grid-three-up:before { + content'\e072' +} + +.oi-grid-two-up:before { + content'\e073' +} + +.oi-hard-drive:before { + content'\e074' +} + +.oi-header:before { + content'\e075' +} + +.oi-headphones:before { + content'\e076' +} + +.oi-heart:before { + content'\e077' +} + +.oi-home:before { + content'\e078' +} + +.oi-image:before { + content'\e079' +} + +.oi-inbox:before { + content'\e07a' +} + +.oi-infinity:before { + content'\e07b' +} + +.oi-info:before { + content'\e07c' +} + +.oi-italic:before { + content'\e07d' +} + +.oi-justify-center:before { + content'\e07e' +} + +.oi-justify-left:before { + content'\e07f' +} + 
+.oi-justify-right:before { + content'\e080' +} + +.oi-key:before { + content'\e081' +} + +.oi-laptop:before { + content'\e082' +} + +.oi-layers:before { + content'\e083' +} + +.oi-lightbulb:before { + content'\e084' +} + +.oi-link-broken:before { + content'\e085' +} + +.oi-link-intact:before { + content'\e086' +} + +.oi-list-rich:before { + content'\e087' +} + +.oi-list:before { + content'\e088' +} + +.oi-location:before { + content'\e089' +} + +.oi-lock-locked:before { + content'\e08a' +} + +.oi-lock-unlocked:before { + content'\e08b' +} + +.oi-loop-circular:before { + content'\e08c' +} + +.oi-loop-square:before { + content'\e08d' +} + +.oi-loop:before { + content'\e08e' +} + +.oi-magnifying-glass:before { + content'\e08f' +} + +.oi-map-marker:before { + content'\e090' +} + +.oi-map:before { + content'\e091' +} + +.oi-media-pause:before { + content'\e092' +} + +.oi-media-play:before { + content'\e093' +} + +.oi-media-record:before { + content'\e094' +} + +.oi-media-skip-backward:before { + content'\e095' +} + +.oi-media-skip-forward:before { + content'\e096' +} + +.oi-media-step-backward:before { + content'\e097' +} + +.oi-media-step-forward:before { + content'\e098' +} + +.oi-media-stop:before { + content'\e099' +} + +.oi-medical-cross:before { + content'\e09a' +} + +.oi-menu:before { + content'\e09b' +} + +.oi-microphone:before { + content'\e09c' +} + +.oi-minus:before { + content'\e09d' +} + +.oi-monitor:before { + content'\e09e' +} + +.oi-moon:before { + content'\e09f' +} + +.oi-move:before { + content'\e0a0' +} + +.oi-musical-note:before { + content'\e0a1' +} + +.oi-paperclip:before { + content'\e0a2' +} + +.oi-pencil:before { + content'\e0a3' +} + +.oi-people:before { + content'\e0a4' +} + +.oi-person:before { + content'\e0a5' +} + +.oi-phone:before { + content'\e0a6' +} + +.oi-pie-chart:before { + content'\e0a7' +} + +.oi-pin:before { + content'\e0a8' +} + +.oi-play-circle:before { + content'\e0a9' +} + +.oi-plus:before { + content'\e0aa' +} + 
+.oi-power-standby:before { + content'\e0ab' +} + +.oi-print:before { + content'\e0ac' +} + +.oi-project:before { + content'\e0ad' +} + +.oi-pulse:before { + content'\e0ae' +} + +.oi-puzzle-piece:before { + content'\e0af' +} + +.oi-question-mark:before { + content'\e0b0' +} + +.oi-rain:before { + content'\e0b1' +} + +.oi-random:before { + content'\e0b2' +} + +.oi-reload:before { + content'\e0b3' +} + +.oi-resize-both:before { + content'\e0b4' +} + +.oi-resize-height:before { + content'\e0b5' +} + +.oi-resize-width:before { + content'\e0b6' +} + +.oi-rss-alt:before { + content'\e0b7' +} + +.oi-rss:before { + content'\e0b8' +} + +.oi-script:before { + content'\e0b9' +} + +.oi-share-boxed:before { + content'\e0ba' +} + +.oi-share:before { + content'\e0bb' +} + +.oi-shield:before { + content'\e0bc' +} + +.oi-signal:before { + content'\e0bd' +} + +.oi-signpost:before { + content'\e0be' +} + +.oi-sort-ascending:before { + content'\e0bf' +} + +.oi-sort-descending:before { + content'\e0c0' +} + +.oi-spreadsheet:before { + content'\e0c1' +} + +.oi-star:before { + content'\e0c2' +} + +.oi-sun:before { + content'\e0c3' +} + +.oi-tablet:before { + content'\e0c4' +} + +.oi-tag:before { + content'\e0c5' +} + +.oi-tags:before { + content'\e0c6' +} + +.oi-target:before { + content'\e0c7' +} + +.oi-task:before { + content'\e0c8' +} + +.oi-terminal:before { + content'\e0c9' +} + +.oi-text:before { + content'\e0ca' +} + +.oi-thumb-down:before { + content'\e0cb' +} + +.oi-thumb-up:before { + content'\e0cc' +} + +.oi-timer:before { + content'\e0cd' +} + +.oi-transfer:before { + content'\e0ce' +} + +.oi-trash:before { + content'\e0cf' +} + +.oi-underline:before { + content'\e0d0' +} + +.oi-vertical-align-bottom:before { + content'\e0d1' +} + +.oi-vertical-align-center:before { + content'\e0d2' +} + +.oi-vertical-align-top:before { + content'\e0d3' +} + +.oi-video:before { + content'\e0d4' +} + +.oi-volume-high:before { + content'\e0d5' +} + +.oi-volume-low:before { + content'\e0d6' +} + 
+.oi-volume-off:before { + content'\e0d7' +} + +.oi-warning:before { + content'\e0d8' +} + +.oi-wifi:before { + content'\e0d9' +} + +.oi-wrench:before { + content'\e0da' +} + +.oi-x:before { + content'\e0db' +} + +.oi-yen:before { + content'\e0dc' +} + +.oi-zoom-in:before { + content'\e0dd' +} + +.oi-zoom-out:before { + content'\e0de' +} + diff --git a/static/open-iconic/css/open-iconic-foundation.css b/static/open-iconic/css/open-iconic-foundation.css new file mode 100644 index 00000000..905a8212 --- /dev/null +++ b/static/open-iconic/css/open-iconic-foundation.css @@ -0,0 +1,1395 @@ +/* Foundation */ + +@font-face { + font-family: 'Icons'; + src: url('../fonts/open-iconic.eot'); + src: url('../fonts/open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('../fonts/open-iconic.woff') format('woff'), url('../fonts/open-iconic.ttf') format('truetype'), url('../fonts/open-iconic.otf') format('opentype'), url('../fonts/open-iconic.svg#iconic-sm') format('svg'); + font-weight: normal; + font-style: normal; +} + + +.fi-account-login:before, + +.fi-account-logout:before, + +.fi-action-redo:before, + +.fi-action-undo:before, + +.fi-align-center:before, + +.fi-align-left:before, + +.fi-align-right:before, + +.fi-aperture:before, + +.fi-arrow-bottom:before, + +.fi-arrow-circle-bottom:before, + +.fi-arrow-circle-left:before, + +.fi-arrow-circle-right:before, + +.fi-arrow-circle-top:before, + +.fi-arrow-left:before, + +.fi-arrow-right:before, + +.fi-arrow-thick-bottom:before, + +.fi-arrow-thick-left:before, + +.fi-arrow-thick-right:before, + +.fi-arrow-thick-top:before, + +.fi-arrow-top:before, + +.fi-audio-spectrum:before, + +.fi-audio:before, + +.fi-badge:before, + +.fi-ban:before, + +.fi-bar-chart:before, + +.fi-basket:before, + +.fi-battery-empty:before, + +.fi-battery-full:before, + +.fi-beaker:before, + +.fi-bell:before, + +.fi-bluetooth:before, + +.fi-bold:before, + +.fi-bolt:before, + +.fi-book:before, + +.fi-bookmark:before, + +.fi-box:before, + 
+.fi-briefcase:before, + +.fi-british-pound:before, + +.fi-browser:before, + +.fi-brush:before, + +.fi-bug:before, + +.fi-bullhorn:before, + +.fi-calculator:before, + +.fi-calendar:before, + +.fi-camera-slr:before, + +.fi-caret-bottom:before, + +.fi-caret-left:before, + +.fi-caret-right:before, + +.fi-caret-top:before, + +.fi-cart:before, + +.fi-chat:before, + +.fi-check:before, + +.fi-chevron-bottom:before, + +.fi-chevron-left:before, + +.fi-chevron-right:before, + +.fi-chevron-top:before, + +.fi-circle-check:before, + +.fi-circle-x:before, + +.fi-clipboard:before, + +.fi-clock:before, + +.fi-cloud-download:before, + +.fi-cloud-upload:before, + +.fi-cloud:before, + +.fi-cloudy:before, + +.fi-code:before, + +.fi-cog:before, + +.fi-collapse-down:before, + +.fi-collapse-left:before, + +.fi-collapse-right:before, + +.fi-collapse-up:before, + +.fi-command:before, + +.fi-comment-square:before, + +.fi-compass:before, + +.fi-contrast:before, + +.fi-copywriting:before, + +.fi-credit-card:before, + +.fi-crop:before, + +.fi-dashboard:before, + +.fi-data-transfer-download:before, + +.fi-data-transfer-upload:before, + +.fi-delete:before, + +.fi-dial:before, + +.fi-document:before, + +.fi-dollar:before, + +.fi-double-quote-sans-left:before, + +.fi-double-quote-sans-right:before, + +.fi-double-quote-serif-left:before, + +.fi-double-quote-serif-right:before, + +.fi-droplet:before, + +.fi-eject:before, + +.fi-elevator:before, + +.fi-ellipses:before, + +.fi-envelope-closed:before, + +.fi-envelope-open:before, + +.fi-euro:before, + +.fi-excerpt:before, + +.fi-expand-down:before, + +.fi-expand-left:before, + +.fi-expand-right:before, + +.fi-expand-up:before, + +.fi-external-link:before, + +.fi-eye:before, + +.fi-eyedropper:before, + +.fi-file:before, + +.fi-fire:before, + +.fi-flag:before, + +.fi-flash:before, + +.fi-folder:before, + +.fi-fork:before, + +.fi-fullscreen-enter:before, + +.fi-fullscreen-exit:before, + +.fi-globe:before, + +.fi-graph:before, + +.fi-grid-four-up:before, + 
+.fi-grid-three-up:before, + +.fi-grid-two-up:before, + +.fi-hard-drive:before, + +.fi-header:before, + +.fi-headphones:before, + +.fi-heart:before, + +.fi-home:before, + +.fi-image:before, + +.fi-inbox:before, + +.fi-infinity:before, + +.fi-info:before, + +.fi-italic:before, + +.fi-justify-center:before, + +.fi-justify-left:before, + +.fi-justify-right:before, + +.fi-key:before, + +.fi-laptop:before, + +.fi-layers:before, + +.fi-lightbulb:before, + +.fi-link-broken:before, + +.fi-link-intact:before, + +.fi-list-rich:before, + +.fi-list:before, + +.fi-location:before, + +.fi-lock-locked:before, + +.fi-lock-unlocked:before, + +.fi-loop-circular:before, + +.fi-loop-square:before, + +.fi-loop:before, + +.fi-magnifying-glass:before, + +.fi-map-marker:before, + +.fi-map:before, + +.fi-media-pause:before, + +.fi-media-play:before, + +.fi-media-record:before, + +.fi-media-skip-backward:before, + +.fi-media-skip-forward:before, + +.fi-media-step-backward:before, + +.fi-media-step-forward:before, + +.fi-media-stop:before, + +.fi-medical-cross:before, + +.fi-menu:before, + +.fi-microphone:before, + +.fi-minus:before, + +.fi-monitor:before, + +.fi-moon:before, + +.fi-move:before, + +.fi-musical-note:before, + +.fi-paperclip:before, + +.fi-pencil:before, + +.fi-people:before, + +.fi-person:before, + +.fi-phone:before, + +.fi-pie-chart:before, + +.fi-pin:before, + +.fi-play-circle:before, + +.fi-plus:before, + +.fi-power-standby:before, + +.fi-print:before, + +.fi-project:before, + +.fi-pulse:before, + +.fi-puzzle-piece:before, + +.fi-question-mark:before, + +.fi-rain:before, + +.fi-random:before, + +.fi-reload:before, + +.fi-resize-both:before, + +.fi-resize-height:before, + +.fi-resize-width:before, + +.fi-rss-alt:before, + +.fi-rss:before, + +.fi-script:before, + +.fi-share-boxed:before, + +.fi-share:before, + +.fi-shield:before, + +.fi-signal:before, + +.fi-signpost:before, + +.fi-sort-ascending:before, + +.fi-sort-descending:before, + +.fi-spreadsheet:before, + 
+.fi-star:before, + +.fi-sun:before, + +.fi-tablet:before, + +.fi-tag:before, + +.fi-tags:before, + +.fi-target:before, + +.fi-task:before, + +.fi-terminal:before, + +.fi-text:before, + +.fi-thumb-down:before, + +.fi-thumb-up:before, + +.fi-timer:before, + +.fi-transfer:before, + +.fi-trash:before, + +.fi-underline:before, + +.fi-vertical-align-bottom:before, + +.fi-vertical-align-center:before, + +.fi-vertical-align-top:before, + +.fi-video:before, + +.fi-volume-high:before, + +.fi-volume-low:before, + +.fi-volume-off:before, + +.fi-warning:before, + +.fi-wifi:before, + +.fi-wrench:before, + +.fi-x:before, + +.fi-yen:before, + +.fi-zoom-in:before, + +.fi-zoom-out:before + { + font-family: 'Icons'; + font-style: normal; + font-weight: normal; + font-variant: normal; + text-transform: none; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + display: inline-block; + text-decoration: inherit; +} + + +[class*='fi-'].oi-align-center:before { + text-align: center; +} + +[class*='fi-'].oi-align-left:before { + text-align: left; +} + +[class*='fi-'].oi-align-right:before { + text-align: right; +} + + +[class*='fi-'].oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); +} + +[class*='fi-'].oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); +} + +[class*='fi-'].oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); +} + + + +.fi-account-login:before { + content:'\e000'; +} + +.fi-account-logout:before { + content:'\e001'; +} + +.fi-action-redo:before { + content:'\e002'; +} + +.fi-action-undo:before { + content:'\e003'; +} + +.fi-align-center:before { + content:'\e004'; +} + +.fi-align-left:before { + content:'\e005'; +} + +.fi-align-right:before { + content:'\e006'; +} + +.fi-aperture:before { + content:'\e007'; +} + 
+.fi-arrow-bottom:before { + content:'\e008'; +} + +.fi-arrow-circle-bottom:before { + content:'\e009'; +} + +.fi-arrow-circle-left:before { + content:'\e00a'; +} + +.fi-arrow-circle-right:before { + content:'\e00b'; +} + +.fi-arrow-circle-top:before { + content:'\e00c'; +} + +.fi-arrow-left:before { + content:'\e00d'; +} + +.fi-arrow-right:before { + content:'\e00e'; +} + +.fi-arrow-thick-bottom:before { + content:'\e00f'; +} + +.fi-arrow-thick-left:before { + content:'\e010'; +} + +.fi-arrow-thick-right:before { + content:'\e011'; +} + +.fi-arrow-thick-top:before { + content:'\e012'; +} + +.fi-arrow-top:before { + content:'\e013'; +} + +.fi-audio-spectrum:before { + content:'\e014'; +} + +.fi-audio:before { + content:'\e015'; +} + +.fi-badge:before { + content:'\e016'; +} + +.fi-ban:before { + content:'\e017'; +} + +.fi-bar-chart:before { + content:'\e018'; +} + +.fi-basket:before { + content:'\e019'; +} + +.fi-battery-empty:before { + content:'\e01a'; +} + +.fi-battery-full:before { + content:'\e01b'; +} + +.fi-beaker:before { + content:'\e01c'; +} + +.fi-bell:before { + content:'\e01d'; +} + +.fi-bluetooth:before { + content:'\e01e'; +} + +.fi-bold:before { + content:'\e01f'; +} + +.fi-bolt:before { + content:'\e020'; +} + +.fi-book:before { + content:'\e021'; +} + +.fi-bookmark:before { + content:'\e022'; +} + +.fi-box:before { + content:'\e023'; +} + +.fi-briefcase:before { + content:'\e024'; +} + +.fi-british-pound:before { + content:'\e025'; +} + +.fi-browser:before { + content:'\e026'; +} + +.fi-brush:before { + content:'\e027'; +} + +.fi-bug:before { + content:'\e028'; +} + +.fi-bullhorn:before { + content:'\e029'; +} + +.fi-calculator:before { + content:'\e02a'; +} + +.fi-calendar:before { + content:'\e02b'; +} + +.fi-camera-slr:before { + content:'\e02c'; +} + +.fi-caret-bottom:before { + content:'\e02d'; +} + +.fi-caret-left:before { + content:'\e02e'; +} + +.fi-caret-right:before { + content:'\e02f'; +} + +.fi-caret-top:before { + content:'\e030'; +} 
+ +.fi-cart:before { + content:'\e031'; +} + +.fi-chat:before { + content:'\e032'; +} + +.fi-check:before { + content:'\e033'; +} + +.fi-chevron-bottom:before { + content:'\e034'; +} + +.fi-chevron-left:before { + content:'\e035'; +} + +.fi-chevron-right:before { + content:'\e036'; +} + +.fi-chevron-top:before { + content:'\e037'; +} + +.fi-circle-check:before { + content:'\e038'; +} + +.fi-circle-x:before { + content:'\e039'; +} + +.fi-clipboard:before { + content:'\e03a'; +} + +.fi-clock:before { + content:'\e03b'; +} + +.fi-cloud-download:before { + content:'\e03c'; +} + +.fi-cloud-upload:before { + content:'\e03d'; +} + +.fi-cloud:before { + content:'\e03e'; +} + +.fi-cloudy:before { + content:'\e03f'; +} + +.fi-code:before { + content:'\e040'; +} + +.fi-cog:before { + content:'\e041'; +} + +.fi-collapse-down:before { + content:'\e042'; +} + +.fi-collapse-left:before { + content:'\e043'; +} + +.fi-collapse-right:before { + content:'\e044'; +} + +.fi-collapse-up:before { + content:'\e045'; +} + +.fi-command:before { + content:'\e046'; +} + +.fi-comment-square:before { + content:'\e047'; +} + +.fi-compass:before { + content:'\e048'; +} + +.fi-contrast:before { + content:'\e049'; +} + +.fi-copywriting:before { + content:'\e04a'; +} + +.fi-credit-card:before { + content:'\e04b'; +} + +.fi-crop:before { + content:'\e04c'; +} + +.fi-dashboard:before { + content:'\e04d'; +} + +.fi-data-transfer-download:before { + content:'\e04e'; +} + +.fi-data-transfer-upload:before { + content:'\e04f'; +} + +.fi-delete:before { + content:'\e050'; +} + +.fi-dial:before { + content:'\e051'; +} + +.fi-document:before { + content:'\e052'; +} + +.fi-dollar:before { + content:'\e053'; +} + +.fi-double-quote-sans-left:before { + content:'\e054'; +} + +.fi-double-quote-sans-right:before { + content:'\e055'; +} + +.fi-double-quote-serif-left:before { + content:'\e056'; +} + +.fi-double-quote-serif-right:before { + content:'\e057'; +} + +.fi-droplet:before { + content:'\e058'; +} + 
+.fi-eject:before { + content:'\e059'; +} + +.fi-elevator:before { + content:'\e05a'; +} + +.fi-ellipses:before { + content:'\e05b'; +} + +.fi-envelope-closed:before { + content:'\e05c'; +} + +.fi-envelope-open:before { + content:'\e05d'; +} + +.fi-euro:before { + content:'\e05e'; +} + +.fi-excerpt:before { + content:'\e05f'; +} + +.fi-expand-down:before { + content:'\e060'; +} + +.fi-expand-left:before { + content:'\e061'; +} + +.fi-expand-right:before { + content:'\e062'; +} + +.fi-expand-up:before { + content:'\e063'; +} + +.fi-external-link:before { + content:'\e064'; +} + +.fi-eye:before { + content:'\e065'; +} + +.fi-eyedropper:before { + content:'\e066'; +} + +.fi-file:before { + content:'\e067'; +} + +.fi-fire:before { + content:'\e068'; +} + +.fi-flag:before { + content:'\e069'; +} + +.fi-flash:before { + content:'\e06a'; +} + +.fi-folder:before { + content:'\e06b'; +} + +.fi-fork:before { + content:'\e06c'; +} + +.fi-fullscreen-enter:before { + content:'\e06d'; +} + +.fi-fullscreen-exit:before { + content:'\e06e'; +} + +.fi-globe:before { + content:'\e06f'; +} + +.fi-graph:before { + content:'\e070'; +} + +.fi-grid-four-up:before { + content:'\e071'; +} + +.fi-grid-three-up:before { + content:'\e072'; +} + +.fi-grid-two-up:before { + content:'\e073'; +} + +.fi-hard-drive:before { + content:'\e074'; +} + +.fi-header:before { + content:'\e075'; +} + +.fi-headphones:before { + content:'\e076'; +} + +.fi-heart:before { + content:'\e077'; +} + +.fi-home:before { + content:'\e078'; +} + +.fi-image:before { + content:'\e079'; +} + +.fi-inbox:before { + content:'\e07a'; +} + +.fi-infinity:before { + content:'\e07b'; +} + +.fi-info:before { + content:'\e07c'; +} + +.fi-italic:before { + content:'\e07d'; +} + +.fi-justify-center:before { + content:'\e07e'; +} + +.fi-justify-left:before { + content:'\e07f'; +} + +.fi-justify-right:before { + content:'\e080'; +} + +.fi-key:before { + content:'\e081'; +} + +.fi-laptop:before { + content:'\e082'; +} + 
+.fi-layers:before { + content:'\e083'; +} + +.fi-lightbulb:before { + content:'\e084'; +} + +.fi-link-broken:before { + content:'\e085'; +} + +.fi-link-intact:before { + content:'\e086'; +} + +.fi-list-rich:before { + content:'\e087'; +} + +.fi-list:before { + content:'\e088'; +} + +.fi-location:before { + content:'\e089'; +} + +.fi-lock-locked:before { + content:'\e08a'; +} + +.fi-lock-unlocked:before { + content:'\e08b'; +} + +.fi-loop-circular:before { + content:'\e08c'; +} + +.fi-loop-square:before { + content:'\e08d'; +} + +.fi-loop:before { + content:'\e08e'; +} + +.fi-magnifying-glass:before { + content:'\e08f'; +} + +.fi-map-marker:before { + content:'\e090'; +} + +.fi-map:before { + content:'\e091'; +} + +.fi-media-pause:before { + content:'\e092'; +} + +.fi-media-play:before { + content:'\e093'; +} + +.fi-media-record:before { + content:'\e094'; +} + +.fi-media-skip-backward:before { + content:'\e095'; +} + +.fi-media-skip-forward:before { + content:'\e096'; +} + +.fi-media-step-backward:before { + content:'\e097'; +} + +.fi-media-step-forward:before { + content:'\e098'; +} + +.fi-media-stop:before { + content:'\e099'; +} + +.fi-medical-cross:before { + content:'\e09a'; +} + +.fi-menu:before { + content:'\e09b'; +} + +.fi-microphone:before { + content:'\e09c'; +} + +.fi-minus:before { + content:'\e09d'; +} + +.fi-monitor:before { + content:'\e09e'; +} + +.fi-moon:before { + content:'\e09f'; +} + +.fi-move:before { + content:'\e0a0'; +} + +.fi-musical-note:before { + content:'\e0a1'; +} + +.fi-paperclip:before { + content:'\e0a2'; +} + +.fi-pencil:before { + content:'\e0a3'; +} + +.fi-people:before { + content:'\e0a4'; +} + +.fi-person:before { + content:'\e0a5'; +} + +.fi-phone:before { + content:'\e0a6'; +} + +.fi-pie-chart:before { + content:'\e0a7'; +} + +.fi-pin:before { + content:'\e0a8'; +} + +.fi-play-circle:before { + content:'\e0a9'; +} + +.fi-plus:before { + content:'\e0aa'; +} + +.fi-power-standby:before { + content:'\e0ab'; +} + 
+.fi-print:before { + content:'\e0ac'; +} + +.fi-project:before { + content:'\e0ad'; +} + +.fi-pulse:before { + content:'\e0ae'; +} + +.fi-puzzle-piece:before { + content:'\e0af'; +} + +.fi-question-mark:before { + content:'\e0b0'; +} + +.fi-rain:before { + content:'\e0b1'; +} + +.fi-random:before { + content:'\e0b2'; +} + +.fi-reload:before { + content:'\e0b3'; +} + +.fi-resize-both:before { + content:'\e0b4'; +} + +.fi-resize-height:before { + content:'\e0b5'; +} + +.fi-resize-width:before { + content:'\e0b6'; +} + +.fi-rss-alt:before { + content:'\e0b7'; +} + +.fi-rss:before { + content:'\e0b8'; +} + +.fi-script:before { + content:'\e0b9'; +} + +.fi-share-boxed:before { + content:'\e0ba'; +} + +.fi-share:before { + content:'\e0bb'; +} + +.fi-shield:before { + content:'\e0bc'; +} + +.fi-signal:before { + content:'\e0bd'; +} + +.fi-signpost:before { + content:'\e0be'; +} + +.fi-sort-ascending:before { + content:'\e0bf'; +} + +.fi-sort-descending:before { + content:'\e0c0'; +} + +.fi-spreadsheet:before { + content:'\e0c1'; +} + +.fi-star:before { + content:'\e0c2'; +} + +.fi-sun:before { + content:'\e0c3'; +} + +.fi-tablet:before { + content:'\e0c4'; +} + +.fi-tag:before { + content:'\e0c5'; +} + +.fi-tags:before { + content:'\e0c6'; +} + +.fi-target:before { + content:'\e0c7'; +} + +.fi-task:before { + content:'\e0c8'; +} + +.fi-terminal:before { + content:'\e0c9'; +} + +.fi-text:before { + content:'\e0ca'; +} + +.fi-thumb-down:before { + content:'\e0cb'; +} + +.fi-thumb-up:before { + content:'\e0cc'; +} + +.fi-timer:before { + content:'\e0cd'; +} + +.fi-transfer:before { + content:'\e0ce'; +} + +.fi-trash:before { + content:'\e0cf'; +} + +.fi-underline:before { + content:'\e0d0'; +} + +.fi-vertical-align-bottom:before { + content:'\e0d1'; +} + +.fi-vertical-align-center:before { + content:'\e0d2'; +} + +.fi-vertical-align-top:before { + content:'\e0d3'; +} + +.fi-video:before { + content:'\e0d4'; +} + +.fi-volume-high:before { + content:'\e0d5'; +} + 
+.fi-volume-low:before { + content:'\e0d6'; +} + +.fi-volume-off:before { + content:'\e0d7'; +} + +.fi-warning:before { + content:'\e0d8'; +} + +.fi-wifi:before { + content:'\e0d9'; +} + +.fi-wrench:before { + content:'\e0da'; +} + +.fi-x:before { + content:'\e0db'; +} + +.fi-yen:before { + content:'\e0dc'; +} + +.fi-zoom-in:before { + content:'\e0dd'; +} + +.fi-zoom-out:before { + content:'\e0de'; +} + diff --git a/static/open-iconic/css/open-iconic-foundation.less b/static/open-iconic/css/open-iconic-foundation.less new file mode 100644 index 00000000..deabf26f --- /dev/null +++ b/static/open-iconic/css/open-iconic-foundation.less @@ -0,0 +1,1397 @@ +/* Foundation */ + +/* Font path variable */ +@icon-font-path: '../fonts/'; + +@font-face { + font-family: 'Icons'; + src: url('@{icon-font-path}open-iconic.eot'); + src: url('@{icon-font-path}open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('@{icon-font-path}open-iconic.woff') format('woff'), url('@{icon-font-path}open-iconic.ttf') format('truetype'), url('@{icon-font-path}open-iconic.otf') format('opentype'), url('@{icon-font-path}open-iconic.svg#iconic-sm') format('svg'); + font-weight: normal; + font-style: normal; +} + + +.fi-account-login:before, + +.fi-account-logout:before, + +.fi-action-redo:before, + +.fi-action-undo:before, + +.fi-align-center:before, + +.fi-align-left:before, + +.fi-align-right:before, + +.fi-aperture:before, + +.fi-arrow-bottom:before, + +.fi-arrow-circle-bottom:before, + +.fi-arrow-circle-left:before, + +.fi-arrow-circle-right:before, + +.fi-arrow-circle-top:before, + +.fi-arrow-left:before, + +.fi-arrow-right:before, + +.fi-arrow-thick-bottom:before, + +.fi-arrow-thick-left:before, + +.fi-arrow-thick-right:before, + +.fi-arrow-thick-top:before, + +.fi-arrow-top:before, + +.fi-audio-spectrum:before, + +.fi-audio:before, + +.fi-badge:before, + +.fi-ban:before, + +.fi-bar-chart:before, + +.fi-basket:before, + +.fi-battery-empty:before, + +.fi-battery-full:before, + 
+.fi-beaker:before, + +.fi-bell:before, + +.fi-bluetooth:before, + +.fi-bold:before, + +.fi-bolt:before, + +.fi-book:before, + +.fi-bookmark:before, + +.fi-box:before, + +.fi-briefcase:before, + +.fi-british-pound:before, + +.fi-browser:before, + +.fi-brush:before, + +.fi-bug:before, + +.fi-bullhorn:before, + +.fi-calculator:before, + +.fi-calendar:before, + +.fi-camera-slr:before, + +.fi-caret-bottom:before, + +.fi-caret-left:before, + +.fi-caret-right:before, + +.fi-caret-top:before, + +.fi-cart:before, + +.fi-chat:before, + +.fi-check:before, + +.fi-chevron-bottom:before, + +.fi-chevron-left:before, + +.fi-chevron-right:before, + +.fi-chevron-top:before, + +.fi-circle-check:before, + +.fi-circle-x:before, + +.fi-clipboard:before, + +.fi-clock:before, + +.fi-cloud-download:before, + +.fi-cloud-upload:before, + +.fi-cloud:before, + +.fi-cloudy:before, + +.fi-code:before, + +.fi-cog:before, + +.fi-collapse-down:before, + +.fi-collapse-left:before, + +.fi-collapse-right:before, + +.fi-collapse-up:before, + +.fi-command:before, + +.fi-comment-square:before, + +.fi-compass:before, + +.fi-contrast:before, + +.fi-copywriting:before, + +.fi-credit-card:before, + +.fi-crop:before, + +.fi-dashboard:before, + +.fi-data-transfer-download:before, + +.fi-data-transfer-upload:before, + +.fi-delete:before, + +.fi-dial:before, + +.fi-document:before, + +.fi-dollar:before, + +.fi-double-quote-sans-left:before, + +.fi-double-quote-sans-right:before, + +.fi-double-quote-serif-left:before, + +.fi-double-quote-serif-right:before, + +.fi-droplet:before, + +.fi-eject:before, + +.fi-elevator:before, + +.fi-ellipses:before, + +.fi-envelope-closed:before, + +.fi-envelope-open:before, + +.fi-euro:before, + +.fi-excerpt:before, + +.fi-expand-down:before, + +.fi-expand-left:before, + +.fi-expand-right:before, + +.fi-expand-up:before, + +.fi-external-link:before, + +.fi-eye:before, + +.fi-eyedropper:before, + +.fi-file:before, + +.fi-fire:before, + +.fi-flag:before, + +.fi-flash:before, + 
+.fi-folder:before, + +.fi-fork:before, + +.fi-fullscreen-enter:before, + +.fi-fullscreen-exit:before, + +.fi-globe:before, + +.fi-graph:before, + +.fi-grid-four-up:before, + +.fi-grid-three-up:before, + +.fi-grid-two-up:before, + +.fi-hard-drive:before, + +.fi-header:before, + +.fi-headphones:before, + +.fi-heart:before, + +.fi-home:before, + +.fi-image:before, + +.fi-inbox:before, + +.fi-infinity:before, + +.fi-info:before, + +.fi-italic:before, + +.fi-justify-center:before, + +.fi-justify-left:before, + +.fi-justify-right:before, + +.fi-key:before, + +.fi-laptop:before, + +.fi-layers:before, + +.fi-lightbulb:before, + +.fi-link-broken:before, + +.fi-link-intact:before, + +.fi-list-rich:before, + +.fi-list:before, + +.fi-location:before, + +.fi-lock-locked:before, + +.fi-lock-unlocked:before, + +.fi-loop-circular:before, + +.fi-loop-square:before, + +.fi-loop:before, + +.fi-magnifying-glass:before, + +.fi-map-marker:before, + +.fi-map:before, + +.fi-media-pause:before, + +.fi-media-play:before, + +.fi-media-record:before, + +.fi-media-skip-backward:before, + +.fi-media-skip-forward:before, + +.fi-media-step-backward:before, + +.fi-media-step-forward:before, + +.fi-media-stop:before, + +.fi-medical-cross:before, + +.fi-menu:before, + +.fi-microphone:before, + +.fi-minus:before, + +.fi-monitor:before, + +.fi-moon:before, + +.fi-move:before, + +.fi-musical-note:before, + +.fi-paperclip:before, + +.fi-pencil:before, + +.fi-people:before, + +.fi-person:before, + +.fi-phone:before, + +.fi-pie-chart:before, + +.fi-pin:before, + +.fi-play-circle:before, + +.fi-plus:before, + +.fi-power-standby:before, + +.fi-print:before, + +.fi-project:before, + +.fi-pulse:before, + +.fi-puzzle-piece:before, + +.fi-question-mark:before, + +.fi-rain:before, + +.fi-random:before, + +.fi-reload:before, + +.fi-resize-both:before, + +.fi-resize-height:before, + +.fi-resize-width:before, + +.fi-rss-alt:before, + +.fi-rss:before, + +.fi-script:before, + +.fi-share-boxed:before, + 
+.fi-share:before, + +.fi-shield:before, + +.fi-signal:before, + +.fi-signpost:before, + +.fi-sort-ascending:before, + +.fi-sort-descending:before, + +.fi-spreadsheet:before, + +.fi-star:before, + +.fi-sun:before, + +.fi-tablet:before, + +.fi-tag:before, + +.fi-tags:before, + +.fi-target:before, + +.fi-task:before, + +.fi-terminal:before, + +.fi-text:before, + +.fi-thumb-down:before, + +.fi-thumb-up:before, + +.fi-timer:before, + +.fi-transfer:before, + +.fi-trash:before, + +.fi-underline:before, + +.fi-vertical-align-bottom:before, + +.fi-vertical-align-center:before, + +.fi-vertical-align-top:before, + +.fi-video:before, + +.fi-volume-high:before, + +.fi-volume-low:before, + +.fi-volume-off:before, + +.fi-warning:before, + +.fi-wifi:before, + +.fi-wrench:before, + +.fi-x:before, + +.fi-yen:before, + +.fi-zoom-in:before, + +.fi-zoom-out:before + { + font-family: 'Icons'; + font-style: normal; + font-weight: normal; + font-variant: normal; + text-transform: none; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + display: inline-block; + text-decoration: inherit; +} + +[class*='fi-'].oi-align-center:before { + text-align: center; +} + +[class*='fi-'].oi-align-left:before { + text-align: left; +} + +[class*='fi-'].oi-align-right:before { + text-align: right; +} + + +[class*='fi-'].oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); +} + +[class*='fi-'].oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); +} + +[class*='fi-'].oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); +} + + + +.fi-account-login:before { + content:'\e000'; +} + +.fi-account-logout:before { + content:'\e001'; +} + +.fi-action-redo:before { + content:'\e002'; +} + +.fi-action-undo:before { + content:'\e003'; +} + +.fi-align-center:before 
{ + content:'\e004'; +} + +.fi-align-left:before { + content:'\e005'; +} + +.fi-align-right:before { + content:'\e006'; +} + +.fi-aperture:before { + content:'\e007'; +} + +.fi-arrow-bottom:before { + content:'\e008'; +} + +.fi-arrow-circle-bottom:before { + content:'\e009'; +} + +.fi-arrow-circle-left:before { + content:'\e00a'; +} + +.fi-arrow-circle-right:before { + content:'\e00b'; +} + +.fi-arrow-circle-top:before { + content:'\e00c'; +} + +.fi-arrow-left:before { + content:'\e00d'; +} + +.fi-arrow-right:before { + content:'\e00e'; +} + +.fi-arrow-thick-bottom:before { + content:'\e00f'; +} + +.fi-arrow-thick-left:before { + content:'\e010'; +} + +.fi-arrow-thick-right:before { + content:'\e011'; +} + +.fi-arrow-thick-top:before { + content:'\e012'; +} + +.fi-arrow-top:before { + content:'\e013'; +} + +.fi-audio-spectrum:before { + content:'\e014'; +} + +.fi-audio:before { + content:'\e015'; +} + +.fi-badge:before { + content:'\e016'; +} + +.fi-ban:before { + content:'\e017'; +} + +.fi-bar-chart:before { + content:'\e018'; +} + +.fi-basket:before { + content:'\e019'; +} + +.fi-battery-empty:before { + content:'\e01a'; +} + +.fi-battery-full:before { + content:'\e01b'; +} + +.fi-beaker:before { + content:'\e01c'; +} + +.fi-bell:before { + content:'\e01d'; +} + +.fi-bluetooth:before { + content:'\e01e'; +} + +.fi-bold:before { + content:'\e01f'; +} + +.fi-bolt:before { + content:'\e020'; +} + +.fi-book:before { + content:'\e021'; +} + +.fi-bookmark:before { + content:'\e022'; +} + +.fi-box:before { + content:'\e023'; +} + +.fi-briefcase:before { + content:'\e024'; +} + +.fi-british-pound:before { + content:'\e025'; +} + +.fi-browser:before { + content:'\e026'; +} + +.fi-brush:before { + content:'\e027'; +} + +.fi-bug:before { + content:'\e028'; +} + +.fi-bullhorn:before { + content:'\e029'; +} + +.fi-calculator:before { + content:'\e02a'; +} + +.fi-calendar:before { + content:'\e02b'; +} + +.fi-camera-slr:before { + content:'\e02c'; +} + +.fi-caret-bottom:before 
{ + content:'\e02d'; +} + +.fi-caret-left:before { + content:'\e02e'; +} + +.fi-caret-right:before { + content:'\e02f'; +} + +.fi-caret-top:before { + content:'\e030'; +} + +.fi-cart:before { + content:'\e031'; +} + +.fi-chat:before { + content:'\e032'; +} + +.fi-check:before { + content:'\e033'; +} + +.fi-chevron-bottom:before { + content:'\e034'; +} + +.fi-chevron-left:before { + content:'\e035'; +} + +.fi-chevron-right:before { + content:'\e036'; +} + +.fi-chevron-top:before { + content:'\e037'; +} + +.fi-circle-check:before { + content:'\e038'; +} + +.fi-circle-x:before { + content:'\e039'; +} + +.fi-clipboard:before { + content:'\e03a'; +} + +.fi-clock:before { + content:'\e03b'; +} + +.fi-cloud-download:before { + content:'\e03c'; +} + +.fi-cloud-upload:before { + content:'\e03d'; +} + +.fi-cloud:before { + content:'\e03e'; +} + +.fi-cloudy:before { + content:'\e03f'; +} + +.fi-code:before { + content:'\e040'; +} + +.fi-cog:before { + content:'\e041'; +} + +.fi-collapse-down:before { + content:'\e042'; +} + +.fi-collapse-left:before { + content:'\e043'; +} + +.fi-collapse-right:before { + content:'\e044'; +} + +.fi-collapse-up:before { + content:'\e045'; +} + +.fi-command:before { + content:'\e046'; +} + +.fi-comment-square:before { + content:'\e047'; +} + +.fi-compass:before { + content:'\e048'; +} + +.fi-contrast:before { + content:'\e049'; +} + +.fi-copywriting:before { + content:'\e04a'; +} + +.fi-credit-card:before { + content:'\e04b'; +} + +.fi-crop:before { + content:'\e04c'; +} + +.fi-dashboard:before { + content:'\e04d'; +} + +.fi-data-transfer-download:before { + content:'\e04e'; +} + +.fi-data-transfer-upload:before { + content:'\e04f'; +} + +.fi-delete:before { + content:'\e050'; +} + +.fi-dial:before { + content:'\e051'; +} + +.fi-document:before { + content:'\e052'; +} + +.fi-dollar:before { + content:'\e053'; +} + +.fi-double-quote-sans-left:before { + content:'\e054'; +} + +.fi-double-quote-sans-right:before { + content:'\e055'; +} + 
+.fi-double-quote-serif-left:before { + content:'\e056'; +} + +.fi-double-quote-serif-right:before { + content:'\e057'; +} + +.fi-droplet:before { + content:'\e058'; +} + +.fi-eject:before { + content:'\e059'; +} + +.fi-elevator:before { + content:'\e05a'; +} + +.fi-ellipses:before { + content:'\e05b'; +} + +.fi-envelope-closed:before { + content:'\e05c'; +} + +.fi-envelope-open:before { + content:'\e05d'; +} + +.fi-euro:before { + content:'\e05e'; +} + +.fi-excerpt:before { + content:'\e05f'; +} + +.fi-expand-down:before { + content:'\e060'; +} + +.fi-expand-left:before { + content:'\e061'; +} + +.fi-expand-right:before { + content:'\e062'; +} + +.fi-expand-up:before { + content:'\e063'; +} + +.fi-external-link:before { + content:'\e064'; +} + +.fi-eye:before { + content:'\e065'; +} + +.fi-eyedropper:before { + content:'\e066'; +} + +.fi-file:before { + content:'\e067'; +} + +.fi-fire:before { + content:'\e068'; +} + +.fi-flag:before { + content:'\e069'; +} + +.fi-flash:before { + content:'\e06a'; +} + +.fi-folder:before { + content:'\e06b'; +} + +.fi-fork:before { + content:'\e06c'; +} + +.fi-fullscreen-enter:before { + content:'\e06d'; +} + +.fi-fullscreen-exit:before { + content:'\e06e'; +} + +.fi-globe:before { + content:'\e06f'; +} + +.fi-graph:before { + content:'\e070'; +} + +.fi-grid-four-up:before { + content:'\e071'; +} + +.fi-grid-three-up:before { + content:'\e072'; +} + +.fi-grid-two-up:before { + content:'\e073'; +} + +.fi-hard-drive:before { + content:'\e074'; +} + +.fi-header:before { + content:'\e075'; +} + +.fi-headphones:before { + content:'\e076'; +} + +.fi-heart:before { + content:'\e077'; +} + +.fi-home:before { + content:'\e078'; +} + +.fi-image:before { + content:'\e079'; +} + +.fi-inbox:before { + content:'\e07a'; +} + +.fi-infinity:before { + content:'\e07b'; +} + +.fi-info:before { + content:'\e07c'; +} + +.fi-italic:before { + content:'\e07d'; +} + +.fi-justify-center:before { + content:'\e07e'; +} + +.fi-justify-left:before { + 
content:'\e07f'; +} + +.fi-justify-right:before { + content:'\e080'; +} + +.fi-key:before { + content:'\e081'; +} + +.fi-laptop:before { + content:'\e082'; +} + +.fi-layers:before { + content:'\e083'; +} + +.fi-lightbulb:before { + content:'\e084'; +} + +.fi-link-broken:before { + content:'\e085'; +} + +.fi-link-intact:before { + content:'\e086'; +} + +.fi-list-rich:before { + content:'\e087'; +} + +.fi-list:before { + content:'\e088'; +} + +.fi-location:before { + content:'\e089'; +} + +.fi-lock-locked:before { + content:'\e08a'; +} + +.fi-lock-unlocked:before { + content:'\e08b'; +} + +.fi-loop-circular:before { + content:'\e08c'; +} + +.fi-loop-square:before { + content:'\e08d'; +} + +.fi-loop:before { + content:'\e08e'; +} + +.fi-magnifying-glass:before { + content:'\e08f'; +} + +.fi-map-marker:before { + content:'\e090'; +} + +.fi-map:before { + content:'\e091'; +} + +.fi-media-pause:before { + content:'\e092'; +} + +.fi-media-play:before { + content:'\e093'; +} + +.fi-media-record:before { + content:'\e094'; +} + +.fi-media-skip-backward:before { + content:'\e095'; +} + +.fi-media-skip-forward:before { + content:'\e096'; +} + +.fi-media-step-backward:before { + content:'\e097'; +} + +.fi-media-step-forward:before { + content:'\e098'; +} + +.fi-media-stop:before { + content:'\e099'; +} + +.fi-medical-cross:before { + content:'\e09a'; +} + +.fi-menu:before { + content:'\e09b'; +} + +.fi-microphone:before { + content:'\e09c'; +} + +.fi-minus:before { + content:'\e09d'; +} + +.fi-monitor:before { + content:'\e09e'; +} + +.fi-moon:before { + content:'\e09f'; +} + +.fi-move:before { + content:'\e0a0'; +} + +.fi-musical-note:before { + content:'\e0a1'; +} + +.fi-paperclip:before { + content:'\e0a2'; +} + +.fi-pencil:before { + content:'\e0a3'; +} + +.fi-people:before { + content:'\e0a4'; +} + +.fi-person:before { + content:'\e0a5'; +} + +.fi-phone:before { + content:'\e0a6'; +} + +.fi-pie-chart:before { + content:'\e0a7'; +} + +.fi-pin:before { + content:'\e0a8'; +} 
+ +.fi-play-circle:before { + content:'\e0a9'; +} + +.fi-plus:before { + content:'\e0aa'; +} + +.fi-power-standby:before { + content:'\e0ab'; +} + +.fi-print:before { + content:'\e0ac'; +} + +.fi-project:before { + content:'\e0ad'; +} + +.fi-pulse:before { + content:'\e0ae'; +} + +.fi-puzzle-piece:before { + content:'\e0af'; +} + +.fi-question-mark:before { + content:'\e0b0'; +} + +.fi-rain:before { + content:'\e0b1'; +} + +.fi-random:before { + content:'\e0b2'; +} + +.fi-reload:before { + content:'\e0b3'; +} + +.fi-resize-both:before { + content:'\e0b4'; +} + +.fi-resize-height:before { + content:'\e0b5'; +} + +.fi-resize-width:before { + content:'\e0b6'; +} + +.fi-rss-alt:before { + content:'\e0b7'; +} + +.fi-rss:before { + content:'\e0b8'; +} + +.fi-script:before { + content:'\e0b9'; +} + +.fi-share-boxed:before { + content:'\e0ba'; +} + +.fi-share:before { + content:'\e0bb'; +} + +.fi-shield:before { + content:'\e0bc'; +} + +.fi-signal:before { + content:'\e0bd'; +} + +.fi-signpost:before { + content:'\e0be'; +} + +.fi-sort-ascending:before { + content:'\e0bf'; +} + +.fi-sort-descending:before { + content:'\e0c0'; +} + +.fi-spreadsheet:before { + content:'\e0c1'; +} + +.fi-star:before { + content:'\e0c2'; +} + +.fi-sun:before { + content:'\e0c3'; +} + +.fi-tablet:before { + content:'\e0c4'; +} + +.fi-tag:before { + content:'\e0c5'; +} + +.fi-tags:before { + content:'\e0c6'; +} + +.fi-target:before { + content:'\e0c7'; +} + +.fi-task:before { + content:'\e0c8'; +} + +.fi-terminal:before { + content:'\e0c9'; +} + +.fi-text:before { + content:'\e0ca'; +} + +.fi-thumb-down:before { + content:'\e0cb'; +} + +.fi-thumb-up:before { + content:'\e0cc'; +} + +.fi-timer:before { + content:'\e0cd'; +} + +.fi-transfer:before { + content:'\e0ce'; +} + +.fi-trash:before { + content:'\e0cf'; +} + +.fi-underline:before { + content:'\e0d0'; +} + +.fi-vertical-align-bottom:before { + content:'\e0d1'; +} + +.fi-vertical-align-center:before { + content:'\e0d2'; +} + 
+.fi-vertical-align-top:before { + content:'\e0d3'; +} + +.fi-video:before { + content:'\e0d4'; +} + +.fi-volume-high:before { + content:'\e0d5'; +} + +.fi-volume-low:before { + content:'\e0d6'; +} + +.fi-volume-off:before { + content:'\e0d7'; +} + +.fi-warning:before { + content:'\e0d8'; +} + +.fi-wifi:before { + content:'\e0d9'; +} + +.fi-wrench:before { + content:'\e0da'; +} + +.fi-x:before { + content:'\e0db'; +} + +.fi-yen:before { + content:'\e0dc'; +} + +.fi-zoom-in:before { + content:'\e0dd'; +} + +.fi-zoom-out:before { + content:'\e0de'; +} + diff --git a/static/open-iconic/css/open-iconic-foundation.min.css b/static/open-iconic/css/open-iconic-foundation.min.css new file mode 100644 index 00000000..bd124297 --- /dev/null +++ b/static/open-iconic/css/open-iconic-foundation.min.css @@ -0,0 +1 @@ +@font-face{font-family:Icons;src:url(../fonts/open-iconic.eot);src:url(../fonts/open-iconic.eot?#iconic-sm) format('embedded-opentype'),url(../fonts/open-iconic.woff) format('woff'),url(../fonts/open-iconic.ttf) format('truetype'),url(../fonts/open-iconic.otf) format('opentype'),url(../fonts/open-iconic.svg#iconic-sm) 
format('svg');font-weight:400;font-style:normal}.fi-account-login:before,.fi-account-logout:before,.fi-action-redo:before,.fi-action-undo:before,.fi-align-center:before,.fi-align-left:before,.fi-align-right:before,.fi-aperture:before,.fi-arrow-bottom:before,.fi-arrow-circle-bottom:before,.fi-arrow-circle-left:before,.fi-arrow-circle-right:before,.fi-arrow-circle-top:before,.fi-arrow-left:before,.fi-arrow-right:before,.fi-arrow-thick-bottom:before,.fi-arrow-thick-left:before,.fi-arrow-thick-right:before,.fi-arrow-thick-top:before,.fi-arrow-top:before,.fi-audio-spectrum:before,.fi-audio:before,.fi-badge:before,.fi-ban:before,.fi-bar-chart:before,.fi-basket:before,.fi-battery-empty:before,.fi-battery-full:before,.fi-beaker:before,.fi-bell:before,.fi-bluetooth:before,.fi-bold:before,.fi-bolt:before,.fi-book:before,.fi-bookmark:before,.fi-box:before,.fi-briefcase:before,.fi-british-pound:before,.fi-browser:before,.fi-brush:before,.fi-bug:before,.fi-bullhorn:before,.fi-calculator:before,.fi-calendar:before,.fi-camera-slr:before,.fi-caret-bottom:before,.fi-caret-left:before,.fi-caret-right:before,.fi-caret-top:before,.fi-cart:before,.fi-chat:before,.fi-check:before,.fi-chevron-bottom:before,.fi-chevron-left:before,.fi-chevron-right:before,.fi-chevron-top:before,.fi-circle-check:before,.fi-circle-x:before,.fi-clipboard:before,.fi-clock:before,.fi-cloud-download:before,.fi-cloud-upload:before,.fi-cloud:before,.fi-cloudy:before,.fi-code:before,.fi-cog:before,.fi-collapse-down:before,.fi-collapse-left:before,.fi-collapse-right:before,.fi-collapse-up:before,.fi-command:before,.fi-comment-square:before,.fi-compass:before,.fi-contrast:before,.fi-copywriting:before,.fi-credit-card:before,.fi-crop:before,.fi-dashboard:before,.fi-data-transfer-download:before,.fi-data-transfer-upload:before,.fi-delete:before,.fi-dial:before,.fi-document:before,.fi-dollar:before,.fi-double-quote-sans-left:before,.fi-double-quote-sans-right:before,.fi-double-quote-serif-left:before,.fi-double-quote-se
rif-right:before,.fi-droplet:before,.fi-eject:before,.fi-elevator:before,.fi-ellipses:before,.fi-envelope-closed:before,.fi-envelope-open:before,.fi-euro:before,.fi-excerpt:before,.fi-expand-down:before,.fi-expand-left:before,.fi-expand-right:before,.fi-expand-up:before,.fi-external-link:before,.fi-eye:before,.fi-eyedropper:before,.fi-file:before,.fi-fire:before,.fi-flag:before,.fi-flash:before,.fi-folder:before,.fi-fork:before,.fi-fullscreen-enter:before,.fi-fullscreen-exit:before,.fi-globe:before,.fi-graph:before,.fi-grid-four-up:before,.fi-grid-three-up:before,.fi-grid-two-up:before,.fi-hard-drive:before,.fi-header:before,.fi-headphones:before,.fi-heart:before,.fi-home:before,.fi-image:before,.fi-inbox:before,.fi-infinity:before,.fi-info:before,.fi-italic:before,.fi-justify-center:before,.fi-justify-left:before,.fi-justify-right:before,.fi-key:before,.fi-laptop:before,.fi-layers:before,.fi-lightbulb:before,.fi-link-broken:before,.fi-link-intact:before,.fi-list-rich:before,.fi-list:before,.fi-location:before,.fi-lock-locked:before,.fi-lock-unlocked:before,.fi-loop-circular:before,.fi-loop-square:before,.fi-loop:before,.fi-magnifying-glass:before,.fi-map-marker:before,.fi-map:before,.fi-media-pause:before,.fi-media-play:before,.fi-media-record:before,.fi-media-skip-backward:before,.fi-media-skip-forward:before,.fi-media-step-backward:before,.fi-media-step-forward:before,.fi-media-stop:before,.fi-medical-cross:before,.fi-menu:before,.fi-microphone:before,.fi-minus:before,.fi-monitor:before,.fi-moon:before,.fi-move:before,.fi-musical-note:before,.fi-paperclip:before,.fi-pencil:before,.fi-people:before,.fi-person:before,.fi-phone:before,.fi-pie-chart:before,.fi-pin:before,.fi-play-circle:before,.fi-plus:before,.fi-power-standby:before,.fi-print:before,.fi-project:before,.fi-pulse:before,.fi-puzzle-piece:before,.fi-question-mark:before,.fi-rain:before,.fi-random:before,.fi-reload:before,.fi-resize-both:before,.fi-resize-height:before,.fi-resize-width:before,.fi-rss-alt
:before,.fi-rss:before,.fi-script:before,.fi-share-boxed:before,.fi-share:before,.fi-shield:before,.fi-signal:before,.fi-signpost:before,.fi-sort-ascending:before,.fi-sort-descending:before,.fi-spreadsheet:before,.fi-star:before,.fi-sun:before,.fi-tablet:before,.fi-tag:before,.fi-tags:before,.fi-target:before,.fi-task:before,.fi-terminal:before,.fi-text:before,.fi-thumb-down:before,.fi-thumb-up:before,.fi-timer:before,.fi-transfer:before,.fi-trash:before,.fi-underline:before,.fi-vertical-align-bottom:before,.fi-vertical-align-center:before,.fi-vertical-align-top:before,.fi-video:before,.fi-volume-high:before,.fi-volume-low:before,.fi-volume-off:before,.fi-warning:before,.fi-wifi:before,.fi-wrench:before,.fi-x:before,.fi-yen:before,.fi-zoom-in:before,.fi-zoom-out:before{font-family:Icons;font-style:normal;font-weight:400;font-variant:normal;text-transform:none;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;display:inline-block;text-decoration:inherit}[class*=fi-].oi-align-center:before{text-align:center}[class*=fi-].oi-align-left:before{text-align:left}[class*=fi-].oi-align-right:before{text-align:right}[class*=fi-].oi-flip-horizontal:before{-webkit-transform:scale(-1,1);-ms-transform:scale(-1,1);transform:scale(-1,1)}[class*=fi-].oi-flip-vertical:before{-webkit-transform:scale(1,-1);-ms-transform:scale(-1,1);transform:scale(1,-1)}[class*=fi-].oi-flip-horizontal-vertical:before{-webkit-transform:scale(-1,-1);-ms-transform:scale(-1,1);transform:scale(-1,-1)}.fi-account-login:before{content:'\e000'}.fi-account-logout:before{content:'\e001'}.fi-action-redo:before{content:'\e002'}.fi-action-undo:before{content:'\e003'}.fi-align-center:before{content:'\e004'}.fi-align-left:before{content:'\e005'}.fi-align-right:before{content:'\e006'}.fi-aperture:before{content:'\e007'}.fi-arrow-bottom:before{content:'\e008'}.fi-arrow-circle-bottom:before{content:'\e009'}.fi-arrow-circle-left:before{content:'\e00a'}.fi-arrow-circle-right:before{content:
'\e00b'}.fi-arrow-circle-top:before{content:'\e00c'}.fi-arrow-left:before{content:'\e00d'}.fi-arrow-right:before{content:'\e00e'}.fi-arrow-thick-bottom:before{content:'\e00f'}.fi-arrow-thick-left:before{content:'\e010'}.fi-arrow-thick-right:before{content:'\e011'}.fi-arrow-thick-top:before{content:'\e012'}.fi-arrow-top:before{content:'\e013'}.fi-audio-spectrum:before{content:'\e014'}.fi-audio:before{content:'\e015'}.fi-badge:before{content:'\e016'}.fi-ban:before{content:'\e017'}.fi-bar-chart:before{content:'\e018'}.fi-basket:before{content:'\e019'}.fi-battery-empty:before{content:'\e01a'}.fi-battery-full:before{content:'\e01b'}.fi-beaker:before{content:'\e01c'}.fi-bell:before{content:'\e01d'}.fi-bluetooth:before{content:'\e01e'}.fi-bold:before{content:'\e01f'}.fi-bolt:before{content:'\e020'}.fi-book:before{content:'\e021'}.fi-bookmark:before{content:'\e022'}.fi-box:before{content:'\e023'}.fi-briefcase:before{content:'\e024'}.fi-british-pound:before{content:'\e025'}.fi-browser:before{content:'\e026'}.fi-brush:before{content:'\e027'}.fi-bug:before{content:'\e028'}.fi-bullhorn:before{content:'\e029'}.fi-calculator:before{content:'\e02a'}.fi-calendar:before{content:'\e02b'}.fi-camera-slr:before{content:'\e02c'}.fi-caret-bottom:before{content:'\e02d'}.fi-caret-left:before{content:'\e02e'}.fi-caret-right:before{content:'\e02f'}.fi-caret-top:before{content:'\e030'}.fi-cart:before{content:'\e031'}.fi-chat:before{content:'\e032'}.fi-check:before{content:'\e033'}.fi-chevron-bottom:before{content:'\e034'}.fi-chevron-left:before{content:'\e035'}.fi-chevron-right:before{content:'\e036'}.fi-chevron-top:before{content:'\e037'}.fi-circle-check:before{content:'\e038'}.fi-circle-x:before{content:'\e039'}.fi-clipboard:before{content:'\e03a'}.fi-clock:before{content:'\e03b'}.fi-cloud-download:before{content:'\e03c'}.fi-cloud-upload:before{content:'\e03d'}.fi-cloud:before{content:'\e03e'}.fi-cloudy:before{content:'\e03f'}.fi-code:before{content:'\e040'}.fi-cog:before{content:'\e041'}.fi
-collapse-down:before{content:'\e042'}.fi-collapse-left:before{content:'\e043'}.fi-collapse-right:before{content:'\e044'}.fi-collapse-up:before{content:'\e045'}.fi-command:before{content:'\e046'}.fi-comment-square:before{content:'\e047'}.fi-compass:before{content:'\e048'}.fi-contrast:before{content:'\e049'}.fi-copywriting:before{content:'\e04a'}.fi-credit-card:before{content:'\e04b'}.fi-crop:before{content:'\e04c'}.fi-dashboard:before{content:'\e04d'}.fi-data-transfer-download:before{content:'\e04e'}.fi-data-transfer-upload:before{content:'\e04f'}.fi-delete:before{content:'\e050'}.fi-dial:before{content:'\e051'}.fi-document:before{content:'\e052'}.fi-dollar:before{content:'\e053'}.fi-double-quote-sans-left:before{content:'\e054'}.fi-double-quote-sans-right:before{content:'\e055'}.fi-double-quote-serif-left:before{content:'\e056'}.fi-double-quote-serif-right:before{content:'\e057'}.fi-droplet:before{content:'\e058'}.fi-eject:before{content:'\e059'}.fi-elevator:before{content:'\e05a'}.fi-ellipses:before{content:'\e05b'}.fi-envelope-closed:before{content:'\e05c'}.fi-envelope-open:before{content:'\e05d'}.fi-euro:before{content:'\e05e'}.fi-excerpt:before{content:'\e05f'}.fi-expand-down:before{content:'\e060'}.fi-expand-left:before{content:'\e061'}.fi-expand-right:before{content:'\e062'}.fi-expand-up:before{content:'\e063'}.fi-external-link:before{content:'\e064'}.fi-eye:before{content:'\e065'}.fi-eyedropper:before{content:'\e066'}.fi-file:before{content:'\e067'}.fi-fire:before{content:'\e068'}.fi-flag:before{content:'\e069'}.fi-flash:before{content:'\e06a'}.fi-folder:before{content:'\e06b'}.fi-fork:before{content:'\e06c'}.fi-fullscreen-enter:before{content:'\e06d'}.fi-fullscreen-exit:before{content:'\e06e'}.fi-globe:before{content:'\e06f'}.fi-graph:before{content:'\e070'}.fi-grid-four-up:before{content:'\e071'}.fi-grid-three-up:before{content:'\e072'}.fi-grid-two-up:before{content:'\e073'}.fi-hard-drive:before{content:'\e074'}.fi-header:before{content:'\e075'}.fi-headpho
nes:before{content:'\e076'}.fi-heart:before{content:'\e077'}.fi-home:before{content:'\e078'}.fi-image:before{content:'\e079'}.fi-inbox:before{content:'\e07a'}.fi-infinity:before{content:'\e07b'}.fi-info:before{content:'\e07c'}.fi-italic:before{content:'\e07d'}.fi-justify-center:before{content:'\e07e'}.fi-justify-left:before{content:'\e07f'}.fi-justify-right:before{content:'\e080'}.fi-key:before{content:'\e081'}.fi-laptop:before{content:'\e082'}.fi-layers:before{content:'\e083'}.fi-lightbulb:before{content:'\e084'}.fi-link-broken:before{content:'\e085'}.fi-link-intact:before{content:'\e086'}.fi-list-rich:before{content:'\e087'}.fi-list:before{content:'\e088'}.fi-location:before{content:'\e089'}.fi-lock-locked:before{content:'\e08a'}.fi-lock-unlocked:before{content:'\e08b'}.fi-loop-circular:before{content:'\e08c'}.fi-loop-square:before{content:'\e08d'}.fi-loop:before{content:'\e08e'}.fi-magnifying-glass:before{content:'\e08f'}.fi-map-marker:before{content:'\e090'}.fi-map:before{content:'\e091'}.fi-media-pause:before{content:'\e092'}.fi-media-play:before{content:'\e093'}.fi-media-record:before{content:'\e094'}.fi-media-skip-backward:before{content:'\e095'}.fi-media-skip-forward:before{content:'\e096'}.fi-media-step-backward:before{content:'\e097'}.fi-media-step-forward:before{content:'\e098'}.fi-media-stop:before{content:'\e099'}.fi-medical-cross:before{content:'\e09a'}.fi-menu:before{content:'\e09b'}.fi-microphone:before{content:'\e09c'}.fi-minus:before{content:'\e09d'}.fi-monitor:before{content:'\e09e'}.fi-moon:before{content:'\e09f'}.fi-move:before{content:'\e0a0'}.fi-musical-note:before{content:'\e0a1'}.fi-paperclip:before{content:'\e0a2'}.fi-pencil:before{content:'\e0a3'}.fi-people:before{content:'\e0a4'}.fi-person:before{content:'\e0a5'}.fi-phone:before{content:'\e0a6'}.fi-pie-chart:before{content:'\e0a7'}.fi-pin:before{content:'\e0a8'}.fi-play-circle:before{content:'\e0a9'}.fi-plus:before{content:'\e0aa'}.fi-power-standby:before{content:'\e0ab'}.fi-print:before{
content:'\e0ac'}.fi-project:before{content:'\e0ad'}.fi-pulse:before{content:'\e0ae'}.fi-puzzle-piece:before{content:'\e0af'}.fi-question-mark:before{content:'\e0b0'}.fi-rain:before{content:'\e0b1'}.fi-random:before{content:'\e0b2'}.fi-reload:before{content:'\e0b3'}.fi-resize-both:before{content:'\e0b4'}.fi-resize-height:before{content:'\e0b5'}.fi-resize-width:before{content:'\e0b6'}.fi-rss-alt:before{content:'\e0b7'}.fi-rss:before{content:'\e0b8'}.fi-script:before{content:'\e0b9'}.fi-share-boxed:before{content:'\e0ba'}.fi-share:before{content:'\e0bb'}.fi-shield:before{content:'\e0bc'}.fi-signal:before{content:'\e0bd'}.fi-signpost:before{content:'\e0be'}.fi-sort-ascending:before{content:'\e0bf'}.fi-sort-descending:before{content:'\e0c0'}.fi-spreadsheet:before{content:'\e0c1'}.fi-star:before{content:'\e0c2'}.fi-sun:before{content:'\e0c3'}.fi-tablet:before{content:'\e0c4'}.fi-tag:before{content:'\e0c5'}.fi-tags:before{content:'\e0c6'}.fi-target:before{content:'\e0c7'}.fi-task:before{content:'\e0c8'}.fi-terminal:before{content:'\e0c9'}.fi-text:before{content:'\e0ca'}.fi-thumb-down:before{content:'\e0cb'}.fi-thumb-up:before{content:'\e0cc'}.fi-timer:before{content:'\e0cd'}.fi-transfer:before{content:'\e0ce'}.fi-trash:before{content:'\e0cf'}.fi-underline:before{content:'\e0d0'}.fi-vertical-align-bottom:before{content:'\e0d1'}.fi-vertical-align-center:before{content:'\e0d2'}.fi-vertical-align-top:before{content:'\e0d3'}.fi-video:before{content:'\e0d4'}.fi-volume-high:before{content:'\e0d5'}.fi-volume-low:before{content:'\e0d6'}.fi-volume-off:before{content:'\e0d7'}.fi-warning:before{content:'\e0d8'}.fi-wifi:before{content:'\e0d9'}.fi-wrench:before{content:'\e0da'}.fi-x:before{content:'\e0db'}.fi-yen:before{content:'\e0dc'}.fi-zoom-in:before{content:'\e0dd'}.fi-zoom-out:before{content:'\e0de'} \ No newline at end of file diff --git a/static/open-iconic/css/open-iconic-foundation.scss b/static/open-iconic/css/open-iconic-foundation.scss new file mode 100644 index 
00000000..fe471389 --- /dev/null +++ b/static/open-iconic/css/open-iconic-foundation.scss @@ -0,0 +1,1398 @@ +/* Foundation */ + +/* Font path variable */ +$icon-font-path: '../fonts/' !default; + +@font-face { + font-family: 'Icons'; + src: url('#{$icon-font-path}open-iconic.eot'); + src: url('#{$icon-font-path}open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('#{$icon-font-path}open-iconic.woff') format('woff'), url('#{$icon-font-path}open-iconic.ttf') format('truetype'), url('#{$icon-font-path}open-iconic.otf') format('opentype'), url('#{$icon-font-path}open-iconic.svg#iconic-sm') format('svg'); + font-weight: normal; + font-style: normal; +} + + +.fi-account-login:before, + +.fi-account-logout:before, + +.fi-action-redo:before, + +.fi-action-undo:before, + +.fi-align-center:before, + +.fi-align-left:before, + +.fi-align-right:before, + +.fi-aperture:before, + +.fi-arrow-bottom:before, + +.fi-arrow-circle-bottom:before, + +.fi-arrow-circle-left:before, + +.fi-arrow-circle-right:before, + +.fi-arrow-circle-top:before, + +.fi-arrow-left:before, + +.fi-arrow-right:before, + +.fi-arrow-thick-bottom:before, + +.fi-arrow-thick-left:before, + +.fi-arrow-thick-right:before, + +.fi-arrow-thick-top:before, + +.fi-arrow-top:before, + +.fi-audio-spectrum:before, + +.fi-audio:before, + +.fi-badge:before, + +.fi-ban:before, + +.fi-bar-chart:before, + +.fi-basket:before, + +.fi-battery-empty:before, + +.fi-battery-full:before, + +.fi-beaker:before, + +.fi-bell:before, + +.fi-bluetooth:before, + +.fi-bold:before, + +.fi-bolt:before, + +.fi-book:before, + +.fi-bookmark:before, + +.fi-box:before, + +.fi-briefcase:before, + +.fi-british-pound:before, + +.fi-browser:before, + +.fi-brush:before, + +.fi-bug:before, + +.fi-bullhorn:before, + +.fi-calculator:before, + +.fi-calendar:before, + +.fi-camera-slr:before, + +.fi-caret-bottom:before, + +.fi-caret-left:before, + +.fi-caret-right:before, + +.fi-caret-top:before, + +.fi-cart:before, + +.fi-chat:before, + 
+.fi-check:before, + +.fi-chevron-bottom:before, + +.fi-chevron-left:before, + +.fi-chevron-right:before, + +.fi-chevron-top:before, + +.fi-circle-check:before, + +.fi-circle-x:before, + +.fi-clipboard:before, + +.fi-clock:before, + +.fi-cloud-download:before, + +.fi-cloud-upload:before, + +.fi-cloud:before, + +.fi-cloudy:before, + +.fi-code:before, + +.fi-cog:before, + +.fi-collapse-down:before, + +.fi-collapse-left:before, + +.fi-collapse-right:before, + +.fi-collapse-up:before, + +.fi-command:before, + +.fi-comment-square:before, + +.fi-compass:before, + +.fi-contrast:before, + +.fi-copywriting:before, + +.fi-credit-card:before, + +.fi-crop:before, + +.fi-dashboard:before, + +.fi-data-transfer-download:before, + +.fi-data-transfer-upload:before, + +.fi-delete:before, + +.fi-dial:before, + +.fi-document:before, + +.fi-dollar:before, + +.fi-double-quote-sans-left:before, + +.fi-double-quote-sans-right:before, + +.fi-double-quote-serif-left:before, + +.fi-double-quote-serif-right:before, + +.fi-droplet:before, + +.fi-eject:before, + +.fi-elevator:before, + +.fi-ellipses:before, + +.fi-envelope-closed:before, + +.fi-envelope-open:before, + +.fi-euro:before, + +.fi-excerpt:before, + +.fi-expand-down:before, + +.fi-expand-left:before, + +.fi-expand-right:before, + +.fi-expand-up:before, + +.fi-external-link:before, + +.fi-eye:before, + +.fi-eyedropper:before, + +.fi-file:before, + +.fi-fire:before, + +.fi-flag:before, + +.fi-flash:before, + +.fi-folder:before, + +.fi-fork:before, + +.fi-fullscreen-enter:before, + +.fi-fullscreen-exit:before, + +.fi-globe:before, + +.fi-graph:before, + +.fi-grid-four-up:before, + +.fi-grid-three-up:before, + +.fi-grid-two-up:before, + +.fi-hard-drive:before, + +.fi-header:before, + +.fi-headphones:before, + +.fi-heart:before, + +.fi-home:before, + +.fi-image:before, + +.fi-inbox:before, + +.fi-infinity:before, + +.fi-info:before, + +.fi-italic:before, + +.fi-justify-center:before, + +.fi-justify-left:before, + 
+.fi-justify-right:before, + +.fi-key:before, + +.fi-laptop:before, + +.fi-layers:before, + +.fi-lightbulb:before, + +.fi-link-broken:before, + +.fi-link-intact:before, + +.fi-list-rich:before, + +.fi-list:before, + +.fi-location:before, + +.fi-lock-locked:before, + +.fi-lock-unlocked:before, + +.fi-loop-circular:before, + +.fi-loop-square:before, + +.fi-loop:before, + +.fi-magnifying-glass:before, + +.fi-map-marker:before, + +.fi-map:before, + +.fi-media-pause:before, + +.fi-media-play:before, + +.fi-media-record:before, + +.fi-media-skip-backward:before, + +.fi-media-skip-forward:before, + +.fi-media-step-backward:before, + +.fi-media-step-forward:before, + +.fi-media-stop:before, + +.fi-medical-cross:before, + +.fi-menu:before, + +.fi-microphone:before, + +.fi-minus:before, + +.fi-monitor:before, + +.fi-moon:before, + +.fi-move:before, + +.fi-musical-note:before, + +.fi-paperclip:before, + +.fi-pencil:before, + +.fi-people:before, + +.fi-person:before, + +.fi-phone:before, + +.fi-pie-chart:before, + +.fi-pin:before, + +.fi-play-circle:before, + +.fi-plus:before, + +.fi-power-standby:before, + +.fi-print:before, + +.fi-project:before, + +.fi-pulse:before, + +.fi-puzzle-piece:before, + +.fi-question-mark:before, + +.fi-rain:before, + +.fi-random:before, + +.fi-reload:before, + +.fi-resize-both:before, + +.fi-resize-height:before, + +.fi-resize-width:before, + +.fi-rss-alt:before, + +.fi-rss:before, + +.fi-script:before, + +.fi-share-boxed:before, + +.fi-share:before, + +.fi-shield:before, + +.fi-signal:before, + +.fi-signpost:before, + +.fi-sort-ascending:before, + +.fi-sort-descending:before, + +.fi-spreadsheet:before, + +.fi-star:before, + +.fi-sun:before, + +.fi-tablet:before, + +.fi-tag:before, + +.fi-tags:before, + +.fi-target:before, + +.fi-task:before, + +.fi-terminal:before, + +.fi-text:before, + +.fi-thumb-down:before, + +.fi-thumb-up:before, + +.fi-timer:before, + +.fi-transfer:before, + +.fi-trash:before, + +.fi-underline:before, + 
+.fi-vertical-align-bottom:before, + +.fi-vertical-align-center:before, + +.fi-vertical-align-top:before, + +.fi-video:before, + +.fi-volume-high:before, + +.fi-volume-low:before, + +.fi-volume-off:before, + +.fi-warning:before, + +.fi-wifi:before, + +.fi-wrench:before, + +.fi-x:before, + +.fi-yen:before, + +.fi-zoom-in:before, + +.fi-zoom-out:before + { + font-family: 'Icons'; + font-style: normal; + font-weight: normal; + font-variant: normal; + text-transform: none; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + display: inline-block; + text-decoration: inherit; +} + + +[class*='fi-'].oi-align-center:before { + text-align: center; +} + +[class*='fi-'].oi-align-left:before { + text-align: left; +} + +[class*='fi-'].oi-align-right:before { + text-align: right; +} + + +[class*='fi-'].oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); +} + +[class*='fi-'].oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); +} + +[class*='fi-'].oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); +} + + + +.fi-account-login:before { + content:'\e000'; +} + +.fi-account-logout:before { + content:'\e001'; +} + +.fi-action-redo:before { + content:'\e002'; +} + +.fi-action-undo:before { + content:'\e003'; +} + +.fi-align-center:before { + content:'\e004'; +} + +.fi-align-left:before { + content:'\e005'; +} + +.fi-align-right:before { + content:'\e006'; +} + +.fi-aperture:before { + content:'\e007'; +} + +.fi-arrow-bottom:before { + content:'\e008'; +} + +.fi-arrow-circle-bottom:before { + content:'\e009'; +} + +.fi-arrow-circle-left:before { + content:'\e00a'; +} + +.fi-arrow-circle-right:before { + content:'\e00b'; +} + +.fi-arrow-circle-top:before { + content:'\e00c'; +} + +.fi-arrow-left:before { + content:'\e00d'; +} + 
+.fi-arrow-right:before { + content:'\e00e'; +} + +.fi-arrow-thick-bottom:before { + content:'\e00f'; +} + +.fi-arrow-thick-left:before { + content:'\e010'; +} + +.fi-arrow-thick-right:before { + content:'\e011'; +} + +.fi-arrow-thick-top:before { + content:'\e012'; +} + +.fi-arrow-top:before { + content:'\e013'; +} + +.fi-audio-spectrum:before { + content:'\e014'; +} + +.fi-audio:before { + content:'\e015'; +} + +.fi-badge:before { + content:'\e016'; +} + +.fi-ban:before { + content:'\e017'; +} + +.fi-bar-chart:before { + content:'\e018'; +} + +.fi-basket:before { + content:'\e019'; +} + +.fi-battery-empty:before { + content:'\e01a'; +} + +.fi-battery-full:before { + content:'\e01b'; +} + +.fi-beaker:before { + content:'\e01c'; +} + +.fi-bell:before { + content:'\e01d'; +} + +.fi-bluetooth:before { + content:'\e01e'; +} + +.fi-bold:before { + content:'\e01f'; +} + +.fi-bolt:before { + content:'\e020'; +} + +.fi-book:before { + content:'\e021'; +} + +.fi-bookmark:before { + content:'\e022'; +} + +.fi-box:before { + content:'\e023'; +} + +.fi-briefcase:before { + content:'\e024'; +} + +.fi-british-pound:before { + content:'\e025'; +} + +.fi-browser:before { + content:'\e026'; +} + +.fi-brush:before { + content:'\e027'; +} + +.fi-bug:before { + content:'\e028'; +} + +.fi-bullhorn:before { + content:'\e029'; +} + +.fi-calculator:before { + content:'\e02a'; +} + +.fi-calendar:before { + content:'\e02b'; +} + +.fi-camera-slr:before { + content:'\e02c'; +} + +.fi-caret-bottom:before { + content:'\e02d'; +} + +.fi-caret-left:before { + content:'\e02e'; +} + +.fi-caret-right:before { + content:'\e02f'; +} + +.fi-caret-top:before { + content:'\e030'; +} + +.fi-cart:before { + content:'\e031'; +} + +.fi-chat:before { + content:'\e032'; +} + +.fi-check:before { + content:'\e033'; +} + +.fi-chevron-bottom:before { + content:'\e034'; +} + +.fi-chevron-left:before { + content:'\e035'; +} + +.fi-chevron-right:before { + content:'\e036'; +} + +.fi-chevron-top:before { + 
content:'\e037'; +} + +.fi-circle-check:before { + content:'\e038'; +} + +.fi-circle-x:before { + content:'\e039'; +} + +.fi-clipboard:before { + content:'\e03a'; +} + +.fi-clock:before { + content:'\e03b'; +} + +.fi-cloud-download:before { + content:'\e03c'; +} + +.fi-cloud-upload:before { + content:'\e03d'; +} + +.fi-cloud:before { + content:'\e03e'; +} + +.fi-cloudy:before { + content:'\e03f'; +} + +.fi-code:before { + content:'\e040'; +} + +.fi-cog:before { + content:'\e041'; +} + +.fi-collapse-down:before { + content:'\e042'; +} + +.fi-collapse-left:before { + content:'\e043'; +} + +.fi-collapse-right:before { + content:'\e044'; +} + +.fi-collapse-up:before { + content:'\e045'; +} + +.fi-command:before { + content:'\e046'; +} + +.fi-comment-square:before { + content:'\e047'; +} + +.fi-compass:before { + content:'\e048'; +} + +.fi-contrast:before { + content:'\e049'; +} + +.fi-copywriting:before { + content:'\e04a'; +} + +.fi-credit-card:before { + content:'\e04b'; +} + +.fi-crop:before { + content:'\e04c'; +} + +.fi-dashboard:before { + content:'\e04d'; +} + +.fi-data-transfer-download:before { + content:'\e04e'; +} + +.fi-data-transfer-upload:before { + content:'\e04f'; +} + +.fi-delete:before { + content:'\e050'; +} + +.fi-dial:before { + content:'\e051'; +} + +.fi-document:before { + content:'\e052'; +} + +.fi-dollar:before { + content:'\e053'; +} + +.fi-double-quote-sans-left:before { + content:'\e054'; +} + +.fi-double-quote-sans-right:before { + content:'\e055'; +} + +.fi-double-quote-serif-left:before { + content:'\e056'; +} + +.fi-double-quote-serif-right:before { + content:'\e057'; +} + +.fi-droplet:before { + content:'\e058'; +} + +.fi-eject:before { + content:'\e059'; +} + +.fi-elevator:before { + content:'\e05a'; +} + +.fi-ellipses:before { + content:'\e05b'; +} + +.fi-envelope-closed:before { + content:'\e05c'; +} + +.fi-envelope-open:before { + content:'\e05d'; +} + +.fi-euro:before { + content:'\e05e'; +} + +.fi-excerpt:before { + 
content:'\e05f'; +} + +.fi-expand-down:before { + content:'\e060'; +} + +.fi-expand-left:before { + content:'\e061'; +} + +.fi-expand-right:before { + content:'\e062'; +} + +.fi-expand-up:before { + content:'\e063'; +} + +.fi-external-link:before { + content:'\e064'; +} + +.fi-eye:before { + content:'\e065'; +} + +.fi-eyedropper:before { + content:'\e066'; +} + +.fi-file:before { + content:'\e067'; +} + +.fi-fire:before { + content:'\e068'; +} + +.fi-flag:before { + content:'\e069'; +} + +.fi-flash:before { + content:'\e06a'; +} + +.fi-folder:before { + content:'\e06b'; +} + +.fi-fork:before { + content:'\e06c'; +} + +.fi-fullscreen-enter:before { + content:'\e06d'; +} + +.fi-fullscreen-exit:before { + content:'\e06e'; +} + +.fi-globe:before { + content:'\e06f'; +} + +.fi-graph:before { + content:'\e070'; +} + +.fi-grid-four-up:before { + content:'\e071'; +} + +.fi-grid-three-up:before { + content:'\e072'; +} + +.fi-grid-two-up:before { + content:'\e073'; +} + +.fi-hard-drive:before { + content:'\e074'; +} + +.fi-header:before { + content:'\e075'; +} + +.fi-headphones:before { + content:'\e076'; +} + +.fi-heart:before { + content:'\e077'; +} + +.fi-home:before { + content:'\e078'; +} + +.fi-image:before { + content:'\e079'; +} + +.fi-inbox:before { + content:'\e07a'; +} + +.fi-infinity:before { + content:'\e07b'; +} + +.fi-info:before { + content:'\e07c'; +} + +.fi-italic:before { + content:'\e07d'; +} + +.fi-justify-center:before { + content:'\e07e'; +} + +.fi-justify-left:before { + content:'\e07f'; +} + +.fi-justify-right:before { + content:'\e080'; +} + +.fi-key:before { + content:'\e081'; +} + +.fi-laptop:before { + content:'\e082'; +} + +.fi-layers:before { + content:'\e083'; +} + +.fi-lightbulb:before { + content:'\e084'; +} + +.fi-link-broken:before { + content:'\e085'; +} + +.fi-link-intact:before { + content:'\e086'; +} + +.fi-list-rich:before { + content:'\e087'; +} + +.fi-list:before { + content:'\e088'; +} + +.fi-location:before { + content:'\e089'; +} 
+ +.fi-lock-locked:before { + content:'\e08a'; +} + +.fi-lock-unlocked:before { + content:'\e08b'; +} + +.fi-loop-circular:before { + content:'\e08c'; +} + +.fi-loop-square:before { + content:'\e08d'; +} + +.fi-loop:before { + content:'\e08e'; +} + +.fi-magnifying-glass:before { + content:'\e08f'; +} + +.fi-map-marker:before { + content:'\e090'; +} + +.fi-map:before { + content:'\e091'; +} + +.fi-media-pause:before { + content:'\e092'; +} + +.fi-media-play:before { + content:'\e093'; +} + +.fi-media-record:before { + content:'\e094'; +} + +.fi-media-skip-backward:before { + content:'\e095'; +} + +.fi-media-skip-forward:before { + content:'\e096'; +} + +.fi-media-step-backward:before { + content:'\e097'; +} + +.fi-media-step-forward:before { + content:'\e098'; +} + +.fi-media-stop:before { + content:'\e099'; +} + +.fi-medical-cross:before { + content:'\e09a'; +} + +.fi-menu:before { + content:'\e09b'; +} + +.fi-microphone:before { + content:'\e09c'; +} + +.fi-minus:before { + content:'\e09d'; +} + +.fi-monitor:before { + content:'\e09e'; +} + +.fi-moon:before { + content:'\e09f'; +} + +.fi-move:before { + content:'\e0a0'; +} + +.fi-musical-note:before { + content:'\e0a1'; +} + +.fi-paperclip:before { + content:'\e0a2'; +} + +.fi-pencil:before { + content:'\e0a3'; +} + +.fi-people:before { + content:'\e0a4'; +} + +.fi-person:before { + content:'\e0a5'; +} + +.fi-phone:before { + content:'\e0a6'; +} + +.fi-pie-chart:before { + content:'\e0a7'; +} + +.fi-pin:before { + content:'\e0a8'; +} + +.fi-play-circle:before { + content:'\e0a9'; +} + +.fi-plus:before { + content:'\e0aa'; +} + +.fi-power-standby:before { + content:'\e0ab'; +} + +.fi-print:before { + content:'\e0ac'; +} + +.fi-project:before { + content:'\e0ad'; +} + +.fi-pulse:before { + content:'\e0ae'; +} + +.fi-puzzle-piece:before { + content:'\e0af'; +} + +.fi-question-mark:before { + content:'\e0b0'; +} + +.fi-rain:before { + content:'\e0b1'; +} + +.fi-random:before { + content:'\e0b2'; +} + 
+.fi-reload:before { + content:'\e0b3'; +} + +.fi-resize-both:before { + content:'\e0b4'; +} + +.fi-resize-height:before { + content:'\e0b5'; +} + +.fi-resize-width:before { + content:'\e0b6'; +} + +.fi-rss-alt:before { + content:'\e0b7'; +} + +.fi-rss:before { + content:'\e0b8'; +} + +.fi-script:before { + content:'\e0b9'; +} + +.fi-share-boxed:before { + content:'\e0ba'; +} + +.fi-share:before { + content:'\e0bb'; +} + +.fi-shield:before { + content:'\e0bc'; +} + +.fi-signal:before { + content:'\e0bd'; +} + +.fi-signpost:before { + content:'\e0be'; +} + +.fi-sort-ascending:before { + content:'\e0bf'; +} + +.fi-sort-descending:before { + content:'\e0c0'; +} + +.fi-spreadsheet:before { + content:'\e0c1'; +} + +.fi-star:before { + content:'\e0c2'; +} + +.fi-sun:before { + content:'\e0c3'; +} + +.fi-tablet:before { + content:'\e0c4'; +} + +.fi-tag:before { + content:'\e0c5'; +} + +.fi-tags:before { + content:'\e0c6'; +} + +.fi-target:before { + content:'\e0c7'; +} + +.fi-task:before { + content:'\e0c8'; +} + +.fi-terminal:before { + content:'\e0c9'; +} + +.fi-text:before { + content:'\e0ca'; +} + +.fi-thumb-down:before { + content:'\e0cb'; +} + +.fi-thumb-up:before { + content:'\e0cc'; +} + +.fi-timer:before { + content:'\e0cd'; +} + +.fi-transfer:before { + content:'\e0ce'; +} + +.fi-trash:before { + content:'\e0cf'; +} + +.fi-underline:before { + content:'\e0d0'; +} + +.fi-vertical-align-bottom:before { + content:'\e0d1'; +} + +.fi-vertical-align-center:before { + content:'\e0d2'; +} + +.fi-vertical-align-top:before { + content:'\e0d3'; +} + +.fi-video:before { + content:'\e0d4'; +} + +.fi-volume-high:before { + content:'\e0d5'; +} + +.fi-volume-low:before { + content:'\e0d6'; +} + +.fi-volume-off:before { + content:'\e0d7'; +} + +.fi-warning:before { + content:'\e0d8'; +} + +.fi-wifi:before { + content:'\e0d9'; +} + +.fi-wrench:before { + content:'\e0da'; +} + +.fi-x:before { + content:'\e0db'; +} + +.fi-yen:before { + content:'\e0dc'; +} + +.fi-zoom-in:before { + 
content:'\e0dd'; +} + +.fi-zoom-out:before { + content:'\e0de'; +} + diff --git a/static/open-iconic/css/open-iconic-foundation.styl b/static/open-iconic/css/open-iconic-foundation.styl new file mode 100644 index 00000000..a52637ab --- /dev/null +++ b/static/open-iconic/css/open-iconic-foundation.styl @@ -0,0 +1,1392 @@ +/* Foundation */ + +@font-face + font-family 'Icons' + src url('../fonts/open-iconic.eot') + src url('../fonts/open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('../fonts/open-iconic.woff') format('woff'), url('../fonts/open-iconic.ttf') format('truetype'), url('../fonts/open-iconic.otf') format('opentype'), url('../fonts/open-iconic.svg#iconic-sm') format('svg') + font-weight normal + font-style normal + + + +.fi-account-loginbefore, + +.fi-account-logoutbefore, + +.fi-action-redobefore, + +.fi-action-undobefore, + +.fi-align-centerbefore, + +.fi-align-leftbefore, + +.fi-align-rightbefore, + +.fi-aperturebefore, + +.fi-arrow-bottombefore, + +.fi-arrow-circle-bottombefore, + +.fi-arrow-circle-leftbefore, + +.fi-arrow-circle-rightbefore, + +.fi-arrow-circle-topbefore, + +.fi-arrow-leftbefore, + +.fi-arrow-rightbefore, + +.fi-arrow-thick-bottombefore, + +.fi-arrow-thick-leftbefore, + +.fi-arrow-thick-rightbefore, + +.fi-arrow-thick-topbefore, + +.fi-arrow-topbefore, + +.fi-audio-spectrumbefore, + +.fi-audiobefore, + +.fi-badgebefore, + +.fi-banbefore, + +.fi-bar-chartbefore, + +.fi-basketbefore, + +.fi-battery-emptybefore, + +.fi-battery-fullbefore, + +.fi-beakerbefore, + +.fi-bellbefore, + +.fi-bluetoothbefore, + +.fi-boldbefore, + +.fi-boltbefore, + +.fi-bookbefore, + +.fi-bookmarkbefore, + +.fi-boxbefore, + +.fi-briefcasebefore, + +.fi-british-poundbefore, + +.fi-browserbefore, + +.fi-brushbefore, + +.fi-bugbefore, + +.fi-bullhornbefore, + +.fi-calculatorbefore, + +.fi-calendarbefore, + +.fi-camera-slrbefore, + +.fi-caret-bottombefore, + +.fi-caret-leftbefore, + +.fi-caret-rightbefore, + +.fi-caret-topbefore, + +.fi-cartbefore, + 
+.fi-chatbefore, + +.fi-checkbefore, + +.fi-chevron-bottombefore, + +.fi-chevron-leftbefore, + +.fi-chevron-rightbefore, + +.fi-chevron-topbefore, + +.fi-circle-checkbefore, + +.fi-circle-xbefore, + +.fi-clipboardbefore, + +.fi-clockbefore, + +.fi-cloud-downloadbefore, + +.fi-cloud-uploadbefore, + +.fi-cloudbefore, + +.fi-cloudybefore, + +.fi-codebefore, + +.fi-cogbefore, + +.fi-collapse-downbefore, + +.fi-collapse-leftbefore, + +.fi-collapse-rightbefore, + +.fi-collapse-upbefore, + +.fi-commandbefore, + +.fi-comment-squarebefore, + +.fi-compassbefore, + +.fi-contrastbefore, + +.fi-copywritingbefore, + +.fi-credit-cardbefore, + +.fi-cropbefore, + +.fi-dashboardbefore, + +.fi-data-transfer-downloadbefore, + +.fi-data-transfer-uploadbefore, + +.fi-deletebefore, + +.fi-dialbefore, + +.fi-documentbefore, + +.fi-dollarbefore, + +.fi-double-quote-sans-leftbefore, + +.fi-double-quote-sans-rightbefore, + +.fi-double-quote-serif-leftbefore, + +.fi-double-quote-serif-rightbefore, + +.fi-dropletbefore, + +.fi-ejectbefore, + +.fi-elevatorbefore, + +.fi-ellipsesbefore, + +.fi-envelope-closedbefore, + +.fi-envelope-openbefore, + +.fi-eurobefore, + +.fi-excerptbefore, + +.fi-expand-downbefore, + +.fi-expand-leftbefore, + +.fi-expand-rightbefore, + +.fi-expand-upbefore, + +.fi-external-linkbefore, + +.fi-eyebefore, + +.fi-eyedropperbefore, + +.fi-filebefore, + +.fi-firebefore, + +.fi-flagbefore, + +.fi-flashbefore, + +.fi-folderbefore, + +.fi-forkbefore, + +.fi-fullscreen-enterbefore, + +.fi-fullscreen-exitbefore, + +.fi-globebefore, + +.fi-graphbefore, + +.fi-grid-four-upbefore, + +.fi-grid-three-upbefore, + +.fi-grid-two-upbefore, + +.fi-hard-drivebefore, + +.fi-headerbefore, + +.fi-headphonesbefore, + +.fi-heartbefore, + +.fi-homebefore, + +.fi-imagebefore, + +.fi-inboxbefore, + +.fi-infinitybefore, + +.fi-infobefore, + +.fi-italicbefore, + +.fi-justify-centerbefore, + +.fi-justify-leftbefore, + +.fi-justify-rightbefore, + +.fi-keybefore, + +.fi-laptopbefore, + 
+.fi-layersbefore, + +.fi-lightbulbbefore, + +.fi-link-brokenbefore, + +.fi-link-intactbefore, + +.fi-list-richbefore, + +.fi-listbefore, + +.fi-locationbefore, + +.fi-lock-lockedbefore, + +.fi-lock-unlockedbefore, + +.fi-loop-circularbefore, + +.fi-loop-squarebefore, + +.fi-loopbefore, + +.fi-magnifying-glassbefore, + +.fi-map-markerbefore, + +.fi-mapbefore, + +.fi-media-pausebefore, + +.fi-media-playbefore, + +.fi-media-recordbefore, + +.fi-media-skip-backwardbefore, + +.fi-media-skip-forwardbefore, + +.fi-media-step-backwardbefore, + +.fi-media-step-forwardbefore, + +.fi-media-stopbefore, + +.fi-medical-crossbefore, + +.fi-menubefore, + +.fi-microphonebefore, + +.fi-minusbefore, + +.fi-monitorbefore, + +.fi-moonbefore, + +.fi-movebefore, + +.fi-musical-notebefore, + +.fi-paperclipbefore, + +.fi-pencilbefore, + +.fi-peoplebefore, + +.fi-personbefore, + +.fi-phonebefore, + +.fi-pie-chartbefore, + +.fi-pinbefore, + +.fi-play-circlebefore, + +.fi-plusbefore, + +.fi-power-standbybefore, + +.fi-printbefore, + +.fi-projectbefore, + +.fi-pulsebefore, + +.fi-puzzle-piecebefore, + +.fi-question-markbefore, + +.fi-rainbefore, + +.fi-randombefore, + +.fi-reloadbefore, + +.fi-resize-bothbefore, + +.fi-resize-heightbefore, + +.fi-resize-widthbefore, + +.fi-rss-altbefore, + +.fi-rssbefore, + +.fi-scriptbefore, + +.fi-share-boxedbefore, + +.fi-sharebefore, + +.fi-shieldbefore, + +.fi-signalbefore, + +.fi-signpostbefore, + +.fi-sort-ascendingbefore, + +.fi-sort-descendingbefore, + +.fi-spreadsheetbefore, + +.fi-starbefore, + +.fi-sunbefore, + +.fi-tabletbefore, + +.fi-tagbefore, + +.fi-tagsbefore, + +.fi-targetbefore, + +.fi-taskbefore, + +.fi-terminalbefore, + +.fi-textbefore, + +.fi-thumb-downbefore, + +.fi-thumb-upbefore, + +.fi-timerbefore, + +.fi-transferbefore, + +.fi-trashbefore, + +.fi-underlinebefore, + +.fi-vertical-align-bottombefore, + +.fi-vertical-align-centerbefore, + +.fi-vertical-align-topbefore, + +.fi-videobefore, + +.fi-volume-highbefore, + 
+.fi-volume-lowbefore, + +.fi-volume-offbefore, + +.fi-warningbefore, + +.fi-wifibefore, + +.fi-wrenchbefore, + +.fi-xbefore, + +.fi-yenbefore, + +.fi-zoom-inbefore, + +.fi-zoom-outbefore + + font-family 'Icons' + font-style normal + font-weight normal + font-variant normal + text-transform none + line-height 1 + -webkit-font-smoothing antialiased + -moz-osx-font-smoothing grayscale + display inline-block + text-decoration inherit + + +[class*='fi-'].oi-align-center:before + text-align center + + +[class*='fi-'].oi-align-left:before + text-align left + + +[class*='fi-'].oi-align-right:before + text-align right + + + +[class*='fi-'].oi-flip-horizontal:before + -webkit-transform scale(-1, 1) + -ms-transform scale(-1, 1) + transform scale(-1, 1) + + +[class*='fi-'].oi-flip-vertical:before + -webkit-transform scale(1, -1) + -ms-transform scale(-1, 1) + transform scale(1, -1) + + +[class*='fi-'].oi-flip-horizontal-vertical:before + -webkit-transform scale(-1, -1) + -ms-transform scale(-1, 1) + transform scale(-1, -1) + + +.fi-account-login:before + content'\e000' + + +.fi-account-logout:before + content'\e001' + + +.fi-action-redo:before + content'\e002' + + +.fi-action-undo:before + content'\e003' + + +.fi-align-center:before + content'\e004' + + +.fi-align-left:before + content'\e005' + + +.fi-align-right:before + content'\e006' + + +.fi-aperture:before + content'\e007' + + +.fi-arrow-bottom:before + content'\e008' + + +.fi-arrow-circle-bottom:before + content'\e009' + + +.fi-arrow-circle-left:before + content'\e00a' + + +.fi-arrow-circle-right:before + content'\e00b' + + +.fi-arrow-circle-top:before + content'\e00c' + + +.fi-arrow-left:before + content'\e00d' + + +.fi-arrow-right:before + content'\e00e' + + +.fi-arrow-thick-bottom:before + content'\e00f' + + +.fi-arrow-thick-left:before + content'\e010' + + +.fi-arrow-thick-right:before + content'\e011' + + +.fi-arrow-thick-top:before + content'\e012' + + +.fi-arrow-top:before + content'\e013' + + 
+.fi-audio-spectrum:before + content'\e014' + + +.fi-audio:before + content'\e015' + + +.fi-badge:before + content'\e016' + + +.fi-ban:before + content'\e017' + + +.fi-bar-chart:before + content'\e018' + + +.fi-basket:before + content'\e019' + + +.fi-battery-empty:before + content'\e01a' + + +.fi-battery-full:before + content'\e01b' + + +.fi-beaker:before + content'\e01c' + + +.fi-bell:before + content'\e01d' + + +.fi-bluetooth:before + content'\e01e' + + +.fi-bold:before + content'\e01f' + + +.fi-bolt:before + content'\e020' + + +.fi-book:before + content'\e021' + + +.fi-bookmark:before + content'\e022' + + +.fi-box:before + content'\e023' + + +.fi-briefcase:before + content'\e024' + + +.fi-british-pound:before + content'\e025' + + +.fi-browser:before + content'\e026' + + +.fi-brush:before + content'\e027' + + +.fi-bug:before + content'\e028' + + +.fi-bullhorn:before + content'\e029' + + +.fi-calculator:before + content'\e02a' + + +.fi-calendar:before + content'\e02b' + + +.fi-camera-slr:before + content'\e02c' + + +.fi-caret-bottom:before + content'\e02d' + + +.fi-caret-left:before + content'\e02e' + + +.fi-caret-right:before + content'\e02f' + + +.fi-caret-top:before + content'\e030' + + +.fi-cart:before + content'\e031' + + +.fi-chat:before + content'\e032' + + +.fi-check:before + content'\e033' + + +.fi-chevron-bottom:before + content'\e034' + + +.fi-chevron-left:before + content'\e035' + + +.fi-chevron-right:before + content'\e036' + + +.fi-chevron-top:before + content'\e037' + + +.fi-circle-check:before + content'\e038' + + +.fi-circle-x:before + content'\e039' + + +.fi-clipboard:before + content'\e03a' + + +.fi-clock:before + content'\e03b' + + +.fi-cloud-download:before + content'\e03c' + + +.fi-cloud-upload:before + content'\e03d' + + +.fi-cloud:before + content'\e03e' + + +.fi-cloudy:before + content'\e03f' + + +.fi-code:before + content'\e040' + + +.fi-cog:before + content'\e041' + + +.fi-collapse-down:before + content'\e042' + + 
+.fi-collapse-left:before + content'\e043' + + +.fi-collapse-right:before + content'\e044' + + +.fi-collapse-up:before + content'\e045' + + +.fi-command:before + content'\e046' + + +.fi-comment-square:before + content'\e047' + + +.fi-compass:before + content'\e048' + + +.fi-contrast:before + content'\e049' + + +.fi-copywriting:before + content'\e04a' + + +.fi-credit-card:before + content'\e04b' + + +.fi-crop:before + content'\e04c' + + +.fi-dashboard:before + content'\e04d' + + +.fi-data-transfer-download:before + content'\e04e' + + +.fi-data-transfer-upload:before + content'\e04f' + + +.fi-delete:before + content'\e050' + + +.fi-dial:before + content'\e051' + + +.fi-document:before + content'\e052' + + +.fi-dollar:before + content'\e053' + + +.fi-double-quote-sans-left:before + content'\e054' + + +.fi-double-quote-sans-right:before + content'\e055' + + +.fi-double-quote-serif-left:before + content'\e056' + + +.fi-double-quote-serif-right:before + content'\e057' + + +.fi-droplet:before + content'\e058' + + +.fi-eject:before + content'\e059' + + +.fi-elevator:before + content'\e05a' + + +.fi-ellipses:before + content'\e05b' + + +.fi-envelope-closed:before + content'\e05c' + + +.fi-envelope-open:before + content'\e05d' + + +.fi-euro:before + content'\e05e' + + +.fi-excerpt:before + content'\e05f' + + +.fi-expand-down:before + content'\e060' + + +.fi-expand-left:before + content'\e061' + + +.fi-expand-right:before + content'\e062' + + +.fi-expand-up:before + content'\e063' + + +.fi-external-link:before + content'\e064' + + +.fi-eye:before + content'\e065' + + +.fi-eyedropper:before + content'\e066' + + +.fi-file:before + content'\e067' + + +.fi-fire:before + content'\e068' + + +.fi-flag:before + content'\e069' + + +.fi-flash:before + content'\e06a' + + +.fi-folder:before + content'\e06b' + + +.fi-fork:before + content'\e06c' + + +.fi-fullscreen-enter:before + content'\e06d' + + +.fi-fullscreen-exit:before + content'\e06e' + + +.fi-globe:before + content'\e06f' + + 
+.fi-graph:before + content'\e070' + + +.fi-grid-four-up:before + content'\e071' + + +.fi-grid-three-up:before + content'\e072' + + +.fi-grid-two-up:before + content'\e073' + + +.fi-hard-drive:before + content'\e074' + + +.fi-header:before + content'\e075' + + +.fi-headphones:before + content'\e076' + + +.fi-heart:before + content'\e077' + + +.fi-home:before + content'\e078' + + +.fi-image:before + content'\e079' + + +.fi-inbox:before + content'\e07a' + + +.fi-infinity:before + content'\e07b' + + +.fi-info:before + content'\e07c' + + +.fi-italic:before + content'\e07d' + + +.fi-justify-center:before + content'\e07e' + + +.fi-justify-left:before + content'\e07f' + + +.fi-justify-right:before + content'\e080' + + +.fi-key:before + content'\e081' + + +.fi-laptop:before + content'\e082' + + +.fi-layers:before + content'\e083' + + +.fi-lightbulb:before + content'\e084' + + +.fi-link-broken:before + content'\e085' + + +.fi-link-intact:before + content'\e086' + + +.fi-list-rich:before + content'\e087' + + +.fi-list:before + content'\e088' + + +.fi-location:before + content'\e089' + + +.fi-lock-locked:before + content'\e08a' + + +.fi-lock-unlocked:before + content'\e08b' + + +.fi-loop-circular:before + content'\e08c' + + +.fi-loop-square:before + content'\e08d' + + +.fi-loop:before + content'\e08e' + + +.fi-magnifying-glass:before + content'\e08f' + + +.fi-map-marker:before + content'\e090' + + +.fi-map:before + content'\e091' + + +.fi-media-pause:before + content'\e092' + + +.fi-media-play:before + content'\e093' + + +.fi-media-record:before + content'\e094' + + +.fi-media-skip-backward:before + content'\e095' + + +.fi-media-skip-forward:before + content'\e096' + + +.fi-media-step-backward:before + content'\e097' + + +.fi-media-step-forward:before + content'\e098' + + +.fi-media-stop:before + content'\e099' + + +.fi-medical-cross:before + content'\e09a' + + +.fi-menu:before + content'\e09b' + + +.fi-microphone:before + content'\e09c' + + +.fi-minus:before + content'\e09d' 
+ + +.fi-monitor:before + content'\e09e' + + +.fi-moon:before + content'\e09f' + + +.fi-move:before + content'\e0a0' + + +.fi-musical-note:before + content'\e0a1' + + +.fi-paperclip:before + content'\e0a2' + + +.fi-pencil:before + content'\e0a3' + + +.fi-people:before + content'\e0a4' + + +.fi-person:before + content'\e0a5' + + +.fi-phone:before + content'\e0a6' + + +.fi-pie-chart:before + content'\e0a7' + + +.fi-pin:before + content'\e0a8' + + +.fi-play-circle:before + content'\e0a9' + + +.fi-plus:before + content'\e0aa' + + +.fi-power-standby:before + content'\e0ab' + + +.fi-print:before + content'\e0ac' + + +.fi-project:before + content'\e0ad' + + +.fi-pulse:before + content'\e0ae' + + +.fi-puzzle-piece:before + content'\e0af' + + +.fi-question-mark:before + content'\e0b0' + + +.fi-rain:before + content'\e0b1' + + +.fi-random:before + content'\e0b2' + + +.fi-reload:before + content'\e0b3' + + +.fi-resize-both:before + content'\e0b4' + + +.fi-resize-height:before + content'\e0b5' + + +.fi-resize-width:before + content'\e0b6' + + +.fi-rss-alt:before + content'\e0b7' + + +.fi-rss:before + content'\e0b8' + + +.fi-script:before + content'\e0b9' + + +.fi-share-boxed:before + content'\e0ba' + + +.fi-share:before + content'\e0bb' + + +.fi-shield:before + content'\e0bc' + + +.fi-signal:before + content'\e0bd' + + +.fi-signpost:before + content'\e0be' + + +.fi-sort-ascending:before + content'\e0bf' + + +.fi-sort-descending:before + content'\e0c0' + + +.fi-spreadsheet:before + content'\e0c1' + + +.fi-star:before + content'\e0c2' + + +.fi-sun:before + content'\e0c3' + + +.fi-tablet:before + content'\e0c4' + + +.fi-tag:before + content'\e0c5' + + +.fi-tags:before + content'\e0c6' + + +.fi-target:before + content'\e0c7' + + +.fi-task:before + content'\e0c8' + + +.fi-terminal:before + content'\e0c9' + + +.fi-text:before + content'\e0ca' + + +.fi-thumb-down:before + content'\e0cb' + + +.fi-thumb-up:before + content'\e0cc' + + +.fi-timer:before + content'\e0cd' + + 
+.fi-transfer:before + content'\e0ce' + + +.fi-trash:before + content'\e0cf' + + +.fi-underline:before + content'\e0d0' + + +.fi-vertical-align-bottom:before + content'\e0d1' + + +.fi-vertical-align-center:before + content'\e0d2' + + +.fi-vertical-align-top:before + content'\e0d3' + + +.fi-video:before + content'\e0d4' + + +.fi-volume-high:before + content'\e0d5' + + +.fi-volume-low:before + content'\e0d6' + + +.fi-volume-off:before + content'\e0d7' + + +.fi-warning:before + content'\e0d8' + + +.fi-wifi:before + content'\e0d9' + + +.fi-wrench:before + content'\e0da' + + +.fi-x:before + content'\e0db' + + +.fi-yen:before + content'\e0dc' + + +.fi-zoom-in:before + content'\e0dd' + + +.fi-zoom-out:before + content'\e0de' + + diff --git a/static/open-iconic/css/open-iconic.css b/static/open-iconic/css/open-iconic.css new file mode 100644 index 00000000..301a138c --- /dev/null +++ b/static/open-iconic/css/open-iconic.css @@ -0,0 +1,511 @@ + +@font-face { + font-family: 'Icons'; + src: url('../fonts/open-iconic.eot'); + src: url('../fonts/open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('../fonts/open-iconic.woff') format('woff'), url('../fonts/open-iconic.ttf') format('truetype'), url('../fonts/open-iconic.otf') format('opentype'), url('../fonts/open-iconic.svg#iconic-sm') format('svg'); + font-weight: normal; + font-style: normal; +} + +.oi[data-glyph].oi-text-replace { + font-size: 0; + line-height: 0; +} + +.oi[data-glyph].oi-text-replace:before { + width: 1em; + text-align: center; +} + +.oi[data-glyph]:before { + font-family: 'Icons'; + display: inline-block; + speak: none; + line-height: 1; + vertical-align: baseline; + font-weight: normal; + font-style: normal; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +.oi[data-glyph]:empty:before { + width: 1em; + text-align: center; + box-sizing: content-box; +} + +.oi[data-glyph].oi-align-left:before { + text-align: left; +} + +.oi[data-glyph].oi-align-right:before { + 
text-align: right; +} + +.oi[data-glyph].oi-align-center:before { + text-align: center; +} + +.oi[data-glyph].oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); +} +.oi[data-glyph].oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); +} +.oi[data-glyph].oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); +} + + +.oi[data-glyph=account-login]:before { content:'\e000'; } + +.oi[data-glyph=account-logout]:before { content:'\e001'; } + +.oi[data-glyph=action-redo]:before { content:'\e002'; } + +.oi[data-glyph=action-undo]:before { content:'\e003'; } + +.oi[data-glyph=align-center]:before { content:'\e004'; } + +.oi[data-glyph=align-left]:before { content:'\e005'; } + +.oi[data-glyph=align-right]:before { content:'\e006'; } + +.oi[data-glyph=aperture]:before { content:'\e007'; } + +.oi[data-glyph=arrow-bottom]:before { content:'\e008'; } + +.oi[data-glyph=arrow-circle-bottom]:before { content:'\e009'; } + +.oi[data-glyph=arrow-circle-left]:before { content:'\e00a'; } + +.oi[data-glyph=arrow-circle-right]:before { content:'\e00b'; } + +.oi[data-glyph=arrow-circle-top]:before { content:'\e00c'; } + +.oi[data-glyph=arrow-left]:before { content:'\e00d'; } + +.oi[data-glyph=arrow-right]:before { content:'\e00e'; } + +.oi[data-glyph=arrow-thick-bottom]:before { content:'\e00f'; } + +.oi[data-glyph=arrow-thick-left]:before { content:'\e010'; } + +.oi[data-glyph=arrow-thick-right]:before { content:'\e011'; } + +.oi[data-glyph=arrow-thick-top]:before { content:'\e012'; } + +.oi[data-glyph=arrow-top]:before { content:'\e013'; } + +.oi[data-glyph=audio-spectrum]:before { content:'\e014'; } + +.oi[data-glyph=audio]:before { content:'\e015'; } + +.oi[data-glyph=badge]:before { content:'\e016'; } + +.oi[data-glyph=ban]:before { content:'\e017'; } + 
+.oi[data-glyph=bar-chart]:before { content:'\e018'; } + +.oi[data-glyph=basket]:before { content:'\e019'; } + +.oi[data-glyph=battery-empty]:before { content:'\e01a'; } + +.oi[data-glyph=battery-full]:before { content:'\e01b'; } + +.oi[data-glyph=beaker]:before { content:'\e01c'; } + +.oi[data-glyph=bell]:before { content:'\e01d'; } + +.oi[data-glyph=bluetooth]:before { content:'\e01e'; } + +.oi[data-glyph=bold]:before { content:'\e01f'; } + +.oi[data-glyph=bolt]:before { content:'\e020'; } + +.oi[data-glyph=book]:before { content:'\e021'; } + +.oi[data-glyph=bookmark]:before { content:'\e022'; } + +.oi[data-glyph=box]:before { content:'\e023'; } + +.oi[data-glyph=briefcase]:before { content:'\e024'; } + +.oi[data-glyph=british-pound]:before { content:'\e025'; } + +.oi[data-glyph=browser]:before { content:'\e026'; } + +.oi[data-glyph=brush]:before { content:'\e027'; } + +.oi[data-glyph=bug]:before { content:'\e028'; } + +.oi[data-glyph=bullhorn]:before { content:'\e029'; } + +.oi[data-glyph=calculator]:before { content:'\e02a'; } + +.oi[data-glyph=calendar]:before { content:'\e02b'; } + +.oi[data-glyph=camera-slr]:before { content:'\e02c'; } + +.oi[data-glyph=caret-bottom]:before { content:'\e02d'; } + +.oi[data-glyph=caret-left]:before { content:'\e02e'; } + +.oi[data-glyph=caret-right]:before { content:'\e02f'; } + +.oi[data-glyph=caret-top]:before { content:'\e030'; } + +.oi[data-glyph=cart]:before { content:'\e031'; } + +.oi[data-glyph=chat]:before { content:'\e032'; } + +.oi[data-glyph=check]:before { content:'\e033'; } + +.oi[data-glyph=chevron-bottom]:before { content:'\e034'; } + +.oi[data-glyph=chevron-left]:before { content:'\e035'; } + +.oi[data-glyph=chevron-right]:before { content:'\e036'; } + +.oi[data-glyph=chevron-top]:before { content:'\e037'; } + +.oi[data-glyph=circle-check]:before { content:'\e038'; } + +.oi[data-glyph=circle-x]:before { content:'\e039'; } + +.oi[data-glyph=clipboard]:before { content:'\e03a'; } + +.oi[data-glyph=clock]:before 
{ content:'\e03b'; } + +.oi[data-glyph=cloud-download]:before { content:'\e03c'; } + +.oi[data-glyph=cloud-upload]:before { content:'\e03d'; } + +.oi[data-glyph=cloud]:before { content:'\e03e'; } + +.oi[data-glyph=cloudy]:before { content:'\e03f'; } + +.oi[data-glyph=code]:before { content:'\e040'; } + +.oi[data-glyph=cog]:before { content:'\e041'; } + +.oi[data-glyph=collapse-down]:before { content:'\e042'; } + +.oi[data-glyph=collapse-left]:before { content:'\e043'; } + +.oi[data-glyph=collapse-right]:before { content:'\e044'; } + +.oi[data-glyph=collapse-up]:before { content:'\e045'; } + +.oi[data-glyph=command]:before { content:'\e046'; } + +.oi[data-glyph=comment-square]:before { content:'\e047'; } + +.oi[data-glyph=compass]:before { content:'\e048'; } + +.oi[data-glyph=contrast]:before { content:'\e049'; } + +.oi[data-glyph=copywriting]:before { content:'\e04a'; } + +.oi[data-glyph=credit-card]:before { content:'\e04b'; } + +.oi[data-glyph=crop]:before { content:'\e04c'; } + +.oi[data-glyph=dashboard]:before { content:'\e04d'; } + +.oi[data-glyph=data-transfer-download]:before { content:'\e04e'; } + +.oi[data-glyph=data-transfer-upload]:before { content:'\e04f'; } + +.oi[data-glyph=delete]:before { content:'\e050'; } + +.oi[data-glyph=dial]:before { content:'\e051'; } + +.oi[data-glyph=document]:before { content:'\e052'; } + +.oi[data-glyph=dollar]:before { content:'\e053'; } + +.oi[data-glyph=double-quote-sans-left]:before { content:'\e054'; } + +.oi[data-glyph=double-quote-sans-right]:before { content:'\e055'; } + +.oi[data-glyph=double-quote-serif-left]:before { content:'\e056'; } + +.oi[data-glyph=double-quote-serif-right]:before { content:'\e057'; } + +.oi[data-glyph=droplet]:before { content:'\e058'; } + +.oi[data-glyph=eject]:before { content:'\e059'; } + +.oi[data-glyph=elevator]:before { content:'\e05a'; } + +.oi[data-glyph=ellipses]:before { content:'\e05b'; } + +.oi[data-glyph=envelope-closed]:before { content:'\e05c'; } + 
+.oi[data-glyph=envelope-open]:before { content:'\e05d'; } + +.oi[data-glyph=euro]:before { content:'\e05e'; } + +.oi[data-glyph=excerpt]:before { content:'\e05f'; } + +.oi[data-glyph=expand-down]:before { content:'\e060'; } + +.oi[data-glyph=expand-left]:before { content:'\e061'; } + +.oi[data-glyph=expand-right]:before { content:'\e062'; } + +.oi[data-glyph=expand-up]:before { content:'\e063'; } + +.oi[data-glyph=external-link]:before { content:'\e064'; } + +.oi[data-glyph=eye]:before { content:'\e065'; } + +.oi[data-glyph=eyedropper]:before { content:'\e066'; } + +.oi[data-glyph=file]:before { content:'\e067'; } + +.oi[data-glyph=fire]:before { content:'\e068'; } + +.oi[data-glyph=flag]:before { content:'\e069'; } + +.oi[data-glyph=flash]:before { content:'\e06a'; } + +.oi[data-glyph=folder]:before { content:'\e06b'; } + +.oi[data-glyph=fork]:before { content:'\e06c'; } + +.oi[data-glyph=fullscreen-enter]:before { content:'\e06d'; } + +.oi[data-glyph=fullscreen-exit]:before { content:'\e06e'; } + +.oi[data-glyph=globe]:before { content:'\e06f'; } + +.oi[data-glyph=graph]:before { content:'\e070'; } + +.oi[data-glyph=grid-four-up]:before { content:'\e071'; } + +.oi[data-glyph=grid-three-up]:before { content:'\e072'; } + +.oi[data-glyph=grid-two-up]:before { content:'\e073'; } + +.oi[data-glyph=hard-drive]:before { content:'\e074'; } + +.oi[data-glyph=header]:before { content:'\e075'; } + +.oi[data-glyph=headphones]:before { content:'\e076'; } + +.oi[data-glyph=heart]:before { content:'\e077'; } + +.oi[data-glyph=home]:before { content:'\e078'; } + +.oi[data-glyph=image]:before { content:'\e079'; } + +.oi[data-glyph=inbox]:before { content:'\e07a'; } + +.oi[data-glyph=infinity]:before { content:'\e07b'; } + +.oi[data-glyph=info]:before { content:'\e07c'; } + +.oi[data-glyph=italic]:before { content:'\e07d'; } + +.oi[data-glyph=justify-center]:before { content:'\e07e'; } + +.oi[data-glyph=justify-left]:before { content:'\e07f'; } + 
+.oi[data-glyph=justify-right]:before { content:'\e080'; } + +.oi[data-glyph=key]:before { content:'\e081'; } + +.oi[data-glyph=laptop]:before { content:'\e082'; } + +.oi[data-glyph=layers]:before { content:'\e083'; } + +.oi[data-glyph=lightbulb]:before { content:'\e084'; } + +.oi[data-glyph=link-broken]:before { content:'\e085'; } + +.oi[data-glyph=link-intact]:before { content:'\e086'; } + +.oi[data-glyph=list-rich]:before { content:'\e087'; } + +.oi[data-glyph=list]:before { content:'\e088'; } + +.oi[data-glyph=location]:before { content:'\e089'; } + +.oi[data-glyph=lock-locked]:before { content:'\e08a'; } + +.oi[data-glyph=lock-unlocked]:before { content:'\e08b'; } + +.oi[data-glyph=loop-circular]:before { content:'\e08c'; } + +.oi[data-glyph=loop-square]:before { content:'\e08d'; } + +.oi[data-glyph=loop]:before { content:'\e08e'; } + +.oi[data-glyph=magnifying-glass]:before { content:'\e08f'; } + +.oi[data-glyph=map-marker]:before { content:'\e090'; } + +.oi[data-glyph=map]:before { content:'\e091'; } + +.oi[data-glyph=media-pause]:before { content:'\e092'; } + +.oi[data-glyph=media-play]:before { content:'\e093'; } + +.oi[data-glyph=media-record]:before { content:'\e094'; } + +.oi[data-glyph=media-skip-backward]:before { content:'\e095'; } + +.oi[data-glyph=media-skip-forward]:before { content:'\e096'; } + +.oi[data-glyph=media-step-backward]:before { content:'\e097'; } + +.oi[data-glyph=media-step-forward]:before { content:'\e098'; } + +.oi[data-glyph=media-stop]:before { content:'\e099'; } + +.oi[data-glyph=medical-cross]:before { content:'\e09a'; } + +.oi[data-glyph=menu]:before { content:'\e09b'; } + +.oi[data-glyph=microphone]:before { content:'\e09c'; } + +.oi[data-glyph=minus]:before { content:'\e09d'; } + +.oi[data-glyph=monitor]:before { content:'\e09e'; } + +.oi[data-glyph=moon]:before { content:'\e09f'; } + +.oi[data-glyph=move]:before { content:'\e0a0'; } + +.oi[data-glyph=musical-note]:before { content:'\e0a1'; } + 
+.oi[data-glyph=paperclip]:before { content:'\e0a2'; } + +.oi[data-glyph=pencil]:before { content:'\e0a3'; } + +.oi[data-glyph=people]:before { content:'\e0a4'; } + +.oi[data-glyph=person]:before { content:'\e0a5'; } + +.oi[data-glyph=phone]:before { content:'\e0a6'; } + +.oi[data-glyph=pie-chart]:before { content:'\e0a7'; } + +.oi[data-glyph=pin]:before { content:'\e0a8'; } + +.oi[data-glyph=play-circle]:before { content:'\e0a9'; } + +.oi[data-glyph=plus]:before { content:'\e0aa'; } + +.oi[data-glyph=power-standby]:before { content:'\e0ab'; } + +.oi[data-glyph=print]:before { content:'\e0ac'; } + +.oi[data-glyph=project]:before { content:'\e0ad'; } + +.oi[data-glyph=pulse]:before { content:'\e0ae'; } + +.oi[data-glyph=puzzle-piece]:before { content:'\e0af'; } + +.oi[data-glyph=question-mark]:before { content:'\e0b0'; } + +.oi[data-glyph=rain]:before { content:'\e0b1'; } + +.oi[data-glyph=random]:before { content:'\e0b2'; } + +.oi[data-glyph=reload]:before { content:'\e0b3'; } + +.oi[data-glyph=resize-both]:before { content:'\e0b4'; } + +.oi[data-glyph=resize-height]:before { content:'\e0b5'; } + +.oi[data-glyph=resize-width]:before { content:'\e0b6'; } + +.oi[data-glyph=rss-alt]:before { content:'\e0b7'; } + +.oi[data-glyph=rss]:before { content:'\e0b8'; } + +.oi[data-glyph=script]:before { content:'\e0b9'; } + +.oi[data-glyph=share-boxed]:before { content:'\e0ba'; } + +.oi[data-glyph=share]:before { content:'\e0bb'; } + +.oi[data-glyph=shield]:before { content:'\e0bc'; } + +.oi[data-glyph=signal]:before { content:'\e0bd'; } + +.oi[data-glyph=signpost]:before { content:'\e0be'; } + +.oi[data-glyph=sort-ascending]:before { content:'\e0bf'; } + +.oi[data-glyph=sort-descending]:before { content:'\e0c0'; } + +.oi[data-glyph=spreadsheet]:before { content:'\e0c1'; } + +.oi[data-glyph=star]:before { content:'\e0c2'; } + +.oi[data-glyph=sun]:before { content:'\e0c3'; } + +.oi[data-glyph=tablet]:before { content:'\e0c4'; } + +.oi[data-glyph=tag]:before { content:'\e0c5'; } 
+ +.oi[data-glyph=tags]:before { content:'\e0c6'; } + +.oi[data-glyph=target]:before { content:'\e0c7'; } + +.oi[data-glyph=task]:before { content:'\e0c8'; } + +.oi[data-glyph=terminal]:before { content:'\e0c9'; } + +.oi[data-glyph=text]:before { content:'\e0ca'; } + +.oi[data-glyph=thumb-down]:before { content:'\e0cb'; } + +.oi[data-glyph=thumb-up]:before { content:'\e0cc'; } + +.oi[data-glyph=timer]:before { content:'\e0cd'; } + +.oi[data-glyph=transfer]:before { content:'\e0ce'; } + +.oi[data-glyph=trash]:before { content:'\e0cf'; } + +.oi[data-glyph=underline]:before { content:'\e0d0'; } + +.oi[data-glyph=vertical-align-bottom]:before { content:'\e0d1'; } + +.oi[data-glyph=vertical-align-center]:before { content:'\e0d2'; } + +.oi[data-glyph=vertical-align-top]:before { content:'\e0d3'; } + +.oi[data-glyph=video]:before { content:'\e0d4'; } + +.oi[data-glyph=volume-high]:before { content:'\e0d5'; } + +.oi[data-glyph=volume-low]:before { content:'\e0d6'; } + +.oi[data-glyph=volume-off]:before { content:'\e0d7'; } + +.oi[data-glyph=warning]:before { content:'\e0d8'; } + +.oi[data-glyph=wifi]:before { content:'\e0d9'; } + +.oi[data-glyph=wrench]:before { content:'\e0da'; } + +.oi[data-glyph=x]:before { content:'\e0db'; } + +.oi[data-glyph=yen]:before { content:'\e0dc'; } + +.oi[data-glyph=zoom-in]:before { content:'\e0dd'; } + +.oi[data-glyph=zoom-out]:before { content:'\e0de'; } diff --git a/static/open-iconic/css/open-iconic.less b/static/open-iconic/css/open-iconic.less new file mode 100644 index 00000000..d505e9f2 --- /dev/null +++ b/static/open-iconic/css/open-iconic.less @@ -0,0 +1,962 @@ +@iconic-font-path: '../fonts/'; + +@font-face { + font-family: 'Icons'; + src: url('@{iconic-font-path}open-iconic.eot'); + src: url('@{iconic-font-path}open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('@{iconic-font-path}open-iconic.woff') format('woff'), url('@{iconic-font-path}open-iconic.ttf') format('truetype'), url('@{iconic-font-path}open-iconic.otf') 
format('opentype'), url('@{iconic-font-path}open-iconic.svg#iconic-sm') format('svg'); + font-weight: normal; + font-style: normal; +} + +.oi[data-glyph].oi-text-replace { + font-size: 0; + line-height: 0; +} + +.oi[data-glyph].oi-text-replace:before { + width: 1em; + text-align: center; +} + +.oi[data-glyph] { + &:before { + position: relative; + top: 1px; + font-family: 'Icons'; + display: inline-block; + speak: none; + line-height: 1; + vertical-align: baseline; + font-weight: normal; + font-style: normal; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + } + + &:empty:before { + width: 1em; + text-align: center; + box-sizing: content-box; + } + + &.oi-align-left:before { + text-align: left; + } + &.oi-align-right:before { + text-align: right; + } + &.oi-align-center:before { + text-align: center; + } + + &.oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); + } + + &.oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); + } + + &.oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); + } +} + + +.oi[data-glyph=account-login]:before { + content: '\e000'; +} + +.oi[data-glyph=account-logout]:before { + content: '\e001'; +} + +.oi[data-glyph=action-redo]:before { + content: '\e002'; +} + +.oi[data-glyph=action-undo]:before { + content: '\e003'; +} + +.oi[data-glyph=align-center]:before { + content: '\e004'; +} + +.oi[data-glyph=align-left]:before { + content: '\e005'; +} + +.oi[data-glyph=align-right]:before { + content: '\e006'; +} + +.oi[data-glyph=aperture]:before { + content: '\e007'; +} + +.oi[data-glyph=arrow-bottom]:before { + content: '\e008'; +} + +.oi[data-glyph=arrow-circle-bottom]:before { + content: '\e009'; +} + +.oi[data-glyph=arrow-circle-left]:before { + content: '\e00a'; +} + 
+.oi[data-glyph=arrow-circle-right]:before { + content: '\e00b'; +} + +.oi[data-glyph=arrow-circle-top]:before { + content: '\e00c'; +} + +.oi[data-glyph=arrow-left]:before { + content: '\e00d'; +} + +.oi[data-glyph=arrow-right]:before { + content: '\e00e'; +} + +.oi[data-glyph=arrow-thick-bottom]:before { + content: '\e00f'; +} + +.oi[data-glyph=arrow-thick-left]:before { + content: '\e010'; +} + +.oi[data-glyph=arrow-thick-right]:before { + content: '\e011'; +} + +.oi[data-glyph=arrow-thick-top]:before { + content: '\e012'; +} + +.oi[data-glyph=arrow-top]:before { + content: '\e013'; +} + +.oi[data-glyph=audio-spectrum]:before { + content: '\e014'; +} + +.oi[data-glyph=audio]:before { + content: '\e015'; +} + +.oi[data-glyph=badge]:before { + content: '\e016'; +} + +.oi[data-glyph=ban]:before { + content: '\e017'; +} + +.oi[data-glyph=bar-chart]:before { + content: '\e018'; +} + +.oi[data-glyph=basket]:before { + content: '\e019'; +} + +.oi[data-glyph=battery-empty]:before { + content: '\e01a'; +} + +.oi[data-glyph=battery-full]:before { + content: '\e01b'; +} + +.oi[data-glyph=beaker]:before { + content: '\e01c'; +} + +.oi[data-glyph=bell]:before { + content: '\e01d'; +} + +.oi[data-glyph=bluetooth]:before { + content: '\e01e'; +} + +.oi[data-glyph=bold]:before { + content: '\e01f'; +} + +.oi[data-glyph=bolt]:before { + content: '\e020'; +} + +.oi[data-glyph=book]:before { + content: '\e021'; +} + +.oi[data-glyph=bookmark]:before { + content: '\e022'; +} + +.oi[data-glyph=box]:before { + content: '\e023'; +} + +.oi[data-glyph=briefcase]:before { + content: '\e024'; +} + +.oi[data-glyph=british-pound]:before { + content: '\e025'; +} + +.oi[data-glyph=browser]:before { + content: '\e026'; +} + +.oi[data-glyph=brush]:before { + content: '\e027'; +} + +.oi[data-glyph=bug]:before { + content: '\e028'; +} + +.oi[data-glyph=bullhorn]:before { + content: '\e029'; +} + +.oi[data-glyph=calculator]:before { + content: '\e02a'; +} + +.oi[data-glyph=calendar]:before { + 
content: '\e02b'; +} + +.oi[data-glyph=camera-slr]:before { + content: '\e02c'; +} + +.oi[data-glyph=caret-bottom]:before { + content: '\e02d'; +} + +.oi[data-glyph=caret-left]:before { + content: '\e02e'; +} + +.oi[data-glyph=caret-right]:before { + content: '\e02f'; +} + +.oi[data-glyph=caret-top]:before { + content: '\e030'; +} + +.oi[data-glyph=cart]:before { + content: '\e031'; +} + +.oi[data-glyph=chat]:before { + content: '\e032'; +} + +.oi[data-glyph=check]:before { + content: '\e033'; +} + +.oi[data-glyph=chevron-bottom]:before { + content: '\e034'; +} + +.oi[data-glyph=chevron-left]:before { + content: '\e035'; +} + +.oi[data-glyph=chevron-right]:before { + content: '\e036'; +} + +.oi[data-glyph=chevron-top]:before { + content: '\e037'; +} + +.oi[data-glyph=circle-check]:before { + content: '\e038'; +} + +.oi[data-glyph=circle-x]:before { + content: '\e039'; +} + +.oi[data-glyph=clipboard]:before { + content: '\e03a'; +} + +.oi[data-glyph=clock]:before { + content: '\e03b'; +} + +.oi[data-glyph=cloud-download]:before { + content: '\e03c'; +} + +.oi[data-glyph=cloud-upload]:before { + content: '\e03d'; +} + +.oi[data-glyph=cloud]:before { + content: '\e03e'; +} + +.oi[data-glyph=cloudy]:before { + content: '\e03f'; +} + +.oi[data-glyph=code]:before { + content: '\e040'; +} + +.oi[data-glyph=cog]:before { + content: '\e041'; +} + +.oi[data-glyph=collapse-down]:before { + content: '\e042'; +} + +.oi[data-glyph=collapse-left]:before { + content: '\e043'; +} + +.oi[data-glyph=collapse-right]:before { + content: '\e044'; +} + +.oi[data-glyph=collapse-up]:before { + content: '\e045'; +} + +.oi[data-glyph=command]:before { + content: '\e046'; +} + +.oi[data-glyph=comment-square]:before { + content: '\e047'; +} + +.oi[data-glyph=compass]:before { + content: '\e048'; +} + +.oi[data-glyph=contrast]:before { + content: '\e049'; +} + +.oi[data-glyph=copywriting]:before { + content: '\e04a'; +} + +.oi[data-glyph=credit-card]:before { + content: '\e04b'; +} + 
+.oi[data-glyph=crop]:before { + content: '\e04c'; +} + +.oi[data-glyph=dashboard]:before { + content: '\e04d'; +} + +.oi[data-glyph=data-transfer-download]:before { + content: '\e04e'; +} + +.oi[data-glyph=data-transfer-upload]:before { + content: '\e04f'; +} + +.oi[data-glyph=delete]:before { + content: '\e050'; +} + +.oi[data-glyph=dial]:before { + content: '\e051'; +} + +.oi[data-glyph=document]:before { + content: '\e052'; +} + +.oi[data-glyph=dollar]:before { + content: '\e053'; +} + +.oi[data-glyph=double-quote-sans-left]:before { + content: '\e054'; +} + +.oi[data-glyph=double-quote-sans-right]:before { + content: '\e055'; +} + +.oi[data-glyph=double-quote-serif-left]:before { + content: '\e056'; +} + +.oi[data-glyph=double-quote-serif-right]:before { + content: '\e057'; +} + +.oi[data-glyph=droplet]:before { + content: '\e058'; +} + +.oi[data-glyph=eject]:before { + content: '\e059'; +} + +.oi[data-glyph=elevator]:before { + content: '\e05a'; +} + +.oi[data-glyph=ellipses]:before { + content: '\e05b'; +} + +.oi[data-glyph=envelope-closed]:before { + content: '\e05c'; +} + +.oi[data-glyph=envelope-open]:before { + content: '\e05d'; +} + +.oi[data-glyph=euro]:before { + content: '\e05e'; +} + +.oi[data-glyph=excerpt]:before { + content: '\e05f'; +} + +.oi[data-glyph=expand-down]:before { + content: '\e060'; +} + +.oi[data-glyph=expand-left]:before { + content: '\e061'; +} + +.oi[data-glyph=expand-right]:before { + content: '\e062'; +} + +.oi[data-glyph=expand-up]:before { + content: '\e063'; +} + +.oi[data-glyph=external-link]:before { + content: '\e064'; +} + +.oi[data-glyph=eye]:before { + content: '\e065'; +} + +.oi[data-glyph=eyedropper]:before { + content: '\e066'; +} + +.oi[data-glyph=file]:before { + content: '\e067'; +} + +.oi[data-glyph=fire]:before { + content: '\e068'; +} + +.oi[data-glyph=flag]:before { + content: '\e069'; +} + +.oi[data-glyph=flash]:before { + content: '\e06a'; +} + +.oi[data-glyph=folder]:before { + content: '\e06b'; +} + 
+.oi[data-glyph=fork]:before { + content: '\e06c'; +} + +.oi[data-glyph=fullscreen-enter]:before { + content: '\e06d'; +} + +.oi[data-glyph=fullscreen-exit]:before { + content: '\e06e'; +} + +.oi[data-glyph=globe]:before { + content: '\e06f'; +} + +.oi[data-glyph=graph]:before { + content: '\e070'; +} + +.oi[data-glyph=grid-four-up]:before { + content: '\e071'; +} + +.oi[data-glyph=grid-three-up]:before { + content: '\e072'; +} + +.oi[data-glyph=grid-two-up]:before { + content: '\e073'; +} + +.oi[data-glyph=hard-drive]:before { + content: '\e074'; +} + +.oi[data-glyph=header]:before { + content: '\e075'; +} + +.oi[data-glyph=headphones]:before { + content: '\e076'; +} + +.oi[data-glyph=heart]:before { + content: '\e077'; +} + +.oi[data-glyph=home]:before { + content: '\e078'; +} + +.oi[data-glyph=image]:before { + content: '\e079'; +} + +.oi[data-glyph=inbox]:before { + content: '\e07a'; +} + +.oi[data-glyph=infinity]:before { + content: '\e07b'; +} + +.oi[data-glyph=info]:before { + content: '\e07c'; +} + +.oi[data-glyph=italic]:before { + content: '\e07d'; +} + +.oi[data-glyph=justify-center]:before { + content: '\e07e'; +} + +.oi[data-glyph=justify-left]:before { + content: '\e07f'; +} + +.oi[data-glyph=justify-right]:before { + content: '\e080'; +} + +.oi[data-glyph=key]:before { + content: '\e081'; +} + +.oi[data-glyph=laptop]:before { + content: '\e082'; +} + +.oi[data-glyph=layers]:before { + content: '\e083'; +} + +.oi[data-glyph=lightbulb]:before { + content: '\e084'; +} + +.oi[data-glyph=link-broken]:before { + content: '\e085'; +} + +.oi[data-glyph=link-intact]:before { + content: '\e086'; +} + +.oi[data-glyph=list-rich]:before { + content: '\e087'; +} + +.oi[data-glyph=list]:before { + content: '\e088'; +} + +.oi[data-glyph=location]:before { + content: '\e089'; +} + +.oi[data-glyph=lock-locked]:before { + content: '\e08a'; +} + +.oi[data-glyph=lock-unlocked]:before { + content: '\e08b'; +} + +.oi[data-glyph=loop-circular]:before { + content: '\e08c'; 
+} + +.oi[data-glyph=loop-square]:before { + content: '\e08d'; +} + +.oi[data-glyph=loop]:before { + content: '\e08e'; +} + +.oi[data-glyph=magnifying-glass]:before { + content: '\e08f'; +} + +.oi[data-glyph=map-marker]:before { + content: '\e090'; +} + +.oi[data-glyph=map]:before { + content: '\e091'; +} + +.oi[data-glyph=media-pause]:before { + content: '\e092'; +} + +.oi[data-glyph=media-play]:before { + content: '\e093'; +} + +.oi[data-glyph=media-record]:before { + content: '\e094'; +} + +.oi[data-glyph=media-skip-backward]:before { + content: '\e095'; +} + +.oi[data-glyph=media-skip-forward]:before { + content: '\e096'; +} + +.oi[data-glyph=media-step-backward]:before { + content: '\e097'; +} + +.oi[data-glyph=media-step-forward]:before { + content: '\e098'; +} + +.oi[data-glyph=media-stop]:before { + content: '\e099'; +} + +.oi[data-glyph=medical-cross]:before { + content: '\e09a'; +} + +.oi[data-glyph=menu]:before { + content: '\e09b'; +} + +.oi[data-glyph=microphone]:before { + content: '\e09c'; +} + +.oi[data-glyph=minus]:before { + content: '\e09d'; +} + +.oi[data-glyph=monitor]:before { + content: '\e09e'; +} + +.oi[data-glyph=moon]:before { + content: '\e09f'; +} + +.oi[data-glyph=move]:before { + content: '\e0a0'; +} + +.oi[data-glyph=musical-note]:before { + content: '\e0a1'; +} + +.oi[data-glyph=paperclip]:before { + content: '\e0a2'; +} + +.oi[data-glyph=pencil]:before { + content: '\e0a3'; +} + +.oi[data-glyph=people]:before { + content: '\e0a4'; +} + +.oi[data-glyph=person]:before { + content: '\e0a5'; +} + +.oi[data-glyph=phone]:before { + content: '\e0a6'; +} + +.oi[data-glyph=pie-chart]:before { + content: '\e0a7'; +} + +.oi[data-glyph=pin]:before { + content: '\e0a8'; +} + +.oi[data-glyph=play-circle]:before { + content: '\e0a9'; +} + +.oi[data-glyph=plus]:before { + content: '\e0aa'; +} + +.oi[data-glyph=power-standby]:before { + content: '\e0ab'; +} + +.oi[data-glyph=print]:before { + content: '\e0ac'; +} + +.oi[data-glyph=project]:before { 
+ content: '\e0ad'; +} + +.oi[data-glyph=pulse]:before { + content: '\e0ae'; +} + +.oi[data-glyph=puzzle-piece]:before { + content: '\e0af'; +} + +.oi[data-glyph=question-mark]:before { + content: '\e0b0'; +} + +.oi[data-glyph=rain]:before { + content: '\e0b1'; +} + +.oi[data-glyph=random]:before { + content: '\e0b2'; +} + +.oi[data-glyph=reload]:before { + content: '\e0b3'; +} + +.oi[data-glyph=resize-both]:before { + content: '\e0b4'; +} + +.oi[data-glyph=resize-height]:before { + content: '\e0b5'; +} + +.oi[data-glyph=resize-width]:before { + content: '\e0b6'; +} + +.oi[data-glyph=rss-alt]:before { + content: '\e0b7'; +} + +.oi[data-glyph=rss]:before { + content: '\e0b8'; +} + +.oi[data-glyph=script]:before { + content: '\e0b9'; +} + +.oi[data-glyph=share-boxed]:before { + content: '\e0ba'; +} + +.oi[data-glyph=share]:before { + content: '\e0bb'; +} + +.oi[data-glyph=shield]:before { + content: '\e0bc'; +} + +.oi[data-glyph=signal]:before { + content: '\e0bd'; +} + +.oi[data-glyph=signpost]:before { + content: '\e0be'; +} + +.oi[data-glyph=sort-ascending]:before { + content: '\e0bf'; +} + +.oi[data-glyph=sort-descending]:before { + content: '\e0c0'; +} + +.oi[data-glyph=spreadsheet]:before { + content: '\e0c1'; +} + +.oi[data-glyph=star]:before { + content: '\e0c2'; +} + +.oi[data-glyph=sun]:before { + content: '\e0c3'; +} + +.oi[data-glyph=tablet]:before { + content: '\e0c4'; +} + +.oi[data-glyph=tag]:before { + content: '\e0c5'; +} + +.oi[data-glyph=tags]:before { + content: '\e0c6'; +} + +.oi[data-glyph=target]:before { + content: '\e0c7'; +} + +.oi[data-glyph=task]:before { + content: '\e0c8'; +} + +.oi[data-glyph=terminal]:before { + content: '\e0c9'; +} + +.oi[data-glyph=text]:before { + content: '\e0ca'; +} + +.oi[data-glyph=thumb-down]:before { + content: '\e0cb'; +} + +.oi[data-glyph=thumb-up]:before { + content: '\e0cc'; +} + +.oi[data-glyph=timer]:before { + content: '\e0cd'; +} + +.oi[data-glyph=transfer]:before { + content: '\e0ce'; +} + 
+.oi[data-glyph=trash]:before { + content: '\e0cf'; +} + +.oi[data-glyph=underline]:before { + content: '\e0d0'; +} + +.oi[data-glyph=vertical-align-bottom]:before { + content: '\e0d1'; +} + +.oi[data-glyph=vertical-align-center]:before { + content: '\e0d2'; +} + +.oi[data-glyph=vertical-align-top]:before { + content: '\e0d3'; +} + +.oi[data-glyph=video]:before { + content: '\e0d4'; +} + +.oi[data-glyph=volume-high]:before { + content: '\e0d5'; +} + +.oi[data-glyph=volume-low]:before { + content: '\e0d6'; +} + +.oi[data-glyph=volume-off]:before { + content: '\e0d7'; +} + +.oi[data-glyph=warning]:before { + content: '\e0d8'; +} + +.oi[data-glyph=wifi]:before { + content: '\e0d9'; +} + +.oi[data-glyph=wrench]:before { + content: '\e0da'; +} + +.oi[data-glyph=x]:before { + content: '\e0db'; +} + +.oi[data-glyph=yen]:before { + content: '\e0dc'; +} + +.oi[data-glyph=zoom-in]:before { + content: '\e0dd'; +} + +.oi[data-glyph=zoom-out]:before { + content: '\e0de'; +} diff --git a/static/open-iconic/css/open-iconic.min.css b/static/open-iconic/css/open-iconic.min.css new file mode 100644 index 00000000..1f6afb82 --- /dev/null +++ b/static/open-iconic/css/open-iconic.min.css @@ -0,0 +1 @@ +@font-face{font-family:Icons;src:url(../fonts/open-iconic.eot);src:url(../fonts/open-iconic.eot?#iconic-sm) format('embedded-opentype'),url(../fonts/open-iconic.woff) format('woff'),url(../fonts/open-iconic.ttf) format('truetype'),url(../fonts/open-iconic.otf) format('opentype'),url(../fonts/open-iconic.svg#iconic-sm) 
format('svg');font-weight:400;font-style:normal}.oi[data-glyph].oi-text-replace{font-size:0;line-height:0}.oi[data-glyph].oi-text-replace:before{width:1em;text-align:center}.oi[data-glyph]:before{font-family:Icons;display:inline-block;speak:none;line-height:1;vertical-align:baseline;font-weight:400;font-style:normal;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.oi[data-glyph]:empty:before{width:1em;text-align:center;box-sizing:content-box}.oi[data-glyph].oi-align-left:before{text-align:left}.oi[data-glyph].oi-align-right:before{text-align:right}.oi[data-glyph].oi-align-center:before{text-align:center}.oi[data-glyph].oi-flip-horizontal:before{-webkit-transform:scale(-1,1);-ms-transform:scale(-1,1);transform:scale(-1,1)}.oi[data-glyph].oi-flip-vertical:before{-webkit-transform:scale(1,-1);-ms-transform:scale(-1,1);transform:scale(1,-1)}.oi[data-glyph].oi-flip-horizontal-vertical:before{-webkit-transform:scale(-1,-1);-ms-transform:scale(-1,1);transform:scale(-1,-1)}.oi[data-glyph=account-login]:before{content:'\e000'}.oi[data-glyph=account-logout]:before{content:'\e001'}.oi[data-glyph=action-redo]:before{content:'\e002'}.oi[data-glyph=action-undo]:before{content:'\e003'}.oi[data-glyph=align-center]:before{content:'\e004'}.oi[data-glyph=align-left]:before{content:'\e005'}.oi[data-glyph=align-right]:before{content:'\e006'}.oi[data-glyph=aperture]:before{content:'\e007'}.oi[data-glyph=arrow-bottom]:before{content:'\e008'}.oi[data-glyph=arrow-circle-bottom]:before{content:'\e009'}.oi[data-glyph=arrow-circle-left]:before{content:'\e00a'}.oi[data-glyph=arrow-circle-right]:before{content:'\e00b'}.oi[data-glyph=arrow-circle-top]:before{content:'\e00c'}.oi[data-glyph=arrow-left]:before{content:'\e00d'}.oi[data-glyph=arrow-right]:before{content:'\e00e'}.oi[data-glyph=arrow-thick-bottom]:before{content:'\e00f'}.oi[data-glyph=arrow-thick-left]:before{content:'\e010'}.oi[data-glyph=arrow-thick-right]:before{content:'\e011'}.oi[data-glyph=arrow-thick-top]:bef
ore{content:'\e012'}.oi[data-glyph=arrow-top]:before{content:'\e013'}.oi[data-glyph=audio-spectrum]:before{content:'\e014'}.oi[data-glyph=audio]:before{content:'\e015'}.oi[data-glyph=badge]:before{content:'\e016'}.oi[data-glyph=ban]:before{content:'\e017'}.oi[data-glyph=bar-chart]:before{content:'\e018'}.oi[data-glyph=basket]:before{content:'\e019'}.oi[data-glyph=battery-empty]:before{content:'\e01a'}.oi[data-glyph=battery-full]:before{content:'\e01b'}.oi[data-glyph=beaker]:before{content:'\e01c'}.oi[data-glyph=bell]:before{content:'\e01d'}.oi[data-glyph=bluetooth]:before{content:'\e01e'}.oi[data-glyph=bold]:before{content:'\e01f'}.oi[data-glyph=bolt]:before{content:'\e020'}.oi[data-glyph=book]:before{content:'\e021'}.oi[data-glyph=bookmark]:before{content:'\e022'}.oi[data-glyph=box]:before{content:'\e023'}.oi[data-glyph=briefcase]:before{content:'\e024'}.oi[data-glyph=british-pound]:before{content:'\e025'}.oi[data-glyph=browser]:before{content:'\e026'}.oi[data-glyph=brush]:before{content:'\e027'}.oi[data-glyph=bug]:before{content:'\e028'}.oi[data-glyph=bullhorn]:before{content:'\e029'}.oi[data-glyph=calculator]:before{content:'\e02a'}.oi[data-glyph=calendar]:before{content:'\e02b'}.oi[data-glyph=camera-slr]:before{content:'\e02c'}.oi[data-glyph=caret-bottom]:before{content:'\e02d'}.oi[data-glyph=caret-left]:before{content:'\e02e'}.oi[data-glyph=caret-right]:before{content:'\e02f'}.oi[data-glyph=caret-top]:before{content:'\e030'}.oi[data-glyph=cart]:before{content:'\e031'}.oi[data-glyph=chat]:before{content:'\e032'}.oi[data-glyph=check]:before{content:'\e033'}.oi[data-glyph=chevron-bottom]:before{content:'\e034'}.oi[data-glyph=chevron-left]:before{content:'\e035'}.oi[data-glyph=chevron-right]:before{content:'\e036'}.oi[data-glyph=chevron-top]:before{content:'\e037'}.oi[data-glyph=circle-check]:before{content:'\e038'}.oi[data-glyph=circle-x]:before{content:'\e039'}.oi[data-glyph=clipboard]:before{content:'\e03a'}.oi[data-glyph=clock]:before{content:'\e03b'}.oi[data-g
lyph=cloud-download]:before{content:'\e03c'}.oi[data-glyph=cloud-upload]:before{content:'\e03d'}.oi[data-glyph=cloud]:before{content:'\e03e'}.oi[data-glyph=cloudy]:before{content:'\e03f'}.oi[data-glyph=code]:before{content:'\e040'}.oi[data-glyph=cog]:before{content:'\e041'}.oi[data-glyph=collapse-down]:before{content:'\e042'}.oi[data-glyph=collapse-left]:before{content:'\e043'}.oi[data-glyph=collapse-right]:before{content:'\e044'}.oi[data-glyph=collapse-up]:before{content:'\e045'}.oi[data-glyph=command]:before{content:'\e046'}.oi[data-glyph=comment-square]:before{content:'\e047'}.oi[data-glyph=compass]:before{content:'\e048'}.oi[data-glyph=contrast]:before{content:'\e049'}.oi[data-glyph=copywriting]:before{content:'\e04a'}.oi[data-glyph=credit-card]:before{content:'\e04b'}.oi[data-glyph=crop]:before{content:'\e04c'}.oi[data-glyph=dashboard]:before{content:'\e04d'}.oi[data-glyph=data-transfer-download]:before{content:'\e04e'}.oi[data-glyph=data-transfer-upload]:before{content:'\e04f'}.oi[data-glyph=delete]:before{content:'\e050'}.oi[data-glyph=dial]:before{content:'\e051'}.oi[data-glyph=document]:before{content:'\e052'}.oi[data-glyph=dollar]:before{content:'\e053'}.oi[data-glyph=double-quote-sans-left]:before{content:'\e054'}.oi[data-glyph=double-quote-sans-right]:before{content:'\e055'}.oi[data-glyph=double-quote-serif-left]:before{content:'\e056'}.oi[data-glyph=double-quote-serif-right]:before{content:'\e057'}.oi[data-glyph=droplet]:before{content:'\e058'}.oi[data-glyph=eject]:before{content:'\e059'}.oi[data-glyph=elevator]:before{content:'\e05a'}.oi[data-glyph=ellipses]:before{content:'\e05b'}.oi[data-glyph=envelope-closed]:before{content:'\e05c'}.oi[data-glyph=envelope-open]:before{content:'\e05d'}.oi[data-glyph=euro]:before{content:'\e05e'}.oi[data-glyph=excerpt]:before{content:'\e05f'}.oi[data-glyph=expand-down]:before{content:'\e060'}.oi[data-glyph=expand-left]:before{content:'\e061'}.oi[data-glyph=expand-right]:before{content:'\e062'}.oi[data-glyph=expand-up]
:before{content:'\e063'}.oi[data-glyph=external-link]:before{content:'\e064'}.oi[data-glyph=eye]:before{content:'\e065'}.oi[data-glyph=eyedropper]:before{content:'\e066'}.oi[data-glyph=file]:before{content:'\e067'}.oi[data-glyph=fire]:before{content:'\e068'}.oi[data-glyph=flag]:before{content:'\e069'}.oi[data-glyph=flash]:before{content:'\e06a'}.oi[data-glyph=folder]:before{content:'\e06b'}.oi[data-glyph=fork]:before{content:'\e06c'}.oi[data-glyph=fullscreen-enter]:before{content:'\e06d'}.oi[data-glyph=fullscreen-exit]:before{content:'\e06e'}.oi[data-glyph=globe]:before{content:'\e06f'}.oi[data-glyph=graph]:before{content:'\e070'}.oi[data-glyph=grid-four-up]:before{content:'\e071'}.oi[data-glyph=grid-three-up]:before{content:'\e072'}.oi[data-glyph=grid-two-up]:before{content:'\e073'}.oi[data-glyph=hard-drive]:before{content:'\e074'}.oi[data-glyph=header]:before{content:'\e075'}.oi[data-glyph=headphones]:before{content:'\e076'}.oi[data-glyph=heart]:before{content:'\e077'}.oi[data-glyph=home]:before{content:'\e078'}.oi[data-glyph=image]:before{content:'\e079'}.oi[data-glyph=inbox]:before{content:'\e07a'}.oi[data-glyph=infinity]:before{content:'\e07b'}.oi[data-glyph=info]:before{content:'\e07c'}.oi[data-glyph=italic]:before{content:'\e07d'}.oi[data-glyph=justify-center]:before{content:'\e07e'}.oi[data-glyph=justify-left]:before{content:'\e07f'}.oi[data-glyph=justify-right]:before{content:'\e080'}.oi[data-glyph=key]:before{content:'\e081'}.oi[data-glyph=laptop]:before{content:'\e082'}.oi[data-glyph=layers]:before{content:'\e083'}.oi[data-glyph=lightbulb]:before{content:'\e084'}.oi[data-glyph=link-broken]:before{content:'\e085'}.oi[data-glyph=link-intact]:before{content:'\e086'}.oi[data-glyph=list-rich]:before{content:'\e087'}.oi[data-glyph=list]:before{content:'\e088'}.oi[data-glyph=location]:before{content:'\e089'}.oi[data-glyph=lock-locked]:before{content:'\e08a'}.oi[data-glyph=lock-unlocked]:before{content:'\e08b'}.oi[data-glyph=loop-circular]:before{content:'\e08c'}
.oi[data-glyph=loop-square]:before{content:'\e08d'}.oi[data-glyph=loop]:before{content:'\e08e'}.oi[data-glyph=magnifying-glass]:before{content:'\e08f'}.oi[data-glyph=map-marker]:before{content:'\e090'}.oi[data-glyph=map]:before{content:'\e091'}.oi[data-glyph=media-pause]:before{content:'\e092'}.oi[data-glyph=media-play]:before{content:'\e093'}.oi[data-glyph=media-record]:before{content:'\e094'}.oi[data-glyph=media-skip-backward]:before{content:'\e095'}.oi[data-glyph=media-skip-forward]:before{content:'\e096'}.oi[data-glyph=media-step-backward]:before{content:'\e097'}.oi[data-glyph=media-step-forward]:before{content:'\e098'}.oi[data-glyph=media-stop]:before{content:'\e099'}.oi[data-glyph=medical-cross]:before{content:'\e09a'}.oi[data-glyph=menu]:before{content:'\e09b'}.oi[data-glyph=microphone]:before{content:'\e09c'}.oi[data-glyph=minus]:before{content:'\e09d'}.oi[data-glyph=monitor]:before{content:'\e09e'}.oi[data-glyph=moon]:before{content:'\e09f'}.oi[data-glyph=move]:before{content:'\e0a0'}.oi[data-glyph=musical-note]:before{content:'\e0a1'}.oi[data-glyph=paperclip]:before{content:'\e0a2'}.oi[data-glyph=pencil]:before{content:'\e0a3'}.oi[data-glyph=people]:before{content:'\e0a4'}.oi[data-glyph=person]:before{content:'\e0a5'}.oi[data-glyph=phone]:before{content:'\e0a6'}.oi[data-glyph=pie-chart]:before{content:'\e0a7'}.oi[data-glyph=pin]:before{content:'\e0a8'}.oi[data-glyph=play-circle]:before{content:'\e0a9'}.oi[data-glyph=plus]:before{content:'\e0aa'}.oi[data-glyph=power-standby]:before{content:'\e0ab'}.oi[data-glyph=print]:before{content:'\e0ac'}.oi[data-glyph=project]:before{content:'\e0ad'}.oi[data-glyph=pulse]:before{content:'\e0ae'}.oi[data-glyph=puzzle-piece]:before{content:'\e0af'}.oi[data-glyph=question-mark]:before{content:'\e0b0'}.oi[data-glyph=rain]:before{content:'\e0b1'}.oi[data-glyph=random]:before{content:'\e0b2'}.oi[data-glyph=reload]:before{content:'\e0b3'}.oi[data-glyph=resize-both]:before{content:'\e0b4'}.oi[data-glyph=resize-height]:before{co
ntent:'\e0b5'}.oi[data-glyph=resize-width]:before{content:'\e0b6'}.oi[data-glyph=rss-alt]:before{content:'\e0b7'}.oi[data-glyph=rss]:before{content:'\e0b8'}.oi[data-glyph=script]:before{content:'\e0b9'}.oi[data-glyph=share-boxed]:before{content:'\e0ba'}.oi[data-glyph=share]:before{content:'\e0bb'}.oi[data-glyph=shield]:before{content:'\e0bc'}.oi[data-glyph=signal]:before{content:'\e0bd'}.oi[data-glyph=signpost]:before{content:'\e0be'}.oi[data-glyph=sort-ascending]:before{content:'\e0bf'}.oi[data-glyph=sort-descending]:before{content:'\e0c0'}.oi[data-glyph=spreadsheet]:before{content:'\e0c1'}.oi[data-glyph=star]:before{content:'\e0c2'}.oi[data-glyph=sun]:before{content:'\e0c3'}.oi[data-glyph=tablet]:before{content:'\e0c4'}.oi[data-glyph=tag]:before{content:'\e0c5'}.oi[data-glyph=tags]:before{content:'\e0c6'}.oi[data-glyph=target]:before{content:'\e0c7'}.oi[data-glyph=task]:before{content:'\e0c8'}.oi[data-glyph=terminal]:before{content:'\e0c9'}.oi[data-glyph=text]:before{content:'\e0ca'}.oi[data-glyph=thumb-down]:before{content:'\e0cb'}.oi[data-glyph=thumb-up]:before{content:'\e0cc'}.oi[data-glyph=timer]:before{content:'\e0cd'}.oi[data-glyph=transfer]:before{content:'\e0ce'}.oi[data-glyph=trash]:before{content:'\e0cf'}.oi[data-glyph=underline]:before{content:'\e0d0'}.oi[data-glyph=vertical-align-bottom]:before{content:'\e0d1'}.oi[data-glyph=vertical-align-center]:before{content:'\e0d2'}.oi[data-glyph=vertical-align-top]:before{content:'\e0d3'}.oi[data-glyph=video]:before{content:'\e0d4'}.oi[data-glyph=volume-high]:before{content:'\e0d5'}.oi[data-glyph=volume-low]:before{content:'\e0d6'}.oi[data-glyph=volume-off]:before{content:'\e0d7'}.oi[data-glyph=warning]:before{content:'\e0d8'}.oi[data-glyph=wifi]:before{content:'\e0d9'}.oi[data-glyph=wrench]:before{content:'\e0da'}.oi[data-glyph=x]:before{content:'\e0db'}.oi[data-glyph=yen]:before{content:'\e0dc'}.oi[data-glyph=zoom-in]:before{content:'\e0dd'}.oi[data-glyph=zoom-out]:before{content:'\e0de'} \ No newline at end 
of file diff --git a/static/open-iconic/css/open-iconic.scss b/static/open-iconic/css/open-iconic.scss new file mode 100644 index 00000000..e03d979f --- /dev/null +++ b/static/open-iconic/css/open-iconic.scss @@ -0,0 +1,963 @@ +$iconic-font-path: '../fonts/' !default; + +@font-face { + font-family: 'Icons'; + src: url('#{$iconic-font-path}open-iconic.eot'); + src: url('#{$iconic-font-path}open-iconic.eot?#iconic-sm') format('embedded-opentype'), url('#{$iconic-font-path}open-iconic.woff') format('woff'), url('#{$iconic-font-path}open-iconic.ttf') format('truetype'), url('#{$iconic-font-path}open-iconic.otf') format('opentype'), url('#{$iconic-font-path}open-iconic.svg#iconic-sm') format('svg'); + font-weight: normal; + font-style: normal; +} + +.oi[data-glyph].oi-text-replace { + font-size: 0; + line-height: 0; +} + +.oi[data-glyph].oi-text-replace:before { + width: 1em; + text-align: center; +} + +.oi[data-glyph] { + &:before { + position: relative; + top: 1px; + font-family: 'Icons'; + display: inline-block; + speak: none; + line-height: 1; + vertical-align: baseline; + font-weight: normal; + font-style: normal; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + } + + &:empty:before { + width: 1em; + text-align: center; + box-sizing: content-box; + } + + &.oi-align-left:before { + text-align: left; + } + &.oi-align-right:before { + text-align: right; + } + &.oi-align-center:before { + text-align: center; + } + + &.oi-flip-horizontal:before { + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); + } + + &.oi-flip-vertical:before { + -webkit-transform: scale(1, -1); + -ms-transform: scale(-1, 1); + transform: scale(1, -1); + } + + &.oi-flip-horizontal-vertical:before { + -webkit-transform: scale(-1, -1); + -ms-transform: scale(-1, 1); + transform: scale(-1, -1); + } +} + + +.oi[data-glyph=account-login]:before { + content: '\e000'; +} + +.oi[data-glyph=account-logout]:before { + content: '\e001'; +} 
+ +.oi[data-glyph=action-redo]:before { + content: '\e002'; +} + +.oi[data-glyph=action-undo]:before { + content: '\e003'; +} + +.oi[data-glyph=align-center]:before { + content: '\e004'; +} + +.oi[data-glyph=align-left]:before { + content: '\e005'; +} + +.oi[data-glyph=align-right]:before { + content: '\e006'; +} + +.oi[data-glyph=aperture]:before { + content: '\e007'; +} + +.oi[data-glyph=arrow-bottom]:before { + content: '\e008'; +} + +.oi[data-glyph=arrow-circle-bottom]:before { + content: '\e009'; +} + +.oi[data-glyph=arrow-circle-left]:before { + content: '\e00a'; +} + +.oi[data-glyph=arrow-circle-right]:before { + content: '\e00b'; +} + +.oi[data-glyph=arrow-circle-top]:before { + content: '\e00c'; +} + +.oi[data-glyph=arrow-left]:before { + content: '\e00d'; +} + +.oi[data-glyph=arrow-right]:before { + content: '\e00e'; +} + +.oi[data-glyph=arrow-thick-bottom]:before { + content: '\e00f'; +} + +.oi[data-glyph=arrow-thick-left]:before { + content: '\e010'; +} + +.oi[data-glyph=arrow-thick-right]:before { + content: '\e011'; +} + +.oi[data-glyph=arrow-thick-top]:before { + content: '\e012'; +} + +.oi[data-glyph=arrow-top]:before { + content: '\e013'; +} + +.oi[data-glyph=audio-spectrum]:before { + content: '\e014'; +} + +.oi[data-glyph=audio]:before { + content: '\e015'; +} + +.oi[data-glyph=badge]:before { + content: '\e016'; +} + +.oi[data-glyph=ban]:before { + content: '\e017'; +} + +.oi[data-glyph=bar-chart]:before { + content: '\e018'; +} + +.oi[data-glyph=basket]:before { + content: '\e019'; +} + +.oi[data-glyph=battery-empty]:before { + content: '\e01a'; +} + +.oi[data-glyph=battery-full]:before { + content: '\e01b'; +} + +.oi[data-glyph=beaker]:before { + content: '\e01c'; +} + +.oi[data-glyph=bell]:before { + content: '\e01d'; +} + +.oi[data-glyph=bluetooth]:before { + content: '\e01e'; +} + +.oi[data-glyph=bold]:before { + content: '\e01f'; +} + +.oi[data-glyph=bolt]:before { + content: '\e020'; +} + +.oi[data-glyph=book]:before { + content: '\e021'; 
+} + +.oi[data-glyph=bookmark]:before { + content: '\e022'; +} + +.oi[data-glyph=box]:before { + content: '\e023'; +} + +.oi[data-glyph=briefcase]:before { + content: '\e024'; +} + +.oi[data-glyph=british-pound]:before { + content: '\e025'; +} + +.oi[data-glyph=browser]:before { + content: '\e026'; +} + +.oi[data-glyph=brush]:before { + content: '\e027'; +} + +.oi[data-glyph=bug]:before { + content: '\e028'; +} + +.oi[data-glyph=bullhorn]:before { + content: '\e029'; +} + +.oi[data-glyph=calculator]:before { + content: '\e02a'; +} + +.oi[data-glyph=calendar]:before { + content: '\e02b'; +} + +.oi[data-glyph=camera-slr]:before { + content: '\e02c'; +} + +.oi[data-glyph=caret-bottom]:before { + content: '\e02d'; +} + +.oi[data-glyph=caret-left]:before { + content: '\e02e'; +} + +.oi[data-glyph=caret-right]:before { + content: '\e02f'; +} + +.oi[data-glyph=caret-top]:before { + content: '\e030'; +} + +.oi[data-glyph=cart]:before { + content: '\e031'; +} + +.oi[data-glyph=chat]:before { + content: '\e032'; +} + +.oi[data-glyph=check]:before { + content: '\e033'; +} + +.oi[data-glyph=chevron-bottom]:before { + content: '\e034'; +} + +.oi[data-glyph=chevron-left]:before { + content: '\e035'; +} + +.oi[data-glyph=chevron-right]:before { + content: '\e036'; +} + +.oi[data-glyph=chevron-top]:before { + content: '\e037'; +} + +.oi[data-glyph=circle-check]:before { + content: '\e038'; +} + +.oi[data-glyph=circle-x]:before { + content: '\e039'; +} + +.oi[data-glyph=clipboard]:before { + content: '\e03a'; +} + +.oi[data-glyph=clock]:before { + content: '\e03b'; +} + +.oi[data-glyph=cloud-download]:before { + content: '\e03c'; +} + +.oi[data-glyph=cloud-upload]:before { + content: '\e03d'; +} + +.oi[data-glyph=cloud]:before { + content: '\e03e'; +} + +.oi[data-glyph=cloudy]:before { + content: '\e03f'; +} + +.oi[data-glyph=code]:before { + content: '\e040'; +} + +.oi[data-glyph=cog]:before { + content: '\e041'; +} + +.oi[data-glyph=collapse-down]:before { + content: '\e042'; +} 
+ +.oi[data-glyph=collapse-left]:before { + content: '\e043'; +} + +.oi[data-glyph=collapse-right]:before { + content: '\e044'; +} + +.oi[data-glyph=collapse-up]:before { + content: '\e045'; +} + +.oi[data-glyph=command]:before { + content: '\e046'; +} + +.oi[data-glyph=comment-square]:before { + content: '\e047'; +} + +.oi[data-glyph=compass]:before { + content: '\e048'; +} + +.oi[data-glyph=contrast]:before { + content: '\e049'; +} + +.oi[data-glyph=copywriting]:before { + content: '\e04a'; +} + +.oi[data-glyph=credit-card]:before { + content: '\e04b'; +} + +.oi[data-glyph=crop]:before { + content: '\e04c'; +} + +.oi[data-glyph=dashboard]:before { + content: '\e04d'; +} + +.oi[data-glyph=data-transfer-download]:before { + content: '\e04e'; +} + +.oi[data-glyph=data-transfer-upload]:before { + content: '\e04f'; +} + +.oi[data-glyph=delete]:before { + content: '\e050'; +} + +.oi[data-glyph=dial]:before { + content: '\e051'; +} + +.oi[data-glyph=document]:before { + content: '\e052'; +} + +.oi[data-glyph=dollar]:before { + content: '\e053'; +} + +.oi[data-glyph=double-quote-sans-left]:before { + content: '\e054'; +} + +.oi[data-glyph=double-quote-sans-right]:before { + content: '\e055'; +} + +.oi[data-glyph=double-quote-serif-left]:before { + content: '\e056'; +} + +.oi[data-glyph=double-quote-serif-right]:before { + content: '\e057'; +} + +.oi[data-glyph=droplet]:before { + content: '\e058'; +} + +.oi[data-glyph=eject]:before { + content: '\e059'; +} + +.oi[data-glyph=elevator]:before { + content: '\e05a'; +} + +.oi[data-glyph=ellipses]:before { + content: '\e05b'; +} + +.oi[data-glyph=envelope-closed]:before { + content: '\e05c'; +} + +.oi[data-glyph=envelope-open]:before { + content: '\e05d'; +} + +.oi[data-glyph=euro]:before { + content: '\e05e'; +} + +.oi[data-glyph=excerpt]:before { + content: '\e05f'; +} + +.oi[data-glyph=expand-down]:before { + content: '\e060'; +} + +.oi[data-glyph=expand-left]:before { + content: '\e061'; +} + 
+.oi[data-glyph=expand-right]:before { + content: '\e062'; +} + +.oi[data-glyph=expand-up]:before { + content: '\e063'; +} + +.oi[data-glyph=external-link]:before { + content: '\e064'; +} + +.oi[data-glyph=eye]:before { + content: '\e065'; +} + +.oi[data-glyph=eyedropper]:before { + content: '\e066'; +} + +.oi[data-glyph=file]:before { + content: '\e067'; +} + +.oi[data-glyph=fire]:before { + content: '\e068'; +} + +.oi[data-glyph=flag]:before { + content: '\e069'; +} + +.oi[data-glyph=flash]:before { + content: '\e06a'; +} + +.oi[data-glyph=folder]:before { + content: '\e06b'; +} + +.oi[data-glyph=fork]:before { + content: '\e06c'; +} + +.oi[data-glyph=fullscreen-enter]:before { + content: '\e06d'; +} + +.oi[data-glyph=fullscreen-exit]:before { + content: '\e06e'; +} + +.oi[data-glyph=globe]:before { + content: '\e06f'; +} + +.oi[data-glyph=graph]:before { + content: '\e070'; +} + +.oi[data-glyph=grid-four-up]:before { + content: '\e071'; +} + +.oi[data-glyph=grid-three-up]:before { + content: '\e072'; +} + +.oi[data-glyph=grid-two-up]:before { + content: '\e073'; +} + +.oi[data-glyph=hard-drive]:before { + content: '\e074'; +} + +.oi[data-glyph=header]:before { + content: '\e075'; +} + +.oi[data-glyph=headphones]:before { + content: '\e076'; +} + +.oi[data-glyph=heart]:before { + content: '\e077'; +} + +.oi[data-glyph=home]:before { + content: '\e078'; +} + +.oi[data-glyph=image]:before { + content: '\e079'; +} + +.oi[data-glyph=inbox]:before { + content: '\e07a'; +} + +.oi[data-glyph=infinity]:before { + content: '\e07b'; +} + +.oi[data-glyph=info]:before { + content: '\e07c'; +} + +.oi[data-glyph=italic]:before { + content: '\e07d'; +} + +.oi[data-glyph=justify-center]:before { + content: '\e07e'; +} + +.oi[data-glyph=justify-left]:before { + content: '\e07f'; +} + +.oi[data-glyph=justify-right]:before { + content: '\e080'; +} + +.oi[data-glyph=key]:before { + content: '\e081'; +} + +.oi[data-glyph=laptop]:before { + content: '\e082'; +} + 
+.oi[data-glyph=layers]:before { + content: '\e083'; +} + +.oi[data-glyph=lightbulb]:before { + content: '\e084'; +} + +.oi[data-glyph=link-broken]:before { + content: '\e085'; +} + +.oi[data-glyph=link-intact]:before { + content: '\e086'; +} + +.oi[data-glyph=list-rich]:before { + content: '\e087'; +} + +.oi[data-glyph=list]:before { + content: '\e088'; +} + +.oi[data-glyph=location]:before { + content: '\e089'; +} + +.oi[data-glyph=lock-locked]:before { + content: '\e08a'; +} + +.oi[data-glyph=lock-unlocked]:before { + content: '\e08b'; +} + +.oi[data-glyph=loop-circular]:before { + content: '\e08c'; +} + +.oi[data-glyph=loop-square]:before { + content: '\e08d'; +} + +.oi[data-glyph=loop]:before { + content: '\e08e'; +} + +.oi[data-glyph=magnifying-glass]:before { + content: '\e08f'; +} + +.oi[data-glyph=map-marker]:before { + content: '\e090'; +} + +.oi[data-glyph=map]:before { + content: '\e091'; +} + +.oi[data-glyph=media-pause]:before { + content: '\e092'; +} + +.oi[data-glyph=media-play]:before { + content: '\e093'; +} + +.oi[data-glyph=media-record]:before { + content: '\e094'; +} + +.oi[data-glyph=media-skip-backward]:before { + content: '\e095'; +} + +.oi[data-glyph=media-skip-forward]:before { + content: '\e096'; +} + +.oi[data-glyph=media-step-backward]:before { + content: '\e097'; +} + +.oi[data-glyph=media-step-forward]:before { + content: '\e098'; +} + +.oi[data-glyph=media-stop]:before { + content: '\e099'; +} + +.oi[data-glyph=medical-cross]:before { + content: '\e09a'; +} + +.oi[data-glyph=menu]:before { + content: '\e09b'; +} + +.oi[data-glyph=microphone]:before { + content: '\e09c'; +} + +.oi[data-glyph=minus]:before { + content: '\e09d'; +} + +.oi[data-glyph=monitor]:before { + content: '\e09e'; +} + +.oi[data-glyph=moon]:before { + content: '\e09f'; +} + +.oi[data-glyph=move]:before { + content: '\e0a0'; +} + +.oi[data-glyph=musical-note]:before { + content: '\e0a1'; +} + +.oi[data-glyph=paperclip]:before { + content: '\e0a2'; +} + 
+.oi[data-glyph=pencil]:before { + content: '\e0a3'; +} + +.oi[data-glyph=people]:before { + content: '\e0a4'; +} + +.oi[data-glyph=person]:before { + content: '\e0a5'; +} + +.oi[data-glyph=phone]:before { + content: '\e0a6'; +} + +.oi[data-glyph=pie-chart]:before { + content: '\e0a7'; +} + +.oi[data-glyph=pin]:before { + content: '\e0a8'; +} + +.oi[data-glyph=play-circle]:before { + content: '\e0a9'; +} + +.oi[data-glyph=plus]:before { + content: '\e0aa'; +} + +.oi[data-glyph=power-standby]:before { + content: '\e0ab'; +} + +.oi[data-glyph=print]:before { + content: '\e0ac'; +} + +.oi[data-glyph=project]:before { + content: '\e0ad'; +} + +.oi[data-glyph=pulse]:before { + content: '\e0ae'; +} + +.oi[data-glyph=puzzle-piece]:before { + content: '\e0af'; +} + +.oi[data-glyph=question-mark]:before { + content: '\e0b0'; +} + +.oi[data-glyph=rain]:before { + content: '\e0b1'; +} + +.oi[data-glyph=random]:before { + content: '\e0b2'; +} + +.oi[data-glyph=reload]:before { + content: '\e0b3'; +} + +.oi[data-glyph=resize-both]:before { + content: '\e0b4'; +} + +.oi[data-glyph=resize-height]:before { + content: '\e0b5'; +} + +.oi[data-glyph=resize-width]:before { + content: '\e0b6'; +} + +.oi[data-glyph=rss-alt]:before { + content: '\e0b7'; +} + +.oi[data-glyph=rss]:before { + content: '\e0b8'; +} + +.oi[data-glyph=script]:before { + content: '\e0b9'; +} + +.oi[data-glyph=share-boxed]:before { + content: '\e0ba'; +} + +.oi[data-glyph=share]:before { + content: '\e0bb'; +} + +.oi[data-glyph=shield]:before { + content: '\e0bc'; +} + +.oi[data-glyph=signal]:before { + content: '\e0bd'; +} + +.oi[data-glyph=signpost]:before { + content: '\e0be'; +} + +.oi[data-glyph=sort-ascending]:before { + content: '\e0bf'; +} + +.oi[data-glyph=sort-descending]:before { + content: '\e0c0'; +} + +.oi[data-glyph=spreadsheet]:before { + content: '\e0c1'; +} + +.oi[data-glyph=star]:before { + content: '\e0c2'; +} + +.oi[data-glyph=sun]:before { + content: '\e0c3'; +} + 
+.oi[data-glyph=tablet]:before { + content: '\e0c4'; +} + +.oi[data-glyph=tag]:before { + content: '\e0c5'; +} + +.oi[data-glyph=tags]:before { + content: '\e0c6'; +} + +.oi[data-glyph=target]:before { + content: '\e0c7'; +} + +.oi[data-glyph=task]:before { + content: '\e0c8'; +} + +.oi[data-glyph=terminal]:before { + content: '\e0c9'; +} + +.oi[data-glyph=text]:before { + content: '\e0ca'; +} + +.oi[data-glyph=thumb-down]:before { + content: '\e0cb'; +} + +.oi[data-glyph=thumb-up]:before { + content: '\e0cc'; +} + +.oi[data-glyph=timer]:before { + content: '\e0cd'; +} + +.oi[data-glyph=transfer]:before { + content: '\e0ce'; +} + +.oi[data-glyph=trash]:before { + content: '\e0cf'; +} + +.oi[data-glyph=underline]:before { + content: '\e0d0'; +} + +.oi[data-glyph=vertical-align-bottom]:before { + content: '\e0d1'; +} + +.oi[data-glyph=vertical-align-center]:before { + content: '\e0d2'; +} + +.oi[data-glyph=vertical-align-top]:before { + content: '\e0d3'; +} + +.oi[data-glyph=video]:before { + content: '\e0d4'; +} + +.oi[data-glyph=volume-high]:before { + content: '\e0d5'; +} + +.oi[data-glyph=volume-low]:before { + content: '\e0d6'; +} + +.oi[data-glyph=volume-off]:before { + content: '\e0d7'; +} + +.oi[data-glyph=warning]:before { + content: '\e0d8'; +} + +.oi[data-glyph=wifi]:before { + content: '\e0d9'; +} + +.oi[data-glyph=wrench]:before { + content: '\e0da'; +} + +.oi[data-glyph=x]:before { + content: '\e0db'; +} + +.oi[data-glyph=yen]:before { + content: '\e0dc'; +} + +.oi[data-glyph=zoom-in]:before { + content: '\e0dd'; +} + +.oi[data-glyph=zoom-out]:before { + content: '\e0de'; +} + diff --git a/static/open-iconic/css/open-iconic.styl b/static/open-iconic/css/open-iconic.styl new file mode 100644 index 00000000..f541bc2d --- /dev/null +++ b/static/open-iconic/css/open-iconic.styl @@ -0,0 +1,733 @@ +@font-face + font-family 'Icons' + src url('../fonts/open-iconic.eot') + src url('../fonts/open-iconic.eot?#iconic-sm') format('embedded-opentype'), 
url('../fonts/open-iconic.woff') format('woff'), url('../fonts/open-iconic.ttf') format('truetype'), url('../fonts/open-iconic.otf') format('opentype'), url('../fonts/open-iconic.svg#iconic-sm') format('svg') + font-weight normal + font-style normal + + +.oi[data-glyph].oi-text-replace + font-size 0 + line-height 0 + +.oi[data-glyph].oi-text-replace:before + width 1em + text-align center + +.oi[data-glyph] + &:before + position relative + top 1px + font-family 'Icons' + display inline-block + speak none + line-height 1 + vertical-align baseline + font-weight normal + font-style normal + -webkit-font-smoothing antialiased + -moz-osx-font-smoothing grayscale + + &:empty:before + width 1em + text-align center + box-sizing content-box + + &.oi-align-left:before + text-align left + + &.oi-align-right:before + text-align right + + &.oi-align-center:before + text-align center + + + &.oi-flip-horizontal:before + -webkit-transform scale(-1, 1) + -ms-transform scale(-1, 1) + transform scale(-1, 1) + + + &.oi-flip-vertical:before + -webkit-transform scale(1, -1) + -ms-transform scale(-1, 1) + transform scale(1, -1) + + + &.oi-flip-horizontal-vertical:before + -webkit-transform scale(-1, -1) + -ms-transform scale(-1, 1) + transform scale(-1, -1) + + + + +.oi[data-glyph=account-login]:before + content '\e000' + +.oi[data-glyph=account-logout]:before + content '\e001' + +.oi[data-glyph=action-redo]:before + content '\e002' + +.oi[data-glyph=action-undo]:before + content '\e003' + +.oi[data-glyph=align-center]:before + content '\e004' + +.oi[data-glyph=align-left]:before + content '\e005' + +.oi[data-glyph=align-right]:before + content '\e006' + +.oi[data-glyph=aperture]:before + content '\e007' + +.oi[data-glyph=arrow-bottom]:before + content '\e008' + +.oi[data-glyph=arrow-circle-bottom]:before + content '\e009' + +.oi[data-glyph=arrow-circle-left]:before + content '\e00a' + +.oi[data-glyph=arrow-circle-right]:before + content '\e00b' + +.oi[data-glyph=arrow-circle-top]:before 
+ content '\e00c' + +.oi[data-glyph=arrow-left]:before + content '\e00d' + +.oi[data-glyph=arrow-right]:before + content '\e00e' + +.oi[data-glyph=arrow-thick-bottom]:before + content '\e00f' + +.oi[data-glyph=arrow-thick-left]:before + content '\e010' + +.oi[data-glyph=arrow-thick-right]:before + content '\e011' + +.oi[data-glyph=arrow-thick-top]:before + content '\e012' + +.oi[data-glyph=arrow-top]:before + content '\e013' + +.oi[data-glyph=audio-spectrum]:before + content '\e014' + +.oi[data-glyph=audio]:before + content '\e015' + +.oi[data-glyph=badge]:before + content '\e016' + +.oi[data-glyph=ban]:before + content '\e017' + +.oi[data-glyph=bar-chart]:before + content '\e018' + +.oi[data-glyph=basket]:before + content '\e019' + +.oi[data-glyph=battery-empty]:before + content '\e01a' + +.oi[data-glyph=battery-full]:before + content '\e01b' + +.oi[data-glyph=beaker]:before + content '\e01c' + +.oi[data-glyph=bell]:before + content '\e01d' + +.oi[data-glyph=bluetooth]:before + content '\e01e' + +.oi[data-glyph=bold]:before + content '\e01f' + +.oi[data-glyph=bolt]:before + content '\e020' + +.oi[data-glyph=book]:before + content '\e021' + +.oi[data-glyph=bookmark]:before + content '\e022' + +.oi[data-glyph=box]:before + content '\e023' + +.oi[data-glyph=briefcase]:before + content '\e024' + +.oi[data-glyph=british-pound]:before + content '\e025' + +.oi[data-glyph=browser]:before + content '\e026' + +.oi[data-glyph=brush]:before + content '\e027' + +.oi[data-glyph=bug]:before + content '\e028' + +.oi[data-glyph=bullhorn]:before + content '\e029' + +.oi[data-glyph=calculator]:before + content '\e02a' + +.oi[data-glyph=calendar]:before + content '\e02b' + +.oi[data-glyph=camera-slr]:before + content '\e02c' + +.oi[data-glyph=caret-bottom]:before + content '\e02d' + +.oi[data-glyph=caret-left]:before + content '\e02e' + +.oi[data-glyph=caret-right]:before + content '\e02f' + +.oi[data-glyph=caret-top]:before + content '\e030' + +.oi[data-glyph=cart]:before + content 
'\e031' + +.oi[data-glyph=chat]:before + content '\e032' + +.oi[data-glyph=check]:before + content '\e033' + +.oi[data-glyph=chevron-bottom]:before + content '\e034' + +.oi[data-glyph=chevron-left]:before + content '\e035' + +.oi[data-glyph=chevron-right]:before + content '\e036' + +.oi[data-glyph=chevron-top]:before + content '\e037' + +.oi[data-glyph=circle-check]:before + content '\e038' + +.oi[data-glyph=circle-x]:before + content '\e039' + +.oi[data-glyph=clipboard]:before + content '\e03a' + +.oi[data-glyph=clock]:before + content '\e03b' + +.oi[data-glyph=cloud-download]:before + content '\e03c' + +.oi[data-glyph=cloud-upload]:before + content '\e03d' + +.oi[data-glyph=cloud]:before + content '\e03e' + +.oi[data-glyph=cloudy]:before + content '\e03f' + +.oi[data-glyph=code]:before + content '\e040' + +.oi[data-glyph=cog]:before + content '\e041' + +.oi[data-glyph=collapse-down]:before + content '\e042' + +.oi[data-glyph=collapse-left]:before + content '\e043' + +.oi[data-glyph=collapse-right]:before + content '\e044' + +.oi[data-glyph=collapse-up]:before + content '\e045' + +.oi[data-glyph=command]:before + content '\e046' + +.oi[data-glyph=comment-square]:before + content '\e047' + +.oi[data-glyph=compass]:before + content '\e048' + +.oi[data-glyph=contrast]:before + content '\e049' + +.oi[data-glyph=copywriting]:before + content '\e04a' + +.oi[data-glyph=credit-card]:before + content '\e04b' + +.oi[data-glyph=crop]:before + content '\e04c' + +.oi[data-glyph=dashboard]:before + content '\e04d' + +.oi[data-glyph=data-transfer-download]:before + content '\e04e' + +.oi[data-glyph=data-transfer-upload]:before + content '\e04f' + +.oi[data-glyph=delete]:before + content '\e050' + +.oi[data-glyph=dial]:before + content '\e051' + +.oi[data-glyph=document]:before + content '\e052' + +.oi[data-glyph=dollar]:before + content '\e053' + +.oi[data-glyph=double-quote-sans-left]:before + content '\e054' + +.oi[data-glyph=double-quote-sans-right]:before + content '\e055' + 
+.oi[data-glyph=double-quote-serif-left]:before + content '\e056' + +.oi[data-glyph=double-quote-serif-right]:before + content '\e057' + +.oi[data-glyph=droplet]:before + content '\e058' + +.oi[data-glyph=eject]:before + content '\e059' + +.oi[data-glyph=elevator]:before + content '\e05a' + +.oi[data-glyph=ellipses]:before + content '\e05b' + +.oi[data-glyph=envelope-closed]:before + content '\e05c' + +.oi[data-glyph=envelope-open]:before + content '\e05d' + +.oi[data-glyph=euro]:before + content '\e05e' + +.oi[data-glyph=excerpt]:before + content '\e05f' + +.oi[data-glyph=expand-down]:before + content '\e060' + +.oi[data-glyph=expand-left]:before + content '\e061' + +.oi[data-glyph=expand-right]:before + content '\e062' + +.oi[data-glyph=expand-up]:before + content '\e063' + +.oi[data-glyph=external-link]:before + content '\e064' + +.oi[data-glyph=eye]:before + content '\e065' + +.oi[data-glyph=eyedropper]:before + content '\e066' + +.oi[data-glyph=file]:before + content '\e067' + +.oi[data-glyph=fire]:before + content '\e068' + +.oi[data-glyph=flag]:before + content '\e069' + +.oi[data-glyph=flash]:before + content '\e06a' + +.oi[data-glyph=folder]:before + content '\e06b' + +.oi[data-glyph=fork]:before + content '\e06c' + +.oi[data-glyph=fullscreen-enter]:before + content '\e06d' + +.oi[data-glyph=fullscreen-exit]:before + content '\e06e' + +.oi[data-glyph=globe]:before + content '\e06f' + +.oi[data-glyph=graph]:before + content '\e070' + +.oi[data-glyph=grid-four-up]:before + content '\e071' + +.oi[data-glyph=grid-three-up]:before + content '\e072' + +.oi[data-glyph=grid-two-up]:before + content '\e073' + +.oi[data-glyph=hard-drive]:before + content '\e074' + +.oi[data-glyph=header]:before + content '\e075' + +.oi[data-glyph=headphones]:before + content '\e076' + +.oi[data-glyph=heart]:before + content '\e077' + +.oi[data-glyph=home]:before + content '\e078' + +.oi[data-glyph=image]:before + content '\e079' + +.oi[data-glyph=inbox]:before + content '\e07a' + 
+.oi[data-glyph=infinity]:before + content '\e07b' + +.oi[data-glyph=info]:before + content '\e07c' + +.oi[data-glyph=italic]:before + content '\e07d' + +.oi[data-glyph=justify-center]:before + content '\e07e' + +.oi[data-glyph=justify-left]:before + content '\e07f' + +.oi[data-glyph=justify-right]:before + content '\e080' + +.oi[data-glyph=key]:before + content '\e081' + +.oi[data-glyph=laptop]:before + content '\e082' + +.oi[data-glyph=layers]:before + content '\e083' + +.oi[data-glyph=lightbulb]:before + content '\e084' + +.oi[data-glyph=link-broken]:before + content '\e085' + +.oi[data-glyph=link-intact]:before + content '\e086' + +.oi[data-glyph=list-rich]:before + content '\e087' + +.oi[data-glyph=list]:before + content '\e088' + +.oi[data-glyph=location]:before + content '\e089' + +.oi[data-glyph=lock-locked]:before + content '\e08a' + +.oi[data-glyph=lock-unlocked]:before + content '\e08b' + +.oi[data-glyph=loop-circular]:before + content '\e08c' + +.oi[data-glyph=loop-square]:before + content '\e08d' + +.oi[data-glyph=loop]:before + content '\e08e' + +.oi[data-glyph=magnifying-glass]:before + content '\e08f' + +.oi[data-glyph=map-marker]:before + content '\e090' + +.oi[data-glyph=map]:before + content '\e091' + +.oi[data-glyph=media-pause]:before + content '\e092' + +.oi[data-glyph=media-play]:before + content '\e093' + +.oi[data-glyph=media-record]:before + content '\e094' + +.oi[data-glyph=media-skip-backward]:before + content '\e095' + +.oi[data-glyph=media-skip-forward]:before + content '\e096' + +.oi[data-glyph=media-step-backward]:before + content '\e097' + +.oi[data-glyph=media-step-forward]:before + content '\e098' + +.oi[data-glyph=media-stop]:before + content '\e099' + +.oi[data-glyph=medical-cross]:before + content '\e09a' + +.oi[data-glyph=menu]:before + content '\e09b' + +.oi[data-glyph=microphone]:before + content '\e09c' + +.oi[data-glyph=minus]:before + content '\e09d' + +.oi[data-glyph=monitor]:before + content '\e09e' + 
+.oi[data-glyph=moon]:before + content '\e09f' + +.oi[data-glyph=move]:before + content '\e0a0' + +.oi[data-glyph=musical-note]:before + content '\e0a1' + +.oi[data-glyph=paperclip]:before + content '\e0a2' + +.oi[data-glyph=pencil]:before + content '\e0a3' + +.oi[data-glyph=people]:before + content '\e0a4' + +.oi[data-glyph=person]:before + content '\e0a5' + +.oi[data-glyph=phone]:before + content '\e0a6' + +.oi[data-glyph=pie-chart]:before + content '\e0a7' + +.oi[data-glyph=pin]:before + content '\e0a8' + +.oi[data-glyph=play-circle]:before + content '\e0a9' + +.oi[data-glyph=plus]:before + content '\e0aa' + +.oi[data-glyph=power-standby]:before + content '\e0ab' + +.oi[data-glyph=print]:before + content '\e0ac' + +.oi[data-glyph=project]:before + content '\e0ad' + +.oi[data-glyph=pulse]:before + content '\e0ae' + +.oi[data-glyph=puzzle-piece]:before + content '\e0af' + +.oi[data-glyph=question-mark]:before + content '\e0b0' + +.oi[data-glyph=rain]:before + content '\e0b1' + +.oi[data-glyph=random]:before + content '\e0b2' + +.oi[data-glyph=reload]:before + content '\e0b3' + +.oi[data-glyph=resize-both]:before + content '\e0b4' + +.oi[data-glyph=resize-height]:before + content '\e0b5' + +.oi[data-glyph=resize-width]:before + content '\e0b6' + +.oi[data-glyph=rss-alt]:before + content '\e0b7' + +.oi[data-glyph=rss]:before + content '\e0b8' + +.oi[data-glyph=script]:before + content '\e0b9' + +.oi[data-glyph=share-boxed]:before + content '\e0ba' + +.oi[data-glyph=share]:before + content '\e0bb' + +.oi[data-glyph=shield]:before + content '\e0bc' + +.oi[data-glyph=signal]:before + content '\e0bd' + +.oi[data-glyph=signpost]:before + content '\e0be' + +.oi[data-glyph=sort-ascending]:before + content '\e0bf' + +.oi[data-glyph=sort-descending]:before + content '\e0c0' + +.oi[data-glyph=spreadsheet]:before + content '\e0c1' + +.oi[data-glyph=star]:before + content '\e0c2' + +.oi[data-glyph=sun]:before + content '\e0c3' + +.oi[data-glyph=tablet]:before + content '\e0c4' 
+ +.oi[data-glyph=tag]:before + content '\e0c5' + +.oi[data-glyph=tags]:before + content '\e0c6' + +.oi[data-glyph=target]:before + content '\e0c7' + +.oi[data-glyph=task]:before + content '\e0c8' + +.oi[data-glyph=terminal]:before + content '\e0c9' + +.oi[data-glyph=text]:before + content '\e0ca' + +.oi[data-glyph=thumb-down]:before + content '\e0cb' + +.oi[data-glyph=thumb-up]:before + content '\e0cc' + +.oi[data-glyph=timer]:before + content '\e0cd' + +.oi[data-glyph=transfer]:before + content '\e0ce' + +.oi[data-glyph=trash]:before + content '\e0cf' + +.oi[data-glyph=underline]:before + content '\e0d0' + +.oi[data-glyph=vertical-align-bottom]:before + content '\e0d1' + +.oi[data-glyph=vertical-align-center]:before + content '\e0d2' + +.oi[data-glyph=vertical-align-top]:before + content '\e0d3' + +.oi[data-glyph=video]:before + content '\e0d4' + +.oi[data-glyph=volume-high]:before + content '\e0d5' + +.oi[data-glyph=volume-low]:before + content '\e0d6' + +.oi[data-glyph=volume-off]:before + content '\e0d7' + +.oi[data-glyph=warning]:before + content '\e0d8' + +.oi[data-glyph=wifi]:before + content '\e0d9' + +.oi[data-glyph=wrench]:before + content '\e0da' + +.oi[data-glyph=x]:before + content '\e0db' + +.oi[data-glyph=yen]:before + content '\e0dc' + +.oi[data-glyph=zoom-in]:before + content '\e0dd' + +.oi[data-glyph=zoom-out]:before + content '\e0de' diff --git a/static/open-iconic/fonts/open-iconic.eot b/static/open-iconic/fonts/open-iconic.eot new file mode 100644 index 0000000000000000000000000000000000000000..f98177dbf711863eff7c90f84d5d419d02d99ba8 GIT binary patch literal 28196 zcmdsfdwg8gedj&r&QluAL-W#Wq&pgEMvsv!&0Cf&+mau`20w)Dj4&8Iu59zN6=RG; z451+<)Ej~^SrrmCp$=hb!Zu?PlZ0v^rFqOYfzqruY1s`+ve{(Uv}w|M+teR4-tX_6 zJJQHDgm(Majx=-5J@?%6_?_SRz0Ykss3^zpP!y(cg+5#{t0IGvlZlxgLVa!|Pwg%0HwaAkJPsR_7CkF z{hz=5BS2$bQO4>H%uMR+@Bes%qU=0}`qqrY1!(P0t>lnf>u?>hCHF7DiD%jIRLs_gA0(b1L}rzgltYVrt?gc2Y5;9UDjQ z%B)P;{Yp$h?WOgkCosju&-Q&Abmg0GDQ~^0YA77V?+nuN;!-_LToFFdx5>D-3RhIC 
zNim@Y28=&kzxC#&OZZhTUDD)z++voc1{on3eJelI&j0@(PPn1`HTMH@R>gMK0^H#} z-APZ<6H9s`4L|t$XFtpR3vV~DpGXL)8ZghQI8nFC#;Gm~d%|gaTbMPC42!c1B?miM zn$?TN(kwg4=NH!N?1DZwr|Va=QM0@at3QmtSVbGuP_f*EuIqDh*>o`umty&fMPWVN zwOSy=lGa!#OKqKlS=4KL6^YiDEHv;MA!Dj|%KqdbXOLRkVPgo+>xM z`tdLxr03~jdXO4;l(4}>Kca7fS2gy1&DtubqsnG6amCcr?ZNni_*#ur)!una=lO+a z(W#N+^Oy#G-fw#XCIlD!Q7hD3IjwB$Uoy5LHCCk7M6R+q+PRlLC+2F#Og&0KX;fTm z9gRV6t=nO-P_Az=CG4l*~#0dwv=AFvG8)~&n&z! z>wcqjdUo&ccd;$(NdM=j`265c&L?J1yxG?F>}_{_wry>?^aan|yPK}R#cpg(b^$xz zf;Gl2?&aw=%jBtFht&{S}(z)fW6^mCJSIuQ@i4|p+ zx3$z#v51krkNGj$t;x!E@Z?f6a(ZZoC>r5@Ucl5$FlAy4?Q*}B&hb1!m&U%lE*Euc z#N62h7Dtl~c7f-y5Wr$VDS7_#wX$QaKmmSK`iqLyDz`g-`54&Z80Kl-ofTt{b;TI$ zT#%ThARiNAa&`dV8`oF>zV?w_b1QPe8_mRA%fyml9N}zE z_-m(6zyG|m?j+Mnf7=xbb%mHqB&x=o>~}ut(o3hDKA)2v)LFgfzUPV|zwQq${}Jm! zdvqS0#f$auxa~yCyx|1clRx73VPI)bD(DG&?EH&%UAHgnwu8I!`Kp(SFWc>Wqg^Ma zTe*j+Ez4Kzf`(q!&Qco{4bZc|i%U<6aYU6B7)Lx7;53d@W>5_ia)5Ny1_i;Fuu5e! z-gKnZ5^0T^BYvyJ8eYL}Z1AdPGrK^uOnkDgwNvdLC@Di@t#zMFFbngC*yBaZnjCxO zZVNwAs{vvUm;SyZn;h!w92-hzJ6O%btT}YL>chAEtV)iFcrVtkM#9EvCDS2-twqu&y5y= zw;q?%OgQCDn!(c|X=^MS%LcRltks{LOR&8^`AO+?V#}7fxh-2D&&;XX#mAnwc+n^T z?I3bku^;?ONNGpAEzQ9|wZK)t4otF{`3c3+*b1IhG!ph>Qy^76GG!OWj>gw*J9S{; z4GguD#dS*bxuJZ1h^DeJ+j4C4fm1qeo$MT>2@;LZAJ13vO*7V9&^G2tG7zXZ?FfUm z#SMB%w5<{KY9(%XvO$a>;P-@EExte!yNWhJc8Fzlj6qNMLkn-vTJq?^8$)^3(jB7q zK=I-s|H2zsK0QCgqux+AWHJJLC*aI54Qv=}8o8CR zZwEnEGeI;95)@8khtt_i7IdVSr-7d=zV}u=kyugRRIfhw zeDDVL_QJF74|wmnm%D6ymv^z?^V}7hzydG+3&|d1l55zYhOj3av4&o`Cs_*%Sec7K6kNmX1R1PD zYix+tfd4N`+-xrWgR9=NE#s(Rcb7VHTc13*dDZG`u2Vy5+-xoVUX3HO%~S7URi&d_ za|fSnjU2xwx0TQZaKH4&{58k8C}uC~%bS*!t{HKh8i(U_G87Y4V6Mbq6(WCwXB8|!8EMz7QHK&Z*mcFpc< z+RRN&4^&tAL+^tIcvp=oXtiyp&{<>WDx_onB*c$TJG+1&G7a-fJb(lhUsyZ?n4aYuiGF!~%5BNht zkLp&(Oy-jvTIYsHHM$C!I<(f1-`DJlUJRPI*qqTW+kTY1z~}7?FWT8-kChzvs)6UdU2dnB zx$Q4tyPa>#r3G#wn2l*V56=aR2F{ncODvttVSQ>#9gal)dghYmi{bh)=H+FHv=R)hRtN(5RM_@E0? 
z5kM8i9$Uerye_+vY3w_3_P#}l!_lo1O@m<2iy=ee^_*n$LO%GqY8Q0?Zgjgfu%~GcgW`lM%ck$vJ0hs4ShNL&iUr07ttjmJdpcTs@YpWWi zLeN`YSMXY|ok4QJ?b0l&5gLe$Y$tuGLVQ^KYqd>=*0HTNl+kS35%>Tm0`e`E!ED_IcN2j(%)=h7jWUMUO0+h zRRdK=F-j8tO~s;7T+L5ZJE`9#xx)%NSO@&}!yd9s-zo3*_M|@$v_@C3vckh1zbO=c zQz)I*Tce|GeeMd4hi+VZwk!ITF`O4lyst z4Y9otCo>pme1^Sp;8gd3{bk67rC&829rHZ0Sv4^W_lM?+#W|mfdf9!dfV9s|K;O|StI2k1ficm_+HH-M&Az?i*JgaZ@5^* zE(GBy_gO3&{S94&SP6KeFT!J~`_y882z_O7zCy_m6O~Qphe|_ZM`==gUbZ=u2Swa{ zc-fe%m1d0D?+|)|HxUHK2lEHO%w;$(wR`cy*WG%iYh_pcDb`1TTj~Ka=bd}qEvd|b zQ^m{sB3zJTR-u==fD1KM#C|~QSdzg!U=2oM?a81uk|lZ~xEUA=&kOD%%>%Gb(5GU} zTOiHa&bDc8$;Tnw1g$O1?*a*kxmaWcc5HS9ORvEu4`$0U9^0!Yn(iJ=IPSjNkr=(Z zDY5+W^zl3}LDjB$vt0K9RLLL5oR)B01*NRQyg(`CyrhZKYKCkpBzcJRl8dOC)PO3V zwaRCOc~t7^!d#+yVgv-}OF|o3m8R8-X8{D#>>(A*N?k%eEp2Xp{Og1~APhL#`%a==_CxDO?0Cstm3 z30%#eV0U(fut|VC7qL}fR)`ZvgHV2zC*{}rc8UrQR$o+3OBx1mZ zBw=TjS?FXCbR;9PLY)=VCY?28(R%*NYUev|5yJtCsjYSrP2lsA^AtqzGR9J<&#=SZlzmY*a6=bs1jPR3mA)Spy%lFF5 zROWpz3sBDaoT_RIIQP`UxG^?pxxq~=8DPB}F$ARVc7;st8!RO5cGmB4ZoCptXt$F* zCv5*@5{La6dkp?4(js8{AS3-dZwU(s)Cst!XwFM`ri$l@b{jSbv$P3IT0yOVSP=dS zw*x&V*WCoyCHggs=e+QPsqGa4jr6auy%nO1Ao}q)D@u%U$o8tSy3nH?Dvbl+CYu7R zr;${9Fe_A8p_~#-b)dOUM&F@rV13*8{M%o^J~;k`hJ4<8%LsADky~hvVqJxtWL9i& zd%G1Mt!u5vSyM$+o%}ek3E&T+d^?dS@rBYBXD1idLoy_TzhGTt(IHuqpa=xQPQX9) z0h)5@Nist!gP>qOtZ~ zMv}`QE9zVNwYYBcTms~PKGwK=(ESy}0lC<7k|w5-tgTAbC1>SlGFV{0;z+^k=% zP^`6tvGjFXO#;T4IOYvy2(y&V4OomZUoa&6Vs1-oEuS+>A1T9w;)~}99&%k-92Wn0 z#WQ5b|rc;Pr&qX~%&%}F#z(-avRX_b{G<+PY*7c;v8*q~hfsmb>XW+&kft>v*aLckMzT1J z?H52T$v0c|wF=q6AAu|`zT{OizHk$e;I$04CdhHNvo^$$PQGVNwOorbI=H7r;%%PvE>$cds9X%hLl`MJ6ID0UQ$ zMeHT$iSw|nEZP>KML>Fm^x}gE6TyOH{baI=g|o?MIs%(H=}Lgtd<{kFSU|8gs^G;wS0(6~;HoUQld?%1QRZPOq4L+V$^Kce3< zza;Al%6f$Xs zJ(ifhc0+%g-EIkP+x_5%O&`B;lgFbvI(tX2(;pCqr(#uYQ^?=!6x^22htq48xpO$v_M&$&HhkRZI$5SG*{TDTls&4?T2*ow$^%;=-wcMati4n z1CHQ>9wQCHD;N>p7-?idNGxoNs;bt2YwvLPeckc+x|?c4{(9F?>4DPUv%A;0{U0rT z_kOmD&oj?W>$p&VVcQqtdrO##R}$gZvxB^K55{&58Yt 
zJxOe?lC{aLO=P4@bLhDSp?60bYv?&Ikwm8{*lPk&G^LoJkdZLui?+rM>F(~;>w2o| zMK;_&(66yNkzdnZIw!7G&E(FlJ&^0YY17!o8++wN$M&_u>xQ?M7Ubo=DWd@UWC>?f zaBRpICMlP|)$9eavi2=$}kiDm__jweO@3rN;(HfCW16c9Drzu=v&AdeV|?K z)Hl>6;GWe_22rqia&JR(5=A5kv`TN7kZQ7Nx(gj9+tU~<`a?Zgk%=6%J-S;Vf)l z0Lt7Py8yV%l2=b$%8RSCQEe5x!D~D$o5J(-tk}HN7&Sr#rE{V&8p{&>vO=@mh5fr@ zQ*622sGaQeFjBNykn}REr5UPzt2F@U1^%tXhqD=YE_!)(NR36wpAto)W}`tTHWeJ$ z>Kc}gmd$AFZ|-gi@CbSTFbq6RJAy4%%b{gEY$%uTDdmFttp;N%I-l% z_DCo&{xE-elH$n7{aCg!AftazXDcW*!Ul!TUdgkhUm~V-!*`ujvXDvFDD7)ohgPl3 zWm1X0-gs9>w5?TZZfdBjTAsney4@_8{!`-jJF=) z!Ih4dvLfo`b6!xSXZ<1gZ}Sax-i2Gee9%xRy`{56px72K`EN^adc9{21=65bkhPMa zR}Dn3Al|?mA(VFLEopIu&Y`6UD>6tJS#HW#Rgp`MU*q7S=7Roe3s? zbg=ZL(wEq2hzDcPE1w=LJ;!!djFtF|h&6!Q0rm&jArNo?F@_L_;&0BWr8|IO@M|p5 zV^z@OMSa^7_Ik3gs==b^kpd(=UXG#yyApH&grKsGYS>(CXI*eP5|0)*5;5XqlEGv) z>GAT5Uhjg%i|r)ZqCAxW=_qVL;vCo@d{ur$1HGvFS~T1cs1i7rfLDhc3FNwt#^9_X z`3W{;p$@^_j3^24E}?yX_{*-JGFZvcEqWTGQ3FhTSQW5DIvH?aGyF zk3DtFNc2_PSEc&;QuIYu!pDfmBKavGX=2$iW)X~27!K12bis%qj}Q|O76PUUm*Ff- zh(K=yW32f=f-Gtf8ik+mT7n?g`{Fb;KX*699YJse1^RPncoAwWVN!L?8DcsO|&<8t7Kdq z`Q9J`nkB+!vSBC#S1)l1?-teTmXcyN2z!u8TG~Z)8QW1+P4O3{b27q$os{tyrP<}z zx7OA-`w?YU^oCs3PI!_{W{^hEMU?qN`~?|#F(>0GzkJ~2VzhR7p{k1)r2?m6sBWH{_0ElUbM_IgNLK-IGf3H)siHZ*NlW8BqDLfvrrdWs4Q)9dtse@ zdgUjCVS;eqtTrRor(4+x+}wGcodNd|HfhW?)@zo&Kqz^^fH7$!vL>6cBDm6s!HHpl z#=MPK9r)$MtSMq*b3{&d=aeH*<1sr~L&)!RxEiuaV}1e(iF*QComGb3c$)@#%l813 zpfU5g?P{nz=baV?-BPtdTWz*ha}(MUGZoWM{SRhCnFzkYoX}SJUdUO7!Q6JDaqr(o zLb8vfcTx_Lc_9mdGtxeS>Lq@OQ_38%N{X~2GqXscyW%7GGs(zgkD-Vgl572IYkT7z zkYbx4!@3a-Yf@}N*%Eqw7JY+R{MNh>gF=GJk+TUtTB4p;&mta7RDt|*^%O%D@{~bW zj5rfJQ`?DTU`|A(F)!2;bd*BO#H?&*-40?SRIJPwWee=&%AG603XhI~c)|FF{nSOFGh!?# z$5_gC)e2iJoat~E2P2Di)sxrX1@%rZu%q~ai52n-sVc2aS;J)k-@p zd;{Wy3fO83T!q5&L-ERaY7XE@%u(n#W=fLr#fwEffiJ}Ja(e<+LE<| zAKks(g4^Amu2r=T-DK~?6Q#RO-ipICub*04fAsAZ{tmxK*q(*0z{wFf2t!Mmg~HS< z>`uZ0#bj`lsuhmsPTqG=(;VIR-t}1S__ab%HRvO3wh`Qv~V zG&_H|9c+aQBq1r93w9*CE!)muNoGLTzeVug92sfn5XkrE$Maj-qZVJPLz8<%)fWDT 
zYO|`pyy$C&v*cMl#O}-w#qaIxfR$|J=B6QX#Ts!(SZYHyqH|Va4G|3|{NW@V%W!qt zet-|{BU!&P7E4MthFhYdjup5s;)wu1vE>0W{6qMs6irp&xM52#`!HY%^9b?-BDCbe zxT3yEmE)D3l9RN7s6GvaZ1A$ap@)-g-y;2CG(Ru%Kn)<@5P3$(YF{3Ys4sm1mF*`z zWJN{{f4O};u>=p;jThsI!xA9IeMQin>M|XGoeaHWV?;bj0bXenCTp2cMTEYoihVET z)k=SXLAtLHE$8)bgCWbk^CZ^uo50^ynC}X|!3)9CL!8!NHBV)%i$OWY;Q<)FNR5Mo z4G0$|PZum+RFegqHeo^SJ!b+lN01IFab2NDZcAX#&JK1aZhOSX=S_p1CPXYFPML>S z{t1QZBuJ+dieKX3Gqtx4c6JWlTKmkwgbd#yxGnlb7U3qvWdPWihk${mv|%2t;aZ_f zErt@qWwkU`(l?~sxh#bEA_&UDvxt>Oe1dPg3>+>wAcoRtAd+J3N%#cL(0DFAuU26n zES^bVhJ{)vSfFOi9XS8Yx-}iIfApF2kMsF8>z+9uIQIDYXFmEm@P_a}#%Khw&JNO3 z7{ZQ{X%IssbOJEqkCBHx!uFCK4rEXK<44fI@&%>k_5|L9(4Jeg2hEx^JvcAZChO9L zXUGK8BgJV18%zJ^ca5CMmp}G1PyqzQqs0E2t*dmW%(5p;&en#281ton$6v&pbEmcw=4n?au4S-Sy0OJ!_)R437?}-km!s`%H9AALC89lE}Q4u=a{lsF?svCed+$tOaa z7j01y!_E-)lp}n->@^&SN_b&c_#Gi1sao0GfB+13L7b4F;FcvjFxlAyXuB3Cz*OnS zLFh&Xup&LLHOAWIaWJ;Gp|13!8P;+CbFV)7;c4bB?f;u|8Jq=COLwx){kM8wdEn7k zcQE%~oIlrf&ql+pbLmMzUxg2m>^jTN?ub3@vBo@-2+8o<8-?zdFfJ=@giXjUz22DTppvsdH%LW6F|Deg9C$UdSM+ zp7x>W(CDkBH(v!RK|E#3)|M^z&|%-f{gIZfE&V6Q9)0!IN5@WzQ~pb9rV1&%>T3ZX z`D6q>&~aZGYfl21IG+XS6HKNw`!b@b?0XiT-D4M*6e4FY{oGzG+F64gv%yqkd`1Ny zq8KZR&sg-iQhbIXD9|A=I$A3-(&ZcZ!(Y^Fjs_FH{2%G9mVVYK`jKbF20-6h3|u3L3WtCZ?%+>khd2<9P#On9qR?tn zD3Q`R#3ncc!J<>KUS1s7Jz#gM>M!5}2?cAq2L`%pf+4FV@C#LS+sik_1<$|B-OC^4 zc~K&91~DqX1|25-$#%9k?h?EXv{($)X`)ya*weB@HV~>Po#eq8OdMbMCb%Whq zt->d?0gkZ?msD9O$U4ug~o53-O@Y zXY)D(L1$-uYkOUfV_X05!g^AJDrjj7EYO>jJw!`)Ub{9IZ>u7C6|__a{914>6a(r- zAdQtqM)(Y;zq%x0Tq$!HCGA(#kukJu`aN5E8$&hQ_ie8UH4b#7DV(;!5I-P$_+G5Y zv(FmA!*rt@$D7<<)0J}cuUXUYXkB@&h#z*4P$JCDMPmANCCx6lGA+BR*!x7Igsq!& zng~K&B|pbm9V?97=_G<(fuzEJJcu|49L9g*%a%Z~Sl_EX^8~_w^k+V=>UyvC#KSEs z5Zw;m{_<-o@%`vaFGcm&URL$!^UuTMWXKPK-uM^!eL^_$094|_*&whq>dvr}r|-VI zbncGvV~A$?O@8#qvtM}oZA8yf*&c}1D4`gv zO6G7O=P!87;&V8M?59KS=?E0SB7G~Uo{)jDpY!ktmHUC9gJandKaOyhDJ8*2JWXR; zqFYsXfeG=kfY(_q&NzA!ra&#WB5#Wz{F=hdkYX#IW}QF$Nb#xCUqAgCix$6p@7Pfc z;v+vS{pj@5%=eUDdgHZwzpNjH=DZ{aRDohqOagFMYYO@(FbTNpO_-?tUXFIb(H1*E 
zM`hE5{t_FW*KdC6zu)uF&mYv!KO+?APQyexUwY}Kd;a@VH|r1n{Gn&gOJ%!kC>3&` zSjRA6;Sq9MnD&ZP`jJv3l(dveW`K|@a{7}r4HRZ4Ni8Pn6tPJ#k9QV@o%CYqoRF@? z1&?-$bD~@TlI#PuIM0a~cyE=U8=wl{QDu`X+%lOkp)WQl+y+~I0)nr{TS`MM@i?dG z!Hu`OJ#Re$k`3kjUKFk-)zFzjPXGpqjQ0<5BRHvT`n68n1WDt$)8LXx794u=Jl9inhOTl zy4*tU3>eu#sT3Fv|_Nmk$>MddiLLcl?ftEQR)K?w&D2nwZuD7ZAh`NI%oX?s8k zMEAs_A-z8f?rCt%O1ysWHp@C9+BVuO+wo}IE^kwuTNAvv^5k5M&d#;BEuEgT8fWL0 z9aW)2tK^1}=hl|eE&K$b(ZW&u=HSjE^TXmVpU0gy%4kL=MS`L6Q%MJjmI&Jc^M!YV0ahT)5@ za9#<`svH+wRt?I;;PUeFb@@K~un?<%EPlC1B&DB=kR@r1F@m%gzFk>ER!6uB6>bv0 zWamU)Sd3)3EctQeU6GgcQ{XzSTRrG!5QiMChEIC=GQpYzT>vrtt^61r^j~-gzuVb` zAFm8Gt!h#=l(bPf|8ICxfYb;QiA3f8HDUKtEU^)LXy>qjibDbva|2t8qkJY%y!_+> zo&3h>Kcexv;0qLkSc@^b5Q8Z62^{^lvUdE$vSn);tt0S$=Tk_x-d*aFu!0Ro-Y9Op zM;sS`p0Y&W%WI9jRbE%@t+Ie$Zn?Z(pg^bE9+ zJX1I?X2i=u$_Bkf#13LZ;3nn>0eJ#+fP`L91YozIt)D|_xuBB&(Hm_1fDOI8MxOB( zGCOz#C^sFg!x=PeGCKZ1Co<gp2|!4jrbaSO6X!>?9ULbX+xTXvAmyQl}9%v~VI= z3!M8u(_J*DN5n14CUSX+?wpH_?oUJJiCINd(OXJh+ks_BR}#7t1V)I&!e15kkn~O@ot<>Ic)hij70o`d z$5cbTGh8|yZ?ffvN{0daPq(P5rQP=gIt%$7Pi?-Yg`I4&9r$qRpXgL5=4R-lEwC5Z z&PKGL;Guw-I3Xv6FR~bjNJXixr6V{?EQ}zK$$_4FBGB5oLYR=u#~x_PWUkePBgr`}zS=;U4%-t?Dj4?Q=CpUG}+675F7%!W>pkV-far zsGNdN2rIgXFUF}%kaB517sm6;&K|lz0Wlx9i0PzofhBucDgzcs`!|g>Tuce$Fc-)k zK!Nqpt_MFS-1Q(hI@u3M8X?0O+3IDm2HU%sVg<_U2YyKyZ9D6$#d$%&>K6MTM2V(V za47Nq3y5op{f}XPEUYJ0mqZ+5Rbxjf%)C+$0ZvpyN{nDm*z3`@P@M;xMetFn;L>IZ z8wblNZ?4Fbzl#nlzhLK+A}Re?Cc^K7lh&nXoMQed0&rwnBu$v~U^qVr|Ce~Aq&Fl{ zc0(%yk6aOtwY4-g7(9i}m(#l)psZmmBE>jlN=z9d8Rnlx%+s>8>a4xUr|?sHlYYdg ziWn^jq5W)?{KY6=#%omY)$MzrwCg%u(OG$<7^6WG0VjHA1-*3wa0)m1-DC^^oXB*6 zcMc$4h(@p+R+VrgF-XFSr3H|T1Q-khK^aaGJmqVG5z!q<>q&nRbO&)SkbB{)kHpAo z1eq88W)k$;6=L{^0e~qsM8N=XGo90gXe+{vmUIJpZ$KMpV;hdp3Y!M)_ZXCNyrKj& z0S4;`oiNA_(IJf}y-Idn{9nm!^>p9}5`n8g}>V zUrayz^{+gV{$l?8bb55puFaX}3@zx6u|0dn?kJrb+O=ZEu3wh*9|1d+{9F_%XFJ>6 zAZ!`*IyQe&kWexolH3mqGT90gLz3Vz%{5t^R3F>l)mM6}Dc=;rzVSX*dQr#$(5P?| z5hVt(sSYrJlWqR{?Xxg96*D6-wK{Y7L#b~VfIer 
zzOlAP7Mk|$iayeI{Y>M+!^!Xd6GQO!KQ+xrrT&F?_WiQxm?Z??tp^etdbtAaLlWc)xcYL#)OVvH1n*7eUFBOS(lA7c~Y z2IQT6?~!HXyAD|W6W!IHsK42@>i;O!z%+c8z28&0^cmqjR^UAl_=pNvLsh%<8D&)c z7}Zx><*HKN`22)XY&|}#it4`i7q*Ufty6iA@|D*VYWQAlm+O|(%KGK9_j;b{S3Xl& zm!5w=ZB#zQ&Z#x4Blyo$o9;7x(e%Ge z@0jD}A@g4Ilja{g{GwTJL#a3tQvK_O{*O0kr>aOb1>I2meR$p|~I<9pbbUfuaS7WJ}sJXx9$(nD~{GGGS zdDMBz`JD5I&XOzR+UnZp`k3n}*Ppp9?wotK`>6XQP) z-Rt!o^{eV9>OWfl#rhxAml{?z9BBAz!}lBBY`D7XE3jegVp>?=*qV+`US6knS)J0B4UWxp)&DplOZMN;nw(qoEY)`e{)Ba@p8&Okq zWAyRpUq(x@q1aUHSnS!@f9t60*w``K@k%EJ-V)#Zsd5032=w9NmwcF+>f1$LfnDs6 z7U}S?@}QAt@I3t&BTrEn|J%r`N*h~g=j5;%tTT#VU)}> zSRnqBk>{{x{8uBdDx=D;jJ!#yWj7mnv(m)wHS!iEz`m%A;1%36$|PR0O|RJ2lquyy z_}z|3p3V4bcq79>yq^0oUc;>^cZ-*CA3$!ScxCqyksijo!DdjFK>a?X9e~Xd{LLyW zVXIo9>@(_8D(m**rQiEd`yie>f_D}vBZp@ukId-W)Q7a~y_zD2wHmLmtW zjfV~%*?8#i{uwRN+oyFLIC5lm<%$*iP`Zywd+*%WdvN9m+NgNf_%+jq4q`=?y>I*$ zl-)9|yywVQV)R$ObX>zcG`v@-2X?m}%(4&p6dGDKu$9`bgGX*Ta{G+ludUSjd$K)= zzJAoYvN>h3qVnEvK;J!c_|97n9n|`J@uw+(-YnpC5Mx+2u|u;n2Ybr1lh~+SdI00R z+UKVz#3^9LnaWIfqmu>pDjVJySH-H8^~wf7XA>~z8s=a%piM63Mzm5b^D-avvjFTs zb*!E>uttV}2*j(kFb(lct$6=T8*67#7GoWF{c9KNhW)Gu@x&`wAKvbapb3^@X_kSM zpJM}TB~B-)0?GVe8ojwvlaOqwE^C880lpmR-lTvTbZT+rh@z^=v2G z#dfm~usj=QH?TeIMs^e1%Wh^9Y!dWyn(1tY?PL4d0d@=2t}A7qEw zo$Ls^iydWmvt#T->>l=EcAVYI?qeTe_p{$&A4R=}~ryJ;px8{wBWs(+ak*ctXb`wIIiJIh{RUt?cq-(WAYKW6jnKeCtD%j}!%PuMH$ zPuaKFx7l~tcUh7BC-!ITd+ht{RrVVDbM`v>3-E^j%+9g@!hXnp#Qu`~m2xFed4C_r zX@~v(8>f@ z^K^!%vpk*S=>eXemG|%WfGs83cc(#vc`*}9Ovq_#!@obuBGd!E+*&NRf@a!bd zPVwwC&+0ro!?XK%u8-&Xc`m_oNuEpbT$<-HJeTFU9M28#+$7IU@!T}e={z^XbNl!} zA0O!F0|`Emkm zHOZ%@_|!C?()rX3pW4T#`}lM}pHA@UB%e<4=`^3t@aZg{&hhC1K0V2&r}*?VpVs;G z44>Y|^**lmb3MWJB-c}1PjfxP^(@zOTp!>FWY?#-KFwiu)Mto(FudR2RY_h7N?a=_ zyYd^xHEqk+73YpE1TKJCP=e1W%5egj8?mFeloRAV??P{s?&NM!x< zXm4a005N+Y6@X4bOM5s*w%T8^-qJ!;x^~iM&?WzC9lcfYveKkp=s=Nir4{<3RTUKQmsl*>#sPK=L_ zHx^j;_;{qCY|qb(kM|VRxVAwnnA#^XAoIxfe8C(UE?6SN82)&HP4pB@@d(DH>1WJS z!y4U@ofoP`3d+QWg4z{E>4Y?vVhesuxa#NFn9G7tZ|J7SUocRb(1oMDj4G0iE*kj 
zv0e<&7JuGat&D6K?g}pg+8$pH_$t{7>&6g9Fxv@j!->cwErNiO(nydjXpIFdYa3NKRZDLrPK=)_eZU*Udc=*J`nOaMC z;c$0jE5PK#+`QdA1%Lbuqci|GQyPq)Q7Ns9pD|HdA3tNJv>|@RLTO|CjFr-+_!%3e zq4*g)rOk1rP}BV{7)T2S(u@W)4204!2102o2102B1EI7H1EI7X1EDmEflwO5Kq&3N zKq&2uYpVpFcf~P(_k=crMVO#Pn?zdZB&6z&7rMF&UDz&hVCp8I)K&LOWHJ{aI`y74 zfG<6Tp2am_fkM2i!2Epz%Dt6PS$=CpTuX~__Mr~jaOHLd6}alKs9XtrRnXe?Ly_E> z70i#B^kd!_=v5z?0M<_CdJ2hnZ*WylA^F>?0>h?JJ%y!E0_|F_wuyEoKzPlG6PqHN zKne1o*PwUUu1SVSN%Wrv2?+rE@h_?r>?7SXCwe2Aw(11h$}HX1dSx306WT;AtuR5G zdF_t;SGcBXjbFhF!5hYhiNM)FDA6B!jBLc#!YVG`C)m`iTT*d8GNDHb>d2%H8pB5> z8~6r`3`8wzXbaTZbVmBMRJYd ziuDeU8)Fc$e~xpta2BEhJE9 zQ@oHuGD=X}0Jv%!!L!P6x+YHOSQrIZH^-k>ly%5#L55N0+W7NKlw605DA`JNhH+~f z)uGIGszaF_REIKSRA&g8>!}W9c2XV6?4ml9*-drUBJ%;NLzz6)q0Bhdq09|bX9Sr& zREIJ*QXR_NM0F^$m+GuR=4PrxnF*>xnMtZcnW=aoy9nlKx+n~ySQoif$ju0RLh))` z?28w2i?#RDg{XZ%vdqYRqR@Tr+G9AMsVLf0GmB@H{k&9( z$MeMEdX%D4)$7*{jm=ME&&yC9P z5Iif6Z;~z1Ves>XqTo5s;51bGZ?#U*(Z8WluQScPTCKR04^gV`*3_0;xaw6`H2dQAVS%Dq4X|gY2a8zpT7?rYl=nrE^r*8M62n6<51-) zbynb5S0dELz_CRMSC3!?)zGWZ6^+q6Rmd)Y*8ZBUCJ<}6r;#h%J5x)=g(6r@tvg%QbyuGN*SfhP>NBf2*-2qU8YRMQ6|b} z;F$KM%Hy~<3adCsiN(GjYLsD{siZ5nVVe@DOMA2KAY~Rx2cd;R)a$P(!%7Qt%L)sk z@+zaU28|pPHEKq2X;IXiqOz$`nZ+~8GK)(eFN}&G6dToVYFXLL^xJNmg3>8eI%w9E zK{E==(8dTQUv@MLhxx@buqz6b&|WD*SrPXC?#a{f^yB2XXq?mKjKrag%Hx!QN(%nt zF~&G05e;>Du=J>LGs=p}rWY2(MWsi@4NMsr9~*~Smp7+esHiC8(M2gHqewnEbuuXM zABBsBrL&5PXGFyf!iMu=%xEE=ZeZ7e70)c3F)%nfq6_oCcYtzkr`1MTZzU9?0QF*CfW*)7K1+6`zJgVd<6P3we@&Yj6RAm~7d6y!czsZgF& zo>Jy1)yhJMn59aMvO;-UaVvGov&t%^L0PM;S2ie{lr73OrAgVTJg4k}8rZA6r0iE( zl>^Ev%3XlkfxQ4KXr?WRVk*Q!0#o@%6eoqB`XTXm>W>P>32 z+E?wT#;CWdgVb0xUQJY!)l@ZIyIlaY3g)!hB{L%Rm;@bYK8iw`jk3PtyUMRi`AuSjk-d8T6L>+>a*%9 zwLx90u2(mxo764pHnmCJslK58mwHYWaq$U>Ny#axX>qY}adGi+32}*WNpZ<>DRHTB zX>qx6d2#u11#yLOQ{rReWO4N=iyn=sX$fhGX-R3xX(?%`X=!P> zX?bb+X$5J8X;X4zbK`R3a}#nCbCYtDb5n9tbJKEjbMtcZa|?2(lt(<>luU@)VRFGVdQjl7ZR*+keSCC&&P*5m^=>NN#xgfg(Dn?P4flQWzP#8$% z84yb?u*F@_s&^~*fCcYWSAuxzK|ZTNKx;rk>p(<}Aft^Sq|G3utstiDAg3K5sAly! 
z^?7v{2y3^xN8PKwsJ^7`Q}?SaYODIPdO$s>zM>vd538@Luc>Y7Z`9XSkNSpsL_Mm$ zsUB0`Qr}kJQQuYHQ{PuVP>-u8)DP8@>TlKGsi)MB)ZeQgtA9}csD7e;s{Tp+O#NIv zt$v}NQU9#|Mg3C!O8r{>M*XY$t@@q%H}&soJ4pKxB9cDXsV`ZAzG-WYZlE4Bz2V*riE+Ww5zoU?HcV`t-IDkvuQmwyB4YS z(yr64*KW{m)Ou^b(j1yoi_-dNH)%I((b_FqU(KcU)B0;M+5qiVZJ;(tsnc%LVzoFe zUQ5stwInTBOVLubG%Z~ltlh3dEbSp}v^GW?tBupfYY%IWXxZAM+GARdHbI-HoFTb;Go)k{B$pqOQiQUI{pWUN>k4Jhe?yuQ9y1MILy6)TSM_%7{{hw|abi?Qy z=H2k}jrZO-{>I09NA}L>eYm&(S2zD^!LR_Y|9CP@b8P0uCiBZ3fs*P%i`a_?% zK1=)TxoO?a%cJK;ABz6*maA^L_m+jXeAxH;zLWcY?YhzRtZS#M#r37@d_Q}?n11*4 z%kHlsJ}nvp_nZLZXJ*{fZuxmt!r=nao__3rwyzhCR}d2C)`j zc8l85!WXxMv_$fce9w!IEG_;8c3(DM?9aAFFfY%cKeZ#v8`AR(_jF|0qr&{rBFFCX zN4tE{E-TOBG5Rl6Y)3_rBVsuInb#N1nAac8^ax+OSM}BKoDhB%EsAj>4%;~H;Gx(Y zv=^bm;moGyMGm^iaWU4Wb5!K0=#UNI!9slFJKcYI{Yx6Wct7)+9}FzCPuTe^Jm*d3 z?!p|ryKlZG4Equu8(^0 z?rlSuA(};~{m#1{?aPFPl|EBeJImnj@lxGq@a}dI;Sc9Cm|p)v{cg6Gotymk%u|Mc zy7<^GhKcU_5uyJpiT5ls4)XE#cSW|&uV2IUKfKRXBjVha*(#PUgy(d$+Wj>m$I4d< z4`Z7;5EM zsp7?2%zL4^P*jl{qh=Ytxrf@jykoN_o{btrMf%nwxW}tKq7JM~CNHu}0 zz8bok{tiZ;8fKh2rH^}~=nw2PJH6-B8*doC z#ivk3e`DO9VJwxU7Tq~+oN;QHe(Kc0vy5x_oAi%iprZ^CWq#m9}4 zr}WB=3wE$(*1US##*GFq`kg)VZhd3r>M~Z$iWihrRvIUV=`X&x&BKncBW15W{-O~v zXv=J0v@cp^zG!o{`-Zvv<#r}c;c;DzpVEI_J#EocHkB3CPj4_V6k>n*Z4TTO<_bN| z-k$y1RKuU*Ptm8oHv4UMobhyi1GaQ#@EXzGzW32Bqu2;0(!~wf(s4Ly%cFa#Ihsc) zr$WHZ=d(Imz2~zqhrZ}YS`lB3l~xanOr$4e8b~TIogqC_eSNS%^H$7Tys+93^TZy} zlQ9>T$*<{^ja3^RzUM3(8yhz|eVW%RdRk}h7E^iM@@J}7EvTEf!f=b8b{;K;h*qXA zK`;HnxF@n-ScDhS&f5cn#1mi%ZQrf}9WAM;S>p76YF*;4S?TDw!?M!tUg_jxthVp* z{1)4{EASMn^oQx;R2^bgI}c34*6?`!(P0# ztl9Alt9|+zX0(YumW5A>5HW2+Mpa2=5u3mY))($5*-^6Zsr}6Gt+MQ6FE;LIGTfFO zJJ#=G``Ig%d#iR#_(X*8X$vunL@#K{Y zbjIEj*Brgc@Q=3~{oy@+4P(a2)r=<-&(m0>^blHHoY0)?=7$HS-J4fb`WSoI=xDXD z*Gpf`+mrU;!{4!g8C;9|T4)Z}`7Ha`S0)}g^2#em9424KfD2-{cH+db4wvt+HK>`K%$s#4xy7*gcJA45kR1*_qsVdDy%xHSZgILS)QiRT z!|4;lQ&WczPj!kIi}~mtk_H}AQh*{oBvb<85VYbA@#1<#jb5;5`t(HwMok6tAJ$V( z3_tDg9rpSUTZ+pu{a6C0@38N%g%-k*Ej$*N*9As{00u8gKEyEC`BrmW=%Axjk04o( 
z;(+e*e;J^{Z6+1^z7%cIV$xag2T_m5dx44|AzSU{u*4XvBw?|{TD-Nq+0l_@kq^U{ zfd1S|9AXS6Vd5)e9W)=9P(ez>e z|D(Mp*1c_@1u+C`u;{}%N7--K{)Rmpwrtq4dG%h<_15ZjbJxvnC}#zR*TRlfy*}k7 zW6DbpH$KFS2p4fKhEEa~M=7nV-AAt!w8;O=${bg&8;w<)CKsg8Y+5B_kmY2H)wOZ8J_ zN5*a&W;Cr?zm{+Eh3oFxr)!th8j}v{{tCatKJ=kcL!GSOxWvH|_Lm=?|0-mpi-%)# z{eINjL!A*z|M4Rb)ECV#^?*H7CgD+Nh1?as~4BgDxtwR>sTAp zS=lq?wX=vkQC8CR^Y>Au}aih*=HkItHXx+ZAW&0uHgQ+9ESW*Zn?U<=ujnkCB& z(Q8EUR{fLH8GNt^XZXty8K0&bGs;D;hSJ^DO$|*A4cHk&c&6@Nx4M2kGngA=*XH0v3OCrvg+U32OFpu^X_o z$mz%eO991t?Ed*(JM+!A`r9F#E^Qv?0PtPPsddTw0z4>t!kO3R^$nzvuw~1ZFEs{= zk-F`RTLR?T$0CKB|ADUT9h}uP3+}32US|yCxXZh|ZdonvvVGxy01p~u4Ppx? zNfC$5%g;t~?Q19oQ$67OYpyv_gq_0`8WV;k4E06(fi`^6rm&OR1gwMtf1t>eeP$JW zx7+D*2lTTXpoe*T@ONmSwpV*QhjIY&Xk?0hV75F^BU)`L+M$| zI<{d=?ONkAXcF5iwQHBInTuik(VxW%PoZG(`Z;T##BAh%|4oHB2MUq@e$JmDOA*W7xUFP+GDlEWOyOfdHL#%VFtLHk0aL>oqb=3`X9YY`oNX3ayTy}Zsyu&)T zp?aO8!(mz1(6G+g;RsYDE&_zY3Y*xHyS?}$bVpVV0nCA6*)9Nv(#HAvb2FM}?0kYi zbLrMu+sd{Ze1sKC1gPdAYY6LNT9%lVt686%g%6+rwJYzzsyFxXZMQJg`i zjEA>1&&LJb%i4H&^BP<^bt;>OuW7~==EZ&Un{i>-Dco1QM#mLBTe$5(CenhV#3OHp=L5aC?6+aMr34S)3pyq!n`I|KN;uEi=E{~*l}_Y? 
zw|TRz!IRU&Pk`XO0qVnvl)u@oHmkhi3YDriJKK5zY+wQ+@I4jPA1vm%*N78@?CxR8cq+BKU#(3LsX4^f) zG>K-4;n-%1nH+mQ6WefXGo2h4P&5-7aA25i;}BP9To@>_pPkKrwrbTP!0L9vNd-&N`?Qt~w@PCkx#I#DJdxMt8^pU`x z@YlfjlAJ--gRCp(UU~q*8q%p@e$z#AngELs$>U5wF2LIX*)TqXM87GSr6LUJITK?> z#lV=IUQ5v053aofMZtk*i9&mN>8LwdoFRY@xE6o}?CVi~NN+N-62Nvu9}qQib}^|N z@SNvcJF=iqZ6ALbVPt^NDw_;Snu&(u8e+Y7 z^yqt?*;aP%fzijS48D4#zHZs(QudUQE%g=H$ugfUbT4xo-=Q&9w551k)wZhUCC@YC zV-U#4mJi>2^FwEwm3=t*%@K`;Sp9)Mw{}hwTMtb^TFk-SmNjfuO>K=a(Cf9bJ+qt3 z8p|4sS3bdvAztV-npz-vpoRppD-y79fgN`x4K{!awaQ!&U3>*v8(r$ziCR6G;Vc zQo%dPn7DG9HG&5wB^4Fv)zzY2tYKn?A=3Db;zpi^?M7^A4#sDQdcLN*!4UWRM@k$> zgc}q&Cg_u9CCO3~V~{6=5Zw7zDMO`iEkLtGWRR`kSsE@T09G(fgTz`=5fQP~gr@sDLbk-_3w#{RMI7`&7 zBvd7|MP|ZB-I-|OTbZxBulu_r z_4?{f3)cos-nEN1ET}gIefPm}{n#<~_lJ&+ezQLtJ=z#Ca^Sa++fUZdhscIQVTDm+ z;kqcc^IoEtIEk$%zYg+_9Ihl3f@03J9l)66a42P%NZZQumxE8sAwUIsEIAcI&+ zfBq={%|F3k63}^>gP6x|+j60z0q;f2+ijQ{lB&#UF0l!WypaTU(7F|^WkX<0qS*w| z55g)-$DCw~95w>o-T;gy*^;m?O))r5;v~o)*>(>bI5`x$$F>EYTNuMOj~C$tJdS^S zS2q*%EFJ?$K}tBnnA993lR)4~whvZqT{AcT+}2I_L#(=L*&DN7Jw3Ejhh%9)?)jhj!j`R za~D4U#NMg>9#}r1Cgm^lPBP&3-OU#ng{Z_R|cOV%&mcy#+d>77?Q#$W&f(GnMyP8Tf4RaEVX>j3uFRiR3V)hy+ysmzPK&k!bBIG|ja0!VOiJ~lMb%F6g-Mpa_JH^E3v0uo`fA7d4F7z) zIAE==U)12}h_N)(*Ecx%fuO4s-oAjV({~u_Ai=LW4ggDnzdcFQ0?JDa5AU<2yllAi zy#&$WC6VkCb9p%!(KPL_TrLy5!{JPdDOgTsCB^{0$szZqG*{H)ak2>6Z{1Rj8BJ6C~CDa}~hN7;aFXc0O;4N=;fPz08;5m@5i ziEsIL{96hgwXq}6Rk7a)q(j8U3M5BdJeKT4jE#*L2EIDjP!x?JRgK4|Z<1k9#V#-0 zBv()h9j#Doh@Zg5la6s3ErWlYB&3Tx6R>8`8rgcCm-W0muySs5YU6b z9-iPi{v*!@f*}Yi(U7#>f|gsrfWyuV zzW@6=R}8lY;_R1%+et$ZotX9t_94E*B+o8*H>wbDc*=l$J4%#9I6%^q*X`EV*EF(5 zEZK#;0n?8IquhQwp>9+Unt}WVtog;bfH(`SDq^|@2M}oj>qyR!;j(2===ysgP0%#a zk~iqmHKV6ANhFDgP{GsC#rBLa^E=|43vSC0{yD8WwT`)xuO7pX>EbCj z0bpnE+B;2-_iJaZQT{Zz4%tz|n_7`81?p9m|ifZNpOY2LQ2 z*~zw7Y@JnW{CGt#y={xwkFZ7OXrxJwG&xR}3=&W%kvyl6Ri?eoA0r+M;g4bYU~$tj zS$Rv1eN0XMoL^5fCQs7mEvlZwo-!j9>)ED;`nATvgZiF5C!cN2+h6eX$ozZ*f-vTi zdYh>pglUZa$tR3=&-kRcdD_Ou>nm&Lu*wyN{~GbObcgC08BBElB;)9q&#Hdgv~%^2 
z^;@?Z2M+3M>l-$+^=1&_DOORvXr3`?l3rAlxj3)2VE>8_T3XD;>+4rGvIeu>a<**6 zat0{3h%KmI1{iTr900zh6}Lw4Re$^L9~s^rwrbyLM1joVbsZW#^5w&tH0klBCC`*R z^Hc+4W~c+`lp^&{HdL%%w0_a1xotH@Tg`7bz5DJJ#%om8&ZYrlZE{4FJ^Pt^D@Tno z=j#e1Ut7QW(otVNvdKM9EDi#{r%E;4da z3rYY@xgnv*r*jx80S&pKRZSO-vdI!|FO{y|V5S#xy^!(6$2s3($JW2L!@aC-3A`T&8#Gq! zp1X}5Wrq&oYunu2RgH$rt1qivT({J{^R*3cGQ@R*Nnrl=P~k*sLI`(ayRb)ogHzlj z6l^y+DZoLlD+~p$JE<&#PDPUa(h4N&B!?rd1Ww0vrzXydpIEiL>fqi5z<`>#~JpNFmqun z5f=~?X&jw3Bp+;5TpT$&nBm?2@BdxH!gW|N#p(ao!8fo zLXo&N#*3-4{ls^HJ0~xgI*Co9a6FtfK`R}Or5skPOV|VDwS4h%Lr~t&MID{3+s-l3 zkE_Q|yDvF7_&PAPz;&-ug=a3-DyJwz6a8zG7U(d`Gp)B*{y&pcqwc{rZ zzKb{OEiE6c*k7=}VEF@6fCSuv=?fNAvIVObtY#ZmuQr}_fBjwN$pJC?V~?@hUw!P= z$3A7RzG}dER1-u71^XY_{0N{ojC{yJf*}%jdv!mO%iyCjZ4onAO45_~%NLD|BFZd6 zU5YW|wnx~c$7eqL%DA0FSqhs`Q?jIFQ}xD0TbXhCgc;!;{xzHqCxHqf9c29bL>!_& z7q9t>#Yy|*M@CH_vD~nIw6k!-1eR@#AhBg-uTMWXX{&MG;j&LEpFRnRR3hDKTMI@_ zM?Mu@n>hZ#>6t8(J-BP42bz~2v&Q63$Oj-}Esnx|!tpiGF1gmt9NaiWFg2$rggM-2 zX>uYHis6ET#>%*o{Fgp;;~pGZkj~QC(Ea1yq2!%5ZySU?S(s2f#N==t|Lua!95k+c zd0mYwe|IDbAsq^)8js1g+kSu)BqtKZ1!GuZ!Tt9cybbUN6x*b1RVf>=nr8e=LRKt&Am7KttP~DM?F&vG2p-}FU}x!0mZE{a z0y+pCnED4ZCH0T#x0AVyBoiq#K2xfzTf#(zh_)9_*VFGC4;NmD5mcTWN)+2T2)>Yq zy=m_og}WZecxk$RY{LG#*D;U19%UCIrnHz#6Cc$r_{%5T7Ti|E-ZdhQeU zec!zF*O&fktS#nM@IZ2G~apy$t%;kLyig^3mVL6kMkbky1 z8j_tAZ=ADwmU{_Xz~&pa=R_51Raw{?xO`VG*j~9AxlV5$IPm712PThpu;R)&3ue`r zb$J!)p&DCRW7vjoU$D8dnVD559~kW{W^*cMEm%^6Rzb2=qRL85x>p*uy4Bk^%2rX$ zF?#ak(awlx;gf-98;X#k!3?vI%pA&zvzHbc-uZg%j{5DJ@Y%KTI2`;hR&B1_ zTv=bnN?GdEvg}FOlSbah#8pPAx5>&*@7mUOu+!_^JXZmQeN-eaDEtz+Nc@ai#Kxhxw(7?33w)iF4OAd_@m(VASU zPsLh+d7rat}dTRi8YyGAhNs4ca*Owf`7*4 zwYY0|iWmdLm

=q+oq7+tRRgr-9Vc(Lh=j6D4m!A>yC8%GnaP7{>EZ zX-pf@FJa{XJP#(u2LqqMU@wxK*gp@RI%Nz)Cil1@MXAUql8E#os&k%ZryhS}tU+!w z>9z16Hz-^mcBo!f4A~8e2ds3 z&cO2VMT!&rgg+8S7IJraDbK`0mQqOhIZ?*T#B+fQ(sxP4LH{J`Bc%*8f;>BtVQ{e! z?6*NAV;&_i^dFY)R`P{8C~r8&YP#5-_90GjzqEF28zgpiOJ6Iw)*QB5DSygpgG{yB zZk5V|mftjmV1|4Q4$mtp%5$Riygfy&4&Qi7>z+NWPTpM_oIu;KH$9OqtH`B%_d#Xi zu`OSI`oVV)B~VecE;QLvrv%j>=h`zIF8faA!5Dkq8bRA2Xw7wp0| zUi26%dOmDSx1!w>qVJ!gTE-uk^z!tVr?-?JVux7E)|Yp^yz9Wh7SEr4Jb@@APd9d1 zMbFnok0Zk7F)CK+=d(hWu^G=!+dgf3VawD*_npb+S1sZ_41SnL1mdRViczLztKEF3 z!Ib}`@_+&{5ft7b#Q~Tk6R%(tfJ=IS(rhouxu=P?orJU2_7X)O=+z1^A9<{4N?-DN zaSYpC5~(>AvQrsrm5OW#xf5s_i8M`jg6vbe806et>4vWU2lEDM1T$!UNMA}z^0FmF zMw(ngB#XBe?a6bT*Doel#v@(hm(K|ANF0XD7}#52DdbEM6XwW6EFlhYf!2`_IsGAr zvGa+ozam?R3$rCC!tFwC2Qrgvan%FD=*%{&x^Eb=P-5)1Ta*D|9a)jKK0^kC+42=> z!JCzHQQ5XNa5v3R4B*o!1RQRh)*&ul)~p~hEY13>QZ8uFw9K*bA{r46zR1YGilP8F_Xw6bMUB{ z4;CDs1S?3Q6;{|NA_2}?dW}b5wRPSHF;xI_I5h~`2B1DD1<8UKP{`$JzJZMTV4ClF zdxo74!5bpjhT)YM_%rYZ7~V(lV3~t%8|1dh1#d&%i4>h}cnJaTJMb8p^betuO{5zL z1o;jlv?E_qKrldh*U40Gw^d^tw}c^n3fsim%$gQ%s(^QIQ^nuJxOFA#N_NcKQNN>p z?Q@HEEZR}PuV+n0)7B=EYY4fL7H*E_2bpux#>%y`<$94cG#jQ+(IETWl3T^N3N(49 zqM~$RF*9J(pS5mb8`suvG}u{wuvtQ5yz5Y0-qhqoEVgMszaCxgnD<;sy;0%TE0$Nz zTTp@f#3sDn1S{EB)9wx~0vMMN3Z%mwvqYr8Lfm}?tb4Hfz}$UC>=eDBxNZiUei_US zx`G_fv*(vKR~vi2)645iYfEd5l`=~}7kXD>N5rI9LaEHfJoi!C%B8pj=uHj9}Wg(wmndeUV#b|UDAV)Y&Z zfRy$@;tUobDOdRinxhwthKBi)BZr3hXG3D%73QCBCPktaP@{Cg$kd|1Jw2_ql-0Ot z$udfp9|N957A(C3;!BBKy7ZDV+im`GmsvHI=OFiW*NVsS4-%vC_eJy zTTzdDBV(;_45D;|S^ACD*6fX>x}8hWbuh2E(~wM`(hKNhXc!NRyo zCB2kHNuPxO&1q73Gmx4u91RKw6Fm!rdXM2r)4zR-YcKF{#=9{dI{n*GhUar#sJ|7x z_M@5s_;x!RR{lV~@kX+K`1#j2yv^Xnee%!~hUbj_!2Ub8Wym^|tUtgMYbt+(`gv9M z6U;IGHQog*HpD^Eq8Ajf5&H`^&w*HC*y=ZLHh3#Ps5e(Xk0d7!`xe>Mv`28RX1x&u zoK5JoyBiRUV%38yvizpm2 z(`yYEB?A6Pd)Dw<1@@8ZPlS>dUZ6=L}CXP~r@~)LaVY#s)J) zo#8U3?Yby7y=LlzEGJec1TR@UoFsD4XG~Jq87{8}EK#Y!!h`-!ywnizg$~0Jm5P{Q zr-HsuJ)Au5ofDNWv)RHg7}T8y=LF!F;r7dI=pdSgO2fvhukr{I zF&schP6Qb_z)6U2Ai|0#Fgpvr1W9T~+DG!)KqOE>;pBorgdm(U5`tM-PLz^82;3`? 
zE_fROig4+E^3U$76@0Tz-CYxG})-B(dRFjKX-BUq$#7z9)MuHBw*zX$1g|K;fJT9{{6r9$S+^-e2tDf zpZ{-d2kQp+o$Ck7{@t@t{m%Dvu1oj-Cv9}T=l|mPN__^)g8TotAN*om=eoZ%*3NbQ zljHxbonLxRD!=R+o>7(s_E)R}`s#dN=i|=LtG(8ByuVbh^F4H|{?PS4D*I3Gy|k_W f%X4~$E_2;^J#ifP;CI~=<%5iE_!YyhznS + + + + +Created by FontForge 20120731 at Tue Jul 1 20:39:22 2014 + By P.J. Onori +Created by P.J. Onori with FontForge 2.0 (http://fontforge.sf.net) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/open-iconic/fonts/open-iconic.ttf b/static/open-iconic/fonts/open-iconic.ttf new file mode 100644 index 0000000000000000000000000000000000000000..fab604866cd5e55ef4525ea22e420c411f510b01 GIT binary patch literal 28028 zcmdtKd3;;feJ6U)xmZaM3$bwn2@oW}1>CTclqiYRLQA$5Y6)oBGM7s&UL;16CB=~) zH%=W_6UVZgVeQ0|xQgR(6Hfyvk(0O_J9V>Qn%H&oG)e0>@i>{hWS-onNvmm7eN1S+ zzjH1~P?8h3Z~l59fphM;=bq(ve&@HJt1v}T9Lj@=s?4rmzvGs>e#M?e$-DSAY}wuu zPnxz(DGIB>^~Cf&le3EBXMcd}6Zj5KA3GXEIX;t*;HP5m?7n+mKJ@c`Tz^VYD(~Jm zd1MylPFz2T)UxmH5A7ZOe}4HVio)j=WvpiZ%%sNt@lV$&%8rY;pWcrG(tJLATS5ef5?>;m=`T3U~TdOF!ucQC(+%tJ%mOWkhLq)lj+7BL_yl3W< z|K$8OuAf04Cua{GIr?|bL{U+0Z%`D&^z7l8*&pAf{=TBzgX+qM@uk@--(Pw5FDd=Y zzv;PiF*WcaJFOVej)kLlWmcx_K_#l7Hdl-))s-Jiaq+Wt?>bHS=G)5KZ>d2Pj^cL) zspv_s6cktVJbfGVdn<57wHg$I5=3giAFkhi>*`hfDp#)t<$c^@rlkfMM*)4yKjpoZ zm;e7O&j~k_zvW&)&a7B2n1DOHt25zBxS|PHxb6pE|LkYEcj28n_7e#qH3-ZzD|Xba zuyCr&LatB>-zH{GA;V(qa?!?47iYCXp*YJ<^ZA9f8oR8`&1u?oZB#99!|V;=FIv_H zHB=}yp=sKjTsBRN!=aeIVp3RFXLZmQUKG&EInIE&niKmm!2v$!20ko9;D~#VS11nc$`+=KtG~yf>$N>ebwp;yRE`v zGH}Jv)#<|c{rH;oR1LoSw#IV{&!ba4$LBE(`n=!v1WX7n_@h>+xl&r**uQ0L1!}B7 
zt%+QDbF_1>eooBQh?%++pHi_R?rNvaVp0_&7C-Jcx2Da0VHnH(`yji@Q4AK*~y%C}@R$UciWpw&Fz=BN&REs|Hb5 z;$@}9KzIq9aGHV#O5h8E}wr4JV`QcE{(tKyortc-Ac zv8~hc$>PQ3trZG48duddZHX0S*S59PQlWs6zK{7a+O3K5cJSm-tA>$kafivtXzwF&by768I+`}rql(K|3%uZ`sLDML~eis`agzI^b!&%^)q#exy z{uPQ>X;RvWcC-W=e9lS}(GIuYlzx?4YHksgUImQXzoMzdf+Q*$Kg_9fyOSJZs$*<<+E(%oGdnwYpO{(HB(_-7zv zf{W|>&!PC0imz2WsU5X!4}vIr{4C;UXb`h{hi!c4o#Kn{u+t~=S@!wOPZV$8Jb5y& z2B{D?Kb}81xtV=Fdw=ovEV7czOS)@RtV$L75Hy$i0P=${%0+O6L9*X{n_ULtT`Uma zcpe2nR-kN&c4Mx7aJ`5UC-`?oL-n;aHU{{!w7-%2v5+p0DI98!q+H=t!kzY;Lk8jw z9$!4Yk|kTp^6XKUi`{*~_MqmmFZ`|Dqdj=ZUUQlSi+|q{2y_IPLnLaD+1c-X(xDa4 z*gYOQJE*Z**8?vU0$$A%qWMuB6`;a#{Ho zt(sfqBHoMjtCFy>n+Y~b9K*m+LKs3S=}r*hvY}^>Jv{vG+rtlQg~72wVC>ju4rR7% z$sGF3*uqQggM&0jfww#&+H;~s;H}GHHxf>{6Grf~aLOFbL^J-3H)Hl@=HhJ6PkvH7 z8{f2PZf?^i$TM?l@X8ZUUAdwcfOZf$EZYxWC7`sT-KIvruTtPDUw=L zK&%PU2IwJhOkYnG7;3ptY2dV;w43plfJ`Z{ovO3g_gK62-G8vEK~3AYZ{eI3GQtww z@naTIz&YGdTO;7iFb!-NY#O#Y?0Lu^g&BK5+2eYB9kt&Chy zfn`Q4M6*FP82LQSjArinLqVwK=$geu>6<*q=jB~2_&j$6Ca}PZ|3b3InB*GPsR8WC zdaR*a?n&0fd}iig5CvB;D?tY9&>S72HQ@i#6f+u&|KzB3ZAsgz*zsapcJtE*H?CND z(=BR1jTz0wKd7>$x43E@tfF{qbN1lV&EbE1ts7D9GGDu?OG5h7FYwkgf$VxLUl*#P#m;wC zHy9Wj9BCPLIK2U%W3wr4q*}&xM$b{3ll^&h&^+u5hcn=JN7hh-m1 zUgY!Eg_o@Ci6@G-`&Hk0cZbvNW=`vi*luVYA0ZEs-s1)rt%np7R@|$dpbgX{mqGDrvr8pyH$VUJ#p{eOwmGZp&nc8YPIm z*Gqe^tGyMQPwYJa8z?`>2;_3sX zzCdyw-DiScxfm(eg1j!u3zB9pwPDrk6lbXw+0Ifwq8%#>vD54{>7}xcq{~ehO9(P< zALw#-N2Ix$ldJ~$!4UT~G4MeLq#}SSf<4y5q~rirF2v3jJ*|iQU?^1886#}I!lG_d zy_LnY6<*bzuBw=0M&@l~+a$}X0^=JH6Hh1O9908c; zM24g{$zMn|S**+aX1^KBA#1BaN`;`eysqH2ZYzW2g4@MeR3kJH8QJdA7^F_c%u#cc zmXKPcMWmFrIxV;^*H-~nwrliPJmz0iUom!V^aVD&sCQ=N^)>B~OnXf`8B7acfS?sM zmz3BmqjPhm|D_g7CAdXH6XO%~$OS3Oav@MHWMv=`v3~r7K+uWp8xx>F#1a-+V=~Qv zF`Fvw#f$dJO~t?4#4h8)Ub%1#ziJRv9mOb#dp8scdT}K`RcWVwm*fsJ=wJ=-+Y5Wh zGJU7C+glS}pWhtmVI_r!+kTVJ|0Z8Nt2IYPTY8;k8V}vL`9e!*w5``x2K!p@dCP@J zqnH~wX@C(UGlzwx3v(o{l^9}fkQ-uq0ZwKx(D*cab^n>pe(Nic3yZ&MI5y^bY@=#m zChiT)6$*16H3+kob7x;&O`PP)cwb`d*sjCS9UuZw1#tWlj0FyOKb%#EBWezp 
zhTw;O0^xfl3+sJ9S}43FdcO5a0lN@{qts`ip!YX)1!5)OjlKwvrS4OW{UP*~#rX;) zLrhdQof|3+jUA&&@p;+iP!1Gv*WqPju2dQ^X0J`?3GTQb93RXd05g{0xYX{I58ra< zxsHL3+B2+|0JqcwWX>adoK4B}{xgMZ`yyPBV^*P;I)DpR6~ul(>sW%pJYe>Rqpbslp0X^vu63MFpo-IU6@N$SCoJNeMx8o)D97z!m@tlv(mI$ z_AG!vnmwd~S*c6Nr=`uUyzkPujZ5P;`h{gy@;nS%@0}F40_I7`LvmCU{JmdUsjOGF zD6ZA^jT?rC1_x4ou{Mulf>DEz2bSiv6fL2=39bdS7w9i&4y4JXSQw%|!el_I9Z4Q$ zDG01&A!rFgAP3Afg8NXMc4GO(m%!D$adxC5fK3AAxq__%vqFqG8iev2JRu*qp@Q62 zfsQZ1C?)F0siXs&TJQ_8rz^0}Objx#D+!&*3+C6HBEhQw1xxi?E8e|SfZ(UwmBEXM z-nk+5LH4QfkP#RTmL(%kiReXDqq~HZ*U&u@<+Kk8UVSa)6Kpn4BkiDNptUIDJ=SY@ zkBcBzYMiV{WwxV*=RsldIPBMY8zuXlUxEGF<1E?hVZYXuO{sF?wJ0zat_j%kx*L8!tfj+p%JQRk~3}w^rf?yJY zV*aWYrv`*%%l5>JXW1UopyOI`2*sdC8Wo|OnqPt!t+O9|CrR+?>x$HS#99MhC8K(2 ztxNDSC)1fhPHLFk45>^sQo2`KrV{UaMSyb7V^>v+&%V1B#*MK-)2&Wo$pGuMh#??- z+z~K1Z#9v)+g`idzW#bVq1{gMoUr|qNgVcP>@oPGNQ;2&gN*d=zAY>uP$%G?qB$?& znJS(q+O69ljM647X$7?cVnO&T+z#}dTz3P!v*_0-o^!(wrnZ&|G}6Dq_LPY(g6PNI zDl5^)A=|6O>OzmUsWc9Nn`{cOo`#dH{)|vzg>p(T)qv(28GVPgfc0(R^Y45C`{3jk z>T)^vff3@4BL`@XVqJxtWK=AQ4deCDx>mdFRTV_l$&Uk@0RAA#w-SjGUnp%cc6wng zBttUz3)V#z9g-ypia;Rj1pHGUpea|MCNrcm2%6F;>`Bn~;(lO%I2D0PEi9;hV_O|{aD zG1j=HZ0Bz@2u7Al4yhUFui#VCE=icjV$D@;{Qkf@_DBwYjSE z@S!s+2@6-AIdr(Qs<<)W9Xp22I@sW81Nda{lRBinMQvcmvc4D} zLItj=PwpZ>n%0P559kRR$zm|JUk0@#-)zO#%47#`7_zwdl2=Xt!c9Pe*D}}|AjerQ zSP+{a>434-Yiz}?7I-fQ38W)|0rEo`T{eJzko;$_w15_n{Aa|Ner3bK;auwcn7 zxeVbVCyG*_N#y3{=jP@k*ikeVv6rAH&cn8{Xj_C90qGUeiw7c17z>i|lF2F>$|NGG zFl^?G=caFSZhrNtCbr30Jnv@h&bMy;*x_A!?!5cO^i{?EZD*nOm1baR{Lbv5ag7`~ zoA1lsvs+u;qCND-)US|#M873|N!As}KR)pK63>MEvy5i~s2TlB_7w8{(;Aj&1IcNN zAM~-r$Nn{PC0fHWl|TF5vZ0hKf0u0d-g2pwEq|L_`u^ogj2cV2#AB?2SJ*2o0=ED* zL{5Nvli2|hJ;Dug8es@&;u^Geaw7soNFmp*NZ3jGRS(Qa0oVHAJ**PA7H>2(F}oq$ zOy-CoQ%U@a#>sm~*h2PD$fRlZM11<@b$u;XtI5A**Td^JeEhZzE|+R+?;gEHdq^0b z3Ki820dJ#Sa9chfO08aR_L^Y{2RpcEEkB)iT#W{No=m1waKkbWTZrM=(#$fcZch%=s7o$M7zP?Z2(a; zB$=R);Sl8umil$6&d!xy{U7 zTUQUS8Qxr6ke7R>^aAXYC7e;gu_0d=q+9}5vm3<^{F*cC(ti4K+YnD2cX6hz4P z!uKNNd&!H<2{pmgL?(!72E_9eo 
zSG~XB4RmEhJ~vdTc1F5Iz6)NG+)&>wj$`oJ3_5Pd}~f^(Nh*@hrj7 z1gjn9B;`XFAPDnS$e(eAGO&FCD06e{GT<^xUOjOsFK*CArCIO>xBjqf3eVHCV)IgC z)Cd(6FN(%!EKBsu49#*U_V2b0(dBldRNYQLU(#_1KMyUGDW*?jv_%{gXX~s6RWmv zu4+v?2YNR>)Xx2Z#@@bq#+n*kRaHjMTE^5$lUwb7HQaAh(-zfgc3OR~RF&doVs1y+ zYOwn~7HDPFBkNgnMPpjER{0JDeIo;&8ne5-(Gd%^RaRHkR(Sm;V`Y`On!E3*XtG(D zN%d5jDt&6Cd~JwZQ#_fJ-TjR0kx*c~A^yrF#gUQwv1DUFM*E(|dMFi}xyUNZGLT0Id4ixx*U!xSYmhON8Q9@Isb_MOI zQfk3JD!$fO=e3)Nzajpi%y{b(9$e{YDJi0EKIaBSdfpp=|29`w<6gMa%?EXb(p|hj z1d45PlmE8(mfL+nS0HtI1^h{XUeyu3f_MXOgizX{x1_`sI)|1btjHi?WVtC_kpmw- zwit{nag?!sX^y-0lUF8{0{=MR_U%(oxug#5u4*_^P~05cHzr zYmrc$uR`El99|uAB#`Sm5{0vh#o}=cSo9X ziN3x>U{y!QDt1I90Tl4u>VbjPC!RT>C)$dwE0VpvN%|ry;iJc6k^JP7G_m9uGYQ5i z42LNMx?n_*M~Dds3jtGw%WxJZM4&fb^Xc-Z&@90ZE#n}xH|H^K?F2PgiU8cPzG*X;t<{~s@Ewc#f%^JAcM5Di|8`8 zt)i0RFNzmsgatb-<1vb}%dhXOu5I)p%B$7pyVM&>MF{e|PB~fa2F@KDSj3l;*s{#GqTM7HF%D=1OirTVkeS`pN&nEGQGf zH<%OJD%}g%OE8$*N;K~M+ek?Ek@QZ=K{797A#g_8M^L@QFL6qlBUVX~c4TH2DRftS z1b-$Ond~tXaYJ&gcXf4ltPN6Z17uhyqG1h+MJQWB&(EN5FpJ-r7h+IAP&slo!ADEf z^Tt`kgNZ7TUv8XYs6w97>53j_Vr6P8kqpd!*b?5bt9S~%0;F7}5P?W(7@-wX9l%d=znfr%CJ4UDvf z0&J@Ey?1+whJ!}P_Nt|w7QO*-LIrHK39dq6`Js5_95n~<#OEk<95W@!_{x=n7RMK2 zd8s`CD?jlZ8z-IvKWGYV0Z@q$6U`BC@J7k43WpDZLn-k5GBQOQAcsyg#4r*Ipio9c zP+$$N7F9%~gOi2PZd0A$HRN;fm=U9+Z&pMvM508voY3C|NIgC}UlXe^X}0PW9j;EB zW;EY2{`hNb&z+~i*UqTH*B;-s)r8xfu8tMeHqBsd#}mbSPv42dG;f?)T7UHI6#fpc zOW2-;t-#I^I0!>aiG{+{EbLCg0>xx-lp4&R%$|PWU@&Owy#L-OvL|mAf~roRAr4^Y z_z~mXO}wZx+En9mn8_apw4m8}L#<#dTp$Ta(Oj@2*=@;o21_yny8b=XdlV?<*`^&veDfVWp&KJeGyLt_=znKkl`P~Kc#4@ z499g_ddY_YQ55{%%4XPZk^pu>Y4Mg>6C}e||^>sa*Z2KnZ52N|HnG0$F z`G&|dLRS0Ictm~a3n*_t;UX(CV)#q#-_~f>Ap_1oY%e$hAj8a(^$`M0)JOvzCB)@7lNe+IIY1- zo=lq;gL3r412BA%8V3g(5H3WXE?B&%CiB@X!h+g;(Ew(SARSWTIs%W~6~~^P9c+)^ z^_Yjx8wT4Ah*(CPG7k;>8HMV^Nv9KvU;N;6)priIw-4S~{oKL04BsKRE&4jp z09c=gfI(1c!91En)k2qA3?+ukYH6&bZ%DawSqSkJ5R`@I5i5=O1kY9(I9#+r45iUP zB*og3@Clru@mxKxR$w12o=IT3g<2?Bpk~bJyY$?eRc&v4^tnq<^7&P3p1b5b@#LlF zKKcgmhVVezd;C~u8|f(wVMmD+h#?X>0T}j1$-^FId&mw4vM2uWBWPghg3?lZ0&fCn 
z&neo2W=)zNoR=wsdFjG6WPs_B;xzpA#sBsDdd}d?wo2 zxy~oXeDy!@moVoT`iN2=iZp{$KdYD@q7d+772=l>3u#7Jq#sw@4>KUdK*s*)*};K< zD=qs*TPD`sYBt+z%vTy%Ah5Hscqz^j$umjo(RKH4{n;~HnGa{`Ag*0*8Qs@1xo!{K z>rTr*H*RZ0%vka7lBW~Nr0s*K`pnO^GN+^oa?hy3My}H&3Nk`qUpOUBgK5&b3{E6+ z1b$sN1C6!8lia9u5RHvA)p}i3A|8Yh5rQ&ArxZ2i&@$Pmg~)GS)XhrwQ{d@{8!^!554>LAvO5K>rXuKdhv6bW;n7<)3zPK z9EB}PoDri~XFAj55uweCwy3afX9&4U5x#ErIu1m|-LNbCo{*2!V9DHo01S3noRFa4 zmL)qd+1Y()yBa6JRO!b-=tdf_B0aA;%39@dFt(?zrud^7*7o2FuRZ?ZY33~M`@4&2 zoCQ&fM_Bv5JKe87^!RJrnDehLUF^7Ty>8dJ`m~_0!iPw9on>ct#GZDUqb^B=WcclE zLQ5i36wFmZR>(p~#lDuOb@Vej1qc+vdV-@T(1@19Uc_KX*q1^@T3xM+_Gpm*MLTjc z2(jGH%jq^$TTovd-6P$T4r}T*LK2IFu@GcS@Ed6>R7H$mjpV0v3QWbukrt99M3;=z zIfCS4%8*R`;85Eh$RNqC)}hGI=xfEdUIQvYJY~w}rcL+JVc)@h;ik<^eW%ABf9X5yRtP?g%n=#HJ^ukG6EmyxUY=0CxJ|y&w}&`CR3b!1<_R2-3!m}wu(y%k+T+m zZY>n7tj>zrP}_RkjV>F=*m{c3SoFD4e1=87T0&n67J{Z=6Q)_163G85zB0H_ z(Au8}+P-+khxyz%%_9z{L=g$8nz%U7zo^<6@lATSdmFMx z=dG$^7oYz?@vE($YK=UsHGF;dO)NW7{HKxJpJ>gdK2|UKk!QvFLEoBmTqB7Jhkz08 z;EiX7I1r9d8V5om&}x$?k_S_^Uem`#Y=r0kg^X z3srSmOE<*@&%MXpYait~Q35z~@=dZ|1J0yBSuS+P9D>(@7K@?U4HT;ads=450zws` zlRP+siGytb_CG(cX0WrP*tznTr1iQwGKO|lpKDWheV}UV-mO)E z`u?^Qh11sQ;s<08&r4-__E|l6m~NEfcoSQzI+C`&Rjc}J%>y@!_+c9fCBocXAf``O z((HmO!?LTgy-zes*t$ul2_w{1@^hTkF~i86N+8%3NGkltgNSp$Vf?4QZ1NQfwcWwz zoJS=im`4^#ef% z$Fjp-9N{ieN`jAgn#Q)oYbum#!N+`Vd!;zz=!zSB)!2%>C5-TE3Nu5Bt$3ET|L`M) zXNrIO?CUI2`11W@$1sSG{IK|=v(GZmGg|S@*YE$bb_|;Hk{nP0nn*DTz};Yj-$Q{( zz+HFTK<#&Pvt}$20%^zDIukuy*M=p+L9mCer!h%P-&e-=Dcd zd-&&%Ja*|rBpHlgj|u+pQLG^Fgs0ZF-fP0 zO@ev6y&&wQSBe*fbS*A;q+Og71>FE3$v#kx^PGr*cUK6y0jdBVRWixKEt3ur`eK8^ zZLsMlAoyCWsW{XWi*bq`Tz|LI_4ZRB*-*~!M`06>G@)GEH8S_T(q2FxHq1xZ-*MKR z+Dd|UN{^ZLE``^G0$t{$BoUA^*&jm(}czG*v{jdvpQ*XlUZ*!1?F zZ|g~=dbWN0t)|8!3%Btt_g#2mV@s1UYkEa`}7TW_;u$D?h#yiIX# zP2f=Z$+;+Ci{KMi885SW&_!riG61xao5WJRr(K1GuPAc@k!@df< z3%=;Jt5;-`y)a9{Dk)=z;fpSFUJ1>r6c=1l4NAn|+VawM=|20g5UYPIez{8|#h;6i zC25S&gR~dEU0y?0N4N?VZVr2W9e@7{jA2)adP41?rJgqjDNB!`AOM`^3=%+y;A7fL%L+^HAY0{O1?gW7mBC+sS 
zg;MolS0cwW+7k1NNA#tF?!UXJZYP>`?JAVE^eRRW-GGoGzksjj8MI7=*yAdty{o?6`3 z+}LcNSuA^;WQ5+|)84wapH#SqzEiC_i_dx- zjS+`+ZbKP<$(S&knbTN=Jsm2i;1j}%F5-)EDifq!+RugY{F<|e4p2bM$0=euDO_O5 zUY1OQ1=9XaVGS2k!Z^$YvIkILEwt;w&k1)u2#!Yf1CmC_a7MOz8LYwfET&k2()xj4 z5=L7tc&c$;P_VkiJ_u1FDHR+_y#E5?T72IV*dGgPN!2A0hgj9vF$yy;*F&)9Dj_9? zF(>TxNK2r`h0P-Ps8n!ivxM}6<&-y;<;mYghm~Kn@=1{te=HN>_rXc)Vk1s5{}cf@ zGA)oMOnNY!AB6u)JW|pdk|;Z&6@f?g#G)-t4RtzCq4VYRZU-o97>h_T4w({DhDe6_ zrx5eBEUma;E$}J)6yKsBF{%Pa3qokUP$7RY%2)6j6?`@8ZYb@VMptxJ9x2AC(?r0D z-dRC!odBFd4PGZ10{|y7UErMqh!>&}EQeJ&+(-^8dK4Ji1iVaXO0NhL$H6hxHaHA#NfZiL> z0@~PuBecS%LHj)lr5vv)0Zo9xI!q@FGDCDoBSNoIAmYF_4-Y>~azSfk>LVYSQkx@n zHEVY6TvJn58|vr`*3ukF2(GC8qc_ghS~ZjFu20P^kE00*-yN+t;&?1_ zAL@M@ukB`etEERI*cM*gv-V3slWmsB; z*hOEK8nYN!M5Px6s4QY&04kWm!Y=nVt96?jFEJqLh)Ba?`@hECw1N}Yp?$x*s-k4u z6PkN8U5%Hfkq#gA>FyeK{EaWB9{u`P9!q^OcWF8`x_jrw^b5KcbkErC-DCF@FAnYO z>Dl?qlKvxLr;?wGBIPU>8ta5DgI>qxO$ZW7=0lSEVL>Kafuc(iJQ{RN7ADmv_I30Y z-)_h?1h8-1PZVDgasV_c+(bmm88%cvxwm2AvEJ{#OL$FRY15;&?SiL5a(5$gS(n{$yiNQiv|mJiq2XmbB6LtV%ZnFb z>e8>l6tQsyO~HCE`Z%MYC3qJ>TO<6Ou-m=2pHm1lh?%FL47`gAx(K)w!rD>^;rFx{ z_bvK84O?!7-}5`fZ*JRQcd04CA_RuK_IPd^Vor1)=su$*hNlmJHLdVl)RFQ1-KbT< znX)lb3|hy(c8qiw_kD~_gd31|_P38LE#Gy(YM<(?_)+Q($BO@@R07lRS@wQUc^A=0St)(r{b2RV>%P}q%j>+K{O@Y# zy~au9*WJSyMVX%7unzF6{JHXc`FO$4m(BOR>Xko3d7L#{_8gVH-)FCF>;L36jbRzA z%hwZm{o{l8$){wMTa^>algc-hpTqZfGn-lxVE@EzyqRbDX0Gx3_$T>`U}Med z4)vH?P=9H#8Fm>SFnrPQKMn61W5yxl9^=!-ADV)uoav`#pE+m#l=)}o%NCQR#?oOq zVVSeMX!*Y7rqtF@l3^cDs7b=m7|sWD<7`BVym{@Y&&Rs z#&)sFR5elcVAa!A->UitdyD;;{fzwu`w#6!N7}L3vDfi2$1{$-f2db8eJy$^Z|K7%jf zyV-Zx_oT1jd)MFWf3n6`^JL8%wQaR4YA0$xTKmP?AJi7>R@CjU`)b|y>)xunTyLvy zsb5jQqh70jp#JIlUo|KVS#Zz?8_qWr19br{@QJ`nfxm5RZd~1XTjQr1Uv2zlQ*+a? 
zrf&v^f+vD!gD(ev82nYJF?3t#Oz2yopElPu4>wOVpKAVU^Sj}i@agcY;h(nHTQ;`L zwmjYPot7)D$=3T?pKg6KVu-AdJQ?}xNHIDTor<1_J|F#WZ8dG{+h*HdZKuFn;+sEJ z_9GI3K3x2g4>MhPx5z87i~Y$W9UfL5*7FRWr~j(wDGKBN)$^*-!Ups_PD8RIdfuqm z*=O`T-k!r=g*3$sBoz}z$vlGv;=ky54r|8$t>;x`RQZ*jHz?KY4n1#F8rc1M-lX{0 z7nKp^Fy8h&sT{?xrUaEK)H#6sar_>|%!4>ja|q=}MS2+T z2Ae@y9QAvVwxPyR{LLx@uvPUad-b}M%DUak5tMeLg&EX?GCp#6X7cEa7M%J}aBKI* z?%4w(UQ9batSpXD>?kQfc>*z1;_Aj-rj5 zlxfismg1)ALkE!@&`T&)4xsD+(%&}n0gQg9m>13SZUK=#lu>z~(gnL)7iQUud=d>U z8`wZ_=fR@~j@~_^^#uoleO;NZcyAwSUEiFtSW!`Sp^L)+#sM*M>ZDu$261!d@R0+D z4hH+W@rUa}fanZH*R_0Nhh}FEc9mu)u~E7D5XO0<&reZ^Q^1Tfl^O6xCll;d7Q8X8 zf>kPOm34s524K!j%*Lufn;guEXr*fAW*+8cKG=b3SS_n#^$Y>PA9Iw!Sf-uimhgA*f1Mm zYuP%so^4>G>?XDmFD$;9-NH7rEo>{>#>Uuowu9|tyVwU{IODvpM#M>`C?% z`!xFudz$?R_F48h_6++Yc9wmfJUnc=!^5d1n*1oz7+3E^S%u4%ksW{ z-Z#nnrg+~p@6&kS4DZ{^$5T9>=J5=VXL-Dz$0vDwipQsUT;uT> z9^cCoy*$weuQE?0cp}LYDV|94M207_Jkie+lRPoS6Vp7Q@x%;I?B&T`p6uhvI8P>c zGRc!E1YPlDh9|Q;+0T=cJUPXa(>$s1f@<6PbJ`~=BX4XgXW~4Q;F%=PqgQ9Fd}@kMP4g*@PtEYDy?nZtPxtZZ zIG;}N=_H>{@#!?5&hY6hpYG?=lYDxLPfzn{jZe?;>AhU*w`~4l|1WJN*uYz)E%B3gjC&tIe>+`I0d_0_2w&rHW$Gh@sEVwS1 zH?&S-K*o`+xx6tvoHvDsG5qm7o9N0LVquIcsGT!T4F~Ct>^xsFl2<0y<<*W5N=JgH zf~U~(xn5)IscpH5t@V>*@|#un=G|;W9iN26)56 zlXFPd2MoSSKc1O1cJf5ZDb?O3z_inc)p6R#&A`I ztFF8Q%{T=}f`Gs@hMl*MOaxC&1oL(Ptt;=0ZQ7ALXVBJ;x8$p4!Y8`&uGpq+xlP+; zVSNbYZc$zxJEu5CcIM7G93y!)Ih=QN5`qG4htJvQrwTuL=EF*;ty^>F2x|eX;Zs;# z>b4^k#$%;?y}VD40PpGUIA*c|aRt$vF2nIrF6a%5O4FjRHJr-Oc@Vq02`8y|qBUpq9 zTC_=|`F298&RD*qGv9&j5(B1g07~6(zl0~VVWLyNwFdB|E8n%a2F#a_b>x}1S3tSD z94gCi^~8cHG0tApVe78nuAl-p92S);zOM>eyLKp?J=ep$m`NYzje*|qkqKb!WVS0G zk9GT3bmbGjt12*T8r73n3dPqN><(_Aoe2=$bn4WG@CHzV9OyOZ9ky$NAyN|kr$9n{ zz<&ITDtYTj=gg_@a4@*y6xvEJ-41rkHu46viCV$@1a0Qk+j3vwK{Z(a6}%9?P=mY~HN@&3D2JDSMB;$3hqQyx(+$sivU$77&VM~1hOELt5AbK}O zbQpwJ05n-qoVQ^227~Lv8>ll{t$qPAnt%>bWk;?%xB^U%Mywa2u_ch3T5)v~ZY{D^ zxlq?5*F;!f8H}+jKcJ6bq_i{>#CNX+Txlr>W8q*oL2W&#?uzm5bDhkCjkjX47^}Hd zymGNv)Gj@`tjPYLas1& zMK?By9OD`g3lQiEz|xCYmQXO-Y| 
zQ;g6tKMJsJjGb4MHOOp2hEe9`*m)*OZb3$rY^FNHxV44qP-ZLDq0Ba_LzywEGla}` zszaF_REIJ3CWBKf2?R|71YVQ|0s(nD@ zsOp`ueE(wAyXZnxy<6m{>OCSyRS(AU1B+D;(S@iwD{@rzgCa*&568X&|7J-t8t%+n zX7Xyw))T~Px)cc5g)s;q?2{nMQly?erx=GJFm%Y&vMl`uxQA7g=s8tcd#;5&vJJxG tBe`>`w)R|vu3oY{2>a6NN2Vb$p$g>T@pFo;#)kMsZl literal 0 HcmV?d00001 diff --git a/static/open-iconic/fonts/open-iconic.woff b/static/open-iconic/fonts/open-iconic.woff new file mode 100644 index 0000000000000000000000000000000000000000..f9309988aeab3868040d3b322658902098eba27f GIT binary patch literal 14984 zcmZ8|b8seK(C!=Cwr#($lZ~BhY}>Y-jcwc5*vZBlYh&9^ZhqhW{ZvpRobEY2 zRim2jc2|&)0Du6#g(m`l^xtUf0|3Fv_;2t37YPYfIRF6U=Qof04SefskYWWDCf0Ax zvBgA?Sg zQ{3X4{N{ANb;56uL&kuESlGIFd~-hEx-kF%7M7U{z_qbA{?BgvJGPPkQ1m-q%+}E3 zdtHw2HU7t!7$h5R$XB`1U|?VZ2x4oEo(?{~<9cW^U`%1|L<`O49o%ya3Cchk?TQjvHN{6At8vTKtqH+gT24Lz@);yzA(}YXmPMtu?=J) zB`AsehXP=+al-fk06b49&+lmeAMwbpQMYtnkU%E5*g+%ehk}td81f)!!euyQg~T*2 z)@9npKco9a9KNs1`!r1D7wjizEmb+j<)@`LL%3o_S^DOxFhSl--hj14 zM#H5aHC`i!yXJ}d7a=RP@L93co8&-xe2dITtXa!y%MBkDB~oaSX8=|B+}p%5@uonM zn_)dskE5dgxwy$B7UDtO_s#N{dQ@IiYRc?**2_dj%d{C+ob@a*k&~f+QCmvu@MvPv zXAzzv=m(mV@f35IWRg%#BWNS#Yb*+XqhW64orn;jVCARAp6(CT+dJl6*AU;? zM*P*yjc8Zknkp&+s)x#G((ur2&&kDr+QHf9@3~dEGc~r>L7*Gzy1Zi26w8WWema4O9nUHF1Ay`VkG|KN;jIkW!y|Iqm z_{%A18!12g;hLL=>v$cmr4i55J7qcYXU=B~yAkp<@s~C6tv|V{8@vThN7>Ar*+kUT zG#R!Mo!W$4Nb=yBdJDs4I&6_7L__a`awb5B)C3Ey=!p>9V1OES1_-UBB15l>gAY6! 
zgAcgD1lD&~n=am~Xzs0?{DhP>B#)UnBu6*&eKAo@JpMbD(YyVmvxqj z&@&kK=UwrH$rMA@KCPr0_vdj`DwkaL#P-jJHm=bJ?i!1 z8}!q?ktnS3m!tlo1#^A;Kj@_YSVeWK>j|c&ToS7G_GF@PG48OmO z9f5EK30J^t+iqJy*#ApP50`b1Itps9p(Y}?<(r0xM8Llb@Vv_bC)p7#QQo3mf&A%)o+*0URgNCG za4$QHzx$SKgZ`gRt#R0@*1!twSlSHhsoh;QsLMm8r|!LTG;ZrmyWdoHUi$My zm|}07P^J|LaHp^NgRiGf&NR(l5NXAon_%#8@W<{J!y{jdzW4$&DU}1qKxKQX)8XSL z?2mV_=`AIG5HC-7@$7A6{NO&-ydr#n74Uj&pF-Z$8y{E$zC4yusOM~M_{>Se`eA&?^+`>z6+^^e z-9zRTW5i&l^d`h>3TNz)Nke3o@P4#IaDYO_;5OYM^K&LQe2?L@Z-9NqAh8)@a0oa2 zBgZE0*v2lzCWIB9Dg+PnN60WgJt9X9;>y;|Kz%P)#Ht|n&;k+1CZVGLZfL=$4YG(l)XI zh)7x3yd;LHCXIWu%}triolkzfz}&Mv;H7!jBuw@gw*s$C$eu=Qa`1sc z5B}ui$H!Ce4T7GYUs-(D)QtlbRq-=L`#jXs?`*z*GJpGBAOxgH)eXYY$Hg~AG4DOq z=I=cl`sYCiMJzXE)U-~?69#ZqtZ&+AQf<3#MTmlm%g{%Umm_j2vh91ay zqv1Eg^xKZrziV{;&zZQAcXh9BJ$2;6V~=dAB!U$EAp{B=FqE%)N^YkP%oiRBdy5yc}^m({p@zFIc>%w~m)m9mf}!-OfW5B#m6e+P`6X=P7dmh0oT$%qeiyr_JA?e>=;4&-SO=&B8d&53>ph7P{!2UjA~-<}+y zPd{`k0wz%CSu^`360$||g)I7cO(uA+j+wedG2^l`$+y$zR;9Uh)P|Z7YDCGkDr?Emz*2pk z=&{N3d}iyDCb5)=dbZCriD^F425+7nvY$^RexMM&Y@~fu_8dox`Rv=J+(Qc9 zWn-qPasT@eA02E~FvN~G5E{6FE|YOYXW<6Lr~;=-HsGPY*-BMa)A~nN0YuSZvNR`; z?3GZSJ9gTT=B1hQ>?q8Z$4Lc+-+cJDeA2{i2Y;$GDd|}~D%QeStOPVz3q!BG*3_3< zsN9j}+#54rC}E;sx!5Odt+_wQl@-R;EOL%rm7PhG84}(HzEmEj=aMrK zIbG|+mgHB(oqX}A(s99tu1a)pigk_tAoUw~m?aQ&b3GAeI>XD0@EuIa$5l*WS1n*g zVJzBC98rNH+I+s$#v@W|d9@)RcYCycT4=Se+q`R8J-~u{;9-d3WS5+P6N)5m6Yiaf zW5r-x?=Ll_GwMmLqv7bF{L`WyIobWu>Q~t8YF*XhO1GVnn(*7@JyIqu1`U@KGOlS7 zDkIuCSkaEPKx|W0eg3B=i?9iL1FUT5wishps-be9I&>pL2hh8|-SBPq^WaW#5tOE~ zT}eCEtSL~gqcqjWVd7I9gOLIKbVX?4W{OO%%C0HvcP#h>_@M-fc}T%}R9KJL<`U9V zXu1u!HS7X0Ez~@YB)L|YW@u9W5-|tHX@2Vd^Q|Yoj6j=D&m1~FnIk%im7$;J?kgN=T59<}6@^cfW2XSeDIy;+ z;ETOlaWdwo5OPoV_ct=W{O6{#XMgMJ$9oeE-~m`CjpUZsw{hJ#0gvO&c?Cy}%w9Ms zF1qLs5n#X6OVn!u32_b_qY`#EKw4CB&te~7XZY(jWdCXUQ92kuUn~8)qF)SI2<%X% z$*37c99~#|tO)1lveW3!TBbb0&BE?sJ2VN2b`;e?d02KJA-GD}T=1K%plNHtYUYXp zgJD%O29qwCKm_~M0K>`K8^SP{D*2gCTZu`SM9S}-Ykw9zDoswD2oi?2TS?0j|YT&|8hjXaQoPL@9w`)i%-M<8&28g 
z`*F!&y{zlqjf@rLrt~FRSN5BK<&28)W4m>{vp08~u*1zMt6=`$Tiv_$EYw^6mW-W< zt8zy&d5h9t;u3Jj2lY=`hj8Cq$z7Jwz83FVg8EUT_;y_|+qcUF=C!0ITJ*U22Lx;V! zcKoPS=n8#~`Z=P6J*6*B$?-V%RjyUCCvVVwdl4E(WA=YtevNLvY$%)5Bc}Fw#;j-I z0#n6dHjW;Da&pE??)2+d3EbXdopfMeK@6A7^s%KeI88UNE8A_UQz9pRg$VLmUKJVl z4I&pPU<9*3OS$nt9-xj5K$8UbcV(lbl*jMiig1b^fo^TkNqIjEk~>Q^*t@Y56IUj>ezm7Kz-yTs!n(QG%R6u)`W@o3~fE4rr$BH|lu!66Zt>E+mol2P_*O ziCJ0f=UY}ApdzPxn7#+JwBo&4_`u(lc$Y5=bBVwn<&r;>yAaRJ-31VEoTj>*61yyd zp3YVTLPv?QW5862ulNZ1OgO37-b6gtqu(;CiQAmQ# zCr+Ycyg+WEcZ!?X&fSUptp-8 zOKi8O!M8Q-*Qu1ps0AggluG*V^1Nk{%4)ki%nw(VY+snRW|#=(2QwJB9_$3%HZg&v zGierEtLuJ=$|~f4f4fwK5=?TPAjUyj8Yew=i=kkkgavOh6g$X3)xPOz)zymuI+`8M zw>dd|>IZAe!R{&|(y{JJk1V~blgfVPyc@hkWl%sl(2&%1_ zBayVylj>~>f=ABwi~c<+Iw4?r-Y>*Ha5S^04!G0F`%{@_*=~3GPH#N7wy(VW#9K~% z^A}g?O}_Q?lKt*@WTk_H-hSSv3-$^pR130pW(KZ(yEogRXYxqJ=3(mI^u9}QZvQ-a z((-M|R_NJHj9Leb)GgW74j^HIe+xHZ9kE0~@bpOQ{p$rbO7MWSD}JS|^sjCkYlGuC zUORP_Sk^=&Xl>}jo)cc3(U8>A$EKMhU3Op5&q?!5bIRWKQy#{mHJe~z zpD_@@wKexPN7*mrUJtXFETM6Et`^w$d}C!Oti(ItQxZ<}ac+wqpcwP31>V3Xy^R=>z5USMBZKK+o&=70h3Nk7J|rhq`+&2=kGz zbKt(1>sMjxt*%JtH0X1QUjjrO+!WGqJ~>^oI7Jo_J)Kc&*z0~air!w9jp!g4?wfgq zJL+up-MtWP-#IVzI~_ZIvZ7?AAS3Z;mPEnwP_cT! 
z*JJkw8oBTf-J3$s=O1WSr-_ar>?Lq(5SfWB(V-~fojAhaKW3_-Gv)6Cs%N6kHOpSA zcS_*;`P_me1{t2on+Vr1a$ReDFnK`uz3Z3nG7l^pUjIFTxC`QjIs zw*4v<4CwC+ww4{v+O69!bR4?vCk|s{UsX-Jfap8;>_AXh$l|f<;E74Cz!jC7G9IXy zRd53A1wnR`fLa1lq+bZjJc+3|#A70PRV!DqsMBI+{Y`^Fjxpas$8>UHzBCi7^C*i6 zK(hW0jN5kPJk|E<^L0~z;qgZas_$AoR&%@#wjhOvWDm=21DL3NucshN z&4&0NC>nxBdAUC#X!+LbzQ^kjjbhE1k1OVX7~$`<-c{$9+pA7>tr~|B)r7k3PQii)1bP3cLR~PA43g zv4&593)87tEg~Q62W|9|3QnF4m?e!IAcZS5Ibl^1YcsARB`ADY4@045znu~7a01Rh z>+l$JuFC|4z7hK3+kCD|DCv!`W2+C<_BhK-N=Y> zl~TeiuMqwCt^g2?J(W(R_x%hzZ2vT01(hBOkf{W6GNbOatvp{|VWfZ@Gaj%s85B1e z{1-eVWEKKhhEWhGjoh&iS!ze1fT3o7ow#1s4uhlLS<=;VminN4iuf0PSxB_tM4{Q*zUBpS#fqtC8M||{+PW- z5(wRsj(WEBgf#w`o)_kNV2gkk)eH-#tUQ@!r1^IZh&ZD0`?tbafwU1|CVhznf zNcNSz+~+>zhi)M#9b%<-D2l7HP?UKitR+ZD(RSuH;DtL1{iZh<2ucun!sawL z`=q-fJdKD;G+Bv51liqQ+tU(A>7MJhhOnA&5qu5Rl=-K7=a^Bc5AfVym}bjN8}a31 zSC+FQ2;YpbwsQh&KyheTK+B>WMu-W!SdTKbq+HdKtis?NxkRxZ$qSeOCGaBhz|Z(DEp*18 z1VY0=kluAfiGjwwj;QdjMMGCGU*OjKSx<7Ei}Qj)i@i@!ss5pK%B8wKW43@}FZc$1 z-YoNXL5^b2WSlRy4ve@Z5jq~L&dXc<&fA`H7{ix;`+e}9bh&Hz9biU!LH$`ro>n{E z60{dR1cz+zB{R$pgoATCvTD1<7#BtK@y^5If#X$}l~ytQCQx-!#mp8tbkW2!!BzcyD)40=2|*Yu0mzK2QhCp1h#(R@$2;3wHfiXgEyLjy>&XZ{&M zX|0LbwAC69Uagm>U>z2#~Po-F%98OE1a8pWC?$^=_E$3P3gIXP#XRT!S%HmE3Nof?Q8}oXNel$6zZ6o5zeox?V*DP z#;gc)w7}{?5S6x8>d);zSK@Bkb2cjyb4fpGEQY8yvG{d=<)f#aeV&c7cz}dINU$Mi z(%?!S-H5nn;V;BHL`q}2RFUQG#`yzUbSbPC|xe%Okxc%);L zG_IfQ50^C{^A+S3h12axEIV`>eqL^5>t|45rId@hnBdprP!y7Z)cQ%p(8ARJ5fkIp zsXBB>UB(p=2!Bb&w+Ydbzv(Zoq=hleRCOX?9E-CqQnFv*KyBvL5g10fl#6st3l1r^ z{nu}0VD+#h3EPFLP)&G6MVtXL zojBMIJEED*owWecK9Axcvs^)EyxTG6kCj#khg~RI92J@%q-I~YswpGSNItHCSVz-Z z$aI%XJe@qt>YU7K`DFEY%(uxUQNk=Y1!MdKB!^j3lDhl& zB*r^qUR%{ANk;qd1q6@ttEMdwk?leq$2=`&Sl6|!Y!1R}KfWg7%;x6J6}JEmGNXFm zg|_y^m62>BRdyx`Y%_8b#P`(XCq2~>tsGTcLL!`UA*V>h`1J*&%T zdIHFYXJMi^OA7M~hfB<*ZueY+JM&>+Qfs#=kiLtfx0Ft)66%I_u?evJL21EhB1K~o z`y+e<;GfX>bBQsII2~e7232`QBzVq9t<1BI9gB&3v^Ec(tsL>=LHPD(3RZhi>+eHu zd|8z;=K=UNDEvmBsN1(=_6jNRl;dDjM9kO}*MC(c^F3lY{V&6y`f`AQZw?~-MqNy@ 
zTjAUYNJv+3iVw0y+J$1+cV)GLRf00|eV_EtDGG}ZM`MgKy1E3@Y68%4IWb*yvmw;1 zW4+u|$L@h*3@+;&b&FewrGx#rG#a-Y6k`B#0lUWXJ{=|geA4hq+^u1speQWAISOkxN6G2HT#(@9Tx^dB9XN_J?3OOn|~ zl$aAWj7%vg4nFC>fH5@o+O&Bq=Yw0FizVKxE{rDu<>BtzXAf=xem*|A%c3k`_IB1; zS?QAC^M3G%gl?zt#n9;@+H;`p^q*0YcXU&pIoTNQ@}1(qL22#*r= zZZi_}Yy%6t5zSkDn-$(McjvFXR9jx!dN;Or+L1<0IbO;R%_-O(w+5pxh#!$=qJ4Y4 zYD|XROqif~U`MF-?cxEZyv;j173tj z-YY(e%y5_KiS|+MCa32c^uh!YtRyu#U+7JX-2>9+vtNsXrX)PoX~9gbOv0o7fgfj} zB`?g8I*)BLm-MV-8F|9RS6zfd%mWs5oU49T_0Hc?R!?L211om!o0F5?OCs*R=6-{c#%b^7GQ}uK~jPH z!qWw1S0j(t4IW+yW|v#OYAN)jCMFo4AluBz$FX=j+Sk*9N}jv6sek`8*blveRYyK6 z@$$QlJR0o@v$S+f-zsLw0nh#kUV&fD{$c1Ky*FirKmqzg+)FWg)*qYr#!&xh)r5FM zyIhdtLDGe=z-F!B!f`gKQ;5@DmkA~JFJ)}&q2vWU*3SVpi6R6uxf)tZkEGzFa5#xh zgxWZZW?URJ?Z)bcPP-?uZsE@O`(e|((Jc)+yo;i4MIL;)hlm(2w741^jymCajG}`Y z0+9`yJ4PswEoFzGwoK&Bt{R)>WKNgeyhyZZrCWq%%VuYWOSZTCmc7B@AINXaIYw>g zD(_7~W$3#FFPFybE@REcF<7d=>Bl!Qs|)m~SLEeCXQD;JBti`=eSRQFLEkCdcI{wy zZh^j@{zDOlr}L}zgS3@RiQBzf2Jwro|}z zp(8`DShFcww4*$ph=`Zv&Qf;2lWqEvw#uf03PUx5*6Zt_ixy%t9Lsse#_!)n3$--l zOf$;2nUJKM8%rIVj%qU1>XT_ym2MR4aaD{P*8oOSZgIqcWfWlkoR%D~ll0=66q}CTgR^m^OW6AzkH7eH)iozB+LoEQPHk( z#`+MS)QEj`X~>v7ZPYe^*p)Xt3}Ja0T^Df?O^X*F|EApS<~55@Q05SkK0sF+UD=#y zt7#A&M)vf*n^sI0F~cOr_VJvOH0Xd?%4c zS9%8jMQZ#au03wIpvh_4m~jGGx}6aI{d!htmWrf+Ec501JY=~N`(k@SGWn!aRsfxN){B8UN2djrCZY-c;VfAmwKt~0mYbZs}* zN)bzhWb*t}1j2|hWp6O^-@hIy=snZ+vUl(7haLy(cRSqP)j6yC>k9j)-0U_2f`oC* zDq6$j2-(gxSw{;!Dp96XDiCcn<=s}RfXP?}T|Y2spwLwsB6ETb1}TfF=R{7Hzpnh5 zA8mde1`9$mIOIAp6)$HGzWUmv@fqHkz82Ew-Q~St6-GJ%T zoE#?-c3l0~iaA9*ZHhlS4{FA<9Xf40OlkBmvD;}@=7o63Ay)&<*d*Y$1s;!ljpE;>z#T%*x>L7ZnjI45Ij{?bC*!?k!+qG ztdZ3sm+s_sl6t;4RC2XWn51!HZA6K~SFd{_-)wmP_l?z2qE~E~<2OIQ+O+`I`?nv4 zTY=XT@qB)6R50(?106eq%h-+tvkEe1h`*@lmM&+x3DEC^osEhDdqcgXu%ke2MH&Xk z1C-O3ZCc_QBqYIvgg?eabiv}wJFj##c2D8mmh`lixXcu@YxCQrG8!B!t|Fs3VzCQ; z9hr_t$>&PsMb)7~T9Gy2%f@h*+#5)SQ1_;4J^h9y10)bshZ z;l2nhm_6Q$h;b}ZWEkFj``_4Ccc@<0bZ^yIU;nEXlUv%4ty-&3ERH>Fs*hBk2V4(@zX=>s`_S;> 
znv9FMT_}=x6fgK5Eocs51k=oLfx-1*kl`Xt-`Wy>}^8>`FDC3BHmx0tiP7SUAm<*Y2o55|>ORCS?h9s0JBXbw;#Cph$cb&794ji= z+q>GiW^0_In6F@|`Go$PG?<~CdAy08(5Tw{%|4#eF}0z$P|{heEvSj_fb)BSxH5<| z05&!eJ_hd`J6pRTn3-`De*kX~6ob6;5$76=(raIQ zLf|D#m~aFvX;k~)4ngj9jDkYEH>=9Bl0Y4lFbo2hwZ;8SM5yle*pjPB#+xSFQmlZS zx-6>M44W~rAali^78Y#mRKbxFx=eMiUEa9z(ucTGd4XT}DvL>5sH(2)4?_+6KO;-8 zrn@NfBWJqrmF0aeV)74j{RNieoN=x1WWDtZBl&cYz_p4>6*bDFG3D`jit{?pN}=Kb zA$HRnUz77!U1Y__9o>Mc9eAhu-xJAe)|vDDd>|D0$V1~)51#MF`!ucYiH0PDBh7hd zP@~9L9U6_>0ITN)i|*;n^J#Cuv4^nl9;%&+iqY3>S?5D)G#pDe#$!hX0bHuh9I~vq zA2D4T@VATH2!##Rj~ya`D*lSE^NQsk@^8~~tHFwqGoQhqMQ94Y#*!-iK3j^ml#r&i zOqazq3pA5ARb?ZISzwF}DezJS|A=-F4_sjNEx`+yGyRH{IhD+PA05?2fF70oRRvbTyn=GafV{2>-SOR5)yp}dOVJQnupdB__2H{ zi%Re7Q-_+nW%M@Y$ImbA3k6IhfhQs^_th%;8QPSFoVu@2dYLVA7&B7wEV3z3DWY|4`dJ^1W>(H5b9w2ewH26TeK*KTVdYH@0yhXow`Vt zEiQb%wNti%zh@KY^!l}LTgdz&+oC$>Osld`vBzQUXWP=M-9c}NQL_(n4;71kn5XGo zmVOZ3ksQkzy(!yLlj|9MYY%lc=Ah@ZOz?K%F2w`tdy65K9JF()4*MSTo^&Wn?TB3P zh4PYQtzNI2laZ^V1u@2%VYXofo#$f9?} z{g5ky{arkjo0YZngdjFBkKC`Vo`@ZkWNC`C_ZF7g_;LQ^=gJK60isc0nfD||;QbLh zqm?XPW>-Ds0dZJbpO zb}am_%z^ldSG0U6@a*@mqlI3hkR}r6(>VCjfiSOI46I~*s;(97Ro)8+>zQ@jlv$49PArKvxkxgwBdB;#)2(4-!CdDVF!4L+<>%U)0rggTDio~bmuS8 z*DD7#>a9n~qz&fVQ)Srb$Y8w@3@3OW!=V6HjEqk8@ilHta1dF<-HO!0i~(!}5~#<= z!n4PX!FG>le~I^w5dGJxZstqGGH1pB;o}eE(Eh6Be7L8vtB>x7O+Oo_hROX4XeF%iNrNuDbMF%%Fj5&tjH zZ7s_!M;$vi4iUxIB2MrA(l$%5jD^&&(JiBh?Iq~B=emhrk`8_i{Ffx(xx%$@JBb4$SlNt~?WQ(N zrbFis>F-n+Ewf$L%LDR}95)U!ev7AlHLtPc>%(EeK6Xt72Nfmhq@VH#)l!BvMwO(w<36$uo$fW(#UmwvEP`o}J zPq{_b+bON@JG)PrK_|W_HmDM^PA|s$o1Y4khOl?^I?z#%nE! 
z{XC7pZ{9)DmQ?j7%D20V@pyT&Qdj#Tq9{+FAHx6pAWx)0Eu9L z5P*=4FobZ6NRH@+n21=7xPVTSv+KMKCW`On=9T!~!Jpg?S1Asw@0mRV42*4P_1jnSrl*M$yOvfC< ze8(ciO2@{;PRE|bp~m6EF~AAJsl@q<^NGucYk}L0JBj-b_Z|-(j~tH=PZiGu&krvf z?;0O~55)h8AAsM8|4D#LU_uZ>@SEVAkd#n}P=_#?aDecVh?K~UsE=5H*n_x`xQBR& z_?m=}M294iWQb&!6qi(l)POXKw3+ms44W*0Y=CT+9Fbg_+<`ose1!a!f}O&PBAa53 z5}Zw{%81H?s+?+r8k<^z+JSn2=DS1cf3GEvp@e?oJ^-k!K_hm=RJ*f~ zEPy^8)bGD}--KRiQ5NiBg;%7?zy1B=B*CHtc5B`!uGQRYFqnRBRXcLS z5pE{wla8bepSRui&#pNdE4gXH30(*{{GCl_2&(6MoneF?{$&T+Oa5g?MnXO=2THwJ zNyu0l{80#UvlT~tQNytW?0(Xc(S$a90`+1L4jIB^YnjWGh~q2PwiAbQyrJWIs()GM z-LTx|QI(~BF!yZyu3jYOyxi)d6q1}%F&nsTiNOoMg)@>4DswO zd7&f@=3|L%Ce-$h8rp+jmYY_uB#UFDQ4=Lb^GwKDnU=3`E4&nCwr*b=o=B|s^hs1R#V!agd6;mD@GGo*1m^2txCCYJ=jET}Lb#)NzldN#7*)#TZtJX7)bZh()DN<&DULB-z4J%ASOCDOS zi0&0yIg1V%+Atv2pu!%dK1bsWTZ|X)or9^6BWGs)3I=Y28W_*KeR-jvY4B^gK*h{y^sAn)+SUTnDOF`orBX|!{9+a4 zVtJ-&laFDBi^D=mo7d6d<;Dz!8i#DF~u*T d`d@*P)=+z2O9=Gccp2C_0H}G=_V0V@{{Zm~b;kez literal 0 HcmV?d00001 diff --git a/templates/index_new.html b/templates/index_new.html new file mode 100644 index 00000000..47cf4c79 --- /dev/null +++ b/templates/index_new.html @@ -0,0 +1,63 @@ + + + + + + + + + + + + KoboldAI Client + + + + + + + +

+ {% include 'settings flyout.html' %} +
+ + +
+ + + + + +
+
+ + +
+
+
+ +
+ + + + + + +
+ + +
+ + +
Status:
+ options: +
+
+ +
+ + \ No newline at end of file diff --git a/templates/settings flyout.html b/templates/settings flyout.html new file mode 100644 index 00000000..21d6db01 --- /dev/null +++ b/templates/settings flyout.html @@ -0,0 +1,41 @@ + + +
+ + + + + +
+ +
+
+
Running Model: ReadOnly
+
+
+{% with menu='Model' %} + {% include 'settings item.html' %} +{% endwith %} +
+ + + \ No newline at end of file diff --git a/templates/settings item.html b/templates/settings item.html new file mode 100644 index 00000000..62b41416 --- /dev/null +++ b/templates/settings item.html @@ -0,0 +1,39 @@ +{% for item in settings %} +{% if item["menu_path"] == menu %} +
+ + + {{ item['label'] }}: + + ? + + {{ item['tooltip'] }} + + + + {% if (item['unit'] != 'bool') and (item['unit'] != 'text') %} + + {% endif %} + + + {% if item["uitype"] == "slider" %} + + {% elif item["uitype"] == "toggle" %} + + {% elif item['uuitype'] == "dropdown" %} + + {% endif %} + +
+{% endif %} +{% endfor %} \ No newline at end of file From 4c357abd78db234caf71d202c7731b027971cff5 Mon Sep 17 00:00:00 2001 From: ebolam Date: Fri, 24 Jun 2022 09:22:59 -0400 Subject: [PATCH 0007/1297] metadata merged with actions --- aiserver.py | 168 ++++++--------------------------- koboldai_settings.py | 38 +++++--- static/application.js | 1 + static/koboldai.css | 4 + static/koboldai.js | 8 ++ templates/settings flyout.html | 7 +- 6 files changed, 75 insertions(+), 151 deletions(-) diff --git a/aiserver.py b/aiserver.py index 71279db1..c9eb7e16 100644 --- a/aiserver.py +++ b/aiserver.py @@ -2099,8 +2099,7 @@ def download(): js["memory"] = story_settings.memory js["authorsnote"] = story_settings.authornote js["anotetemplate"] = story_settings.authornotetemplate - js["actions"] = tuple(story_settings.actions.values()) - js["actions_metadata"] = story_settings.actions_metadata + js["actions"] = story_settings.actions.to_json() js["worldinfo"] = [] # Extract only the important bits of WI @@ -2619,8 +2618,6 @@ def lua_set_chunk(k, v): if(not hasattr(story_settings, "_actions") or story_settings._actions is not story_settings.actions): #Instead of deleting we'll blank out the text. 
This way our actions and actions_metadata stay in sync and we can restore the chunk on an undo story_settings.actions[chunk-1] = "" - story_settings.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": story_settings.actions_metadata[chunk-1]['Selected Text'], "Pinned": False, "Editted": True}] + story_settings.actions_metadata[chunk-1]['Alternative Text'] - story_settings.actions_metadata[chunk-1]['Selected Text'] = '' send_debug() else: if(k == 0): @@ -2638,8 +2635,6 @@ def lua_set_chunk(k, v): story_settings._actions[chunk-1] = v story_settings.lua_edited.add(chunk) story_settings.actions[chunk-1] = v - story_settings.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": story_settings.actions_metadata[chunk-1]['Selected Text'], "Pinned": False, "Editted": True}] + story_settings.actions_metadata[chunk-1]['Alternative Text'] - story_settings.actions_metadata[chunk-1]['Selected Text'] = v send_debug() #==================================================================# @@ -3445,23 +3440,6 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, story_settings.prompt = data else: story_settings.actions.append(data) - # we now need to update the actions_metadata - # we'll have two conditions. - # 1. This is totally new (user entered) - if story_settings.actions.get_last_key() not in story_settings.actions_metadata: - story_settings.actions_metadata[story_settings.actions.get_last_key()] = {"Selected Text": data, "Alternative Text": []} - else: - # 2. 
We've selected a chunk of text that is was presented previously - try: - alternatives = [item['Text'] for item in story_settings.actions_metadata[len(story_settings.actions)-1]["Alternative Text"]] - except: - print(len(story_settings.actions)) - print(story_settings.actions_metadata) - raise - if data in alternatives: - alternatives = [item for item in story_settings.actions_metadata[story_settings.actions.get_last_key() ]["Alternative Text"] if item['Text'] != data] - story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"] = alternatives - story_settings.actions_metadata[story_settings.actions.get_last_key()]["Selected Text"] = data update_story_chunk('last') send_debug() @@ -3528,19 +3506,10 @@ def actionback(): return # Remove last index of actions and refresh game screen if(len(story_settings.genseqs) == 0 and len(story_settings.actions) > 0): - # We are going to move the selected text to alternative text in the actions_metadata variable so we can redo this action - story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Alternative Text'] = [{'Text': story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Selected Text'], - 'Pinned': False, - "Previous Selection": True, - "Edited": False}] + story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Alternative Text'] - story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Selected Text'] = "" - last_key = story_settings.actions.get_last_key() story_settings.actions.pop() story_settings.recentback = True remove_story_chunk(last_key + 1) - #for the redo to not get out of whack, need to reset the max # in the actions sequence - #story_settings.actions.set_next_id(last_key) success = True elif(len(story_settings.genseqs) == 0): emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}) @@ -3552,43 +3521,14 @@ def actionback(): return success def actionredo(): - i = 0 - #First we need to find the 
next valid key - #We might have deleted text so we don't want to show a redo for that blank chunk - - restore_id = story_settings.actions.get_last_key()+1 - if restore_id in story_settings.actions_metadata: - ok_to_use = False - while not ok_to_use: - for item in story_settings.actions_metadata[restore_id]['Alternative Text']: - if item['Previous Selection'] and item['Text'] != "": - ok_to_use = True - if not ok_to_use: - restore_id+=1 - if restore_id not in story_settings.actions_metadata: - return - #else: - #print("???") - #story_settings.actions.set_next_id(restore_id) - - if restore_id in story_settings.actions_metadata: - genout = [{"generated_text": item['Text']} for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]==True)] - if len(genout) > 0: - genout = genout + [{"generated_text": item['Text']} for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Pinned"]==True) and (item["Previous Selection"]==False)] - if len(genout) == 1: - story_settings.actions_metadata[restore_id]['Alternative Text'] = [item for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]!=True)] - genresult(genout[0]['generated_text'], flash=True, ignore_formatting=True) - else: - # Store sequences in memory until selection is made - story_settings.genseqs = genout - - - # Send sequences to UI for selection - genout = [[item['Text'], "redo"] for item in story_settings.actions_metadata[restore_id]['Alternative Text'] if (item["Previous Selection"]==True)] - - emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True) + genout = [[x['text'], "redo" if x['Previous Selection'] else "pinned" if x['Pinned'] else "normal"] for x in story_settings.actions.get_redo_options()] + if len(genout) == 0: + emit('from_server', {'cmd': 'popuperror', 'data': "There's nothing to redo"}, broadcast=True) + elif len(genout) == 1: + genresult(genout[0][0], 
flash=True, ignore_formatting=True) else: - emit('from_server', {'cmd': 'popuperror', 'data': "There's nothing to undo"}, broadcast=True) + story_settings.genseqs = [{"generated_text": x[0]} for x in genout] + emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True) send_debug() #==================================================================# @@ -3986,10 +3926,6 @@ def genresult(genout, flash=True, ignore_formatting=False): story_settings.prompt = genout else: story_settings.actions.append(genout) - if story_settings.actions.get_last_key() not in story_settings.actions_metadata: - story_settings.actions_metadata[story_settings.actions.get_last_key()] = {'Selected Text': genout, 'Alternative Text': []} - else: - story_settings.actions_metadata[story_settings.actions.get_last_key()]['Selected Text'] = genout update_story_chunk('last') if(flash): emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) @@ -4007,26 +3943,16 @@ def genselect(genout): print("{0}[Result {1}]\n{2}{3}".format(colors.CYAN, i, result["generated_text"], colors.END)) i += 1 - # Add the options to the actions metadata - # If we've already generated text for this action but haven't selected one we'll want to kill all non-pinned, non-previous selection, and non-edited options then add the new ones - if story_settings.actions.get_next_id() in story_settings.actions_metadata: - if (story_settings.actions_metadata[story_settings.actions.get_next_id()]['Selected Text'] == ""): - story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] = [{"Text": item['Text'], "Pinned": item['Pinned'], - "Previous Selection": item["Previous Selection"], - "Edited": item["Edited"]} for item in story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] - if item['Pinned'] or item["Previous Selection"] or item["Edited"]] + [{"Text": 
text["generated_text"], - "Pinned": False, "Previous Selection": False, "Edited": False} for text in genout] - else: - story_settings.actions_metadata[story_settings.actions.get_next_id()] = {'Selected Text': '', 'Alternative Text': [{"Text": text["generated_text"], "Pinned": False, "Previous Selection": False, "Edited": False} for text in genout]} - else: - story_settings.actions_metadata[story_settings.actions.get_next_id()] = {'Selected Text': '', 'Alternative Text': [{"Text": text["generated_text"], "Pinned": False, "Previous Selection": False, "Edited": False} for text in genout]} + story_settings.actions.clear_unused_options() + story_settings.actions.append_options([x["generated_text"] for x in genout]) - genout = [{"generated_text": item['Text']} for item in story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] if (item["Previous Selection"]==False) and (item["Edited"]==False)] + genout = [{"generated_text": x['text']} for x in story_settings.actions.get_current_options_no_edits()] # Store sequences in memory until selection is made story_settings.genseqs = genout - genout = [[item['Text'], "pinned" if item['Pinned'] else "normal"] for item in story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] if (item["Previous Selection"]==False) and (item["Edited"]==False)] + + genout = story_settings.actions.get_current_options_no_edits(ui=1) # Send sequences to UI for selection emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True) @@ -4041,9 +3967,6 @@ def selectsequence(n): system_settings.lua_koboldbridge.feedback = story_settings.genseqs[int(n)]["generated_text"] if(len(system_settings.lua_koboldbridge.feedback) != 0): story_settings.actions.append(system_settings.lua_koboldbridge.feedback) - #We'll want to remove the option from the alternative text and put it in selected text - story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Alternative Text'] = 
[item for item in story_settings.actions_metadata[story_settings.actions.get_last_key()]['Alternative Text'] if item['Text'] != system_settings.lua_koboldbridge.feedback] - story_settings.actions_metadata[story_settings.actions.get_last_key() ]['Selected Text'] = system_settings.lua_koboldbridge.feedback update_story_chunk('last') emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) @@ -4058,14 +3981,8 @@ def selectsequence(n): #==================================================================# def pinsequence(n): if n.isnumeric(): + story_settings.actions.toggle_pin(story_settings.actions.get_last_key()+1, int(n)) text = story_settings.genseqs[int(n)]['generated_text'] - if text in [item['Text'] for item in story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text']]: - alternatives = story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] - for i in range(len(alternatives)): - if alternatives[i]['Text'] == text: - alternatives[i]['Pinned'] = not alternatives[i]['Pinned'] - break - story_settings.actions_metadata[story_settings.actions.get_next_id()]['Alternative Text'] = alternatives send_debug() @@ -4483,10 +4400,6 @@ def editsubmit(data): if(story_settings.editln == 0): story_settings.prompt = data else: - story_settings.actions_metadata[story_settings.editln-1]['Alternative Text'] = story_settings.actions_metadata[story_settings.editln-1]['Alternative Text'] + [{"Text": story_settings.actions[story_settings.editln-1], "Pinned": False, - "Previous Selection": False, - "Edited": True}] - story_settings.actions_metadata[story_settings.editln-1]['Selected Text'] = data story_settings.actions[story_settings.editln-1] = data story_settings.mode = "play" @@ -4505,10 +4418,7 @@ def deleterequest(): # Send error message pass else: - 
story_settings.actions_metadata[story_settings.editln-1]['Alternative Text'] = [{"Text": story_settings.actions[story_settings.editln-1], "Pinned": False, - "Previous Selection": True, "Edited": False}] + story_settings.actions_metadata[story_settings.editln-1]['Alternative Text'] - story_settings.actions_metadata[story_settings.editln-1]['Selected Text'] = '' - story_settings.actions[story_settings.editln-1] = '' + story_settings.actions.delete_action(story_settings.editln-1) story_settings.mode = "play" remove_story_chunk(story_settings.editln) emit('from_server', {'cmd': 'editmode', 'data': 'false'}) @@ -4526,10 +4436,6 @@ def inlineedit(chunk, data): story_settings.prompt = data else: if(chunk-1 in story_settings.actions): - story_settings.actions_metadata[chunk-1]['Alternative Text'] = story_settings.actions_metadata[chunk-1]['Alternative Text'] + [{"Text": story_settings.actions[chunk-1], "Pinned": False, - "Previous Selection": False, - "Edited": True}] - story_settings.actions_metadata[chunk-1]['Selected Text'] = data story_settings.actions[chunk-1] = data else: print(f"WARNING: Attempted to edit non-existent chunk {chunk}") @@ -4554,11 +4460,7 @@ def inlinedelete(chunk): emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) else: if(chunk-1 in story_settings.actions): - story_settings.actions_metadata[chunk-1]['Alternative Text'] = [{"Text": story_settings.actions[chunk-1], "Pinned": False, - "Previous Selection": True, - "Edited": False}] + story_settings.actions_metadata[chunk-1]['Alternative Text'] - story_settings.actions_metadata[chunk-1]['Selected Text'] = '' - story_settings.actions[chunk-1] = '' + story_settings.actions.delete_action(chunk-1) else: print(f"WARNING: Attempted to delete non-existent chunk {chunk}") setgamesaved(False) @@ -4954,15 +4856,6 @@ def ikrequest(txt): if not system_settings.quiet: print("{0}{1}{2}".format(colors.CYAN, genout, colors.END)) story_settings.actions.append(genout) - if 
story_settings.actions.get_last_key() in story_settings.actions_metadata: - story_settings.actions_metadata[story_settings.actions.get_last_key()] = {"Selected Text": genout, "Alternative Text": []} - else: - # 2. We've selected a chunk of text that is was presented previously - alternatives = [item['Text'] for item in story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"]] - if genout in alternatives: - alternatives = [item for item in story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"] if item['Text'] != genout] - story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"] = alternatives - story_settings.actions_metadata[story_settings.actions.get_last_key()]["Selected Text"] = genout update_story_chunk('last') emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) send_debug() @@ -5046,21 +4939,6 @@ def oairequest(txt, min, max): {"generated_text": utils.decodenewlines(txt)} for txt in outputs] - if story_settings.actions.get_last_key() not in story_settings.actions_metadata: - story_settings.actions_metadata[story_settings.actions.get_last_key()] = { - "Selected Text": genout[0], "Alternative Text": []} - else: - # 2. 
We've selected a chunk of text that is was presented previously - try: - alternatives = [item['Text'] for item in story_settings.actions_metadata[len(story_settings.actions)-1]["Alternative Text"]] - except: - print(len(story_settings.actions)) - print(story_settings.actions_metadata) - raise - if genout in alternatives: - alternatives = [item for item in story_settings.actions_metadata[story_settings.actions.get_last_key() ]["Alternative Text"] if item['Text'] != genout] - story_settings.actions_metadata[story_settings.actions.get_last_key()]["Alternative Text"] = alternatives - story_settings.actions_metadata[story_settings.actions.get_last_key()]["Selected Text"] = genout if (len(genout) == 1): genresult(genout[0]["generated_text"]) @@ -5933,6 +5811,20 @@ def UI_2_Pinning(data): else: story_settings.actions.unset_pin(int(data['chunk']), int(data['option'])) +#==================================================================# +# Event triggered when user clicks the back button +#==================================================================# +@socketio.on('back') +def UI_2_back(data): + ignore = story_settings.actions.pop() + +#==================================================================# +# Event triggered when user clicks the redo button +#==================================================================# +@socketio.on('redo') +def UI_2_redo(data): + pass + #==================================================================# # Final startup commands to launch Flask app diff --git a/koboldai_settings.py b/koboldai_settings.py index c2553410..f76f2ca1 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -85,6 +85,10 @@ class model_settings(settings): #Put variable change actions here if name not in self.local_only_variables and name[0] != "_": process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + + #Since I haven't migrated the old_ui to use the new actions class for options, let's sync the 
metadata and options here + if name == 'actions_metadata': + print(value) class story_settings(settings): @@ -367,6 +371,14 @@ class KoboldStoryRegister(object): new_options = self.actions[pointer]["Options"] process_variable_changes("actions", "Options", {"id": pointer, "options": new_options}, {"id": pointer, "options": old_options}) + def toggle_pin(self, action_step, option_number): + if action_step in self.actions: + if option_number < len(self.actions[action_step]['Options']): + if self.actions[action_step]["Options"]['Pinned']: + self.unset_pin(action_step, option_number) + else: + self.set_pin(action_step, option_number) + def set_pin(self, action_step, option_number): if action_step in self.actions: if option_number < len(self.actions[action_step]['Options']): @@ -448,11 +460,17 @@ class KoboldStoryRegister(object): else: return [] - def get_current_options_no_edits(self): - if self.action_count+1 in self.actions: - return [x for x in self.actions[self.action_count+1]["Options"] if x["Edited"] == False] + def get_current_options_no_edits(self, ui=2): + if ui==2: + if self.action_count+1 in self.actions: + return [x for x in self.actions[self.action_count+1]["Options"] if x["Edited"] == False and x['Previous Selection'] == False] + else: + return [] else: - return [] + if self.action_count+1 in self.actions: + return [[x, "pinned" if x['Pinned'] else 'normal'] for x in self.actions[self.action_count+1]["Options"] if x["Edited"] == False and x['Previous Selection'] == False] + else: + return [] def get_pins(self, action_id): if action_id in self.actions: @@ -473,14 +491,10 @@ class KoboldStoryRegister(object): return [] def get_redo_options(self): - pointer = max(self.actions) - while pointer > self.action_count: - if pointer in self.actions: - for item in self.actions[pointer]["Options"]: - if item["Previous Selection"] or item["Pinned"]: - return self.actions[pointer]["Options"] - pointer-=1 - return [] + if self.action_count+1 in self.actions: + return [x 
for x in self.actions[self.action_count+1]['Options'] if x['Pinned'] or x['Previous Selection']] + else: + return [] diff --git a/static/application.js b/static/application.js index b5c1a585..8b2cbb53 100644 --- a/static/application.js +++ b/static/application.js @@ -1359,6 +1359,7 @@ function setStartState() { function parsegenseqs(seqs) { seqselcontents.html(""); + console.log(seqs); var i; for(i=0; i +
@@ -18,7 +23,7 @@
Running Model: ReadOnly
-
+
{% with menu='Model' %} {% include 'settings item.html' %} From b906742f6178d8c6f5d02f4402542a8ea67e4af6 Mon Sep 17 00:00:00 2001 From: ebolam Date: Sun, 26 Jun 2022 16:36:07 -0400 Subject: [PATCH 0008/1297] Working options. --- aiserver.py | 443 ++++++++++++++++++----------------- gensettings.py | 14 +- koboldai_settings.py | 138 +++++------ static/application.js | 3 +- static/koboldai.css | 13 +- static/koboldai.js | 9 +- static/socket.io.min.js | 4 +- templates/index_new.html | 7 +- templates/settings item.html | 4 +- 9 files changed, 339 insertions(+), 296 deletions(-) diff --git a/aiserver.py b/aiserver.py index c9eb7e16..81a3eb3b 100644 --- a/aiserver.py +++ b/aiserver.py @@ -7,14 +7,15 @@ # External packages import eventlet +from eventlet import tpool eventlet.monkey_patch(all=True, thread=False) +#eventlet.monkey_patch(os=True, select=True, socket=True, thread=True, time=True, psycopg=True) import os os.system("") __file__ = os.path.dirname(os.path.realpath(__file__)) os.chdir(__file__) os.environ['EVENTLET_THREADPOOL_SIZE'] = '1' os.environ['TOKENIZERS_PARALLELISM'] = 'false' -from eventlet import tpool import logging logging.getLogger("urllib3").setLevel(logging.ERROR) @@ -228,7 +229,7 @@ class Send_to_socketio(object): print(bar, end="") time.sleep(0.01) try: - emit('from_server', {'cmd': 'model_load_status', 'data': bar.replace(" ", " ")}, broadcast=True) + emit('from_server', {'cmd': 'model_load_status', 'data': bar.replace(" ", " ")}, broadcast=True, room="UI_1") except: pass @@ -245,6 +246,7 @@ app = Flask(__name__, root_path=os.getcwd()) app.config['SECRET KEY'] = 'secret!' 
app.config['TEMPLATES_AUTO_RELOAD'] = True socketio = SocketIO(app, async_method="eventlet") +#socketio = SocketIO(app, async_method="eventlet", logger=True, engineio_logger=True) koboldai_settings.socketio = socketio print("{0}OK!{1}".format(colors.GREEN, colors.END)) @@ -263,9 +265,9 @@ def sendModelSelection(menu="mainmenu", folder="./models"): showdelete=True else: showdelete=False - emit('from_server', {'cmd': 'show_model_menu', 'data': menu_list, 'menu': menu, 'breadcrumbs': breadcrumbs, "showdelete": showdelete}, broadcast=True) + emit('from_server', {'cmd': 'show_model_menu', 'data': menu_list, 'menu': menu, 'breadcrumbs': breadcrumbs, "showdelete": showdelete}, broadcast=True, room="UI_1") else: - emit('from_server', {'cmd': 'show_model_menu', 'data': model_menu[menu], 'menu': menu, 'breadcrumbs': [], "showdelete": False}, broadcast=True) + emit('from_server', {'cmd': 'show_model_menu', 'data': model_menu[menu], 'menu': menu, 'breadcrumbs': [], "showdelete": False}, broadcast=True, room="UI_1") def get_folder_path_info(base): if base == 'This PC': @@ -794,7 +796,7 @@ def check_for_sp_change(): time.sleep(0.1) if(system_settings.sp_changed): with app.app_context(): - emit('from_server', {'cmd': 'spstatitems', 'data': {system_settings.spfilename: system_settings.spmeta} if system_settings.allowsp and len(system_settings.spfilename) else {}}, namespace=None, broadcast=True) + emit('from_server', {'cmd': 'spstatitems', 'data': {system_settings.spfilename: system_settings.spmeta} if system_settings.allowsp and len(system_settings.spfilename) else {}}, namespace=None, broadcast=True, room="UI_1") system_settings.sp_changed = False socketio.start_background_task(check_for_sp_change) @@ -1040,7 +1042,7 @@ def get_model_info(model, directory=""): 'gpu':gpu, 'layer_count':layer_count, 'breakmodel':breakmodel, 'disk_break_value': disk_blocks, 'accelerate': utils.HAS_ACCELERATE, 'break_values': break_values, 'gpu_count': gpu_count, - 'url': url, 'gpu_names': 
gpu_names}, broadcast=True) + 'url': url, 'gpu_names': gpu_names}, broadcast=True, room="UI_1") if key_value != "": get_oai_models(key_value) @@ -1112,12 +1114,12 @@ def get_oai_models(key): if changed: with open("settings/{}.settings".format(model_settings.model), "w") as file: js["apikey"] = key - file.write(json.dumps(js, indent=3)) + file.write(json.dumps(js, indent=3), room="UI_1") emit('from_server', {'cmd': 'oai_engines', 'data': engines, 'online_model': online_model}, broadcast=True) else: # Something went wrong, print the message and quit since we can't initialize an engine - print("{0}ERROR!{1}".format(colors.RED, colors.END)) + print("{0}ERROR!{1}".format(colors.RED, colors.END), room="UI_1") print(req.json()) emit('from_server', {'cmd': 'errmsg', 'data': req.json()}) @@ -1392,7 +1394,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal if not initial_load: set_aibusy(True) if model_settings.model != 'ReadOnly': - emit('from_server', {'cmd': 'model_load_status', 'data': "Loading {}".format(model_settings.model)}, broadcast=True) + emit('from_server', {'cmd': 'model_load_status', 'data': "Loading {}".format(model_settings.model)}, broadcast=True, room="UI_1") #Have to add a sleep so the server will send the emit for some reason time.sleep(0.1) if gpu_layers is not None: @@ -2058,7 +2060,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal final_startup() if not initial_load: set_aibusy(False) - emit('from_server', {'cmd': 'hide_model_name'}, broadcast=True) + emit('from_server', {'cmd': 'hide_model_name'}, broadcast=True, room="UI_1") time.sleep(0.1) if not story_settings.gamestarted: @@ -2235,7 +2237,7 @@ def load_lua_scripts(): pass system_settings.lua_running = False if(system_settings.serverstarted): - emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check 
console.'}, broadcast=True, room="UI_1") sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr) @@ -2722,7 +2724,7 @@ def execute_inmod(): except lupa.LuaError as e: system_settings.lua_koboldbridge.obliterate_multiverse() system_settings.lua_running = False - emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True, room="UI_1") sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr) @@ -2734,13 +2736,13 @@ def execute_genmod(): def execute_outmod(): setgamesaved(False) - emit('from_server', {'cmd': 'hidemsg', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'hidemsg', 'data': ''}, broadcast=True, room="UI_1") try: tpool.execute(system_settings.lua_koboldbridge.execute_outmod) except lupa.LuaError as e: system_settings.lua_koboldbridge.obliterate_multiverse() system_settings.lua_running = False - emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True, room="UI_1") sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr) @@ -2764,6 +2766,8 @@ def execute_outmod(): #==================================================================# @socketio.on('connect') def do_connect(): + if request.args.get("rely") == "true": + return join_room("UI_{}".format(request.args.get('ui'))) print("Joining Room 
UI_{}".format(request.args.get('ui'))) #Send all variables to client @@ -2772,51 +2776,51 @@ def do_connect(): user_settings.send_to_ui() system_settings.send_to_ui() print("{0}Client connected!{1}".format(colors.GREEN, colors.END)) - emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}) - emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}) - emit('from_server', {'cmd': 'connected', 'smandelete': system_settings.smandelete, 'smanrename': system_settings.smanrename, 'modelname': getmodelname()}) + emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}, room="UI_1") + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, room="UI_1") + emit('from_server', {'cmd': 'connected', 'smandelete': system_settings.smandelete, 'smanrename': system_settings.smanrename, 'modelname': getmodelname()}, room="UI_1") if(system_settings.host): - emit('from_server', {'cmd': 'runs_remotely'}) + emit('from_server', {'cmd': 'runs_remotely'}, room="UI_1") if(system_settings.flaskwebgui): - emit('from_server', {'cmd': 'flaskwebgui'}) + emit('from_server', {'cmd': 'flaskwebgui'}, room="UI_1") if(system_settings.allowsp): - emit('from_server', {'cmd': 'allowsp', 'data': system_settings.allowsp}) + emit('from_server', {'cmd': 'allowsp', 'data': system_settings.allowsp}, room="UI_1") sendUSStatItems() - emit('from_server', {'cmd': 'spstatitems', 'data': {system_settings.spfilename: system_settings.spmeta} if system_settings.allowsp and len(system_settings.spfilename) else {}}, broadcast=True) + emit('from_server', {'cmd': 'spstatitems', 'data': {system_settings.spfilename: system_settings.spmeta} if system_settings.allowsp and len(system_settings.spfilename) else {}}, broadcast=True, room="UI_1") if(not story_settings.gamestarted): setStartState() sendsettings() refresh_settings() user_settings.laststory = None - emit('from_server', {'cmd': 'setstoryname', 'data': 
user_settings.laststory}) + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, room="UI_1") sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}) - emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, room="UI_1") + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, room="UI_1") story_settings.mode = "play" else: # Game in session, send current game data and ready state to browser refresh_story() sendsettings() refresh_settings() - emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}) + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, room="UI_1") sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}) - emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, room="UI_1") + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, room="UI_1") if(story_settings.mode == "play"): if(not system_settings.aibusy): - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, room="UI_1") else: - emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}) + emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, room="UI_1") elif(story_settings.mode == "edit"): - emit('from_server', {'cmd': 'editmode', 'data': 'true'}) + emit('from_server', {'cmd': 'editmode', 'data': 'true'}, room="UI_1") elif(story_settings.mode == "memory"): - emit('from_server', {'cmd': 'memmode', 'data': 'true'}) + emit('from_server', {'cmd': 'memmode', 'data': 'true'}, room="UI_1") elif(story_settings.mode == "wi"): - emit('from_server', {'cmd': 'wimode', 'data': 'true'}) + emit('from_server', {'cmd': 'wimode', 'data': 'true'}, room="UI_1") - 
emit('from_server', {'cmd': 'gamesaved', 'data': story_settings.gamesaved}, broadcast=True) + emit('from_server', {'cmd': 'gamesaved', 'data': story_settings.gamesaved}, broadcast=True, room="UI_1") #==================================================================# # Event triggered when browser SocketIO sends data to the server @@ -2839,7 +2843,7 @@ def get_message(msg): raise ValueError("Chatname must be a string") story_settings.chatname = msg['chatname'] settingschanged() - emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}) + emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}, room="UI_1") story_settings.recentrng = story_settings.recentrngm = None actionsubmit(msg['data'], actionmode=msg['actionmode']) elif(story_settings.mode == "edit"): @@ -2858,7 +2862,7 @@ def get_message(msg): raise ValueError("Chatname must be a string") story_settings.chatname = msg['chatname'] settingschanged() - emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}) + emit('from_server', {'cmd': 'setchatname', 'data': story_settings.chatname}, room="UI_1") actionretry(msg['data']) # Back/Undo Action elif(msg['cmd'] == 'back'): @@ -2870,10 +2874,10 @@ def get_message(msg): elif(msg['cmd'] == 'edit'): if(story_settings.mode == "play"): story_settings.mode = "edit" - emit('from_server', {'cmd': 'editmode', 'data': 'true'}, broadcast=True) + emit('from_server', {'cmd': 'editmode', 'data': 'true'}, broadcast=True, room="UI_1") elif(story_settings.mode == "edit"): story_settings.mode = "play" - emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True, room="UI_1") # EditLine Action (old) elif(msg['cmd'] == 'editline'): editrequest(int(msg['data'])) @@ -2901,62 +2905,62 @@ def get_message(msg): randomGameRequest(msg['data'], memory=msg['memory']) elif(msg['cmd'] == 'settemp'): model_settings.temp = float(msg['data']) - 
emit('from_server', {'cmd': 'setlabeltemp', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabeltemp', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'settopp'): model_settings.top_p = float(msg['data']) - emit('from_server', {'cmd': 'setlabeltopp', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabeltopp', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'settopk'): model_settings.top_k = int(msg['data']) - emit('from_server', {'cmd': 'setlabeltopk', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabeltopk', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'settfs'): model_settings.tfs = float(msg['data']) - emit('from_server', {'cmd': 'setlabeltfs', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabeltfs', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'settypical'): model_settings.typical = float(msg['data']) - emit('from_server', {'cmd': 'setlabeltypical', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabeltypical', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'settopa'): model_settings.top_a = float(msg['data']) - emit('from_server', {'cmd': 'setlabeltopa', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabeltopa', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'setreppen'): model_settings.rep_pen = float(msg['data']) - emit('from_server', {'cmd': 'setlabelreppen', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabelreppen', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 
'setreppenslope'): model_settings.rep_pen_slope = float(msg['data']) - emit('from_server', {'cmd': 'setlabelreppenslope', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabelreppenslope', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'setreppenrange'): model_settings.rep_pen_range = float(msg['data']) - emit('from_server', {'cmd': 'setlabelreppenrange', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabelreppenrange', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'setoutput'): model_settings.genamt = int(msg['data']) - emit('from_server', {'cmd': 'setlabeloutput', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabeloutput', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'settknmax'): model_settings.max_length = int(msg['data']) - emit('from_server', {'cmd': 'setlabeltknmax', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabeltknmax', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'setikgen'): model_settings.ikgen = int(msg['data']) - emit('from_server', {'cmd': 'setlabelikgen', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabelikgen', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() # Author's Note field update @@ -2965,7 +2969,7 @@ def get_message(msg): # Author's Note depth update elif(msg['cmd'] == 'anotedepth'): story_settings.andepth = int(msg['data']) - emit('from_server', {'cmd': 'setlabelanotedepth', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'setlabelanotedepth', 'data': msg['data']}, broadcast=True, room="UI_1") settingschanged() refresh_settings() # Format - Trim incomplete sentences @@ -2997,10 +3001,10 @@ def get_message(msg): 
elif(msg['cmd'] == 'importselect'): user_settings.importnum = int(msg["data"].replace("import", "")) elif(msg['cmd'] == 'importcancel'): - emit('from_server', {'cmd': 'popupshow', 'data': False}) + emit('from_server', {'cmd': 'popupshow', 'data': False}, room="UI_1") user_settings.importjs = {} elif(msg['cmd'] == 'importaccept'): - emit('from_server', {'cmd': 'popupshow', 'data': False}) + emit('from_server', {'cmd': 'popupshow', 'data': False}, room="UI_1") importgame() elif(msg['cmd'] == 'wi'): togglewimode() @@ -3022,19 +3026,19 @@ def get_message(msg): elif(msg['cmd'] == 'wiexpand'): assert 0 <= int(msg['data']) < len(story_settings.worldinfo) setgamesaved(False) - emit('from_server', {'cmd': 'wiexpand', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'wiexpand', 'data': msg['data']}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wiexpandfolder'): assert 0 <= int(msg['data']) < len(story_settings.worldinfo) setgamesaved(False) - emit('from_server', {'cmd': 'wiexpandfolder', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'wiexpandfolder', 'data': msg['data']}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wifoldercollapsecontent'): setgamesaved(False) story_settings.wifolders_d[msg['data']]['collapsed'] = True - emit('from_server', {'cmd': 'wifoldercollapsecontent', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'wifoldercollapsecontent', 'data': msg['data']}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wifolderexpandcontent'): setgamesaved(False) story_settings.wifolders_d[msg['data']]['collapsed'] = False - emit('from_server', {'cmd': 'wifolderexpandcontent', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'wifolderexpandcontent', 'data': msg['data']}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wiupdate'): setgamesaved(False) num = int(msg['num']) @@ -3042,7 +3046,7 @@ def get_message(msg): for field in fields: if(field in msg['data'] and type(msg['data'][field]) 
is str): story_settings.worldinfo[num][field] = msg['data'][field] - emit('from_server', {'cmd': 'wiupdate', 'num': msg['num'], 'data': {field: story_settings.worldinfo[num][field] for field in fields}}, broadcast=True) + emit('from_server', {'cmd': 'wiupdate', 'num': msg['num'], 'data': {field: story_settings.worldinfo[num][field] for field in fields}}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wifolderupdate'): setgamesaved(False) uid = int(msg['uid']) @@ -3050,23 +3054,23 @@ def get_message(msg): for field in fields: if(field in msg['data'] and type(msg['data'][field]) is (str if field != "collapsed" else bool)): story_settings.wifolders_d[uid][field] = msg['data'][field] - emit('from_server', {'cmd': 'wifolderupdate', 'uid': msg['uid'], 'data': {field: story_settings.wifolders_d[uid][field] for field in fields}}, broadcast=True) + emit('from_server', {'cmd': 'wifolderupdate', 'uid': msg['uid'], 'data': {field: story_settings.wifolders_d[uid][field] for field in fields}}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wiselon'): setgamesaved(False) story_settings.worldinfo[msg['data']]["selective"] = True - emit('from_server', {'cmd': 'wiselon', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'wiselon', 'data': msg['data']}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wiseloff'): setgamesaved(False) story_settings.worldinfo[msg['data']]["selective"] = False - emit('from_server', {'cmd': 'wiseloff', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'wiseloff', 'data': msg['data']}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wiconstanton'): setgamesaved(False) story_settings.worldinfo[msg['data']]["constant"] = True - emit('from_server', {'cmd': 'wiconstanton', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'wiconstanton', 'data': msg['data']}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'wiconstantoff'): setgamesaved(False) story_settings.worldinfo[msg['data']]["constant"] = False 
- emit('from_server', {'cmd': 'wiconstantoff', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'wiconstantoff', 'data': msg['data']}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'sendwilist'): commitwi(msg['data']) elif(msg['cmd'] == 'aidgimport'): @@ -3081,9 +3085,9 @@ def get_message(msg): getsplist() elif(msg['cmd'] == 'uslistrequest'): unloaded, loaded = getuslist() - emit('from_server', {'cmd': 'buildus', 'data': {"unloaded": unloaded, "loaded": loaded}}) + emit('from_server', {'cmd': 'buildus', 'data': {"unloaded": unloaded, "loaded": loaded}}, room="UI_1") elif(msg['cmd'] == 'samplerlistrequest'): - emit('from_server', {'cmd': 'buildsamplers', 'data': model_settings.sampler_order}) + emit('from_server', {'cmd': 'buildsamplers', 'data': model_settings.sampler_order}, room="UI_1") elif(msg['cmd'] == 'usloaded'): system_settings.userscripts = [] for userscript in msg['data']: @@ -3131,7 +3135,7 @@ def get_message(msg): load_model(use_gpu=msg['use_gpu'], gpu_layers=msg['gpu_layers'], disk_layers=msg['disk_layers'], online_model=msg['online_model']) elif(msg['cmd'] == 'show_model'): print("Model Name: {}".format(getmodelname())) - emit('from_server', {'cmd': 'show_model_name', 'data': getmodelname()}, broadcast=True) + emit('from_server', {'cmd': 'show_model_name', 'data': getmodelname()}, broadcast=True, room="UI_1") elif(msg['cmd'] == 'selectmodel'): # This is run when a model line is selected from the UI (line from the model_menu variable) that is tagged as not a menu # otherwise we should be running the msg['cmd'] == 'list_model' @@ -3160,7 +3164,7 @@ def get_message(msg): try: get_model_info(model_settings.model) except: - emit('from_server', {'cmd': 'errmsg', 'data': "The model entered doesn't exist."}) + emit('from_server', {'cmd': 'errmsg', 'data': "The model entered doesn't exist."}, room="UI_1") elif msg['data'] in ('NeoCustom', 'GPT2Custom'): if check_if_dir_is_model(msg['path']): model_settings.model = msg['data'] @@ -3212,12 
+3216,12 @@ def get_message(msg): pinsequence(msg['data']) elif(msg['cmd'] == 'setnumseq'): model_settings.numseqs = int(msg['data']) - emit('from_server', {'cmd': 'setlabelnumseq', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabelnumseq', 'data': msg['data']}, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'setwidepth'): user_settings.widepth = int(msg['data']) - emit('from_server', {'cmd': 'setlabelwidepth', 'data': msg['data']}) + emit('from_server', {'cmd': 'setlabelwidepth', 'data': msg['data']}, room="UI_1") settingschanged() refresh_settings() elif(msg['cmd'] == 'setuseprompt'): @@ -3258,7 +3262,7 @@ def get_message(msg): wiimportrequest() elif(msg['cmd'] == 'debug'): user_settings.debug = msg['data'] - emit('from_server', {'cmd': 'set_debug', 'data': msg['data']}, broadcast=True) + emit('from_server', {'cmd': 'set_debug', 'data': msg['data']}, broadcast=True, room="UI_1") if user_settings.debug: send_debug() @@ -3269,7 +3273,7 @@ def sendUSStatItems(): _, loaded = getuslist() loaded = loaded if system_settings.lua_running else [] last_userscripts = [e["filename"] for e in loaded] - emit('from_server', {'cmd': 'usstatitems', 'data': loaded, 'flash': last_userscripts != system_settings.last_userscripts}, broadcast=True) + emit('from_server', {'cmd': 'usstatitems', 'data': loaded, 'flash': last_userscripts != system_settings.last_userscripts}, broadcast=True, room="UI_1") system_settings.last_userscripts = last_userscripts #==================================================================# @@ -3292,25 +3296,25 @@ def setStartState(): txt = txt + "Please load a game or enter a prompt below to begin!" if(system_settings.noai): txt = txt + "Please load or import a story to read. There is no AI in this mode." 
- emit('from_server', {'cmd': 'updatescreen', 'gamestarted': story_settings.gamestarted, 'data': txt}, broadcast=True) - emit('from_server', {'cmd': 'setgamestate', 'data': 'start'}, broadcast=True) + emit('from_server', {'cmd': 'updatescreen', 'gamestarted': story_settings.gamestarted, 'data': txt}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setgamestate', 'data': 'start'}, broadcast=True, room="UI_1") #==================================================================# # Transmit applicable settings to SocketIO to build UI sliders/toggles #==================================================================# def sendsettings(): # Send settings for selected AI type - emit('from_server', {'cmd': 'reset_menus'}) + emit('from_server', {'cmd': 'reset_menus'}, room="UI_1") if(model_settings.model != "InferKit"): for set in gensettings.gensettingstf: - emit('from_server', {'cmd': 'addsetting', 'data': set}) + emit('from_server', {'cmd': 'addsetting', 'data': set}, room="UI_1") else: for set in gensettings.gensettingsik: - emit('from_server', {'cmd': 'addsetting', 'data': set}) + emit('from_server', {'cmd': 'addsetting', 'data': set}, room="UI_1") # Send formatting options for frm in gensettings.formatcontrols: - emit('from_server', {'cmd': 'addformat', 'data': frm}) + emit('from_server', {'cmd': 'addformat', 'data': frm}, room="UI_1") # Add format key to vars if it wasn't loaded with client.settings if(not frm["id"] in user_settings.formatoptns): user_settings.formatoptns[frm["id"]] = False; @@ -3321,7 +3325,7 @@ def sendsettings(): def setgamesaved(gamesaved): assert type(gamesaved) is bool if(gamesaved != story_settings.gamesaved): - emit('from_server', {'cmd': 'gamesaved', 'data': gamesaved}, broadcast=True) + emit('from_server', {'cmd': 'gamesaved', 'data': gamesaved}, broadcast=True, room="UI_1") story_settings.gamesaved = gamesaved #==================================================================# @@ -3335,7 +3339,7 @@ def 
check_for_backend_compilation(): for _ in range(31): time.sleep(0.06276680299820175) if(system_settings.compiling): - emit('from_server', {'cmd': 'warnmsg', 'data': 'Compiling TPU backend—this usually takes 1–2 minutes...'}, broadcast=True) + emit('from_server', {'cmd': 'warnmsg', 'data': 'Compiling TPU backend—this usually takes 1–2 minutes...'}, broadcast=True, room="UI_1") break system_settings.checking = False @@ -3383,14 +3387,14 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, # Save this first action as the prompt story_settings.prompt = data # Clear the startup text from game screen - emit('from_server', {'cmd': 'updatescreen', 'gamestarted': False, 'data': 'Please wait, generating story...'}, broadcast=True) + emit('from_server', {'cmd': 'updatescreen', 'gamestarted': False, 'data': 'Please wait, generating story...'}, broadcast=True, room="UI_1") calcsubmit(data) # Run the first action through the generator if(not system_settings.abort and system_settings.lua_koboldbridge.restart_sequence is not None and len(story_settings.genseqs) == 0): data = "" force_submit = True disable_recentrng = True continue - emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True, room="UI_1") break else: # Save this first action as the prompt @@ -3407,7 +3411,7 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, genresult(genout[0]["generated_text"], flash=False) refresh_story() if(len(story_settings.actions) > 0): - emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1}, broadcast=True) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1}, broadcast=True, room="UI_1") if(not system_settings.abort and system_settings.lua_koboldbridge.restart_sequence is not None): data = "" force_submit = True @@ -3424,7 +3428,7 @@ def actionsubmit(data, 
actionmode=0, force_submit=False, force_prompt_gen=False, genselect(genout) refresh_story() set_aibusy(0) - emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True, room="UI_1") break else: # Apply input formatting & scripts before sending to tokenizer @@ -3451,7 +3455,7 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, force_submit = True disable_recentrng = True continue - emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True, room="UI_1") break else: for i in range(model_settings.numseqs): @@ -3478,7 +3482,7 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, continue genselect(genout) set_aibusy(0) - emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True, room="UI_1") break #==================================================================# @@ -3486,7 +3490,7 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, #==================================================================# def actionretry(data): if(system_settings.noai): - emit('from_server', {'cmd': 'errmsg', 'data': "Retry function unavailable in Read Only mode."}) + emit('from_server', {'cmd': 'errmsg', 'data': "Retry function unavailable in Read Only mode."}, room="UI_1") return if(story_settings.recentrng is not None): if(not system_settings.aibusy): @@ -3496,7 +3500,7 @@ def actionretry(data): actionsubmit("", actionmode=story_settings.actionmode, force_submit=True) send_debug() elif(not story_settings.useprompt): - emit('from_server', {'cmd': 'errmsg', 'data': "Please enable \"Always Add Prompt\" to retry with your prompt."}) + emit('from_server', {'cmd': 'errmsg', 'data': "Please enable \"Always Add Prompt\" to retry with your prompt."}, 
room="UI_1") #==================================================================# # @@ -3512,7 +3516,7 @@ def actionback(): remove_story_chunk(last_key + 1) success = True elif(len(story_settings.genseqs) == 0): - emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}) + emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}, room="UI_1") success = False else: story_settings.genseqs = [] @@ -3523,12 +3527,12 @@ def actionback(): def actionredo(): genout = [[x['text'], "redo" if x['Previous Selection'] else "pinned" if x['Pinned'] else "normal"] for x in story_settings.actions.get_redo_options()] if len(genout) == 0: - emit('from_server', {'cmd': 'popuperror', 'data': "There's nothing to redo"}, broadcast=True) + emit('from_server', {'cmd': 'popuperror', 'data': "There's nothing to redo"}, broadcast=True, room="UI_1") elif len(genout) == 1: genresult(genout[0][0], flash=True, ignore_formatting=True) else: story_settings.genseqs = [{"generated_text": x[0]} for x in genout] - emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True) + emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True, room="UI_1") send_debug() #==================================================================# @@ -3864,13 +3868,13 @@ def generate(txt, minimum, maximum, found_entries=None): if(issubclass(type(e), lupa.LuaError)): system_settings.lua_koboldbridge.obliterate_multiverse() system_settings.lua_running = False - emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True, room="UI_1") sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr) print("{0}{1}{2}".format(colors.YELLOW, "Lua engine stopped; please open 
'Userscripts' and press Load to reinitialize scripts.", colors.END), file=sys.stderr) else: - emit('from_server', {'cmd': 'errmsg', 'data': 'Error occurred during generator call; please check console.'}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': 'Error occurred during generator call; please check console.'}, broadcast=True, room="UI_1") print("{0}{1}{2}".format(colors.RED, traceback.format_exc().replace("\033", ""), colors.END), file=sys.stderr) set_aibusy(0) return @@ -3928,7 +3932,7 @@ def genresult(genout, flash=True, ignore_formatting=False): story_settings.actions.append(genout) update_story_chunk('last') if(flash): - emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True, room="UI_1") send_debug() #==================================================================# @@ -3955,7 +3959,7 @@ def genselect(genout): genout = story_settings.actions.get_current_options_no_edits(ui=1) # Send sequences to UI for selection - emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True) + emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True, room="UI_1") send_debug() #==================================================================# @@ -3968,8 +3972,8 @@ def selectsequence(n): if(len(system_settings.lua_koboldbridge.feedback) != 0): story_settings.actions.append(system_settings.lua_koboldbridge.feedback) update_story_chunk('last') - emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) - emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, 
broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True, room="UI_1") story_settings.genseqs = [] if(system_settings.lua_koboldbridge.restart_sequence is not None): @@ -4066,7 +4070,7 @@ def sendtocolab(txt, min, max): else: errmsg = "Colab API Error: Failed to get a reply from the server. Please check the colab console." print("{0}{1}{2}".format(colors.RED, errmsg, colors.END)) - emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True, room="UI_1") set_aibusy(0) #==================================================================# @@ -4166,13 +4170,13 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): if(issubclass(type(e), lupa.LuaError)): system_settings.lua_koboldbridge.obliterate_multiverse() system_settings.lua_running = False - emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True, room="UI_1") sendUSStatItems() print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr) print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr) print("{0}{1}{2}".format(colors.YELLOW, "Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.", colors.END), file=sys.stderr) else: - emit('from_server', {'cmd': 'errmsg', 'data': 'Error occurred during generator call; please check console.'}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': 'Error occurred during generator call; please check console.'}, broadcast=True, room="UI_1") print("{0}{1}{2}".format(colors.RED, traceback.format_exc().replace("\033", ""), colors.END), file=sys.stderr) set_aibusy(0) return @@ -4274,7 +4278,7 @@ def refresh_story(): item = system_settings.comregex_ui.sub(lambda m: '\n'.join('' 
+ l + '' for l in m.group().split('\n')), item) # Add special formatting to comments item = system_settings.acregex_ui.sub('\\1', item) # Add special formatting to adventure actions text_parts.extend(('', item, '')) - emit('from_server', {'cmd': 'updatescreen', 'gamestarted': story_settings.gamestarted, 'data': formatforhtml(''.join(text_parts))}, broadcast=True) + emit('from_server', {'cmd': 'updatescreen', 'gamestarted': story_settings.gamestarted, 'data': formatforhtml(''.join(text_parts))}, broadcast=True, room="UI_1") #==================================================================# @@ -4306,7 +4310,7 @@ def update_story_chunk(idx: Union[int, str]): item = system_settings.acregex_ui.sub('\\1', item) # Add special formatting to adventure actions chunk_text = f'{formatforhtml(item)}' - emit('from_server', {'cmd': 'updatechunk', 'data': {'index': idx, 'html': chunk_text}}, broadcast=True) + emit('from_server', {'cmd': 'updatechunk', 'data': {'index': idx, 'html': chunk_text}}, broadcast=True, room="UI_1") setgamesaved(False) @@ -4319,7 +4323,7 @@ def update_story_chunk(idx: Union[int, str]): # Signals the Game Screen to remove one of the chunks #==================================================================# def remove_story_chunk(idx: int): - emit('from_server', {'cmd': 'removechunk', 'data': idx}, broadcast=True) + emit('from_server', {'cmd': 'removechunk', 'data': idx}, broadcast=True, room="UI_1") setgamesaved(False) @@ -4328,45 +4332,45 @@ def remove_story_chunk(idx: int): #==================================================================# def refresh_settings(): # Suppress toggle change events while loading state - emit('from_server', {'cmd': 'allowtoggle', 'data': False}, broadcast=True) + emit('from_server', {'cmd': 'allowtoggle', 'data': False}, broadcast=True, room="UI_1") if(model_settings.model != "InferKit"): - emit('from_server', {'cmd': 'updatetemp', 'data': model_settings.temp}, broadcast=True) - emit('from_server', {'cmd': 'updatetopp', 
'data': model_settings.top_p}, broadcast=True) - emit('from_server', {'cmd': 'updatetopk', 'data': model_settings.top_k}, broadcast=True) - emit('from_server', {'cmd': 'updatetfs', 'data': model_settings.tfs}, broadcast=True) - emit('from_server', {'cmd': 'updatetypical', 'data': model_settings.typical}, broadcast=True) - emit('from_server', {'cmd': 'updatetopa', 'data': model_settings.top_a}, broadcast=True) - emit('from_server', {'cmd': 'updatereppen', 'data': model_settings.rep_pen}, broadcast=True) - emit('from_server', {'cmd': 'updatereppenslope', 'data': model_settings.rep_pen_slope}, broadcast=True) - emit('from_server', {'cmd': 'updatereppenrange', 'data': model_settings.rep_pen_range}, broadcast=True) - emit('from_server', {'cmd': 'updateoutlen', 'data': model_settings.genamt}, broadcast=True) - emit('from_server', {'cmd': 'updatetknmax', 'data': model_settings.max_length}, broadcast=True) - emit('from_server', {'cmd': 'updatenumseq', 'data': model_settings.numseqs}, broadcast=True) + emit('from_server', {'cmd': 'updatetemp', 'data': model_settings.temp}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatetopp', 'data': model_settings.top_p}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatetopk', 'data': model_settings.top_k}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatetfs', 'data': model_settings.tfs}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatetypical', 'data': model_settings.typical}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatetopa', 'data': model_settings.top_a}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatereppen', 'data': model_settings.rep_pen}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatereppenslope', 'data': model_settings.rep_pen_slope}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatereppenrange', 'data': model_settings.rep_pen_range}, broadcast=True, room="UI_1") + emit('from_server', 
{'cmd': 'updateoutlen', 'data': model_settings.genamt}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatetknmax', 'data': model_settings.max_length}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatenumseq', 'data': model_settings.numseqs}, broadcast=True, room="UI_1") else: - emit('from_server', {'cmd': 'updatetemp', 'data': model_settings.temp}, broadcast=True) - emit('from_server', {'cmd': 'updatetopp', 'data': model_settings.top_p}, broadcast=True) - emit('from_server', {'cmd': 'updateikgen', 'data': model_settings.ikgen}, broadcast=True) + emit('from_server', {'cmd': 'updatetemp', 'data': model_settings.temp}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatetopp', 'data': model_settings.top_p}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updateikgen', 'data': model_settings.ikgen}, broadcast=True, room="UI_1") - emit('from_server', {'cmd': 'updateanotedepth', 'data': story_settings.andepth}, broadcast=True) - emit('from_server', {'cmd': 'updatewidepth', 'data': user_settings.widepth}, broadcast=True) - emit('from_server', {'cmd': 'updateuseprompt', 'data': story_settings.useprompt}, broadcast=True) - emit('from_server', {'cmd': 'updateadventure', 'data': story_settings.adventure}, broadcast=True) - emit('from_server', {'cmd': 'updatechatmode', 'data': story_settings.chatmode}, broadcast=True) - emit('from_server', {'cmd': 'updatedynamicscan', 'data': story_settings.dynamicscan}, broadcast=True) - emit('from_server', {'cmd': 'updateautosave', 'data': user_settings.autosave}, broadcast=True) - emit('from_server', {'cmd': 'updatenopromptgen', 'data': user_settings.nopromptgen}, broadcast=True) - emit('from_server', {'cmd': 'updaterngpersist', 'data': user_settings.rngpersist}, broadcast=True) - emit('from_server', {'cmd': 'updatenogenmod', 'data': user_settings.nogenmod}, broadcast=True) + emit('from_server', {'cmd': 'updateanotedepth', 'data': story_settings.andepth}, broadcast=True, 
room="UI_1") + emit('from_server', {'cmd': 'updatewidepth', 'data': user_settings.widepth}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updateuseprompt', 'data': story_settings.useprompt}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updateadventure', 'data': story_settings.adventure}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatechatmode', 'data': story_settings.chatmode}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatedynamicscan', 'data': story_settings.dynamicscan}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updateautosave', 'data': user_settings.autosave}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatenopromptgen', 'data': user_settings.nopromptgen}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updaterngpersist', 'data': user_settings.rngpersist}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatenogenmod', 'data': user_settings.nogenmod}, broadcast=True, room="UI_1") - emit('from_server', {'cmd': 'updatefrmttriminc', 'data': user_settings.formatoptns["frmttriminc"]}, broadcast=True) - emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': user_settings.formatoptns["frmtrmblln"]}, broadcast=True) - emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': user_settings.formatoptns["frmtrmspch"]}, broadcast=True) - emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': user_settings.formatoptns["frmtadsnsp"]}, broadcast=True) - emit('from_server', {'cmd': 'updatesingleline', 'data': user_settings.formatoptns["singleline"]}, broadcast=True) + emit('from_server', {'cmd': 'updatefrmttriminc', 'data': user_settings.formatoptns["frmttriminc"]}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatefrmtrmblln', 'data': user_settings.formatoptns["frmtrmblln"]}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': user_settings.formatoptns["frmtrmspch"]}, broadcast=True, room="UI_1") + 
emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': user_settings.formatoptns["frmtadsnsp"]}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'updatesingleline', 'data': user_settings.formatoptns["singleline"]}, broadcast=True, room="UI_1") # Allow toggle events again - emit('from_server', {'cmd': 'allowtoggle', 'data': True}, broadcast=True) + emit('from_server', {'cmd': 'allowtoggle', 'data': True}, broadcast=True, room="UI_1") #==================================================================# # Sets the logical and display states for the AI Busy condition @@ -4374,10 +4378,10 @@ def refresh_settings(): def set_aibusy(state): if(state): system_settings.aibusy = True - emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, broadcast=True) + emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, broadcast=True, room="UI_1") else: system_settings.aibusy = False - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True, room="UI_1") #==================================================================# # @@ -4389,8 +4393,8 @@ def editrequest(n): txt = story_settings.actions[n-1] story_settings.editln = n - emit('from_server', {'cmd': 'setinputtext', 'data': txt}, broadcast=True) - emit('from_server', {'cmd': 'enablesubmit', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'setinputtext', 'data': txt}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'enablesubmit', 'data': ''}, broadcast=True, room="UI_1") #==================================================================# # @@ -4404,8 +4408,8 @@ def editsubmit(data): story_settings.mode = "play" update_story_chunk(story_settings.editln) - emit('from_server', {'cmd': 'texteffect', 'data': story_settings.editln}, broadcast=True) - emit('from_server', {'cmd': 'editmode', 'data': 'false'}) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.editln}, 
broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, room="UI_1") send_debug() #==================================================================# @@ -4421,7 +4425,7 @@ def deleterequest(): story_settings.actions.delete_action(story_settings.editln-1) story_settings.mode = "play" remove_story_chunk(story_settings.editln) - emit('from_server', {'cmd': 'editmode', 'data': 'false'}) + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, room="UI_1") send_debug() #==================================================================# @@ -4442,8 +4446,8 @@ def inlineedit(chunk, data): setgamesaved(False) update_story_chunk(chunk) - emit('from_server', {'cmd': 'texteffect', 'data': chunk}, broadcast=True) - emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'texteffect', 'data': chunk}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True, room="UI_1") send_debug() #==================================================================# @@ -4456,8 +4460,8 @@ def inlinedelete(chunk): if(chunk == 0): # Send error message update_story_chunk(chunk) - emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}) - emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."}, room="UI_1") + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True, room="UI_1") else: if(chunk-1 in story_settings.actions): story_settings.actions.delete_action(chunk-1) @@ -4465,7 +4469,7 @@ def inlinedelete(chunk): print(f"WARNING: Attempted to delete non-existent chunk {chunk}") setgamesaved(False) remove_story_chunk(chunk) - emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True, room="UI_1") send_debug() 
#==================================================================# @@ -4474,13 +4478,13 @@ def inlinedelete(chunk): def togglememorymode(): if(story_settings.mode == "play"): story_settings.mode = "memory" - emit('from_server', {'cmd': 'memmode', 'data': 'true'}, broadcast=True) - emit('from_server', {'cmd': 'setinputtext', 'data': story_settings.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'memmode', 'data': 'true'}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setinputtext', 'data': story_settings.memory}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True, room="UI_1") elif(story_settings.mode == "memory"): story_settings.mode = "play" - emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True, room="UI_1") #==================================================================# # Toggles the game mode for WI editing and sends UI commands @@ -4488,13 +4492,13 @@ def togglememorymode(): def togglewimode(): if(story_settings.mode == "play"): story_settings.mode = "wi" - emit('from_server', {'cmd': 'wimode', 'data': 'true'}, broadcast=True) + emit('from_server', {'cmd': 'wimode', 'data': 'true'}, broadcast=True, room="UI_1") elif(story_settings.mode == "wi"): # Commit WI fields first requestwi() # Then set UI state back to Play story_settings.mode = "play" - emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True, room="UI_1") sendwi() 
#==================================================================# @@ -4512,7 +4516,7 @@ def addwiitem(folder_uid=None): story_settings.worldinfo[-1]["uid"] = uid if(folder_uid is not None): story_settings.wifolders_u[folder_uid].append(story_settings.worldinfo[-1]) - emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True) + emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True, room="UI_1") #==================================================================# # Creates a new WI folder with an unused cryptographically secure random UID @@ -4526,7 +4530,7 @@ def addwifolder(): story_settings.wifolders_d[uid] = ob story_settings.wifolders_l.append(uid) story_settings.wifolders_u[uid] = [] - emit('from_server', {'cmd': 'addwifolder', 'uid': uid, 'data': ob}, broadcast=True) + emit('from_server', {'cmd': 'addwifolder', 'uid': uid, 'data': ob}, broadcast=True, room="UI_1") addwiitem(folder_uid=uid) #==================================================================# @@ -4573,7 +4577,7 @@ def sendwi(): ln = len(story_settings.worldinfo) # Clear contents of WI container - emit('from_server', {'cmd': 'wistart', 'wifolders_d': story_settings.wifolders_d, 'wifolders_l': story_settings.wifolders_l, 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'wistart', 'wifolders_d': story_settings.wifolders_d, 'wifolders_l': story_settings.wifolders_l, 'data': ''}, broadcast=True, room="UI_1") # Stable-sort WI entries in order of folder stablesortwi() @@ -4588,12 +4592,12 @@ def sendwi(): last_folder = ... 
for wi in story_settings.worldinfo: if(wi["folder"] != last_folder): - emit('from_server', {'cmd': 'addwifolder', 'uid': wi["folder"], 'data': story_settings.wifolders_d[wi["folder"]] if wi["folder"] is not None else None}, broadcast=True) + emit('from_server', {'cmd': 'addwifolder', 'uid': wi["folder"], 'data': story_settings.wifolders_d[wi["folder"]] if wi["folder"] is not None else None}, broadcast=True, room="UI_1") last_folder = wi["folder"] ob = wi - emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True) + emit('from_server', {'cmd': 'addwiitem', 'data': ob}, broadcast=True, room="UI_1") - emit('from_server', {'cmd': 'wifinish', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'wifinish', 'data': ''}, broadcast=True, room="UI_1") #==================================================================# # Request current contents of all WI HTML elements @@ -4602,7 +4606,7 @@ def requestwi(): list = [] for wi in story_settings.worldinfo: list.append(wi["num"]) - emit('from_server', {'cmd': 'requestwiitem', 'data': list}) + emit('from_server', {'cmd': 'requestwiitem', 'data': list}, room="UI_1") #==================================================================# # Stable-sort WI items so that items in the same folder are adjacent, @@ -4779,17 +4783,17 @@ def checkworldinfo(txt, allowed_entries=None, allowed_folders=None, force_use_tx # Commit changes to Memory storage #==================================================================# def memsubmit(data): - emit('from_server', {'cmd': 'setinputtext', 'data': data}, broadcast=True) + emit('from_server', {'cmd': 'setinputtext', 'data': data}, broadcast=True, room="UI_1") # Maybe check for length at some point # For now just send it to storage if(data != story_settings.memory): setgamesaved(False) story_settings.memory = data story_settings.mode = "play" - emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'memmode', 'data': 'false'}, 
broadcast=True, room="UI_1") # Ask for contents of Author's Note field - emit('from_server', {'cmd': 'getanote', 'data': ''}) + emit('from_server', {'cmd': 'getanote', 'data': ''}, room="UI_1") #==================================================================# # Commit changes to Author's Note @@ -4807,8 +4811,8 @@ def anotesubmit(data, template=""): settingschanged() story_settings.authornotetemplate = template - emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True, room="UI_1") #==================================================================# # Assembles game data into a request to InferKit API @@ -4857,7 +4861,7 @@ def ikrequest(txt): print("{0}{1}{2}".format(colors.CYAN, genout, colors.END)) story_settings.actions.append(genout) update_story_chunk('last') - emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True) + emit('from_server', {'cmd': 'texteffect', 'data': story_settings.actions.get_last_key() + 1 if len(story_settings.actions) else 0}, broadcast=True, room="UI_1") send_debug() set_aibusy(0) else: @@ -4869,7 +4873,7 @@ def ikrequest(txt): code = er["errors"][0]["extensions"]["code"] errmsg = "InferKit API Error: {0} - {1}".format(req.status_code, code) - emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True, room="UI_1") set_aibusy(0) #==================================================================# @@ -4962,7 +4966,7 @@ def oairequest(txt, min, max): message = er["error"]["message"] errmsg = "OpenAI API Error: 
{0} - {1}".format(type, message) - emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True) + emit('from_server', {'cmd': 'errmsg', 'data': errmsg}, broadcast=True, room="UI_1") set_aibusy(0) #==================================================================# @@ -4970,11 +4974,11 @@ def oairequest(txt, min, max): #==================================================================# def exitModes(): if(story_settings.mode == "edit"): - emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'editmode', 'data': 'false'}, broadcast=True, room="UI_1") elif(story_settings.mode == "memory"): - emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'memmode', 'data': 'false'}, broadcast=True, room="UI_1") elif(story_settings.mode == "wi"): - emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True) + emit('from_server', {'cmd': 'wimode', 'data': 'false'}, broadcast=True, room="UI_1") story_settings.mode = "play" #==================================================================# @@ -4992,15 +4996,15 @@ def saveas(data): user_settings.saveow = False user_settings.svowname = "" if(e is None): - emit('from_server', {'cmd': 'hidesaveas', 'data': ''}) + emit('from_server', {'cmd': 'hidesaveas', 'data': ''}, room="UI_1") else: print("{0}{1}{2}".format(colors.RED, str(e), colors.END)) - emit('from_server', {'cmd': 'popuperror', 'data': str(e)}) + emit('from_server', {'cmd': 'popuperror', 'data': str(e)}, room="UI_1") else: # File exists, prompt for overwrite user_settings.saveow = True user_settings.svowname = name - emit('from_server', {'cmd': 'askforoverwrite', 'data': ''}) + emit('from_server', {'cmd': 'askforoverwrite', 'data': ''}, room="UI_1") #==================================================================# # Launch in-browser story-delete prompt @@ -5010,13 +5014,13 @@ def deletesave(name): e = fileops.deletesave(name) if(e is None): 
if(system_settings.smandelete): - emit('from_server', {'cmd': 'hidepopupdelete', 'data': ''}) + emit('from_server', {'cmd': 'hidepopupdelete', 'data': ''}, room="UI_1") getloadlist() else: - emit('from_server', {'cmd': 'popuperror', 'data': "The server denied your request to delete this story"}) + emit('from_server', {'cmd': 'popuperror', 'data': "The server denied your request to delete this story"}, room="UI_1") else: print("{0}{1}{2}".format(colors.RED, str(e), colors.END)) - emit('from_server', {'cmd': 'popuperror', 'data': str(e)}) + emit('from_server', {'cmd': 'popuperror', 'data': str(e)}, room="UI_1") #==================================================================# # Launch in-browser story-rename prompt @@ -5031,18 +5035,18 @@ def renamesave(name, newname): user_settings.svowname = "" if(e is None): if(system_settings.smanrename): - emit('from_server', {'cmd': 'hidepopuprename', 'data': ''}) + emit('from_server', {'cmd': 'hidepopuprename', 'data': ''}, room="UI_1") getloadlist() else: - emit('from_server', {'cmd': 'popuperror', 'data': "The server denied your request to rename this story"}) + emit('from_server', {'cmd': 'popuperror', 'data': "The server denied your request to rename this story"}, room="UI_1") else: print("{0}{1}{2}".format(colors.RED, str(e), colors.END)) - emit('from_server', {'cmd': 'popuperror', 'data': str(e)}) + emit('from_server', {'cmd': 'popuperror', 'data': str(e)}, room="UI_1") else: # File exists, prompt for overwrite user_settings.saveow = True user_settings.svowname = newname - emit('from_server', {'cmd': 'askforoverwrite', 'data': ''}) + emit('from_server', {'cmd': 'askforoverwrite', 'data': ''}, room="UI_1") #==================================================================# # Save the currently running story @@ -5052,7 +5056,7 @@ def save(): if(".json" in system_settings.savedir): saveRequest(system_settings.savedir) else: - emit('from_server', {'cmd': 'saveas', 'data': ''}) + emit('from_server', {'cmd': 'saveas', 
'data': ''}, room="UI_1") #==================================================================# # Save the story via file browser @@ -5128,7 +5132,7 @@ def saveRequest(savpath, savepins=True): if(filename.endswith('.json')): filename = filename[:-5] user_settings.laststory = filename - emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True, room="UI_1") setgamesaved(True) print("{0}Story saved to {1}!{2}".format(colors.GREEN, path.basename(savpath), colors.END)) @@ -5136,14 +5140,14 @@ def saveRequest(savpath, savepins=True): # Show list of saved stories #==================================================================# def getloadlist(): - emit('from_server', {'cmd': 'buildload', 'data': fileops.getstoryfiles()}) + emit('from_server', {'cmd': 'buildload', 'data': fileops.getstoryfiles()}, room="UI_1") #==================================================================# # Show list of soft prompts #==================================================================# def getsplist(): if(system_settings.allowsp): - emit('from_server', {'cmd': 'buildsp', 'data': fileops.getspfiles(model_settings.modeldim)}) + emit('from_server', {'cmd': 'buildsp', 'data': fileops.getspfiles(model_settings.modeldim)}, room="UI_1") #==================================================================# # Get list of userscripts @@ -5311,15 +5315,15 @@ def loadRequest(loadpath, filename=None): if(filename.endswith('.json')): _filename = filename[:-5] user_settings.laststory = _filename - emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True, room="UI_1") setgamesaved(True) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': 
story_settings.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True, room="UI_1") refresh_story() - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) - emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True, room="UI_1") print("{0}Story loaded from {1}!{2}".format(colors.GREEN, filename, colors.END)) send_debug() @@ -5343,7 +5347,7 @@ def importRequest(): user_settings.importjs = user_settings.importjs["stories"] # Clear Popup Contents - emit('from_server', {'cmd': 'clearpopup', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'clearpopup', 'data': ''}, broadcast=True, room="UI_1") # Initialize vars num = 0 @@ -5365,11 +5369,11 @@ def importRequest(): ob["acts"] = len(story["actions"]) elif("actionWindow" in story): ob["acts"] = len(story["actionWindow"]) - emit('from_server', {'cmd': 'addimportline', 'data': ob}) + emit('from_server', {'cmd': 'addimportline', 'data': ob}, room="UI_1") num += 1 # Show Popup - emit('from_server', {'cmd': 'popupshow', 'data': True}) + emit('from_server', {'cmd': 'popupshow', 'data': True}, room="UI_1") #==================================================================# # Import an AIDungon game selected in popup @@ -5468,15 +5472,15 @@ def importgame(): # Refresh game screen user_settings.laststory = None - emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) + 
emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True, room="UI_1") setgamesaved(False) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True, room="UI_1") refresh_story() - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) - emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True, room="UI_1") #==================================================================# # Import an aidg.club prompt and start a new game with it. 
@@ -5550,14 +5554,14 @@ def importAidgRequest(id): # Refresh game screen user_settings.laststory = None - emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True, room="UI_1") setgamesaved(False) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True, room="UI_1") refresh_story() - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True) + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True, room="UI_1") #==================================================================# # Import World Info JSON file @@ -5643,19 +5647,19 @@ def newGameRequest(): # Refresh game screen user_settings.laststory = None - emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True) + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True, room="UI_1") setgamesaved(True) sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) - emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True) - emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True, room="UI_1") + 
emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True, room="UI_1") setStartState() def randomGameRequest(topic, memory=""): if(system_settings.noai): newGameRequest() story_settings.memory = memory - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True, room="UI_1") return story_settings.recentrng = topic story_settings.recentrngm = memory @@ -5668,7 +5672,7 @@ def randomGameRequest(topic, memory=""): system_settings.lua_koboldbridge.feedback = None actionsubmit("", force_submit=True, force_prompt_gen=True) story_settings.memory = memory - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True) + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True, room="UI_1") def final_startup(): # Prevent tokenizer from taking extra time the first time it's used @@ -5749,7 +5753,7 @@ def send_debug(): except: pass - emit('from_server', {'cmd': 'debug_info', 'data': debug_info}, broadcast=True) + emit('from_server', {'cmd': 'debug_info', 'data': debug_info}, broadcast=True, room="UI_1") #==================================================================# # Event triggered when browser SocketIO detects a variable change #==================================================================# @@ -5825,6 +5829,22 @@ def UI_2_back(data): def UI_2_redo(data): pass +#==================================================================# +# Event triggered to relay a message +#==================================================================# +@socketio.on('relay') +def UI_2_relay(data): + socketio.emit(data[0], data[1], **data[2]) + + +#==================================================================# +# Test +#==================================================================# +@app.route("/actions") +def 
show_actions(): + return story_settings.actions.actions + + #==================================================================# # Final startup commands to launch Flask app @@ -5843,7 +5863,6 @@ if __name__ == "__main__": # Start Flask/SocketIO (Blocking, so this must be last method!) port = args.port if "port" in args and args.port is not None else 5000 - #socketio.run(app, host='0.0.0.0', port=port) if(system_settings.host): if(args.localtunnel): import subprocess, shutil diff --git a/gensettings.py b/gensettings.py index 14ba7970..648dcce0 100644 --- a/gensettings.py +++ b/gensettings.py @@ -319,7 +319,19 @@ gensettingstf = [ "menu_path": "user", "classname": "user", "name": "debug" - } + }, + { + "uitype": "dropdown", + "unit": "text", + "label": "Story Mode", + "id": "actionmode", + "default": 0, + "tooltip": "Choose the mode of KoboldAI", + "menu_path": "Story", + "classname": "story", + "name": "actionmode", + 'children': [{'text': 'Story', 'value': 0}, {'text':'Adventure','value':1}, {'text':'Chat', 'value':2}] + } ] gensettingsik =[{ diff --git a/koboldai_settings.py b/koboldai_settings.py index f76f2ca1..ee783061 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -1,8 +1,11 @@ from flask_socketio import emit, join_room, leave_room, rooms -import os -import re +import os, re, time, threading +import socketio as socketio_client socketio = None +main_thread_id = threading.get_ident() +rely_clients = {} + def clean_var_for_emit(value): if isinstance(value, KoboldStoryRegister): @@ -12,19 +15,43 @@ def clean_var_for_emit(value): else: return value -def process_variable_changes(classname, name, value, old_value): - #Special Case for KoboldStoryRegister - if isinstance(value, KoboldStoryRegister): - print("resetting") - socketio.emit("reset_story", {}, broadcast=True, room="UI_2") - for i in range(len(value.actions)): - socketio.emit("var_changed", {"classname": "actions", "name": "Selected Text", "old_value": None, "value": {"id": i, "text": 
value[i]}}, broadcast=True, room="UI_2") - socketio.emit("var_changed", {"classname": "actions", "name": "Options", "old_value": None, "value": {"id": i, "options": value.actions[i]['Options']}}, broadcast=True, room="UI_2") - else: - #print("{}: {} changed from {} to {}".format(classname, name, old_value, value)) - #if name == "Selected Text": - # print({"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}) - socketio.emit("var_changed", {"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}, broadcast=True, room="UI_2") +def process_variable_changes(classname, name, value, old_value, debug_message=None): + if socketio is not None: + if debug_message is not None: + print("{} {}: {} changed from {} to {}".format(debug_message, classname, name, old_value, value)) + if value != old_value: + #Special Case for KoboldStoryRegister + if isinstance(value, KoboldStoryRegister): + print("We got a story register") + print(value) + socketio.emit("reset_story", {}, broadcast=True, room="UI_2") + for i in range(len(value.actions)): + socketio.emit("var_changed", {"classname": "actions", "name": "Selected Text", "old_value": None, "value": {"id": i, "text": value[i]}}, include_self=True, broadcast=True, room="UI_2") + socketio.emit("var_changed", {"classname": "actions", "name": "Options", "old_value": None, "value": {"id": i, "options": value.actions[i]['Options']}}, include_self=True, broadcast=True, room="UI_2") + else: + #If we got a variable change from a thread other than what the app is run in, eventlet seems to block and no further messages are sent. 
Instead, we'll relay the message to the app and have the main thread send it + if main_thread_id != threading.get_ident(): + if threading.get_ident() in rely_clients: + sio = rely_clients[threading.get_ident()] + else: + start_time = time.time() + print("getting client") + sio = socketio_client.Client() + @sio.event + def connect(): + print("I'm connected!") + sio.connect('http://localhost:5000/?rely=true') + rely_clients[threading.get_ident()] = sio + print("got client, took {}".format(time.time()-start_time)) + #release no longer used clients + for thread in rely_clients: + if thread not in [x.ident for x in threading.enumerate()]: + del rely_clients[thread] + sio.emit("relay", {"emit": "var_changed", "data": {"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}, "include_self":True, "broadcast":True, "room":"UI_2"}) + else: + socketio.emit("var_changed", {"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}, include_self=True, broadcast=True, room="UI_2") + #eventlet.sleep() + #socketio.sleep(0) class settings(object): @@ -39,7 +66,6 @@ class settings(object): class model_settings(settings): local_only_variables = ['badwordsids', 'apikey', '_class_init'] settings_name = "model" - __class_initialized = False def __init__(self): self.model = "" # Model ID string chosen at startup self.model_type = "" # Model Type (Automatically taken from the model config) @@ -75,26 +101,20 @@ class model_settings(settings): self.presets = [] # Holder for presets self.selected_preset = "" - #Must be at end of __init__ - self.__class_initialized = True def __setattr__(self, name, value): old_value = getattr(self, name, None) super().__setattr__(name, value) - if self.__class_initialized and name != '__class_initialized': - #Put variable change actions here - if name not in self.local_only_variables and name[0] != "_": - 
process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) - - #Since I haven't migrated the old_ui to use the new actions class for options, let's sync the metadata and options here - if name == 'actions_metadata': - print(value) + #Put variable change actions here + if name not in self.local_only_variables and name[0] != "_": + process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + class story_settings(settings): + #local_only_variables = ['generated_tkns'] local_only_variables = [] settings_name = "story" - __class_initialized = False def __init__(self): self.lastact = "" # The last action received from the user self.submission = "" # Same as above, but after applying input formatting @@ -138,21 +158,17 @@ class story_settings(settings): self.dynamicscan = False self.recentedit = False - #Must be at end of __init__ - self.__class_initialized = True def __setattr__(self, name, value): old_value = getattr(self, name, None) super().__setattr__(name, value) - if self.__class_initialized and name != '__class_initialized': - #Put variable change actions here - if name not in self.local_only_variables and name[0] != "_": - process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + #Put variable change actions here + if name not in self.local_only_variables and name[0] != "_": + process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) class user_settings(settings): local_only_variables = [] settings_name = "user" - __class_initialized = False def __init__(self): self.wirmvwhtsp = False # Whether to remove leading whitespace from WI entries self.widepth = 3 # How many historical actions to scan for WI hits @@ -172,22 +188,18 @@ class user_settings(settings): self.nogenmod = False self.debug = False # If set to true, will send debug information to the client for display - #Must be at end of __init__ - 
self.__class_initialized = True def __setattr__(self, name, value): old_value = getattr(self, name, None) super().__setattr__(name, value) - if self.__class_initialized and name != '__class_initialized': - #Put variable change actions here - if name not in self.local_only_variables and name[0] != "_": - process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + #Put variable change actions here + if name not in self.local_only_variables and name[0] != "_": + process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) class system_settings(settings): local_only_variables = ['lua_state', 'lua_logname', 'lua_koboldbridge', 'lua_kobold', 'lua_koboldcore', 'regex_sl', 'acregex_ai', 'acregex_ui', 'comregex_ai', 'comregex_ui'] settings_name = "system" - __class_initialized = False def __init__(self): self.noai = False # Runs the script without starting up the transformers pipeline self.aibusy = False # Stops submissions while the AI is working @@ -234,16 +246,13 @@ class system_settings(settings): self.use_colab_tpu = os.environ.get("COLAB_TPU_ADDR", "") != "" or os.environ.get("TPU_NAME", "") != "" # Whether or not we're in a Colab TPU instance or Kaggle TPU instance and are going to use the TPU rather than the CPU self.aria2_port = 6799 #Specify the port on which aria2's RPC interface will be open if aria2 is installed (defaults to 6799) - #Must be at end of __init__ - self.__class_initialized = True def __setattr__(self, name, value): old_value = getattr(self, name, None) super().__setattr__(name, value) - if self.__class_initialized and name != '__class_initialized': - #Put variable change actions here - if name not in self.local_only_variables and name[0] != "_": - process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + #Put variable change actions here + if name not in self.local_only_variables and name[0] != "_": + 
process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) class KoboldStoryRegister(object): @@ -330,17 +339,10 @@ class KoboldStoryRegister(object): self.action_count+=1 if self.action_count in self.actions: self.actions[self.action_count]["Selected Text"] = text - print("looking for old option that matches") for item in self.actions[self.action_count]["Options"]: if item['text'] == text: - print("found it") old_options = self.actions[self.action_count]["Options"] del item - print("old: ") - print(old_options) - print() - print("New: ") - print(self.actions[self.action_count]["Options"]) process_variable_changes("actions", "Options", {"id": self.action_count, "options": self.actions[self.action_count]["Options"]}, {"id": self.action_count, "options": old_options}) else: @@ -348,25 +350,23 @@ class KoboldStoryRegister(object): process_variable_changes("actions", "Selected Text", {"id": self.action_count, "text": text}, None) def append_options(self, option_list): + print("appending options for {}".format(self.action_count+1)) if self.action_count+1 in self.actions: - print("1") - old_options = self.actions[self.action_count+1]["Options"] + old_options = self.actions[self.action_count+1]["Options"].copy() self.actions[self.action_count+1]['Options'].extend([{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False} for x in option_list]) - for item in option_list: - process_variable_changes("actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": old_options}) else: - print("2") old_options = None self.actions[self.action_count+1] = {"Selected Text": "", "Options": [{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False} for x in option_list]} - process_variable_changes("actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, 
"options": old_options}) + process_variable_changes("actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": old_options}, debug_message="wtf") def clear_unused_options(self, pointer=None): + print("clearing options for {}".format(self.action_count+1)) new_options = [] old_options = None if pointer is None: pointer = self.action_count+1 if pointer in self.actions: - old_options = self.actions[pointer]["Options"] + old_options = self.actions[pointer]["Options"].copy() self.actions[pointer]["Options"] = [x for x in self.actions[pointer]["Options"] if x["Pinned"] or x["Previous Selection"] or x["Edited"]] new_options = self.actions[pointer]["Options"] process_variable_changes("actions", "Options", {"id": pointer, "options": new_options}, {"id": pointer, "options": old_options}) @@ -382,20 +382,20 @@ class KoboldStoryRegister(object): def set_pin(self, action_step, option_number): if action_step in self.actions: if option_number < len(self.actions[action_step]['Options']): - old_options = self.actions[action_step]["Options"] + old_options = self.actions[action_step]["Options"].copy() self.actions[action_step]['Options'][option_number]['Pinned'] = True process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) def unset_pin(self, action_step, option_number): if action_step in self.actions: - old_options = self.actions[action_step]["Options"] + old_options = self.actions[action_step]["Options"].copy() if option_number < len(self.actions[action_step]['Options']): self.actions[action_step]['Options'][option_number]['Pinned'] = False process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) def use_option(self, action_step, option_number): if action_step in self.actions: - 
old_options = self.actions[action_step]["Options"] + old_options = self.actions[action_step]["Options"].copy() old_text = self.actions[action_step]["Selected Text"] if option_number < len(self.actions[action_step]['Options']): self.actions[action_step]["Selected Text"] = self.actions[action_step]['Options'][option_number]['text'] @@ -405,7 +405,7 @@ class KoboldStoryRegister(object): def delete_action(self, action_id): if action_id in self.actions: - old_options = self.actions[action_id]["Options"] + old_options = self.actions[action_id]["Options"].copy() old_text = self.actions[action_id]["Selected Text"] self.actions[action_id]["Options"].append({"text": self.actions[action_id]["Selected Text"], "Pinned": False, "Previous Selection": True, "Edited": False}) self.actions[action_id]["Selected Text"] = "" @@ -468,7 +468,7 @@ class KoboldStoryRegister(object): return [] else: if self.action_count+1 in self.actions: - return [[x, "pinned" if x['Pinned'] else 'normal'] for x in self.actions[self.action_count+1]["Options"] if x["Edited"] == False and x['Previous Selection'] == False] + return [[x['text'], "pinned" if x['Pinned'] else 'normal'] for x in self.actions[self.action_count+1]["Options"] if x["Edited"] == False and x['Previous Selection'] == False] else: return [] @@ -495,6 +495,12 @@ class KoboldStoryRegister(object): return [x for x in self.actions[self.action_count+1]['Options'] if x['Pinned'] or x['Previous Selection']] else: return [] + + def __setattr__(self, name, value): + old_value = getattr(self, name, None) + super().__setattr__(name, value) + if name == 'action_count': + process_variable_changes("actions", "Action Count", value, old_value) diff --git a/static/application.js b/static/application.js index 8b2cbb53..b256e3ef 100644 --- a/static/application.js +++ b/static/application.js @@ -1359,7 +1359,6 @@ function setStartState() { function parsegenseqs(seqs) { seqselcontents.html(""); - console.log(seqs); var i; for(i=0; i { + console.log("Lost 
connection from: "+reason); // "transport error" +}); socket.on('reset_story', function(){reset_story();}); socket.on('var_changed', function(data){var_changed(data);}); //socket.onAny(function(event_name, data) {console.log({"event": event_name, "data": data});}); @@ -20,6 +22,7 @@ function disconnect() { } function reset_story() { + console.log("Resetting story"); var story_area = document.getElementById('Selected Text'); while (story_area.firstChild) { story_area.removeChild(story_area.firstChild); @@ -43,7 +46,7 @@ function fix_text(val) { } function create_options(data) { - console.log(data.value.options); + console.log(data); if (document.getElementById("Select Options Chunk "+data.value.id)) { var option_chunk = document.getElementById("Select Options Chunk "+data.value.id) } else { @@ -200,7 +203,7 @@ function var_changed(data) { } else { var elements_to_change = document.getElementsByClassName("var_sync_"+data.classname+"_"+data.name); for (item of elements_to_change) { - if (item.tagName.toLowerCase() === 'input') { + if ((item.tagName.toLowerCase() === 'input') || (item.tagName.toLowerCase() === 'select')) { item.value = fix_text(data.value); } else { item.textContent = fix_text(data.value); diff --git a/static/socket.io.min.js b/static/socket.io.min.js index 8560bedd..e627c13a 100644 --- a/static/socket.io.min.js +++ b/static/socket.io.min.js @@ -1,7 +1,7 @@ /*! - * Socket.IO v4.4.1 + * Socket.IO v4.5.1 * (c) 2014-2022 Guillermo Rauch * Released under the MIT License. 
*/ -!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).io=e()}(this,(function(){"use strict";function t(e){return t="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},t(e)}function e(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function n(t,e){for(var n=0;nt.length)&&(e=t.length);for(var n=0,r=new Array(e);n=t.length?{done:!0}:{done:!1,value:t[r++]}},e:function(t){throw t},f:o}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var i,s=!0,a=!1;return{s:function(){n=n.call(t)},n:function(){var t=n.next();return s=t.done,t},e:function(t){a=!0,i=t},f:function(){try{s||null==n.return||n.return()}finally{if(a)throw i}}}}var d=/^(?:(?![^:@]+:[^:@\/]*@)(http|https|ws|wss):\/\/)?((?:(([^:@]*)(?::([^:@]*))?)?@)?((?:[a-f0-9]{0,4}:){2,7}[a-f0-9]{0,4}|[^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/,y=["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"],v=function(t){var e=t,n=t.indexOf("["),r=t.indexOf("]");-1!=n&&-1!=r&&(t=t.substring(0,n)+t.substring(n,r).replace(/:/g,";")+t.substring(r,t.length));for(var o,i,s=d.exec(t||""),a={},c=14;c--;)a[y[c]]=s[c]||"";return-1!=n&&-1!=r&&(a.source=e,a.host=a.host.substring(1,a.host.length-1).replace(/;/g,":"),a.authority=a.authority.replace("[","").replace("]","").replace(/;/g,":"),a.ipv6uri=!0),a.pathNames=function(t,e){var n=/\/{2,9}/g,r=e.replace(n,"/").split("/");"/"!=e.substr(0,1)&&0!==e.length||r.splice(0,1);"/"==e.substr(e.length-1,1)&&r.splice(r.length-1,1);return 
r}(0,a.path),a.queryKey=(o=a.query,i={},o.replace(/(?:^|&)([^&=]*)=?([^&]*)/g,(function(t,e,n){e&&(i[e]=n)})),i),a};var m={exports:{}};try{m.exports="undefined"!=typeof XMLHttpRequest&&"withCredentials"in new XMLHttpRequest}catch(t){m.exports=!1}var g=m.exports,k="undefined"!=typeof self?self:"undefined"!=typeof window?window:Function("return this")();function b(t){var e=t.xdomain;try{if("undefined"!=typeof XMLHttpRequest&&(!e||g))return new XMLHttpRequest}catch(t){}if(!e)try{return new(k[["Active"].concat("Object").join("X")])("Microsoft.XMLHTTP")}catch(t){}}function w(t){for(var e=arguments.length,n=new Array(e>1?e-1:0),r=1;r1?{type:O[n],data:t.substring(1)}:{type:O[n]}:S},M=function(t,e){if(I){var n=function(t){var e,n,r,o,i,s=.75*t.length,a=t.length,c=0;"="===t[t.length-1]&&(s--,"="===t[t.length-2]&&s--);var u=new ArrayBuffer(s),h=new Uint8Array(u);for(e=0;e>4,h[c++]=(15&r)<<4|o>>2,h[c++]=(3&o)<<6|63&i;return u}(t);return U(n,e)}return{base64:!0,data:t}},U=function(t,e){return"blob"===e&&t instanceof ArrayBuffer?new Blob([t]):t},V=String.fromCharCode(30),H=function(t){i(o,t);var n=h(o);function o(t){var r;return e(this,o),(r=n.call(this)).writable=!1,A(c(r),t),r.opts=t,r.query=t.query,r.readyState="",r.socket=t.socket,r}return r(o,[{key:"onError",value:function(t,e){var n=new Error(t);return n.type="TransportError",n.description=e,f(s(o.prototype),"emit",this).call(this,"error",n),this}},{key:"open",value:function(){return"closed"!==this.readyState&&""!==this.readyState||(this.readyState="opening",this.doOpen()),this}},{key:"close",value:function(){return"opening"!==this.readyState&&"open"!==this.readyState||(this.doClose(),this.onClose()),this}},{key:"send",value:function(t){"open"===this.readyState&&this.write(t)}},{key:"onOpen",value:function(){this.readyState="open",this.writable=!0,f(s(o.prototype),"emit",this).call(this,"open")}},{key:"onData",value:function(t){var 
e=F(t,this.socket.binaryType);this.onPacket(e)}},{key:"onPacket",value:function(t){f(s(o.prototype),"emit",this).call(this,"packet",t)}},{key:"onClose",value:function(){this.readyState="closed",f(s(o.prototype),"emit",this).call(this,"close")}}]),o}(R),K="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_".split(""),Y={},z=0,$=0;function W(t){var e="";do{e=K[t%64]+e,t=Math.floor(t/64)}while(t>0);return e}function J(){var t=W(+new Date);return t!==D?(z=0,D=t):t+"."+W(z++)}for(;$<64;$++)Y[K[$]]=$;J.encode=W,J.decode=function(t){var e=0;for($=0;$0&&void 0!==arguments[0]?arguments[0]:{};return o(t,{xd:this.xd,xs:this.xs},this.opts),new nt(this.uri(),t)}},{key:"doWrite",value:function(t,e){var n=this,r=this.request({method:"POST",data:t});r.on("success",e),r.on("error",(function(t){n.onError("xhr post error",t)}))}},{key:"doPoll",value:function(){var t=this,e=this.request();e.on("data",this.onData.bind(this)),e.on("error",(function(e){t.onError("xhr poll error",e)})),this.pollXhr=e}}]),s}(Q),nt=function(t){i(o,t);var n=h(o);function o(t,r){var i;return e(this,o),A(c(i=n.call(this)),r),i.opts=r,i.method=r.method||"GET",i.uri=t,i.async=!1!==r.async,i.data=void 0!==r.data?r.data:null,i.create(),i}return r(o,[{key:"create",value:function(){var t=this,e=w(this.opts,"agent","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","autoUnref");e.xdomain=!!this.opts.xd,e.xscheme=!!this.opts.xs;var n=this.xhr=new b(e);try{n.open(this.method,this.uri,this.async);try{if(this.opts.extraHeaders)for(var r in n.setDisableHeaderCheck&&n.setDisableHeaderCheck(!0),this.opts.extraHeaders)this.opts.extraHeaders.hasOwnProperty(r)&&n.setRequestHeader(r,this.opts.extraHeaders[r])}catch(t){}if("POST"===this.method)try{n.setRequestHeader("Content-type","text/plain;charset=UTF-8")}catch(t){}try{n.setRequestHeader("Accept","*/*")}catch(t){}"withCredentials"in 
n&&(n.withCredentials=this.opts.withCredentials),this.opts.requestTimeout&&(n.timeout=this.opts.requestTimeout),n.onreadystatechange=function(){4===n.readyState&&(200===n.status||1223===n.status?t.onLoad():t.setTimeoutFn((function(){t.onError("number"==typeof n.status?n.status:0)}),0))},n.send(this.data)}catch(e){return void this.setTimeoutFn((function(){t.onError(e)}),0)}"undefined"!=typeof document&&(this.index=o.requestsCount++,o.requests[this.index]=this)}},{key:"onSuccess",value:function(){this.emit("success"),this.cleanup()}},{key:"onData",value:function(t){this.emit("data",t),this.onSuccess()}},{key:"onError",value:function(t){this.emit("error",t),this.cleanup(!0)}},{key:"cleanup",value:function(t){if(void 0!==this.xhr&&null!==this.xhr){if(this.xhr.onreadystatechange=Z,t)try{this.xhr.abort()}catch(t){}"undefined"!=typeof document&&delete o.requests[this.index],this.xhr=null}}},{key:"onLoad",value:function(){var t=this.xhr.responseText;null!==t&&this.onData(t)}},{key:"abort",value:function(){this.cleanup()}}]),o}(R);if(nt.requestsCount=0,nt.requests={},"undefined"!=typeof document)if("function"==typeof attachEvent)attachEvent("onunload",rt);else if("function"==typeof addEventListener){addEventListener("onpagehide"in k?"pagehide":"unload",rt,!1)}function rt(){for(var t in nt.requests)nt.requests.hasOwnProperty(t)&&nt.requests[t].abort()}var ot="function"==typeof Promise&&"function"==typeof Promise.resolve?function(t){return Promise.resolve().then(t)}:function(t,e){return e(t,0)},it=k.WebSocket||k.MozWebSocket,st="undefined"!=typeof navigator&&"string"==typeof navigator.product&&"reactnative"===navigator.product.toLowerCase(),at=function(t){i(o,t);var n=h(o);function o(t){var r;return e(this,o),(r=n.call(this,t)).supportsBinary=!t.forceBase64,r}return r(o,[{key:"name",get:function(){return"websocket"}},{key:"doOpen",value:function(){if(this.check()){var 
t=this.uri(),e=this.opts.protocols,n=st?{}:w(this.opts,"agent","perMessageDeflate","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","localAddress","protocolVersion","origin","maxPayload","family","checkServerIdentity");this.opts.extraHeaders&&(n.headers=this.opts.extraHeaders);try{this.ws=st?new it(t,e,n):e?new it(t,e):new it(t)}catch(t){return this.emit("error",t)}this.ws.binaryType=this.socket.binaryType||"arraybuffer",this.addEventListeners()}}},{key:"addEventListeners",value:function(){var t=this;this.ws.onopen=function(){t.opts.autoUnref&&t.ws._socket.unref(),t.onOpen()},this.ws.onclose=this.onClose.bind(this),this.ws.onmessage=function(e){return t.onData(e.data)},this.ws.onerror=function(e){return t.onError("websocket error",e)}}},{key:"write",value:function(t){var e=this;this.writable=!1;for(var n=function(n){var r=t[n],o=n===t.length-1;x(r,e.supportsBinary,(function(t){try{e.ws.send(t)}catch(t){}o&&ot((function(){e.writable=!0,e.emit("drain")}),e.setTimeoutFn)}))},r=0;r1&&void 0!==arguments[1]?arguments[1]:{};return e(this,a),r=s.call(this),n&&"object"===t(n)&&(i=n,n=null),n?(n=v(n),i.hostname=n.host,i.secure="https"===n.protocol||"wss"===n.protocol,i.port=n.port,n.query&&(i.query=n.query)):i.host&&(i.hostname=v(i.host).host),A(c(r),i),r.secure=null!=i.secure?i.secure:"undefined"!=typeof location&&"https:"===location.protocol,i.hostname&&!i.port&&(i.port=r.secure?"443":"80"),r.hostname=i.hostname||("undefined"!=typeof location?location.hostname:"localhost"),r.port=i.port||("undefined"!=typeof location&&location.port?location.port:r.secure?"443":"80"),r.transports=i.transports||["polling","websocket"],r.readyState="",r.writeBuffer=[],r.prevBufferLen=0,r.opts=o({path:"/engine.io",agent:!1,withCredentials:!1,upgrade:!0,timestampParam:"t",rememberUpgrade:!1,rejectUnauthorized:!0,perMessageDeflate:{threshold:1024},transportOptions:{},closeOnBeforeunload:!0},i),r.opts.path=r.opts.path.replace(/\/$/,"")+"/","string"==typeof 
r.opts.query&&(r.opts.query=G.decode(r.opts.query)),r.id=null,r.upgrades=null,r.pingInterval=null,r.pingTimeout=null,r.pingTimeoutTimer=null,"function"==typeof addEventListener&&(r.opts.closeOnBeforeunload&&addEventListener("beforeunload",(function(){r.transport&&(r.transport.removeAllListeners(),r.transport.close())}),!1),"localhost"!==r.hostname&&(r.offlineEventListener=function(){r.onClose("transport close")},addEventListener("offline",r.offlineEventListener,!1))),r.open(),r}return r(a,[{key:"createTransport",value:function(t){var e=function(t){var e={};for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n]);return e}(this.opts.query);e.EIO=4,e.transport=t,this.id&&(e.sid=this.id);var n=o({},this.opts.transportOptions[t],this.opts,{query:e,socket:this,hostname:this.hostname,secure:this.secure,port:this.port});return new ct[t](n)}},{key:"open",value:function(){var t,e=this;if(this.opts.rememberUpgrade&&a.priorWebsocketSuccess&&-1!==this.transports.indexOf("websocket"))t="websocket";else{if(0===this.transports.length)return void this.setTimeoutFn((function(){e.emitReserved("error","No transports available")}),0);t=this.transports[0]}this.readyState="opening";try{t=this.createTransport(t)}catch(t){return this.transports.shift(),void this.open()}t.open(),this.setTransport(t)}},{key:"setTransport",value:function(t){var e=this;this.transport&&this.transport.removeAllListeners(),this.transport=t,t.on("drain",this.onDrain.bind(this)).on("packet",this.onPacket.bind(this)).on("error",this.onError.bind(this)).on("close",(function(){e.onClose("transport close")}))}},{key:"probe",value:function(t){var e=this,n=this.createTransport(t),r=!1;a.priorWebsocketSuccess=!1;var 
o=function(){r||(n.send([{type:"ping",data:"probe"}]),n.once("packet",(function(t){if(!r)if("pong"===t.type&&"probe"===t.data){if(e.upgrading=!0,e.emitReserved("upgrading",n),!n)return;a.priorWebsocketSuccess="websocket"===n.name,e.transport.pause((function(){r||"closed"!==e.readyState&&(f(),e.setTransport(n),n.send([{type:"upgrade"}]),e.emitReserved("upgrade",n),n=null,e.upgrading=!1,e.flush())}))}else{var o=new Error("probe error");o.transport=n.name,e.emitReserved("upgradeError",o)}})))};function i(){r||(r=!0,f(),n.close(),n=null)}var s=function(t){var r=new Error("probe error: "+t);r.transport=n.name,i(),e.emitReserved("upgradeError",r)};function c(){s("transport closed")}function u(){s("socket closed")}function h(t){n&&t.name!==n.name&&i()}var f=function(){n.removeListener("open",o),n.removeListener("error",s),n.removeListener("close",c),e.off("close",u),e.off("upgrading",h)};n.once("open",o),n.once("error",s),n.once("close",c),this.once("close",u),this.once("upgrading",h),n.open()}},{key:"onOpen",value:function(){if(this.readyState="open",a.priorWebsocketSuccess="websocket"===this.transport.name,this.emitReserved("open"),this.flush(),"open"===this.readyState&&this.opts.upgrade&&this.transport.pause)for(var t=0,e=this.upgrades.length;t0;case bt.ACK:case bt.BINARY_ACK:return Array.isArray(n)}}}]),a}(R);var Et=function(){function t(n){e(this,t),this.packet=n,this.buffers=[],this.reconPack=n}return r(t,[{key:"takeBinaryData",value:function(t){if(this.buffers.push(t),this.buffers.length===this.reconPack.attachments){var e=gt(this.reconPack,this.buffers);return this.finishedReconstruction(),e}return null}},{key:"finishedReconstruction",value:function(){this.reconPack=null,this.buffers=[]}}]),t}(),At=Object.freeze({__proto__:null,protocol:5,get PacketType(){return bt},Encoder:wt,Decoder:_t});function Rt(t,e,n){return t.on(e,n),function(){t.off(e,n)}}var 
Tt=Object.freeze({connect:1,connect_error:1,disconnect:1,disconnecting:1,newListener:1,removeListener:1}),Ct=function(t){i(o,t);var n=h(o);function o(t,r,i){var s;return e(this,o),(s=n.call(this)).connected=!1,s.disconnected=!0,s.receiveBuffer=[],s.sendBuffer=[],s.ids=0,s.acks={},s.flags={},s.io=t,s.nsp=r,i&&i.auth&&(s.auth=i.auth),s.io._autoConnect&&s.open(),s}return r(o,[{key:"subEvents",value:function(){if(!this.subs){var t=this.io;this.subs=[Rt(t,"open",this.onopen.bind(this)),Rt(t,"packet",this.onpacket.bind(this)),Rt(t,"error",this.onerror.bind(this)),Rt(t,"close",this.onclose.bind(this))]}}},{key:"active",get:function(){return!!this.subs}},{key:"connect",value:function(){return this.connected||(this.subEvents(),this.io._reconnecting||this.io.open(),"open"===this.io._readyState&&this.onopen()),this}},{key:"open",value:function(){return this.connect()}},{key:"send",value:function(){for(var t=arguments.length,e=new Array(t),n=0;n1?e-1:0),r=1;r0&&t.jitter<=1?t.jitter:0,this.attempts=0}St.prototype.duration=function(){var t=this.ms*Math.pow(this.factor,this.attempts++);if(this.jitter){var e=Math.random(),n=Math.floor(e*this.jitter*t);t=0==(1&Math.floor(10*e))?t-n:t+n}return 0|Math.min(t,this.max)},St.prototype.reset=function(){this.attempts=0},St.prototype.setMin=function(t){this.ms=t},St.prototype.setMax=function(t){this.max=t},St.prototype.setJitter=function(t){this.jitter=t};var Bt=function(n){i(s,n);var o=h(s);function s(n,r){var i,a;e(this,s),(i=o.call(this)).nsps={},i.subs=[],n&&"object"===t(n)&&(r=n,n=void 0),(r=r||{}).path=r.path||"/socket.io",i.opts=r,A(c(i),r),i.reconnection(!1!==r.reconnection),i.reconnectionAttempts(r.reconnectionAttempts||1/0),i.reconnectionDelay(r.reconnectionDelay||1e3),i.reconnectionDelayMax(r.reconnectionDelayMax||5e3),i.randomizationFactor(null!==(a=r.randomizationFactor)&&void 0!==a?a:.5),i.backoff=new 
Ot({min:i.reconnectionDelay(),max:i.reconnectionDelayMax(),jitter:i.randomizationFactor()}),i.timeout(null==r.timeout?2e4:r.timeout),i._readyState="closed",i.uri=n;var u=r.parser||At;return i.encoder=new u.Encoder,i.decoder=new u.Decoder,i._autoConnect=!1!==r.autoConnect,i._autoConnect&&i.open(),i}return r(s,[{key:"reconnection",value:function(t){return arguments.length?(this._reconnection=!!t,this):this._reconnection}},{key:"reconnectionAttempts",value:function(t){return void 0===t?this._reconnectionAttempts:(this._reconnectionAttempts=t,this)}},{key:"reconnectionDelay",value:function(t){var e;return void 0===t?this._reconnectionDelay:(this._reconnectionDelay=t,null===(e=this.backoff)||void 0===e||e.setMin(t),this)}},{key:"randomizationFactor",value:function(t){var e;return void 0===t?this._randomizationFactor:(this._randomizationFactor=t,null===(e=this.backoff)||void 0===e||e.setJitter(t),this)}},{key:"reconnectionDelayMax",value:function(t){var e;return void 0===t?this._reconnectionDelayMax:(this._reconnectionDelayMax=t,null===(e=this.backoff)||void 0===e||e.setMax(t),this)}},{key:"timeout",value:function(t){return arguments.length?(this._timeout=t,this):this._timeout}},{key:"maybeReconnectOnOpen",value:function(){!this._reconnecting&&this._reconnection&&0===this.backoff.attempts&&this.reconnect()}},{key:"open",value:function(t){var e=this;if(~this._readyState.indexOf("open"))return this;this.engine=new ut(this.uri,this.opts);var n=this.engine,r=this;this._readyState="opening",this.skipReconnect=!1;var o=Rt(n,"open",(function(){r.onopen(),t&&t()})),i=Rt(n,"error",(function(n){r.cleanup(),r._readyState="closed",e.emitReserved("error",n),t?t(n):r.maybeReconnectOnOpen()}));if(!1!==this._timeout){var s=this._timeout;0===s&&o();var a=this.setTimeoutFn((function(){o(),n.close(),n.emit("error",new Error("timeout"))}),s);this.opts.autoUnref&&a.unref(),this.subs.push((function(){clearTimeout(a)}))}return 
this.subs.push(o),this.subs.push(i),this}},{key:"connect",value:function(t){return this.open(t)}},{key:"onopen",value:function(){this.cleanup(),this._readyState="open",this.emitReserved("open");var t=this.engine;this.subs.push(Rt(t,"ping",this.onping.bind(this)),Rt(t,"data",this.ondata.bind(this)),Rt(t,"error",this.onerror.bind(this)),Rt(t,"close",this.onclose.bind(this)),Rt(this.decoder,"decoded",this.ondecoded.bind(this)))}},{key:"onping",value:function(){this.emitReserved("ping")}},{key:"ondata",value:function(t){this.decoder.add(t)}},{key:"ondecoded",value:function(t){this.emitReserved("packet",t)}},{key:"onerror",value:function(t){this.emitReserved("error",t)}},{key:"socket",value:function(t,e){var n=this.nsps[t];return n||(n=new Ct(this,t,e),this.nsps[t]=n),n}},{key:"_destroy",value:function(t){for(var e=0,n=Object.keys(this.nsps);e=this._reconnectionAttempts)this.backoff.reset(),this.emitReserved("reconnect_failed"),this._reconnecting=!1;else{var n=this.backoff.duration();this._reconnecting=!0;var r=this.setTimeoutFn((function(){e.skipReconnect||(t.emitReserved("reconnect_attempt",e.backoff.attempts),e.skipReconnect||e.open((function(n){n?(e._reconnecting=!1,e.reconnect(),t.emitReserved("reconnect_error",n)):e.onreconnect()})))}),n);this.opts.autoUnref&&r.unref(),this.subs.push((function(){clearTimeout(r)}))}}},{key:"onreconnect",value:function(){var t=this.backoff.attempts;this._reconnecting=!1,this.backoff.reset(),this.emitReserved("reconnect",t)}}]),s}(R),Nt={};function xt(e,n){"object"===t(e)&&(n=e,e=void 0);var r,o=function(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2?arguments[2]:void 0,r=t;n=n||"undefined"!=typeof location&&location,null==t&&(t=n.protocol+"//"+n.host),"string"==typeof t&&("/"===t.charAt(0)&&(t="/"===t.charAt(1)?n.protocol+t:n.host+t),/^(https?|wss?):\/\//.test(t)||(t=void 
0!==n?n.protocol+"//"+t:"https://"+t),r=v(t)),r.port||(/^(http|ws)$/.test(r.protocol)?r.port="80":/^(http|ws)s$/.test(r.protocol)&&(r.port="443")),r.path=r.path||"/";var o=-1!==r.host.indexOf(":")?"["+r.host+"]":r.host;return r.id=r.protocol+"://"+o+":"+r.port+e,r.href=r.protocol+"://"+o+(n&&n.port===r.port?"":":"+r.port),r}(e,(n=n||{}).path||"/socket.io"),i=o.source,s=o.id,a=o.path,c=Nt[s]&&a in Nt[s].nsps;return n.forceNew||n["force new connection"]||!1===n.multiplex||c?r=new Bt(i,n):(Nt[s]||(Nt[s]=new Bt(i,n)),r=Nt[s]),o.query&&!n.query&&(n.query=o.queryKey),r.socket(o.path,n)}return o(xt,{Manager:Bt,Socket:Ct,io:xt,connect:xt}),xt})); +!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).io=e()}(this,(function(){"use strict";function t(e){return t="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},t(e)}function e(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function n(t,e){for(var n=0;nt.length)&&(e=t.length);for(var n=0,r=new Array(e);n=t.length?{done:!0}:{done:!1,value:t[r++]}},e:function(t){throw t},f:i}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var o,s=!0,a=!1;return{s:function(){n=n.call(t)},n:function(){var t=n.next();return s=t.done,t},e:function(t){a=!0,o=t},f:function(){try{s||null==n.return||n.return()}finally{if(a)throw o}}}}var g=Object.create(null);g.open="0",g.close="1",g.ping="2",g.pong="3",g.message="4",g.upgrade="5",g.noop="6";var m=Object.create(null);Object.keys(g).forEach((function(t){m[g[t]]=t}));for(var k={type:"error",data:"parser error"},b="function"==typeof Blob||"undefined"!=typeof Blob&&"[object 
BlobConstructor]"===Object.prototype.toString.call(Blob),w="function"==typeof ArrayBuffer,_=function(t,e,n){var r,i=t.type,o=t.data;return b&&o instanceof Blob?e?n(o):A(o,n):w&&(o instanceof ArrayBuffer||(r=o,"function"==typeof ArrayBuffer.isView?ArrayBuffer.isView(r):r&&r.buffer instanceof ArrayBuffer))?e?n(o):A(new Blob([o]),n):n(g[i]+(o||""))},A=function(t,e){var n=new FileReader;return n.onload=function(){var t=n.result.split(",")[1];e("b"+t)},n.readAsDataURL(t)},E="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",O="undefined"==typeof Uint8Array?[]:new Uint8Array(256),R=0;R1?{type:m[n],data:t.substring(1)}:{type:m[n]}:k},B=function(t,e){if(T){var n=function(t){var e,n,r,i,o,s=.75*t.length,a=t.length,c=0;"="===t[t.length-1]&&(s--,"="===t[t.length-2]&&s--);var u=new ArrayBuffer(s),h=new Uint8Array(u);for(e=0;e>4,h[c++]=(15&r)<<4|i>>2,h[c++]=(3&i)<<6|63&o;return u}(t);return S(n,e)}return{base64:!0,data:t}},S=function(t,e){return"blob"===e&&t instanceof ArrayBuffer?new Blob([t]):t},N=String.fromCharCode(30);function x(t){if(t)return function(t){for(var e in x.prototype)t[e]=x.prototype[e];return t}(t)}x.prototype.on=x.prototype.addEventListener=function(t,e){return this._callbacks=this._callbacks||{},(this._callbacks["$"+t]=this._callbacks["$"+t]||[]).push(e),this},x.prototype.once=function(t,e){function n(){this.off(t,n),e.apply(this,arguments)}return n.fn=e,this.on(t,n),this},x.prototype.off=x.prototype.removeListener=x.prototype.removeAllListeners=x.prototype.removeEventListener=function(t,e){if(this._callbacks=this._callbacks||{},0==arguments.length)return this._callbacks={},this;var n,r=this._callbacks["$"+t];if(!r)return this;if(1==arguments.length)return delete this._callbacks["$"+t],this;for(var i=0;i1?e-1:0),r=1;r0);return e}function z(){var t=Y(+new Date);return t!==D?(H=0,D=t):t+"."+Y(H++)}for(;K<64;K++)V[U[K]]=K;function W(t){var e="";for(var n in 
t)t.hasOwnProperty(n)&&(e.length&&(e+="&"),e+=encodeURIComponent(n)+"="+encodeURIComponent(t[n]));return e}function $(t){for(var e={},n=t.split("&"),r=0,i=n.length;r0&&void 0!==arguments[0]?arguments[0]:{};return i(t,{xd:this.xd,xs:this.xs},this.opts),new et(this.uri(),t)}},{key:"doWrite",value:function(t,e){var n=this,r=this.request({method:"POST",data:t});r.on("success",e),r.on("error",(function(t,e){n.onError("xhr post error",t,e)}))}},{key:"doPoll",value:function(){var t=this,e=this.request();e.on("data",this.onData.bind(this)),e.on("error",(function(e,n){t.onError("xhr poll error",e,n)})),this.pollXhr=e}}]),s}(M),et=function(t){o(i,t);var n=p(i);function i(t,r){var o;return e(this,i),I(f(o=n.call(this)),r),o.opts=r,o.method=r.method||"GET",o.uri=t,o.async=!1!==r.async,o.data=void 0!==r.data?r.data:null,o.create(),o}return r(i,[{key:"create",value:function(){var t=this,e=P(this.opts,"agent","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","autoUnref");e.xdomain=!!this.opts.xd,e.xscheme=!!this.opts.xs;var n=this.xhr=new G(e);try{n.open(this.method,this.uri,this.async);try{if(this.opts.extraHeaders)for(var r in n.setDisableHeaderCheck&&n.setDisableHeaderCheck(!0),this.opts.extraHeaders)this.opts.extraHeaders.hasOwnProperty(r)&&n.setRequestHeader(r,this.opts.extraHeaders[r])}catch(t){}if("POST"===this.method)try{n.setRequestHeader("Content-type","text/plain;charset=UTF-8")}catch(t){}try{n.setRequestHeader("Accept","*/*")}catch(t){}"withCredentials"in n&&(n.withCredentials=this.opts.withCredentials),this.opts.requestTimeout&&(n.timeout=this.opts.requestTimeout),n.onreadystatechange=function(){4===n.readyState&&(200===n.status||1223===n.status?t.onLoad():t.setTimeoutFn((function(){t.onError("number"==typeof n.status?n.status:0)}),0))},n.send(this.data)}catch(e){return void this.setTimeoutFn((function(){t.onError(e)}),0)}"undefined"!=typeof 
document&&(this.index=i.requestsCount++,i.requests[this.index]=this)}},{key:"onError",value:function(t){this.emitReserved("error",t,this.xhr),this.cleanup(!0)}},{key:"cleanup",value:function(t){if(void 0!==this.xhr&&null!==this.xhr){if(this.xhr.onreadystatechange=Q,t)try{this.xhr.abort()}catch(t){}"undefined"!=typeof document&&delete i.requests[this.index],this.xhr=null}}},{key:"onLoad",value:function(){var t=this.xhr.responseText;null!==t&&(this.emitReserved("data",t),this.emitReserved("success"),this.cleanup())}},{key:"abort",value:function(){this.cleanup()}}]),i}(x);if(et.requestsCount=0,et.requests={},"undefined"!=typeof document)if("function"==typeof attachEvent)attachEvent("onunload",nt);else if("function"==typeof addEventListener){addEventListener("onpagehide"in L?"pagehide":"unload",nt,!1)}function nt(){for(var t in et.requests)et.requests.hasOwnProperty(t)&&et.requests[t].abort()}var rt="function"==typeof Promise&&"function"==typeof Promise.resolve?function(t){return Promise.resolve().then(t)}:function(t,e){return e(t,0)},it=L.WebSocket||L.MozWebSocket,ot="undefined"!=typeof navigator&&"string"==typeof navigator.product&&"reactnative"===navigator.product.toLowerCase(),st=function(t){o(i,t);var n=p(i);function i(t){var r;return e(this,i),(r=n.call(this,t)).supportsBinary=!t.forceBase64,r}return r(i,[{key:"name",get:function(){return"websocket"}},{key:"doOpen",value:function(){if(this.check()){var t=this.uri(),e=this.opts.protocols,n=ot?{}:P(this.opts,"agent","perMessageDeflate","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","localAddress","protocolVersion","origin","maxPayload","family","checkServerIdentity");this.opts.extraHeaders&&(n.headers=this.opts.extraHeaders);try{this.ws=ot?new it(t,e,n):e?new it(t,e):new it(t)}catch(t){return this.emitReserved("error",t)}this.ws.binaryType=this.socket.binaryType||"arraybuffer",this.addEventListeners()}}},{key:"addEventListeners",value:function(){var 
t=this;this.ws.onopen=function(){t.opts.autoUnref&&t.ws._socket.unref(),t.onOpen()},this.ws.onclose=function(e){return t.onClose({description:"websocket connection closed",context:e})},this.ws.onmessage=function(e){return t.onData(e.data)},this.ws.onerror=function(e){return t.onError("websocket error",e)}}},{key:"write",value:function(t){var e=this;this.writable=!1;for(var n=function(n){var r=t[n],i=n===t.length-1;_(r,e.supportsBinary,(function(t){try{e.ws.send(t)}catch(t){}i&&rt((function(){e.writable=!0,e.emitReserved("drain")}),e.setTimeoutFn)}))},r=0;r1&&void 0!==arguments[1]?arguments[1]:{};return e(this,a),r=s.call(this),n&&"object"===t(n)&&(o=n,n=null),n?(n=ht(n),o.hostname=n.host,o.secure="https"===n.protocol||"wss"===n.protocol,o.port=n.port,n.query&&(o.query=n.query)):o.host&&(o.hostname=ht(o.host).host),I(f(r),o),r.secure=null!=o.secure?o.secure:"undefined"!=typeof location&&"https:"===location.protocol,o.hostname&&!o.port&&(o.port=r.secure?"443":"80"),r.hostname=o.hostname||("undefined"!=typeof location?location.hostname:"localhost"),r.port=o.port||("undefined"!=typeof location&&location.port?location.port:r.secure?"443":"80"),r.transports=o.transports||["polling","websocket"],r.readyState="",r.writeBuffer=[],r.prevBufferLen=0,r.opts=i({path:"/engine.io",agent:!1,withCredentials:!1,upgrade:!0,timestampParam:"t",rememberUpgrade:!1,rejectUnauthorized:!0,perMessageDeflate:{threshold:1024},transportOptions:{},closeOnBeforeunload:!0},o),r.opts.path=r.opts.path.replace(/\/$/,"")+"/","string"==typeof r.opts.query&&(r.opts.query=$(r.opts.query)),r.id=null,r.upgrades=null,r.pingInterval=null,r.pingTimeout=null,r.pingTimeoutTimer=null,"function"==typeof addEventListener&&(r.opts.closeOnBeforeunload&&addEventListener("beforeunload",(function(){r.transport&&(r.transport.removeAllListeners(),r.transport.close())}),!1),"localhost"!==r.hostname&&(r.offlineEventListener=function(){r.onClose("transport close",{description:"network connection 
lost"})},addEventListener("offline",r.offlineEventListener,!1))),r.open(),r}return r(a,[{key:"createTransport",value:function(t){var e=i({},this.opts.query);e.EIO=4,e.transport=t,this.id&&(e.sid=this.id);var n=i({},this.opts.transportOptions[t],this.opts,{query:e,socket:this,hostname:this.hostname,secure:this.secure,port:this.port});return new at[t](n)}},{key:"open",value:function(){var t,e=this;if(this.opts.rememberUpgrade&&a.priorWebsocketSuccess&&-1!==this.transports.indexOf("websocket"))t="websocket";else{if(0===this.transports.length)return void this.setTimeoutFn((function(){e.emitReserved("error","No transports available")}),0);t=this.transports[0]}this.readyState="opening";try{t=this.createTransport(t)}catch(t){return this.transports.shift(),void this.open()}t.open(),this.setTransport(t)}},{key:"setTransport",value:function(t){var e=this;this.transport&&this.transport.removeAllListeners(),this.transport=t,t.on("drain",this.onDrain.bind(this)).on("packet",this.onPacket.bind(this)).on("error",this.onError.bind(this)).on("close",(function(t){return e.onClose("transport close",t)}))}},{key:"probe",value:function(t){var e=this,n=this.createTransport(t),r=!1;a.priorWebsocketSuccess=!1;var i=function(){r||(n.send([{type:"ping",data:"probe"}]),n.once("packet",(function(t){if(!r)if("pong"===t.type&&"probe"===t.data){if(e.upgrading=!0,e.emitReserved("upgrading",n),!n)return;a.priorWebsocketSuccess="websocket"===n.name,e.transport.pause((function(){r||"closed"!==e.readyState&&(f(),e.setTransport(n),n.send([{type:"upgrade"}]),e.emitReserved("upgrade",n),n=null,e.upgrading=!1,e.flush())}))}else{var i=new Error("probe error");i.transport=n.name,e.emitReserved("upgradeError",i)}})))};function o(){r||(r=!0,f(),n.close(),n=null)}var s=function(t){var r=new Error("probe error: "+t);r.transport=n.name,o(),e.emitReserved("upgradeError",r)};function c(){s("transport closed")}function u(){s("socket closed")}function h(t){n&&t.name!==n.name&&o()}var 
f=function(){n.removeListener("open",i),n.removeListener("error",s),n.removeListener("close",c),e.off("close",u),e.off("upgrading",h)};n.once("open",i),n.once("error",s),n.once("close",c),this.once("close",u),this.once("upgrading",h),n.open()}},{key:"onOpen",value:function(){if(this.readyState="open",a.priorWebsocketSuccess="websocket"===this.transport.name,this.emitReserved("open"),this.flush(),"open"===this.readyState&&this.opts.upgrade&&this.transport.pause)for(var t=0,e=this.upgrades.length;t1))return this.writeBuffer;for(var t,e=1,n=0;n=57344?n+=3:(r++,n+=4);return n}(t):Math.ceil(1.33*(t.byteLength||t.size))),n>0&&e>this.maxPayload)return this.writeBuffer.slice(0,n);e+=2}return this.writeBuffer}},{key:"write",value:function(t,e,n){return this.sendPacket("message",t,e,n),this}},{key:"send",value:function(t,e,n){return this.sendPacket("message",t,e,n),this}},{key:"sendPacket",value:function(t,e,n,r){if("function"==typeof e&&(r=e,e=void 0),"function"==typeof n&&(r=n,n=null),"closing"!==this.readyState&&"closed"!==this.readyState){(n=n||{}).compress=!1!==n.compress;var i={type:t,data:e,options:n};this.emitReserved("packetCreate",i),this.writeBuffer.push(i),r&&this.once("flush",r),this.flush()}}},{key:"close",value:function(){var t=this,e=function(){t.onClose("forced close"),t.transport.close()},n=function n(){t.off("upgrade",n),t.off("upgradeError",n),e()},r=function(){t.once("upgrade",n),t.once("upgradeError",n)};return"opening"!==this.readyState&&"open"!==this.readyState||(this.readyState="closing",this.writeBuffer.length?this.once("drain",(function(){t.upgrading?r():e()})):this.upgrading?r():e()),this}},{key:"onError",value:function(t){a.priorWebsocketSuccess=!1,this.emitReserved("error",t),this.onClose("transport 
error",t)}},{key:"onClose",value:function(t,e){"opening"!==this.readyState&&"open"!==this.readyState&&"closing"!==this.readyState||(this.clearTimeoutFn(this.pingTimeoutTimer),this.transport.removeAllListeners("close"),this.transport.close(),this.transport.removeAllListeners(),"function"==typeof removeEventListener&&removeEventListener("offline",this.offlineEventListener,!1),this.readyState="closed",this.id=null,this.emitReserved("close",t,e),this.writeBuffer=[],this.prevBufferLen=0)}},{key:"filterUpgrades",value:function(t){for(var e=[],n=0,r=t.length;n0;case _t.ACK:case _t.BINARY_ACK:return Array.isArray(n)}}}]),a}(x),Ot=function(){function t(n){e(this,t),this.packet=n,this.buffers=[],this.reconPack=n}return r(t,[{key:"takeBinaryData",value:function(t){if(this.buffers.push(t),this.buffers.length===this.reconPack.attachments){var e=bt(this.reconPack,this.buffers);return this.finishedReconstruction(),e}return null}},{key:"finishedReconstruction",value:function(){this.reconPack=null,this.buffers=[]}}]),t}(),Rt=Object.freeze({__proto__:null,protocol:5,get PacketType(){return _t},Encoder:At,Decoder:Et});function Tt(t,e,n){return t.on(e,n),function(){t.off(e,n)}}var Ct=Object.freeze({connect:1,connect_error:1,disconnect:1,disconnecting:1,newListener:1,removeListener:1}),Bt=function(t){o(i,t);var n=p(i);function i(t,r,o){var s;return e(this,i),(s=n.call(this)).connected=!1,s.receiveBuffer=[],s.sendBuffer=[],s.ids=0,s.acks={},s.flags={},s.io=t,s.nsp=r,o&&o.auth&&(s.auth=o.auth),s.io._autoConnect&&s.open(),s}return r(i,[{key:"disconnected",get:function(){return!this.connected}},{key:"subEvents",value:function(){if(!this.subs){var t=this.io;this.subs=[Tt(t,"open",this.onopen.bind(this)),Tt(t,"packet",this.onpacket.bind(this)),Tt(t,"error",this.onerror.bind(this)),Tt(t,"close",this.onclose.bind(this))]}}},{key:"active",get:function(){return!!this.subs}},{key:"connect",value:function(){return 
this.connected||(this.subEvents(),this.io._reconnecting||this.io.open(),"open"===this.io._readyState&&this.onopen()),this}},{key:"open",value:function(){return this.connect()}},{key:"send",value:function(){for(var t=arguments.length,e=new Array(t),n=0;n1?e-1:0),r=1;r0&&t.jitter<=1?t.jitter:0,this.attempts=0}St.prototype.duration=function(){var t=this.ms*Math.pow(this.factor,this.attempts++);if(this.jitter){var e=Math.random(),n=Math.floor(e*this.jitter*t);t=0==(1&Math.floor(10*e))?t-n:t+n}return 0|Math.min(t,this.max)},St.prototype.reset=function(){this.attempts=0},St.prototype.setMin=function(t){this.ms=t},St.prototype.setMax=function(t){this.max=t},St.prototype.setJitter=function(t){this.jitter=t};var Nt=function(n){o(s,n);var i=p(s);function s(n,r){var o,a;e(this,s),(o=i.call(this)).nsps={},o.subs=[],n&&"object"===t(n)&&(r=n,n=void 0),(r=r||{}).path=r.path||"/socket.io",o.opts=r,I(f(o),r),o.reconnection(!1!==r.reconnection),o.reconnectionAttempts(r.reconnectionAttempts||1/0),o.reconnectionDelay(r.reconnectionDelay||1e3),o.reconnectionDelayMax(r.reconnectionDelayMax||5e3),o.randomizationFactor(null!==(a=r.randomizationFactor)&&void 0!==a?a:.5),o.backoff=new St({min:o.reconnectionDelay(),max:o.reconnectionDelayMax(),jitter:o.randomizationFactor()}),o.timeout(null==r.timeout?2e4:r.timeout),o._readyState="closed",o.uri=n;var c=r.parser||Rt;return o.encoder=new c.Encoder,o.decoder=new c.Decoder,o._autoConnect=!1!==r.autoConnect,o._autoConnect&&o.open(),o}return r(s,[{key:"reconnection",value:function(t){return arguments.length?(this._reconnection=!!t,this):this._reconnection}},{key:"reconnectionAttempts",value:function(t){return void 0===t?this._reconnectionAttempts:(this._reconnectionAttempts=t,this)}},{key:"reconnectionDelay",value:function(t){var e;return void 0===t?this._reconnectionDelay:(this._reconnectionDelay=t,null===(e=this.backoff)||void 0===e||e.setMin(t),this)}},{key:"randomizationFactor",value:function(t){var e;return void 
0===t?this._randomizationFactor:(this._randomizationFactor=t,null===(e=this.backoff)||void 0===e||e.setJitter(t),this)}},{key:"reconnectionDelayMax",value:function(t){var e;return void 0===t?this._reconnectionDelayMax:(this._reconnectionDelayMax=t,null===(e=this.backoff)||void 0===e||e.setMax(t),this)}},{key:"timeout",value:function(t){return arguments.length?(this._timeout=t,this):this._timeout}},{key:"maybeReconnectOnOpen",value:function(){!this._reconnecting&&this._reconnection&&0===this.backoff.attempts&&this.reconnect()}},{key:"open",value:function(t){var e=this;if(~this._readyState.indexOf("open"))return this;this.engine=new ft(this.uri,this.opts);var n=this.engine,r=this;this._readyState="opening",this.skipReconnect=!1;var i=Tt(n,"open",(function(){r.onopen(),t&&t()})),o=Tt(n,"error",(function(n){r.cleanup(),r._readyState="closed",e.emitReserved("error",n),t?t(n):r.maybeReconnectOnOpen()}));if(!1!==this._timeout){var s=this._timeout;0===s&&i();var a=this.setTimeoutFn((function(){i(),n.close(),n.emit("error",new Error("timeout"))}),s);this.opts.autoUnref&&a.unref(),this.subs.push((function(){clearTimeout(a)}))}return this.subs.push(i),this.subs.push(o),this}},{key:"connect",value:function(t){return this.open(t)}},{key:"onopen",value:function(){this.cleanup(),this._readyState="open",this.emitReserved("open");var t=this.engine;this.subs.push(Tt(t,"ping",this.onping.bind(this)),Tt(t,"data",this.ondata.bind(this)),Tt(t,"error",this.onerror.bind(this)),Tt(t,"close",this.onclose.bind(this)),Tt(this.decoder,"decoded",this.ondecoded.bind(this)))}},{key:"onping",value:function(){this.emitReserved("ping")}},{key:"ondata",value:function(t){this.decoder.add(t)}},{key:"ondecoded",value:function(t){this.emitReserved("packet",t)}},{key:"onerror",value:function(t){this.emitReserved("error",t)}},{key:"socket",value:function(t,e){var n=this.nsps[t];return n||(n=new Bt(this,t,e),this.nsps[t]=n),n}},{key:"_destroy",value:function(t){for(var 
e=0,n=Object.keys(this.nsps);e=this._reconnectionAttempts)this.backoff.reset(),this.emitReserved("reconnect_failed"),this._reconnecting=!1;else{var n=this.backoff.duration();this._reconnecting=!0;var r=this.setTimeoutFn((function(){e.skipReconnect||(t.emitReserved("reconnect_attempt",e.backoff.attempts),e.skipReconnect||e.open((function(n){n?(e._reconnecting=!1,e.reconnect(),t.emitReserved("reconnect_error",n)):e.onreconnect()})))}),n);this.opts.autoUnref&&r.unref(),this.subs.push((function(){clearTimeout(r)}))}}},{key:"onreconnect",value:function(){var t=this.backoff.attempts;this._reconnecting=!1,this.backoff.reset(),this.emitReserved("reconnect",t)}}]),s}(x),xt={};function Lt(e,n){"object"===t(e)&&(n=e,e=void 0);var r,i=function(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2?arguments[2]:void 0,r=t;n=n||"undefined"!=typeof location&&location,null==t&&(t=n.protocol+"//"+n.host),"string"==typeof t&&("/"===t.charAt(0)&&(t="/"===t.charAt(1)?n.protocol+t:n.host+t),/^(https?|wss?):\/\//.test(t)||(t=void 0!==n?n.protocol+"//"+t:"https://"+t),r=ht(t)),r.port||(/^(http|ws)$/.test(r.protocol)?r.port="80":/^(http|ws)s$/.test(r.protocol)&&(r.port="443")),r.path=r.path||"/";var i=-1!==r.host.indexOf(":")?"["+r.host+"]":r.host;return r.id=r.protocol+"://"+i+":"+r.port+e,r.href=r.protocol+"://"+i+(n&&n.port===r.port?"":":"+r.port),r}(e,(n=n||{}).path||"/socket.io"),o=i.source,s=i.id,a=i.path,c=xt[s]&&a in xt[s].nsps;return n.forceNew||n["force new connection"]||!1===n.multiplex||c?r=new Nt(o,n):(xt[s]||(xt[s]=new Nt(o,n)),r=xt[s]),i.query&&!n.query&&(n.query=i.queryKey),r.socket(i.path,n)}return i(Lt,{Manager:Nt,Socket:Bt,io:Lt,connect:Lt}),Lt})); //# sourceMappingURL=socket.io.min.js.map diff --git a/templates/index_new.html b/templates/index_new.html index 47cf4c79..3946af2f 100644 --- a/templates/index_new.html +++ b/templates/index_new.html @@ -37,8 +37,9 @@
-
+ +
@@ -54,9 +55,7 @@
Status:
- options: -
-
+
diff --git a/templates/settings item.html b/templates/settings item.html index 62b41416..44f15da2 100644 --- a/templates/settings item.html +++ b/templates/settings item.html @@ -26,8 +26,8 @@ {% elif item["uitype"] == "toggle" %} - {% elif item['uuitype'] == "dropdown" %} - {% for option in item['children'] %} {% endfor %} From 057f3dd92d27affd97630bd3c7df1db229d27d58 Mon Sep 17 00:00:00 2001 From: ebolam Date: Sun, 26 Jun 2022 21:06:06 -0400 Subject: [PATCH 0009/1297] back, redo, retry functional --- aiserver.py | 46 ++++++-- koboldai_settings.py | 24 ++--- requirements.txt | 3 +- static/koboldai.css | 28 ++++- static/koboldai.js | 226 ++++++++++++++++++++++++++------------- templates/index_new.html | 12 ++- 6 files changed, 232 insertions(+), 107 deletions(-) diff --git a/aiserver.py b/aiserver.py index 81a3eb3b..da4a7c73 100644 --- a/aiserver.py +++ b/aiserver.py @@ -3407,6 +3407,9 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, for i in range(model_settings.numseqs): genout.append({"generated_text": system_settings.lua_koboldbridge.outputs[i+1]}) assert type(genout[-1]["generated_text"]) is str + story_settings.actions.clear_unused_options() + story_settings.actions.append_options([x["generated_text"] for x in genout]) + genout = [{"generated_text": x['text']} for x in story_settings.actions.get_current_options()] if(len(genout) == 1): genresult(genout[0]["generated_text"], flash=False) refresh_story() @@ -3466,6 +3469,9 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, for i in range(model_settings.numseqs): genout.append({"generated_text": system_settings.lua_koboldbridge.outputs[i+1]}) assert type(genout[-1]["generated_text"]) is str + story_settings.actions.clear_unused_options() + story_settings.actions.append_options([x["generated_text"] for x in genout]) + genout = [{"generated_text": x['text']} for x in story_settings.actions.get_current_options()] if(len(genout) == 1): 
genresult(genout[0]["generated_text"]) if(not system_settings.abort and system_settings.lua_koboldbridge.restart_sequence is not None): @@ -3893,6 +3899,9 @@ def generate(txt, minimum, maximum, found_entries=None): else: genout = [{"generated_text": utils.decodenewlines(tokenizer.decode(tokens[-already_generated:]))} for tokens in genout] + story_settings.actions.clear_unused_options() + story_settings.actions.append_options([x["generated_text"] for x in genout]) + genout = [{"generated_text": x['text']} for x in story_settings.actions.get_current_options()] if(len(genout) == 1): genresult(genout[0]["generated_text"]) else: @@ -3947,11 +3956,7 @@ def genselect(genout): print("{0}[Result {1}]\n{2}{3}".format(colors.CYAN, i, result["generated_text"], colors.END)) i += 1 - story_settings.actions.clear_unused_options() - story_settings.actions.append_options([x["generated_text"] for x in genout]) - genout = [{"generated_text": x['text']} for x in story_settings.actions.get_current_options_no_edits()] - # Store sequences in memory until selection is made story_settings.genseqs = genout @@ -4046,7 +4051,11 @@ def sendtocolab(txt, min, max): genout.append(system_settings.lua_koboldbridge.outputs[i+1]) assert type(genout[-1]) is str + story_settings.actions.clear_unused_options() + story_settings.actions.append_options([x["generated_text"] for x in genout]) + genout = [{"generated_text": x['text']} for x in story_settings.actions.get_current_options()] if(len(genout) == 1): + genresult(genout[0]) else: # Convert torch output format to transformers @@ -4195,13 +4204,16 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): else: genout = [{"generated_text": utils.decodenewlines(tokenizer.decode(txt))} for txt in genout] - if(len(genout) == 1): - genresult(genout[0]["generated_text"]) + story_settings.actions.clear_unused_options() + story_settings.actions.append_options([x["generated_text"] for x in genout]) + genout = [{"generated_text": x['text']} for x in 
story_settings.actions.get_current_options()] + if(len(story_settings.actions.get_current_options()) == 1): + genresult(story_settings.actions.get_current_options()[0]) else: if(system_settings.lua_koboldbridge.restart_sequence is not None and system_settings.lua_koboldbridge.restart_sequence > 0): genresult(genout[system_settings.lua_koboldbridge.restart_sequence-1]["generated_text"]) else: - genselect(genout) + genselect([{"generated_text": x} for x in story_settings.actions.get_current_options()]) set_aibusy(0) @@ -4943,7 +4955,9 @@ def oairequest(txt, min, max): {"generated_text": utils.decodenewlines(txt)} for txt in outputs] - + story_settings.actions.clear_unused_options() + story_settings.actions.append_options([x["generated_text"] for x in genout]) + genout = [{"generated_text": x['text']} for x in story_settings.actions.get_current_options()] if (len(genout) == 1): genresult(genout[0]["generated_text"]) else: @@ -5794,7 +5808,7 @@ def new_ui_index(): @socketio.on('Set Selected Text') def UI_2_Set_Selected_Text(data): print("Updating Selected Text: {}".format(data)) - story_settings.actions.use_option(int(data['chunk']), int(data['option'])) + story_settings.actions.use_option(int(data['option']), action_step=int(data['chunk'])) #==================================================================# # Event triggered when user clicks the submit button @@ -5820,6 +5834,7 @@ def UI_2_Pinning(data): #==================================================================# @socketio.on('back') def UI_2_back(data): + print("back") ignore = story_settings.actions.pop() #==================================================================# @@ -5827,8 +5842,19 @@ def UI_2_back(data): #==================================================================# @socketio.on('redo') def UI_2_redo(data): - pass + if len(story_settings.actions.get_current_options()) == 1: + story_settings.actions.use_option(0) +#==================================================================# +# 
Event triggered when user clicks the redo button +#==================================================================# +@socketio.on('retry') +def UI_2_retry(data): + story_settings.actions.clear_unused_options() + system_settings.lua_koboldbridge.feedback = None + story_settings.recentrng = story_settings.recentrngm = None + actionsubmit("", actionmode=story_settings.actionmode) + #==================================================================# # Event triggered to rely a message #==================================================================# diff --git a/koboldai_settings.py b/koboldai_settings.py index ee783061..34f89152 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -22,9 +22,8 @@ def process_variable_changes(classname, name, value, old_value, debug_message=No if value != old_value: #Special Case for KoboldStoryRegister if isinstance(value, KoboldStoryRegister): - print("We got a story register") - print(value) socketio.emit("reset_story", {}, broadcast=True, room="UI_2") + socketio.emit("var_changed", {"classname": "actions", "name": "Action Count", "old_value": None, "value":value.action_count}, broadcast=True, room="UI_2") for i in range(len(value.actions)): socketio.emit("var_changed", {"classname": "actions", "name": "Selected Text", "old_value": None, "value": {"id": i, "text": value[i]}}, include_self=True, broadcast=True, room="UI_2") socketio.emit("var_changed", {"classname": "actions", "name": "Options", "old_value": None, "value": {"id": i, "options": value.actions[i]['Options']}}, include_self=True, broadcast=True, room="UI_2") @@ -34,24 +33,19 @@ def process_variable_changes(classname, name, value, old_value, debug_message=No if threading.get_ident() in rely_clients: sio = rely_clients[threading.get_ident()] else: - start_time = time.time() - print("getting client") sio = socketio_client.Client() @sio.event def connect(): - print("I'm connected!") + pass sio.connect('http://localhost:5000/?rely=true') 
rely_clients[threading.get_ident()] = sio - print("got client, took {}".format(time.time()-start_time)) #release no longer used clients for thread in rely_clients: if thread not in [x.ident for x in threading.enumerate()]: del rely_clients[thread] - sio.emit("relay", {"emit": "var_changed", "data": {"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}, "include_self":True, "broadcast":True, "room":"UI_2"}) + sio.emit("relay", ["var_changed", {"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}, {"include_self":True, "broadcast":True, "room":"UI_2"}]) else: socketio.emit("var_changed", {"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}, include_self=True, broadcast=True, room="UI_2") - #eventlet.sleep() - #socketio.sleep(0) class settings(object): @@ -350,17 +344,15 @@ class KoboldStoryRegister(object): process_variable_changes("actions", "Selected Text", {"id": self.action_count, "text": text}, None) def append_options(self, option_list): - print("appending options for {}".format(self.action_count+1)) if self.action_count+1 in self.actions: old_options = self.actions[self.action_count+1]["Options"].copy() self.actions[self.action_count+1]['Options'].extend([{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False} for x in option_list]) else: old_options = None self.actions[self.action_count+1] = {"Selected Text": "", "Options": [{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False} for x in option_list]} - process_variable_changes("actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": old_options}, debug_message="wtf") + process_variable_changes("actions", "Options", {"id": self.action_count+1, "options": 
self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": old_options}) def clear_unused_options(self, pointer=None): - print("clearing options for {}".format(self.action_count+1)) new_options = [] old_options = None if pointer is None: @@ -393,13 +385,19 @@ class KoboldStoryRegister(object): self.actions[action_step]['Options'][option_number]['Pinned'] = False process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) - def use_option(self, action_step, option_number): + def use_option(self, option_number, action_step=None): + if action_step is None: + action_step = self.action_count+1 if action_step in self.actions: old_options = self.actions[action_step]["Options"].copy() old_text = self.actions[action_step]["Selected Text"] if option_number < len(self.actions[action_step]['Options']): self.actions[action_step]["Selected Text"] = self.actions[action_step]['Options'][option_number]['text'] del self.actions[action_step]['Options'][option_number] + #If this is the current spot in the story, advance + if action_step-1 == self.action_count: + self.action_count+=1 + socketio.emit("var_changed", {"classname": "actions", "name": "Action Count", "old_value": None, "value":self.action_count}, broadcast=True, room="UI_2") process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) process_variable_changes("actions", "Selected Text", {"id": action_step, "text": self.actions[action_step]["Selected Text"]}, {"id": action_step, "Selected Text": old_text}) diff --git a/requirements.txt b/requirements.txt index 13fdd7cd..1a523e36 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,4 +11,5 @@ markdown bleach==4.1.0 sentencepiece protobuf -accelerate \ No newline at end of file +accelerate +python-socketio[client] \ No newline at end of file 
diff --git a/static/koboldai.css b/static/koboldai.css index 96bae325..3c8afbaf 100644 --- a/static/koboldai.css +++ b/static/koboldai.css @@ -427,14 +427,19 @@ td.sequence:hover { .inputrow { grid-area: inputrow; display: grid; - grid-template-areas: "textarea submit submit submit" + grid-template-areas: "textarea statusbar statusbar statusbar" + "textarea submit submit submit" "textarea back redo retry"; grid-template-columns: auto 30px 30px 30px; - grid-template-rows: auto 40px; + grid-template-rows: 0px auto 40px; gap: 1px; } +.inputrow.status_bar { + grid-template-rows: 20px auto 40px; +} + .inputrow textarea { grid-area: textarea; background-color: var(--textarea_background); @@ -474,6 +479,25 @@ td.sequence:hover { display: inline; } +.statusbar_outer { + color: #000; + background-color: #f1f1f1; + grid-area: statusbar; + height: 20px; +} + +.statusbar_outer.hidden { + height: 0px; +} + +.statusbar_inner { + background-color: #4CAF50; + text-align: center; + font-size: 0.875em; + + +} + .inputrow .back { grid-area: back; padding: 0px; diff --git a/static/koboldai.js b/static/koboldai.js index 56b5d98e..5dfdbb91 100644 --- a/static/koboldai.js +++ b/static/koboldai.js @@ -46,86 +46,129 @@ function fix_text(val) { } function create_options(data) { + //Set all options before the next chunk to hidden + var option_container = document.getElementById("Select Options"); + var current_chunk = parseInt(document.getElementById("action_count").textContent)+1; + var children = option_container.children; + for (var i = 0; i < children.length; i++) { + var chunk = children[i]; + if (chunk.id == "Select Options Chunk " + current_chunk) { + chunk.classList.remove("hidden"); + console.log(current_chunk); + } else { + chunk.classList.add("hidden"); + } + } + + console.log(current_chunk); console.log(data); if (document.getElementById("Select Options Chunk "+data.value.id)) { - var option_chunk = document.getElementById("Select Options Chunk "+data.value.id) - } else { - var 
option_area = document.getElementById("Select Options"); - var option_chunk = document.createElement("div"); - option_chunk.id = "Select Options Chunk "+data.value.id; - option_area.append(option_chunk); + var option_chunk = document.getElementById("Select Options Chunk "+data.value.id) + } else { + var option_area = document.getElementById("Select Options"); + var option_chunk = document.createElement("div"); + option_chunk.id = "Select Options Chunk "+data.value.id; + option_area.append(option_chunk); + } + //first, let's clear out our existing data + while (option_chunk.firstChild) { + option_chunk.removeChild(option_chunk.firstChild); + } + var table = document.createElement("table"); + table.classList.add("sequence"); + table.style = "border-spacing: 0;"; + //Add pins + i=0; + for (item of data.value.options) { + if (item.Pinned) { + var row = document.createElement("tr"); + row.classList.add("sequence"); + var textcell = document.createElement("td"); + textcell.textContent = item.text; + textcell.classList.add("sequence"); + textcell.setAttribute("option_id", i); + textcell.setAttribute("option_chunk", data.value.id); + var iconcell = document.createElement("td"); + iconcell.setAttribute("option_id", i); + iconcell.setAttribute("option_chunk", data.value.id); + var icon = document.createElement("span"); + icon.id = "Pin_"+i; + icon.classList.add("oi"); + icon.setAttribute('data-glyph', "pin"); + iconcell.append(icon); + textcell.onclick = function () { + socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); + }; + iconcell.onclick = function () { + socket.emit("Pinning", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id"), "set": false}); + }; + row.append(textcell); + row.append(iconcell); + table.append(row); } - //first, let's clear out our existing data - while (option_chunk.firstChild) { - option_chunk.removeChild(option_chunk.firstChild); + i+=1; + } + 
//Add Redo options + i=0; + for (item of data.value.options) { + if ((item['Previous Selection'])) { + var row = document.createElement("tr"); + row.classList.add("sequence"); + var textcell = document.createElement("td"); + textcell.textContent = item.text; + textcell.classList.add("sequence"); + textcell.setAttribute("option_id", i); + textcell.setAttribute("option_chunk", data.value.id); + var iconcell = document.createElement("td"); + iconcell.setAttribute("option_id", i); + iconcell.setAttribute("option_chunk", data.value.id); + var icon = document.createElement("span"); + icon.id = "Pin_"+i; + icon.classList.add("oi"); + icon.setAttribute('data-glyph', "loop-circular"); + iconcell.append(icon); + textcell.onclick = function () { + socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); + }; + row.append(textcell); + row.append(iconcell); + table.append(row); } - var table = document.createElement("table"); - table.classList.add("sequence"); - table.style = "border-spacing: 0;"; - //Add pins - i=0; - for (item of data.value.options) { - if (item.Pinned) { - var row = document.createElement("tr"); - row.classList.add("sequence"); - var textcell = document.createElement("td"); - textcell.textContent = item.text; - textcell.classList.add("sequence"); - textcell.setAttribute("option_id", i); - textcell.setAttribute("option_chunk", data.value.id); - var iconcell = document.createElement("td"); - iconcell.setAttribute("option_id", i); - iconcell.setAttribute("option_chunk", data.value.id); - var icon = document.createElement("span"); - icon.id = "Pin_"+i; - icon.classList.add("oi"); - icon.setAttribute('data-glyph', "pin"); - iconcell.append(icon); - textcell.onclick = function () { - socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); - }; - iconcell.onclick = function () { - socket.emit("Pinning", {"chunk": 
this.getAttribute("option_chunk"), "option": this.getAttribute("option_id"), "set": false}); - }; - row.append(textcell); - row.append(iconcell); - table.append(row); - } - i+=1; + i+=1; + } + //Add general options + i=0; + for (item of data.value.options) { + if (!(item.Edited) && !(item.Pinned) && !(item['Previous Selection'])) { + var row = document.createElement("tr"); + row.classList.add("sequence"); + var textcell = document.createElement("td"); + textcell.textContent = item.text; + textcell.classList.add("sequence"); + textcell.setAttribute("option_id", i); + textcell.setAttribute("option_chunk", data.value.id); + var iconcell = document.createElement("td"); + iconcell.setAttribute("option_id", i); + iconcell.setAttribute("option_chunk", data.value.id); + var icon = document.createElement("span"); + icon.id = "Pin_"+i; + icon.classList.add("oi"); + icon.setAttribute('data-glyph', "pin"); + icon.setAttribute('style', "filter: brightness(50%);"); + iconcell.append(icon); + iconcell.onclick = function () { + socket.emit("Pinning", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id"), "set": true}); + }; + textcell.onclick = function () { + socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); + }; + row.append(textcell); + row.append(iconcell); + table.append(row); } - //Add general options - i=0; - for (item of data.value.options) { - if (!(item.Edited) && !(item.Pinned) && !(item['Previous Selection'])) { - var row = document.createElement("tr"); - row.classList.add("sequence"); - var textcell = document.createElement("td"); - textcell.textContent = item.text; - textcell.classList.add("sequence"); - textcell.setAttribute("option_id", i); - textcell.setAttribute("option_chunk", data.value.id); - var iconcell = document.createElement("td"); - iconcell.setAttribute("option_id", i); - iconcell.setAttribute("option_chunk", data.value.id); - var icon = 
document.createElement("span"); - icon.id = "Pin_"+i; - icon.classList.add("oi"); - icon.setAttribute('data-glyph', "pin"); - icon.setAttribute('style', "filter: brightness(50%);"); - iconcell.append(icon); - iconcell.onclick = function () { - socket.emit("Pinning", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id"), "set": true}); - }; - textcell.onclick = function () { - socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); - }; - row.append(textcell); - row.append(iconcell); - table.append(row); - } - i+=1; - } - option_chunk.append(table); + i+=1; + } + option_chunk.append(table); } function do_story_text_updates(data) { @@ -187,6 +230,27 @@ function selected_preset(data) { } } +function update_status_bar(data) { + var total_tokens = document.getElementById('model_genamt').value; + var percent_complete = data.value/total_tokens*100; + var percent_bar = document.getElementsByClassName("statusbar_inner"); + for (item of percent_bar) { + item.setAttribute("style", "width:"+percent_complete+"%"); + item.textContent = Math.round(percent_complete)+"%" + if ((percent_complete == 0) || (percent_complete == 100)) { + item.parentElement.classList.add("hidden"); + document.getElementById("inputrow_container").classList.remove("status_bar"); + } else { + item.parentElement.classList.remove("hidden"); + document.getElementById("inputrow_container").classList.add("status_bar"); + } + } + if ((percent_complete == 0) || (percent_complete == 100)) { + document.title = "KoboldAI Client"; + } else { + document.title = "KoboldAI Client Generating (" + percent_complete + "%)"; + } +} function var_changed(data) { //Special Case for Story Text if ((data.classname == "actions") && (data.name == "Selected Text")) { @@ -201,7 +265,7 @@ function var_changed(data) { selected_preset(data); //Basic Data Syncing } else { - var elements_to_change = 
document.getElementsByClassName("var_sync_"+data.classname+"_"+data.name); + var elements_to_change = document.getElementsByClassName("var_sync_"+data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_")); for (item of elements_to_change) { if ((item.tagName.toLowerCase() === 'input') || (item.tagName.toLowerCase() === 'select')) { item.value = fix_text(data.value); @@ -209,11 +273,19 @@ function var_changed(data) { item.textContent = fix_text(data.value); } } - var elements_to_change = document.getElementsByClassName("var_sync_alt_"+data.classname+"_"+data.name); + //alternative syncing method + var elements_to_change = document.getElementsByClassName("var_sync_alt_"+data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_")); for (item of elements_to_change) { item.setAttribute("server_value", fix_text(data.value)); } } + + //if we're updating generated tokens, let's show that in our status bar + if ((data.classname == 'story') && (data.name == 'generated_tkns')) { + update_status_bar(data); + } + + //If we have ai_busy, start the favicon swapping if ((data.classname == 'system') && (data.name == 'aibusy')) { if (data.value) { favicon.start_swap() diff --git a/templates/index_new.html b/templates/index_new.html index 3946af2f..1b11b16d 100644 --- a/templates/index_new.html +++ b/templates/index_new.html @@ -39,15 +39,19 @@
+
-
+
+
+
25%
+

- - - + + +
From 0ffaa1bfcf037ed1f3f950b5b01d3d74e8177376 Mon Sep 17 00:00:00 2001 From: ebolam Date: Mon, 27 Jun 2022 18:36:22 -0400 Subject: [PATCH 0010/1297] Presets and Remaining time updates --- aiserver.py | 359 ++++++++++++++++++------------- koboldai_settings.py | 114 ++++++++-- settings/preset/official.presets | 95 ++++++++ static/koboldai.css | 20 +- static/koboldai.js | 88 ++++---- templates/index_new.html | 11 +- templates/settings flyout.html | 16 +- 7 files changed, 480 insertions(+), 223 deletions(-) create mode 100644 settings/preset/official.presets diff --git a/aiserver.py b/aiserver.py index da4a7c73..ca2f92cc 100644 --- a/aiserver.py +++ b/aiserver.py @@ -1024,8 +1024,12 @@ def get_model_info(model, directory=""): breakmodel = False else: breakmodel = True - if path.exists("settings/{}.breakmodel".format(model.replace("/", "_"))): - with open("settings/{}.breakmodel".format(model.replace("/", "_")), "r") as file: + if model in ["NeoCustom", "GPT2Custom"]: + filename = os.path.basename(os.path.normpath(directory)) + else: + filename = "settings/{}.breakmodel".format(model.replace("/", "_")) + if path.exists(filename): + with open(filename, "r") as file: data = file.read().split("\n")[:2] if len(data) < 2: data.append("0") @@ -1034,10 +1038,6 @@ def get_model_info(model, directory=""): else: break_values = [layer_count] break_values += [0] * (gpu_count - len(break_values)) - #print("Model_info: {}".format({'cmd': 'selected_model_info', 'key_value': key_value, 'key':key, - # 'gpu':gpu, 'layer_count':layer_count, 'breakmodel':breakmodel, - # 'break_values': break_values, 'gpu_count': gpu_count, - # 'url': url, 'gpu_names': gpu_names})) emit('from_server', {'cmd': 'selected_model_info', 'key_value': key_value, 'key':key, 'gpu':gpu, 'layer_count':layer_count, 'breakmodel':breakmodel, 'disk_break_value': disk_blocks, 'accelerate': utils.HAS_ACCELERATE, @@ -1341,10 +1341,10 @@ def patch_transformers(): scores: torch.FloatTensor, **kwargs, ) -> bool: - 
story_settings.generated_tkns += 1 - if(system_settings.lua_koboldbridge.generated_cols and story_settings.generated_tkns != system_settings.lua_koboldbridge.generated_cols): - raise RuntimeError(f"Inconsistency detected between KoboldAI Python and Lua backends ({story_settings.generated_tkns} != {system_settings.lua_koboldbridge.generated_cols})") - if(system_settings.abort or story_settings.generated_tkns >= model_settings.genamt): + model_settings.generated_tkns += 1 + if(system_settings.lua_koboldbridge.generated_cols and model_settings.generated_tkns != system_settings.lua_koboldbridge.generated_cols): + raise RuntimeError(f"Inconsistency detected between KoboldAI Python and Lua backends ({model_settings.generated_tkns} != {system_settings.lua_koboldbridge.generated_cols})") + if(system_settings.abort or model_settings.generated_tkns >= model_settings.genamt): self.regeneration_required = False self.halt = False return True @@ -1356,11 +1356,11 @@ def patch_transformers(): system_settings.lua_koboldbridge.regeneration_required = False for i in range(model_settings.numseqs): - system_settings.lua_koboldbridge.generated[i+1][story_settings.generated_tkns] = int(input_ids[i, -1].item()) + system_settings.lua_koboldbridge.generated[i+1][model_settings.generated_tkns] = int(input_ids[i, -1].item()) if(not story_settings.dynamicscan): return self.regeneration_required or self.halt - tail = input_ids[..., -story_settings.generated_tkns:] + tail = input_ids[..., -model_settings.generated_tkns:] for i, t in enumerate(tail): decoded = utils.decodenewlines(tokenizer.decode(t)) _, found = checkworldinfo(decoded, force_use_txt=True, actions=story_settings._actions) @@ -1970,17 +1970,17 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal return scores def tpumtjgenerate_stopping_callback(generated, n_generated, excluded_world_info) -> Tuple[List[set], bool, bool]: - story_settings.generated_tkns += 1 + model_settings.generated_tkns += 1 
assert len(excluded_world_info) == len(generated) regeneration_required = system_settings.lua_koboldbridge.regeneration_required - halt = system_settings.abort or not system_settings.lua_koboldbridge.generating or story_settings.generated_tkns >= model_settings.genamt + halt = system_settings.abort or not system_settings.lua_koboldbridge.generating or model_settings.generated_tkns >= model_settings.genamt system_settings.lua_koboldbridge.regeneration_required = False global past for i in range(model_settings.numseqs): - system_settings.lua_koboldbridge.generated[i+1][story_settings.generated_tkns] = int(generated[i, tpu_mtj_backend.params["seq"] + n_generated - 1].item()) + system_settings.lua_koboldbridge.generated[i+1][model_settings.generated_tkns] = int(generated[i, tpu_mtj_backend.params["seq"] + n_generated - 1].item()) if(not story_settings.dynamicscan or halt): return excluded_world_info, regeneration_required, halt @@ -2067,7 +2067,16 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal setStartState() sendsettings() refresh_settings() - + + #Let's load the presets + with open('settings/preset/official.presets') as f: + presets = json.load(f) + if model_settings.model in presets: + model_settings.presets = presets[model_settings.model] + elif model_settings.model.replace("/", "_") in presets: + model_settings.presets = presets[model_settings.model.replace("/", "_")] + else: + model_settings.presets = {} # Set up Flask routes @app.route('/') @@ -2770,6 +2779,9 @@ def do_connect(): return join_room("UI_{}".format(request.args.get('ui'))) print("Joining Room UI_{}".format(request.args.get('ui'))) + if request.args.get("ui") == 2: + ui2_connect() + return #Send all variables to client model_settings.send_to_ui() story_settings.send_to_ui() @@ -3808,9 +3820,9 @@ def _generate(txt, minimum, maximum, found_entries): break assert genout.ndim >= 2 assert genout.shape[0] == model_settings.numseqs - 
if(system_settings.lua_koboldbridge.generated_cols and story_settings.generated_tkns != system_settings.lua_koboldbridge.generated_cols): + if(system_settings.lua_koboldbridge.generated_cols and model_settings.generated_tkns != system_settings.lua_koboldbridge.generated_cols): raise RuntimeError("Inconsistency detected between KoboldAI Python and Lua backends") - if(already_generated != story_settings.generated_tkns): + if(already_generated != model_settings.generated_tkns): raise RuntimeError("WI scanning error") for r in range(model_settings.numseqs): for c in range(already_generated): @@ -3850,7 +3862,7 @@ def _generate(txt, minimum, maximum, found_entries): def generate(txt, minimum, maximum, found_entries=None): - story_settings.generated_tkns = 0 + model_settings.generated_tkns = 0 if(found_entries is None): found_entries = set() @@ -3886,7 +3898,7 @@ def generate(txt, minimum, maximum, found_entries=None): return for i in range(model_settings.numseqs): - system_settings.lua_koboldbridge.generated[i+1][story_settings.generated_tkns] = int(genout[i, -1].item()) + system_settings.lua_koboldbridge.generated[i+1][model_settings.generated_tkns] = int(genout[i, -1].item()) system_settings.lua_koboldbridge.outputs[i+1] = utils.decodenewlines(tokenizer.decode(genout[i, -already_generated:])) execute_outmod() @@ -4086,7 +4098,7 @@ def sendtocolab(txt, min, max): # Send text to TPU mesh transformer backend #==================================================================# def tpumtjgenerate(txt, minimum, maximum, found_entries=None): - story_settings.generated_tkns = 0 + model_settings.generated_tkns = 0 if(found_entries is None): found_entries = set() @@ -4173,7 +4185,7 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): past = genout for i in range(model_settings.numseqs): system_settings.lua_koboldbridge.generated[i+1] = system_settings.lua_state.table(*genout[i].tolist()) - system_settings.lua_koboldbridge.generated_cols = 
story_settings.generated_tkns = genout[0].shape[-1] + system_settings.lua_koboldbridge.generated_cols = model_settings.generated_tkns = genout[0].shape[-1] except Exception as e: if(issubclass(type(e), lupa.LuaError)): @@ -5196,6 +5208,7 @@ def loadRequest(loadpath, filename=None): # Leave Edit/Memory mode before continuing exitModes() + # Read file contents into JSON object if(isinstance(loadpath, str)): with open(loadpath, "r") as file: @@ -5206,107 +5219,115 @@ def loadRequest(loadpath, filename=None): js = loadpath if(filename is None): filename = "untitled.json" - - # Copy file contents to vars - story_settings.gamestarted = js["gamestarted"] - story_settings.prompt = js["prompt"] - story_settings.memory = js["memory"] - story_settings.worldinfo = [] - story_settings.worldinfo = [] - story_settings.worldinfo_u = {} - story_settings.wifolders_d = {int(k): v for k, v in js.get("wifolders_d", {}).items()} - story_settings.wifolders_l = js.get("wifolders_l", []) - story_settings.wifolders_u = {uid: [] for uid in story_settings.wifolders_d} - story_settings.lastact = "" - story_settings.submission = "" - story_settings.lastctx = "" - story_settings.genseqs = [] + js['v1_loadpath'] = loadpath + js['v1_filename'] = filename + loadJSON(js) - del story_settings.actions - story_settings.actions = koboldai_settings.KoboldStoryRegister() - actions = collections.deque(js["actions"]) - - - if "actions_metadata" in js: - - if type(js["actions_metadata"]) == dict: - temp = js["actions_metadata"] - story_settings.actions_metadata = {} - #we need to redo the numbering of the actions_metadata since the actions list doesn't preserve it's number on saving - if len(temp) > 0: - counter = 0 - temp = {int(k):v for k,v in temp.items()} - for i in range(max(temp)+1): - if i in temp: - story_settings.actions_metadata[counter] = temp[i] - counter += 1 - del temp - else: - #fix if we're using the old metadata format - story_settings.actions_metadata = {} - i = 0 - - for text in 
js['actions']: - story_settings.actions_metadata[i] = {'Selected Text': text, 'Alternative Text': []} - i+=1 +def loadJSON(json_text_or_dict): + if isinstance(json_text_or_dict, str): + json_data = json.loads(json_text_or_dict) + else: + json_data = json_text_or_dict + if "file_version" in json_data: + if json_data['file_version'] == 2: + load_story_v2(json_data) else: + load_story_v1(json_data) + else: + load_story_v1(json_data) + +def load_story_v1(js): + loadpath = js['v1_loadpath'] + filename = js['v1_filename'] + + # Copy file contents to vars + story_settings.gamestarted = js["gamestarted"] + story_settings.prompt = js["prompt"] + story_settings.memory = js["memory"] + story_settings.worldinfo = [] + story_settings.worldinfo = [] + story_settings.worldinfo_u = {} + story_settings.wifolders_d = {int(k): v for k, v in js.get("wifolders_d", {}).items()} + story_settings.wifolders_l = js.get("wifolders_l", []) + story_settings.wifolders_u = {uid: [] for uid in story_settings.wifolders_d} + story_settings.lastact = "" + story_settings.submission = "" + story_settings.lastctx = "" + story_settings.genseqs = [] + + del story_settings.actions + story_settings.actions = koboldai_settings.KoboldStoryRegister() + actions = collections.deque(js["actions"]) + + + if "actions_metadata" in js: + + if type(js["actions_metadata"]) == dict: + temp = js["actions_metadata"] + story_settings.actions_metadata = {} + #we need to redo the numbering of the actions_metadata since the actions list doesn't preserve it's number on saving + if len(temp) > 0: + counter = 0 + temp = {int(k):v for k,v in temp.items()} + for i in range(max(temp)+1): + if i in temp: + story_settings.actions_metadata[counter] = temp[i] + counter += 1 + del temp + else: + #fix if we're using the old metadata format story_settings.actions_metadata = {} i = 0 for text in js['actions']: story_settings.actions_metadata[i] = {'Selected Text': text, 'Alternative Text': []} i+=1 - - - 
if(len(story_settings.prompt.strip()) == 0): - while(len(actions)): - action = actions.popleft() - if(len(action.strip()) != 0): - story_settings.prompt = action - break - else: - story_settings.gamestarted = False - if(story_settings.gamestarted): - for s in actions: - story_settings.actions.append(s) + else: + story_settings.actions_metadata = {} + i = 0 - # Try not to break older save files - if("authorsnote" in js): - story_settings.authornote = js["authorsnote"] - else: - story_settings.authornote = "" - if("anotetemplate" in js): - story_settings.authornotetemplate = js["anotetemplate"] - else: - story_settings.authornotetemplate = "[Author's note: <|>]" - - if("worldinfo" in js): - num = 0 - for wi in js["worldinfo"]: - story_settings.worldinfo.append({ - "key": wi["key"], - "keysecondary": wi.get("keysecondary", ""), - "content": wi["content"], - "comment": wi.get("comment", ""), - "folder": wi.get("folder", None), - "num": num, - "init": True, - "selective": wi.get("selective", False), - "constant": wi.get("constant", False), - "uid": None, - }) - while(True): - uid = int.from_bytes(os.urandom(4), "little", signed=True) - if(uid not in story_settings.worldinfo_u): - break - story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] - story_settings.worldinfo[-1]["uid"] = uid - if(story_settings.worldinfo[-1]["folder"] is not None): - story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) - num += 1 + for text in js['actions']: + story_settings.actions_metadata[i] = {'Selected Text': text, 'Alternative Text': []} + i+=1 + - for uid in story_settings.wifolders_l + [None]: - story_settings.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) + if(len(story_settings.prompt.strip()) == 0): + while(len(actions)): + action = actions.popleft() + if(len(action.strip()) != 0): + 
story_settings.prompt = action + break + else: + story_settings.gamestarted = False + if(story_settings.gamestarted): + for s in actions: + story_settings.actions.append(s) + + # Try not to break older save files + if("authorsnote" in js): + story_settings.authornote = js["authorsnote"] + else: + story_settings.authornote = "" + if("anotetemplate" in js): + story_settings.authornotetemplate = js["anotetemplate"] + else: + story_settings.authornotetemplate = "[Author's note: <|>]" + + if("worldinfo" in js): + num = 0 + for wi in js["worldinfo"]: + story_settings.worldinfo.append({ + "key": wi["key"], + "keysecondary": wi.get("keysecondary", ""), + "content": wi["content"], + "comment": wi.get("comment", ""), + "folder": wi.get("folder", None), + "num": num, + "init": True, + "selective": wi.get("selective", False), + "constant": wi.get("constant", False), + "uid": None, + }) while(True): uid = int.from_bytes(os.urandom(4), "little", signed=True) if(uid not in story_settings.worldinfo_u): @@ -5315,32 +5336,49 @@ def loadRequest(loadpath, filename=None): story_settings.worldinfo[-1]["uid"] = uid if(story_settings.worldinfo[-1]["folder"] is not None): story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) - stablesortwi() - story_settings.worldinfo_i = [wi for wi in story_settings.worldinfo if wi["init"]] + num += 1 - # Save path for save button - system_settings.savedir = loadpath - - # Clear loadselect var - user_settings.loadselect = "" - - # Refresh game screen - _filename = filename - if(filename.endswith('.json')): - _filename = filename[:-5] - user_settings.laststory = _filename - emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True, room="UI_1") - setgamesaved(True) - sendwi() - emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True, room="UI_1") - emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True, 
room="UI_1") - emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True, room="UI_1") - refresh_story() - emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True, room="UI_1") - emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True, room="UI_1") - print("{0}Story loaded from {1}!{2}".format(colors.GREEN, filename, colors.END)) - - send_debug() + for uid in story_settings.wifolders_l + [None]: + story_settings.worldinfo.append({"key": "", "keysecondary": "", "content": "", "comment": "", "folder": uid, "num": None, "init": False, "selective": False, "constant": False, "uid": None}) + while(True): + uid = int.from_bytes(os.urandom(4), "little", signed=True) + if(uid not in story_settings.worldinfo_u): + break + story_settings.worldinfo_u[uid] = story_settings.worldinfo[-1] + story_settings.worldinfo[-1]["uid"] = uid + if(story_settings.worldinfo[-1]["folder"] is not None): + story_settings.wifolders_u[story_settings.worldinfo[-1]["folder"]].append(story_settings.worldinfo[-1]) + stablesortwi() + story_settings.worldinfo_i = [wi for wi in story_settings.worldinfo if wi["init"]] + + # Save path for save button + system_settings.savedir = loadpath + + # Clear loadselect var + user_settings.loadselect = "" + + # Refresh game screen + _filename = filename + if(filename.endswith('.json')): + _filename = filename[:-5] + user_settings.laststory = _filename + #set the story_name + story_settings.story_name = _filename + emit('from_server', {'cmd': 'setstoryname', 'data': user_settings.laststory}, broadcast=True, room="UI_1") + setgamesaved(True) + sendwi() + emit('from_server', {'cmd': 'setmemory', 'data': story_settings.memory}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanote', 'data': story_settings.authornote}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'setanotetemplate', 'data': story_settings.authornotetemplate}, broadcast=True, room="UI_1") + 
refresh_story() + emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True, room="UI_1") + emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True, room="UI_1") + print("{0}Story loaded from {1}!{2}".format(colors.GREEN, filename, colors.END)) + + send_debug() + +def load_story_v2(js): + story_settings.from_json(js) #==================================================================# # Import an AIDungon game exported with Mimi's tool @@ -5769,11 +5807,23 @@ def send_debug(): emit('from_server', {'cmd': 'debug_info', 'data': debug_info}, broadcast=True, room="UI_1") + +#==================================================================# +# UI V2 CODE +#==================================================================# +@app.route('/new_ui') +def new_ui_index(): + return render_template('index_new.html', settings=gensettings.gensettingstf if model_settings.model != "InferKit" else gensettings.gensettingsik ) + +def ui2_connect(): + pass + #==================================================================# # Event triggered when browser SocketIO detects a variable change #==================================================================# @socketio.on('var_change') def UI_2_var_change(data): + print(data) classname = data['ID'].split("_")[0] name = data['ID'][len(classname)+1:] classname += "_settings" @@ -5793,15 +5843,24 @@ def UI_2_var_change(data): print("{} {} = {}".format(classname, name, value)) setattr(globals()[classname], name, value) - - + + #Now let's save except for story changes + if classname != "story_settings": + with open("settings/{}.v2_settings".format(classname), "w") as settings_file: + settings_file.write(globals()[classname].to_json()) + #==================================================================# -# UI V2 CODE +# Saving Story #==================================================================# -@app.route('/new_ui') -def new_ui_index(): - return render_template('index_new.html', 
settings=gensettings.gensettingstf if model_settings.model != "InferKit" else gensettings.gensettingsik ) - +@socketio.on('save_story') +def UI_2_save_story(data): + json_data = story_settings.to_json() + save_name = story_settings.story_name if story_settings.story_name is not None else "untitled" + with open("stories/{}_v2.json".format(save_name), "w") as settings_file: + settings_file.write(story_settings.to_json()) + story_settings.gamesaved = True + + #==================================================================# # Event triggered when Selected Text is edited, Option is Selected, etc #==================================================================# @@ -5824,10 +5883,7 @@ def UI_2_submit(data): #==================================================================# @socketio.on('Pinning') def UI_2_Pinning(data): - if data['set']: - story_settings.actions.set_pin(int(data['chunk']), int(data['option'])) - else: - story_settings.actions.unset_pin(int(data['chunk']), int(data['option'])) + story_settings.actions.toggle_pin(int(data['chunk']), int(data['option'])) #==================================================================# # Event triggered when user clicks the back button @@ -5870,6 +5926,10 @@ def UI_2_relay(data): def show_actions(): return story_settings.actions.actions +@app.route("/story") +def show_story(): + return story_settings.to_json() + #==================================================================# @@ -5952,3 +6012,4 @@ else: model_settings.model = "ReadOnly" load_model(initial_load=True) print("{0}\nServer started in WSGI mode!{1}".format(colors.GREEN, colors.END), flush=True) + diff --git a/koboldai_settings.py b/koboldai_settings.py index 34f89152..6ea3ee1a 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -1,6 +1,7 @@ from flask_socketio import emit, join_room, leave_room, rooms -import os, re, time, threading +import os, re, time, threading, json, pickle, base64, copy, tqdm, datetime import socketio as 
socketio_client +from io import BytesIO socketio = None main_thread_id = threading.get_ident() @@ -47,8 +48,50 @@ def process_variable_changes(classname, name, value, old_value, debug_message=No else: socketio.emit("var_changed", {"classname": classname, "name": name, "old_value": clean_var_for_emit(old_value), "value": clean_var_for_emit(value)}, include_self=True, broadcast=True, room="UI_2") + + class settings(object): + def to_json(self): + json_data = {'file_version': 2} + for (name, value) in vars(self).items(): + if name not in self.no_save_variables: + json_data[name] = value + def to_base64(data): + if isinstance(data, KoboldStoryRegister): + return data.to_json() + output = BytesIO() + pickle.dump(data, output) + output.seek(0) + return "base64:{}".format(base64.encodebytes(output.read()).decode()) + return json.dumps(json_data, default=to_base64) + + def from_json(self, data): + if isinstance(data, str): + json_data = json.loads(data) + else: + json_data = data + for key, value in data.items(): + if key in self.__dict__: + if isinstance(value, str): + if value[:7] == 'base64:': + value = pickle.loads(base64.b64decode(value[7:])) + #Need to fix the data type of value to match the module + if type(getattr(self, key)) == int: + value = int(value) + elif type(getattr(self, key)) == float: + value = float(value) + elif type(getattr(self, key)) == bool: + value = bool(value) + elif type(getattr(self, key)) == str: + value = str(value) + if isinstance(getattr(self, key), KoboldStoryRegister): + self.actions.load_json(value) + else: + setattr(self, key, value) + + + def send_to_ui(self): if socketio is not None: for (name, value) in vars(self).items(): @@ -58,7 +101,8 @@ class settings(object): class model_settings(settings): - local_only_variables = ['badwordsids', 'apikey', '_class_init'] + local_only_variables = ['badwordsids', 'apikey', '_class_init', 'tqdm'] + no_save_variables = ['tqdm'] settings_name = "model" def __init__(self): self.model = "" # Model 
ID string chosen at startup @@ -79,6 +123,9 @@ class model_settings(settings): self.tfs = 1.0 # Default generator tfs (tail-free sampling) self.typical = 1.0 # Default generator typical sampling threshold self.numseqs = 1 # Number of sequences to ask the generator to create + self.generated_tkns = 0 # If using a backend that supports Lua generation modifiers, how many tokens have already been generated, otherwise 0 + self.tqdm = tqdm.tqdm(total=self.genamt, file=self.ignore_tqdm()) # tqdm agent for generating tokens. This will allow us to calculate the remaining time + self.tqdm_rem_time = 0 # tqdm calculated reemaining time self.badwordsids = [] self.fp32_model = False # Whether or not the most recently loaded HF model was in fp32 format self.url = "https://api.inferkit.com/v1/models/standard/generate" # InferKit API URL @@ -96,20 +143,35 @@ class model_settings(settings): self.selected_preset = "" + #dummy class to eat the tqdm output + class ignore_tqdm(object): + def write(self, bar): + pass + def __setattr__(self, name, value): old_value = getattr(self, name, None) super().__setattr__(name, value) #Put variable change actions here + + #Setup TQDP + if name == "generated_tkns" and 'tqdm' in self.__dict__: + if value == 0: + self.tqdm.reset(total=self.genamt) + else: + self.tqdm.update(1) + self.tqdm_rem_time = str(datetime.timedelta(seconds=int(float(self.genamt-self.generated_tkns)/self.tqdm.format_dict['rate']))) + + if name not in self.local_only_variables and name[0] != "_": process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) - - class story_settings(settings): #local_only_variables = ['generated_tkns'] local_only_variables = [] + no_save_variables = [] settings_name = "story" def __init__(self): + self.story_name = None # Title of the story self.lastact = "" # The last action received from the user self.submission = "" # Same as above, but after applying input formatting self.lastctx = "" # The last context 
submitted to the generator @@ -136,7 +198,6 @@ class story_settings(settings): self.wifolders_u = {} # Dictionary of pairs of folder UID - list of WI UID self.lua_edited = set() # Set of chunk numbers that were edited from a Lua generation modifier self.lua_deleted = set() # Set of chunk numbers that were deleted from a Lua generation modifier - self.generated_tkns = 0 # If using a backend that supports Lua generation modifiers, how many tokens have already been generated, otherwise 0 self.deletewi = None # Temporary storage for UID to delete self.mode = "play" # Whether the interface is in play, memory, or edit mode self.editln = 0 # Which line was last selected in Edit Mode @@ -159,9 +220,14 @@ class story_settings(settings): #Put variable change actions here if name not in self.local_only_variables and name[0] != "_": process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) + #We want to automatically set gamesaved to false if something happens to the actions list (pins, redos, generations, text, etc) + #To do that we need to give the actions list a copy of this data so it can set the gamesaved variable as needed + if name == 'actions': + self.actions.story_settings = self class user_settings(settings): local_only_variables = [] + no_save_variables = [] settings_name = "user" def __init__(self): self.wirmvwhtsp = False # Whether to remove leading whitespace from WI entries @@ -190,9 +256,9 @@ class user_settings(settings): if name not in self.local_only_variables and name[0] != "_": process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) - class system_settings(settings): local_only_variables = ['lua_state', 'lua_logname', 'lua_koboldbridge', 'lua_kobold', 'lua_koboldcore', 'regex_sl', 'acregex_ai', 'acregex_ui', 'comregex_ai', 'comregex_ui'] + no_save_variables = [] settings_name = "system" def __init__(self): self.noai = False # Runs the script without starting up the 
transformers pipeline @@ -248,7 +314,6 @@ class system_settings(settings): if name not in self.local_only_variables and name[0] != "_": process_variable_changes(self.__class__.__name__.replace("_settings", ""), name, value, old_value) - class KoboldStoryRegister(object): def __init__(self, sequence=[]): self.actions = {} @@ -290,6 +355,7 @@ class KoboldStoryRegister(object): old_text = None self.actions[i] = {"Selected Text": text, "Options": []} process_variable_changes("actions", "Selected Text", {"id": i, "text": text}, {"id": i, "text": old_text}) + self.set_game_saved() def __len__(self): return self.action_count+1 if self.action_count >=0 else 0 @@ -317,6 +383,7 @@ class KoboldStoryRegister(object): self.action_count = json_data['action_count'] self.actions = temp + self.set_game_saved() def get_action(self, action_id): if action_id not in actions: @@ -342,15 +409,17 @@ class KoboldStoryRegister(object): else: self.actions[self.action_count] = {"Selected Text": text, "Options": []} process_variable_changes("actions", "Selected Text", {"id": self.action_count, "text": text}, None) + self.set_game_saved() def append_options(self, option_list): if self.action_count+1 in self.actions: - old_options = self.actions[self.action_count+1]["Options"].copy() + old_options = copy.deepcopy(self.actions[self.action_count+1]["Options"]) self.actions[self.action_count+1]['Options'].extend([{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False} for x in option_list]) else: old_options = None self.actions[self.action_count+1] = {"Selected Text": "", "Options": [{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False} for x in option_list]} process_variable_changes("actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": old_options}) + self.set_game_saved() def clear_unused_options(self, pointer=None): new_options = [] @@ -358,10 +427,11 @@ class 
KoboldStoryRegister(object): if pointer is None: pointer = self.action_count+1 if pointer in self.actions: - old_options = self.actions[pointer]["Options"].copy() + old_options = copy.deepcopy(self.actions[pointer]["Options"]) self.actions[pointer]["Options"] = [x for x in self.actions[pointer]["Options"] if x["Pinned"] or x["Previous Selection"] or x["Edited"]] new_options = self.actions[pointer]["Options"] process_variable_changes("actions", "Options", {"id": pointer, "options": new_options}, {"id": pointer, "options": old_options}) + self.set_game_saved() def toggle_pin(self, action_step, option_number): if action_step in self.actions: @@ -374,22 +444,32 @@ class KoboldStoryRegister(object): def set_pin(self, action_step, option_number): if action_step in self.actions: if option_number < len(self.actions[action_step]['Options']): - old_options = self.actions[action_step]["Options"].copy() + old_options = copy.deepcopy(self.actions[action_step]["Options"]) self.actions[action_step]['Options'][option_number]['Pinned'] = True process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) + self.set_game_saved() def unset_pin(self, action_step, option_number): if action_step in self.actions: - old_options = self.actions[action_step]["Options"].copy() + old_options = copy.deepcopy(self.actions[action_step]["Options"]) if option_number < len(self.actions[action_step]['Options']): self.actions[action_step]['Options'][option_number]['Pinned'] = False process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) + self.set_game_saved() + + def toggle_pin(self, action_step, option_number): + if action_step in self.actions: + old_options = copy.deepcopy(self.actions[action_step]["Options"]) + if option_number < len(self.actions[action_step]['Options']): + 
self.actions[action_step]['Options'][option_number]['Pinned'] = not self.actions[action_step]['Options'][option_number]['Pinned'] + process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) + self.set_game_saved() def use_option(self, option_number, action_step=None): if action_step is None: action_step = self.action_count+1 if action_step in self.actions: - old_options = self.actions[action_step]["Options"].copy() + old_options = copy.deepcopy(self.actions[action_step]["Options"]) old_text = self.actions[action_step]["Selected Text"] if option_number < len(self.actions[action_step]['Options']): self.actions[action_step]["Selected Text"] = self.actions[action_step]['Options'][option_number]['text'] @@ -400,22 +480,25 @@ class KoboldStoryRegister(object): socketio.emit("var_changed", {"classname": "actions", "name": "Action Count", "old_value": None, "value":self.action_count}, broadcast=True, room="UI_2") process_variable_changes("actions", "Options", {"id": action_step, "options": self.actions[action_step]["Options"]}, {"id": action_step, "options": old_options}) process_variable_changes("actions", "Selected Text", {"id": action_step, "text": self.actions[action_step]["Selected Text"]}, {"id": action_step, "Selected Text": old_text}) + self.set_game_saved() def delete_action(self, action_id): if action_id in self.actions: - old_options = self.actions[action_id]["Options"].copy() + old_options = copy.deepcopy(self.actions[action_id]["Options"]) old_text = self.actions[action_id]["Selected Text"] self.actions[action_id]["Options"].append({"text": self.actions[action_id]["Selected Text"], "Pinned": False, "Previous Selection": True, "Edited": False}) self.actions[action_id]["Selected Text"] = "" self.action_count -= 1 process_variable_changes("actions", "Selected Text", {"id": action_id, "text": None}, {"id": action_id, "text": old_text}) 
process_variable_changes("actions", "Options", {"id": action_id, "options": self.actions[action_id]["Options"]}, {"id": action_id, "options": old_options}) + self.set_game_saved() def pop(self): if self.action_count >= 0: text = self.actions[self.action_count] self.delete_action(self.action_count) process_variable_changes("actions", "Selected Text", {"id": self.action_count, "text": None}, {"id": self.action_count, "text": text}) + self.set_game_saved() return text else: return None @@ -493,7 +576,10 @@ class KoboldStoryRegister(object): return [x for x in self.actions[self.action_count+1]['Options'] if x['Pinned'] or x['Previous Selection']] else: return [] - + + def set_game_saved(self): + if 'story_settings' in self.__dict__: + self.story_settings.gamesaved = False def __setattr__(self, name, value): old_value = getattr(self, name, None) super().__setattr__(name, value) diff --git a/settings/preset/official.presets b/settings/preset/official.presets new file mode 100644 index 00000000..3e3259de --- /dev/null +++ b/settings/preset/official.presets @@ -0,0 +1,95 @@ +{"EleutherAI_gpt-neo-1.3B": + { + "Storywriter": { + "temp": 0.72, + "genamt": 40, + "rep_pen": 1.2, + "top_p": 0.725, + "top_k": 0, + "tfs": 1, + "rep_pen_slope": 2048, + "rep_pen_range": 0.18, + "typical": 1, + "top_a": 0, + "description": "Optimized settings for relevant output." + }, + "Coherent Creativity": { + "temp": 0.51, + "genamt": 40, + "rep_pen": 1.2, + "top_p": 1, + "top_k": 0, + "tfs": 0.9, + "rep_pen_slope": 2048, + "rep_pen_range": 0, + "typical": 1, + "top_a": 0, + "description": "A good balance between coherence, creativity, and quality of prose." + }, + "Luna Moth": { + "temp": 2, + "genamt": 40, + "rep_pen": 1.2, + "top_p": 0.235, + "top_k": 85, + "tfs": 1, + "rep_pen_slope": 2048, + "rep_pen_range": 0, + "typical": 1, + "top_a": 0, + "description": "A great degree of creativity without losing coherency." 
+ }, + "Sphinx Moth": { + "temp": 2, + "genamt": 40, + "rep_pen": 1.2, + "top_p": 0.175, + "top_k": 30, + "tfs": 1, + "rep_pen_slope": 2048, + "rep_pen_range": 0, + "typical": 1, + "top_a": 0, + "description": "Maximum randomness while still being plot relevant. Like Sphinx riddles!" + }, + "Emperor Moth": { + "temp": 1.25, + "genamt": 40, + "rep_pen": 1.2, + "top_p": 0.235, + "top_k": 0, + "tfs": 1, + "rep_pen_slope": 2048, + "rep_pen_range": 0, + "typical": 1, + "top_a": 0, + "description": "Medium randomness with a decent bit of creative writing." + }, + "Best Guess": { + "temp": 0.8, + "genamt": 40, + "rep_pen": 1.2, + "top_p": 0.9, + "top_k": 100, + "tfs": 1, + "rep_pen_slope": 512, + "rep_pen_range": 3.33, + "typical": 1, + "top_a": 0, + "description": "A subtle change with alternative context settings." + }, + "Pleasing Results": { + "temp": 0.44, + "genamt": 40, + "rep_pen": 1.2, + "top_p": 1, + "top_k": 0, + "tfs": 0.9, + "rep_pen_slope": 1024, + "rep_pen_range": 6.75, + "typical": 1, + "top_a": 0, + "description": "Expectable output with alternative context settings." 
+ } + } +} \ No newline at end of file diff --git a/static/koboldai.css b/static/koboldai.css index 3c8afbaf..7cf2f8f2 100644 --- a/static/koboldai.css +++ b/static/koboldai.css @@ -327,6 +327,8 @@ td.server_vars { } + + /* ---------------------------- OVERALL PAGE CONFIG ------------------------------*/ body { background-color: var(--background); @@ -336,7 +338,7 @@ body { .main-grid { transition: margin-left .5s; display: grid; - min-height: 98vh; + height: 98vh; margin-left: var(--flyout_menu_closed_width); /* grid-template-areas: "menuicon gamescreen lefticon" "menuicon actions lefticon" @@ -371,6 +373,13 @@ body { margin-top: auto; padding-bottom: 1px; vertical-align: bottom; + overflow-y: scroll; + -ms-overflow-style: none; /* IE and Edge */ + scrollbar-width: none; /* Firefox */ +} +/* Hide scrollbar for Chrome, Safari and Opera */ +.gametext::-webkit-scrollbar { + display: none; } [contenteditable="true"]:active, @@ -384,6 +393,7 @@ body { margin-top: 10px; grid-area: options; background-color: var(--gamescreen_background); + overflow-y: scroll; } table.sequence { @@ -446,14 +456,14 @@ td.sequence:hover { color: var(--text); } -.inputrow .submit[server_value=false] { +.inputrow .submit[system_aibusy=false] { grid-area: submit; height: 100%; width: 100%; text-align: center; overflow: hidden; } -.inputrow .submit[server_value=true] { +.inputrow .submit[system_aibusy=true] { grid-area: submit; height: 100%; width: 100%; @@ -462,7 +472,7 @@ td.sequence:hover { display: none; } -.inputrow .submited[server_value=false] { +.inputrow .submited[system_aibusy=false] { grid-area: submit; height: 100%; width: 100%; @@ -470,7 +480,7 @@ td.sequence:hover { overflow: hidden; display: none; } -.inputrow .submited[server_value=true] { +.inputrow .submited[system_aibusy=true] { grid-area: submit; height: 100%; width: 100%; diff --git a/static/koboldai.js b/static/koboldai.js index 5dfdbb91..69da49a9 100644 --- a/static/koboldai.js +++ b/static/koboldai.js @@ -12,6 +12,7 @@ 
socket.on('var_changed', function(data){var_changed(data);}); var backend_vars = {}; var presets = {} +var ai_busy_start = Date.now(); //-----------------------------------Server to UI Functions----------------------------------------------- function connect() { console.log("connected"); @@ -54,14 +55,11 @@ function create_options(data) { var chunk = children[i]; if (chunk.id == "Select Options Chunk " + current_chunk) { chunk.classList.remove("hidden"); - console.log(current_chunk); } else { chunk.classList.add("hidden"); } } - console.log(current_chunk); - console.log(data); if (document.getElementById("Select Options Chunk "+data.value.id)) { var option_chunk = document.getElementById("Select Options Chunk "+data.value.id) } else { @@ -77,37 +75,6 @@ function create_options(data) { var table = document.createElement("table"); table.classList.add("sequence"); table.style = "border-spacing: 0;"; - //Add pins - i=0; - for (item of data.value.options) { - if (item.Pinned) { - var row = document.createElement("tr"); - row.classList.add("sequence"); - var textcell = document.createElement("td"); - textcell.textContent = item.text; - textcell.classList.add("sequence"); - textcell.setAttribute("option_id", i); - textcell.setAttribute("option_chunk", data.value.id); - var iconcell = document.createElement("td"); - iconcell.setAttribute("option_id", i); - iconcell.setAttribute("option_chunk", data.value.id); - var icon = document.createElement("span"); - icon.id = "Pin_"+i; - icon.classList.add("oi"); - icon.setAttribute('data-glyph', "pin"); - iconcell.append(icon); - textcell.onclick = function () { - socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); - }; - iconcell.onclick = function () { - socket.emit("Pinning", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id"), "set": false}); - }; - row.append(textcell); - row.append(iconcell); - table.append(row); - } - i+=1; 
- } //Add Redo options i=0; for (item of data.value.options) { @@ -139,7 +106,7 @@ function create_options(data) { //Add general options i=0; for (item of data.value.options) { - if (!(item.Edited) && !(item.Pinned) && !(item['Previous Selection'])) { + if (!(item.Edited) && !(item['Previous Selection'])) { var row = document.createElement("tr"); row.classList.add("sequence"); var textcell = document.createElement("td"); @@ -154,10 +121,12 @@ function create_options(data) { icon.id = "Pin_"+i; icon.classList.add("oi"); icon.setAttribute('data-glyph', "pin"); - icon.setAttribute('style', "filter: brightness(50%);"); + if (!(item.Pinned)) { + icon.setAttribute('style', "filter: brightness(50%);"); + } iconcell.append(icon); iconcell.onclick = function () { - socket.emit("Pinning", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id"), "set": true}); + socket.emit("Pinning", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); }; textcell.onclick = function () { socket.emit("Set Selected Text", {"chunk": this.getAttribute("option_chunk"), "option": this.getAttribute("option_id")}); @@ -193,6 +162,7 @@ function do_story_text_updates(data) { } function do_presets(data) { + console.log(data); var select = document.getElementById('presets'); //clear out the preset list while (select.firstChild) { @@ -203,11 +173,11 @@ function do_presets(data) { option.value=""; option.text="presets"; select.append(option); - for (item of data.value) { - presets[item.preset] = item; + for (const [key, value] of Object.entries(data.value)) { + presets[key] = value; var option = document.createElement("option"); - option.value=item.preset; - option.text=item.preset; + option.value=key; + option.text=key; select.append(option); } } @@ -251,6 +221,20 @@ function update_status_bar(data) { document.title = "KoboldAI Client Generating (" + percent_complete + "%)"; } } + +function do_ai_busy(data) { + if (data.value) { + 
ai_busy_start = Date.now(); + favicon.start_swap() + } else { + runtime = Date.now() - ai_busy_start; + if (document.getElementById("Execution Time")) { + document.getElementById("Execution Time").textContent = Math.round(runtime/1000).toString().toHHMMSS(); + } + favicon.stop_swap() + } +} + function var_changed(data) { //Special Case for Story Text if ((data.classname == "actions") && (data.name == "Selected Text")) { @@ -276,22 +260,18 @@ function var_changed(data) { //alternative syncing method var elements_to_change = document.getElementsByClassName("var_sync_alt_"+data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_")); for (item of elements_to_change) { - item.setAttribute("server_value", fix_text(data.value)); + item.setAttribute(data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_"), fix_text(data.value)); } } //if we're updating generated tokens, let's show that in our status bar - if ((data.classname == 'story') && (data.name == 'generated_tkns')) { + if ((data.classname == 'model') && (data.name == 'generated_tkns')) { update_status_bar(data); } //If we have ai_busy, start the favicon swapping if ((data.classname == 'system') && (data.name == 'aibusy')) { - if (data.value) { - favicon.start_swap() - } else { - favicon.stop_swap() - } + do_ai_busy(data); } } @@ -299,6 +279,18 @@ function var_changed(data) { //--------------------------------------------General UI Functions------------------------------------ +String.prototype.toHHMMSS = function () { + var sec_num = parseInt(this, 10); // don't forget the second param + var hours = Math.floor(sec_num / 3600); + var minutes = Math.floor((sec_num - (hours * 3600)) / 60); + var seconds = sec_num - (hours * 3600) - (minutes * 60); + + if (hours < 10) {hours = "0"+hours;} + if (minutes < 10) {minutes = "0"+minutes;} + if (seconds < 10) {seconds = "0"+seconds;} + return hours+':'+minutes+':'+seconds; +} + function toggle_flyout(x) { if 
(document.getElementById("SideMenu").classList.contains("open")) { x.classList.remove("change"); diff --git a/templates/index_new.html b/templates/index_new.html index 1b11b16d..f9f6cc43 100644 --- a/templates/index_new.html +++ b/templates/index_new.html @@ -34,7 +34,7 @@
- +
@@ -44,8 +44,8 @@
-
-
25%
+
@@ -58,9 +58,8 @@
-
Status:
- - +
+ {% include 'story flyout.html' %}
\ No newline at end of file diff --git a/templates/settings flyout.html b/templates/settings flyout.html index 8e0cf776..a12744bd 100644 --- a/templates/settings flyout.html +++ b/templates/settings flyout.html @@ -10,6 +10,16 @@ #Model_Info { width: 100%; } + #Story_Info { + width: 100%; + } + #save_story[story_gamesaved="true"] { + text: var(--disabled_button_text); + background-color: var(--disabled_button_background_color); + border-color: var(--disabled_button_border_color); + filter: brightness(85%); + } +
@@ -17,7 +27,7 @@ - +
@@ -30,6 +40,10 @@ {% endwith %}
{% endif %} {% endfor %} \ No newline at end of file From d45a8329e2cea96191e7288b59e575ddf8745462 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 15:40:31 -0400 Subject: [PATCH 0013/1297] Missing file --- templates/story flyout.html | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 templates/story flyout.html diff --git a/templates/story flyout.html b/templates/story flyout.html new file mode 100644 index 00000000..45344193 --- /dev/null +++ b/templates/story flyout.html @@ -0,0 +1,28 @@ +Status:
+
Execution Time:
+
Remaining Time:
+
+
+ Notes (ignored by AI):
+ +
+
+
+ Memory:
+ +
+
+
+ Author's Notes:
+
+ Template:
+
+ andepth
+ + + 0 + 5 + +
\ No newline at end of file From a2a2888f5fe74cec899cb725b033d0c8c9f40476 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 16:50:16 -0400 Subject: [PATCH 0014/1297] Test for colab --- koboldai_settings.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/koboldai_settings.py b/koboldai_settings.py index 1dc35c2e..b5a9ced5 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -164,7 +164,12 @@ class settings(object): def send_to_ui(self): for (name, value) in vars(self).items(): if name not in self.local_only_variables and name[0] != "_": - process_variable_changes(self.socketio, self.__class__.__name__.replace("_settings", ""), name, value, None) + print(name) + try: + process_variable_changes(self.socketio, self.__class__.__name__.replace("_settings", ""), name, clean_var_for_emit(value), None) + except: + print("{} is of type {} and I can't transmit".format(name, type(value))) + raise class model_settings(settings): local_only_variables = ['badwordsids', 'apikey', 'tqdm', 'socketio'] From 04faf2748700852e89a8b942e32f1456d7c52df4 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 17:11:17 -0400 Subject: [PATCH 0015/1297] Softprompt Fix --- koboldai_settings.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/koboldai_settings.py b/koboldai_settings.py index b5a9ced5..ea0473b4 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -164,7 +164,6 @@ class settings(object): def send_to_ui(self): for (name, value) in vars(self).items(): if name not in self.local_only_variables and name[0] != "_": - print(name) try: process_variable_changes(self.socketio, self.__class__.__name__.replace("_settings", ""), name, clean_var_for_emit(value), None) except: @@ -348,7 +347,7 @@ class user_settings(settings): process_variable_changes(self.socketio, self.__class__.__name__.replace("_settings", ""), name, value, old_value) class system_settings(settings): - local_only_variables = ['socketio', 
'lua_state', 'lua_logname', 'lua_koboldbridge', 'lua_kobold', 'lua_koboldcore', 'regex_sl', 'acregex_ai', 'acregex_ui', 'comregex_ai', 'comregex_ui'] + local_only_variables = ['socketio', 'lua_state', 'lua_logname', 'lua_koboldbridge', 'lua_kobold', 'lua_koboldcore', 'regex_sl', 'acregex_ai', 'acregex_ui', 'comregex_ai', 'comregex_ui', 'sp'] no_save_variables = ['socketio'] settings_name = "system" def __init__(self, socketio): From 283cec117f6160047c40c27daf1f28de2c6d024d Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 17:23:00 -0400 Subject: [PATCH 0016/1297] Create Story Fix --- koboldai_settings.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/koboldai_settings.py b/koboldai_settings.py index ea0473b4..91f695ae 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -74,7 +74,8 @@ class koboldai_vars(object): def create_story(self, story_name, json_data=None): story_name = 'default' if story_name in self._story_settings: - self._story_settings[story_name].reset() + + self._story_settings[story_name]__init__() else: self._story_settings[story_name] = story_settings(self.socketio) if json_data is not None: From ed193566474917c1a7b78c263fd78b86b203159d Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 17:24:14 -0400 Subject: [PATCH 0017/1297] Fix for create story --- koboldai_settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/koboldai_settings.py b/koboldai_settings.py index 91f695ae..ac40a753 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -75,7 +75,7 @@ class koboldai_vars(object): story_name = 'default' if story_name in self._story_settings: - self._story_settings[story_name]__init__() + self._story_settings[story_name].__init__() else: self._story_settings[story_name] = story_settings(self.socketio) if json_data is not None: From 3bf0d387e0449cae7abafd2e3101891355c4d890 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 17:31:17 -0400 Subject: [PATCH 
0018/1297] Fix for create story --- koboldai_settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/koboldai_settings.py b/koboldai_settings.py index ac40a753..1f5f9a2f 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -75,7 +75,7 @@ class koboldai_vars(object): story_name = 'default' if story_name in self._story_settings: - self._story_settings[story_name].__init__() + self._story_settings[story_name].__init__(self._story_settings[story_name].socketio) else: self._story_settings[story_name] = story_settings(self.socketio) if json_data is not None: From 72827ed1497b89e3308cbf5b5c73ab96b94d24e0 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 17:44:22 -0400 Subject: [PATCH 0019/1297] Colab fix and send_to_ui fix --- aiserver.py | 2 +- koboldai_settings.py | 9 +++++---- static/koboldai.js | 3 ++- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/aiserver.py b/aiserver.py index 023dbb42..2aa90c2c 100644 --- a/aiserver.py +++ b/aiserver.py @@ -4213,7 +4213,7 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): koboldai_vars.actions.append_options([x["generated_text"] for x in genout]) genout = [{"generated_text": x['text']} for x in koboldai_vars.actions.get_current_options()] if(len(koboldai_vars.actions.get_current_options()) == 1): - genresult(koboldai_vars.actions.get_current_options()[0]) + genresult(koboldai_vars.actions.get_current_options()[0]['text']) else: if(koboldai_vars.lua_koboldbridge.restart_sequence is not None and koboldai_vars.lua_koboldbridge.restart_sequence > 0): genresult(genout[koboldai_vars.lua_koboldbridge.restart_sequence-1]["generated_text"]) diff --git a/koboldai_settings.py b/koboldai_settings.py index 1f5f9a2f..76553869 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -88,10 +88,11 @@ class koboldai_vars(object): self._model_settings.send_to_ui() self._user_settings.send_to_ui() self._system_settings.send_to_ui() - if 'story' in self._sessions: - 
self._story_settings[self._sessions['story']].send_to_ui() - else: - self._story_settings['default'].send_to_ui() + #if 'story' in self._sessions: + # self._story_settings[self._sessions['story']].send_to_ui() + #else: + # self._story_settings['default'].send_to_ui() + self._story_settings['default'].send_to_ui() def __setattr__(self, name, value): if name[0] == "_": diff --git a/static/koboldai.js b/static/koboldai.js index 4dad96e9..7eca766e 100644 --- a/static/koboldai.js +++ b/static/koboldai.js @@ -8,7 +8,7 @@ socket.on("disconnect", (reason, details) => { }); socket.on('reset_story', function(){reset_story();}); socket.on('var_changed', function(data){var_changed(data);}); -socket.onAny(function(event_name, data) {console.log({"event": event_name, "class": data.classname, "data": data});}); +//socket.onAny(function(event_name, data) {console.log({"event": event_name, "class": data.classname, "data": data});}); var backend_vars = {}; var presets = {} @@ -141,6 +141,7 @@ function create_options(data) { } function do_story_text_updates(data) { + console.log(data); story_area = document.getElementById('Selected Text'); if (document.getElementById('Selected Text Chunk '+data.value.id)) { document.getElementById('Selected Text Chunk '+data.value.id).textContent = data.value.text; From 3e1c9ebacb757f79abbd8eb984694dec1036aa9f Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 17:50:35 -0400 Subject: [PATCH 0020/1297] requirements_mtj.txt updated --- requirements_mtj.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements_mtj.txt b/requirements_mtj.txt index 0f723a49..a05c333a 100644 --- a/requirements_mtj.txt +++ b/requirements_mtj.txt @@ -16,3 +16,5 @@ eventlet lupa==1.10 markdown bleach==4.1.0 +python-socketio[client] +flask_session \ No newline at end of file From ce1bff1b8451b60554c4f9e4adb0b7dcfdb1162b Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 29 Jun 2022 17:56:25 -0400 Subject: [PATCH 0021/1297] TPU fixes --- aiserver.py | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/aiserver.py b/aiserver.py index 2aa90c2c..a25ae047 100644 --- a/aiserver.py +++ b/aiserver.py @@ -4218,7 +4218,7 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None): if(koboldai_vars.lua_koboldbridge.restart_sequence is not None and koboldai_vars.lua_koboldbridge.restart_sequence > 0): genresult(genout[koboldai_vars.lua_koboldbridge.restart_sequence-1]["generated_text"]) else: - genselect([{"generated_text": x} for x in koboldai_vars.actions.get_current_options()]) + genselect([{"generated_text": x['text']} for x in koboldai_vars.actions.get_current_options()]) set_aibusy(0) From 16c5c580dbc7c6a70917a1bed7045c7f525f85a2 Mon Sep 17 00:00:00 2001 From: ebolam Date: Thu, 30 Jun 2022 13:40:47 -0400 Subject: [PATCH 0022/1297] Checkin --- aiserver.py | 211 ++++++++++++++- static/koboldai.css | 163 +++++++++++- static/koboldai.js | 464 +++++++++++++++++++++++++++++++++ templates/index_new.html | 48 ++++ templates/settings flyout.html | 2 +- 5 files changed, 878 insertions(+), 10 deletions(-) diff --git a/aiserver.py b/aiserver.py index a25ae047..6880ebfa 100644 --- a/aiserver.py +++ b/aiserver.py @@ -263,8 +263,10 @@ def sendModelSelection(menu="mainmenu", folder="./models"): else: showdelete=False emit('from_server', {'cmd': 'show_model_menu', 'data': menu_list, 'menu': menu, 'breadcrumbs': breadcrumbs, "showdelete": showdelete}, broadcast=True, room="UI_1") + emit('show_model_menu', {'data': menu_list, 'menu': menu, 'breadcrumbs': breadcrumbs, "showdelete": showdelete}, broadcast=False, room="UI_2") else: emit('from_server', {'cmd': 'show_model_menu', 'data': model_menu[menu], 'menu': menu, 'breadcrumbs': [], "showdelete": False}, broadcast=True, room="UI_1") + emit('show_model_menu', {'data': model_menu[menu], 'menu': menu, 'breadcrumbs': [], "showdelete": False}, broadcast=False, room="UI_2") def get_folder_path_info(base): if base == 'This PC': @@ -333,7 +335,7 @@ def check_if_dir_is_model(path): 
if os.path.exists(path): try: from transformers import AutoConfig - model_config = AutoConfig.from_pretrained(path, revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(path) except: return False return True @@ -1034,12 +1036,18 @@ def get_model_info(model, directory=""): break_values = break_values.split(",") else: break_values = [layer_count] + break_values = [int(x) for x in break_values] break_values += [0] * (gpu_count - len(break_values)) emit('from_server', {'cmd': 'selected_model_info', 'key_value': key_value, 'key':key, 'gpu':gpu, 'layer_count':layer_count, 'breakmodel':breakmodel, 'disk_break_value': disk_blocks, 'accelerate': utils.HAS_ACCELERATE, 'break_values': break_values, 'gpu_count': gpu_count, 'url': url, 'gpu_names': gpu_names}, broadcast=True, room="UI_1") + emit('selected_model_info', {'key_value': key_value, 'key':key, + 'gpu':gpu, 'layer_count':layer_count, 'breakmodel':breakmodel, + 'disk_break_value': disk_blocks, 'disk_break': not utils.HAS_ACCELERATE, + 'break_values': break_values, 'gpu_count': gpu_count, + 'url': url, 'gpu_names': gpu_names}, broadcast=False, room="UI_2") if key_value != "": get_oai_models(key_value) @@ -1047,18 +1055,16 @@ def get_model_info(model, directory=""): def get_layer_count(model, directory=""): if(model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]): if(koboldai_vars.model == "GPT2Custom"): - model_config = open(koboldai_vars.custmodpth + "/config.json", "r") + model_config = open(directory + "/config.json", "r") # Get the model_type from the config or assume a model type if it isn't present else: from transformers import AutoConfig if directory == "": - model_config = AutoConfig.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache") - elif(os.path.isdir(koboldai_vars.custmodpth.replace('/', '_'))): - model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), 
revision=koboldai_vars.revision, cache_dir="cache") - elif(os.path.isdir(directory)): - model_config = AutoConfig.from_pretrained(directory, revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model, cache_dir="cache") + elif os.path.isdir(directory): + model_config = AutoConfig.from_pretrained(directory, cache_dir="cache") else: - model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") + assert "Selected Model directory doesn't exist" @@ -5821,8 +5827,177 @@ def new_ui_index(): def ui2_connect(): #Send all variables to client koboldai_vars.send_to_ui() + pass +#==================================================================# +# File Popup options +#==================================================================# +@app.route("/popup_test") +def popup_test(): + file_popup("Test Popup", "./", "return_event_name", folder_only=False, editable=True, deleteable=True, jailed=False, item_check=check_if_dir_is_model) + return "ok" + +@socketio.on('popup_change_folder') +def popup_change_folder(data): + print("Doing popup change folder: {}".format(data)) + if 'popup_jailed_dir' not in session: + print("Someone is trying to get at files in your server. Blocked.") + return + if session['popup_jailed_dir'] is None: + get_files_folders(data) + elif session['popup_jailed_dir'] in data: + get_files_folders(data) + else: + print("User is trying to get at files in your server outside the jail. Blocked. Jailed Dir: {} Requested Dir: {}".format(session['popup_jailed_dir'], data)) + +@socketio.on('popup_delete') +def popup_delete(data): + if 'popup_deletable' not in session: + print("Someone is trying to delete a file in your server. Blocked.") + return + if not session['popup_deletable']: + print("Someone is trying to delete a file in your server. 
Blocked.") + return + + if session['popup_jailed_dir'] is None: + import shutil + if os.path.isdir(data): + shutil.rmtree(data) + else: + os.remove(data) + path = os.path.abspath(data).replace("\\", "/") + if path[-1] == "/": + path = path[:-1] + path = "/".join(path.split("/")[:-1]) + get_files_folders(path) + elif session['popup_jailed_dir'] in data: + import shutil + if os.path.isdir(data): + shutil.rmtree(data) + else: + os.remove(data) + path = os.path.abspath(data).replace("\\", "/") + if path[-1] == "/": + path = path[:-1] + path = "/".join(path.split("/")[:-1]) + get_files_folders(path) + else: + print("User is trying to delete files in your server outside the jail. Blocked. Jailed Dir: {} Requested Dir: {}".format(session['popup_jailed_dir'], data)) + +@socketio.on('popup_edit') +def popup_edit(data): + if 'popup_editable' not in session: + print("Someone is trying to edit a file in your server. Blocked.") + return + if not session['popup_editable']: + print("Someone is trying to edit a file in your server. Blocked.") + return + + if session['popup_jailed_dir'] is None: + emit("popup_edit_file", {"file": data, "text": open(data, 'r').read()}); + elif session['popup_jailed_dir'] in data: + emit("popup_edit_file", {"file": data, "text": open(data, 'r').read()}); + else: + print("User is trying to delete files in your server outside the jail. Blocked. Jailed Dir: {} Requested Dir: {}".format(session['popup_jailed_dir'], data)) + +@socketio.on('popup_change_file') +def popup_change_file(data): + if 'popup_editable' not in session: + print("Someone is trying to edit a file in your server. Blocked.") + return + if not session['popup_editable']: + print("Someone is trying to edit a file in your server. 
Blocked.") + return + + if session['popup_jailed_dir'] is None: + with open(data['file'], 'w') as f: + f.write(data['data']) + elif session['popup_jailed_dir'] in data['file']: + with open(data['file'], 'w') as f: + f.write(data['data']) + else: + print("User is trying to delete files in your server outside the jail. Blocked. Jailed Dir: {} Requested Dir: {}".format(session['popup_jailed_dir'], data)) + +def file_popup(popup_title, starting_folder, return_event, jailed=True, folder_only=True, deleteable=False, editable=False, show_breadcrumbs=True, item_check=None, show_hidden=False): + #starting_folder = The folder we're going to get folders and/or items from + #return_event = the socketio event that will be emitted when the load button is clicked + #jailed = if set to true will look for the session variable jailed_folder and prevent navigation outside of that folder + #folder_only = will only show folders, no files + #deletable = will show the delete icons/methods. + #editable = will show the edit icons/methods + #show_breadcrumbs = will show the breadcrumbs at the top of the screen + #item_check will call this function to check if the item is valid as a selection if not none. Will pass absolute directory as only argument to function + #show_hidden = ... really, you have to ask? 
+ if jailed: + session['popup_jailed_dir'] = os.path.abspath(starting_folder).replace("\\", "/") + else: + session['popup_jailed_dir'] = None + session['popup_deletable'] = deleteable + session['popup_editable'] = editable + session['popup_show_hidden'] = show_hidden + session['popup_item_check'] = item_check + session['popup_folder_only'] = folder_only + session['popup_show_breadcrumbs'] = show_breadcrumbs + + socketio.emit("load_popup", {"popup_title": popup_title, "call_back": return_event, "deleteable": deleteable, "editable": editable}, broadcast=True, room="UI_2") + socketio.emit("load_popup", {"popup_title": popup_title, "call_back": return_event, "deleteable": deleteable, "editable": editable}, broadcast=True, room="UI_1") + + get_files_folders(starting_folder) + + +def get_files_folders(starting_folder): + import stat + item_check = session['popup_item_check'] + show_breadcrumbs = session['popup_show_breadcrumbs'] + show_hidden = session['popup_show_hidden'] + folder_only = session['popup_folder_only'] + + if starting_folder == 'This PC': + breadcrumbs = [['This PC', 'This PC']] + items = [["{}:/".format(chr(i)), "{}:\\".format(chr(i))] for i in range(65, 91) if os.path.exists("{}:".format(chr(i)))] + else: + path = os.path.abspath(starting_folder).replace("\\", "/") + if path[-1] == "/": + path = path[:-1] + breadcrumbs = [] + for i in range(len(path.split("/"))): + breadcrumbs.append(["/".join(path.split("/")[:i+1]), + path.split("/")[i]]) + if len(breadcrumbs) == 1: + breadcrumbs = [["{}:/".format(chr(i)), "{}:\\".format(chr(i))] for i in range(65, 91) if os.path.exists("{}:".format(chr(i)))] + else: + if len([["{}:/".format(chr(i)), "{}:\\".format(chr(i))] for i in range(65, 91) if os.path.exists("{}:".format(chr(i)))]) > 0: + breadcrumbs.insert(0, ['This PC', 'This PC']) + folders = [] + files = [] + base_path = os.path.abspath(starting_folder).replace("\\", "/") + for item in os.listdir(base_path): + item_full_path = os.path.join(base_path, 
item).replace("\\", "/") + if hasattr(os.stat(item_full_path), "st_file_attributes"): + hidden = bool(os.stat(item_full_path).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN) + else: + hidden = item[0] == "." + if item_check is None: + valid_selection = True + else: + valid_selection = item_check(item_full_path) + + if (show_hidden and hidden) or not hidden: + if os.path.isdir(os.path.join(base_path, item)): + folders.append([True, item_full_path, item, valid_selection]) + else: + files.append([False, item_full_path, item, valid_selection]) + items = folders + if not folder_only: + items += files + + socketio.emit("popup_items", items, broadcast=True, include_self=True, room="UI_2") + socketio.emit("popup_items", items, broadcast=True, include_self=True, room="UI_1") + if show_breadcrumbs: + socketio.emit("popup_breadcrumbs", breadcrumbs, broadcast=True, room="UI_2") + socketio.emit("popup_breadcrumbs", breadcrumbs, broadcast=True, room="UI_1") + #==================================================================# # Event triggered when browser SocketIO detects a variable change #==================================================================# @@ -5914,6 +6089,26 @@ def UI_2_retry(data): koboldai_vars.recentrng = koboldai_vars.recentrngm = None actionsubmit("", actionmode=koboldai_vars.actionmode) +#==================================================================# +# Event triggered when user clicks the load model button +#==================================================================# +@socketio.on('load_model_button') +def UI_2_load_model_button(data): + sendModelSelection() + +#==================================================================# +# Event triggered when user clicks the a model +#==================================================================# +@socketio.on('select_model') +def UI_2_load_model_button(data): + if data['model'] in model_menu: + sendModelSelection(menu=data['model']) + else: + #We now have some model we want to potentially 
load. + #First we need to send the client the model parameters (layers, etc) + print("getting model info for {}".format(data['model'])) + get_model_info(data['model']) + #==================================================================# # Event triggered to rely a message #==================================================================# diff --git a/static/koboldai.css b/static/koboldai.css index eccbc31e..c8fcac85 100644 --- a/static/koboldai.css +++ b/static/koboldai.css @@ -21,6 +21,8 @@ --enabled_button_text: #fff; --enabled_button_background_color: #337ab7; --enabled_button_border_color: #2e6da4; + --popup_title_bar_color: #337AB7; + --popup_item_color: #262626; --disabled_button_text: #303030; --disabled_button_background_color: #686c68; --disabled_button_border_color: #686c68; @@ -543,6 +545,161 @@ td.sequence:hover { overflow: hidden; } + +/*---------------------------------- Popup -------------------------------------------------*/ +.popup { + position: absolute; + top: 10vh; + left: 10%; + z-index: 999; + width: 80%; + height: 80vh; + background-color: black; + display: flex; + flex-direction: column; + overflow-x: hidden; +} + +.popup .title { + width: 100%; + background-color: var(--popup_title_bar_color); + text-align: center; + font-size: 1.3em; +} + +.popup .popup_list_area { + height: 70vh; + overflow-x: hidden; +} + +.popup .item { + width: 100%; + background-color: var(--popup_item_color); + padding: 2px; + display: grid; + grid-template-areas: "folder_icon delete_icon edit_icon file"; + grid-template-columns: 20px 20px 20px auto; + +} + +.popup .item .folder_icon { + grid-area: folder_icon; +} + +.popup .item .edit_icon { + grid-area: edit_icon; +} + +.popup .item .delete_icon { + grid-area: delete_icon; +} + +.popup .item .file { + grid-area: file; + display: grid; +} + +.popup .item .file:hover { + background-color: #688f1f; +} + +.popup .popup_load_cancel { + text-align: center; + background-color: var(--popup_title_bar_color); +} + 
+.popup_load_cancel_button { + vertical-align: bottom; + display: inline; +} + +.breadcrumbitem { + padding: 5px 10px 5px 10px; + color: #ffffff; + background-color: transparent; + border: none; + + -moz-transition: background-color 0.25s ease-in; + -o-transition: background-color 0.25s ease-in; + -webkit-transition: background-color 0.25s ease-in; + transition: background-color 0.25s ease-in; +} + +.breadcrumbitem:hover { + cursor: pointer; + background-color: #688f1f; +} +/*----------------------------- Model Load Popup ------------------------------------------*/ + +.popup .item .model { + grid-area: file; + display: grid; + grid-template-areas: "item gpu_size"; + grid-template-columns: auto 40px; +} + +.popup .item .model:hover { + background-color: #688f1f; +} + +.popup .item .model.selected { + background-color: #688f1f; +} + +.model_setting_container { + display: grid; + grid-template-areas: "label label" + "item item" + "minlabel maxlabel"; + grid-template-rows: 20px 25px 10px; + grid-template-columns: auto 40px; + row-gap: 0.2em; + border: 1px; + margin: 2px; +} + +.model_setting_minlabel { + grid-area: minlabel; + overflow: hidden; + text-align: left; + font-size: 0.8em; +} + +.model_setting_maxlabel { + grid-area: maxlabel; + overflow: hidden; + text-align: right; + font-size: 0.8em; +} + +.model_setting_label { + grid-area: label; + overflow: hidden; + text-align: left; +} + +.model_setting_value { + text-align: left; + grid-area: label; + background-color: inherit; + color: inherit; + border: none; + outline: none; +} + +.model_setting_value:focus { + color: var(--text_edit); +} + +.model_setting_item { + grid-area: item; + overflow: hidden; +} + +.model_setting_item_input { + width:95%; +} + /*---------------------------------- Global ------------------------------------------------*/ .hidden { display: none; @@ -573,7 +730,7 @@ input { } -.action_button.disabled { +button.disabled { color: var(--disabled_button_text); background-color: 
var(--disabled_button_background_color); border-color: var(--disabled_button_border_color); @@ -595,4 +752,8 @@ input { .rawtext { white-space: pre-wrap; +} + +.text_red { + color: red; } \ No newline at end of file diff --git a/static/koboldai.js b/static/koboldai.js index 7eca766e..391964f0 100644 --- a/static/koboldai.js +++ b/static/koboldai.js @@ -8,11 +8,19 @@ socket.on("disconnect", (reason, details) => { }); socket.on('reset_story', function(){reset_story();}); socket.on('var_changed', function(data){var_changed(data);}); +socket.on('load_popup', function(data){load_popup(data);}); +socket.on('popup_items', function(data){popup_items(data);}); +socket.on('popup_breadcrumbs', function(data){popup_breadcrumbs(data);}); +socket.on('popup_edit_file', function(data){popup_edit_file(data);}); +socket.on('show_model_menu', function(data){show_model_menu(data);}); +socket.on('selected_model_info', function(data){selected_model_info(data);}); //socket.onAny(function(event_name, data) {console.log({"event": event_name, "class": data.classname, "data": data});}); var backend_vars = {}; var presets = {} var ai_busy_start = Date.now(); +var popup_deleteable = false; +var popup_editable = false; //-----------------------------------Server to UI Functions----------------------------------------------- function connect() { console.log("connected"); @@ -276,9 +284,465 @@ function var_changed(data) { } } +function load_popup(data) { + popup_deleteable = data.deleteable; + popup_editable = data.editable; + var popup = document.getElementById("popup"); + var popup_title = document.getElementById("popup_title"); + popup_title.textContent = data.popup_title; + var popup_list = document.getElementById("popup_list"); + //first, let's clear out our existing data + while (popup_list.firstChild) { + popup_list.removeChild(popup_list.firstChild); + } + var breadcrumbs = document.getElementById('popup_breadcrumbs'); + while (breadcrumbs.firstChild) { + 
breadcrumbs.removeChild(breadcrumbs.firstChild); + } + + popup.classList.remove("hidden"); + + //adjust accept button + var accept = document.getElementById("popup_accept"); + accept.classList.add("disabled"); + accept.setAttribute("emit", data.call_back); + accept.setAttribute("selected_value", ""); + accept.onclick = function () { + socket.emit(this.emit, this.getAttribute("selected_value")); + document.getElementById("popup").classList.add("hidden"); + }; + +} + +function popup_items(data) { + var popup_list = document.getElementById('popup_list'); + //first, let's clear out our existing data + while (popup_list.firstChild) { + popup_list.removeChild(popup_list.firstChild); + } + + for (item of data) { + var list_item = document.createElement("span"); + list_item.classList.add("item"); + + //create the folder icon + var folder_icon = document.createElement("span"); + folder_icon.classList.add("folder_icon"); + if (item[0]) { + folder_icon.classList.add("oi"); + folder_icon.setAttribute('data-glyph', "folder"); + } + list_item.append(folder_icon); + + //create the edit icon + var edit_icon = document.createElement("span"); + edit_icon.classList.add("edit_icon"); + if ((popup_editable) && !(item[0])) { + edit_icon.classList.add("oi"); + edit_icon.setAttribute('data-glyph', "pencil"); + edit_icon.id = item[1]; + edit_icon.onclick = function () { + socket.emit("popup_edit", this.id); + }; + } + list_item.append(edit_icon); + + //create the delete icon + var delete_icon = document.createElement("span"); + delete_icon.classList.add("delete_icon"); + if (popup_deleteable) { + delete_icon.classList.add("oi"); + delete_icon.setAttribute('data-glyph', "x"); + delete_icon.id = item[1]; + delete_icon.setAttribute("folder", item[0]); + delete_icon.onclick = function () { + if (this.getAttribute("folder") == "true") { + if (window.confirm("Do you really want to delete this folder and ALL files under it?")) { + socket.emit("popup_delete", this.id); + } + } else { + if 
(window.confirm("Do you really want to delete this file?")) { + socket.emit("popup_delete", this.id); + } + } + }; + } + list_item.append(delete_icon); + + //create the actual item + var popup_item = document.createElement("span"); + popup_item.classList.add("file"); + popup_item.id = item[1]; + popup_item.setAttribute("folder", item[0]); + popup_item.setAttribute("valid", item[3]); + popup_item.textContent = item[2]; + popup_item.onclick = function () { + var accept = document.getElementById("popup_accept"); + if (this.getAttribute("valid") == "true") { + accept.classList.remove("disabled"); + accept.setAttribute("selected_value", this.id); + } else { + console.log("not valid"); + accept.setAttribute("selected_value", ""); + accept.classList.add("disabled"); + if (this.getAttribute("folder") == "true") { + console.log("folder"); + socket.emit("popup_change_folder", this.id); + } + } + }; + list_item.append(popup_item); + + + popup_list.append(list_item); + + + } +} + +function popup_breadcrumbs(data) { + var breadcrumbs = document.getElementById('popup_breadcrumbs') + while (breadcrumbs.firstChild) { + breadcrumbs.removeChild(breadcrumbs.firstChild); + } + + for (item of data) { + var button = document.createElement("button"); + button.id = item[0]; + button.textContent = item[1]; + button.classList.add("breadcrumbitem"); + button.onclick = function () { + socket.emit("popup_change_folder", this.id); + }; + breadcrumbs.append(button); + var span = document.createElement("span"); + span.textContent = "\\"; + breadcrumbs.append(span); + } +} + +function popup_edit_file(data) { + var popup_list = document.getElementById('popup_list'); + //first, let's clear out our existing data + while (popup_list.firstChild) { + popup_list.removeChild(popup_list.firstChild); + } + var accept = document.getElementById("popup_accept"); + accept.setAttribute("selected_value", ""); + accept.onclick = function () { + var textarea = document.getElementById("filecontents"); + 
socket.emit("popup_change_file", {"file": textarea.getAttribute("filename"), "data": textarea.value}); + document.getElementById("popup").classList.add("hidden"); + }; + + var textarea = document.createElement("textarea"); + textarea.classList.add("fullwidth"); + textarea.rows = 25; + textarea.id = "filecontents" + textarea.setAttribute("filename", data.file); + textarea.value = data.text; + textarea.onblur = function () { + var accept = document.getElementById("popup_accept"); + accept.classList.remove("disabled"); + }; + popup_list.append(textarea); + +} //--------------------------------------------UI to Server Functions---------------------------------- +function show_model_menu(data) { + document.getElementById("loadmodelcontainer").classList.remove("hidden"); + + //clear old options + document.getElementById("modelkey").classList.add("hidden"); + document.getElementById("modelkey").value = ""; + document.getElementById("modelurl").classList.add("hidden"); + document.getElementById("use_gpu_div").classList.add("hidden"); + document.getElementById("modellayers").classList.add("hidden"); + var model_layer_bars = document.getElementById('model_layer_bars'); + while (model_layer_bars.firstChild) { + model_layer_bars.removeChild(model_layer_bars.firstChild); + } + + //clear out the breadcrumbs + var breadcrumbs = document.getElementById('loadmodellistbreadcrumbs') + while (breadcrumbs.firstChild) { + breadcrumbs.removeChild(breadcrumbs.firstChild); + } + //add breadcrumbs + for (item of data.breadcrumbs) { + var button = document.createElement("button"); + button.classList.add("breadcrumbitem"); + button.id = item[0]; + button.value = item[1]; + button.onclick = function () { + socket.emit('selectmodel', {'data': this.id, 'folder': this.value}); + }; + breadcrumbs.append(button); + var span = document.createElement("span"); + span.textContent = "\\"; + breadcrumbs.append(span); + } + + //clear out the items + var model_list = 
document.getElementById('loadmodellistcontent') + while (model_list.firstChild) { + model_list.removeChild(model_list.firstChild); + } + //add items + for (item of data.data) { + var list_item = document.createElement("span"); + list_item.classList.add("item"); + + //create the folder icon + var folder_icon = document.createElement("span"); + folder_icon.classList.add("folder_icon"); + if (item[3]) { + folder_icon.classList.add("oi"); + folder_icon.setAttribute('data-glyph', "folder"); + } + list_item.append(folder_icon); + + //create the delete icon + //var delete_icon = document.createElement("span"); + //delete_icon.classList.add("delete_icon"); + //if (popup_deleteable) { + // delete_icon.classList.add("oi"); + // delete_icon.setAttribute('data-glyph', "x"); + // delete_icon.id = item[1]; + // delete_icon.setAttribute("folder", item[0]); + // delete_icon.onclick = function () { + // if (this.getAttribute("folder") == "true") { + // if (window.confirm("Do you really want to delete this folder and ALL files under it?")) { + // socket.emit("popup_delete", this.id); + // } + // } else { + // if (window.confirm("Do you really want to delete this file?")) { + // socket.emit("popup_delete", this.id); + // } + // } + // }; + //} + //list_item.append(delete_icon); + + //create the actual item + var popup_item = document.createElement("span"); + popup_item.classList.add("model"); + popup_item.id = item[1]; + popup_item.setAttribute("Menu", data.menu) + //name text + var text = document.createElement("span"); + text.style="grid-area: item;"; + text.textContent = item[0]; + popup_item.append(text); + //model size text + var text = document.createElement("span"); + text.textContent = item[2]; + text.style="grid-area: gpu_size;padding: 2px;"; + popup_item.append(text); + + popup_item.onclick = function () { + var accept = document.getElementById("popup_accept"); + accept.classList.add("disabled"); + socket.emit("select_model", {"model": this.id, "menu": 
this.getAttribute("Menu")}); + var model_list = document.getElementById('loadmodellistcontent').getElementsByClassName("selected"); + for (model of model_list) { + model.classList.remove("selected"); + } + this.classList.add("selected"); + }; + list_item.append(popup_item); + + + model_list.append(list_item); + } + +} +function selected_model_info(data) { + var accept = document.getElementById("btn_loadmodelaccept"); + //hide or unhide key + if (data.key) { + document.getElementById("modelkey").classList.remove("hidden"); + document.getElementById("modelkey").value = data.key_value; + } else { + document.getElementById("modelkey").classList.add("hidden"); + document.getElementById("modelkey").value = ""; + } + //hide or unhide URL + if (data.url) { + document.getElementById("modelurl").classList.remove("hidden"); + } else { + document.getElementById("modelurl").classList.add("hidden"); + } + //hide or unhide the use gpu checkbox + if (data.gpu) { + document.getElementById("use_gpu_div").classList.remove("hidden"); + } else { + document.getElementById("use_gpu_div").classList.add("hidden"); + } + //setup breakmodel + if (data.breakmodel) { + document.getElementById("modellayers").classList.remove("hidden"); + //setup model layer count + console.log(data.break_values.reduce((a, b) => a + b, 0)); + document.getElementById("gpu_layers_current").textContent = data.break_values.reduce((a, b) => a + b, 0); + document.getElementById("gpu_layers_max").textContent = data.layer_count; + document.getElementById("gpu_count").value = data.gpu_count; + + //create the gpu load bars + var model_layer_bars = document.getElementById('model_layer_bars'); + while (model_layer_bars.firstChild) { + model_layer_bars.removeChild(model_layer_bars.firstChild); + } + + //Add the bars + for (let i = 0; i < data.gpu_names.length; i++) { + var div = document.createElement("div"); + div.classList.add("model_setting_container"); + //build GPU text + var span = document.createElement("span"); + 
span.classList.add("model_setting_label"); + span.textContent = "GPU " + i + " " + data.gpu_names[i] + ": " + //build layer count box + var input = document.createElement("input"); + input.classList.add("model_setting_value"); + input.classList.add("setting_value"); + input.inputmode = "numeric"; + input.id = "gpu_layers_box_"+i; + input.value = data.break_values[i]; + input.onblur = function () { + document.getElementById(this.id.replace("_box", "")).value = this.value; + update_gpu_layers(); + } + span.append(input); + div.append(span); + //build layer count slider + var input = document.createElement("input"); + input.classList.add("model_setting_item"); + input.type = "range"; + input.min = 0; + input.max = data.layer_count; + input.step = 1; + input.value = data.break_values[i]; + input.id = "gpu_layers_" + i; + input.onchange = function () { + document.getElementById(this.id.replace("gpu_layers", "gpu_layers_box")).value = this.value; + update_gpu_layers(); + } + div.append(input); + //build slider bar #s + //min + var span = document.createElement("span"); + span.classList.add("model_setting_minlabel"); + var span2 = document.createElement("span"); + span2.style="top: -4px; position: relative;"; + span2.textContent = 0; + span.append(span2); + div.append(span); + //max + var span = document.createElement("span"); + span.classList.add("model_setting_maxlabel"); + var span2 = document.createElement("span"); + span2.style="top: -4px; position: relative;"; + span2.textContent = data.layer_count; + span.append(span2); + div.append(span); + + model_layer_bars.append(div); + } + + //add the disk layers + var div = document.createElement("div"); + div.classList.add("model_setting_container"); + //build GPU text + var span = document.createElement("span"); + span.classList.add("model_setting_label"); + span.textContent = "Disk cache: " + //build layer count box + var input = document.createElement("input"); + input.classList.add("model_setting_value"); + 
input.classList.add("setting_value"); + input.inputmode = "numeric"; + input.id = "disk_layers_box"; + input.value = data.disk_break_value; + input.onblur = function () { + document.getElementById(this.id.replace("_box", "")).value = this.value; + update_gpu_layers(); + } + span.append(input); + div.append(span); + //build layer count slider + var input = document.createElement("input"); + input.classList.add("model_setting_item"); + input.type = "range"; + input.min = 0; + input.max = data.layer_count; + input.step = 1; + input.value = data.disk_break_value; + input.id = "disk_layers"; + input.onchange = function () { + document.getElementById(this.id+"_box").value = this.value; + update_gpu_layers(); + } + div.append(input); + //build slider bar #s + //min + var span = document.createElement("span"); + span.classList.add("model_setting_minlabel"); + var span2 = document.createElement("span"); + span2.style="top: -4px; position: relative;"; + span2.textContent = 0; + span.append(span2); + div.append(span); + //max + var span = document.createElement("span"); + span.classList.add("model_setting_maxlabel"); + var span2 = document.createElement("span"); + span2.style="top: -4px; position: relative;"; + span2.textContent = data.layer_count; + span.append(span2); + div.append(span); + + model_layer_bars.append(div); + + update_gpu_layers(); + } else { + document.getElementById("modellayers").classList.add("hidden"); + accept.classList.remove("disabled"); + } + + +} + +function update_gpu_layers() { + var gpu_layers + gpu_layers = 0; + for (let i=0; i < document.getElementById("gpu_count").value; i++) { + gpu_layers += parseInt(document.getElementById("gpu_layers_"+i).value); + } + if (document.getElementById("disk_layers")) { + gpu_layers += parseInt(document.getElementById("disk_layers").value); + } + if (gpu_layers > parseInt(document.getElementById("gpu_layers_max").textContent)) { + document.getElementById("gpu_layers_current").textContent = gpu_layers; + 
document.getElementById("gpu_layers_current").classList.add("text_red"); + var accept = document.getElementById("btn_loadmodelaccept"); + accept.classList.add("disabled"); + } else { + var accept = document.getElementById("btn_loadmodelaccept"); + accept.classList.remove("disabled"); + document.getElementById("gpu_layers_current").textContent = gpu_layers; + document.getElementById("gpu_layers_current").classList.remove("text_red"); + } +} + +function load_model() { + message = {'cmd': 'load_model', 'use_gpu': $('#use_gpu')[0].checked, + 'key': $('#modelkey')[0].value, 'gpu_layers': gpu_layers.slice(0, -1), + 'disk_layers': disk_layers, 'url': $('#modelurl')[0].value, + 'online_model': $('#oaimodel')[0].value}; +} //--------------------------------------------General UI Functions------------------------------------ String.prototype.toHHMMSS = function () { var sec_num = parseInt(this, 10); // don't forget the second param diff --git a/templates/index_new.html b/templates/index_new.html index 5ae82c30..44e36106 100644 --- a/templates/index_new.html +++ b/templates/index_new.html @@ -61,5 +61,53 @@
{% include 'story flyout.html' %}
+ + + + + + \ No newline at end of file diff --git a/templates/settings flyout.html b/templates/settings flyout.html index a12744bd..b0159d44 100644 --- a/templates/settings flyout.html +++ b/templates/settings flyout.html @@ -33,7 +33,7 @@
Running Model: ReadOnly
-
+
{% with menu='Model' %} {% include 'settings item.html' %} From 9170aa7a4e110b17ef7792bc8e4764c63cbf752f Mon Sep 17 00:00:00 2001 From: ebolam Date: Fri, 1 Jul 2022 08:09:10 -0400 Subject: [PATCH 0023/1297] Model Loading functional Fix for mobile display --- aiserver.py | 123 ++++++++++++++++--- static/koboldai.css | 19 ++- static/koboldai.js | 256 +++++++++++++++++++++++++++++---------- templates/index_new.html | 16 ++- 4 files changed, 323 insertions(+), 91 deletions(-) diff --git a/aiserver.py b/aiserver.py index 6880ebfa..42eea963 100644 --- a/aiserver.py +++ b/aiserver.py @@ -257,13 +257,15 @@ def sendModelSelection(menu="mainmenu", folder="./models"): if koboldai_vars.host: breadcrumbs = [] menu_list = [[folder, menu, "", False] for folder in paths] + menu_list_ui_2 = [[folder[0], folder[1], "", False] for folder in paths] menu_list.append(["Return to Main Menu", "mainmenu", "", True]) + menu_list_ui_2.append(["Return to Main Menu", "mainmenu", "", True]) if os.path.abspath("{}/models".format(os.getcwd())) == os.path.abspath(folder): showdelete=True else: showdelete=False emit('from_server', {'cmd': 'show_model_menu', 'data': menu_list, 'menu': menu, 'breadcrumbs': breadcrumbs, "showdelete": showdelete}, broadcast=True, room="UI_1") - emit('show_model_menu', {'data': menu_list, 'menu': menu, 'breadcrumbs': breadcrumbs, "showdelete": showdelete}, broadcast=False, room="UI_2") + emit('show_model_menu', {'data': menu_list_ui_2, 'menu': menu, 'breadcrumbs': breadcrumbs, "showdelete": showdelete}, broadcast=False, room="UI_2") else: emit('from_server', {'cmd': 'show_model_menu', 'data': model_menu[menu], 'menu': menu, 'breadcrumbs': [], "showdelete": False}, broadcast=True, room="UI_1") emit('show_model_menu', {'data': model_menu[menu], 'menu': menu, 'breadcrumbs': [], "showdelete": False}, broadcast=False, room="UI_2") @@ -1010,6 +1012,8 @@ def get_model_info(model, directory=""): key_value = js["apikey"] elif 'oaiapikey' in js and js['oaiapikey'] != "": 
key_value = js["oaiapikey"] + if model in ('GooseAI', 'OAI'): + get_oai_models({'model': model, 'key': key_value}) key = True elif model == 'ReadOnly': pass @@ -1045,11 +1049,10 @@ def get_model_info(model, directory=""): 'url': url, 'gpu_names': gpu_names}, broadcast=True, room="UI_1") emit('selected_model_info', {'key_value': key_value, 'key':key, 'gpu':gpu, 'layer_count':layer_count, 'breakmodel':breakmodel, - 'disk_break_value': disk_blocks, 'disk_break': not utils.HAS_ACCELERATE, + 'disk_break_value': disk_blocks, 'disk_break': utils.HAS_ACCELERATE, 'break_values': break_values, 'gpu_count': gpu_count, 'url': url, 'gpu_names': gpu_names}, broadcast=False, room="UI_2") - if key_value != "": - get_oai_models(key_value) + def get_layer_count(model, directory=""): @@ -1072,12 +1075,14 @@ def get_layer_count(model, directory=""): else: return None - -def get_oai_models(key): +@socketio.on('OAI_Key_Update') +def get_oai_models(data): + key = data['key'] + model = data['model'] koboldai_vars.oaiapikey = key - if koboldai_vars.model == 'OAI': + if model == 'OAI': url = "https://api.openai.com/v1/engines" - elif koboldai_vars.model == 'GooseAI': + elif model == 'GooseAI': url = "https://api.goose.ai/v1/engines" else: return @@ -1106,8 +1111,8 @@ def get_oai_models(key): # If the client settings file doesn't exist, create it # Write API key to file os.makedirs('settings', exist_ok=True) - if path.exists("settings/{}.settings".format(koboldai_vars.model)): - with open("settings/{}.settings".format(koboldai_vars.model), "r") as file: + if path.exists("settings/{}.settings".format(model)): + with open("settings/{}.settings".format(model), "r") as file: js = json.load(file) if 'online_model' in js: online_model = js['online_model'] @@ -1115,11 +1120,12 @@ def get_oai_models(key): if js['apikey'] != key: changed=True if changed: - with open("settings/{}.settings".format(koboldai_vars.model), "w") as file: + with open("settings/{}.settings".format(model), "w") as file: 
js["apikey"] = key file.write(json.dumps(js, indent=3), room="UI_1") - emit('from_server', {'cmd': 'oai_engines', 'data': engines, 'online_model': online_model}, broadcast=True) + emit('from_server', {'cmd': 'oai_engines', 'data': engines, 'online_model': online_model}, broadcast=True, room="UI_1") + emit('oai_engines', {'data': engines, 'online_model': online_model}, room="UI_2") else: # Something went wrong, print the message and quit since we can't initialize an engine print("{0}ERROR!{1}".format(colors.RED, colors.END), room="UI_1") @@ -3205,7 +3211,7 @@ def get_message(msg): else: print(colors.RED + "WARNING!!: Someone maliciously attempted to delete " + msg['data'] + " the attempt has been blocked.") elif(msg['cmd'] == 'OAI_Key_Update'): - get_oai_models(msg['key']) + get_oai_models({'model': koboldai_vars.model, 'key': msg['key']}) elif(msg['cmd'] == 'loadselect'): koboldai_vars.loadselect = msg["data"] elif(msg['cmd'] == 'spselect'): @@ -5835,9 +5841,32 @@ def ui2_connect(): #==================================================================# @app.route("/popup_test") def popup_test(): - file_popup("Test Popup", "./", "return_event_name", folder_only=False, editable=True, deleteable=True, jailed=False, item_check=check_if_dir_is_model) + file_popup("Test Popup", "./", "return_event_name", renameable=True, folder_only=False, editable=True, deleteable=True, jailed=False, item_check=check_if_dir_is_model) return "ok" +@socketio.on('upload_file') +def upload_file(data): + print("upload_file {}".format(data['filename'])) + if 'current_folder' in session: + path = os.path.abspath(os.path.join(session['current_folder'], data['filename']).replace("\\", "/")).replace("\\", "/") + print("Want to save to {}".format(path)) + if 'popup_jailed_dir' not in session: + print("Someone is trying to upload a file to your server. Blocked.") + elif session['popup_jailed_dir'] is None: + if os.path.exists(path): + emit("error_popup", "The file already exists. 
Please delete it or rename the file before uploading", room="UI_2"); + else: + with open(path, "wb") as f: + f.write(data['data']) + get_files_folders(session['current_folder']) + elif session['popup_jailed_dir'] in session['current_folder']: + if os.path.exists(path): + emit("error_popup", "The file already exists. Please delete it or rename the file before uploading", room="UI_2"); + else: + with open(path, "wb") as f: + f.write(data['data']) + get_files_folders(session['current_folder']) + @socketio.on('popup_change_folder') def popup_change_folder(data): print("Doing popup change folder: {}".format(data)) @@ -5851,6 +5880,24 @@ def popup_change_folder(data): else: print("User is trying to get at files in your server outside the jail. Blocked. Jailed Dir: {} Requested Dir: {}".format(session['popup_jailed_dir'], data)) +@socketio.on('popup_rename') +def popup_rename(data): + if 'popup_renameable' not in session: + print("Someone is trying to rename a file in your server. Blocked.") + return + if not session['popup_renameable']: + print("Someone is trying to rename a file in your server. Blocked.") + return + + if session['popup_jailed_dir'] is None: + os.rename(data['file'], data['new_name']) + get_files_folders(os.path.dirname(data['file'])) + elif session['popup_jailed_dir'] in data: + os.rename(data['file'], data['new_name']) + get_files_folders(os.path.dirname(data['file'])) + else: + print("User is trying to rename files in your server outside the jail. Blocked. Jailed Dir: {} Requested Dir: {}".format(session['popup_jailed_dir'], data['file'])) + @socketio.on('popup_delete') def popup_delete(data): if 'popup_deletable' not in session: @@ -5919,7 +5966,7 @@ def popup_change_file(data): else: print("User is trying to delete files in your server outside the jail. Blocked. 
Jailed Dir: {} Requested Dir: {}".format(session['popup_jailed_dir'], data)) -def file_popup(popup_title, starting_folder, return_event, jailed=True, folder_only=True, deleteable=False, editable=False, show_breadcrumbs=True, item_check=None, show_hidden=False): +def file_popup(popup_title, starting_folder, return_event, upload=True, jailed=True, folder_only=True, renameable=False, deleteable=False, editable=False, show_breadcrumbs=True, item_check=None, show_hidden=False): #starting_folder = The folder we're going to get folders and/or items from #return_event = the socketio event that will be emitted when the load button is clicked #jailed = if set to true will look for the session variable jailed_folder and prevent navigation outside of that folder @@ -5934,20 +5981,23 @@ def file_popup(popup_title, starting_folder, return_event, jailed=True, folder_o else: session['popup_jailed_dir'] = None session['popup_deletable'] = deleteable + session['popup_renameable'] = renameable session['popup_editable'] = editable session['popup_show_hidden'] = show_hidden session['popup_item_check'] = item_check session['popup_folder_only'] = folder_only session['popup_show_breadcrumbs'] = show_breadcrumbs + session['upload'] = upload - socketio.emit("load_popup", {"popup_title": popup_title, "call_back": return_event, "deleteable": deleteable, "editable": editable}, broadcast=True, room="UI_2") - socketio.emit("load_popup", {"popup_title": popup_title, "call_back": return_event, "deleteable": deleteable, "editable": editable}, broadcast=True, room="UI_1") + socketio.emit("load_popup", {"popup_title": popup_title, "call_back": return_event, "renameable": renameable, "deleteable": deleteable, "editable": editable, 'upload': upload}, broadcast=True, room="UI_2") + socketio.emit("load_popup", {"popup_title": popup_title, "call_back": return_event, "renameable": renameable, "deleteable": deleteable, "editable": editable, 'upload': upload}, broadcast=True, room="UI_1") 
get_files_folders(starting_folder) def get_files_folders(starting_folder): import stat + session['current_folder'] = starting_folder item_check = session['popup_item_check'] show_breadcrumbs = session['popup_show_breadcrumbs'] show_hidden = session['popup_show_hidden'] @@ -6101,14 +6151,51 @@ def UI_2_load_model_button(data): #==================================================================# @socketio.on('select_model') def UI_2_load_model_button(data): + print(data) + + #We've selected a menu if data['model'] in model_menu: sendModelSelection(menu=data['model']) + #We've selected a custom line + elif data['menu'] in ("NeoCustom", "GPT2Custom"): + get_model_info(data['menu'], directory=data['display_name']) + #We've selected a custom menu + elif data['model'] in ("NeoCustom", "GPT2Custom"): + sendModelSelection(menu=data['model'], folder="./models") else: #We now have some model we want to potentially load. #First we need to send the client the model parameters (layers, etc) - print("getting model info for {}".format(data['model'])) get_model_info(data['model']) +#==================================================================# +# Event triggered when user loads a model +#==================================================================# +@socketio.on('load_model') +def UI_2_load_model(data): + print(data) + if not os.path.exists("settings/"): + os.mkdir("settings") + changed = True + if not utils.HAS_ACCELERATE: + data['disk_layers'] = "0" + if os.path.exists("settings/" + data['model'].replace('/', '_') + ".breakmodel"): + with open("settings/" + data['model'].replace('/', '_') + ".breakmodel", "r") as file: + file_data = file.read().split('\n')[:2] + if len(file_data) < 2: + file_data.append("0") + gpu_layers, disk_layers = file_data + if gpu_layers == data['gpu_layers'] and disk_layers == data['disk_layers']: + changed = False + if changed: + f = open("settings/" + data['model'].replace('/', '_') + ".breakmodel", "w") + f.write(data['gpu_layers'] + '\n' + 
data['disk_layers']) + f.close() + koboldai_vars.colaburl = data['url'] + "/request" + koboldai_vars.model = data['model'] + koboldai_vars.custmodpth = data['path'] + print("loading Model") + load_model(use_gpu=data['use_gpu'], gpu_layers=data['gpu_layers'], disk_layers=data['disk_layers'], online_model=data['online_model']) + #==================================================================# # Event triggered to rely a message #==================================================================# diff --git a/static/koboldai.css b/static/koboldai.css index c8fcac85..c888c35b 100644 --- a/static/koboldai.css +++ b/static/koboldai.css @@ -203,6 +203,12 @@ cursor: pointer; } +@media only screen and (max-width: 768px) { +.menu_pin { + display: hidden; +} +} +@media only screen and not (max-width: 768px) { .menu_pin { position: fixed; top:10px; @@ -215,6 +221,7 @@ transition: left 0.5s; cursor: pointer; } +} .menu_pin.hidden { left: 0px; @@ -236,6 +243,7 @@ transition: 0.5s; } +@media only screen and not (max-width: 768px) { .SideMenu.pinned { height: 100%; width: var(--flyout_menu_width); @@ -247,6 +255,7 @@ overflow-x: hidden; transition: 0.5s; } +} .SideMenu.open { width: var(--flyout_menu_width); @@ -366,9 +375,11 @@ body { grid-template-rows: auto 100px; } +@media only screen and not (max-width: 768px) { .main-grid.pinned { margin-left: var(--flyout_menu_width); } +} /* ---------------------------------- GAME SCREEN ----------------------------------*/ .gamescreen { @@ -577,8 +588,8 @@ td.sequence:hover { background-color: var(--popup_item_color); padding: 2px; display: grid; - grid-template-areas: "folder_icon delete_icon edit_icon file"; - grid-template-columns: 20px 20px 20px auto; + grid-template-areas: "folder_icon delete_icon edit_icon rename_icon file"; + grid-template-columns: 20px 20px 20px 20px auto; } @@ -590,6 +601,10 @@ td.sequence:hover { grid-area: edit_icon; } +.popup .item .rename_icon { + grid-area: rename_icon; +} + .popup .item .delete_icon { 
grid-area: delete_icon; } diff --git a/static/koboldai.js b/static/koboldai.js index 391964f0..4a7f724f 100644 --- a/static/koboldai.js +++ b/static/koboldai.js @@ -14,6 +14,8 @@ socket.on('popup_breadcrumbs', function(data){popup_breadcrumbs(data);}); socket.on('popup_edit_file', function(data){popup_edit_file(data);}); socket.on('show_model_menu', function(data){show_model_menu(data);}); socket.on('selected_model_info', function(data){selected_model_info(data);}); +socket.on('oai_engines', function(data){oai_engines(data);}); +socket.on('error_popup', function(data){error_popup(data);}); //socket.onAny(function(event_name, data) {console.log({"event": event_name, "class": data.classname, "data": data});}); var backend_vars = {}; @@ -21,6 +23,7 @@ var presets = {} var ai_busy_start = Date.now(); var popup_deleteable = false; var popup_editable = false; +var popup_renameable = false; //-----------------------------------Server to UI Functions----------------------------------------------- function connect() { console.log("connected"); @@ -287,6 +290,7 @@ function var_changed(data) { function load_popup(data) { popup_deleteable = data.deleteable; popup_editable = data.editable; + popup_renameable = data.renameable; var popup = document.getElementById("popup"); var popup_title = document.getElementById("popup_title"); popup_title.textContent = data.popup_title; @@ -300,17 +304,47 @@ function load_popup(data) { breadcrumbs.removeChild(breadcrumbs.firstChild); } + if (data.upload) { + const dropArea = document.getElementById('popup_list'); + dropArea.addEventListener('dragover', (event) => { + event.stopPropagation(); + event.preventDefault(); + // Style the drag-and-drop as a "copy file" operation. 
+ event.dataTransfer.dropEffect = 'copy'; + }); + + dropArea.addEventListener('drop', (event) => { + event.stopPropagation(); + event.preventDefault(); + const fileList = event.dataTransfer.files; + for (file of fileList) { + reader = new FileReader(); + reader.onload = function (event) { + socket.emit("upload_file", {'filename': file.name, "data": event.target.result}); + }; + reader.readAsArrayBuffer(file); + } + }); + } else { + + } + popup.classList.remove("hidden"); //adjust accept button - var accept = document.getElementById("popup_accept"); - accept.classList.add("disabled"); - accept.setAttribute("emit", data.call_back); - accept.setAttribute("selected_value", ""); - accept.onclick = function () { - socket.emit(this.emit, this.getAttribute("selected_value")); - document.getElementById("popup").classList.add("hidden"); - }; + if (data.call_back == "") { + document.getElementById("popup_load_cancel").classList.add("hidden"); + } else { + document.getElementById("popup_load_cancel").classList.remove("hidden"); + var accept = document.getElementById("popup_accept"); + accept.classList.add("disabled"); + accept.setAttribute("emit", data.call_back); + accept.setAttribute("selected_value", ""); + accept.onclick = function () { + socket.emit(this.emit, this.getAttribute("selected_value")); + document.getElementById("popup").classList.add("hidden"); + }; + } } @@ -320,6 +354,7 @@ function popup_items(data) { while (popup_list.firstChild) { popup_list.removeChild(popup_list.firstChild); } + document.getElementById('popup_upload_input').value = ""; for (item of data) { var list_item = document.createElement("span"); @@ -339,7 +374,8 @@ function popup_items(data) { edit_icon.classList.add("edit_icon"); if ((popup_editable) && !(item[0])) { edit_icon.classList.add("oi"); - edit_icon.setAttribute('data-glyph', "pencil"); + edit_icon.setAttribute('data-glyph', "spreadsheet"); + edit_icon.title = "Edit" edit_icon.id = item[1]; edit_icon.onclick = function () { 
socket.emit("popup_edit", this.id); @@ -347,12 +383,31 @@ function popup_items(data) { } list_item.append(edit_icon); + //create the rename icon + var rename_icon = document.createElement("span"); + rename_icon.classList.add("rename_icon"); + if ((popup_renameable) && !(item[0])) { + rename_icon.classList.add("oi"); + rename_icon.setAttribute('data-glyph', "pencil"); + rename_icon.title = "Rename" + rename_icon.id = item[1]; + rename_icon.setAttribute("filename", item[2]); + rename_icon.onclick = function () { + var new_name = prompt("Please enter new filename for \n"+ this.getAttribute("filename")); + if (new_name != null) { + socket.emit("popup_rename", {"file": this.id, "new_name": new_name}); + } + }; + } + list_item.append(rename_icon); + //create the delete icon var delete_icon = document.createElement("span"); delete_icon.classList.add("delete_icon"); if (popup_deleteable) { delete_icon.classList.add("oi"); delete_icon.setAttribute('data-glyph', "x"); + delete_icon.title = "Delete" delete_icon.id = item[1]; delete_icon.setAttribute("folder", item[0]); delete_icon.onclick = function () { @@ -448,7 +503,30 @@ function popup_edit_file(data) { popup_list.append(textarea); } -//--------------------------------------------UI to Server Functions---------------------------------- + +function error_popup(data) { + alert(data); +} + +function oai_engines(data) { + console.log(data); + var oaimodel = document.getElementById("oaimodel") + oaimodel.classList.remove("hidden") + selected_item = 0; + length = oaimodel.options.length; + for (let i = 0; i < length; i++) { + oaimodel.options.remove(1); + } + for (item of data.data) { + var option = document.createElement("option"); + option.value = item[0]; + option.text = item[1]; + if(data.online_model == item[0]) { + option.selected = true; + } + oaimodel.appendChild(option); + } +} function show_model_menu(data) { document.getElementById("loadmodelcontainer").classList.remove("hidden"); @@ -528,7 +606,9 @@ function 
show_model_menu(data) { //create the actual item var popup_item = document.createElement("span"); popup_item.classList.add("model"); + popup_item.setAttribute("display_name", item[0]); popup_item.id = item[1]; + popup_item.setAttribute("Menu", data.menu) //name text var text = document.createElement("span"); @@ -542,14 +622,17 @@ function show_model_menu(data) { popup_item.append(text); popup_item.onclick = function () { - var accept = document.getElementById("popup_accept"); + var accept = document.getElementById("btn_loadmodelaccept"); accept.classList.add("disabled"); - socket.emit("select_model", {"model": this.id, "menu": this.getAttribute("Menu")}); + socket.emit("select_model", {"model": this.id, "menu": this.getAttribute("Menu"), "display_name": this.getAttribute("display_name")}); var model_list = document.getElementById('loadmodellistcontent').getElementsByClassName("selected"); for (model of model_list) { model.classList.remove("selected"); } this.classList.add("selected"); + accept.setAttribute("selected_model", this.id); + accept.setAttribute("menu", this.getAttribute("Menu")); + accept.setAttribute("display_name", this.getAttribute("display_name")); }; list_item.append(popup_item); @@ -560,6 +643,7 @@ function show_model_menu(data) { } function selected_model_info(data) { + console.log(data); var accept = document.getElementById("btn_loadmodelaccept"); //hide or unhide key if (data.key) { @@ -653,56 +737,58 @@ function selected_model_info(data) { } //add the disk layers - var div = document.createElement("div"); - div.classList.add("model_setting_container"); - //build GPU text - var span = document.createElement("span"); - span.classList.add("model_setting_label"); - span.textContent = "Disk cache: " - //build layer count box - var input = document.createElement("input"); - input.classList.add("model_setting_value"); - input.classList.add("setting_value"); - input.inputmode = "numeric"; - input.id = "disk_layers_box"; - input.value = 
data.disk_break_value; - input.onblur = function () { - document.getElementById(this.id.replace("_box", "")).value = this.value; - update_gpu_layers(); - } - span.append(input); - div.append(span); - //build layer count slider - var input = document.createElement("input"); - input.classList.add("model_setting_item"); - input.type = "range"; - input.min = 0; - input.max = data.layer_count; - input.step = 1; - input.value = data.disk_break_value; - input.id = "disk_layers"; - input.onchange = function () { - document.getElementById(this.id+"_box").value = this.value; - update_gpu_layers(); - } - div.append(input); - //build slider bar #s - //min - var span = document.createElement("span"); - span.classList.add("model_setting_minlabel"); - var span2 = document.createElement("span"); - span2.style="top: -4px; position: relative;"; - span2.textContent = 0; - span.append(span2); - div.append(span); - //max - var span = document.createElement("span"); - span.classList.add("model_setting_maxlabel"); - var span2 = document.createElement("span"); - span2.style="top: -4px; position: relative;"; - span2.textContent = data.layer_count; - span.append(span2); - div.append(span); + if (data.disk_break) { + var div = document.createElement("div"); + div.classList.add("model_setting_container"); + //build GPU text + var span = document.createElement("span"); + span.classList.add("model_setting_label"); + span.textContent = "Disk cache: " + //build layer count box + var input = document.createElement("input"); + input.classList.add("model_setting_value"); + input.classList.add("setting_value"); + input.inputmode = "numeric"; + input.id = "disk_layers_box"; + input.value = data.disk_break_value; + input.onblur = function () { + document.getElementById(this.id.replace("_box", "")).value = this.value; + update_gpu_layers(); + } + span.append(input); + div.append(span); + //build layer count slider + var input = document.createElement("input"); + 
input.classList.add("model_setting_item"); + input.type = "range"; + input.min = 0; + input.max = data.layer_count; + input.step = 1; + input.value = data.disk_break_value; + input.id = "disk_layers"; + input.onchange = function () { + document.getElementById(this.id+"_box").value = this.value; + update_gpu_layers(); + } + div.append(input); + //build slider bar #s + //min + var span = document.createElement("span"); + span.classList.add("model_setting_minlabel"); + var span2 = document.createElement("span"); + span2.style="top: -4px; position: relative;"; + span2.textContent = 0; + span.append(span2); + div.append(span); + //max + var span = document.createElement("span"); + span.classList.add("model_setting_maxlabel"); + var span2 = document.createElement("span"); + span2.style="top: -4px; position: relative;"; + span2.textContent = data.layer_count; + span.append(span2); + div.append(span); + } model_layer_bars.append(div); @@ -738,11 +824,49 @@ function update_gpu_layers() { } function load_model() { - message = {'cmd': 'load_model', 'use_gpu': $('#use_gpu')[0].checked, - 'key': $('#modelkey')[0].value, 'gpu_layers': gpu_layers.slice(0, -1), - 'disk_layers': disk_layers, 'url': $('#modelurl')[0].value, - 'online_model': $('#oaimodel')[0].value}; + var accept = document.getElementById('btn_loadmodelaccept'); + gpu_layers = [] + for (let i=0; i < document.getElementById("gpu_count").value; i++) { + gpu_layers.push(document.getElementById("gpu_layers_"+i).value); + } + if (document.getElementById("disk_layers")) { + disk_layers = document.getElementById("disk_layers").value; + } else { + disk_layers = "0"; + } + //Need to do different stuff with custom models + if ((accept.getAttribute('menu') == 'GPT2Custom') || (accept.getAttribute('menu') == 'NeoCustom')) { + var model = document.getElementById("btn_loadmodelaccept").getAttribute("menu"); + var path = document.getElementById("btn_loadmodelaccept").getAttribute("display_name"); + } else { + var model = 
document.getElementById("btn_loadmodelaccept").getAttribute("selected_model"); + var path = ""; + } + + message = {'model': model, 'path': path, 'use_gpu': document.getElementById("use_gpu").checked, + 'key': document.getElementById('modelkey').value, 'gpu_layers': gpu_layers.join(), + 'disk_layers': disk_layers, 'url': document.getElementById("modelurl").value, + 'online_model': document.getElementById("oaimodel").value}; + console.log(message); + socket.emit("load_model", message); + document.getElementById("loadmodelcontainer").classList.add("hidden"); } + + +//--------------------------------------------UI to Server Functions---------------------------------- + + +function upload_file(file_box) { + var fileList = file_box.files; + for (file of fileList) { + reader = new FileReader(); + reader.onload = function (event) { + socket.emit("upload_file", {'filename': file.name, "data": event.target.result}); + }; + reader.readAsArrayBuffer(file); + } +} + //--------------------------------------------General UI Functions------------------------------------ String.prototype.toHHMMSS = function () { var sec_num = parseInt(this, 10); // don't forget the second param diff --git a/templates/index_new.html b/templates/index_new.html index 44e36106..03e3148b 100644 --- a/templates/index_new.html +++ b/templates/index_new.html @@ -69,7 +69,11 @@
-