From 9016e29c66edf83cd197855859d0d900170587cc Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 11 Aug 2022 10:33:47 -0400
Subject: [PATCH 01/10] Fix for APIs and Custom Models not working in AI menu

---
 static/application.js | 37 +++++++++++++++++++++++++++----------
 1 file changed, 27 insertions(+), 10 deletions(-)

diff --git a/static/application.js b/static/application.js
index e1edf020..4ba8e0b2 100644
--- a/static/application.js
+++ b/static/application.js
@@ -1078,7 +1078,7 @@ function buildLoadModelList(ar, menu, breadcrumbs, showdelete) {
         html = html + ""
     } else {
         //this is a model
-        html = html + "
"
+        html = html + "
"
     }
     //now let's do the delete icon if applicable
@@ -1096,6 +1096,7 @@ function buildLoadModelList(ar, menu, breadcrumbs, showdelete) {
         "
     loadmodelcontent.append(html);
     //If this is a menu
+    console.log(ar[i]);
     if(ar[i][3]) {
         $("#loadmodel"+i).off("click").on("click", (function () {
             return function () {
@@ -1105,15 +1106,27 @@ function buildLoadModelList(ar, menu, breadcrumbs, showdelete) {
         })(i));
     //Normal load
     } else {
-        $("#loadmodel"+i).off("click").on("click", (function () {
-            return function () {
-                $("#use_gpu_div").addClass("hidden");
-                $("#modelkey").addClass("hidden");
-                $("#modellayers").addClass("hidden");
-                socket.send({'cmd': 'selectmodel', 'data': $(this).attr("name")});
-                highlightLoadLine($(this));
-            }
-        })(i));
+        if (['NeoCustom', 'GPT2Custom'].includes(menu)) {
+            $("#loadmodel"+i).off("click").on("click", (function () {
+                return function () {
+                    $("#use_gpu_div").addClass("hidden");
+                    $("#modelkey").addClass("hidden");
+                    $("#modellayers").addClass("hidden");
+                    socket.send({'cmd': 'selectmodel', 'data': $(this).attr("name"), 'path': $(this).attr("pretty_name")});
+                    highlightLoadLine($(this));
+                }
+            })(i));
+        } else {
+            $("#loadmodel"+i).off("click").on("click", (function () {
+                return function () {
+                    $("#use_gpu_div").addClass("hidden");
+                    $("#modelkey").addClass("hidden");
+                    $("#modellayers").addClass("hidden");
+                    socket.send({'cmd': 'selectmodel', 'data': $(this).attr("name")});
+                    highlightLoadLine($(this));
+                }
+            })(i));
+        }
     }
 }
 }
@@ -2841,6 +2854,8 @@ $(document).ready(function(){
                 if (msg.key) {
                     $("#modelkey").removeClass("hidden");
                     $("#modelkey")[0].value = msg.key_value;
+                    //if we're in the API list, disable the load button until the model is selected (after the API Key is entered)
+                    disableButtons([load_model_accept]);
                 } else {
                     $("#modelkey").addClass("hidden");
 
@@ -2878,6 +2893,7 @@
                 }
             } else if(msg.cmd == 'oai_engines') {
                 $("#oaimodel").removeClass("hidden")
+                enableButtons([load_model_accept]);
                 selected_item = 0;
                 length = $("#oaimodel")[0].options.length;
                 for (let i = 0; i < length; i++) {
@@ -2914,6 +2930,7 @@
                     opt.innerHTML = engine[1];
                     $("#oaimodel")[0].appendChild(opt);
                 }
+                enableButtons([load_model_accept]);
             }
         });

From 64664dc61ea8b5008666b8d91b9454576a45d328 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 11 Aug 2022 10:40:32 -0400
Subject: [PATCH 02/10] Fix for the AI menu to respect the --cpu command line flag

---
 aiserver.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/aiserver.py b/aiserver.py
index cd8b081e..5deeb688 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -1209,6 +1209,8 @@ def get_model_info(model, directory=""):
         url = True
     elif not utils.HAS_ACCELERATE and not torch.cuda.is_available():
         pass
+    elif args.cpu:
+        pass
     else:
         layer_count = get_layer_count(model, directory=directory)
         if layer_count is None:
@@ -3460,7 +3462,7 @@ def get_message(msg):
             else:
                 filename = "settings/{}.breakmodel".format(vars.model.replace('/', '_'))
             f = open(filename, "w")
-            f.write(msg['gpu_layers'] + '\n' + msg['disk_layers'])
+            f.write(str(msg['gpu_layers']) + '\n' + str(msg['disk_layers']))
             f.close()
             vars.colaburl = msg['url'] + "/request"
             load_model(use_gpu=msg['use_gpu'], gpu_layers=msg['gpu_layers'], disk_layers=msg['disk_layers'], online_model=msg['online_model'])

From 45495d87928a8bd379aab26e8720d306cc4814e9 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 11 Aug 2022 15:23:35 -0400
Subject: [PATCH 03/10] Fix for --cpu on command line and MAYBE --nobreakmodel

---
 aiserver.py | 52 ++++++++++++++++++----------------------------------
 1 file changed, 18 insertions(+), 34 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 5deeb688..3b14d676 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -543,7 +543,9 @@ def device_config(config):
     global breakmodel, generator
     import breakmodel
     n_layers = utils.num_layers(config)
-    if(args.breakmodel_gpulayers is not None or (utils.HAS_ACCELERATE and args.breakmodel_disklayers is not None)):
+    if args.cpu:
+        breakmodel.gpu_blocks = [0]*n_layers
+    elif(args.breakmodel_gpulayers is not None or (utils.HAS_ACCELERATE and args.breakmodel_disklayers is not None)):
         try:
             if(not args.breakmodel_gpulayers):
                 breakmodel.gpu_blocks = []
@@ -1730,8 +1732,12 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
             time.sleep(0.1)
     if gpu_layers is not None:
         args.breakmodel_gpulayers = gpu_layers
+    elif initial_load:
+        gpu_layers = args.breakmodel_gpulayers
     if disk_layers is not None:
         args.breakmodel_disklayers = int(disk_layers)
+    elif initial_load:
+        disk_layers = args.breakmodel_disklayers
 
     #We need to wipe out the existing model and refresh the cuda cache
     model = None
@@ -1840,41 +1846,19 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
         else:
             print("{0}NOT FOUND!{1}".format(colors.YELLOW, colors.END))
 
-        if args.model:
-            if(vars.hascuda):
-                genselected = True
+        if args.cpu:
+            vars.usegpu = False
+            gpu_layers = None
+            disk_layers = None
+            vars.breakmodel = False
+        elif vars.hascuda:
+            if(vars.bmsupported):
+                vars.usegpu = False
+                vars.breakmodel = True
+            else:
+                vars.breakmodel = False
                 vars.usegpu = True
-                vars.breakmodel = utils.HAS_ACCELERATE
-            if(vars.bmsupported):
-                vars.usegpu = False
-                vars.breakmodel = True
-            if(args.cpu):
-                vars.usegpu = False
-                vars.breakmodel = utils.HAS_ACCELERATE
-        elif(vars.hascuda):
-            if(vars.bmsupported):
-                genselected = True
-                vars.usegpu = False
-                vars.breakmodel = True
-            else:
-                genselected = False
-        else:
-            genselected = False
-        if(vars.hascuda):
-            if(use_gpu):
-                if(vars.bmsupported):
-                    vars.breakmodel = True
-                    vars.usegpu = False
-                    genselected = True
-                else:
-                    vars.breakmodel = False
-                    vars.usegpu = True
-                    genselected = True
-            else:
-                vars.breakmodel = utils.HAS_ACCELERATE
-                vars.usegpu = False
-                genselected = True
 
     # Ask for API key if InferKit was selected
     if(vars.model == "InferKit"):

From bddcd7ab7fa84a8b84b67b49751e93f09f5e0eb3 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 11 Aug 2022 17:47:19 -0400
Subject: [PATCH 04/10] Deeper disable of --nobreakmodel attempt

---
 aiserver.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/aiserver.py b/aiserver.py
index 3b14d676..2dd2aeac 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -545,6 +545,10 @@ def device_config(config):
     n_layers = utils.num_layers(config)
     if args.cpu:
         breakmodel.gpu_blocks = [0]*n_layers
+        return
+    elif vars.nobreakmodel:
+        breakmodel.gpu_blocks = [0]*n_layers
+        return
     elif(args.breakmodel_gpulayers is not None or (utils.HAS_ACCELERATE and args.breakmodel_disklayers is not None)):
         try:
             if(not args.breakmodel_gpulayers):
                 breakmodel.gpu_blocks = []

From ca2c60d423886e1b8b59d7118124d7d9a23e24fd Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 11 Aug 2022 18:12:50 -0400
Subject: [PATCH 05/10] Fix for --nobreakmodel

---
 aiserver.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 2dd2aeac..ee2d3a77 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -546,9 +546,6 @@ def device_config(config):
     if args.cpu:
         breakmodel.gpu_blocks = [0]*n_layers
         return
-    elif vars.nobreakmodel:
-        breakmodel.gpu_blocks = [0]*n_layers
-        return
     elif(args.breakmodel_gpulayers is not None or (utils.HAS_ACCELERATE and args.breakmodel_disklayers is not None)):
         try:
             if(not args.breakmodel_gpulayers):
@@ -2081,7 +2078,8 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
 
     # If we're using torch_lazy_loader, we need to get breakmodel config
     # early so that it knows where to load the individual model tensors
-    if(utils.HAS_ACCELERATE or vars.lazy_load and vars.hascuda and vars.breakmodel):
+    if (utils.HAS_ACCELERATE or vars.lazy_load and vars.hascuda and vars.breakmodel) and not vars.nobreakmodel:
+        print(1)
         device_config(model_config)
 
     # Download model from Huggingface if it does not exist, otherwise load locally
@@ -2212,6 +2210,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
             elif(vars.breakmodel):  # Use both RAM and VRAM (breakmodel)
                 vars.modeldim = get_hidden_size_from_model(model)
                 if(not vars.lazy_load):
+                    print(2)
                     device_config(model.config)
                 move_model_to_devices(model)
             elif(utils.HAS_ACCELERATE and __import__("breakmodel").disk_blocks > 0):

From 555ca5fd05e4f46f61e8a884179924767b99c398 Mon Sep 17 00:00:00 2001
From: somebody
Date: Thu, 11 Aug 2022 17:31:12 -0500
Subject: [PATCH 06/10] Add token usage indicator

---
 aiserver.py           | 31 +++++++++++++++++++++++++++
 static/application.js | 49 +++++++++++++++++++++++++++++++++++++++++++
 static/custom.css     | 27 ++++++++++++++++++++++++
 templates/index.html  |  4 ++--
 4 files changed, 109 insertions(+), 2 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index cd8b081e..8f511b01 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -3608,6 +3608,37 @@ def get_message(msg):
         emit('from_server', {'cmd': 'set_debug', 'data': msg['data']}, broadcast=True)
         if vars.debug:
             send_debug()
+    elif(msg['cmd'] == 'getfieldbudget'):
+        unencoded = msg["data"]["unencoded"]
+        field = msg["data"]["field"]
+
+        # Tokenizer may be undefined here when a model has not been chosen.
+        if "tokenizer" not in globals():
+            # We don't have a tokenizer, just return nulls.
+            emit(
+                'from_server',
+                {'cmd': 'showfieldbudget', 'data': {"length": None, "max": None, "field": field}},
+                broadcast=True
+            )
+            return
+
+        header_length = len(tokenizer._koboldai_header)
+        max_tokens = vars.max_length - header_length - vars.sp_length - vars.genamt
+
+        if not unencoded:
+            # Unencoded is empty, just return 0
+            emit(
+                'from_server',
+                {'cmd': 'showfieldbudget', 'data': {"length": 0, "max": max_tokens, "field": field}},
+                broadcast=True
+            )
+        else:
+            tokens_length = len(tokenizer.encode(unencoded))
+            emit(
+                'from_server',
+                {'cmd': 'showfieldbudget', 'data': {"length": tokens_length, "max": max_tokens, "field": field}},
+                broadcast=True
+            )
 
 #==================================================================#
 #  Send userscripts list to client
diff --git a/static/application.js b/static/application.js
index 12b8f214..ab4e307a 100644
--- a/static/application.js
+++ b/static/application.js
@@ -512,6 +512,16 @@ function addWiLine(ob) {
         $(".wisortable-excluded-dynamic").removeClass("wisortable-excluded-dynamic");
         $(this).parent().css("max-height", "").find(".wicomment").find(".form-control").css("max-height", "");
     });
+
+    for (const wientry of document.getElementsByClassName("wientry")) {
+        // If we are uninitialized, skip.
+        if ($(wientry).closest(".wilistitem-uninitialized").length) continue;
+
+        // add() will not add if the class is already present
+        wientry.classList.add("tokens-counted");
+    }
+
+    registerTokenCounters();
 }
 
 function addWiFolder(uid, ob) {
@@ -835,6 +845,7 @@ function exitMemoryMode() {
     button_actmem.html("Memory");
     show([button_actback, button_actfwd, button_actretry, button_actwi]);
     input_text.val("");
+    updateInputBudget(input_text[0]);
     // Hide Author's Note field
     anote_menu.slideUp("fast");
 }
@@ -2139,6 +2150,31 @@ function interpolateRGB(color0, color1, t) {
     ]
 }
 
+function updateInputBudget(inputElement) {
+    socket.send({"cmd": "getfieldbudget", "data": {"unencoded": inputElement.value, "field": inputElement.id}});
+}
+
+function registerTokenCounters() {
+    // Add token counters to all input containers with the class of "tokens-counted",
+    // if a token counter is not already a child of said container.
+    for (const el of document.getElementsByClassName("tokens-counted")) {
+        if (el.getElementsByClassName("input-token-usage").length) continue;
+
+        let span = document.createElement("span");
+        span.classList.add("input-token-usage");
+        span.innerText = "?/? Tokens";
+        el.appendChild(span);
+
+        let inputElement = el.querySelector("input, textarea");
+
+        inputElement.addEventListener("input", function() {
+            updateInputBudget(this);
+        });
+
+        updateInputBudget(inputElement);
+    }
+}
+
 //=================================================================//
 //  READY/RUNTIME
 //=================================================================//
@@ -2481,6 +2517,7 @@ $(document).ready(function(){
                     memorytext = msg.data;
                     input_text.val(msg.data);
                 }
+                updateInputBudget(input_text[0]);
             } else if(msg.cmd == "setmemory") {
                 memorytext = msg.data;
                 if(memorymode) {
@@ -2602,6 +2639,7 @@ $(document).ready(function(){
             } else if(msg.cmd == "setanote") {
                 // Set contents of Author's Note field
                 anote_input.val(msg.data);
+                updateInputBudget(anote_input[0]);
             } else if(msg.cmd == "setanotetemplate") {
                 // Set contents of Author's Note Template field
                 $("#anotetemplate").val(msg.data);
@@ -2913,6 +2951,12 @@
                     opt.innerHTML = engine[1];
                     $("#oaimodel")[0].appendChild(opt);
                 }
+            } else if(msg.cmd == 'showfieldbudget') {
+                let inputElement = document.getElementById(msg.data.field);
+                let tokenBudgetElement = inputElement.parentNode.getElementsByClassName("input-token-usage")[0];
+                let tokenLength = msg.data.length ?? "?";
+                let tokenMax = msg.data.max ?? "?";
+                tokenBudgetElement.innerText = `${tokenLength}/${tokenMax} Tokens`;
             }
         });
 
@@ -3381,6 +3425,11 @@ $(document).ready(function(){
 
         if (handled) ev.preventDefault();
     });
+
+    registerTokenCounters();
+
+    updateInputBudget(input_text[0]);
+
 });
 
diff --git a/static/custom.css b/static/custom.css
index 082c4230..af238dc7 100644
--- a/static/custom.css
+++ b/static/custom.css
@@ -1695,3 +1695,30 @@ body.connected .popupfooter, .popupfooter.always-available {
     overflow-x: auto;
     white-space: nowrap;
 }
+
+.tokens-counted {
+    position: relative;
+}
+
+.input-token-usage {
+    color: white;
+    position: absolute;
+    font-size: 10px;
+    bottom: 2px;
+    right: 5px;
+
+    -webkit-user-select: none;
+    -moz-user-select: none;
+    -ms-user-select: none;
+    user-select: none;
+}
+
+/* Override needed here due to the 10px right padding on inputrowleft; add 10 px. */
+#inputrowleft > .input-token-usage {
+    right: 15px;
+    bottom: 1px;
+}
+
+.wientry > .input-token-usage {
+    bottom: 8px;
+}
\ No newline at end of file
diff --git a/templates/index.html b/templates/index.html
index 82e4ca93..27b50b78 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -157,7 +157,7 @@
-
+
 
@@ -170,7 +170,7 @@
 				Author's Note
-
+
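
The server-side half of this token-usage feature is the budget arithmetic in the `getfieldbudget` handler above. As a minimal sketch of that arithmetic (a hedged reading rather than KoboldAI's literal code: the helper name and parameters below are stand-ins for `vars.max_length`, `len(tokenizer._koboldai_header)`, `vars.sp_length`, and `vars.genamt`):

```python
# Sketch of the field-budget math from the 'getfieldbudget' handler.
# encode is any callable mapping text to a token list (e.g. tokenizer.encode).
def field_budget(unencoded, encode, max_length, header_length, sp_length, genamt):
    # Tokens available to user fields = context size minus the model header,
    # the soft prompt, and the tokens reserved for generation.
    max_tokens = max_length - header_length - sp_length - genamt
    used = len(encode(unencoded)) if unencoded else 0
    return used, max_tokens
```

The client simply renders the pair as `used/max Tokens` next to the field, which is why the handler reports nulls when no tokenizer is loaded yet.
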
From 60d330144b4abd77317b2f164ffb733388f03caf Mon Sep 17 00:00:00 2001
From: Henk
Date: Fri, 12 Aug 2022 01:06:27 +0200
Subject: [PATCH 07/10] Remove banned notebooks

---
 colab/vscode.ipynb | 76 ----------------------------------------------
 1 file changed, 76 deletions(-)
 delete mode 100644 colab/vscode.ipynb

diff --git a/colab/vscode.ipynb b/colab/vscode.ipynb
deleted file mode 100644
index 6a8fe84c..00000000
--- a/colab/vscode.ipynb
+++ /dev/null
@@ -1,76 +0,0 @@
-{
-  "nbformat": 4,
-  "nbformat_minor": 0,
-  "metadata": {
-    "colab": {
-      "name": "ColabKobold Code",
-      "provenance": [],
-      "authorship_tag": "ABX9TyOuIHmyxj4U9dipAib4hfIi",
-      "include_colab_link": true
-    },
-    "kernelspec": {
-      "name": "python3",
-      "display_name": "Python 3"
-    },
-    "language_info": {
-      "name": "python"
-    },
-    "accelerator": "TPU"
-  },
-  "cells": [
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "view-in-github",
-        "colab_type": "text"
-      },
-      "source": [
-        "\"Open"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "source": [
-        "# ColabKobold VSCode Edition\n",
-        "This is a special edition of ColabKobold aimed at developers, it will not start a KoboldAI instance for you to play KoboldAI and instead will launch a fully functional copy of VSCode for easy development.\n",
-        "\n",
-        "Few things of note:\n",
-        "1. Make sure the desired (or no) accelertor is selected on Colab, you do not want a TPU ban for not using it.\n",
-        "1. The Version can be replaced with your github URL and appended with -b for the branch for example \"https://github.com/henk717/koboldai -b united\" dependencies will automatically be installed from requirements.txt or requirements_mtj.txt.\n",
-        "1. With the args you can specify launch options for the KoboldAI Deployment Script, this way you can easily preinstall models to your development instance so you have a model to test with. To install TPU requirements specify the -m TPUMeshTransformerGPTJ argument.\n",
-        "1. You will need an Ngrok auth token which you can obtain here : https://dashboard.ngrok.com/get-started/your-authtoken\n",
-        "1. KoboldAI is installed in /content/koboldai-client opening this folder is enough to automatically get full git history and revision support. Also keep in mind that it mounts your Google Drive, be careful comitting directly from this instance.\n",
-        "1. With Ctrl + Shift + ` you can get a terminal to launch KoboldAI with your own parameters, launching with --colab is recommended.\n",
-        "\n",
-        "# [If you are not a developer and are looking to use KoboldAI click here](https://henk.tech/colabkobold)"
-      ],
-      "metadata": {
-        "id": "hMRnGz42Xsy3"
-      }
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "40B1QvI3Xv02"
-      },
-      "outputs": [],
-      "source": [
-        "#@title VSCode Server\n",
-        "Version = \"United\" #@param [\"Official\", \"United\"] {allow-input: true}\n",
-        "Args = \"-m TPUMeshTransformerGPTJ -a https://api.wandb.ai/files/ve-forbryderne/skein/files/gpt-j-6b-skein-jax/aria2.txt\" #@param {type:\"string\"}\n",
-        "Authtoken = \"\" #@param {type:\"string\"}\n",
-        "\n",
-        "from google.colab import drive\n",
-        "drive.mount('/content/drive/')\n",
-        "\n",
-        "!wget https://henk.tech/ckds -O - | bash /dev/stdin -g $Version -i only $Args\n",
-        "\n",
-        "!pip install colabcode\n",
-        "!pip install 'flask>=2.1.0'\n",
-        "from colabcode import ColabCode\n",
-        "ColabCode(authtoken=Authtoken)"
-      ]
-    }
-  ]
-}
\ No newline at end of file

From a28faa0cb27bcd6661dd844eb5f9e931c0cbe377 Mon Sep 17 00:00:00 2001
From: somebody
Date: Thu, 11 Aug 2022 18:21:49 -0500
Subject: [PATCH 08/10] Fix author's note token usage

---
 aiserver.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 8f511b01..955cdd11 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -3633,7 +3633,10 @@ def get_message(msg):
                 broadcast=True
             )
         else:
+            if field == "anoteinput":
+                unencoded = buildauthorsnote(unencoded)
             tokens_length = len(tokenizer.encode(unencoded))
+
             emit(
                 'from_server',
                 {'cmd': 'showfieldbudget', 'data': {"length": tokens_length, "max": max_tokens, "field": field}},
@@ -3969,6 +3972,12 @@ def actionredo():
 #==================================================================#
 #  
 #==================================================================#
+def buildauthorsnote(authorsnote):
+    # Build Author's Note if set
+    if authorsnote == "":
+        return ""
+    return ("\n" + vars.authornotetemplate + "\n").replace("<|>", authorsnote)
+
 def calcsubmitbudgetheader(txt, **kwargs):
     # Scan for WorldInfo matches
     winfo, found_entries = checkworldinfo(txt, **kwargs)
@@ -3979,11 +3988,7 @@ def calcsubmitbudgetheader(txt, **kwargs):
     else:
         mem = vars.memory
 
-    # Build Author's Note if set
-    if(vars.authornote != ""):
-        anotetxt = ("\n" + vars.authornotetemplate + "\n").replace("<|>", vars.authornote)
-    else:
-        anotetxt = ""
+    anotetxt = buildauthorsnote(vars.authornote)
 
     return winfo, mem, anotetxt, found_entries

From c21c1e3dc027c4a0e110db130459ec23febb5c3e Mon Sep 17 00:00:00 2001
From: somebody
Date: Thu, 11 Aug 2022 18:22:06 -0500
Subject: [PATCH 09/10] Don't show token usage when max tokens is unknown

---
 static/application.js | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/static/application.js b/static/application.js
index ab4e307a..b6ba2c49 100644
--- a/static/application.js
+++ b/static/application.js
@@ -2954,9 +2954,13 @@
             } else if(msg.cmd == 'showfieldbudget') {
                 let inputElement = document.getElementById(msg.data.field);
                 let tokenBudgetElement = inputElement.parentNode.getElementsByClassName("input-token-usage")[0];
-                let tokenLength = msg.data.length ?? "?";
-                let tokenMax = msg.data.max ?? "?";
-                tokenBudgetElement.innerText = `${tokenLength}/${tokenMax} Tokens`;
+                if (msg.data.max === null) {
+                    tokenBudgetElement.innerText = "";
+                } else {
+                    let tokenLength = msg.data.length ?? "?";
+                    let tokenMax = msg.data.max ?? "?";
+                    tokenBudgetElement.innerText = `${tokenLength}/${tokenMax} Tokens`;
+                }
             }
         });

From 6ac970b1c0d14c6cf0384fa06f4c78bd7af86e2f Mon Sep 17 00:00:00 2001
From: somebody
Date: Thu, 11 Aug 2022 18:38:29 -0500
Subject: [PATCH 10/10] Update author's template effect token usage live

---
 aiserver.py           |  8 ++++----
 static/application.js | 12 +++++++++++-
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 955cdd11..641ce6c4 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -3634,7 +3634,7 @@ def get_message(msg):
             )
         else:
             if field == "anoteinput":
-                unencoded = buildauthorsnote(unencoded)
+                unencoded = buildauthorsnote(unencoded, msg["data"]["anotetemplate"])
             tokens_length = len(tokenizer.encode(unencoded))
 
             emit(
@@ -3972,11 +3972,11 @@ def actionredo():
 #==================================================================#
 #  
 #==================================================================#
-def buildauthorsnote(authorsnote):
+def buildauthorsnote(authorsnote, template):
     # Build Author's Note if set
     if authorsnote == "":
         return ""
-    return ("\n" + vars.authornotetemplate + "\n").replace("<|>", authorsnote)
+    return ("\n" + template + "\n").replace("<|>", authorsnote)
 
 def calcsubmitbudgetheader(txt, **kwargs):
     # Scan for WorldInfo matches
@@ -3988,7 +3988,7 @@ def calcsubmitbudgetheader(txt, **kwargs):
     else:
         mem = vars.memory
 
-    anotetxt = buildauthorsnote(vars.authornote)
+    anotetxt = buildauthorsnote(vars.authornote, vars.authornotetemplate)
 
     return winfo, mem, anotetxt, found_entries
 
diff --git a/static/application.js b/static/application.js
index b6ba2c49..be8f9bbf 100644
--- a/static/application.js
+++ b/static/application.js
@@ -2151,7 +2151,13 @@ function interpolateRGB(color0, color1, t) {
 }
 
 function updateInputBudget(inputElement) {
-    socket.send({"cmd": "getfieldbudget", "data": {"unencoded": inputElement.value, "field": inputElement.id}});
+    let data = {"unencoded": inputElement.value, "field": inputElement.id};
+
+    if (inputElement.id === "anoteinput") {
+        data["anotetemplate"] = $("#anotetemplate").val();
+    }
+
+    socket.send({"cmd": "getfieldbudget", "data": data});
 }
 
 function registerTokenCounters() {
@@ -3430,6 +3436,10 @@ $(document).ready(function(){
 
         if (handled) ev.preventDefault();
     });
 
+    $("#anotetemplate").on("input", function() {
+        updateInputBudget(anote_input[0]);
+    })
+
     registerTokenCounters();
 
     updateInputBudget(input_text[0]);