From e164af9264f05d3105795edc43f544f042a0d50d Mon Sep 17 00:00:00 2001
From: ebolam
Date: Fri, 12 Aug 2022 21:18:31 -0400
Subject: [PATCH] Sampler Order added to UI

---
 aiserver.py                    |   4 +-
 gensettings.py                 |  56 +++++++-----
 koboldai_settings.py           |   1 +
 static/koboldai.css            |  18 ++++
 static/koboldai.js             | 124 +++++++++++++++++++-------
 templates/index_new.html       |   2 +-
 templates/settings flyout.html | 153 +++++++++++++++++++++++++--------
 templates/settings item.html   |   4 +-
 templates/story flyout.html    |   8 +-
 9 files changed, 271 insertions(+), 99 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 956e0374..773f8550 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -6296,6 +6296,8 @@ def UI_2_var_change(data):
         value = bool(data['value'])
     elif type(getattr(koboldai_vars, name)) == str:
         value = str(data['value'])
+    elif type(getattr(koboldai_vars, name)) == list:
+        value = list(data['value'])
     else:
         print("Unknown Type {} = {}".format(name, type(getattr(koboldai_vars, name))))
 
@@ -6407,7 +6409,7 @@ def UI_2_redo(data):
     koboldai_vars.actions.use_option(0)
 
 #==================================================================#
-# Event triggered when user clicks the redo button
+# Event triggered when user clicks the retry button
 #==================================================================#
 @socketio.on('retry')
 def UI_2_retry(data):
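
The new list branch in UI_2_var_change mirrors the existing scalar coercions, so list-valued settings (such as the sampler order emitted by the new UI control) no longer fall through to the "Unknown Type" message. A minimal, standalone sketch of that type-dispatch pattern; FakeVars and coerce() below are illustrative stand-ins, not code from aiserver.py:

    # Illustrative sketch only: mirrors the type-based coercion in UI_2_var_change.
    # FakeVars and coerce() are stand-ins, not part of this patch.
    class FakeVars:
        sampler_order = [0, 1, 2, 3, 4, 5]  # list-typed setting, as the new UI control sends it
        temp = 0.5                          # float-typed setting

    def coerce(vars_obj, name, raw):
        current = getattr(vars_obj, name)
        if type(current) == bool:
            return bool(raw)
        elif type(current) == float:
            return float(raw)
        elif type(current) == list:   # the branch this patch adds
            return list(raw)
        elif type(current) == str:
            return str(raw)
        raise TypeError("Unknown Type {} = {}".format(name, type(current)))

    print(coerce(FakeVars(), "sampler_order", (5, 0, 2, 3, 4, 1)))  # -> [5, 0, 2, 3, 4, 1]
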
diff --git a/gensettings.py b/gensettings.py
index 90096347..4152e907 100644
--- a/gensettings.py
+++ b/gensettings.py
@@ -9,7 +9,7 @@ gensettingstf = [
     "step": 2,
     "default": 80,
     "tooltip": "Number of tokens the AI should generate. Higher numbers will take longer to generate.",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Generation",
     "classname": "model",
     "name": "genamt"
@@ -24,7 +24,7 @@ gensettingstf = [
     "step": 0.05,
     "default": 0.5,
     "tooltip": "Randomness of sampling. High values can increase creativity but may make text less sensible. Lower values will make text more predictable but can become repetitious.",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Generation",
     "classname": "model",
     "name": "temp"
@@ -39,7 +39,7 @@ gensettingstf = [
     "step": 0.05,
     "default": 0.9,
     "tooltip": "Used to discard unlikely text in the sampling process. Lower values will make text more predictable but can become repetitious. (Put this value on 1 to disable its effect)",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Sampling",
     "classname": "model",
     "name": "top_p"
@@ -55,7 +55,7 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "Alternative sampling method, can be combined with top_p. (Put this value on 0 to disable its effect)",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Sampling",
     "classname": "model",
     "name": "top_k"
@@ -70,7 +70,7 @@ gensettingstf = [
     "step": 0.05,
     "default": 1.0,
     "tooltip": "Alternative sampling method; it is recommended to disable top_p and top_k (set top_p to 1 and top_k to 0) if using this. 0.95 is thought to be a good value. (Put this value on 1 to disable its effect)",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Sampling",
     "classname": "model",
     "name": "tfs"
@@ -85,7 +85,7 @@ gensettingstf = [
     "step": 0.05,
     "default": 1.0,
     "tooltip": "Alternative sampling method described in the paper \"Typical Decoding for Natural Language Generation\" (10.48550/ARXIV.2202.00666). The paper suggests 0.2 as a good value for this setting. Set this setting to 1 to disable its effect.",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Sampling",
     "classname": "model",
     "name": "typical"
@@ -100,7 +100,7 @@ gensettingstf = [
     "step": 0.01,
     "default": 0.0,
     "tooltip": "Alternative sampling method that reduces the randomness of the AI whenever the probability of one token is much higher than all the others. Higher values have a stronger effect. Set this setting to 0 to disable its effect.",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Sampling",
     "classname": "model",
     "name": "top_a"
@@ -115,7 +115,7 @@ gensettingstf = [
     "step": 0.01,
     "default": 1.1,
     "tooltip": "Used to penalize words that were already generated or belong to the context (Going over 1.2 breaks 6B models).",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Repetition",
     "classname": "model",
     "name": "rep_pen"
@@ -130,7 +130,7 @@ gensettingstf = [
     "step": 4,
     "default": 0,
     "tooltip": "Repetition penalty range. If set higher than 0, only applies repetition penalty to the last few tokens of your story rather than applying it to the entire story. This slider controls the amount of tokens at the end of your story to apply it to.",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Repetition",
     "classname": "model",
     "name": "rep_pen_range"
@@ -145,7 +145,7 @@ gensettingstf = [
     "step": 0.1,
     "default": 0.0,
     "tooltip": "Repetition penalty slope. If BOTH this setting and Rep Penalty Range are set higher than 0, will use sigmoid interpolation to apply repetition penalty more strongly on tokens that are closer to the end of your story. This setting controls the tension of the sigmoid curve; higher settings will result in the repetition penalty difference between the start and end of your story being more apparent. Setting this to 1 uses linear interpolation; setting this to 0 disables interpolation.",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Repetition",
     "classname": "model",
     "name": "rep_pen_slope"
@@ -160,7 +160,7 @@ gensettingstf = [
     "step": 8,
     "default": 1024,
     "tooltip": "Max number of tokens of context to submit to the AI for sampling. Make sure this is higher than Amount to Generate. Higher values increase VRAM/RAM usage.",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Generation",
     "classname": "model",
     "name": "max_length"
@@ -175,7 +175,7 @@ gensettingstf = [
     "step": 1,
     "default": 1,
     "tooltip": "Number of results to generate per submission. Increases VRAM/RAM usage.",
-    "menu_path": "Model",
+    "menu_path": "Settings",
     "sub_path": "Generation",
     "classname": "model",
     "name": "numseqs"
@@ -190,7 +190,8 @@ gensettingstf = [
     "step": 1,
     "default": 3,
     "tooltip": "Number of historic actions to scan for W Info keys.",
-    "menu_path": "User",
+    "menu_path": "World Info",
+    "sub_path": "",
     "classname": "user",
     "name": "widepth"
     },
@@ -204,7 +205,8 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "Whether the game is saved after each action.",
-    "menu_path": "Story",
+    "menu_path": "Home",
+    "sub_path": "",
     "classname": "story",
     "name": "autosave"
     },
@@ -218,7 +220,8 @@ gensettingstf = [
     "step": 1,
     "default": 1,
     "tooltip": "Whether the prompt should be sent in the context of every action.",
-    "menu_path": "Story",
+    "menu_path": "Settings",
+    "sub_path": "Other",
     "classname": "story",
     "name": "useprompt"
     },
@@ -233,6 +236,7 @@ gensettingstf = [
     "default": 0,
     "tooltip": "Turn this on if you are playing a Choose your Adventure model.",
     #"menu_path": "Story",
+    #"sub_path": "",
     #"classname": "story",
     #"name": "adventure"
     },
@@ -247,6 +251,7 @@ gensettingstf = [
     "default": 0,
     "tooltip": "This mode optimizes KoboldAI for chatting.",
     #"menu_path": "Story",
+    #"sub_path": "",
     #"classname": "story",
     #"name": "chatmode"
     },
@@ -260,7 +265,8 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "Scan the AI's output for world info keys as it's generating the output.",
-    "menu_path": "Story",
+    "menu_path": "World Info",
+    "sub_path": "",
     "classname": "story",
     "name": "dynamicscan"
     },
@@ -274,7 +280,8 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "When enabled the AI does not generate when you enter the prompt, instead you need to do an action first.",
-    "menu_path": "User",
+    "menu_path": "Settings",
+    "sub_path": "Other",
     "classname": "user",
     "name": "nopromptgen"
     },
@@ -288,7 +295,8 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "When enabled, the Memory text box in the Random Story dialog will be prefilled by default with your current story's memory instead of being empty.",
-    "menu_path": "User",
+    "menu_path": "Settings",
+    "sub_path": "Other",
     "classname": "user",
     "name": "rngpersist"
     },
@@ -302,7 +310,8 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "Disables userscript generation modifiers.",
-    "menu_path": "User",
+    "menu_path": "Settings",
+    "sub_path": "Modifiers",
     "classname": "user",
     "name": "nogenmod"
     },
@@ -316,7 +325,8 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "Show debug info",
-    "menu_path": "user",
+    "menu_path": "",
+    "sub_path": "",
     "classname": "user",
     "name": "debug"
     },
@@ -327,7 +337,8 @@ gensettingstf = [
     "id": "actionmode",
     "default": 0,
     "tooltip": "Choose the mode of KoboldAI",
-    "menu_path": "Story",
+    "menu_path": "Home",
+    "sub_path": "",
     "classname": "story",
     "name": "actionmode",
     'children': [{'text': 'Story', 'value': 0}, {'text':'Adventure','value':1}, {'text':'Chat', 'value':2}]
@@ -342,7 +353,8 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "Shows outputs to you as they are made.",
-    "menu_path": "User",
+    "menu_path": "Interface",
+    "sub_path": "UI",
     "classname": "user",
     "name": "output_streaming"
     }
diff --git a/koboldai_settings.py b/koboldai_settings.py
index 23db7699..84812a6d 100644
--- a/koboldai_settings.py
+++ b/koboldai_settings.py
@@ -93,6 +93,7 @@ class koboldai_vars(object):
         self._story_settings[story_name] = story_settings(self.socketio)
         if json_data is not None:
             self._story_settings[story_name].from_json(json_data)
+        self._story_settings['default'].send_to_ui()
     
     def story_list(self):
         return [x for x in self._story_settings]
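
The added send_to_ui() call re-broadcasts the default story's settings to the browser right after story settings are built, which appears intended to leave freshly rendered controls (including the new sampler-order list) populated. A toy model of that broadcast pattern; Emitter and StorySettings below are stand-ins, not the real socketio object or settings classes:

    # Toy sketch: every tracked field is re-emitted as a var_changed message.
    class Emitter:
        def emit(self, event, payload):
            print(event, payload)

    class StorySettings:
        def __init__(self, socketio):
            self._socketio = socketio
            self.prompt = ""
            self.gamesaved = True

        def send_to_ui(self):
            # Push the current value of every public field to the client.
            for name, value in vars(self).items():
                if not name.startswith("_"):
                    self._socketio.emit("var_changed",
                                        {"classname": "story", "name": name, "value": value})

    StorySettings(Emitter()).send_to_ui()
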
diff --git a/static/koboldai.css b/static/koboldai.css
index f9a480d3..9fdd5bdf 100644
--- a/static/koboldai.css
+++ b/static/koboldai.css
@@ -43,6 +43,7 @@
     --wi_card_bg_color: #262626;
     --wi_card_tag_bg_color: #404040;
     --wi_tag_color: #337ab7;
+    --sample_order_select_color: blue;
     --story_options_size: 30%;
     --story_pinned_areas: "menuicon options gamescreen lefticon"
                           "menuicon inputrow inputrow lefticon";
@@ -170,6 +171,19 @@
     margin: 2px;
 }
 
+.setting_container_single {
+    display: grid;
+    grid-template-areas: "label"
+                         "item";
+    grid-template-rows: 20px 120px;
+    grid-template-columns: var(--flyout_menu_width);
+    row-gap: 0.2em;
+    background-color: var(--setting_background);
+    color: var(--text);
+    margin: 2px;
+}
+
+
 .setting_minlabel {
     grid-area: minlabel;
     overflow: hidden;
@@ -346,6 +360,10 @@
     margin-right: 25px;
 }
 
+.sample_order.selected {
+    background-color: var(--sample_order_select_color);
+}
+
 .presets option {
     color: var(--dropdown-select);
     background: var(--preset-item-background);
diff --git a/static/koboldai.js b/static/koboldai.js
index d980b580..fb040b62 100644
--- a/static/koboldai.js
+++ b/static/koboldai.js
@@ -35,6 +35,20 @@ var shift_down = false;
 var world_info_data = {};
 var world_info_folder_data = {};
 var saved_settings = {};
+const map1 = new Map()
+map1.set('Top-k Sampling', 0)
+map1.set('Top-a Sampling', 1)
+map1.set('Top-p Sampling', 2)
+map1.set('Tail-free Sampling', 3)
+map1.set('Typical Sampling', 4)
+map1.set('Temperature', 5)
+const map2 = new Map()
+map2.set(0, 'Top-k Sampling')
+map2.set(1, 'Top-a Sampling')
+map2.set(2, 'Top-p Sampling')
+map2.set(3, 'Tail-free Sampling')
+map2.set(4, 'Typical Sampling')
+map2.set(5, 'Temperature')
 //-----------------------------------Server to UI Functions-----------------------------------------------
 function connect() {
     console.log("connected");
@@ -289,38 +303,39 @@ function do_story_text_length_updates(data) {
 }
 
 function do_presets(data) {
-    var select = document.getElementById('presets');
-    //clear out the preset list
-    while (select.firstChild) {
-        select.removeChild(select.firstChild);
-    }
-    //add our blank option
-    var option = document.createElement("option");
-    option.value="";
-    option.text="presets";
-    select.append(option);
-    presets = data.value;
-    
-    
-    for (const [key, value] of Object.entries(data.value)) {
-        var option_group = document.createElement("optgroup");
-        option_group.label = key;
-        option_group.classList.add("preset_group");
-        for (const [group, group_value] of Object.entries(value)) {
-            var option = document.createElement("option");
-            option.text=group;
-            option.disabled = true;
-            option.classList.add("preset_group");
-            option_group.append(option);
-            for (const [preset, preset_value] of Object.entries(group_value)) {
-                var option = document.createElement("option");
-                option.value=preset;
-                option.text=preset_value.preset;
-                option.title = preset_value.description;
-                option_group.append(option);
-            }
+    for (select of document.getElementsByClassName('presets')) {
+        //clear out the preset list
+        while (select.firstChild) {
+            select.removeChild(select.firstChild);
+        }
+        //add our blank option
+        var option = document.createElement("option");
+        option.value="";
+        option.text="presets";
+        select.append(option);
+        presets = data.value;
+        
+        
+        for (const [key, value] of Object.entries(data.value)) {
+            var option_group = document.createElement("optgroup");
+            option_group.label = key;
+            option_group.classList.add("preset_group");
+            for (const [group, group_value] of Object.entries(value)) {
+                var option = document.createElement("option");
+                option.text=group;
+                option.disabled = true;
+                option.classList.add("preset_group");
+                option_group.append(option);
+                for (const [preset, preset_value] of Object.entries(group_value)) {
+                    var option = document.createElement("option");
+                    option.value=preset;
+                    option.text=preset_value.preset;
+                    option.title = preset_value.description;
+                    option_group.append(option);
+                }
+            }
+            select.append(option_group);
         }
-        select.append(option_group);
     }
 }
 
@@ -372,11 +387,17 @@ function var_changed(data) {
     //Special Case for Presets
     } else if ((data.classname == 'model') && (data.name == 'presets')) {
         do_presets(data);
+    //Special Case for prompt
     } else if ((data.classname == 'story') && (data.name == 'prompt')) {
         do_prompt(data);
     //Special Case for phrase biasing
     } else if ((data.classname == 'story') && (data.name == 'biases')) {
         do_biases(data);
+    //Special Case for sample_order
+    } else if ((data.classname == 'model') && (data.name == 'sampler_order')) {
+        for (const [index, item] of data.value.entries()) {
+            Array.from(document.getElementsByClassName("sample_order"))[index].textContent = map2.get(item);
+        }
     //Basic Data Syncing
     } else {
         var elements_to_change = document.getElementsByClassName("var_sync_"+data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_"));
@@ -1270,7 +1291,30 @@ function show_error_message(data) {
     error_message_box.classList.remove("hidden");
     error_message_box.querySelector("#popup_list_area").textContent = data;
 }
+
 //--------------------------------------------UI to Server Functions----------------------------------
+function move_sample(direction) {
+    var previous = null;
+    console.log(direction);
+    for (const [index, temp] of Array.from(document.getElementsByClassName("sample_order")).entries()) {
+        if (temp.classList.contains("selected")) {
+            if ((direction == 'up') && (index > 0)) {
+                temp.parentElement.insertBefore(temp, previous);
+                break;
+            } else if ((direction == 'down') && (index+1 < Array.from(document.getElementsByClassName("sample_order")).length)) {
+                temp.parentElement.insertBefore(temp, Array.from(document.getElementsByClassName("sample_order"))[index+2]);
+                break;
+            }
+        }
+        previous = temp;
+    }
+    var sample_order = []
+    for (item of document.getElementsByClassName("sample_order")) {
+        sample_order.push(map1.get(item.textContent));
+    }
+    socket.emit("var_change", {"ID": 'model_sampler_order', "value": sample_order});
+}
+
 function new_story() {
     //check if the story is saved
     if (document.getElementById('save_story').getAttribute('story_gamesaved') == "false") {
@@ -1368,6 +1412,24 @@ function send_world_info(uid) {
 }
 
 //--------------------------------------------General UI Functions------------------------------------
+function select_sample(item) {
+    for (temp of document.getElementsByClassName("sample_order")) {
+        temp.classList.remove("selected");
+    }
+    item.classList.add("selected");
+}
+
+function toggle_setting_category(element) {
+    item = element.nextSibling.nextSibling;
+    if (item.classList.contains('hidden')) {
+        item.classList.remove("hidden");
+        element.firstChild.nextSibling.firstChild.textContent = "expand_more";
+    } else {
+        item.classList.add("hidden");
+        element.firstChild.nextSibling.firstChild.textContent = "navigate_next";
+    }
+}
+
 function preserve_game_space(preserve) {
     var r = document.querySelector(':root');
     console.log("Setting cookie to: "+preserve);
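
On the client, move_sample() reads the on-screen order of the .sample_order rows, translates each label to its integer index through map1, and emits the list as model_sampler_order; the new sampler_order branch of var_changed() does the reverse with map2 when the server echoes the value back. A Python restatement of that name/index round trip, for reference; the two helper functions are illustrative, not part of the patch:

    # Mirrors map1/map2 from koboldai.js: sampler display names <-> integer indices.
    NAME_TO_INDEX = {
        'Top-k Sampling': 0,
        'Top-a Sampling': 1,
        'Top-p Sampling': 2,
        'Tail-free Sampling': 3,
        'Typical Sampling': 4,
        'Temperature': 5,
    }
    INDEX_TO_NAME = {index: name for name, index in NAME_TO_INDEX.items()}

    def labels_to_order(labels):
        # What move_sample() emits: the on-screen labels as integer indices.
        return [NAME_TO_INDEX[label] for label in labels]

    def order_to_labels(order):
        # What the sampler_order branch of var_changed() renders: indices back to labels.
        return [INDEX_TO_NAME[index] for index in order]

    order = labels_to_order(['Temperature', 'Top-k Sampling', 'Top-p Sampling',
                             'Tail-free Sampling', 'Typical Sampling', 'Top-a Sampling'])
    print(order)                   # [5, 0, 2, 3, 4, 1]
    print(order_to_labels(order))  # round-trips to the original labels
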
diff --git a/templates/index_new.html b/templates/index_new.html
index 80c27948..732044b3 100644
--- a/templates/index_new.html
+++ b/templates/index_new.html
@@ -45,7 +45,7 @@
- +
diff --git a/templates/settings flyout.html b/templates/settings flyout.html
index dc3dc540..68e93bf3 100644
--- a/templates/settings flyout.html
+++ b/templates/settings flyout.html
@@ -27,15 +27,23 @@
- Story
- Model
- User
- UI
+ Home
+ Settings
+ Interface
  Help
-
+
+
+
+ Running Model: ReadOnly +
+
+ + +
+

@@ -44,7 +52,8 @@
- file_open
+ description
+ folder_open
  save
  cloud_download
  file_download
- {% with menu='Story' %}
- {% include 'settings item.html' %}
+ {% with menu='Home' %}
+ {% with sub_path='' %}
+ {% include 'settings item.html' %}
+ {% endwith %}
  {% endwith %}
-

- Biasing:
-

+
+ -