From 5052b39c3fd9be7c6b23b635416d481e464bc100 Mon Sep 17 00:00:00 2001
From: somebody
Date: Thu, 25 Aug 2022 17:59:22 -0500
Subject: [PATCH 1/7] wip context viewer

---
 aiserver.py | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/aiserver.py b/aiserver.py
index 3ff03751..5ba01b20 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -39,6 +39,8 @@ import traceback
 import inspect
 import warnings
 import multiprocessing
+from enum import Enum
+from dataclasses import dataclass
 from collections.abc import Iterable
 from collections import OrderedDict
 from typing import Any, Callable, TypeVar, Tuple, Union, Dict, Set, List, Optional, Type
@@ -3970,6 +3972,19 @@ def check_for_backend_compilation():
             break
     koboldai_vars.checking = False
 
+class ContextType(Enum):
+    SOFT_PROMPT = 1
+    STORY = 2
+    WORLD_INFO = 3
+    MEMORY = 4
+    HEADER = 5
+
+@dataclass
+class ContextChunk:
+    def __init__(self, value, context_type: ContextType) -> None:
+        self.value = value
+        self.context_type = context_type
+
 def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, disable_recentrng=False, no_generate=False):
     # Ignore new submissions if the AI is currently busy
     if(koboldai_vars.aibusy):
@@ -4226,14 +4241,18 @@ def apiactionsubmit(data, use_memory=False, use_world_info=False, use_story=Fals
         mem = koboldai_vars.memory + "\n"
     else:
         mem = koboldai_vars.memory
+
     if(use_authors_note and koboldai_vars.authornote != ""):
         anotetxt = ("\n" + koboldai_vars.authornotetemplate + "\n").replace("<|>", koboldai_vars.authornote)
     else:
         anotetxt = ""
+
     MIN_STORY_TOKENS = 8
     story_tokens = []
     mem_tokens = []
     wi_tokens = []
+    context = []
+
     story_budget = lambda: koboldai_vars.max_length - koboldai_vars.sp_length - koboldai_vars.genamt - len(tokenizer._koboldai_header) - len(story_tokens) - len(mem_tokens) - len(wi_tokens)
     budget = lambda: story_budget() + MIN_STORY_TOKENS
     if budget() < 0:
@@ -4241,15 +4260,20 @@ def apiactionsubmit(data, use_memory=False, use_world_info=False, use_story=Fals
             "msg": f"Your Max Tokens setting is too low for your current soft prompt and tokenizer to handle. It needs to be at least {koboldai_vars.max_length - budget()}.",
             "type": "token_overflow",
         }}), mimetype="application/json", status=500))
+
     if use_memory:
         mem_tokens = tokenizer.encode(utils.encodenewlines(mem))[-budget():]
+
     if use_world_info:
         world_info, _ = checkworldinfo(data, force_use_txt=True, scan_story=use_story)
         wi_tokens = tokenizer.encode(utils.encodenewlines(world_info))[-budget():]
+
     if use_story:
         if koboldai_vars.useprompt:
             story_tokens = tokenizer.encode(utils.encodenewlines(koboldai_vars.prompt))[-budget():]
+
     story_tokens = tokenizer.encode(utils.encodenewlines(data))[-story_budget():] + story_tokens
+
     if use_story:
         for i, action in enumerate(reversed(koboldai_vars.actions.values())):
             if story_budget() <= 0:
@@ -4260,6 +4284,23 @@ def apiactionsubmit(data, use_memory=False, use_world_info=False, use_story=Fals
         story_tokens = tokenizer.encode(utils.encodenewlines(anotetxt))[-story_budget():] + story_tokens
     if not koboldai_vars.useprompt:
         story_tokens = tokenizer.encode(utils.encodenewlines(koboldai_vars.prompt))[-budget():] + story_tokens
+
+    # Context tracker
+    if koboldai_vars.sp:
+        context.append(ContextChunk(koboldai_vars.sp, ContextType.SOFT_PROMPT))
+
+    if tokenizer._koboldai_header:
+        context.append(ContextChunk(tokenizer._koboldai_header, ContextType.HEADER))
+
+    if mem_tokens:
+        context.append(ContextChunk(mem_tokens, ContextType.MEMORY))
+
+    if wi_tokens:
+        context.append(ContextChunk(wi_tokens, ContextType.WORLD_INFO))
+
+    if story_tokens:
+        context.append(ContextChunk(story_tokens, ContextType.STORY))
+
     tokens = tokenizer._koboldai_header + mem_tokens + wi_tokens + story_tokens
     assert story_budget() >= 0
     minimum = len(tokens) + 1
@@ -4270,6 +4311,8 @@ def apiactionsubmit(data, use_memory=False, use_world_info=False, use_story=Fals
     elif(koboldai_vars.use_colab_tpu or koboldai_vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")):
         genout = apiactionsubmit_tpumtjgenerate(tokens, minimum, maximum)
 
+    koboldai_vars.context = context
+    print(context)
     return genout
 
 #==================================================================#
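Patch 1 threads a context list through apiactionsubmit, one ContextChunk per prompt component, so the UI can later show exactly what went into the context window. One wrinkle in the patch: ContextChunk pairs @dataclass with a hand-written __init__. That runs, because the decorator leaves a user-defined __init__ in place, but it also means @dataclass generates nothing. A minimal standalone sketch of the idiomatic field-annotation form (an illustration, not part of the patch):

    # Sketch: ContextChunk with annotated fields. @dataclass then generates
    # __init__, __repr__, and __eq__ from the annotations.
    from dataclasses import dataclass
    from enum import Enum
    from typing import Any

    class ContextType(Enum):
        SOFT_PROMPT = 1
        STORY = 2
        WORLD_INFO = 3
        MEMORY = 4
        HEADER = 5

    @dataclass
    class ContextChunk:
        value: Any                 # token ids (or the soft-prompt tensor) for this chunk
        context_type: ContextType  # which prompt component the chunk represents

    # Positional construction matches the call sites in the patch:
    chunk = ContextChunk([318, 257], ContextType.MEMORY)
    assert chunk.context_type is ContextType.MEMORY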
From d534cbdeb11c4389dbefa456e2ba62eddfb33fee Mon Sep 17 00:00:00 2001
From: somebody
Date: Sat, 27 Aug 2022 20:53:18 -0500
Subject: [PATCH 2/7] Working on context viewer

---
 aiserver.py          | 37 ++++++++++++++++++++++++++++++++++++-
 koboldai_settings.py | 18 ++++++++++++++----
 static/koboldai.js   |  4 ++++
 3 files changed, 54 insertions(+), 5 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 5ba01b20..ae32cef8 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -4394,6 +4394,8 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
     anotetkns    = []  # Placeholder for Author's Note tokens
     lnanote      = 0   # Placeholder for Author's Note length
 
+    context = []
+
     lnsp = koboldai_vars.sp_length
 
     if("tokenizer" not in globals()):
@@ -4436,14 +4438,34 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
 
     assert budget >= 0
 
+    # Context behavior shared
+    if lnsp:
+        context.append({"type": "soft_prompt", "tokens": koboldai_vars.sp})
+    if koboldai_vars.model not in ("Colab", "API", "OAI") and tokenizer._koboldai_header:
+        context.append({"type": "header", "tokens": tokenizer._koboldai_header})
+    if memtokens:
+        context.append({"type": "memory", "tokens": memtokens})
+    if witokens:
+        context.append({"type": "world_info", "tokens": witokens})
+
     if(actionlen == 0):
         # First/Prompt action
         tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + anotetkns + prompttkns
         assert len(tokens) <= koboldai_vars.max_length - lnsp - koboldai_vars.genamt - budget_deduction
         ln = len(tokens) + lnsp
+
+        if anotetkns:
+            context.append({"type": "authors_note", "tokens": anotetkns})
+
+        if prompttkns:
+            context.append({"type": "prompt", "tokens": prompttkns})
+
+
+        koboldai_vars.context = context
         return tokens, ln+1, ln+koboldai_vars.genamt
     else:
         tokens = []
+        local_context = []
 
         # Check if we have the action depth to hit our A.N. depth
         if(anotetxt != "" and actionlen < koboldai_vars.andepth):
@@ -4462,10 +4484,13 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
             if(tknlen < budget):
                 tokens = acttkns + tokens
                 budget -= tknlen
+                local_context.insert(0, {"type": "action", "tokens": acttkns})
             else:
                 count = budget * -1
-                tokens = acttkns[count:] + tokens
+                truncated_action_tokens = acttkns[count:]
+                tokens = truncated_action_tokens + tokens
                 budget = 0
+                local_context.insert(0, {"type": "action", "tokens": truncated_action_tokens})
                 break
 
             # Inject Author's Note if we've reached the desired depth
@@ -4473,6 +4498,7 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
             if(anotetxt != ""):
                 tokens = anotetkns + tokens # A.N. len already taken from bdgt
                 anoteadded = True
+                local_context.insert(0, {"type": "authors_note", "tokens": anotetkns})
             n += 1
 
     # If we're not using the prompt every time and there's still budget left,
@@ -4486,16 +4512,25 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
         # Did we get to add the A.N.? If not, do it here
         if(anotetxt != ""):
             if((not anoteadded) or forceanote):
+                # header, mem, wi, anote, prompt, actions
+                local_context.insert(0, {"type": "authors_note", "tokens": anotetkns})
+                local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
                 tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + anotetkns + prompttkns + tokens
             else:
+                local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
                 tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + prompttkns + tokens
         else:
             # Prepend Memory, WI, and Prompt before action tokens
+            local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
            tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + prompttkns + tokens
 
     # Send completed bundle to generator
     assert len(tokens) <= koboldai_vars.max_length - lnsp - koboldai_vars.genamt - budget_deduction
     ln = len(tokens) + lnsp
+
+    context += local_context
+    koboldai_vars.context = context
+
     return tokens, ln+1, ln+koboldai_vars.genamt
 
 #==================================================================#
diff --git a/koboldai_settings.py b/koboldai_settings.py
index 51c6a0f8..b8736980 100644
--- a/koboldai_settings.py
+++ b/koboldai_settings.py
@@ -109,6 +109,7 @@ class koboldai_vars(object):
         self._model_settings.reset_for_model_load()
 
     def calc_ai_text(self, submitted_text=""):
+        context = []
         token_budget = self.max_length
         used_world_info = []
         if self.tokenizer is None:
@@ -116,6 +117,11 @@ class koboldai_vars(object):
         else:
             used_tokens = self.sp_length
         text = ""
+
+        if koboldai_vars.sp:
+            context.append({"type": "soft_prompt", "text": f"<{self.sp_length} tokens of Soft Prompt.>"})
+        if koboldai_vars.model not in ("Colab", "API", "OAI") and self.tokenizer._koboldai_header:
+            context.append({"type": "header", "tokens": self.tokenizer._koboldai_header})
 
         self.worldinfo_v2.reset_used_in_game()
 
@@ -124,11 +130,14 @@ class koboldai_vars(object):
         if memory_length+used_tokens <= token_budget:
             if self.memory_length > self.max_memory_length:
                 if self.tokenizer is None:
-                    text = self.memory
+                    memory_text = self.memory
                 else:
-                    text += self.tokenizer.decode(self.tokenizer.encode(self.memory)[-self.max_memory_length-1:])
+                    memory_text += self.tokenizer.decode(self.tokenizer.encode(self.memory)[-self.max_memory_length-1:])
             else:
-                text += self.memory
+                memory_text += self.memory
+
+            context.append({"type": "memory", "text": })
+            text += memory_text
 
         #Add constant world info entries to memory
         for wi in self.worldinfo_v2:
@@ -455,7 +464,7 @@ class model_settings(settings):
 
 class story_settings(settings):
     local_only_variables = ['socketio', 'tokenizer', 'koboldai_vars']
-    no_save_variables = ['socketio', 'tokenizer', 'koboldai_vars']
+    no_save_variables = ['socketio', 'tokenizer', 'koboldai_vars', 'context']
     settings_name = "story"
     def __init__(self, socketio, koboldai_vars, tokenizer=None):
         self.socketio = socketio
@@ -514,6 +523,7 @@ class story_settings(settings):
         self.max_prompt_length = 512
         self.max_authornote_length = 512
         self.prompt_in_ai = False
+        self.context = []
 
     def save_story(self):
         print("Saving")
diff --git a/static/koboldai.js b/static/koboldai.js
index 52eef5b4..052137ed 100644
--- a/static/koboldai.js
+++ b/static/koboldai.js
@@ -468,6 +468,10 @@ function var_changed(data) {
             option.setAttribute("title", sp[1][1]);
             item.append(option);
         }
+    //Special case for context viewer
+    } else if (data.classname == "story" && data.name == "context") {
+        console.log("HELLO FRIENDS!!!")
+        console.log(data.value)
     //Basic Data Syncing
     } else {
         var elements_to_change = document.getElementsByClassName("var_sync_"+data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_"));
From b96483498292fa0e645de2434225f040dff930d5 Mon Sep 17 00:00:00 2001
From: somebody
Date: Sun, 28 Aug 2022 09:41:23 -0500
Subject: [PATCH 3/7] More work on context viewer

---
 aiserver.py              | 51 ------------------------------
 koboldai_settings.py     | 67 +++++++++++++++++++++++++++-------------
 static/koboldai.css      | 60 +++++++++++++++++++++++++++++++++++
 static/koboldai.js       | 31 +++++++++++++++++--
 templates/index_new.html | 20 ++++++++++++
 5 files changed, 155 insertions(+), 74 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index ae32cef8..d8647982 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -4251,7 +4251,6 @@ def apiactionsubmit(data, use_memory=False, use_world_info=False, use_story=Fals
     story_tokens = []
     mem_tokens = []
     wi_tokens = []
-    context = []
 
     story_budget = lambda: koboldai_vars.max_length - koboldai_vars.sp_length - koboldai_vars.genamt - len(tokenizer._koboldai_header) - len(story_tokens) - len(mem_tokens) - len(wi_tokens)
     budget = lambda: story_budget() + MIN_STORY_TOKENS
@@ -4285,22 +4284,6 @@ def apiactionsubmit(data, use_memory=False, use_world_info=False, use_story=Fals
     if not koboldai_vars.useprompt:
         story_tokens = tokenizer.encode(utils.encodenewlines(koboldai_vars.prompt))[-budget():] + story_tokens
 
-    # Context tracker
-    if koboldai_vars.sp:
-        context.append(ContextChunk(koboldai_vars.sp, ContextType.SOFT_PROMPT))
-
-    if tokenizer._koboldai_header:
-        context.append(ContextChunk(tokenizer._koboldai_header, ContextType.HEADER))
-
-    if mem_tokens:
-        context.append(ContextChunk(mem_tokens, ContextType.MEMORY))
-
-    if wi_tokens:
-        context.append(ContextChunk(wi_tokens, ContextType.WORLD_INFO))
-
-    if story_tokens:
-        context.append(ContextChunk(story_tokens, ContextType.STORY))
-
     tokens = tokenizer._koboldai_header + mem_tokens + wi_tokens + story_tokens
     assert story_budget() >= 0
     minimum = len(tokens) + 1
@@ -4311,8 +4294,6 @@ def apiactionsubmit(data, use_memory=False, use_world_info=False, use_story=Fals
     elif(koboldai_vars.use_colab_tpu or koboldai_vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")):
         genout = apiactionsubmit_tpumtjgenerate(tokens, minimum, maximum)
 
-    koboldai_vars.context = context
-    print(context)
     return genout
 
 #==================================================================#
@@ -4394,8 +4375,6 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
     anotetkns    = []  # Placeholder for Author's Note tokens
     lnanote      = 0   # Placeholder for Author's Note length
 
-    context = []
-
     lnsp = koboldai_vars.sp_length
 
     if("tokenizer" not in globals()):
@@ -4438,34 +4417,14 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
 
     assert budget >= 0
 
-    # Context behavior shared
-    if lnsp:
-        context.append({"type": "soft_prompt", "tokens": koboldai_vars.sp})
-    if koboldai_vars.model not in ("Colab", "API", "OAI") and tokenizer._koboldai_header:
-        context.append({"type": "header", "tokens": tokenizer._koboldai_header})
-    if memtokens:
-        context.append({"type": "memory", "tokens": memtokens})
-    if witokens:
-        context.append({"type": "world_info", "tokens": witokens})
-
     if(actionlen == 0):
         # First/Prompt action
         tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + anotetkns + prompttkns
         assert len(tokens) <= koboldai_vars.max_length - lnsp - koboldai_vars.genamt - budget_deduction
         ln = len(tokens) + lnsp
-
-        if anotetkns:
-            context.append({"type": "authors_note", "tokens": anotetkns})
-
-        if prompttkns:
-            context.append({"type": "prompt", "tokens": prompttkns})
-
-
-        koboldai_vars.context = context
         return tokens, ln+1, ln+koboldai_vars.genamt
     else:
         tokens = []
-        local_context = []
 
         # Check if we have the action depth to hit our A.N. depth
         if(anotetxt != "" and actionlen < koboldai_vars.andepth):
@@ -4484,13 +4443,11 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
             if(tknlen < budget):
                 tokens = acttkns + tokens
                 budget -= tknlen
-                local_context.insert(0, {"type": "action", "tokens": acttkns})
             else:
                 count = budget * -1
                 truncated_action_tokens = acttkns[count:]
                 tokens = truncated_action_tokens + tokens
                 budget = 0
-                local_context.insert(0, {"type": "action", "tokens": truncated_action_tokens})
                 break
 
             # Inject Author's Note if we've reached the desired depth
@@ -4498,7 +4455,6 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
             if(anotetxt != ""):
                 tokens = anotetkns + tokens # A.N. len already taken from bdgt
                 anoteadded = True
-                local_context.insert(0, {"type": "authors_note", "tokens": anotetkns})
             n += 1
 
     # If we're not using the prompt every time and there's still budget left,
@@ -4513,24 +4469,17 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
         # Did we get to add the A.N.? If not, do it here
         if(anotetxt != ""):
            if((not anoteadded) or forceanote):
                 # header, mem, wi, anote, prompt, actions
-                local_context.insert(0, {"type": "authors_note", "tokens": anotetkns})
-                local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
                 tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + anotetkns + prompttkns + tokens
             else:
-                local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
                 tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + prompttkns + tokens
         else:
             # Prepend Memory, WI, and Prompt before action tokens
-            local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
             tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + prompttkns + tokens
 
     # Send completed bundle to generator
     assert len(tokens) <= koboldai_vars.max_length - lnsp - koboldai_vars.genamt - budget_deduction
     ln = len(tokens) + lnsp
 
-    context += local_context
-    koboldai_vars.context = context
-
     return tokens, ln+1, ln+koboldai_vars.genamt
 
 #==================================================================#
diff --git a/koboldai_settings.py b/koboldai_settings.py
index b8736980..4ea196a8 100644
--- a/koboldai_settings.py
+++ b/koboldai_settings.py
@@ -118,26 +118,31 @@ class koboldai_vars(object):
         else:
             used_tokens = self.sp_length
         text = ""
 
-        if koboldai_vars.sp:
+        # TODO: We may want to replace the "text" variable with a list-type
+        # class of context blocks, the class having a __str__ function.
+        if self.sp:
             context.append({"type": "soft_prompt", "text": f"<{self.sp_length} tokens of Soft Prompt.>"})
-        if koboldai_vars.model not in ("Colab", "API", "OAI") and self.tokenizer._koboldai_header:
-            context.append({"type": "header", "tokens": self.tokenizer._koboldai_header})
+        # Header is never used?
+        # if koboldai_vars.model not in ("Colab", "API", "OAI") and self.tokenizer._koboldai_header:
+        #     context.append({"type": "header", "text": f"{len(self.tokenizer._koboldai_header})
 
         self.worldinfo_v2.reset_used_in_game()
 
         #Add memory
         memory_length = self.max_memory_length if self.memory_length > self.max_memory_length else self.memory_length
+        memory_text = None
         if memory_length+used_tokens <= token_budget:
             if self.memory_length > self.max_memory_length:
                 if self.tokenizer is None:
                     memory_text = self.memory
                 else:
-                    memory_text += self.tokenizer.decode(self.tokenizer.encode(self.memory)[-self.max_memory_length-1:])
+                    memory_text = self.tokenizer.decode(self.tokenizer.encode(self.memory)[-self.max_memory_length-1:])
             else:
-                memory_text += self.memory
+                memory_text = self.memory
 
-            context.append({"type": "memory", "text": })
-            text += memory_text
+            context.append({"type": "memory", "text": memory_text})
+            if memory_text:
+                text += memory_text
 
         #Add constant world info entries to memory
         for wi in self.worldinfo_v2:
@@ -146,7 +151,9 @@ class koboldai_vars(object):
                 used_tokens+=wi['token_length']
                 used_world_info.append(wi['uid'])
                 self.worldinfo_v2.set_world_info_used(wi['uid'])
-                text += wi['content']
+                wi_text = wi['content']
+                context.append({"type": "world_info", "text": wi_text})
+                text += wi_text
 
         #Add prompt lenght/text if we're set to always use prompt
         if self.useprompt:
@@ -171,15 +178,18 @@ class koboldai_vars(object):
                     if used_tokens+0 if 'token_length' not in wi else wi['token_length'] <= token_budget:
                         used_tokens+=wi['token_length']
                         used_world_info.append(wi['uid'])
-                        text += wi['content']
+                        wi_text = wi['content']
+                        context.append({"type": "world_info", "text": wi_text})
+                        text += wi_text
                         self.worldinfo_v2.set_world_info_used(wi['uid'])
-            if self.prompt_length > self.max_prompt_length:
-                if self.tokenizer is None:
-                    text += self.prompt
-                else:
-                    text += self.tokenizer.decode(self.tokenizer.encode(self.prompt)[-self.max_prompt_length-1:])
-            else:
-                text += self.prompt
+
+            prompt_text = self.prompt
+            if self.tokenizer and self.prompt_length > self.max_prompt_length:
+                if self.tokenizer:
+                    prompt_text += self.tokenizer.decode(self.tokenizer.encode(self.prompt)[-self.max_prompt_length-1:])
+
+            text += prompt_text
+            context.append({"type": "prompt", "text": self.prompt})
             self.prompt_in_ai = True
         else:
             self.prompt_in_ai = False
@@ -189,13 +199,18 @@ class koboldai_vars(object):
 
         #Start going through the actions backwards, adding it to the text if it fits and look for world info entries
         game_text = ""
+        game_context = []
+        authors_note_final = self.authornotetemplate.replace("<|>", self.authornote)
         used_all_tokens = False
         for i in range(len(self.actions)-1, -1, -1):
             if len(self.actions) - i == self.andepth and self.authornote != "":
-                game_text = "{}{}".format(self.authornotetemplate.replace("<|>", self.authornote), game_text)
+                game_text = "{}{}".format(authors_note_final, game_text)
+                game_context.insert(0, {"type": "authors_note", "text": authors_note_final})
             if self.actions.actions[i]["Selected Text Length"]+used_tokens <= token_budget and not used_all_tokens:
                 used_tokens += self.actions.actions[i]["Selected Text Length"]
-                game_text = "{}{}".format(self.actions.actions[i]["Selected Text"], game_text)
+                selected_text = self.actions.actions[i]["Selected Text"]
+                game_text = "{}{}".format(selected_text, game_text)
+                game_context.insert(0, {"type": "action", "text": selected_text})
                 self.actions.set_action_in_ai(i)
                 #Now we need to check for used world info entries
                 for wi in self.worldinfo_v2:
@@ -216,7 +231,9 @@ class koboldai_vars(object):
                         if used_tokens+0 if 'token_length' not in wi else wi['token_length'] <= token_budget:
                             used_tokens+=wi['token_length']
                             used_world_info.append(wi['uid'])
-                            game_text = "{}{}".format(wi['content'], game_text)
+                            wi_text = wi["content"]
+                            game_text = "{}{}".format(wi_text, game_text)
+                            game_context.insert(0, {"type": "world_info", "text": wi_text})
                             self.worldinfo_v2.set_world_info_used(wi['uid'])
             else:
                 self.actions.set_action_in_ai(i, used=False)
@@ -224,7 +241,8 @@ class koboldai_vars(object):
 
         #if we don't have enough actions to get to author's note depth then we just add it right before the game text
         if len(self.actions) < self.andepth and self.authornote != "":
-            game_text = "{}{}".format(self.authornotetemplate.replace("<|>", self.authornote), game_text)
+            game_text = "{}{}".format(authors_note_final, game_text)
+            game_context.insert(0, {"type": "authors_note", "text": authors_note_final})
 
         if not self.useprompt:
             if self.prompt_length + used_tokens < token_budget:
@@ -248,18 +266,25 @@ class koboldai_vars(object):
                         if used_tokens+0 if 'token_length' not in wi else wi['token_length'] <= token_budget:
                             used_tokens+=wi['token_length']
                             used_world_info.append(wi['uid'])
-                            text += wi['content']
+                            wi_text = wi["content"]
+                            text += wi_text
+                            context.append({"type": "world_info", "text": wi_text})
                             self.worldinfo_v2.set_world_info_used(wi['uid'])
             self.prompt_in_ai = True
         else:
             self.prompt_in_ai = False
 
         text += self.prompt
+        context.append({"type": "prompt", "text": self.prompt})
         text += game_text
+        context += game_context
+
         if self.tokenizer is None:
             tokens = []
         else:
             tokens = self.tokenizer.encode(text)
+
+        self.context = context
         return tokens, used_tokens, used_tokens+self.genamt
 
     def __setattr__(self, name, value):
diff --git a/static/koboldai.css b/static/koboldai.css
index 8a5c9416..5b168c59 100644
--- a/static/koboldai.css
+++ b/static/koboldai.css
@@ -1559,6 +1559,66 @@ body {
 .model_setting_item_input {
     width:95%;
 }
+/*------------------------------ Context Viewer --------------------------------------------*/
+#context-viewer-container {
+    position: absolute;
+    left: 0px;
+    top: 0px;
+
+    display: flex;
+    justify-content: center;
+    align-items: center;
+
+    background-color: rgba(0, 0, 0, 0.7);
+
+    width: 100vw;
+    height: 100vh;
+
+    z-index: 20;
+}
+
+#context-viewer {
+    display: flex;
+    flex-direction: column;
+
+    width: 50%;
+    height: 75%;
+    padding-bottom: 10px;
+    background-color: var(--layer1_palette);
+}
+
+#context-viewer-header {
+    display: flex;
+    justify-content: space-between;
+
+    padding: 5px;
+
+    background-color: var(--background);
+    margin-bottom: 3px;
+}
+
+#context-viewer-header > h3 {
+    margin: 0px;
+    margin-top: 3px;
+}
+
+#context-container {
+    overflow-y: auto;
+    height: 100%;
+    flex-grow: 1;
+    padding: 0px 10px
+}
+
+.context-block {
+    margin: 0px 2px;
+}
+
+.context-sp {background-color: orangered;}
+.context-prompt {background-color: orange;}
+.context-wi {background-color: #1751b9;}
+.context-memory {background-color: #5e4395;}
+.context-an {background-color: palevioletred;}
+.context-action {background-color: #7d3737;}
 
 /*---------------------------------- Global ------------------------------------------------*/
 .hidden {
diff --git a/static/koboldai.js b/static/koboldai.js
index 052137ed..ba842d8d 100644
--- a/static/koboldai.js
+++ b/static/koboldai.js
@@ -470,8 +470,7 @@ function var_changed(data) {
         }
     //Special case for context viewer
     } else if (data.classname == "story" && data.name == "context") {
-        console.log("HELLO FRIENDS!!!")
-        console.log(data.value)
+        update_context(data.value);
     //Basic Data Syncing
     } else {
         var elements_to_change = document.getElementsByClassName("var_sync_"+data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_"));
@@ -2044,6 +2043,34 @@ function update_bias_slider_value(slider) {
     slider.parentElement.parentElement.querySelector(".bias_slider_cur").textContent = slider.value;
 }
 
+function update_context(data) {
+    for (const el of document.getElementsByClassName("context-block")) {
+        if (el.classList.contains("example")) continue;
+        el.remove();
+    }
+
+    for (const entry of data) {
+        console.log(entry);
+        let contextClass = "context-" + ({
+            soft_prompt: "sp",
+            prompt: "prompt",
+            world_info: "wi",
+            memory: "memory",
+            authors_note: "an",
+            action: "action"
+        }[entry.type]);
+
+        let el = document.createElement("span");
+        el.classList.add("context-block");
+        el.classList.add(contextClass);
+        el.innerText = entry.text;
+
+        document.getElementById("context-container").appendChild(el);
+    }
+
+
+}
+
 function save_model_settings(settings = saved_settings) {
     for (item of document.getElementsByClassName('setting_item_input')) {
         if (item.id.includes("model")) {
diff --git a/templates/index_new.html b/templates/index_new.html
index 0afad93e..f0927a24 100644
--- a/templates/index_new.html
+++ b/templates/index_new.html
@@ -112,5 +112,25 @@
 {% include 'templates.html' %}
 
+
+	<div id="context-viewer-container" class="hidden">
+		<div id="context-viewer">
+			<div id="context-viewer-header">
+				<h3>Context Viewer</h3>
+			</div>
+			<div id="context-viewer-key">
+				Key:
+				<span class="context-block example context-sp">Soft Prompt</span>
+				<span class="context-block example context-prompt">Prompt</span>
+				<span class="context-block example context-wi">World Info</span>
+				<span class="context-block example context-memory">Memory</span>
+				<span class="context-block example context-an">Author's Note</span>
+				<span class="context-block example context-action">Action</span>
+			</div>
+
+			<div id="context-container">
+			</div>
+		</div>
+	</div>
 </body>
 </html>
\ No newline at end of file
From 01d8736daf1e482c5b654653ae5674e6ca88d0a6 Mon Sep 17 00:00:00 2001
From: somebody
Date: Sun, 28 Aug 2022 17:54:21 -0500
Subject: [PATCH 4/7] Improve context viewer

---
 static/koboldai.css      | 34 ++++++++++++++++++++++++++++++++--
 static/koboldai.js       | 10 ++++++++++
 templates/index_new.html | 27 +++++++++++++++++----------
 3 files changed, 59 insertions(+), 12 deletions(-)

diff --git a/static/koboldai.css b/static/koboldai.css
index 5b168c59..188b1be2 100644
--- a/static/koboldai.css
+++ b/static/koboldai.css
@@ -809,6 +809,7 @@ td.server_vars {
     padding-right: 35px;
     margin-bottom: 10px;
     flex-shrink: 0;
+    cursor: pointer;
 }
 
 .token_breakdown div {
@@ -1597,6 +1598,16 @@ body {
     margin-bottom: 3px;
 }
 
+#context-viewer-header-right {
+    display: flex;
+    flex-direction: row;
+}
+
+#context-viewer-close {
+    cursor: pointer;
+    float: right;
+}
+
 #context-viewer-header > h3 {
     margin: 0px;
     margin-top: 3px;
@@ -1606,13 +1617,28 @@ body {
     overflow-y: auto;
     height: 100%;
     flex-grow: 1;
-    padding: 0px 10px
+    padding: 0px 10px;
+}
+
+.context-symbol {
+    font-size: 1em !important;
+    position: relative;
+    top: 3px;
+    opacity: 0.5;
 }
 
 .context-block {
     margin: 0px 2px;
 }
 
+.context-block:hover {
+    outline: 1px solid gray;
+}
+
+.context-block.example:hover {
+    outline: none !important;
+}
+
 .context-sp {background-color: orangered;}
 .context-prompt {background-color: orange;}
 .context-wi {background-color: #1751b9;}
@@ -1783,7 +1809,11 @@ h2 .material-icons-outlined {
     cursor: pointer;
 }
 
-.material-icons-outlined, .collapsable_header, .section_header, .help_text {
+.material-icons-outlined,
+.collapsable_header,
+.section_header,
+.help_text,
+.noselect {
     -webkit-touch-callout: none;
     -webkit-user-select: none;
     -khtml-user-select: none;
diff --git a/static/koboldai.js b/static/koboldai.js
index ba842d8d..87729823 100644
--- a/static/koboldai.js
+++ b/static/koboldai.js
@@ -2065,6 +2065,8 @@ function update_context(data) {
         el.classList.add(contextClass);
         el.innerText = entry.text;
 
+        el.innerHTML = el.innerHTML.replaceAll("\n", '<span class="material-icons-outlined context-symbol">keyboard_return</span>');
+
         document.getElementById("context-container").appendChild(el);
     }
 
@@ -2905,4 +2907,12 @@ $(document).ready(function(){
         if (enabledTweaks.includes(path)) toggle.click();
     }
 
+
+    $("#context-viewer-close").click(function() {
+        document.getElementById("context-viewer-container").classList.add("hidden");
+    });
+
+    $(".token_breakdown").click(function() {
+        document.getElementById("context-viewer-container").classList.remove("hidden");
+    });
 });
diff --git a/templates/index_new.html b/templates/index_new.html
index f0927a24..89675cee 100644
--- a/templates/index_new.html
+++ b/templates/index_new.html
@@ -113,22 +113,29 @@
-
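Patch 3 above moves the bookkeeping out of aiserver.py and into calc_ai_text in koboldai_settings.py, recording decoded text ({"type": ..., "text": ...}) rather than token ids, and update_context in koboldai.js renders each entry as a colored span keyed by its type. (The prompt_text branch still looks unfinished: prompt_text += appends the truncated prompt onto the full prompt instead of replacing it.) The ordering detail is easy to miss: actions are scanned newest first while the budget lasts, so entries are prepended to game_context to keep the final list in story order. A minimal sketch of that pattern, with made-up data:

    # Sketch: walk actions newest-first under a token budget, but collect
    # the context entries in story order by prepending to the local list.
    token_budget = 6
    actions = [("a1", 2), ("a2", 2), ("a3", 2), ("a4", 2)]  # oldest first: (text, cost)

    used_tokens = 0
    game_context = []
    for text, cost in reversed(actions):        # newest action gets budget first
        if used_tokens + cost > token_budget:
            break                               # older actions fall out of context
        used_tokens += cost
        game_context.insert(0, {"type": "action", "text": text})

    print([chunk["text"] for chunk in game_context])  # ['a2', 'a3', 'a4']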
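Patch 4 (truncated above) wires the viewer into the UI: clicking the token breakdown bar unhides #context-viewer-container, the new close icon hides it again, and newlines inside a chunk are drawn as a keyboard_return icon. The icon substitution relies on an escape-then-inject order: assigning el.innerText HTML-escapes the untrusted story text, and only afterwards is trusted icon markup spliced in through innerHTML. The same idea in Python, as an illustration only:

    # Sketch: escape untrusted chunk text first, then inject trusted markup.
    from html import escape

    ICON = '<span class="material-icons-outlined context-symbol">keyboard_return</span>'

    def render_chunk(text: str) -> str:
        return escape(text).replace("\n", ICON)

    print(render_chunk("line one\n<b>not bold</b>"))
    # 'line one' + the icon markup + '&lt;b&gt;not bold&lt;/b&gt;'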