Working on context viewer

This commit is contained in:
somebody
2022-08-27 20:53:18 -05:00
parent 889c318a22
commit d534cbdeb1
3 changed files with 54 additions and 5 deletions

View File

@@ -4394,6 +4394,8 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
anotetkns = [] # Placeholder for Author's Note tokens
lnanote = 0 # Placeholder for Author's Note length
context = []
lnsp = koboldai_vars.sp_length
if("tokenizer" not in globals()):
@@ -4436,14 +4438,34 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
assert budget >= 0
# Context entries shared by both branches below
if lnsp:
context.append({"type": "soft_prompt", "tokens": koboldai_vars.sp})
if koboldai_vars.model not in ("Colab", "API", "OAI") and tokenizer._koboldai_header:
context.append({"type": "header", "tokens": tokenizer._koboldai_header})
if memtokens:
context.append({"type": "memory", "tokens": memtokens})
if witokens:
context.append({"type": "world_info", "tokens": witokens})
if(actionlen == 0):
# First/Prompt action
tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + anotetkns + prompttkns
assert len(tokens) <= koboldai_vars.max_length - lnsp - koboldai_vars.genamt - budget_deduction
ln = len(tokens) + lnsp
if anotetkns:
context.append({"type": "authors_note", "tokens": anotetkns})
if prompttkns:
context.append({"type": "prompt", "tokens": prompttkns})
koboldai_vars.context = context
return tokens, ln+1, ln+koboldai_vars.genamt
else:
tokens = []
local_context = []
# Check if we have the action depth to hit our A.N. depth
if(anotetxt != "" and actionlen < koboldai_vars.andepth):
@@ -4462,10 +4484,13 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
if(tknlen < budget):
tokens = acttkns + tokens
budget -= tknlen
local_context.insert(0, {"type": "action", "tokens": acttkns})
else:
count = budget * -1
truncated_action_tokens = acttkns[count:]
tokens = truncated_action_tokens + tokens
budget = 0
local_context.insert(0, {"type": "action", "tokens": truncated_action_tokens})
break
# Inject Author's Note if we've reached the desired depth
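Aside, not part of the diff: a tiny worked example of the negative-index truncation above, with invented values, showing which tokens truncated_action_tokens keeps.

# Worked example (values invented for illustration):
budget = 3                        # only 3 tokens of budget remain
acttkns = [10, 11, 12, 13, 14]    # this action is 5 tokens long
count = budget * -1               # -3
truncated_action_tokens = acttkns[count:]   # [12, 13, 14]: only the newest `budget` tokens survive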
@@ -4473,6 +4498,7 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
if(anotetxt != ""): if(anotetxt != ""):
tokens = anotetkns + tokens # A.N. len already taken from bdgt tokens = anotetkns + tokens # A.N. len already taken from bdgt
anoteadded = True anoteadded = True
local_context.insert(0, {"type": "authors_note", "tokens": anotetkns})
n += 1
# If we're not using the prompt every time and there's still budget left,
@@ -4486,16 +4512,25 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
# Did we get to add the A.N.? If not, do it here
if(anotetxt != ""):
if((not anoteadded) or forceanote):
# header, mem, wi, anote, prompt, actions
local_context.insert(0, {"type": "authors_note", "tokens": anotetkns})
local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + anotetkns + prompttkns + tokens
else:
local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + prompttkns + tokens
else:
# Prepend Memory, WI, and Prompt before action tokens
local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + prompttkns + tokens
# Send completed bundle to generator
assert len(tokens) <= koboldai_vars.max_length - lnsp - koboldai_vars.genamt - budget_deduction
ln = len(tokens) + lnsp
context += local_context
koboldai_vars.context = context
return tokens, ln+1, ln+koboldai_vars.genamt
#==================================================================#
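For orientation, a minimal sketch (not part of the commit) of the structure calcsubmitbudget now stores in koboldai_vars.context. The token IDs are invented; the entry order mirrors the append/insert calls above (soft prompt, header, memory, world info, then author's note, prompt and actions).

# Illustrative only -- the approximate shape of koboldai_vars.context after a call.
example_context = [
    {"type": "soft_prompt",  "tokens": [101, 102]},
    {"type": "header",       "tokens": [1]},
    {"type": "memory",       "tokens": [2000, 2001]},
    {"type": "world_info",   "tokens": [3000]},
    {"type": "authors_note", "tokens": [4000, 4001]},
    {"type": "prompt",       "tokens": [5000, 5001, 5002]},
    {"type": "action",       "tokens": [6000]},
]
# A context viewer can report how each segment spends the token budget:
for entry in example_context:
    print(f"{entry['type']:>12}: {len(entry['tokens'])} tokens")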

View File

@@ -109,6 +109,7 @@ class koboldai_vars(object):
self._model_settings.reset_for_model_load()
def calc_ai_text(self, submitted_text=""):
context = []
token_budget = self.max_length
used_world_info = []
if self.tokenizer is None:
@@ -116,6 +117,11 @@ class koboldai_vars(object):
else:
used_tokens = self.sp_length
text = ""
if self.sp:
context.append({"type": "soft_prompt", "text": f"<{self.sp_length} tokens of Soft Prompt.>"})
if self.model not in ("Colab", "API", "OAI") and self.tokenizer._koboldai_header:
context.append({"type": "header", "tokens": self.tokenizer._koboldai_header})
self.worldinfo_v2.reset_used_in_game()
@@ -124,11 +130,14 @@ class koboldai_vars(object):
if memory_length+used_tokens <= token_budget:
if self.memory_length > self.max_memory_length:
if self.tokenizer is None:
memory_text = self.memory
else:
memory_text += self.tokenizer.decode(self.tokenizer.encode(self.memory)[-self.max_memory_length-1:])
else:
memory_text += self.memory
context.append({"type": "memory", "text": })
text += memory_text
#Add constant world info entries to memory
for wi in self.worldinfo_v2:
@@ -455,7 +464,7 @@ class model_settings(settings):
class story_settings(settings):
local_only_variables = ['socketio', 'tokenizer', 'koboldai_vars']
no_save_variables = ['socketio', 'tokenizer', 'koboldai_vars', 'context']
settings_name = "story"
def __init__(self, socketio, koboldai_vars, tokenizer=None):
self.socketio = socketio
@@ -514,6 +523,7 @@ class story_settings(settings):
self.max_prompt_length = 512
self.max_authornote_length = 512
self.prompt_in_ai = False
self.context = []
def save_story(self):
print("Saving")

View File

@@ -468,6 +468,10 @@ function var_changed(data) {
option.setAttribute("title", sp[1][1]); option.setAttribute("title", sp[1][1]);
item.append(option); item.append(option);
} }
//Special case for context viewer
} else if (data.classname == "story" && data.name == "context") {
console.log("HELLO FRIENDS!!!")
console.log(data.value)
//Basic Data Syncing
} else {
var elements_to_change = document.getElementsByClassName("var_sync_"+data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_"));
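How the new story.context value reaches this handler is not shown in the diff. As a rough, hypothetical sketch only (event name and payload keys inferred from the JavaScript above, which reads data.classname, data.name and data.value), the server side would broadcast something like:

# Hypothetical sketch, not the project's code: pushing a changed story
# variable over Socket.IO so the var_changed handler above fires.
def broadcast_var_changed(socketio, classname, name, value):
    socketio.emit("var_changed", {"classname": classname, "name": name, "value": value})

# e.g. broadcast_var_changed(socketio, "story", "context", story_settings_instance.context)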