Fix for prompt/author's note/world info not spacing correctly in the AI text

Author: ebolam
Date: 2022-10-17 11:05:02 -04:00
parent 881f5391d7
commit 90c3bbfdab
2 changed files with 157 additions and 229 deletions
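The core of the fix: memory, world info, and author's note chunks were previously joined into the AI input without a guaranteed separator, so the last word of one chunk could fuse with the first word of the next. The new code normalizes every chunk to end in a space or newline before tokenizing it. A minimal sketch of that rule follows; `normalize_chunk` is a hypothetical helper name, since the diff inlines this check at each call site rather than factoring it out:

# A minimal sketch, not a function from the diff: the spacing rule the
# commit inlines for memory, world info, and author's note chunks.
def normalize_chunk(text: str) -> str:
    """Ensure a context chunk ends in whitespace so it cannot fuse
    with the chunk that follows it in the AI input."""
    if text and text[-1] not in (" ", "\n"):
        return text + " "
    return text

assert normalize_chunk("The dragon sleeps.") == "The dragon sleeps. "
assert normalize_chunk("ends in newline\n") == "ends in newline\n"
assert normalize_chunk("") == ""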

koboldai_settings.py

@@ -198,167 +198,180 @@ class koboldai_vars(object):
     def calc_ai_text(self, submitted_text="", return_text=False):
         #start_time = time.time()
+        if self.tokenizer is None:
+            if return_text:
+                return ""
+            return [], 0, 0+self.genamt, []
         if self.alt_gen:
             method = 2
         else:
             method = 1
+        #Context and Game Context are lists of chunks of text that will go to the AI. Game Context will be appended after context when we're done
         context = []
+        game_context = []
         token_budget = self.max_length - self.genamt
         used_world_info = []
-        if self.tokenizer is None:
-            used_tokens = 99999999999999999999999
-        else:
-            used_tokens = 0 if self.sp_length is None else self.sp_length + len(self.tokenizer._koboldai_header)
-        text = ""
-        # TODO: We may want to replace the "text" variable with a list-type
-        # class of context blocks, the class having a __str__ function.
+        used_tokens = 0 if self.sp_length is None else self.sp_length + len(self.tokenizer._koboldai_header)
         if self.sp_length > 0:
             context.append({"type": "soft_prompt", "text": f"<{self.sp_length} tokens of Soft Prompt.>", "tokens": [[-1, ""]] * self.sp_length})
-        # Header is never used?
-        # if koboldai_vars.model not in ("Colab", "API", "OAI") and self.tokenizer._koboldai_header:
-        #     context.append({"type": "header", "text": f"{len(self.tokenizer._koboldai_header})
         self.worldinfo_v2.reset_used_in_game()
-        #Add memory
-        memory_length = self.max_memory_length if self.memory_length > self.max_memory_length else self.memory_length
+        ######################################### Add memory ########################################################
         memory_text = self.memory
-        memory_encoded = None
-        if memory_length+used_tokens <= token_budget:
-            if self.tokenizer is not None and self.memory_length > self.max_memory_length:
-                memory_encoded = self.tokenizer.encode(self.memory)[-self.max_memory_length-1:]
-                memory_text = self.tokenizer.decode(memory_encoded)
-            if not memory_encoded and self.tokenizer:
-                memory_encoded = self.tokenizer.encode(memory_text)
-            context.append({"type": "memory", "text": memory_text, "tokens": self.get_token_representation(memory_encoded)})
-            text += memory_text
-            used_tokens += self.memory_length if self.memory_length <= self.max_memory_length else self.max_memory_length
+        if memory_text != "":
+            if memory_text[-1] not in [" ", '\n']:
+                memory_text += " "
+            memory_tokens = self.tokenizer.encode(memory_text)
+        else:
+            memory_tokens = []
+        if len(memory_tokens) > self.max_memory_length:
+            memory_tokens = memory_tokens[:self.max_memory_length]
+            memory_length = self.max_memory_length
+        memory_data = [[x, self.tokenizer.decode(x)] for x in memory_tokens]
+        #We have so much memory that we've run out of budget without going over the max_memory_length. Just stop
+        if len(memory_tokens) > token_budget:
+            return [], 0, 0+self.genamt, []
+        #Actually Add Memory
+        if len(memory_tokens) != 0:
+            context.append({"type": "memory",
+                            "text": "".join([x[1] for x in memory_data]),
+                            "tokens": memory_data})
+            used_tokens += len(memory_tokens)
+        ######################################### Constant World Info ########################################################
         #Add constant world info entries to memory
         for wi in self.worldinfo_v2:
             if wi['constant']:
-                if used_tokens+0 if 'token_length' not in wi else wi['token_length'] <= token_budget:
-                    used_tokens+=0 if wi['token_length'] is None else wi['token_length']
+                wi_length = len(self.tokenizer.encode(wi['content']))
+                if used_tokens + wi_length <= token_budget:
+                    used_tokens+=wi_length
                     used_world_info.append(wi['uid'])
                     self.worldinfo_v2.set_world_info_used(wi['uid'])
-                    wi_text = wi['content']
+                    wi_text = wi['content']+" " if wi['content'][-1] not in [" ", "\n"] else wi['content']
+                    wi_tokens = self.tokenizer.encode(wi_text)
                     context.append({
                         "type": "world_info",
                         "text": wi_text,
-                        "tokens": self.get_token_representation(wi_text),
+                        "uid": wi['uid'],
+                        "tokens": [[x, self.tokenizer.decode(x)] for x in wi_tokens],
                     })
-                    text += wi_text
-                    used_tokens += wi['token_length']
+                    used_tokens += len(wi_tokens)
+        ######################################### Get Action Text by Sentence ########################################################
         action_text_split = self.actions.to_sentences(submitted_text=submitted_text)
+        ######################################### Prompt ########################################################
         #Add prompt lenght/text if we're set to always use prompt
         if self.useprompt:
             prompt_length = 0
-            prompt_text = ""
-            for item in action_text_split:
-                if -1 not in item[1]:
-                    #We've finished going through our prompt. Stop
-                    break
-                if self.tokenizer is None:
-                    item_size = 0
-                else:
-                    item_size = len(self.tokenizer.encode(item[0]))
-                if prompt_length + item_size < self.max_prompt_length:
-                    prompt_length += item_size
-                    item[3] = True
-                    prompt_text += item[0]
-            if prompt_length + used_tokens < token_budget:
+            prompt_data = []
+            for item in reversed(action_text_split):
+                if -1 in item[1]:
+                    tokenized_data = self.tokenizer.encode(item[0])
+                    item[2] = len(tokenized_data)
+                    if prompt_length + item[2] <= self.max_prompt_length:
+                        prompt_length += item[2]
+                        item[3] = True
+                        prompt_data = tokenized_data + prompt_data
+            prompt_text = self.tokenizer.decode([x[0] for x in prompt_data])
+            wi_search = re.sub("[^A-Za-z\ 0-9\'\"]", "", prompt_text)
+            if prompt_length + used_tokens <= token_budget:
                 used_tokens += prompt_length
-                #We'll add the prompt text AFTER we go through the game text as the world info needs to come first if we're in method 1 rather than method 2
-                self.prompt_in_ai = True
                 #Find World Info entries in prompt
                 for wi in self.worldinfo_v2:
                     if wi['uid'] not in used_world_info:
                         #Check to see if we have the keys/secondary keys in the text so far
                         match = False
                         for key in wi['key']:
-                            if key in prompt_text:
+                            if key in wi_search:
                                 match = True
                                 break
                         if wi['selective'] and match:
                             match = False
                             for key in wi['keysecondary']:
-                                if key in prompt_text:
+                                if key in wi_search:
                                     match=True
                                     break
                         if match:
-                            if used_tokens+0 if 'token_length' not in wi else wi['token_length'] <= token_budget:
-                                used_tokens+=0 if wi['token_length'] is None else wi['token_length']
+                            wi_length = len(self.tokenizer.encode(wi['content']))
+                            if used_tokens+wi_length <= token_budget:
+                                used_tokens+=wi_length
                                 used_world_info.append(wi['uid'])
-                                wi_text = wi['content']
-                                context.append({"type": "world_info", "text": wi_text, "tokens": self.get_token_representation(wi_text)})
-                                text += wi_text
-                                used_tokens += wi['token_length']
+                                wi_text = wi['content']+" " if wi['content'][-1] not in [" ", "\n"] else wi['content']
+                                wi_tokens = self.tokenizer.encode(wi_text)
+                                context.append({
+                                    "type": "world_info",
+                                    "text": wi_text,
+                                    "uid": wi['uid'],
+                                    "tokens": [[x, self.tokenizer.decode(x)] for x in wi_tokens],
+                                })
+                                used_tokens += len(wi_tokens)
                                 self.worldinfo_v2.set_world_info_used(wi['uid'])
+                #We'll add the prompt text AFTER we go through the game text as the world info needs to come first if we're in method 1 rather than method 2
+                self.prompt_in_ai = True
             else:
                 self.prompt_in_ai = False
-        #remove author's notes token length (Add the text later)
-        used_tokens += self.authornote_length
+        ######################################### Setup Author's Note Data ########################################################
+        authors_note_text = self.authornotetemplate.replace("<|>", self.authornote)
+        if len(authors_note_text) > 0 and authors_note_text[-1] not in [" ", "\n"]:
+            authors_note_text += " "
+        authors_note_data = [[x, self.tokenizer.decode(x)] for x in self.tokenizer.encode(authors_note_text)]
+        if used_tokens + len(authors_note_data) <= token_budget:
+            used_tokens += len(authors_note_data)
+        ######################################### Actions ########################################################
         #Start going through the actions backwards, adding it to the text if it fits and look for world info entries
-        game_text = ""
-        game_context = []
-        authors_note_final = self.authornotetemplate.replace("<|>", self.authornote)
         used_all_tokens = False
-        for action in range(len(self.actions)):
-            self.actions.set_action_in_ai(action, used=False)
+        actions_seen = [] #Used to track how many actions we've seen so we can insert author's note in the appropriate place as well as WI depth stop
+        inserted_author_note = False
         for i in range(len(action_text_split)-1, -1, -1):
-            if action_text_split[i][3] or action_text_split[i][1] == [-1]:
-                #We've hit an item we've already included or items that are only prompt. Stop
+            if action_text_split[i][3]:
+                #We've hit an item we've already included. Stop
+                for action in action_text_split[i][1]:
+                    if action >= 0:
+                        self.actions.set_action_in_ai(action)
                 break
-            if len(action_text_split) - i - 1 == self.andepth and self.authornote != "":
-                game_text = "{}{}".format(authors_note_final, game_text)
-                game_context.insert(0, {"type": "authors_note", "text": authors_note_final, "tokens": self.get_token_representation(authors_note_final)})
+            for action in action_text_split[i][1]:
+                if action not in actions_seen:
+                    actions_seen.append(action)
+            #Add our author's note if we've hit andepth
+            if not inserted_author_note and len(actions_seen) >= self.andepth and self.authornote != "":
+                game_context.insert(0, {"type": "authors_note", "text": authors_note_text, "tokens": authors_note_data})
+                inserted_author_note = True
-            encoded_action = [] if not self.tokenizer else self.tokenizer.encode(action_text_split[i][0])
-            length = len(encoded_action)
-            if length+used_tokens <= token_budget and not used_all_tokens:
+            action_data = [[x, self.tokenizer.decode(x)] for x in self.tokenizer.encode(action_text_split[i][0])]
+            length = len(action_data)
+            if length+used_tokens <= token_budget:
                 used_tokens += length
-                selected_text = action_text_split[i][0]
                 action_text_split[i][3] = True
-                game_text = "{}{}".format(selected_text, game_text)
+                action_type = "action"
                 if action_text_split[i][1] == [self.actions.action_count+1]:
-                    game_context.insert(0, {
-                        "type": "submit",
-                        "text": selected_text,
-                        "tokens": self.get_token_representation(encoded_action),
-                        "action_ids": action_text_split[i][1]
-                    })
-                else:
-                    game_context.insert(0, {
-                        "type": "action",
-                        "text": selected_text,
-                        "tokens": self.get_token_representation(encoded_action),
-                        "action_ids": action_text_split[i][1]
-                    })
-                for action in action_text_split[i][1]:
-                    if action >= 0:
-                        self.actions.set_action_in_ai(action)
+                    action_type = "submit"
+                elif -1 in action_text_split[i][1]:
+                    action_type = "prompt"
+                game_context.insert(0, {
+                    "type": action_type,
+                    "text": action_text_split[i][0],
+                    "tokens": action_data,
+                    "action_ids": action_text_split[i][1]
+                })
+                wi_search = re.sub("[^A-Za-z\ 0-9\'\"]", "", action_text_split[i][0])
                 #Now we need to check for used world info entries
                 for wi in self.worldinfo_v2:
@@ -366,108 +379,70 @@ class koboldai_vars(object):
                         #Check to see if we have the keys/secondary keys in the text so far
                         match = False
                         for key in wi['key']:
-                            if key in selected_text:
+                            if key in wi_search:
                                 match = True
                                 break
                         if wi['selective'] and match:
                             match = False
                             for key in wi['keysecondary']:
-                                if key in selected_text:
+                                if key in wi_search:
                                     match=True
                                     break
                         if method == 1:
-                            if len(action_text_split) - i > self.widepth:
+                            if len(actions_seen) > self.widepth:
                                 match = False
                         if match:
-                            if used_tokens+0 if 'token_length' not in wi or wi['token_length'] is None else wi['token_length'] <= token_budget:
-                                used_tokens+=0 if wi['token_length'] is None else wi['token_length']
+                            wi_length = len(self.tokenizer.encode(wi['content']))
+                            if used_tokens+wi_length <= token_budget:
+                                used_tokens+=wi_length
                                 used_world_info.append(wi['uid'])
-                                wi_text = wi["content"]
-                                encoded_wi = self.tokenizer.encode(wi_text)
+                                wi_text = wi['content']+" " if wi['content'][-1] not in [" ", "\n"] else wi['content']
+                                wi_tokens = self.tokenizer.encode(wi_text)
                                 if method == 1:
-                                    text = "{}{}".format(wi_text, game_text)
-                                    context.insert(0, {"type": "world_info", "text": wi_text, "tokens": self.get_token_representation(encoded_wi)})
+                                    context.append({
+                                        "type": "world_info",
+                                        "text": wi_text,
+                                        "uid": wi['uid'],
+                                        "tokens": [[x, self.tokenizer.decode(x)] for x in wi_tokens],
+                                    })
                                 else:
-                                    game_text = "{}{}".format(wi_text, game_text)
-                                    game_context.insert(0, {"type": "world_info", "text": wi_text, "tokens": self.get_token_representation(encoded_wi)})
-                                used_tokens += wi['token_length']
+                                    #for method 2 we add the game text before the current action
+                                    game_context.insert(0, {
+                                        "type": "world_info",
+                                        "text": wi_text,
+                                        "uid": wi['uid'],
+                                        "tokens": [[x, self.tokenizer.decode(x)] for x in wi_tokens],
+                                    })
+                                used_tokens += len(wi_tokens)
                                 self.worldinfo_v2.set_world_info_used(wi['uid'])
             else:
                 used_all_tokens = True
                 break
+        ######################################### Verify Author's Note Data in AI Text ########################################################
         #if we don't have enough actions to get to author's note depth then we just add it right before the game text
-        if len(action_text_split) < self.andepth and self.authornote != "":
-            game_text = "{}{}".format(authors_note_final, game_text)
-            game_context.insert(0, {"type": "authors_note", "text": authors_note_final, "tokens": self.get_token_representation(authors_note_final)})
-        if self.useprompt:
-            text += prompt_text
-            context.append({"type": "prompt", "text": prompt_text, "tokens": self.get_token_representation(prompt_text)})
-        elif not used_all_tokens:
-            prompt_length = 0
-            prompt_text = ""
-            for item in action_text_split:
-                if -1 not in item[1] or item[3]:
-                    #We've finished going through our prompt. Stop
-                    break
-                if self.tokenizer is None:
-                    item_size = 0
-                else:
-                    item_size = len(self.tokenizer.encode(item[0]))
-                if prompt_length + item_size < self.max_prompt_length and not item[3]:
-                    prompt_length += item_size
-                    item[3] = True
-                    prompt_text += item[0]
-            if prompt_length + used_tokens < token_budget:
-                used_tokens += prompt_length
-                #Find World Info entries in prompt
-                for wi in self.worldinfo_v2:
-                    if wi['uid'] not in used_world_info:
-                        #Check to see if we have the keys/secondary keys in the text so far
-                        match = False
-                        for key in wi['key']:
-                            if key in prompt_text:
-                                match = True
-                                break
-                        if wi['selective'] and match:
-                            match = False
-                            for key in wi['keysecondary']:
-                                if key in prompt_text:
-                                    match=True
-                                    break
-                        if match:
-                            if used_tokens+0 if 'token_length' not in wi or wi['token_length'] is None else wi['token_length'] <= token_budget:
-                                used_tokens+=0 if wi['token_length'] is None else wi['token_length']
-                                used_world_info.append(wi['uid'])
-                                wi_text = wi['content']
-                                context.append({"type": "world_info", "text": wi_text, "tokens": self.get_token_representation(wi_text)})
-                                text += wi_text
-                                used_tokens += wi['token_length']
-                                self.worldinfo_v2.set_world_info_used(wi['uid'])
-                text += prompt_text
-                context.append({"type": "prompt", "text": prompt_text, "tokens": self.get_token_representation(prompt_text)})
-                self.prompt_in_ai = True
-            else:
-                self.prompt_in_ai = False
-        else:
-            self.prompt_in_ai = False
-        text += game_text
+        if not inserted_author_note and self.authornote != "":
+            game_context.insert(0, {"type": "authors_note", "text": authors_note_text, "tokens": authors_note_data})
+        ######################################### Add our prompt data ########################################################
+        if self.useprompt and len(prompt_data) != 0:
+            context.append({"type": "prompt", "text": prompt_text, "tokens": prompt_data})
         context += game_context
-        if self.tokenizer is None:
+        if len(context) == 0:
             tokens = []
         else:
-            tokens = self.tokenizer.encode(text)
+            tokens = [x['tokens'][0] for x in context]
         self.context = context
         #logger.debug("Calc_AI_text: {}s".format(time.time()-start_time))
         logger.debug("Token Budget: {}. Used Tokens: {}".format(token_budget, used_tokens))
         if return_text:
-            return text
+            return "".join([x['text'] for x in context])
         return tokens, used_tokens, used_tokens+self.genamt, used_world_info
     def is_model_torch(self) -> bool:
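Alongside the spacing fix, calc_ai_text switches every context entry to a token-pair representation: "tokens" is a list of [token_id, decoded_text] pairs, so the text sent to the model and the text shown in the UI derive from the same data instead of a separately accumulated string. A sketch of the idea, assuming a Hugging Face-style tokenizer whose decode accepts a single token id; to_token_pairs and join_context are illustrative names, not functions from the diff:

# Illustrative only: mirrors the [[token_id, decoded_text], ...] shape
# the new context entries carry.
def to_token_pairs(tokenizer, text):
    # One pair per token, so the UI can show exactly which characters
    # each token covers.
    return [[t, tokenizer.decode(t)] for t in tokenizer.encode(text)]

def join_context(entries):
    # Rebuild the outgoing text from the stored pairs instead of
    # keeping a separate, drift-prone "text" accumulator.
    return "".join(pair[1] for entry in entries for pair in entry["tokens"])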
@@ -492,10 +467,8 @@ class koboldai_vars(object):
             setattr(self._system_settings, name, value)
         else:
             setattr(self._story_settings[self.get_story_name()], name, value)
-            if name == 'tokenizer':
-                self._story_settings[self.get_story_name()].worldinfo_v2.recalc_token_length(None)
-            setattr(self._story_settings[self.get_story_name()], name, value)
     def __getattr__(self, name):
         if name in self.__dict__:
             return getattr(self, name)
@@ -1128,7 +1101,7 @@ class KoboldStoryRegister(object):
         self.koboldai_vars = koboldai_vars
         #### DO NOT DIRECTLY EDIT THE ACTIONS DICT. IT WILL NOT TRANSMIT TO CLIENT. USE FUCTIONS BELOW TO DO SO ###
         #### doing actions[x] = game text is OK
-        self.actions = {} #keys = "Selected Text", "WI Search Text", "Wi_highlighted_text", "Options", "Selected Text Length", "In AI Input", "Probabilities".
+        self.actions = {} #keys = "Selected Text", "Wi_highlighted_text", "Options", "Selected Text Length", "Probabilities".
         #Options being a list of dict with keys of "text", "Pinned", "Previous Selection", "Edited", "Probabilities"
         self.action_count = -1
         self.story_settings = story_settings
@@ -1179,7 +1152,6 @@ class KoboldStoryRegister(object):
                 old_length = self.actions[i]["Selected Text Length"]
                 if self.actions[i]["Selected Text"] != text:
                     self.actions[i]["Selected Text"] = text
-                    self.actions[i]["WI Search Text"] = re.sub("[^0-9a-z \'\"]", "", text)
                     self.actions[i]["Probabilities"] = []
                     if "Options" in self.actions[i]:
                         for j in range(len(self.actions[i]["Options"])):
@@ -1191,13 +1163,12 @@ class KoboldStoryRegister(object):
                 old_text = None
                 old_length = None
                 old = None
-                self.actions[i] = {"Selected Text": text, "WI Search Text": re.sub("[^0-9a-z \'\"]", "", text), "Probabilities": [], "Options": []}
+                self.actions[i] = {"Selected Text": text, "Probabilities": [], "Options": []}
                 if self.koboldai_vars.tokenizer is not None:
                     self.actions[i]['Selected Text Length'] = len(self.koboldai_vars.tokenizer.encode(text))
                 else:
                     self.actions[i]['Selected Text Length'] = 0
-                self.actions[i]["In AI Input"] = False
             process_variable_changes(self.socketio, "story", 'actions', {"id": i, 'action': self.actions[i]}, old)
             logger.debug("Calcing AI Text from Action __setitem__")
             ignore = self.koboldai_vars.calc_ai_text()
@@ -1231,8 +1202,6 @@ class KoboldStoryRegister(object):
         data_to_send = []
         for item in json_data['actions']:
             temp[int(item)] = json_data['actions'][item]
-            if "WI Search Text" not in temp[int(item)]:
-                temp[int(item)]["WI Search Text"] = re.sub("[^0-9a-z \'\"]", "", temp[int(item)]['Selected Text'])
             if int(item) >= self.action_count-100:
                 data_to_send.append({"id": item, 'action': temp[int(item)]})
@@ -1250,11 +1219,9 @@ class KoboldStoryRegister(object):
         if action_id in self.actions:
             if self.actions[action_id]["Selected Text"] != text:
                 self.actions[action_id]["Selected Text"] = text
-                self.actions[action_id]["WI Search Text"] = re.sub("[^0-9a-z \'\"]", "", text)
                 self.actions[action_id]["Probabilities"] = []
                 selected_text_length = 0
                 self.actions[action_id]["Selected Text Length"] = selected_text_length
-                self.actions[action_id]["In AI Input"] = False
             for item in self.actions[action_id]["Options"]:
                 if item['text'] == text:
                     old_options = self.actions[action_id]["Options"]
@@ -1264,8 +1231,7 @@ class KoboldStoryRegister(object):
             selected_text_length = 0
             self.actions[action_id] = {"Selected Text": text, "Selected Text Length": selected_text_length,
-                                       "WI Search Text": re.sub("[^0-9a-z \'\"]", "", text),
-                                       "In AI Input": False, "Options": [], "Probabilities": []}
+                                       "Options": [], "Probabilities": []}
         process_variable_changes(self.socketio, "story", 'actions', {"id": action_id, 'action': self.actions[action_id]}, None)
         self.set_game_saved()
@@ -1298,7 +1264,7 @@ class KoboldStoryRegister(object):
                 self.actions[self.action_count+1]['Options'].append({"text": option, "Pinned": False, "Previous Selection": False, "Edited": False, "Probabilities": []})
         else:
             old_options = None
-            self.actions[self.action_count+1] = {"Selected Text": "", "Selected Text Length": 0, "In AI Input": False, "Options": [{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False, "Probabilities": []} for x in option_list]}
+            self.actions[self.action_count+1] = {"Selected Text": "", "Selected Text Length": 0, "Options": [{"text": x, "Pinned": False, "Previous Selection": False, "Edited": False, "Probabilities": []} for x in option_list]}
         process_variable_changes(self.socketio, "story", 'actions', {"id": self.action_count+1, 'action': self.actions[self.action_count+1]}, None)
         self.set_game_saved()
@@ -1329,19 +1295,6 @@ class KoboldStoryRegister(object):
             process_variable_changes(self.socketio, "story", 'actions', {"id": pointer, 'action': self.actions[pointer]}, None)
         self.set_game_saved()
-    def set_action_in_ai(self, action_id, used=True):
-        return
-        if action_id in self.actions:
-            if 'In AI Input' in self.actions[action_id]:
-                old = self.actions[action_id]['In AI Input']
-            else:
-                old = None
-            self.actions[action_id]['In AI Input'] = used
-            if old != used:
-                process_variable_changes(self.socketio, "story", 'actions', {"id": action_id, 'action': self.actions[action_id]}, None)
-            if used:
-                self.recalc_token_length(action_id)
     def set_pin(self, action_step, option_number):
         if action_step in self.actions:
             if option_number < len(self.actions[action_step]['Options']):
@@ -1375,7 +1328,6 @@ class KoboldStoryRegister(object):
             old_length = self.actions[action_step]["Selected Text Length"]
             if option_number < len(self.actions[action_step]['Options']):
                 self.actions[action_step]["Selected Text"] = self.actions[action_step]['Options'][option_number]['text']
-                self.actions[action_step]["WI Search Text"] = re.sub("[^0-9a-z \'\"]", "", self.actions[action_step]["Selected Text"])
                 if 'Probabilities' in self.actions[action_step]['Options'][option_number]:
                     self.actions[action_step]["Probabilities"] = self.actions[action_step]['Options'][option_number]['Probabilities']
                 if self.koboldai_vars.tokenizer is not None:
@@ -1458,18 +1410,6 @@ class KoboldStoryRegister(object):
         if self.story_settings is not None:
             self.story_settings.gamesaved = False
-    def recalc_token_length(self, action_id):
-        if self.koboldai_vars.tokenizer is not None:
-            if action_id in self.actions:
-                if self.actions[action_id]['In AI Input']:
-                    self.actions[action_id]['Selected Text Length'] = len(self.koboldai_vars.tokenizer.encode(self.actions[action_id]['Selected Text']))
-                    process_variable_changes(self.socketio, "story", 'actions', {"id": action_id, 'action': self.actions[action_id]}, None)
-                    eventlet.sleep(0.01)
-        else:
-            for key in self.actions:
-                self.actions[key]['Selected Text Length'] = 0
-                process_variable_changes(self.socketio, "story", 'actions', {"id": key, 'action': self.actions[key]}, None)
     def stream_tokens(self, text_list):
         if len(text_list) > 1:
             if self.action_count+1 in self.actions:
@@ -1634,27 +1574,6 @@ class KoboldWorldInfo(object):
     def __len__(self):
         return len(self.world_info)
-    def recalc_token_length(self, uid):
-        if self.koboldai_vars.tokenizer is not None:
-            if uid is not None:
-                if uid in self.world_info:
-                    logger.debug("Sending single world info after tokenizing {}: {}".format(uid, self.world_info[uid]['title']))
-                    self.world_info[uid]['token_length'] = len(self.koboldai_vars.tokenizer.encode(self.world_info[uid]['content']))
-                    self.socketio.emit("world_info_entry", self.world_info[uid], broadcast=True, room="UI_2")
-            else:
-                for uid in self.world_info:
-                    self.world_info[uid]['token_length'] = len(self.koboldai_vars.tokenizer.encode(self.world_info[uid]['content']))
-                self.send_to_ui()
-        else:
-            had_change = False
-            for uid in self.world_info:
-                if self.world_info[uid]['token_length'] != 0 and not had_change:
-                    had_change = True
-                self.world_info[uid]['token_length'] = 0
-            if had_change:
-                self.send_to_ui()
     def add_folder(self, folder):
         if folder in self.world_info_folder:
             i=0
@@ -1988,7 +1907,6 @@ class KoboldWorldInfo(object):
             logger.warning("Something tried to set world info UID {} to in game, but it doesn't exist".format(uid))
         if self.socketio is not None:
             self.socketio.emit("world_info_entry_used_in_game", {"uid": uid, "used_in_game": True}, broadcast=True, room="UI_2")
-            #self.recalc_token_length(uid)
     def get_used_wi(self):
         return [x['content'] for x in self.world_info if x['used_in_game']]
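The deletions above all serve one goal: stop caching per-entry token lengths ("token_length", recalc_token_length) and per-action search strings ("WI Search Text", "In AI Input"), and compute them on demand inside calc_ai_text instead. The world info key match, for example, now filters the text at match time with the same expression the old code cached per action. A sketch of that filter, written with an equivalent raw-string regex; the function name is illustrative, not from the diff:

import re

# Illustrative: the on-demand search string built by the diff's
# wi_search expression; the old code cached this as "WI Search Text".
def wi_search_text(text):
    return re.sub(r"[^A-Za-z 0-9'\"]", "", text)

print(wi_search_text("The *castle* gate!"))  # -> The castle gate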

static/koboldai.js

@@ -2965,6 +2965,10 @@ function update_context(data) {
     for (action of document.getElementsByClassName("within_max_length")) {
         action.classList.remove("within_max_length");
     }
+    for (wi of document.getElementsByClassName("used_in_game")) {
+        wi.classList.remove("used_in_game");
+    }
     for (const entry of data) {
         let contextClass = "context-" + ({
@@ -3006,9 +3010,15 @@ function update_context(data) {
                 break;
             case 'prompt':
                 prompt_length = entry.tokens.length;
+                if (prompt_length > 0) {
+                    document.getElementById('story_prompt').classList.add("within_max_length");
+                }
                 break;
             case 'world_info':
                 world_info_length += entry.tokens.length;
+                if (document.getElementById('world_info_'+entry.uid)) {
+                    document.getElementById('world_info_'+entry.uid).classList.add("used_in_game");
+                }
                 break;
             case 'memory':
                 memory_length = entry.tokens.length;
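On the UI side, update_context can now tie a world info context entry back to its on-screen element because the server includes the entry's uid. A hedged sketch of the entry shape the handler consumes; the field names mirror the diff, while the values (and token ids) are made up for illustration:

# Values are illustrative; only the field names come from the diff.
entry = {
    "type": "world_info",
    "text": "Dragons hoard gold. ",
    "uid": 3,  # UI adds class "used_in_game" to the element with id "world_info_3"
    "tokens": [[31, "Dragons"], [995, " hoard"], [3869, " gold"], [13, "."], [220, " "]],
}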