Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-06-05 21:59:24 +02:00
Working on context viewer
aiserver.py
@@ -4394,6 +4394,8 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
     anotetkns = []  # Placeholder for Author's Note tokens
     lnanote   = 0   # Placeholder for Author's Note length
 
+    context = []
+
     lnsp = koboldai_vars.sp_length
 
     if("tokenizer" not in globals()):
@@ -4436,14 +4438,34 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
 
     assert budget >= 0
+
+    # Context behavior shared
+    if lnsp:
+        context.append({"type": "soft_prompt", "tokens": koboldai_vars.sp})
+    if koboldai_vars.model not in ("Colab", "API", "OAI") and tokenizer._koboldai_header:
+        context.append({"type": "header", "tokens": tokenizer._koboldai_header})
+    if memtokens:
+        context.append({"type": "memory", "tokens": memtokens})
+    if witokens:
+        context.append({"type": "world_info", "tokens": witokens})
 
     if(actionlen == 0):
         # First/Prompt action
         tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + anotetkns + prompttkns
         assert len(tokens) <= koboldai_vars.max_length - lnsp - koboldai_vars.genamt - budget_deduction
         ln = len(tokens) + lnsp
+
+        if anotetkns:
+            context.append({"type": "authors_note", "tokens": anotetkns})
+
+        if prompttkns:
+            context.append({"type": "prompt", "tokens": prompttkns})
+
+
+        koboldai_vars.context = context
         return tokens, ln+1, ln+koboldai_vars.genamt
     else:
         tokens = []
+        local_context = []
 
         # Check if we have the action depth to hit our A.N. depth
         if(anotetxt != "" and actionlen < koboldai_vars.andepth):
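This hunk is the core of the change: alongside the flat `tokens` list sent to the generator, `calcsubmitbudget` now records which feature each token span came from. Every `context` entry pairs a type label with the token IDs that block contributed. A minimal sketch of the structure the viewer ends up with (token IDs are invented placeholders, not real tokenizer output):

    # Hypothetical snapshot of koboldai_vars.context after a first-turn submit.
    # The token IDs below are placeholders for illustration only.
    context = [
        {"type": "soft_prompt",  "tokens": [50257, 50258, 50259]},
        {"type": "header",       "tokens": [27, 91, 437]},
        {"type": "memory",       "tokens": [1639, 389, 257]},
        {"type": "world_info",   "tokens": [32, 6176, 318]},
        {"type": "authors_note", "tokens": [58, 10262, 60]},
        {"type": "prompt",       "tokens": [7454, 2402, 257]},
    ]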
@@ -4462,10 +4484,13 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
             if(tknlen < budget):
                 tokens = acttkns + tokens
                 budget -= tknlen
+                local_context.insert(0, {"type": "action", "tokens": acttkns})
             else:
                 count = budget * -1
-                tokens = acttkns[count:] + tokens
+                truncated_action_tokens = acttkns[count:]
+                tokens = truncated_action_tokens + tokens
                 budget = 0
+                local_context.insert(0, {"type": "action", "tokens": truncated_action_tokens})
                 break
 
             # Inject Author's Note if we've reached the desired depth
@@ -4473,6 +4498,7 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
                 if(anotetxt != ""):
                     tokens = anotetkns + tokens # A.N. len already taken from bdgt
                     anoteadded = True
+                    local_context.insert(0, {"type": "authors_note", "tokens": anotetkns})
             n += 1
 
         # If we're not using the prompt every time and there's still budget left,
@@ -4486,16 +4512,25 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None,
         # Did we get to add the A.N.? If not, do it here
         if(anotetxt != ""):
             if((not anoteadded) or forceanote):
+                # header, mem, wi, anote, prompt, actions
+                local_context.insert(0, {"type": "authors_note", "tokens": anotetkns})
+                local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
                 tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + anotetkns + prompttkns + tokens
             else:
+                local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
                 tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + prompttkns + tokens
         else:
             # Prepend Memory, WI, and Prompt before action tokens
+            local_context.insert(0, {"type": "prompt", "tokens": prompttkns})
             tokens = (tokenizer._koboldai_header if koboldai_vars.model not in ("Colab", "API", "OAI") else []) + memtokens + witokens + prompttkns + tokens
 
         # Send completed bundle to generator
         assert len(tokens) <= koboldai_vars.max_length - lnsp - koboldai_vars.genamt - budget_deduction
         ln = len(tokens) + lnsp
+
+        context += local_context
+        koboldai_vars.context = context
+
         return tokens, ln+1, ln+koboldai_vars.genamt
 
 #==================================================================#
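Note the ordering trick in the multi-action branch above: the budget loop walks actions newest-first, so each surviving action is pushed onto the front of `local_context`, and the Author's Note and prompt are prepended only after the loop; `context += local_context` then splices the story-order entries after the shared header/memory/world-info blocks. A self-contained sketch of that pattern (data is hypothetical):

    # Actions are visited newest-first, exactly like the budget loop above;
    # insert(0, ...) restores chronological order for the viewer.
    actions = [[1], [2], [3]]                # oldest ... newest token lists
    local_context = []
    for acttkns in reversed(actions):        # newest-first walk
        local_context.insert(0, {"type": "action", "tokens": acttkns})
    local_context.insert(0, {"type": "prompt", "tokens": [0]})
    assert [e["tokens"] for e in local_context] == [[0], [1], [2], [3]]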
@@ -109,6 +109,7 @@ class koboldai_vars(object):
         self._model_settings.reset_for_model_load()
 
     def calc_ai_text(self, submitted_text=""):
+        context = []
         token_budget = self.max_length
         used_world_info = []
         if self.tokenizer is None:
@@ -116,6 +117,11 @@ class koboldai_vars(object):
         else:
             used_tokens = self.sp_length
         text = ""
+
+        if koboldai_vars.sp:
+            context.append({"type": "soft_prompt", "text": f"<{self.sp_length} tokens of Soft Prompt.>"})
+        if koboldai_vars.model not in ("Colab", "API", "OAI") and self.tokenizer._koboldai_header:
+            context.append({"type": "header", "tokens": self.tokenizer._koboldai_header})
 
         self.worldinfo_v2.reset_used_in_game()
 
@@ -124,11 +130,14 @@ class koboldai_vars(object):
         if memory_length+used_tokens <= token_budget:
             if self.memory_length > self.max_memory_length:
                 if self.tokenizer is None:
-                    text = self.memory
+                    memory_text = self.memory
                 else:
-                    text += self.tokenizer.decode(self.tokenizer.encode(self.memory)[-self.max_memory_length-1:])
+                    memory_text += self.tokenizer.decode(self.tokenizer.encode(self.memory)[-self.max_memory_length-1:])
             else:
-                text += self.memory
+                memory_text += self.memory
+
+            context.append({"type": "memory", "text": memory_text})
+            text += memory_text
 
         #Add constant world info entries to memory
         for wi in self.worldinfo_v2:
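The memory trim above keeps only the tail of an over-long memory by round-tripping through the tokenizer: encode, slice off the last N token IDs, decode. (As written, the slice `[-self.max_memory_length-1:]` keeps one token more than `max_memory_length`.) A standalone sketch of the technique, using the Hugging Face `transformers` GPT-2 tokenizer purely as an example stand-in for whatever tokenizer the loaded model provides:

    from transformers import AutoTokenizer  # example tokenizer only

    tokenizer = AutoTokenizer.from_pretrained("gpt2")

    def trim_to_last_tokens(text: str, max_tokens: int) -> str:
        """Return the text covered by the final max_tokens tokens."""
        ids = tokenizer.encode(text)
        if len(ids) <= max_tokens:
            return text
        return tokenizer.decode(ids[-max_tokens:])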
@@ -455,7 +464,7 @@ class model_settings(settings):
 
 class story_settings(settings):
     local_only_variables = ['socketio', 'tokenizer', 'koboldai_vars']
-    no_save_variables = ['socketio', 'tokenizer', 'koboldai_vars']
+    no_save_variables = ['socketio', 'tokenizer', 'koboldai_vars', 'context']
     settings_name = "story"
     def __init__(self, socketio, koboldai_vars, tokenizer=None):
         self.socketio = socketio
@@ -514,6 +523,7 @@ class story_settings(settings):
         self.max_prompt_length = 512
         self.max_authornote_length = 512
         self.prompt_in_ai = False
+        self.context = []
 
     def save_story(self):
         print("Saving")
@@ -468,6 +468,10 @@ function var_changed(data) {
             option.setAttribute("title", sp[1][1]);
             item.append(option);
         }
+    //Special case for context viewer
+    } else if (data.classname == "story" && data.name == "context") {
+        console.log("HELLO FRIENDS!!!")
+        console.log(data.value)
     //Basic Data Syncing
     } else {
         var elements_to_change = document.getElementsByClassName("var_sync_"+data.classname.replace(" ", "_")+"_"+data.name.replace(" ", "_"));
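On the client, `var_changed` messages carry the owning settings class, the variable name, and its new value; the new branch only logs the context payload for now. Assuming the value mirrors the list assembled in `calc_ai_text`, a message might look like the following (rendered as a Python literal; field contents are illustrative):

    # Hypothetical var_changed payload for the context viewer.
    payload = {
        "classname": "story",
        "name": "context",
        "value": [
            {"type": "soft_prompt", "text": "<20 tokens of Soft Prompt.>"},
            {"type": "memory", "text": "You are a brave knight..."},
            {"type": "prompt", "tokens": [7454, 2402, 257]},
        ],
    }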