mirror of https://github.com/KoboldAI/KoboldAI-Client.git
synced 2025-06-05 21:59:24 +02:00
Working on Deterministic AI UI changes and dynamic world info bug

aiserver.py (45 changed lines)
@@ -2375,18 +2375,18 @@ def patch_transformers():
         if not koboldai_vars.inference_config.do_dynamic_wi:
             return False
 
+        if not koboldai_vars.dynamicscan:
+            return False
+
         if len(self.excluded_world_info) != input_ids.shape[0]:
             print(tokenizer.decode(self.excluded_world_info))
             print(tokenizer.decode(input_ids.shape[0]))
             assert len(self.excluded_world_info) == input_ids.shape[0]
 
-        if not koboldai_vars.dynamicscan:
-            return False
-
         tail = input_ids[..., -koboldai_vars.generated_tkns:]
         for i, t in enumerate(tail):
             decoded = utils.decodenewlines(tokenizer.decode(t))
-            _, _, _, found = koboldai_vars.calc_ai_text(submitted_text=decoded)
+            _, _, _, found = koboldai_vars.calc_ai_text(submitted_text=decoded, send_context=False)
             found = list(set(found) - set(self.excluded_world_info[i]))
             if len(found) != 0:
                 print("Found: {}".format(found))
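
This hunk moves the dynamicscan early-out ahead of the batch-size assertion, so the debug prints and assert only run when dynamic world-info scanning is actually enabled, and it passes send_context=False so the scan probe does not overwrite the UI-facing context (see the koboldai_vars hunk below). A minimal standalone sketch of the stop check, with hypothetical names standing in for the real tokenizer and world-info scanner:

    # Minimal sketch of the dynamic world-info stop check (hypothetical helper
    # names; the real version lives inside a transformers stopping criterion).
    from typing import Callable, List, Set

    def should_stop_for_wi(
        decoded_tails: List[str],            # decoded new tokens per sequence
        excluded: List[Set[str]],            # WI entries already triggered per sequence
        scan_for_wi: Callable[[str], Set[str]],
    ) -> bool:
        for i, tail in enumerate(decoded_tails):
            found = scan_for_wi(tail) - excluded[i]
            if found:
                # A new entry matched freshly generated text: halt generation
                # so the entry can be injected into the context before resuming.
                print("Found: {}".format(sorted(found)))
                return True
        return False

    # Stop as soon as "dragon" shows up in any sequence's new tokens.
    scan = lambda text: {k for k in ("dragon", "castle") if k in text}
    print(should_stop_for_wi(["a dragon appears"], [set()], scan))  # True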

@@ -5279,7 +5279,7 @@ def core_generate(text: list, min: int, max: int, found_entries: set, is_core: b
     assert genout.shape[0] == koboldai_vars.numseqs
 
     if(koboldai_vars.lua_koboldbridge.generated_cols and koboldai_vars.generated_tkns != koboldai_vars.lua_koboldbridge.generated_cols):
-        raise RuntimeError("Inconsistency detected between KoboldAI Python and Lua backends")
+        raise RuntimeError(f"Inconsistency detected between KoboldAI Python and Lua backends ({koboldai_vars.generated_tkns} != {koboldai_vars.lua_koboldbridge.generated_cols})")
 
     if(already_generated != koboldai_vars.generated_tkns):
         raise RuntimeError("WI scanning error")
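
The only change here is a richer error message: interpolating both counters makes the Python/Lua token-count mismatch visible in the traceback. Illustrated with made-up values:

    # Made-up counter values; shows the shape of the new message only.
    generated_tkns, generated_cols = 80, 64
    try:
        if generated_cols and generated_tkns != generated_cols:
            raise RuntimeError(
                f"Inconsistency detected between KoboldAI Python and Lua backends "
                f"({generated_tkns} != {generated_cols})"
            )
    except RuntimeError as e:
        print(e)  # ... backends (80 != 64)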

@@ -7769,18 +7769,7 @@ def final_startup():
     )
 
     # Set the initial RNG seed
-    if(koboldai_vars.seed is not None):
-        if(koboldai_vars.use_colab_tpu):
-            if(koboldai_vars.seed_specified):
-                __import__("tpu_mtj_backend").set_rng_seed(koboldai_vars.seed)
-            else:
-                __import__("tpu_mtj_backend").randomize_rng_seed()
-        else:
-            if(koboldai_vars.seed_specified):
-                __import__("torch").manual_seed(koboldai_vars.seed)
-            else:
-                __import__("torch").seed()
-    koboldai_vars.seed = __import__("tpu_mtj_backend").get_rng_seed() if koboldai_vars.use_colab_tpu else __import__("torch").initial_seed()
+    set_seed()
 
 def send_debug():
     if koboldai_vars.debug:

@@ -8249,8 +8238,30 @@ def UI_2_var_change(data):
     with open(filename, "w") as settings_file:
         settings_file.write(getattr(koboldai_vars, "_{}".format(classname)).to_json())
 
+    if name in ['seed', 'seed_specified']:
+        set_seed()
+
     return {'id': data['ID'], 'status': "Saved"}
 
+
+#==================================================================#
+# Set the random seed (or constant seed) for generation
+#==================================================================#
+def set_seed():
+    print("Setting Seed")
+    if(koboldai_vars.seed is not None):
+        if(koboldai_vars.use_colab_tpu):
+            if(koboldai_vars.seed_specified):
+                __import__("tpu_mtj_backend").set_rng_seed(koboldai_vars.seed)
+            else:
+                __import__("tpu_mtj_backend").randomize_rng_seed()
+        else:
+            if(koboldai_vars.seed_specified):
+                __import__("torch").manual_seed(koboldai_vars.seed)
+            else:
+                __import__("torch").seed()
+    koboldai_vars.seed = __import__("tpu_mtj_backend").get_rng_seed() if koboldai_vars.use_colab_tpu else __import__("torch").initial_seed()
+
 #==================================================================#
 # Saving Story
 #==================================================================#
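
Extracting the inline seeding block into set_seed() lets UI_2_var_change() re-seed immediately when the user edits seed or seed_specified. A sketch of the torch branch only (the tpu_mtj_backend branch is omitted), assuming nothing beyond an installed torch:

    # Sketch of set_seed()'s torch path. A fixed, user-specified seed gives
    # reproducible sampling; otherwise torch is re-seeded nondeterministically.
    import torch

    def set_seed_sketch(seed=None, seed_specified=False):
        if seed is not None:
            if seed_specified:
                torch.manual_seed(seed)   # fixed seed -> same draws every run
            else:
                torch.seed()              # fresh nondeterministic seed
        return torch.initial_seed()       # the seed actually in effect

    set_seed_sketch(seed=1234, seed_specified=True)
    print(torch.rand(1))  # identical output on every run with the same seed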

@@ -316,17 +316,6 @@ gensettingstf = [
     "classname": "user",
     "name": "nogenmod"
     },
-    {
-    "uitype": "toggle",
-    "unit": "bool",
-    "label": "Full Determinism",
-    "id": "setfulldeterminism",
-    "min": 0,
-    "max": 1,
-    "step": 1,
-    "default": 0,
-    "tooltip": "Causes generation to be fully deterministic. The model will always generate the same thing as long as your story, settings and RNG seed are the same. If disabled, only the sequence of outputs the model generates is deterministic."
-    },
     {
     "uitype": "toggle",
     "unit": "bool",

@@ -611,6 +600,37 @@ gensettingstf = [
     "classname": "user",
     "name": "privacy_password"
     },
+    {
+    "uitype": "toggle",
+    "unit": "bool",
+    "label": "Full Determinism",
+    "id": "setfulldeterminism",
+    "min": 0,
+    "max": 1,
+    "step": 1,
+    "default": 0,
+    "tooltip": "Causes generation to be fully deterministic. The model will always generate the same thing as long as your story, settings and RNG seed are the same. If disabled, only the sequence of outputs the model generates is deterministic.",
+    "menu_path": "Settings",
+    "sub_path": "Other",
+    "classname": "system",
+    "name": "seed_specified"
+    },
+    {
+    "uitype": "text",
+    "unit": "text",
+    "label": "RNG seed",
+    "id": "seed",
+    "min": 0,
+    "max": 1,
+    "step": 1,
+    "default": 0,
+    "tooltip": "The seed number used to generate the AI text. Output will change if this number is changed.",
+    "menu_path": "Settings",
+    "sub_path": "Other",
+    "classname": "system",
+    "name": "seed",
+    "extra_classes": "var_sync_alt_system_seed_specified"
+    },
 ]
 
 gensettingsik =[{
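
The Full Determinism toggle removed in the hunk at -316 is re-added here with UI2 placement metadata (menu_path/sub_path) and bound to the system class instead of user, alongside a new RNG seed text field. A small sketch of how placement metadata like this can be queried, with settings_for as a hypothetical helper and the entries trimmed to the fields used:

    # Entries trimmed to placement fields; settings_for is hypothetical.
    gensettingstf = [
        {"label": "Full Determinism", "id": "setfulldeterminism",
         "menu_path": "Settings", "sub_path": "Other",
         "classname": "system", "name": "seed_specified"},
        {"label": "RNG seed", "id": "seed",
         "menu_path": "Settings", "sub_path": "Other",
         "classname": "system", "name": "seed"},
    ]

    def settings_for(menu: str, sub: str):
        return [s for s in gensettingstf
                if (s.get("menu_path"), s.get("sub_path")) == (menu, sub)]

    print([s["label"] for s in settings_for("Settings", "Other")])
    # ['Full Determinism', 'RNG seed']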

@@ -197,7 +197,7 @@ class koboldai_vars(object):
         # TODO: This might be ineffecient, should we cache some of this?
         return [[token, self.tokenizer.decode(token)] for token in encoded]
 
-    def calc_ai_text(self, submitted_text="", return_text=False):
+    def calc_ai_text(self, submitted_text="", return_text=False, send_context=True):
         #start_time = time.time()
         if self.tokenizer is None:
             if return_text:

@@ -443,6 +443,7 @@ class koboldai_vars(object):
         for item in context:
             tokens.extend([x[0] for x in item['tokens']])
 
+        if send_context:
             self.context = context
 
         #logger.debug("Calc_AI_text: {}s".format(time.time()-start_time))
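
calc_ai_text() gains a send_context flag (defaulting to True, so existing callers keep their behavior) that lets the dynamic-WI probe assemble and scan a context without publishing it to self.context, which the UI renders. A reduced sketch of the guard pattern, using a stand-in class rather than the real koboldai_vars:

    # Reduced sketch of the send_context guard; the class body is a stand-in.
    class Vars:
        def __init__(self):
            self.context = []   # what the UI displays

        def calc_ai_text(self, submitted_text="", send_context=True):
            context = [{"type": "submitted_text", "text": submitted_text}]
            tokens = submitted_text.split()   # stand-in for real tokenization
            if send_context:
                self.context = context        # publish only when asked to
            return tokens

    v = Vars()
    v.calc_ai_text("probe text from a WI scan", send_context=False)
    print(v.context)  # [] -- the probe did not clobber the displayed context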

@@ -2953,3 +2953,7 @@ select {
     filter: blur(0px) !important;
     -webkit-filter: blur(0px) !important;
 }
+
+#seed_card[system_seed_specified=false] {
+    display: none;
+}
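
The new CSS rule pairs with the seed entry's extra_classes hook above: the seed card presumably carries a system_seed_specified attribute synced from the seed_specified variable, so #seed_card[system_seed_specified=false] hides the manual RNG seed field whenever Full Determinism is switched off.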