Merge branch 'UI2' into ui2-finder

This commit is contained in:
ebolam
2022-09-02 20:31:57 -04:00
committed by GitHub
9 changed files with 215 additions and 298 deletions

View File

@@ -876,61 +876,14 @@ def loadmodelsettings():
#==================================================================#
def savesettings():
# Build json to write
js = {}
js["apikey"] = koboldai_vars.apikey
js["andepth"] = koboldai_vars.andepth
js["sampler_order"] = koboldai_vars.sampler_order
js["temp"] = koboldai_vars.temp
js["top_p"] = koboldai_vars.top_p
js["top_k"] = koboldai_vars.top_k
js["tfs"] = koboldai_vars.tfs
js["typical"] = koboldai_vars.typical
js["top_a"] = koboldai_vars.top_a
js["rep_pen"] = koboldai_vars.rep_pen
js["rep_pen_slope"] = koboldai_vars.rep_pen_slope
js["rep_pen_range"] = koboldai_vars.rep_pen_range
js["genamt"] = koboldai_vars.genamt
js["max_length"] = koboldai_vars.max_length
js["ikgen"] = koboldai_vars.ikgen
js["formatoptns"] = {'frmttriminc': koboldai_vars.frmttriminc, 'frmtrmblln': koboldai_vars.frmtrmblln,
'frmtrmspch': koboldai_vars.frmtrmspch, 'frmtadsnsp': koboldai_vars.frmtadsnsp, 'singleline': koboldai_vars.singleline}
js["numseqs"] = koboldai_vars.numseqs
js["widepth"] = koboldai_vars.widepth
js["useprompt"] = koboldai_vars.useprompt
js["adventure"] = koboldai_vars.adventure
js["chatmode"] = koboldai_vars.chatmode
js["chatname"] = koboldai_vars.chatname
js["dynamicscan"] = koboldai_vars.dynamicscan
js["nopromptgen"] = koboldai_vars.nopromptgen
js["rngpersist"] = koboldai_vars.rngpersist
js["nogenmod"] = koboldai_vars.nogenmod
js["fulldeterminism"] = koboldai_vars.full_determinism
js["autosave"] = koboldai_vars.autosave
js["welcome"] = koboldai_vars.welcome
js["output_streaming"] = koboldai_vars.output_streaming
js["show_probs"] = koboldai_vars.show_probs
if(koboldai_vars.seed_specified):
js["seed"] = koboldai_vars.seed
else:
js["seed"] = None
js["newlinemode"] = koboldai_vars.newlinemode
js["antemplate"] = koboldai_vars.setauthornotetemplate
js["userscripts"] = koboldai_vars.userscripts
js["corescript"] = koboldai_vars.corescript
js["softprompt"] = koboldai_vars.spfilename
# Write it
if not os.path.exists('settings'):
os.mkdir('settings')
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
try:
file.write(json.dumps(js, indent=3))
finally:
file.close()
for setting in ['model_settings', 'user_settings', 'system_settings']:
if setting == "model_settings":
filename = "settings/{}.v2_settings".format(koboldai_vars.model.replace("/", "_"))
else:
filename = "settings/{}.v2_settings".format(setting)
with open(filename, "w") as settings_file:
settings_file.write(getattr(koboldai_vars, "_{}".format(setting)).to_json())
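
# A minimal sketch (not part of the diff) of the round-trip the three
# v2_settings files above imply. It assumes only that the settings classes
# expose to_json()/from_json(), as shown in koboldai_settings.py further
# down; the helper names are hypothetical:
def save_v2(obj, path):
    with open(path, "w") as f:
        f.write(obj.to_json())

def load_v2(obj, path):
    with open(path, "r") as f:
        obj.from_json(f.read())
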
#==================================================================#
# Don't save settings unless 2 seconds have passed without modification
@@ -945,121 +898,10 @@ def settingschanged():
#==================================================================#
def loadsettings():
if(path.exists("defaults/" + getmodelname().replace('/', '_') + ".settings")):
# Read file contents into JSON object
file = open("defaults/" + getmodelname().replace('/', '_') + ".settings", "r")
js = json.load(file)
if(path.exists("settings/" + getmodelname().replace('/', '_') + ".v2_settings")):
with open("settings/" + getmodelname().replace('/', '_') + ".v2_settings", "r") as file:
getattr(koboldai_vars, "_model_settings").from_json(file.read())
processsettings(js)
file.close()
if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
# Read file contents into JSON object
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r")
js = json.load(file)
processsettings(js)
file.close()
def processsettings(js):
# Copy file contents to koboldai_vars
if("apikey" in js):
koboldai_vars.apikey = js["apikey"]
if("andepth" in js):
koboldai_vars.andepth = js["andepth"]
if("sampler_order" in js):
sampler_order = koboldai_vars.sampler_order
if(len(sampler_order) < 7):
sampler_order = [6] + sampler_order
koboldai_vars.sampler_order = sampler_order
if("temp" in js):
koboldai_vars.temp = js["temp"]
if("top_p" in js):
koboldai_vars.top_p = js["top_p"]
if("top_k" in js):
koboldai_vars.top_k = js["top_k"]
if("tfs" in js):
koboldai_vars.tfs = js["tfs"]
if("typical" in js):
koboldai_vars.typical = js["typical"]
if("top_a" in js):
koboldai_vars.top_a = js["top_a"]
if("rep_pen" in js):
koboldai_vars.rep_pen = js["rep_pen"]
if("rep_pen_slope" in js):
koboldai_vars.rep_pen_slope = js["rep_pen_slope"]
if("rep_pen_range" in js):
koboldai_vars.rep_pen_range = js["rep_pen_range"]
if("genamt" in js):
koboldai_vars.genamt = js["genamt"]
if("max_length" in js):
koboldai_vars.max_length = js["max_length"]
if("ikgen" in js):
koboldai_vars.ikgen = js["ikgen"]
if("formatoptns" in js):
for setting in ['frmttriminc', 'frmtrmblln', 'frmtrmspch', 'frmtadsnsp', 'singleline']:
if setting in js["formatoptns"]:
setattr(koboldai_vars, setting, js["formatoptns"][setting])
if("numseqs" in js):
koboldai_vars.numseqs = js["numseqs"]
if("widepth" in js):
koboldai_vars.widepth = js["widepth"]
if("useprompt" in js):
koboldai_vars.useprompt = js["useprompt"]
if("adventure" in js):
koboldai_vars.adventure = js["adventure"]
if("chatmode" in js):
koboldai_vars.chatmode = js["chatmode"]
if("chatname" in js):
koboldai_vars.chatname = js["chatname"]
if("dynamicscan" in js):
koboldai_vars.dynamicscan = js["dynamicscan"]
if("nopromptgen" in js):
koboldai_vars.nopromptgen = js["nopromptgen"]
if("rngpersist" in js):
koboldai_vars.rngpersist = js["rngpersist"]
if("nogenmod" in js):
koboldai_vars.nogenmod = js["nogenmod"]
if("fulldeterminism" in js):
koboldai_vars.full_determinism = js["fulldeterminism"]
if("autosave" in js):
koboldai_vars.autosave = js["autosave"]
if("newlinemode" in js):
koboldai_vars.newlinemode = js["newlinemode"]
if("welcome" in js):
koboldai_vars.welcome = js["welcome"]
if("output_streaming" in js):
koboldai_vars.output_streaming = js["output_streaming"]
if("show_probs" in js):
koboldai_vars.show_probs = js["show_probs"]
if("seed" in js):
koboldai_vars.seed = js["seed"]
if(koboldai_vars.seed is not None):
koboldai_vars.seed_specified = True
else:
koboldai_vars.seed_specified = False
else:
koboldai_vars.seed_specified = False
if("antemplate" in js):
koboldai_vars.setauthornotetemplate = js["antemplate"]
if(not koboldai_vars.gamestarted):
koboldai_vars.authornotetemplate = koboldai_vars.setauthornotetemplate
if("userscripts" in js):
koboldai_vars.userscripts = []
for userscript in js["userscripts"]:
if type(userscript) is not str:
continue
userscript = userscript.strip()
if len(userscript) != 0 and all(q not in userscript for q in ("..", ":")) and all(userscript[0] not in q for q in ("/", "\\")) and os.path.exists(fileops.uspath(userscript)):
koboldai_vars.userscripts.append(userscript)
if("corescript" in js and type(js["corescript"]) is str and all(q not in js["corescript"] for q in ("..", ":")) and all(js["corescript"][0] not in q for q in ("/", "\\"))):
koboldai_vars.corescript = js["corescript"]
else:
koboldai_vars.corescript = "default.lua"
#==================================================================#
# Load a soft prompt from a file
#==================================================================#
@@ -1270,6 +1112,12 @@ def general_startup(override_args=None):
#setup socketio relay queue
koboldai_settings.queue = multiprocessing.Queue()
socketio.start_background_task(socket_io_relay, koboldai_settings.queue, socketio)
#load system and user settings
for setting in ['user_settings', 'system_settings']:
if os.path.exists("settings/{}.v2_settings".format(setting)):
with open("settings/{}.v2_settings".format(setting), "r") as settings_file:
getattr(koboldai_vars, "_{}".format(setting)).from_json(settings_file.read())
#==================================================================#
# Load Model
@@ -1315,8 +1163,8 @@ def get_model_info(model, directory=""):
if model in ['Colab', 'API']:
url = True
elif model in [x[1] for x in model_menu['apilist']]:
if path.exists("settings/{}.settings".format(model)):
with open("settings/{}.settings".format(model), "r") as file:
if path.exists("settings/{}.v2_settings".format(model)):
with open("settings/{}.v2_settings".format(model), "r") as file:
# Check if API key exists
js = json.load(file)
if("apikey" in js and js["apikey"] != ""):
@@ -1352,6 +1200,8 @@ def get_model_info(model, directory=""):
break_values = break_values.split(",")
else:
break_values = [layer_count]
if break_values == ['']:
break_values = []
break_values = [int(x) for x in break_values]
break_values += [0] * (gpu_count - len(break_values))
emit('from_server', {'cmd': 'selected_model_info', 'key_value': key_value, 'key':key,
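
# Worked example (not part of the diff) of the guard added above, with
# gpu_count assumed to be 2: an empty layer-split string would otherwise
# crash on int('').
break_values = "".split(",")                  # no split given -> ['']
if break_values == ['']:
    break_values = []                         # new guard: avoid int('') ValueError
break_values = [int(x) for x in break_values] # -> []
break_values += [0] * (2 - len(break_values)) # pad to one entry per GPU
assert break_values == [0, 0]
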
@@ -1427,8 +1277,8 @@ def get_oai_models(data):
# If the client settings file doesn't exist, create it
# Write API key to file
os.makedirs('settings', exist_ok=True)
if path.exists("settings/{}.settings".format(model)):
with open("settings/{}.settings".format(model), "r") as file:
if path.exists("settings/{}.v2_settings".format(model)):
with open("settings/{}.v2_settings".format(model), "r") as file:
js = json.load(file)
if 'online_model' in js:
online_model = js['online_model']
@@ -1436,7 +1286,7 @@ def get_oai_models(data):
if js['apikey'] != key:
changed=True
if changed:
with open("settings/{}.settings".format(model), "w") as file:
with open("settings/{}.v2_settings".format(model), "w") as file:
js["apikey"] = key
file.write(json.dumps(js, indent=3))
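
# Throughout this commit the per-model key files move from
# settings/<model>.settings to settings/<model>.v2_settings. A hedged
# helper (hypothetical, not in the diff) capturing the read side, mirroring
# the 'apikey' check used above:
import json, os

def stored_apikey(model):
    path = "settings/{}.v2_settings".format(model)
    if not os.path.exists(path):
        return None
    with open(path, "r") as f:
        js = json.load(f)
    return js["apikey"] if ("apikey" in js and js["apikey"] != "") else None
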
@@ -1876,7 +1726,7 @@ def patch_transformers():
tail = input_ids[..., -koboldai_vars.generated_tkns:]
for i, t in enumerate(tail):
decoded = utils.decodenewlines(tokenizer.decode(t))
_, found = checkworldinfo(decoded, force_use_txt=True, actions=koboldai_vars._actions)
_, found = checkworldinfo(decoded, force_use_txt=True, actions=koboldai_vars.actions)
found -= self.excluded_world_info[i]
if(len(found) != 0):
self.regeneration_required = True
@@ -1981,9 +1831,9 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
#Let's set the GooseAI or OpenAI server URLs if that's applicable
if online_model != "":
if path.exists("settings/{}.settings".format(koboldai_vars.model)):
if path.exists("settings/{}.v2_settings".format(koboldai_vars.model)):
changed=False
with open("settings/{}.settings".format(koboldai_vars.model), "r") as file:
with open("settings/{}.v2_settings".format(koboldai_vars.model), "r") as file:
# Check if API key exists
js = json.load(file)
if 'online_model' in js:
@@ -1994,7 +1844,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
changed=True
js['online_model'] = online_model
if changed:
with open("settings/{}.settings".format(koboldai_vars.model), "w") as file:
with open("settings/{}.v2_settings".format(koboldai_vars.model), "w") as file:
file.write(json.dumps(js, indent=3))
# Swap OAI Server if GooseAI was selected
if(koboldai_vars.model == "GooseAI"):
@@ -2525,7 +2375,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
for i, t in enumerate(generated):
decoded = utils.decodenewlines(tokenizer.decode(past[i])) + utils.decodenewlines(tokenizer.decode(t[tpu_mtj_backend.params["seq"] : tpu_mtj_backend.params["seq"] + n_generated]))
_, found = checkworldinfo(decoded, force_use_txt=True, actions=koboldai_vars._actions)
_, found = checkworldinfo(decoded, force_use_txt=True, actions=koboldai_vars.actions)
found -= excluded_world_info[i]
if(len(found) != 0):
regeneration_required = True
@@ -2541,7 +2391,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
koboldai_vars.compiling = False
def tpumtjgenerate_settings_callback() -> dict:
sampler_order = vars.sampler_order[:]
sampler_order = koboldai_vars.sampler_order[:]
if len(sampler_order) < 7: # Add repetition penalty at beginning if it's not present
sampler_order = [6] + sampler_order
return {
@@ -2656,6 +2506,9 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
koboldai_vars.presets = to_use
koboldai_vars.aibusy = False
koboldai_vars.splist = [[f, get_softprompt_desc(os.path.join("./softprompts", f),None,True)] for f in os.listdir("./softprompts") if os.path.isfile(os.path.join("./softprompts", f)) and valid_softprompt(os.path.join("./softprompts", f))]
if initial_load and koboldai_vars.cloudflare_link != "":
print(format(colors.GREEN) + "KoboldAI has finished loading and is available at the following link for UI 1: " + koboldai_vars.cloudflare_link + format(colors.END))
print(format(colors.GREEN) + "KoboldAI has finished loading and is available at the following link for UI 2: " + koboldai_vars.cloudflare_link + "/new_ui" + format(colors.END))
# Set up Flask routes
@app.route('/')
@@ -2728,22 +2581,22 @@ def lua_startup():
global _bridged
global F
global bridged
if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r")
js = json.load(file)
if("userscripts" in js):
koboldai_vars.userscripts = []
for userscript in js["userscripts"]:
if type(userscript) is not str:
continue
userscript = userscript.strip()
if len(userscript) != 0 and all(q not in userscript for q in ("..", ":")) and all(userscript[0] not in q for q in ("/", "\\")) and os.path.exists(fileops.uspath(userscript)):
koboldai_vars.userscripts.append(userscript)
if("corescript" in js and type(js["corescript"]) is str and all(q not in js["corescript"] for q in ("..", ":")) and all(js["corescript"][0] not in q for q in ("/", "\\"))):
koboldai_vars.corescript = js["corescript"]
else:
koboldai_vars.corescript = "default.lua"
file.close()
#if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
# file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r")
# js = json.load(file)
# if("userscripts" in js):
# koboldai_vars.userscripts = []
# for userscript in js["userscripts"]:
# if type(userscript) is not str:
# continue
# userscript = userscript.strip()
# if len(userscript) != 0 and all(q not in userscript for q in ("..", ":")) and all(userscript[0] not in q for q in ("/", "\\")) and os.path.exists(fileops.uspath(userscript)):
# koboldai_vars.userscripts.append(userscript)
# if("corescript" in js and type(js["corescript"]) is str and all(q not in js["corescript"] for q in ("..", ":")) and all(js["corescript"][0] not in q for q in ("/", "\\"))):
# koboldai_vars.corescript = js["corescript"]
# else:
# koboldai_vars.corescript = "default.lua"
# file.close()
#==================================================================#
# Lua runtime startup
@@ -2894,7 +2747,7 @@ def lua_compute_context(submission, entries, folders, kwargs):
assert type(submission) is str
if(kwargs is None):
kwargs = koboldai_vars.lua_state.table()
actions = koboldai_vars._actions if koboldai_vars.lua_koboldbridge.userstate == "genmod" else koboldai_vars.actions
actions = koboldai_vars.actions
allowed_entries = None
allowed_folders = None
if(entries is not None):
@@ -3220,13 +3073,9 @@ def lua_set_chunk(k, v):
if(len(v) == 0):
print(colors.GREEN + f"{lua_log_format_name(koboldai_vars.lua_koboldbridge.logging_name)} deleted story chunk {k}" + colors.END)
chunk = int(k)
if(koboldai_vars.lua_koboldbridge.userstate == "genmod"):
del koboldai_vars._actions[chunk-1]
koboldai_vars.actions.delete_action(chunk-1)
koboldai_vars.lua_deleted.add(chunk)
if(not hasattr(koboldai_vars, "_actions") or koboldai_vars._actions is not koboldai_vars.actions):
#Instead of deleting we'll blank out the text. This way our actions and actions_metadata stay in sync and we can restore the chunk on an undo
koboldai_vars.actions[chunk-1] = ""
send_debug()
send_debug()
else:
if(k == 0):
print(colors.GREEN + f"{lua_log_format_name(koboldai_vars.lua_koboldbridge.logging_name)} edited prompt chunk" + colors.END)
@@ -3239,8 +3088,6 @@ def lua_set_chunk(k, v):
koboldai_vars.lua_edited.add(chunk)
koboldai_vars.prompt = v
else:
if(koboldai_vars.lua_koboldbridge.userstate == "genmod"):
koboldai_vars._actions[chunk-1] = v
koboldai_vars.lua_edited.add(chunk)
koboldai_vars.actions[chunk-1] = v
send_debug()
@@ -3741,7 +3588,7 @@ def get_message(msg):
f.write(str(msg['gpu_layers']) + '\n' + str(msg['disk_layers']))
f.close()
koboldai_vars.colaburl = msg['url'] + "/request"
vars.model = vars.model_selected
koboldai_vars.model = koboldai_vars.model_selected
load_model(use_gpu=msg['use_gpu'], gpu_layers=msg['gpu_layers'], disk_layers=msg['disk_layers'], online_model=msg['online_model'])
elif(msg['cmd'] == 'show_model'):
print("Model Name: {}".format(getmodelname()))
@@ -4213,10 +4060,7 @@ def apiactionsubmit_tpumtjgenerate(txt, minimum, maximum):
if not koboldai_vars.quiet:
print("{0}Min:{1}, Max:{2}, Txt:{3}{4}".format(colors.YELLOW, minimum, maximum, utils.decodenewlines(tokenizer.decode(txt)), colors.END))
koboldai_vars._actions = koboldai_vars.actions
koboldai_vars._prompt = koboldai_vars.prompt
if(koboldai_vars.dynamicscan):
koboldai_vars._actions = koboldai_vars._actions.copy()
# Submit input text to generator
soft_tokens = tpumtjgetsofttokens()
@@ -4622,10 +4466,7 @@ def _generate(txt, minimum, maximum, found_entries):
model.kai_scanner_excluded_world_info = found_entries
koboldai_vars._actions = koboldai_vars.actions
koboldai_vars._prompt = koboldai_vars.prompt
if(koboldai_vars.dynamicscan):
koboldai_vars._actions = [x for x in koboldai_vars.actions]
with torch.no_grad():
already_generated = 0
@@ -4657,13 +4498,13 @@ def _generate(txt, minimum, maximum, found_entries):
encoded = []
for i in range(koboldai_vars.numseqs):
txt = utils.decodenewlines(tokenizer.decode(genout[i, -already_generated:]))
winfo, mem, anotetxt, _found_entries = calcsubmitbudgetheader(txt, force_use_txt=True, actions=koboldai_vars._actions)
winfo, mem, anotetxt, _found_entries = calcsubmitbudgetheader(txt, force_use_txt=True, actions=koboldai_vars.actions)
found_entries[i].update(_found_entries)
if koboldai_vars.alt_gen:
txt, _, _ = koboldai_vars.calc_ai_text(submitted_text=txt)
print("Using Alt Gen: {}".format(tokenizer.decode(txt)))
else:
txt, _, _ = calcsubmitbudget(len(koboldai_vars._actions), winfo, mem, anotetxt, koboldai_vars._actions, submission=txt)
txt, _, _ = calcsubmitbudget(len(koboldai_vars.actions), winfo, mem, anotetxt, koboldai_vars.actions, submission=txt)
encoded.append(torch.tensor(txt, dtype=torch.long, device=genout.device))
max_length = len(max(encoded, key=len))
encoded = torch.stack(tuple(torch.nn.functional.pad(e, (max_length - len(e), 0), value=model.config.pad_token_id or model.config.eos_token_id) for e in encoded))
@@ -5018,10 +4859,7 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None):
if not koboldai_vars.quiet:
print("{0}Min:{1}, Max:{2}, Txt:{3}{4}".format(colors.YELLOW, minimum, maximum, utils.decodenewlines(tokenizer.decode(txt)), colors.END))
koboldai_vars._actions = koboldai_vars.actions
koboldai_vars._prompt = koboldai_vars.prompt
if(koboldai_vars.dynamicscan):
koboldai_vars._actions = koboldai_vars._actions.copy()
# Submit input text to generator
try:
@@ -5060,13 +4898,13 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None):
encoded = []
for i in range(koboldai_vars.numseqs):
txt = utils.decodenewlines(tokenizer.decode(past[i]))
winfo, mem, anotetxt, _found_entries = calcsubmitbudgetheader(txt, force_use_txt=True, actions=koboldai_vars._actions)
winfo, mem, anotetxt, _found_entries = calcsubmitbudgetheader(txt, force_use_txt=True, actions=koboldai_vars.actions)
found_entries[i].update(_found_entries)
if koboldai_vars.alt_gen:
txt, _, _ = koboldai_vars.calc_ai_text(submitted_text=txt)
print("Using Alt Gen: {}".format(tokenizer.decode(txt)))
else:
txt, _, _ = calcsubmitbudget(len(koboldai_vars._actions), winfo, mem, anotetxt, koboldai_vars._actions, submission=txt)
txt, _, _ = calcsubmitbudget(len(koboldai_vars.actions), winfo, mem, anotetxt, koboldai_vars.actions, submission=txt)
encoded.append(np.array(txt, dtype=np.uint32))
max_length = len(max(encoded, key=len))
encoded = np.stack(tuple(np.pad(e, (max_length - len(e), 0), constant_values=tpu_mtj_backend.pad_token_id) for e in encoded))
@@ -5660,15 +5498,14 @@ def checkworldinfo(txt, allowed_entries=None, allowed_folders=None, force_use_tx
depth += 1
if(ln > 0):
chunks = collections.deque()
i = 0
for key in reversed(actions):
chunk = actions[key]
chunks.appendleft(chunk)
i += 1
if(i == depth):
break
chunks = actions[-depth:]
#i = 0
#for key in reversed(actions):
# chunk = actions[key]
# chunks.appendleft(chunk)
# i += 1
# if(i == depth):
# break
if(ln >= depth):
txt = "".join(chunks)
elif(ln > 0):
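
# Equivalence check (not part of the diff) for the slice that replaced the
# deque walk above; it relies on the new slice support in
# KoboldStoryRegister.__getitem__ shown in koboldai_settings.py below.
# Plain-list stand-in:
actions = ["chunk1", "chunk2", "chunk3", "chunk4"]
depth = 2
assert actions[-depth:] == ["chunk3", "chunk4"]   # last `depth` chunks, oldest first
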
@@ -6592,8 +6429,8 @@ def final_startup():
threading.Thread(target=__preempt_tokenizer).start()
# Load soft prompt specified by the settings file, if applicable
if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r")
if(path.exists("settings/" + getmodelname().replace('/', '_') + ".v2_settings")):
file = open("settings/" + getmodelname().replace('/', '_') + ".v2_settings", "r")
js = json.load(file)
if(koboldai_vars.allowsp and "softprompt" in js and type(js["softprompt"]) is str and all(q not in js["softprompt"] for q in ("..", ":")) and (len(js["softprompt"]) != 0 and all(js["softprompt"][0] not in q for q in ("/", "\\")))):
if valid_softprompt("softprompts/"+js["softprompt"]):
@@ -7243,7 +7080,7 @@ def UI_2_load_story_list(data):
def get_story_length(item_full_path, item, valid_selection):
if not valid_selection:
return [""]
return ["", ""]
with open(item_full_path, "r") as f:
js = json.load(f)
title = js['story_name'] if 'story_name' in js else ".".join(item.split(".")[:-1])
@@ -7251,7 +7088,7 @@ def get_story_length(item_full_path, item, valid_selection):
return [title, len(js['actions'])]
if js['file_version'] == 1:
return [title, len(js['actions'])]
return [0 if js['actions']['action_count'] == -1 else js['actions']['action_count'] ]
return [title, 0 if js['actions']['action_count'] == -1 else js['actions']['action_count'] ]
def valid_story(file):
@@ -8097,18 +7934,18 @@ def put_model(body: ModelSelectionSchema):
{api_validation_error_response}
{api_server_busy_response}
"""
if vars.aibusy or vars.genseqs:
if koboldai_vars.aibusy or koboldai_vars.genseqs:
abort(Response(json.dumps({"detail": {
"msg": "Server is busy; please try again later.",
"type": "service_unavailable",
}}), mimetype="application/json", status=503))
set_aibusy(1)
old_model = vars.model
vars.model = body.model.strip()
old_model = koboldai_vars.model
koboldai_vars.model = body.model.strip()
try:
load_model(use_breakmodel_args=True, breakmodel_args_default_to_cpu=True)
except Exception as e:
vars.model = old_model
koboldai_vars.model = old_model
raise e
set_aibusy(0)
return {}
@@ -10322,7 +10159,6 @@ for schema in config_endpoint_schemas:
#==================================================================#
# Final startup commands to launch Flask app
#==================================================================#
@app.before_first_request
def startup():
if koboldai_vars.model == "" or koboldai_vars.model is None:
koboldai_vars.model = "ReadOnly"
@@ -10334,6 +10170,7 @@ if __name__ == "__main__":
general_startup()
patch_transformers()
startup()
# Start Flask/SocketIO (Blocking, so this must be last method!)
port = args.port if "port" in args and args.port is not None else 5000
koboldai_settings.port = port
@@ -10365,6 +10202,7 @@ if __name__ == "__main__":
if(args.localtunnel or args.ngrok or args.remote):
with open('cloudflare.log', 'w') as cloudflarelog:
cloudflarelog.write("KoboldAI has finished loading and is available at the following link : " + cloudflare)
koboldai_vars.cloudflare_link = cloudflare
print(format(colors.GREEN) + "KoboldAI has finished loading and is available at the following link for UI 1: " + cloudflare + format(colors.END))
print(format(colors.GREEN) + "KoboldAI has finished loading and is available at the following link for UI 2: " + cloudflare + "/new_ui" + format(colors.END))
else:

View File

@@ -730,7 +730,7 @@ return function(_python, _bridged)
return prompt
end
end
local actions = koboldbridge.userstate == "genmod" and bridged.koboldai_vars._actions or bridged.koboldai_vars.actions
local actions = bridged.koboldai_vars.actions
return _python.as_attrgetter(actions).get(math.tointeger(rawget(t, "_num")) - 1)
end
end
@@ -752,7 +752,7 @@ return function(_python, _bridged)
error("Attempted to set the prompt chunk's content to the empty string; this is not allowed")
return
end
local actions = koboldbridge.userstate == "genmod" and bridged.koboldai_vars._actions or bridged.koboldai_vars.actions
local actions = bridged.koboldai_vars.actions
if _k ~= 0 and _python.as_attrgetter(actions).get(_k-1) == nil then
return
end
@@ -777,7 +777,7 @@ return function(_python, _bridged)
---@return fun(): KoboldStoryChunk, table, nil
function KoboldStory:forward_iter()
local actions = koboldbridge.userstate == "genmod" and bridged.koboldai_vars._actions or bridged.koboldai_vars.actions
local actions = bridged.koboldai_vars.actions
local nxt, iterator = _python.iter(actions)
local run_once = false
local function f()
@@ -805,7 +805,7 @@ return function(_python, _bridged)
---@return fun(): KoboldStoryChunk, table, nil
function KoboldStory:reverse_iter()
local actions = koboldbridge.userstate == "genmod" and bridged.koboldai_vars._actions or bridged.koboldai_vars.actions
local actions = bridged.koboldai_vars.actions
local nxt, iterator = _python.iter(_python.builtins.reversed(actions))
local last_run = false
local function f()

View File

@@ -506,6 +506,22 @@ gensettingstf = [
"default": 0,
"tooltip": "Shows token usage when typing in relevant text boxes. <b>May lag slower devices.</b>"
},
{
"UI_V2_Only": True,
"uitype": "toggle",
"unit": "bool",
"label": "Beep on Complete",
"id": "beep_on_complete",
"min": 1,
"max": 5,
"step": 1,
"default": 3,
"tooltip": "When enabled the UI will beep when completing an action such as generation or model loading.",
"menu_path": "Interface",
"sub_path": "UI",
"classname": "user",
"name": "beep_on_complete"
},
]
gensettingsik =[{

View File

@@ -109,7 +109,7 @@ class koboldai_vars(object):
# TODO: We may want to replace the "text" variable with a list-type
# class of context blocks, the class having a __str__ function.
if self.sp:
if self.sp_length > 0:
context.append({"type": "soft_prompt", "text": f"<{self.sp_length} tokens of Soft Prompt.>"})
# Header is never used?
# if koboldai_vars.model not in ("Colab", "API", "OAI") and self.tokenizer._koboldai_header:
@@ -330,8 +330,11 @@ class settings(object):
json_data = json.loads(data)
else:
json_data = data
for key, value in data.items():
for key, value in json_data.items():
if key in self.__dict__:
if key == 'sampler_order':
if(len(value) < 7):
value = [6] + value
if isinstance(value, str):
if value[:7] == 'base64:':
value = pickle.loads(base64.b64decode(value[7:]))
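
# Effect of the sampler_order guard added above (not part of the diff):
# pre-existing settings stored six samplers, and sampler id 6 (repetition
# penalty) is prepended so every loaded order has seven entries.
value = [0, 1, 2, 3, 4, 5]
if len(value) < 7:
    value = [6] + value
assert value == [6, 0, 1, 2, 3, 4, 5]
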
@@ -658,6 +661,7 @@ class user_settings(settings):
self.debug = False # If set to true, will send debug information to the client for display
self.output_streaming = True
self.show_probs = False # Whether or not to show token probabilities
self.beep_on_complete = False
def __setattr__(self, name, value):
@@ -730,6 +734,7 @@ class system_settings(settings):
self.seed = None # The current RNG seed (as an int), or None if unknown
self.alt_gen = False # Use the calc_ai_text method for generating text to go to the AI
self.theme_list = [".".join(f.split(".")[:-1]) for f in os.listdir("./themes") if os.path.isfile(os.path.join("./themes", f))]
self.cloudflare_link = ""
def __setattr__(self, name, value):
@@ -763,7 +768,7 @@ class KoboldStoryRegister(object):
self.append(item)
def reset(self, sequence=[]):
self.__init__(self.socketio, self.story_settings, sequence=sequence, tokenizer=self.tokenizer)
self.__init__(self.socketio, self.story_settings, self.koboldai_vars, sequence=sequence, tokenizer=self.tokenizer)
def __str__(self):
return "".join([x['Selected Text'] for ignore, x in sorted(self.actions.items())])
@@ -783,7 +788,11 @@ class KoboldStoryRegister(object):
raise StopIteration
def __getitem__(self, i):
return self.actions[i]["Selected Text"]
if isinstance(i, slice):
temp = [self.actions[x]["Selected Text"] for x in list(self.actions)[i]]
return temp
else:
return self.actions[i]["Selected Text"]
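
# Minimal illustration (not part of the diff) of the dict-backed slicing
# above: keys are action ids in insertion order, so list(actions)[i]
# selects ids and the comprehension maps them to their text.
actions = {0: {"Selected Text": "a"}, 1: {"Selected Text": "b"}, 2: {"Selected Text": "c"}}
i = slice(-2, None)
assert [actions[x]["Selected Text"] for x in list(actions)[i]] == ["b", "c"]
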
def __setitem__(self, i, text):
if i in self.actions:
@@ -1069,23 +1078,36 @@ class KoboldStoryRegister(object):
self.actions[self.action_count+1] = {"Selected Text": "", "Selected Text Length": 0, "Options": []}
for i in range(len(text_list)):
self.actions[self.action_count+1]['Options'].append({"text": text_list[i], "Pinned": False, "Previous Selection": False, "Edited": False, "Probabilities": [], "stream_id": i})
process_variable_changes(self.socketio, "actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": None})
process_variable_changes(self.socketio, "story", 'actions', {"id": self.action_count+1, 'action': self.actions[self.action_count+1]}, None)
#We need to see if this is the last token being streamed. If so, due to the relay it will come in AFTER the actual trimmed final text, overwriting it in the UI
if self.tokenizer is not None:
if len(self.tokenizer.encode(self.actions[self.action_count+1]["Options"][0]['text'])) != self.koboldai_vars.genamt:
#process_variable_changes(self.socketio, "actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": None})
process_variable_changes(self.socketio, "story", 'actions', {"id": self.action_count+1, 'action': self.actions[self.action_count+1]}, None)
else:
#We're streaming single options, so our output is our selected text
if self.tokenizer is not None:
selected_text_length = len(self.tokenizer.encode(text_list[0]))
else:
selected_text_length = 0
if self.action_count+1 in self.actions:
self.actions[self.action_count+1]['Selected Text'] = "{}{}".format(self.actions[self.action_count+1]['Selected Text'], text_list[0])
else:
self.actions[self.action_count+1] = {"Selected Text": text_list[0], "Selected Text Length": selected_text_length, "Options": []}
process_variable_changes(self.socketio, "actions", "Selected Text", {"id": self.action_count+1, "text": self.actions[self.action_count+1]['Selected Text']}, None)
process_variable_changes(self.socketio, "actions", 'Selected Text Length', {"id": self.action_count+1, 'length': self.actions[self.action_count+1]['Selected Text Length']}, {"id": self.action_count, 'length': 0})
process_variable_changes(self.socketio, "story", 'actions', {"id": self.action_count+1, 'action': self.actions[self.action_count+1]}, None)
#First we need to see if this is actually the prompt. If so, we'll just not do streaming:
if self.story_settings.prompt != "":
if self.action_count+1 in self.actions:
if self.tokenizer is not None:
selected_text_length = len(self.tokenizer.encode(self.actions[self.action_count+1]['Selected Text']))
else:
selected_text_length = 0
self.actions[self.action_count+1]['Selected Text'] = "{}{}".format(self.actions[self.action_count+1]['Selected Text'], text_list[0])
self.actions[self.action_count+1]['Selected Text Length'] = selected_text_length
else:
if self.tokenizer is not None:
selected_text_length = len(self.tokenizer.encode(text_list[0]))
else:
selected_text_length = 0
self.actions[self.action_count+1] = {"Selected Text": text_list[0], "Selected Text Length": selected_text_length, "Options": []}
if self.tokenizer is not None:
if len(self.tokenizer.encode(self.actions[self.action_count+1]['Selected Text'])) != self.koboldai_vars.genamt:
#process_variable_changes(self.socketio, "actions", "Options", {"id": self.action_count+1, "options": self.actions[self.action_count+1]["Options"]}, {"id": self.action_count+1, "options": None})
process_variable_changes(self.socketio, "story", 'actions', {"id": self.action_count+1, 'action': self.actions[self.action_count+1]}, None)
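
# Sketch (not part of the diff) of the guard used twice above; tokenizer and
# genamt are assumed from the surrounding class. A streamed option that
# already spans genamt tokens is the final one, so the UI update is skipped
# and the relayed trimmed final text is not overwritten.
def should_emit_stream(tokenizer, text, genamt):
    return tokenizer is not None and len(tokenizer.encode(text)) != genamt
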
def set_probabilites(self, probabilities, action_id=None):
if action_id is None:
@@ -1374,8 +1396,8 @@ class KoboldWorldInfo(object):
#Make sure we have all the appropriate variables:
for item in self.world_info:
for column in ["uid","title","key","keysecondary","folder","constant","content","comment","token_length","selective","used_in_game"]:
if column not in item:
item[column] = None
if column not in self.world_info[item]:
self.world_info[item][column] = None
try:
self.sync_world_info_to_old_format()
except:
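
# Why the fix above is needed (illustration, not part of the diff):
# iterating a dict yields keys, so the old code tested columns against the
# uids instead of the entry dicts.
world_info = {1: {"uid": 1}}
for item in world_info:                   # item is the key, not the entry
    for column in ["uid", "title"]:
        if column not in world_info[item]:
            world_info[item][column] = None
assert world_info[1]["title"] is None
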

View File

@@ -350,6 +350,10 @@ input[type="range"]::-ms-fill-upper {
display: inline-block;
transition: left 0.5s;
cursor: pointer;
filter: brightness(85%);
}
.pinned .menu_pin {
filter: brightness(200%);
}
}
@@ -485,6 +489,28 @@ input[type="range"]::-ms-fill-upper {
height: 35px;
}
.settings_button > .button_label {
position: relative;
bottom: 3px;
right: 4px;
}
.settings_button > .material-icons-outlined {
position: relative;
top: 1px;
right: 2px;
}
.Model_Info .settings_button {
transform: translateY(7%);
margin: -5px;
}
#import_story_button {
transform: translateY(22%);
}
.settings_button[story_gamesaved="true"] {
filter: brightness(40%);
}
@@ -793,6 +819,10 @@ input[type="range"]::-ms-fill-upper {
display: inline-block;
transition: left 0.5s;
cursor: pointer;
filter: brightness(85%);
}
.pinned .story_menu_pin {
filter: brightness(200%);
}
}

View File

@@ -142,14 +142,11 @@ function create_options(data) {
//Set all options before the next chunk to hidden
var option_container = document.getElementById("Select Options");
var current_chunk = parseInt(document.getElementById("action_count").textContent)+1;
var children = option_container.children;
for (var i = 0; i < children.length; i++) {
var chunk = children[i];
if (chunk.id == "Select Options Chunk " + current_chunk) {
chunk.classList.remove("hidden");
} else {
chunk.classList.add("hidden");
}
if (document.getElementById("Select Options Chunk " + current_chunk)) {
document.getElementById("Select Options Chunk " + current_chunk).classList.remove("hidden")
}
if (document.getElementById("Select Options Chunk " + (current_chunk-1))) {
document.getElementById("Select Options Chunk " + (current_chunk-1)).classList.add("hidden")
}
if (document.getElementById("Select Options Chunk "+data.value.id)) {
@@ -432,6 +429,10 @@ function do_ai_busy(data) {
if (data.value) {
ai_busy_start = Date.now();
favicon.start_swap()
current_chunk = parseInt(document.getElementById("action_count").textContent)+1;
if (document.getElementById("Select Options Chunk " + current_chunk)) {
document.getElementById("Select Options Chunk " + current_chunk).classList.add("hidden")
}
} else {
runtime = Date.now() - ai_busy_start;
if (document.getElementById("Execution Time")) {
@@ -442,6 +443,9 @@ function do_ai_busy(data) {
for (item of document.getElementsByClassName('statusbar_outer')) {
item.classList.add("hidden");
}
if (document.getElementById("user_beep_on_complete").checked) {
beep();
}
}
}
@@ -798,10 +802,12 @@ function popup_items(data) {
var accept = document.getElementById("popup_accept");
if (this.getAttribute("valid") == "true") {
accept.classList.remove("disabled");
accept.disabled = false;
accept.setAttribute("selected_value", this.id);
} else {
accept.setAttribute("selected_value", "");
accept.classList.add("disabled");
accept.disabled = true;
if (this.getAttribute("folder") == "true") {
socket.emit("popup_change_folder", this.id);
}
@@ -827,10 +833,12 @@ function popup_items(data) {
var accept = document.getElementById("popup_accept");
if (this.getAttribute("valid") == "true") {
accept.classList.remove("disabled");
accept.disabled = false;
accept.setAttribute("selected_value", this.id);
} else {
accept.setAttribute("selected_value", "");
accept.classList.add("disabled");
accept.disabled = true;
if (this.getAttribute("folder") == "true") {
socket.emit("popup_change_folder", this.id);
}
@@ -839,7 +847,7 @@ function popup_items(data) {
for (item of popup_list) {
item.classList.remove("selected");
}
this.classList.add("selected");
this.parentElement.classList.add("selected");
};
tr.append(td);
}
@@ -1019,6 +1027,8 @@ function show_model_menu(data) {
model_list.append(list_item);
}
var accept = document.getElementById("btn_loadmodelaccept");
accept.disabled = true;
}
@@ -1175,6 +1185,7 @@ function selected_model_info(data) {
document.getElementById("modellayers").classList.add("hidden");
accept.classList.remove("disabled");
}
accept.disabled = false;
}
@@ -2979,6 +2990,11 @@ function selectTab(tab) {
tabTarget.classList.remove("hidden");
}
function beep() {
var snd = new Audio("data:audio/wav;base64,//uQRAAAAWMSLwUIYAAsYkXgoQwAEaYLWfkWgAI0wWs/ItAAAGDgYtAgAyN+QWaAAihwMWm4G8QQRDiMcCBcH3Cc+CDv/7xA4Tvh9Rz/y8QADBwMWgQAZG/ILNAARQ4GLTcDeIIIhxGOBAuD7hOfBB3/94gcJ3w+o5/5eIAIAAAVwWgQAVQ2ORaIQwEMAJiDg95G4nQL7mQVWI6GwRcfsZAcsKkJvxgxEjzFUgfHoSQ9Qq7KNwqHwuB13MA4a1q/DmBrHgPcmjiGoh//EwC5nGPEmS4RcfkVKOhJf+WOgoxJclFz3kgn//dBA+ya1GhurNn8zb//9NNutNuhz31f////9vt///z+IdAEAAAK4LQIAKobHItEIYCGAExBwe8jcToF9zIKrEdDYIuP2MgOWFSE34wYiR5iqQPj0JIeoVdlG4VD4XA67mAcNa1fhzA1jwHuTRxDUQ//iYBczjHiTJcIuPyKlHQkv/LHQUYkuSi57yQT//uggfZNajQ3Vmz+Zt//+mm3Wm3Q576v////+32///5/EOgAAADVghQAAAAA//uQZAUAB1WI0PZugAAAAAoQwAAAEk3nRd2qAAAAACiDgAAAAAAABCqEEQRLCgwpBGMlJkIz8jKhGvj4k6jzRnqasNKIeoh5gI7BJaC1A1AoNBjJgbyApVS4IDlZgDU5WUAxEKDNmmALHzZp0Fkz1FMTmGFl1FMEyodIavcCAUHDWrKAIA4aa2oCgILEBupZgHvAhEBcZ6joQBxS76AgccrFlczBvKLC0QI2cBoCFvfTDAo7eoOQInqDPBtvrDEZBNYN5xwNwxQRfw8ZQ5wQVLvO8OYU+mHvFLlDh05Mdg7BT6YrRPpCBznMB2r//xKJjyyOh+cImr2/4doscwD6neZjuZR4AgAABYAAAABy1xcdQtxYBYYZdifkUDgzzXaXn98Z0oi9ILU5mBjFANmRwlVJ3/6jYDAmxaiDG3/6xjQQCCKkRb/6kg/wW+kSJ5//rLobkLSiKmqP/0ikJuDaSaSf/6JiLYLEYnW/+kXg1WRVJL/9EmQ1YZIsv/6Qzwy5qk7/+tEU0nkls3/zIUMPKNX/6yZLf+kFgAfgGyLFAUwY//uQZAUABcd5UiNPVXAAAApAAAAAE0VZQKw9ISAAACgAAAAAVQIygIElVrFkBS+Jhi+EAuu+lKAkYUEIsmEAEoMeDmCETMvfSHTGkF5RWH7kz/ESHWPAq/kcCRhqBtMdokPdM7vil7RG98A2sc7zO6ZvTdM7pmOUAZTnJW+NXxqmd41dqJ6mLTXxrPpnV8avaIf5SvL7pndPvPpndJR9Kuu8fePvuiuhorgWjp7Mf/PRjxcFCPDkW31srioCExivv9lcwKEaHsf/7ow2Fl1T/9RkXgEhYElAoCLFtMArxwivDJJ+bR1HTKJdlEoTELCIqgEwVGSQ+hIm0NbK8WXcTEI0UPoa2NbG4y2K00JEWbZavJXkYaqo9CRHS55FcZTjKEk3NKoCYUnSQ0rWxrZbFKbKIhOKPZe1cJKzZSaQrIyULHDZmV5K4xySsDRKWOruanGtjLJXFEmwaIbDLX0hIPBUQPVFVkQkDoUNfSoDgQGKPekoxeGzA4DUvnn4bxzcZrtJyipKfPNy5w+9lnXwgqsiyHNeSVpemw4bWb9psYeq//uQZBoABQt4yMVxYAIAAAkQoAAAHvYpL5m6AAgAACXDAAAAD59jblTirQe9upFsmZbpMudy7Lz1X1DYsxOOSWpfPqNX2WqktK0DMvuGwlbNj44TleLPQ+Gsfb+GOWOKJoIrWb3cIMeeON6lz2umTqMXV8Mj30yWPpjoSa9ujK8SyeJP5y5mOW1D6hvLepeveEAEDo0mgCRClOEgANv3B9a6fikgUSu/DmAMATrGx7nng5p5iimPNZsfQLYB2sDLIkzRKZOHGAaUyDcpFBSLG9MCQALgAIgQs2YunOszLSAyQYPVC2YdGGeHD2dTdJk1pAHGAWDjnkcLKFymS3RQZTInzySoBwMG0QueC3gMsCEYxUqlrcxK6k1LQQcsmyYeQPdC2YfuGPASCBkcVMQQqpVJshui1tkXQJQV0OXGAZMXSOEEBRirXbVRQW7ugq7IM7rPWSZyDlM3IuNEkxzCOJ0ny2ThNkyRai1b6ev//3dzNGzNb//4uAvHT5sURcZCFcuKLhOFs8mLAAEAt4UWAAIABAAAAAB4qbHo0tIjVkUU//uQZAwABfSFz3ZqQAAAAAngwAAAE1HjMp2qAAAAACZDgAAAD5UkTE1UgZEUExqYynN1qZvqIOREEFmBcJQkwdxiFtw0qEOkGYfRDifBui9MQg4QAHAqWtAWHoCxu1Yf4VfWLPIM2mHDFsbQEVGwyqQoQcwnfHeIkNt9YnkiaS1oizycqJrx4KOQjahZxWbcZgztj2c49nKmkId44S71j0c8eV9yDK6uPRzx5X18eDvjvQ6yKo9ZSS6l//8elePK/Lf//IInrOF/FvDoADYAGBMGb7FtErm5MXMlmPAJQVgWta7Zx2go+8xJ0UiCb8LHHdftWyLJE0QIAIsI+UbXu67dZMjmgDGCGl1H+vpF4NSDckSIkk7Vd+sxEhBQMRU8j/12UIRhzSaUdQ+rQU5kGeFxm+hb1oh6pWWmv3uvmReDl0UnvtapVaIzo1jZbf/pD6ElLqSX+rUmOQNpJFa/r+sa4e/pBlAABoAAAAA3CUgShLdGIxsY7AUABPRrgCABdDuQ5GC7DqPQCgbbJUAoRSUj+NIEig0YfyWUho1VBBBA//uQZB4ABZx5zfMakeAAAAmwAAAAF5F3P0w9GtAAACfAAAAAwLhMDmAYWMgVEG1U0FIGCBgXBXAtfMH10000EEEEEECUBYln03TTTdNBDZopopYvrTTdNa325mImNg3TTPV9q3pmY0xoO6bv3r00y+IDGid/9aaaZTGMuj9mpu9Mpio1dXrr5HERTZSmqU36A3CumzN/9Robv/Xx4v9ijkSRSNLQhAWumap82WRSBUqXStV/YcS+XVLnSS+WLDroqArFkMEsAS+eWmrUzrO0oEmE40RlMZ5+ODIkAyKAGUwZ3mVKmcamcJnMW26MRPgUw6j+LkhyHGVGYjSUUKNpuJUQoOIAyDvEyG8S5yfK6dhZc0Tx1KI/gviKL6qvvFs1+bWtaz58uUNnryq6kt5RzOCkPWlVqVX2a/EEBUdU1KrXLf40GoiiFXK///qpoiDXrOgqDR38JB0bw7SoL+ZB9o1RCkQjQ2CBYZKd/+VJxZRRZlqSkKiws0WFxUyCwsKiMy7hUVFhIaCrNQsKkTIsLivwKKigsj8XYlwt/WKi2N4d//uQRCSAAjURNIHpMZBGYiaQPSYyAAABLAAAAAAAACWAAAAApUF/Mg+0aohSIRobBAsMlO//Kk4soosy1JSFRYWaLC4qZBYWFRGZdwqKiwkNBVmoWFSJkWFxX4FFRQWR+LsS4W/rFRb/////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////VEFHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAU291bmRib3kuZGUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjAwNGh0dHA6Ly93d3cuc291bmRib3kuZGUAAAAAAAAAACU=");
snd.play();
}
function loadNAILorebook(data, filename) {
let lorebookVersion = data.lorebookVersion;
let wi_data = {folders: {[filename]: []}, entries: {}};

View File

@@ -13,12 +13,12 @@
<span>Drag file(s) above or click here to Upload File<input id="popup_upload_input" type=file onchange="upload_file(this)"></span>
<button id="import_story_button" class="settings_button hidden" onclick="document.getElementById('import_aidg_club_popup').classList.remove('hidden');">
<span class="material-icons-outlined cursor" title="Import Story">cloud_download</span>
<span> Import Story</span>
<span class="button_label">Import Story</span>
</button>
</div>
<div class="popup_load_cancel" id="popup_load_cancel">
<button class="btn popup_load_cancel_button action_button" id="popup_accept">Load</button>
<button class="btn popup_load_cancel_button action_button" id="popup_accept" disabled>Load</button>
<button class="btn popup_load_cancel_button" id="popup_cancel" onclick='document.getElementById("popup").classList.add("hidden");'>Cancel</button>
</div>
</div>
@@ -51,7 +51,7 @@
<input type="checkbox" data-toggle="toggle" data-onstyle="success" id="use_gpu" checked>
<div class="box-label">Use GPU</div>
</div>
<button type="button" class="btn popup_load_cancel_button action_button disabled" onclick="load_model()" id="btn_loadmodelaccept">Load</button>
<button type="button" class="btn popup_load_cancel_button action_button disabled" onclick="load_model()" id="btn_loadmodelaccept" disabled>Load</button>
<button type="button" class="btn popup_load_cancel_button" onclick='document.getElementById("loadmodelcontainer").classList.add("hidden");' id="btn_loadmodelclose">Cancel</button>
</div>
</div>
@@ -85,22 +85,6 @@
<button type="button" class="btn btn-primary" onclick="this.parentElement.parentElement.classList.add('hidden');">Cancel</button>
</div>
</div>
<!---------------- Import aidg.club Prompt ---------------------->
<div class="popup hidden" id="import_aidg_club_popup">
<div class="title">
<div class="popuptitletext">Enter the Prompt Number</div>
</div>
<div class="popup_list_area">
<br/>
<div style="text-align: center;"><a href="https://aetherroom.club/" target="_blank" rel="noopener noreferrer">https://aetherroom.club/</a></div>
<br/>
<input autocomplete="off" class="form-control" type="text" placeholder="Prompt Number (4-digit number at the end of aetherroom.club URL)" id="aidgpromptnum">
</div>
<div class="popup_load_cancel">
<button type="button" class="btn btn-primary" onclick="socket.emit('load_aidg_club', document.getElementById('aidgpromptnum').value); this.parentElement.parentElement.classList.add('hidden');">Accept</button>
<button type="button" class="btn btn-primary" onclick="this.parentElement.parentElement.classList.add('hidden');">Cancel</button>
</div>
</div>
<!---------------- error screen ---------------------->
<div class="popup hidden" id="error_message">

View File

@@ -34,12 +34,12 @@
</span>
</div>
<div id="text_runningmodel">
<b>1) Model: </b>
<b class="noselect">1) Model: </b>
</div>
<div style="text-align: center;">
<button class="settings_button" onclick="socket.emit('load_model_button', {});">
<span class="material-icons-outlined cursor" title="Load Model" style="font-size: 1.4em;">folder_open</span>
<span> Load Model</span>
<span class="button_label">Load Model</span>
</button>
<select class="var_sync_model_selected_preset settings_select presets" onchange='sync_to_server(this)'><option>Preset</option></select>
</div>
@@ -52,24 +52,24 @@
<span class="var_sync_story_story_name fullwidth" contenteditable=true onblur="sync_to_server(this);"></span>
</span>
<span>
<span class="material-icons-outlined cursor" title="Download Story" onclick="document.getElementById('download_iframe').src = 'json';">file_download</span>
<span class="material-icons-outlined cursor" style="padding-top: 8px;" title="Download Story" onclick="document.getElementById('download_iframe').src = 'json';">file_download</span>
</span>
</div>
<div id="text_storyname">
<b>2) Story: </b>
<b class="noselect">2) Story: </b>
</div>
<div class="story_title_icons">
<button class="settings_button" onclick="socket.emit('new_story', '');">
<span class="material-icons-outlined cursor" title="New Story">description</span>
<span> New Story</span>
<span class="button_label">New Story</span>
</button>
<button class="settings_button" onclick="socket.emit('load_story_list', '');">
<span class="material-icons-outlined cursor" title="Load Story">folder_open</span>
<span> Load Story</span>
<span class="button_label">Load Story</span>
</button>
<button class="settings_button var_sync_alt_story_gamesaved" onclick='socket.emit("save_story", null, (response) => {save_as_story(response);});'>
<span class="material-icons-outlined cursor var_sync_alt_story_gamesaved" title="Save Story">save</span>
<span> Save Story</span>
<span class="button_label">Save Story</span>
</button>
@@ -210,7 +210,10 @@
<div class="bias_header">
<div class="bias_header_phrase">Phrase</div>
<div class="bias_header_score">Score</div>
<div class="bias_header_comp_threshold">Completion Threshold</div>
<div class="bias_header_comp_threshold">
Completion Threshold
<span class="helpicon material-icons-outlined" title="Amount of tokens that must match the phrase before it is force-completed.">help_icon</span>
</div>
</div>
</div>
</div>
@@ -333,6 +336,10 @@
<span>Hide token bar</span>
<input type=checkbox class="setting_item_input" data-size="mini" data-onstyle="success" data-toggle="toggle">
</div>
<div class="tweak-container" tweak-path="hide-max-length">
<span>Hide text highlighting</span>
<input type=checkbox class="setting_item_input" data-size="mini" data-onstyle="success" data-toggle="toggle">
</div>
</div>
</div>
<div id="settings_footer" class="settings_footer">

View File

@@ -0,0 +1,4 @@
.within_max_length {
color: inherit !important;
font-weight: inherit !important;
}