From 739cccd8ed9e1da819a3b7260e9c4c79f8a1ab43 Mon Sep 17 00:00:00 2001 From: Henk Date: Tue, 31 Jan 2023 04:48:46 +0100 Subject: [PATCH 01/12] Revision Fixes --- aiserver.py | 94 ++++++++++++++++++++++++++--------------------------- utils.py | 14 ++++---- 2 files changed, 54 insertions(+), 54 deletions(-) diff --git a/aiserver.py b/aiserver.py index 629dcba3..665b43f6 100644 --- a/aiserver.py +++ b/aiserver.py @@ -1590,13 +1590,13 @@ def get_layer_count(model, directory=""): model = directory from transformers import AutoConfig if(os.path.isdir(model.replace('/', '_'))): - model_config = AutoConfig.from_pretrained(model.replace('/', '_'), revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model.replace('/', '_'), revision=args.revision, cache_dir="cache") elif(os.path.isdir("models/{}".format(model.replace('/', '_')))): - model_config = AutoConfig.from_pretrained("models/{}".format(model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained("models/{}".format(model.replace('/', '_')), revision=args.revision, cache_dir="cache") elif(os.path.isdir(directory)): - model_config = AutoConfig.from_pretrained(directory, revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(directory, revision=args.revision, cache_dir="cache") else: - model_config = AutoConfig.from_pretrained(model, revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model, revision=args.revision, cache_dir="cache") try: if ((utils.HAS_ACCELERATE and model_config.model_type != 'gpt2') or model_config.model_type in ("gpt_neo", "gptj", "xglm", "opt")) and not vars.nobreakmodel: return utils.num_layers(model_config) @@ -2231,19 +2231,19 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal from transformers import AutoConfig if(os.path.isdir(vars.custmodpth.replace('/', '_'))): try: - model_config = AutoConfig.from_pretrained(vars.custmodpth.replace('/', '_'), revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(vars.custmodpth.replace('/', '_'), revision=args.revision, cache_dir="cache") vars.model_type = model_config.model_type except ValueError as e: vars.model_type = "not_found" elif(os.path.isdir("models/{}".format(vars.custmodpth.replace('/', '_')))): try: - model_config = AutoConfig.from_pretrained("models/{}".format(vars.custmodpth.replace('/', '_')), revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained("models/{}".format(vars.custmodpth.replace('/', '_')), revision=args.revision, cache_dir="cache") vars.model_type = model_config.model_type except ValueError as e: vars.model_type = "not_found" else: try: - model_config = AutoConfig.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache") vars.model_type = model_config.model_type except ValueError as e: vars.model_type = "not_found" @@ -2482,19 +2482,19 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal with(maybe_use_float16()): try: if os.path.exists(vars.custmodpth): - model = GPT2LMHeadModel.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache") + 
tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache") elif os.path.exists(os.path.join("models/", vars.custmodpth)): - model = GPT2LMHeadModel.from_pretrained(os.path.join("models/", vars.custmodpth), revision=vars.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(os.path.join("models/", vars.custmodpth), revision=vars.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(os.path.join("models/", vars.custmodpth), revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(os.path.join("models/", vars.custmodpth), revision=args.revision, cache_dir="cache") else: - model = GPT2LMHeadModel.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache") except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") raise e - tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache") model.save_pretrained("models/{}".format(vars.model.replace('/', '_')), max_shard_size="500MiB") tokenizer.save_pretrained("models/{}".format(vars.model.replace('/', '_'))) vars.modeldim = get_hidden_size_from_model(model) @@ -2541,38 +2541,38 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal lowmem = {} if(os.path.isdir(vars.custmodpth)): try: - tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache") except Exception as e: try: - tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained(vars.custmodpth, revision=args.revision, cache_dir="cache", **lowmem) elif(os.path.isdir("models/{}".format(vars.model.replace('/', '_')))): try: - tokenizer = 
AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache") except Exception as e: try: - tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", **lowmem) else: old_rebuild_tensor = torch._utils._rebuild_tensor def new_rebuild_tensor(storage: Union[torch_lazy_loader.LazyTensor, torch.Storage], storage_offset, shape, stride): @@ -2588,21 +2588,21 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal torch._utils._rebuild_tensor = new_rebuild_tensor try: - tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=args.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=args.revision, cache_dir="cache") except Exception as e: try: - tokenizer = GPT2Tokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(vars.model, revision=args.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained(vars.model, revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = 
GPTNeoForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained(vars.model, revision=args.revision, cache_dir="cache", **lowmem) torch._utils._rebuild_tensor = old_rebuild_tensor @@ -2619,10 +2619,10 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal import huggingface_hub legacy = packaging.version.parse(transformers_version) < packaging.version.parse("4.22.0.dev0") # Save the config.json - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(vars.model, transformers.configuration_utils.CONFIG_NAME, revision=vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(vars.model.replace('/', '_')), transformers.configuration_utils.CONFIG_NAME)) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(vars.model, transformers.configuration_utils.CONFIG_NAME, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(vars.model.replace('/', '_')), transformers.configuration_utils.CONFIG_NAME)) if(utils.num_shards is None): # Save the pytorch_model.bin of an unsharded model - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(vars.model, transformers.modeling_utils.WEIGHTS_NAME, revision=vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_NAME)) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(vars.model, transformers.modeling_utils.WEIGHTS_NAME, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_NAME)) else: with open(utils.from_pretrained_index_filename) as f: map_data = json.load(f) @@ -2631,7 +2631,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal shutil.move(os.path.realpath(utils.from_pretrained_index_filename), os.path.join("models/{}".format(vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_INDEX_NAME)) # Then save the pytorch_model-#####-of-#####.bin files for filename in filenames: - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(vars.model, filename, revision=vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(vars.model.replace('/', '_')), filename)) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(vars.model, filename, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(vars.model.replace('/', '_')), filename)) shutil.rmtree("cache/") if(vars.badwordsids is vars.badwordsids_default and vars.model_type not in ("gpt2", "gpt_neo", "gptj")): @@ -2677,7 +2677,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal else: from transformers import GPT2Tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") else: from transformers import PreTrainedModel from transformers import modeling_utils @@ -2776,11 +2776,11 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal # If we're running Colab or OAI, we still need a 
tokenizer. if(vars.model in ("Colab", "API", "CLUSTER")): from transformers import GPT2Tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B", revision=args.revision, cache_dir="cache") loadsettings() elif(vars.model == "OAI"): from transformers import GPT2Tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") loadsettings() # Load the TPU backend if requested elif(vars.use_colab_tpu or vars.model in ("TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX")): @@ -3037,7 +3037,7 @@ def lua_decode(tokens): if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") return utils.decodenewlines(tokenizer.decode(tokens)) #==================================================================# @@ -3049,7 +3049,7 @@ def lua_encode(string): if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") return tokenizer.encode(utils.encodenewlines(string), max_length=int(4e9), truncation=True) #==================================================================# @@ -4198,19 +4198,19 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, try: if(os.path.isdir(tokenizer_id)): try: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") except: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) elif(os.path.isdir("models/{}".format(tokenizer_id.replace('/', '_')))): try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache") except: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False) else: try: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") except: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) except: logger.warning(f"Unknown tokenizer {repr(tokenizer_id)}") vars.api_tokenizer_id = tokenizer_id @@ -4622,7 +4622,7 @@ def 
calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") lnheader = len(tokenizer._koboldai_header) diff --git a/utils.py b/utils.py index baa74add..79f90b11 100644 --- a/utils.py +++ b/utils.py @@ -261,7 +261,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa if token is None: raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.") _cache_dir = str(cache_dir) if cache_dir is not None else transformers.TRANSFORMERS_CACHE - _revision = revision if revision is not None else huggingface_hub.constants.DEFAULT_REVISION + _revision = args.revision if args.revision is not None else huggingface_hub.constants.DEFAULT_REVISION sharded = False headers = {"user-agent": transformers.file_utils.http_user_agent(user_agent)} if use_auth_token: @@ -272,7 +272,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa def is_cached(filename): try: - huggingface_hub.hf_hub_download(pretrained_model_name_or_path, filename, cache_dir=cache_dir, local_files_only=True) + huggingface_hub.hf_hub_download(pretrained_model_name_or_path, filename, cache_dir=cache_dir, local_files_only=True, revision=_revision) except ValueError: return False return True @@ -281,7 +281,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa filename = transformers.modeling_utils.WEIGHTS_INDEX_NAME if sharded else transformers.modeling_utils.WEIGHTS_NAME except AttributeError: return - url = huggingface_hub.hf_hub_url(pretrained_model_name_or_path, filename, revision=revision) + url = huggingface_hub.hf_hub_url(pretrained_model_name_or_path, filename, revision=_revision) if is_cached(filename) or requests.head(url, allow_redirects=True, proxies=proxies, headers=headers): break if sharded: @@ -295,7 +295,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa with open(map_filename) as f: map_data = json.load(f) filenames = set(map_data["weight_map"].values()) - urls = [huggingface_hub.hf_hub_url(pretrained_model_name_or_path, n, revision=revision) for n in filenames] + urls = [huggingface_hub.hf_hub_url(pretrained_model_name_or_path, n, revision=_revision) for n in filenames] if not force_download: urls = [u for u, n in zip(urls, filenames) if not is_cached(n)] if not urls: @@ -494,7 +494,7 @@ def aria2_hook(pretrained_model_name_or_path: str, force_download=False, cache_d filename = transformers.modeling_utils.WEIGHTS_INDEX_NAME if sharded else transformers.modeling_utils.WEIGHTS_NAME except AttributeError: return - url = huggingface_hub.hf_hub_url(pretrained_model_name_or_path, filename, revision=revision) + url = huggingface_hub.hf_hub_url(pretrained_model_name_or_path, filename, revision=_revision) if is_cached(url) or requests.head(url, allow_redirects=True, proxies=proxies, headers=headers): break if sharded: @@ -508,7 +508,7 @@ def aria2_hook(pretrained_model_name_or_path: str, force_download=False, cache_d with open(map_filename) as f: map_data = json.load(f) filenames = set(map_data["weight_map"].values()) - urls = [huggingface_hub.hf_hub_url(pretrained_model_name_or_path, n, revision=revision) for n in filenames] + urls = 
[huggingface_hub.hf_hub_url(pretrained_model_name_or_path, n, revision=_revision) for n in filenames] if not force_download: urls = [u for u in urls if not is_cached(u)] if not urls: @@ -555,7 +555,7 @@ def get_num_shards(filename): def get_sharded_checkpoint_num_tensors(pretrained_model_name_or_path, filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, local_files_only=False, use_auth_token=None, user_agent=None, revision=None, **kwargs): import transformers.modeling_utils import torch - shard_paths, _ = transformers.modeling_utils.get_checkpoint_shard_files(pretrained_model_name_or_path, filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=revision) + shard_paths, _ = transformers.modeling_utils.get_checkpoint_shard_files(pretrained_model_name_or_path, filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=_revision) return list(itertools.chain(*(torch.load(p, map_location="cpu").keys() for p in shard_paths))) #==================================================================# From 257a535be59a6ebc57eabf26089bbfdd85310994 Mon Sep 17 00:00:00 2001 From: Henk Date: Tue, 31 Jan 2023 05:17:34 +0100 Subject: [PATCH 02/12] Revision Fixes Fixes --- utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils.py b/utils.py index 79f90b11..01c8b2a3 100644 --- a/utils.py +++ b/utils.py @@ -460,6 +460,7 @@ def aria2_hook(pretrained_model_name_or_path: str, force_download=False, cache_d import transformers import transformers.modeling_utils from huggingface_hub import HfFolder + _revision = args.revision if args.revision is not None else huggingface_hub.constants.DEFAULT_REVISION if shutil.which("aria2c") is None: # Don't do anything if aria2 is not installed return if local_files_only: # If local_files_only is true, we obviously don't need to download anything @@ -555,6 +556,7 @@ def get_num_shards(filename): def get_sharded_checkpoint_num_tensors(pretrained_model_name_or_path, filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, local_files_only=False, use_auth_token=None, user_agent=None, revision=None, **kwargs): import transformers.modeling_utils import torch + _revision = args.revision if args.revision is not None else huggingface_hub.constants.DEFAULT_REVISION shard_paths, _ = transformers.modeling_utils.get_checkpoint_shard_files(pretrained_model_name_or_path, filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=_revision) return list(itertools.chain(*(torch.load(p, map_location="cpu").keys() for p in shard_paths))) From 661bd5c99e7a03c94bfea436ad5f04614f2953f2 Mon Sep 17 00:00:00 2001 From: henk717 Date: Tue, 31 Jan 2023 19:24:19 +0100 Subject: [PATCH 03/12] Hide Pygmalion 6B Dev, currently only supported on the GPU --- colab/TPU.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/colab/TPU.ipynb b/colab/TPU.ipynb index 60664780..9e2375e2 100644 --- a/colab/TPU.ipynb +++ b/colab/TPU.ipynb @@ -66,7 +66,7 @@ "#@title <-- Select your model below and then click this to start KoboldAI\n", "#@markdown You can find a description of the models below 
along with instructions on how to start KoboldAI.\n", "\n", - "Model = \"Nerys 13B V2\" #@param [\"Nerys 13B V2\", \"Erebus 13B\", \"Janeway 13B\", \"Shinen 13B\", \"Skein 20B\", \"Erebus 20B\", \"Skein 6B\", \"Janeway 6B\", \"Adventure 6B\", \"Shinen 6B\", \"Pygmalion 6B\", \"Pygmalion 6B Dev\", \"Lit V2 6B\", \"Lit 6B\", \"NeoX 20B\", \"OPT 13B\", \"Fairseq Dense 13B\", \"GPT-J-6B\"] {allow-input: true}\n", + "Model = \"Nerys 13B V2\" #@param [\"Nerys 13B V2\", \"Erebus 13B\", \"Janeway 13B\", \"Shinen 13B\", \"Skein 20B\", \"Erebus 20B\", \"Skein 6B\", \"Janeway 6B\", \"Adventure 6B\", \"Shinen 6B\", \"Pygmalion 6B\", \"Lit V2 6B\", \"Lit 6B\", \"NeoX 20B\", \"OPT 13B\", \"Fairseq Dense 13B\", \"GPT-J-6B\"] {allow-input: true}\n", "Version = \"Official\" #@param [\"Official\", \"United\"] {allow-input: true}\n", "Provider = \"Cloudflare\" #@param [\"Localtunnel\", \"Cloudflare\"]\n", "use_google_drive = True #@param {type:\"boolean\"}\n", From b58daa1ba18a7fef38f0da982d948d3807dda92f Mon Sep 17 00:00:00 2001 From: Henk Date: Fri, 10 Feb 2023 19:11:13 +0100 Subject: [PATCH 04/12] Pin Flask-cloudflared --- environments/huggingface.yml | 2 +- environments/rocm.yml | 2 +- requirements.txt | 2 +- requirements_mtj.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/environments/huggingface.yml b/environments/huggingface.yml index 341a8e87..8b36a529 100644 --- a/environments/huggingface.yml +++ b/environments/huggingface.yml @@ -24,7 +24,7 @@ dependencies: - termcolor - psutil - pip: - - flask-cloudflared + - flask-cloudflared==0.0.10 - flask-ngrok - lupa==1.10 - transformers==4.24.0 diff --git a/environments/rocm.yml b/environments/rocm.yml index 3e50c565..6605709f 100644 --- a/environments/rocm.yml +++ b/environments/rocm.yml @@ -23,7 +23,7 @@ dependencies: - pip: - --extra-index-url https://download.pytorch.org/whl/rocm5.1.1 - torch==1.12.1+rocm5.1.1 - - flask-cloudflared + - flask-cloudflared==0.0.10 - flask-ngrok - lupa==1.10 - transformers==4.24.0 diff --git a/requirements.txt b/requirements.txt index a2854835..bed8308a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ Flask Flask-SocketIO requests torch >= 1.9, < 1.13 -flask-cloudflared +flask-cloudflared==0.0.10 flask-ngrok eventlet dnspython==2.2.1 diff --git a/requirements_mtj.txt b/requirements_mtj.txt index f3dfe339..dc6e06dd 100644 --- a/requirements_mtj.txt +++ b/requirements_mtj.txt @@ -11,7 +11,7 @@ progressbar2 git+https://github.com/VE-FORBRYDERNE/mesh-transformer-jax@ck flask Flask-SocketIO -flask-cloudflared >= 0.0.5 +flask-cloudflared==0.0.10 flask-ngrok eventlet dnspython==2.2.1 From cc01ad730ae428c64202220ac5cd141a501ad677 Mon Sep 17 00:00:00 2001 From: Henk Date: Sat, 11 Feb 2023 11:20:21 +0100 Subject: [PATCH 05/12] Don't install safetensors for MTJ --- requirements_mtj.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements_mtj.txt b/requirements_mtj.txt index dc6e06dd..c03a4c5d 100644 --- a/requirements_mtj.txt +++ b/requirements_mtj.txt @@ -21,5 +21,4 @@ bleach==4.1.0 flask-session marshmallow>=3.13 apispec-webframeworks -safetensors loguru From 00a8806e0d5c0c26c9820f5f13d4f000c255cfc1 Mon Sep 17 00:00:00 2001 From: YellowRoseCx <80486540+YellowRoseCx@users.noreply.github.com> Date: Mon, 13 Feb 2023 20:36:41 -0600 Subject: [PATCH 06/12] Added tooltip to WI noun section --- templates/templates.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/templates/templates.html b/templates/templates.html index 78aa4b14..4f16ff66 100644 --- 
a/templates/templates.html
+++ b/templates/templates.html
@@ -1,4 +1,5 @@
+
@@ -24,7 +25,7 @@ contenteditable="true" data-placeholder="Person" spellcheck="false" - > + > help_icon
X From 150d0ea6954076a2689ee1b6127724ae96495919 Mon Sep 17 00:00:00 2001 From: jojorne Date: Fri, 17 Feb 2023 05:49:07 -0300 Subject: [PATCH 07/12] Don't hide genseqs on submit memory. --- static/application.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/static/application.js b/static/application.js index 34a29066..3e7ee4b5 100644 --- a/static/application.js +++ b/static/application.js @@ -946,7 +946,9 @@ function _dosubmit() { submit_throttle = null; input_text.val(""); hideMessage(); - hidegenseqs(); + if(!memorymode){ + hidegenseqs(); + } socket.send({'cmd': 'submit', 'allowabort': !disallow_abort, 'actionmode': adventure ? action_mode : 0, 'chatname': chatmode ? chat_name.val() : undefined, 'data': txt}); } From aa6ce9088b2dbe042bd24a162f4f039730e8bcf2 Mon Sep 17 00:00:00 2001 From: Henk Date: Sat, 18 Feb 2023 18:47:15 +0100 Subject: [PATCH 08/12] Fix vscode artifacting --- aiserver.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/aiserver.py b/aiserver.py index c5b3a425..68190b37 100644 --- a/aiserver.py +++ b/aiserver.py @@ -1792,7 +1792,6 @@ def get_layer_count(model, directory=""): model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=args.revision, cache_dir="cache") else: model_config = AutoConfig.from_pretrained(model, revision=args.revision, cache_dir="cache") - model_config = AutoConfig.from_pretrained(model, revision=args.revision, cache_dir="cache") try: if ((utils.HAS_ACCELERATE and model_config.model_type != 'gpt2') or model_config.model_type in ("gpt_neo", "gptj", "xglm", "opt")) and not koboldai_vars.nobreakmodel: return utils.num_layers(model_config) @@ -3130,7 +3129,6 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") except Exception as e: tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: @@ -3148,7 +3146,6 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache") except Exception as e: tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: @@ -3179,7 +3176,6 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache") except Exception as e: tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: @@ -3264,7 +3260,6 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, 
initial_load=Fal else: from transformers import GPT2Tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") else: from transformers import PreTrainedModel from transformers import modeling_utils @@ -3678,7 +3673,6 @@ def lua_decode(tokens): from transformers import GPT2Tokenizer global tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") return utils.decodenewlines(tokenizer.decode(tokens)) #==================================================================# @@ -3691,7 +3685,6 @@ def lua_encode(string): from transformers import GPT2Tokenizer global tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") return tokenizer.encode(utils.encodenewlines(string), max_length=int(4e9), truncation=True) #==================================================================# @@ -4850,24 +4843,18 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, if(os.path.isdir(tokenizer_id)): try: tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") except: tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) elif(os.path.isdir("models/{}".format(tokenizer_id.replace('/', '_')))): try: tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache") - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache") except: tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False) - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False) else: try: tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") except: tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) except: logger.warning(f"Unknown tokenizer {repr(tokenizer_id)}") koboldai_vars.api_tokenizer_id = tokenizer_id @@ -5243,7 +5230,6 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, from transformers import GPT2Tokenizer global tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") lnheader = len(tokenizer._koboldai_header) From cd566caf20d03d180001360d2583376458679fcf Mon Sep 17 00:00:00 2001 From: Henk Date: Sun, 19 Feb 2023 00:51:50 +0100 Subject: [PATCH 09/12] 
Revision Fixes (Removes the workaround) --- aiserver.py | 96 ++++++++++++++++++++++---------------------- koboldai_settings.py | 6 +-- utils.py | 6 +-- 3 files changed, 54 insertions(+), 54 deletions(-) diff --git a/aiserver.py b/aiserver.py index 68190b37..af8f8c66 100644 --- a/aiserver.py +++ b/aiserver.py @@ -1783,15 +1783,15 @@ def get_layer_count(model, directory=""): model = directory from transformers import AutoConfig if(os.path.isdir(model.replace('/', '_'))): - model_config = AutoConfig.from_pretrained(model.replace('/', '_'), revision=args.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model.replace('/', '_'), revision=koboldai_vars.revision, cache_dir="cache") elif(is_model_downloaded(model)): - model_config = AutoConfig.from_pretrained("models/{}".format(model.replace('/', '_')), revision=args.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained("models/{}".format(model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") elif(os.path.isdir(directory)): - model_config = AutoConfig.from_pretrained(directory, revision=args.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(directory, revision=koboldai_vars.revision, cache_dir="cache") elif(os.path.isdir(koboldai_vars.custmodpth.replace('/', '_'))): - model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=args.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=koboldai_vars.revision, cache_dir="cache") else: - model_config = AutoConfig.from_pretrained(model, revision=args.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model, revision=koboldai_vars.revision, cache_dir="cache") try: if ((utils.HAS_ACCELERATE and model_config.model_type != 'gpt2') or model_config.model_type in ("gpt_neo", "gptj", "xglm", "opt")) and not koboldai_vars.nobreakmodel: return utils.num_layers(model_config) @@ -2764,19 +2764,19 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal from transformers import AutoConfig if(os.path.isdir(koboldai_vars.custmodpth.replace('/', '_'))): try: - model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=args.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=koboldai_vars.revision, cache_dir="cache") koboldai_vars.model_type = model_config.model_type except ValueError as e: koboldai_vars.model_type = "not_found" elif(os.path.isdir("models/{}".format(koboldai_vars.custmodpth.replace('/', '_')))): try: - model_config = AutoConfig.from_pretrained("models/{}".format(koboldai_vars.custmodpth.replace('/', '_')), revision=args.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained("models/{}".format(koboldai_vars.custmodpth.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") koboldai_vars.model_type = model_config.model_type except ValueError as e: koboldai_vars.model_type = "not_found" else: try: - model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") koboldai_vars.model_type = model_config.model_type except ValueError as e: koboldai_vars.model_type = "not_found" @@ -2876,7 +2876,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, 
initial_load=Fal print(tokenizer_id, koboldai_vars.newlinemode) - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache") loadsettings() koboldai_vars.colaburl = url or koboldai_vars.colaburl @@ -3061,19 +3061,19 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal with(maybe_use_float16()): try: if os.path.exists(koboldai_vars.custmodpth): - model = GPT2LMHeadModel.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") elif os.path.exists(os.path.join("models/", koboldai_vars.custmodpth)): - model = GPT2LMHeadModel.from_pretrained(os.path.join("models/", koboldai_vars.custmodpth), revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(os.path.join("models/", koboldai_vars.custmodpth), revision=args.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(os.path.join("models/", koboldai_vars.custmodpth), revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(os.path.join("models/", koboldai_vars.custmodpth), revision=koboldai_vars.revision, cache_dir="cache") else: - model = GPT2LMHeadModel.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") raise e - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") model.save_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), max_shard_size="500MiB") tokenizer.save_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_'))) koboldai_vars.modeldim = get_hidden_size_from_model(model) @@ -3120,38 +3120,38 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal lowmem = {} if(os.path.isdir(koboldai_vars.custmodpth)): try: - tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") except Exception as e: try: - tokenizer = 
GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", **lowmem) elif(os.path.isdir("models/{}".format(koboldai_vars.model.replace('/', '_')))): try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") except Exception as e: try: - tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", **lowmem) else: old_rebuild_tensor = torch._utils._rebuild_tensor def new_rebuild_tensor(storage: Union[torch_lazy_loader.LazyTensor, torch.Storage], storage_offset, shape, stride): @@ -3167,21 +3167,21 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal 
torch._utils._rebuild_tensor = new_rebuild_tensor try: - tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache") except Exception as e: try: - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache", **lowmem) torch._utils._rebuild_tensor = old_rebuild_tensor @@ -3198,13 +3198,13 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal import huggingface_hub legacy = packaging.version.parse(transformers_version) < packaging.version.parse("4.22.0.dev0") # Save the config.json - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, transformers.configuration_utils.CONFIG_NAME, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.configuration_utils.CONFIG_NAME)) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, transformers.configuration_utils.CONFIG_NAME, revision=koboldai_vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.configuration_utils.CONFIG_NAME)) if(utils.num_shards is None): # Save the pytorch_model.bin of an unsharded model try: - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, transformers.modeling_utils.WEIGHTS_NAME, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_NAME)) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, transformers.modeling_utils.WEIGHTS_NAME, revision=koboldai_vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_NAME)) except: - 
shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, "model.safetensors", revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), "model.safetensors")) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, "model.safetensors", revision=koboldai_vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), "model.safetensors")) else: with open(utils.from_pretrained_index_filename) as f: map_data = json.load(f) @@ -3213,7 +3213,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal shutil.move(os.path.realpath(utils.from_pretrained_index_filename), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_INDEX_NAME)) # Then save the pytorch_model-#####-of-#####.bin files for filename in filenames: - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, filename, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), filename)) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, filename, revision=koboldai_vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), filename)) shutil.rmtree("cache/") if(koboldai_vars.badwordsids is koboldai_settings.badwordsids_default and koboldai_vars.model_type not in ("gpt2", "gpt_neo", "gptj")): @@ -3259,7 +3259,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal else: from transformers import GPT2Tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") else: from transformers import PreTrainedModel from transformers import modeling_utils @@ -3672,7 +3672,7 @@ def lua_decode(tokens): if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") return utils.decodenewlines(tokenizer.decode(tokens)) #==================================================================# @@ -3684,7 +3684,7 @@ def lua_encode(string): if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") return tokenizer.encode(utils.encodenewlines(string), max_length=int(4e9), truncation=True) #==================================================================# @@ -4842,19 +4842,19 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False, try: if(os.path.isdir(tokenizer_id)): try: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache") except: - tokenizer = 
AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) elif(os.path.isdir("models/{}".format(tokenizer_id.replace('/', '_')))): try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") except: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) else: try: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache") except: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) except: logger.warning(f"Unknown tokenizer {repr(tokenizer_id)}") koboldai_vars.api_tokenizer_id = tokenizer_id @@ -5229,7 +5229,7 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") lnheader = len(tokenizer._koboldai_header) diff --git a/koboldai_settings.py b/koboldai_settings.py index 674e00ae..cc8e6290 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -649,7 +649,7 @@ class model_settings(settings): no_save_variables = ['modelconfig', 'custmodpth', 'generated_tkns', 'loaded_layers', 'total_layers', 'total_download_chunks', 'downloaded_chunks', 'presets', 'default_preset', 'welcome', 'welcome_default', 'simple_randomness', 'simple_creativity', 'simple_repitition', - 'badwordsids', 'uid_presets', 'revision', 'model', 'model_type', 'lazy_load', 'fp32_model', 'modeldim', 'horde_wait_time', 'horde_queue_position', 'horde_queue_size', 'newlinemode', 'tqdm_progress', 'tqdm_rem_time', '_tqdm'] + 'badwordsids', 'uid_presets', 'model', 'model_type', 'lazy_load', 'fp32_model', 'modeldim', 'horde_wait_time', 'horde_queue_position', 'horde_queue_size', 'newlinemode', 'tqdm_progress', 'tqdm_rem_time', '_tqdm'] settings_name = "model" default_settings = {"rep_pen" : 1.1, "rep_pen_slope": 0.7, "rep_pen_range": 1024, "temp": 0.5, "top_p": 0.9, "top_k": 0, "top_a": 0.0, "tfs": 1.0, "typical": 1.0, "sampler_order": [6,0,1,2,3,4,5]} @@ -707,7 +707,6 @@ class model_settings(settings): self.sampler_order = [6, 0, 1, 2, 3, 4, 5] self.newlinemode = "n" self.lazy_load = True # Whether or not to use torch_lazy_loader.py for transformers models in order to reduce CPU memory usage - self.revision = None self.presets = [] # Holder for presets self.selected_preset = "" self.uid_presets = [] @@ -1124,7 +1123,7 @@ class story_settings(settings): class user_settings(settings): local_only_variables = ['importjs'] - 
no_save_variables = ['importnum', 'importjs', 'loadselect', 'spselect', 'svowname', 'saveow', 'laststory', 'sid'] + no_save_variables = ['importnum', 'importjs', 'loadselect', 'spselect', 'svowname', 'saveow', 'laststory', 'sid', "revision"] settings_name = "user" def __init__(self, socketio): self._socketio = socketio @@ -1171,6 +1170,7 @@ class user_settings(settings): self.screenshot_author_name = "Anonymous" self.screenshot_use_boring_colors = False self.oaiurl = "" # OpenAI API URL + self.revision = None self.oaiengines = "https://api.openai.com/v1/engines" self.url = "https://api.inferkit.com/v1/models/standard/generate" # InferKit API URL self.colaburl = "" # Ngrok url for Google Colab mode diff --git a/utils.py b/utils.py index e713cc45..87ee77a5 100644 --- a/utils.py +++ b/utils.py @@ -286,7 +286,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa if token is None: raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.") _cache_dir = str(cache_dir) if cache_dir is not None else transformers.TRANSFORMERS_CACHE - _revision = args.revision if args.revision is not None else huggingface_hub.constants.DEFAULT_REVISION + _revision = koboldai_vars.revision if koboldai_vars.revision is not None else huggingface_hub.constants.DEFAULT_REVISION sharded = False headers = {"user-agent": transformers.file_utils.http_user_agent(user_agent)} if use_auth_token: @@ -485,7 +485,7 @@ def aria2_hook(pretrained_model_name_or_path: str, force_download=False, cache_d import transformers import transformers.modeling_utils from huggingface_hub import HfFolder - _revision = args.revision if args.revision is not None else huggingface_hub.constants.DEFAULT_REVISION + _revision = koboldai_vars.revision if koboldai_vars.revision is not None else huggingface_hub.constants.DEFAULT_REVISION if shutil.which("aria2c") is None: # Don't do anything if aria2 is not installed return if local_files_only: # If local_files_only is true, we obviously don't need to download anything @@ -581,7 +581,7 @@ def get_num_shards(filename): def get_sharded_checkpoint_num_tensors(pretrained_model_name_or_path, filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, local_files_only=False, use_auth_token=None, user_agent=None, revision=None, **kwargs): import transformers.modeling_utils import torch - _revision = args.revision if args.revision is not None else huggingface_hub.constants.DEFAULT_REVISION + _revision = koboldai_vars.revision if koboldai_vars.revision is not None else huggingface_hub.constants.DEFAULT_REVISION shard_paths, _ = transformers.modeling_utils.get_checkpoint_shard_files(pretrained_model_name_or_path, filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=_revision) return list(itertools.chain(*(torch.load(p, map_location="cpu").keys() for p in shard_paths))) From 117f0659c3209d4415e76796970bdb553b215454 Mon Sep 17 00:00:00 2001 From: Llama <34464159+pi6am@users.noreply.github.com> Date: Sat, 18 Feb 2023 23:41:17 -0800 Subject: [PATCH 10/12] Fix exception using Save As from the classic UI The `saveas` method was modified to take a data dict but one of the else blocks still referred to the previous `name` parameter. Assign to `name` to fix the `NameError: name 'name' is not defined` exception. 
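For context, a minimal sketch of the failure mode (simplified and hypothetical, not the actual KoboldAI code): the parameter was renamed from `name` to a `data` dict, but one else branch still referenced the old name, so it raised NameError at runtime; binding `name` from the dict up front, as this patch does, resolves it.

    # Hypothetical reduction of the bug; the real code lives in aiserver.py's saveas().
    def saveas_buggy(data):
        story_name = data['name']      # new signature: takes a dict
        if data.get('overwrite'):      # illustrative condition only
            return story_name
        else:
            return name                # NameError: the old 'name' parameter no longer exists

    def saveas_fixed(data):
        name = data['name']            # bind 'name' first, as this patch does
        story_name = name              # stands in for koboldai_vars.story_name = name
        return name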
---
 aiserver.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 68190b37..e89ba22a 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -7162,8 +7162,8 @@ def exitModes():
 # Launch in-browser save prompt
 #==================================================================#
 def saveas(data):
-    
-    koboldai_vars.story_name = data['name']
+    name = data['name']
+    koboldai_vars.story_name = name
     if not data['pins']:
         koboldai_vars.actions.clear_all_options()
     # Check if filename exists already

From 9e6a5db745dce23ead1d6b08a6207bc3c68110bb Mon Sep 17 00:00:00 2001
From: Henk
Date: Sun, 19 Feb 2023 16:22:26 +0100
Subject: [PATCH 11/12] UI1 Botname

---
 aiserver.py           | 14 +++++++++++++-
 koboldai_settings.py  |  1 +
 static/application.js |  9 +++++++--
 static/custom.css     |  7 +++++++
 templates/index.html  |  1 +
 5 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 782a929c..cd4854ba 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -1337,6 +1337,8 @@ def processsettings(js):
         koboldai_vars.chatmode = js["chatmode"]
     if("chatname" in js):
         koboldai_vars.chatname = js["chatname"]
+    if("botname" in js):
+        koboldai_vars.botname = js["botname"]
     if("dynamicscan" in js):
         koboldai_vars.dynamicscan = js["dynamicscan"]
     if("nopromptgen" in js):
@@ -3858,6 +3860,7 @@ def lua_has_setting(setting):
         "useprompt",
         "chatmode",
         "chatname",
+        "botname",
         "adventure",
         "dynamicscan",
         "nopromptgen",
@@ -4174,6 +4177,7 @@ def do_connect():
         return
     logger.debug("{0}Client connected!{1}".format(colors.GREEN, colors.END))
     emit('from_server', {'cmd': 'setchatname', 'data': koboldai_vars.chatname}, room="UI_1")
+    emit('from_server', {'cmd': 'setbotname', 'data': koboldai_vars.botname}, room="UI_1")
     emit('from_server', {'cmd': 'setanotetemplate', 'data': koboldai_vars.authornotetemplate}, room="UI_1")
     emit('from_server', {'cmd': 'connected', 'smandelete': koboldai_vars.smandelete, 'smanrename': koboldai_vars.smanrename, 'modelname': getmodelname()}, room="UI_1")
     if(koboldai_vars.host):
@@ -4239,8 +4243,10 @@ def get_message(msg):
             if(type(msg['chatname']) is not str):
                 raise ValueError("Chatname must be a string")
             koboldai_vars.chatname = msg['chatname']
+            koboldai_vars.botname = msg['botname']
             settingschanged()
             emit('from_server', {'cmd': 'setchatname', 'data': koboldai_vars.chatname}, room="UI_1")
+            emit('from_server', {'cmd': 'setbotname', 'data': koboldai_vars.botname}, room="UI_1")
         koboldai_vars.recentrng = koboldai_vars.recentrngm = None
         actionsubmit(msg['data'], actionmode=msg['actionmode'])
     elif(koboldai_vars.mode == "edit"):
@@ -4258,8 +4264,10 @@ def get_message(msg):
             if(type(msg['chatname']) is not str):
                 raise ValueError("Chatname must be a string")
             koboldai_vars.chatname = msg['chatname']
+            koboldai_vars.botname = msg['botname']
             settingschanged()
             emit('from_server', {'cmd': 'setchatname', 'data': koboldai_vars.chatname}, room="UI_1")
+            emit('from_server', {'cmd': 'setbotname', 'data': koboldai_vars.botname}, room="UI_1")
         actionretry(msg['data'])
     # Back/Undo Action
     elif(msg['cmd'] == 'back'):
@@ -4875,9 +4883,13 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False,
 
     # "Chat" mode
     if(koboldai_vars.chatmode and koboldai_vars.gamestarted):
+        if(koboldai_vars.botname):
+            botname = (koboldai_vars.botname + ":")
+        else:
+            botname = ""
         data = re.sub(r'\n+', ' ', data)
         if(len(data)):
-            data = f"\n{koboldai_vars.chatname}: {data}\n"
+            data = f"\n{koboldai_vars.chatname}: {data}\n{botname}"
 
     # If we're not continuing, store a copy of the raw input
     if(data != ""):
diff --git a/koboldai_settings.py b/koboldai_settings.py
index cc8e6290..d85e2919 100644
--- a/koboldai_settings.py
+++ b/koboldai_settings.py
@@ -870,6 +870,7 @@ class story_settings(settings):
         self.useprompt = False # Whether to send the full prompt with every submit action
         self.chatmode = False
         self.chatname = "You"
+        self.botname = "Bot"
         self.adventure = False
         self.actionmode = 0
         self.storymode = 0
diff --git a/static/application.js b/static/application.js
index 3e7ee4b5..df51b06e 100644
--- a/static/application.js
+++ b/static/application.js
@@ -949,7 +949,7 @@ function _dosubmit() {
 	if(!memorymode){
 		hidegenseqs();
 	}
-	socket.send({'cmd': 'submit', 'allowabort': !disallow_abort, 'actionmode': adventure ? action_mode : 0, 'chatname': chatmode ? chat_name.val() : undefined, 'data': txt});
+	socket.send({'cmd': 'submit', 'allowabort': !disallow_abort, 'actionmode': adventure ? action_mode : 0, 'chatname': chatmode ? chat_name.val() : undefined, 'botname': chatmode ? bot_name.val() : undefined, 'data': txt});
 }
 
 function changemode() {
@@ -1492,8 +1492,10 @@ function setmodevisibility(state) {
 
 function setchatnamevisibility(state) {
 	if(state){ // Enabling
 		show([chat_name]);
+		show([bot_name]);
 	} else{ // Disabling
 		hide([chat_name]);
+		hide([bot_name]);
 	}
 }
@@ -2266,6 +2268,7 @@ $(document).ready(function(){
 	input_text      = $('#input_text');
 	message_text    = $('#messagefield');
 	chat_name       = $('#chatname');
+	bot_name        = $('#botname');
 	settings_menu   = $("#settingsmenu");
 	format_menu     = $('#formatmenu');
 	anote_menu      = $('#anoterowcontainer');
@@ -2869,6 +2872,8 @@ $(document).ready(function(){
 			hidegenseqs();
 		} else if(msg.cmd == "setchatname") {
 			chat_name.val(msg.data);
+		} else if(msg.cmd == "setbotname") {
+			bot_name.val(msg.data);
 		} else if(msg.cmd == "setlabelnumseq") {
 			// Update setting label with value from server
 			$("#setnumseqcur").val(msg.data);
@@ -3183,7 +3188,7 @@ $(document).ready(function(){
 	button_actretry.on("click", function(ev) {
 		beginStream();
 		hideMessage();
-		socket.send({'cmd': 'retry', 'chatname': chatmode ? chat_name.val() : undefined, 'data': ''});
+		socket.send({'cmd': 'retry', 'chatname': chatmode ? chat_name.val() : undefined, 'botname': chatmode ? bot_name.val() : undefined, 'data': ''});
 		hidegenseqs();
 	});
 
diff --git a/static/custom.css b/static/custom.css
index d4bfe872..3e266701 100644
--- a/static/custom.css
+++ b/static/custom.css
@@ -79,6 +79,13 @@ body.connected #topmenu, #topmenu.always-available {
 	margin-left: 10px;
 }
 
+#botname {
+	background-color: #404040;
+	color: #ffffff;
+	width: 200px;
+	margin-left: 10px;
+}
+
 #menuitems {
 	display: flex;
 	width: 100%;
diff --git a/templates/index.html b/templates/index.html
index d1995373..af99390f 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -144,6 +144,7 @@
+
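Taken together, the chat-mode changes above frame a submission roughly like this
before generation (illustrative sketch only; names follow the hunks above, and the
sample values assume the new defaults chatname="You" and botname="Bot"):

    botname = (koboldai_vars.botname + ":") if koboldai_vars.botname else ""
    data = re.sub(r'\n+', ' ', data)  # collapse newlines in the user's message
    if len(data):
        data = f"\n{koboldai_vars.chatname}: {data}\n{botname}"
    # e.g. "\nYou: Hi there\nBot:"; the trailing "Bot:" cues the model to reply as the bot.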

From b3083034ea60352c0afad3f7278560583bc533ec Mon Sep 17 00:00:00 2001
From: Henk
Date: Sun, 19 Feb 2023 17:12:14 +0100
Subject: [PATCH 12/12] UI2 Botname

---
 templates/settings flyout.html | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/templates/settings flyout.html b/templates/settings flyout.html
index e7e41bd8..aae2335e 100644
--- a/templates/settings flyout.html
+++ b/templates/settings flyout.html
@@ -109,6 +109,20 @@
+
+
+
+					Bot Name:
+					help_icon
+
+
+
+
+
+
+
+
+
 					Download debug dump
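
For reference, the revision plumbing earlier in this series reduces to the following
pattern when loading a tokenizer (a rough sketch, assuming `koboldai_vars.revision`
is populated from the user settings; None falls back to the hub's default branch):

    from transformers import AutoTokenizer

    revision = koboldai_vars.revision  # None -> huggingface_hub default revision
    try:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=revision, cache_dir="cache")
    except Exception:
        # Some models only ship a slow tokenizer; retry without the fast implementation.
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=revision, cache_dir="cache", use_fast=False)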