From a046db4dedb64c287e882cbf83a32cd195c7ac4a Mon Sep 17 00:00:00 2001 From: henk717 Date: Tue, 24 Jan 2023 13:16:49 +0100 Subject: [PATCH 1/9] Created with Colaboratory --- colab/GPU.ipynb | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/colab/GPU.ipynb b/colab/GPU.ipynb index 527ac3aa..ca71f3a2 100644 --- a/colab/GPU.ipynb +++ b/colab/GPU.ipynb @@ -5,7 +5,8 @@ "colab": { "name": "ColabKobold GPU", "private_outputs": true, - "provenance": [] + "provenance": [], + "include_colab_link": true }, "kernelspec": { "display_name": "Python 3", @@ -17,6 +18,16 @@ "accelerator": "GPU" }, "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, { "cell_type": "markdown", "metadata": { @@ -71,7 +82,7 @@ "#@title <-- Select your model below and then click this to start KoboldAI\n", "#@markdown You can find a description of the models below along with instructions on how to start KoboldAI.\n", "\n", - "Model = \"Nerys V2 6B\" #@param [\"Nerys V2 6B\", \"Erebus 6B\", \"Skein 6B\", \"Janeway 6B\", \"Adventure 6B\", \"Pygmalion 6B\", \"Lit V2 6B\", \"Lit 6B\", \"Shinen 6B\", \"Nerys 2.7B\", \"AID 2.7B\", \"Erebus 2.7B\", \"Janeway 2.7B\", \"Picard 2.7B\", \"Horni LN 2.7B\", \"Horni 2.7B\", \"Shinen 2.7B\", \"OPT 2.7B\", \"Fairseq Dense 2.7B\", \"Neo 2.7B\"] {allow-input: true}\n", + "Model = \"Nerys V2 6B\" #@param [\"Nerys V2 6B\", \"Erebus 6B\", \"Skein 6B\", \"Janeway 6B\", \"Adventure 6B\", \"Pygmalion 6B\", \"Pygmalion 6B Dev\", \"Lit V2 6B\", \"Lit 6B\", \"Shinen 6B\", \"Nerys 2.7B\", \"AID 2.7B\", \"Erebus 2.7B\", \"Janeway 2.7B\", \"Picard 2.7B\", \"Horni LN 2.7B\", \"Horni 2.7B\", \"Shinen 2.7B\", \"OPT 2.7B\", \"Fairseq Dense 2.7B\", \"Neo 2.7B\"] {allow-input: true}\n", "Version = \"Official\" #@param [\"Official\", \"United\"] {allow-input: true}\n", "Provider = \"Cloudflare\" #@param [\"Localtunnel\", \"Cloudflare\"]\n", "use_google_drive = True #@param {type:\"boolean\"}\n", @@ -87,6 +98,8 @@ " if not os.path.exists(\"/content/drive/MyDrive/\"):\n", " os.mkdir(\"/content/drive/MyDrive/\")\n", "\n", + "Revision = \"\"\n", + "\n", "if Model == \"Nerys V2 6B\":\n", " Model = \"KoboldAI/OPT-6B-nerys-v2\"\n", " path = \"\"\n", " download = \"\"\n", @@ -111,6 +124,11 @@ " Model = \"PygmalionAI/pygmalion-6b\"\n", " path = \"\"\n", " download = \"\"\n", + "elif Model == \"Pygmalion 6B Dev\":\n", + " Model = \"PygmalionAI/pygmalion-6b\"\n", + " Revision = \"dev\"\n", + " path = \"\"\n", + " download = \"\"\n", "elif Model == \"Lit V2 6B\":\n", " Model = \"hakurei/litv2-6B-rev3\"\n", " path = \"\"\n", " download = \"\"\n", @@ -173,7 +191,7 @@ "else:\n", " tunnel = \"\"\n", "\n", - "!wget https://koboldai.org/ckds -O - | bash /dev/stdin -m $Model -g $Version $tunnel" + "!wget https://koboldai.org/ckds -O - | bash /dev/stdin -m $Model -g $Version $tunnel $Revision" ], "execution_count": null, "outputs": [] From a185cbd0150dd03e09b9a2828b09e4f711c846c9 Mon Sep 17 00:00:00 2001 From: henk717 Date: Tue, 24 Jan 2023 13:31:45 +0100 Subject: [PATCH 2/9] Fix Defaults --- colab/GPU.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/colab/GPU.ipynb b/colab/GPU.ipynb index ca71f3a2..c2a686c4 100644 --- a/colab/GPU.ipynb +++ b/colab/GPU.ipynb @@ -191,7 +191,7 @@ "else:\n", " tunnel = \"\"\n", "\n", - "!wget https://koboldai.org/ckds -O - | bash /dev/stdin -m $Model -g $Version $tunnel $Revision" + "!wget https://koboldai.org/ckds -O - | bash /dev/stdin -m $Model -g $Version $tunnel -r $Revision" ],
"execution_count": null, "outputs": [] From 031c06347f0551aa031d39b08c5b7297373ce018 Mon Sep 17 00:00:00 2001 From: henk717 Date: Tue, 24 Jan 2023 13:51:08 +0100 Subject: [PATCH 3/9] Streamlining Revision Support --- colab/GPU.ipynb | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/colab/GPU.ipynb b/colab/GPU.ipynb index c2a686c4..ba86b65e 100644 --- a/colab/GPU.ipynb +++ b/colab/GPU.ipynb @@ -5,8 +5,7 @@ "colab": { "name": "ColabKobold GPU", "private_outputs": true, - "provenance": [], - "include_colab_link": true + "provenance": [] }, "kernelspec": { "display_name": "Python 3", @@ -18,16 +17,6 @@ "accelerator": "GPU" }, "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "view-in-github", - "colab_type": "text" - }, - "source": [ - "\"Open" - ] - }, { "cell_type": "markdown", "metadata": { @@ -124,10 +113,12 @@ " Model = \"PygmalionAI/pygmalion-6b\"\n", " path = \"\"\n", " download = \"\"\n", + " Version = \"United\"\n", "elif Model == \"Pygmalion 6B Dev\":\n", " Model = \"PygmalionAI/pygmalion-6b\"\n", - " Revision = \"dev\"\n", + " Revision = \"--revision dev\"\n", " path = \"\"\n", + " Version = \"United\"\n", " download = \"\"\n", "elif Model == \"Lit V2 6B\":\n", " Model = \"hakurei/litv2-6B-rev3\"\n", @@ -191,7 +182,7 @@ "else:\n", " tunnel = \"\"\n", "\n", - "!wget https://koboldai.org/ckds -O - | bash /dev/stdin -m $Model -g $Version $tunnel -r $Revision" + "!wget https://koboldai.org/ckds -O - | bash /dev/stdin -m $Model -g $Version $Revision $tunnel" ], "execution_count": null, "outputs": [] From e9cf9fa6d04247b18f8c840ba5e1fc4db1a58f83 Mon Sep 17 00:00:00 2001 From: henk717 Date: Fri, 27 Jan 2023 05:20:09 +0100 Subject: [PATCH 4/9] Pygmalion Dev support --- colab/TPU.ipynb | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/colab/TPU.ipynb b/colab/TPU.ipynb index f40dad68..60664780 100644 --- a/colab/TPU.ipynb +++ b/colab/TPU.ipynb @@ -66,9 +66,9 @@ "#@title <-- Select your model below and then click this to start KoboldAI\n", "#@markdown You can find a description of the models below along with instructions on how to start KoboldAI.\n", "\n", - "Model = \"Nerys 13B V2\" #@param [\"Nerys 13B V2\", \"Erebus 13B\", \"Janeway 13B\", \"Shinen 13B\", \"Skein 20B\", \"Erebus 20B\", \"Skein 6B\", \"Janeway 6B\", \"Adventure 6B\", \"Shinen 6B\", \"Pygmalion 6B\", \"Lit V2 6B\", \"Lit 6B\", \"NeoX 20B\", \"OPT 13B\", \"Fairseq Dense 13B\", \"GPT-J-6B\"] {allow-input: true}\n", + "Model = \"Nerys 13B V2\" #@param [\"Nerys 13B V2\", \"Erebus 13B\", \"Janeway 13B\", \"Shinen 13B\", \"Skein 20B\", \"Erebus 20B\", \"Skein 6B\", \"Janeway 6B\", \"Adventure 6B\", \"Shinen 6B\", \"Pygmalion 6B\", \"Pygmalion 6B Dev\", \"Lit V2 6B\", \"Lit 6B\", \"NeoX 20B\", \"OPT 13B\", \"Fairseq Dense 13B\", \"GPT-J-6B\"] {allow-input: true}\n", "Version = \"Official\" #@param [\"Official\", \"United\"] {allow-input: true}\n", - "Provider = \"Localtunnel\" #@param [\"Localtunnel\", \"Cloudflare\"]\n", + "Provider = \"Cloudflare\" #@param [\"Localtunnel\", \"Cloudflare\"]\n", "use_google_drive = True #@param {type:\"boolean\"}\n", "\n", "import os\n", @@ -133,6 +133,13 @@ " Model = \"PygmalionAI/pygmalion-6b\"\n", " path = \"\"\n", " download = \"\"\n", + " Version = \"United\"\n", + "elif Model == \"Pygmalion 6B Dev\":\n", + " Model = \"PygmalionAI/pygmalion-6b\"\n", + " Revision = \"--revision dev\"\n", + " path = \"\"\n", + " Version = \"United\"\n", + " download = \"\"\n", "elif Model == \"Lit V2 6B\":\n", " Model = 
\"hakurei/litv2-6B-rev3\"\n", " path = \"\"\n", From b276c5ff1513df9ae2dfc1e66de97f228086caf0 Mon Sep 17 00:00:00 2001 From: Henk Date: Tue, 31 Jan 2023 01:46:55 +0100 Subject: [PATCH 5/9] EOS Hiding Workaround (For models that want EOS behavior) --- aiserver.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/aiserver.py b/aiserver.py index 9d0daabc..107ed549 100644 --- a/aiserver.py +++ b/aiserver.py @@ -6491,6 +6491,12 @@ def applyoutputformatting(txt, no_sentence_trimming=False, no_single_line=False) if len(txt) == 0: return txt + # Workaround for endoftext appearing in models that need it, you can supposedly do this directly with the tokenizer but it keeps showing up + # So for now since we only have two known end of text tokens and only one model that wishes to have its generation stopped this is easier + # If you see this and you wish to do a universal implementation for this, feel free just make sure to test it on all platforms - Henk + txt = txt.replace("<|endoftext|>", "") + txt = txt.replace("", "") + # Use standard quotes and apostrophes txt = utils.fixquotes(txt) From 143d279f2aa4a2744c3e2782822f4a2a2411e031 Mon Sep 17 00:00:00 2001 From: Henk Date: Tue, 31 Jan 2023 02:03:29 +0100 Subject: [PATCH 6/9] Don't save revision --- koboldai_settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/koboldai_settings.py b/koboldai_settings.py index 5a70cde3..0b580f83 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -1187,7 +1187,7 @@ class system_settings(settings): 'lua_koboldcore', 'sp', 'sp_length', '_horde_pid', 'horde_share', 'aibusy', 'serverstarted', 'inference_config', 'image_pipeline', 'summarizer', 'summary_tokenizer', 'use_colab_tpu', 'noai', 'disable_set_aibusy', 'cloudflare_link', 'tts_model', - 'generating_image', 'bit_8_available', 'host', 'hascuda', 'usegpu'] + 'generating_image', 'bit_8_available', 'host', 'hascuda', 'usegpu', 'revision'] settings_name = "system" def __init__(self, socketio, koboldai_var): self.socketio = socketio From 6def9abebc97452c0a53ccf7fa36530138d75081 Mon Sep 17 00:00:00 2001 From: Henk Date: Tue, 31 Jan 2023 03:06:07 +0100 Subject: [PATCH 7/9] Model setting fixes --- koboldai_settings.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/koboldai_settings.py b/koboldai_settings.py index 0b580f83..ae3f2f2b 100644 --- a/koboldai_settings.py +++ b/koboldai_settings.py @@ -643,7 +643,7 @@ class model_settings(settings): no_save_variables = ['tqdm', 'tqdm_progress', 'tqdm_rem_time', 'socketio', 'modelconfig', 'custmodpth', 'generated_tkns', 'loaded_layers', 'total_layers', 'total_download_chunks', 'downloaded_chunks', 'presets', 'default_preset', 'koboldai_vars', 'welcome', 'welcome_default', 'simple_randomness', 'simple_creativity', 'simple_repitition', - 'badwordsids', 'uid_presets'] + 'badwordsids', 'uid_presets', 'revision', 'model', 'model_type', 'lazy_load', 'fp32_model', 'modeldim', 'horde_wait_time', 'horde_queue_position', 'horde_queue_size', 'newlinemode'] settings_name = "model" default_settings = {"rep_pen" : 1.1, "rep_pen_slope": 0.7, "rep_pen_range": 1024, "temp": 0.5, "top_p": 0.9, "top_k": 0, "top_a": 0.0, "tfs": 1.0, "typical": 1.0, "sampler_order": [6,0,1,2,3,4,5]} @@ -1187,7 +1187,7 @@ class system_settings(settings): 'lua_koboldcore', 'sp', 'sp_length', '_horde_pid', 'horde_share', 'aibusy', 'serverstarted', 'inference_config', 'image_pipeline', 'summarizer', 'summary_tokenizer', 'use_colab_tpu', 'noai', 'disable_set_aibusy', 'cloudflare_link', 'tts_model', - 
'generating_image', 'bit_8_available', 'host', 'hascuda', 'usegpu', 'revision'] + 'generating_image', 'bit_8_available', 'host', 'hascuda', 'usegpu'] settings_name = "system" def __init__(self, socketio, koboldai_var): self.socketio = socketio From 640bd640378118b7ab6d8203c23467399c2a1711 Mon Sep 17 00:00:00 2001 From: Henk Date: Tue, 31 Jan 2023 04:00:35 +0100 Subject: [PATCH 8/9] Revision Fixes (And Var Workaround) --- aiserver.py | 96 ++++++++++++++++++++++++++--------------------------- utils.py | 8 ++--- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/aiserver.py b/aiserver.py index 107ed549..49a9ae16 100644 --- a/aiserver.py +++ b/aiserver.py @@ -1782,15 +1782,15 @@ def get_layer_count(model, directory=""): model = directory from transformers import AutoConfig if(os.path.isdir(model.replace('/', '_'))): - model_config = AutoConfig.from_pretrained(model.replace('/', '_'), revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model.replace('/', '_'), revision=args.revision, cache_dir="cache") elif(is_model_downloaded(model)): - model_config = AutoConfig.from_pretrained("models/{}".format(model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained("models/{}".format(model.replace('/', '_')), revision=args.revision, cache_dir="cache") elif(os.path.isdir(directory)): - model_config = AutoConfig.from_pretrained(directory, revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(directory, revision=args.revision, cache_dir="cache") elif(os.path.isdir(koboldai_vars.custmodpth.replace('/', '_'))): - model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=args.revision, cache_dir="cache") else: - model_config = AutoConfig.from_pretrained(model, revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(model, revision=args.revision, cache_dir="cache") try: if ((utils.HAS_ACCELERATE and model_config.model_type != 'gpt2') or model_config.model_type in ("gpt_neo", "gptj", "xglm", "opt")) and not koboldai_vars.nobreakmodel: return utils.num_layers(model_config) @@ -2774,19 +2774,19 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal from transformers import AutoConfig if(os.path.isdir(koboldai_vars.custmodpth.replace('/', '_'))): try: - model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth.replace('/', '_'), revision=args.revision, cache_dir="cache") koboldai_vars.model_type = model_config.model_type except ValueError as e: koboldai_vars.model_type = "not_found" elif(os.path.isdir("models/{}".format(koboldai_vars.custmodpth.replace('/', '_')))): try: - model_config = AutoConfig.from_pretrained("models/{}".format(koboldai_vars.custmodpth.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained("models/{}".format(koboldai_vars.custmodpth.replace('/', '_')), revision=args.revision, cache_dir="cache") koboldai_vars.model_type = model_config.model_type except ValueError as e: koboldai_vars.model_type = "not_found" else: try: - model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth, 
revision=koboldai_vars.revision, cache_dir="cache") + model_config = AutoConfig.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") koboldai_vars.model_type = model_config.model_type except ValueError as e: koboldai_vars.model_type = "not_found" @@ -2886,7 +2886,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal print(tokenizer_id, koboldai_vars.newlinemode) - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") loadsettings() koboldai_vars.colaburl = url or koboldai_vars.colaburl @@ -3071,19 +3071,19 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal with(maybe_use_float16()): try: if os.path.exists(koboldai_vars.custmodpth): - model = GPT2LMHeadModel.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") elif os.path.exists(os.path.join("models/", koboldai_vars.custmodpth)): - model = GPT2LMHeadModel.from_pretrained(os.path.join("models/", koboldai_vars.custmodpth), revision=koboldai_vars.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(os.path.join("models/", koboldai_vars.custmodpth), revision=koboldai_vars.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(os.path.join("models/", koboldai_vars.custmodpth), revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(os.path.join("models/", koboldai_vars.custmodpth), revision=args.revision, cache_dir="cache") else: - model = GPT2LMHeadModel.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") + model = GPT2LMHeadModel.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") raise e - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") model.save_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), max_shard_size="500MiB") tokenizer.save_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_'))) koboldai_vars.modeldim = get_hidden_size_from_model(model) @@ -3130,38 +3130,38 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal lowmem = {} if(os.path.isdir(koboldai_vars.custmodpth)): try: - tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", 
use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") except Exception as e: try: - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", **lowmem) elif(os.path.isdir("models/{}".format(koboldai_vars.model.replace('/', '_')))): try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache") except Exception as e: try: - tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), 
revision=args.revision, cache_dir="cache", **lowmem) else: old_rebuild_tensor = torch._utils._rebuild_tensor def new_rebuild_tensor(storage: Union[torch_lazy_loader.LazyTensor, torch.Storage], storage_offset, shape, stride): @@ -3177,21 +3177,21 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal torch._utils._rebuild_tensor = new_rebuild_tensor try: - tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache") except Exception as e: try: - tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache") except Exception as e: - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") try: - model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache", **lowmem) + model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", **lowmem) except Exception as e: if("out of memory" in traceback.format_exc().lower()): raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.") - model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache", **lowmem) + model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", **lowmem) torch._utils._rebuild_tensor = old_rebuild_tensor @@ -3208,13 +3208,13 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal import huggingface_hub legacy = packaging.version.parse(transformers_version) < packaging.version.parse("4.22.0.dev0") # Save the config.json - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, transformers.configuration_utils.CONFIG_NAME, revision=koboldai_vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.configuration_utils.CONFIG_NAME)) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, transformers.configuration_utils.CONFIG_NAME, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.configuration_utils.CONFIG_NAME)) if(utils.num_shards is None): # Save the pytorch_model.bin of an unsharded model try: - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, transformers.modeling_utils.WEIGHTS_NAME, revision=koboldai_vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_NAME)) + 
shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, transformers.modeling_utils.WEIGHTS_NAME, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_NAME)) except: - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, "model.safetensors", revision=koboldai_vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), "model.safetensors")) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, "model.safetensors", revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), "model.safetensors")) else: with open(utils.from_pretrained_index_filename) as f: map_data = json.load(f) @@ -3223,7 +3223,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal shutil.move(os.path.realpath(utils.from_pretrained_index_filename), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), transformers.modeling_utils.WEIGHTS_INDEX_NAME)) # Then save the pytorch_model-#####-of-#####.bin files for filename in filenames: - shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, filename, revision=koboldai_vars.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), filename)) + shutil.move(os.path.realpath(huggingface_hub.hf_hub_download(koboldai_vars.model, filename, revision=args.revision, cache_dir="cache", local_files_only=True, legacy_cache_layout=legacy)), os.path.join("models/{}".format(koboldai_vars.model.replace('/', '_')), filename)) shutil.rmtree("cache/") if(koboldai_vars.badwordsids is koboldai_settings.badwordsids_default and koboldai_vars.model_type not in ("gpt2", "gpt_neo", "gptj")): @@ -3269,7 +3269,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal else: from transformers import GPT2Tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") else: from transformers import PreTrainedModel from transformers import modeling_utils @@ -3682,7 +3682,7 @@ def lua_decode(tokens): if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") return utils.decodenewlines(tokenizer.decode(tokens)) #==================================================================# @@ -3694,7 +3694,7 @@ def lua_encode(string): if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") return tokenizer.encode(utils.encodenewlines(string), max_length=int(4e9), truncation=True) #==================================================================# @@ -4852,19 +4852,19 @@ def actionsubmit(data, 
actionmode=0, force_submit=False, force_prompt_gen=False, try: if(os.path.isdir(tokenizer_id)): try: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") except: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) elif(os.path.isdir("models/{}".format(tokenizer_id.replace('/', '_')))): try: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache") except: - tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained("models/{}".format(tokenizer_id.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False) else: try: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache") except: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False) + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, revision=args.revision, cache_dir="cache", use_fast=False) except: logger.warning(f"Unknown tokenizer {repr(tokenizer_id)}") koboldai_vars.api_tokenizer_id = tokenizer_id @@ -5239,7 +5239,7 @@ def calcsubmitbudget(actionlen, winfo, mem, anotetxt, actions, submission=None, if("tokenizer" not in globals()): from transformers import GPT2Tokenizer global tokenizer - tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache") + tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache") lnheader = len(tokenizer._koboldai_header) diff --git a/utils.py b/utils.py index 22fc05a4..45a7817f 100644 --- a/utils.py +++ b/utils.py @@ -286,7 +286,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa if token is None: raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.") _cache_dir = str(cache_dir) if cache_dir is not None else transformers.TRANSFORMERS_CACHE - _revision = revision if revision is not None else huggingface_hub.constants.DEFAULT_REVISION + _revision = args.revision if args.revision is not None else huggingface_hub.constants.DEFAULT_REVISION sharded = False headers = {"user-agent": transformers.file_utils.http_user_agent(user_agent)} if use_auth_token: @@ -297,7 +297,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa def is_cached(filename): try: - huggingface_hub.hf_hub_download(pretrained_model_name_or_path, filename, cache_dir=cache_dir, local_files_only=True) + huggingface_hub.hf_hub_download(pretrained_model_name_or_path, filename, cache_dir=cache_dir, local_files_only=True, revision=_revision) except ValueError: return False return True @@ -306,7 +306,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa filename = 
transformers.modeling_utils.WEIGHTS_INDEX_NAME if sharded else transformers.modeling_utils.WEIGHTS_NAME except AttributeError: return - url = huggingface_hub.hf_hub_url(pretrained_model_name_or_path, filename, revision=revision) + url = huggingface_hub.hf_hub_url(pretrained_model_name_or_path, filename, revision=_revision) if is_cached(filename) or requests.head(url, allow_redirects=True, proxies=proxies, headers=headers): break if sharded: @@ -320,7 +320,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa with open(map_filename) as f: map_data = json.load(f) filenames = set(map_data["weight_map"].values()) - urls = [huggingface_hub.hf_hub_url(pretrained_model_name_or_path, n, revision=revision) for n in filenames] + urls = [huggingface_hub.hf_hub_url(pretrained_model_name_or_path, n, revision=_revision) for n in filenames] if not force_download: urls = [u for u, n in zip(urls, filenames) if not is_cached(n)] if not urls: From f57489f73c61f875bb207b0767e83d9f91552295 Mon Sep 17 00:00:00 2001 From: Henk Date: Tue, 31 Jan 2023 18:46:59 +0100 Subject: [PATCH 9/9] Revision Cleanup --- utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/utils.py b/utils.py index 45a7817f..aa37028e 100644 --- a/utils.py +++ b/utils.py @@ -286,7 +286,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa if token is None: raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.") _cache_dir = str(cache_dir) if cache_dir is not None else transformers.TRANSFORMERS_CACHE - _revision = args.revision if args.revision is not None else huggingface_hub.constants.DEFAULT_REVISION + _revision = revision if revision is not None else huggingface_hub.constants.DEFAULT_REVISION sharded = False headers = {"user-agent": transformers.file_utils.http_user_agent(user_agent)} if use_auth_token: @@ -297,7 +297,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa def is_cached(filename): try: - huggingface_hub.hf_hub_download(pretrained_model_name_or_path, filename, cache_dir=cache_dir, local_files_only=True, revision=_revision) + huggingface_hub.hf_hub_download(pretrained_model_name_or_path, filename, cache_dir=cache_dir, local_files_only=True, revision=revision) except ValueError: return False return True @@ -306,7 +306,7 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa filename = transformers.modeling_utils.WEIGHTS_INDEX_NAME if sharded else transformers.modeling_utils.WEIGHTS_NAME except AttributeError: return - url = huggingface_hub.hf_hub_url(pretrained_model_name_or_path, filename, revision=_revision) + url = huggingface_hub.hf_hub_url(pretrained_model_name_or_path, filename, revision=revision) if is_cached(filename) or requests.head(url, allow_redirects=True, proxies=proxies, headers=headers): break if sharded: @@ -316,11 +316,11 @@ def _transformers22_aria2_hook(pretrained_model_name_or_path: str, force_downloa if not sharded: # If the model has a pytorch_model.bin file, that's the only file to download filenames = [transformers.modeling_utils.WEIGHTS_NAME] else: # Otherwise download the pytorch_model.bin.index.json and then let aria2 download all the pytorch_model-#####-of-#####.bin files mentioned inside it - map_filename = huggingface_hub.hf_hub_download(pretrained_model_name_or_path, filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, 
use_auth_token=use_auth_token, user_agent=user_agent) + map_filename = huggingface_hub.hf_hub_download(pretrained_model_name_or_path, filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, use_auth_token=use_auth_token, user_agent=user_agent, revision=revision) with open(map_filename) as f: map_data = json.load(f) filenames = set(map_data["weight_map"].values()) - urls = [huggingface_hub.hf_hub_url(pretrained_model_name_or_path, n, revision=_revision) for n in filenames] + urls = [huggingface_hub.hf_hub_url(pretrained_model_name_or_path, n, revision=revision) for n in filenames] if not force_download: urls = [u for u, n in zip(urls, filenames) if not is_cached(n)] if not urls:
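Note on patches 1-3: the notebooks thread a model revision to the ckds launcher by plain string splicing. Patch 1 appends a bare $Revision, patch 2 prefixes it with -r, and patch 3 folds the flag into the variable itself ("--revision dev") so that an empty string costs nothing on the command line. A minimal sketch of the pattern, assuming the same variable names as the notebook cell (the ckds flag spellings are taken from the diffs):

    # Map the friendly menu name to a Hugging Face repo id; only the dev
    # variant needs a non-empty revision argument.
    Model = "Pygmalion 6B Dev"
    Revision = ""  # stays empty for models served from the default branch

    if Model == "Pygmalion 6B Dev":
        Model = "PygmalionAI/pygmalion-6b"
        Revision = "--revision dev"  # flag and value travel together
        Version = "United"           # the dev model is only supported on United

    # Colab expands $-variables into the shell line, so an empty Revision
    # simply disappears from the final command:
    # !wget https://koboldai.org/ckds -O - | bash /dev/stdin -m $Model -g $Version $Revision $tunnel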
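Note on patch 5: the workaround removes end-of-text markers from the decoded string after generation rather than at decode time. A sketch of the same idea as a standalone helper; the skip_special_tokens call shown in the comment is the stock transformers mechanism the author alludes to, not what the patch uses:

    def strip_eos(txt: str) -> str:
        # The two end-of-text tokens known to the codebase at this point:
        # the GPT-style "<|endoftext|>" and the sentencepiece-style "</s>".
        txt = txt.replace("<|endoftext|>", "")
        txt = txt.replace("</s>", "")
        return txt

    # Decode-time alternative (standard transformers API):
    # text = tokenizer.decode(token_ids, skip_special_tokens=True)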
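Note on patches 6 and 7: patch 6 parks revision in system_settings.no_save_variables, and patch 7 moves it (together with other runtime-only fields such as model, model_type, lazy_load and the horde queue state) into model_settings, so none of them are written out with user settings. Illustratively, a serializer honoring such a list could look like the sketch below; this is an assumption about the mechanism, not koboldai_settings.py's actual save code:

    import json

    class Settings:
        # Transient runtime state; it must not survive a save/load round
        # trip, or a stale revision could silently pin future model loads.
        no_save_variables = ["revision", "model", "model_type", "lazy_load"]

        def to_json(self) -> str:
            data = {k: v for k, v in self.__dict__.items()
                    if k not in self.no_save_variables}
            return json.dumps(data)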
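Note on patch 8: every Hugging Face fetch in aiserver.py switches from koboldai_vars.revision to the command-line value args.revision, so config, tokenizer and weights are all resolved against the branch requested at launch. The flag name matches the diff; the argparse wiring below is an assumed reconstruction for illustration:

    import argparse
    from transformers import AutoConfig, AutoTokenizer

    parser = argparse.ArgumentParser()
    # None falls through to the hub's default branch.
    parser.add_argument("--revision", type=str, default=None)
    args = parser.parse_args()

    # Passing the same revision to every from_pretrained call keeps the
    # config, tokenizer and checkpoint on one branch/tag/commit.
    config = AutoConfig.from_pretrained("PygmalionAI/pygmalion-6b",
                                        revision=args.revision, cache_dir="cache")
    tokenizer = AutoTokenizer.from_pretrained("PygmalionAI/pygmalion-6b",
                                              revision=args.revision, cache_dir="cache")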
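Note on patches 8 and 9 in utils.py: the aria2 hook must probe the local cache and build download URLs against the same revision, or a cached main-branch file would mask a missing dev-branch one; patch 9 settles on the hook's own revision parameter (the hub API treats None as the default branch) instead of reaching for the global args. A condensed sketch of the invariant, using only calls that appear in the diff:

    import huggingface_hub

    def resolve(repo_id: str, filename: str, revision=None, cache_dir="cache"):
        def is_cached() -> bool:
            # Probe the cache for the same revision the URL will point at,
            # so the cache check and the download decision cannot disagree.
            try:
                huggingface_hub.hf_hub_download(
                    repo_id, filename, cache_dir=cache_dir,
                    local_files_only=True, revision=revision)
            except ValueError:
                return False
            return True

        url = huggingface_hub.hf_hub_url(repo_id, filename, revision=revision)
        return url, is_cached()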
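Note on the sharded-model branch of the same hook: when there is no single pytorch_model.bin, the hook downloads pytorch_model.bin.index.json and queues every shard listed in its weight_map, as in the diff's filenames = set(map_data["weight_map"].values()) line. A sketch of that index walk; the repo id and branch are hypothetical placeholders:

    import json
    import huggingface_hub

    repo_id = "some-org/sharded-model"  # hypothetical sharded checkpoint
    revision = "dev"                    # example branch

    # The index maps each tensor name to the shard file that stores it;
    # the distinct values give the set of files to fetch.
    index_path = huggingface_hub.hf_hub_download(
        repo_id, "pytorch_model.bin.index.json", revision=revision)
    with open(index_path) as f:
        shard_names = set(json.load(f)["weight_map"].values())

    urls = [huggingface_hub.hf_hub_url(repo_id, n, revision=revision)
            for n in shard_names]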