From ddc9be00d6beb27feff69cb67c78957cd0da7acc Mon Sep 17 00:00:00 2001
From: vfbd
Date: Mon, 26 Sep 2022 13:57:44 -0400
Subject: [PATCH 1/2] Attempt to fix issue where `|` appears in editor after
 pressing enter

---
 static/application.js | 29 ++++++++++++++++-------------
 templates/index.html  |  2 +-
 2 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/static/application.js b/static/application.js
index 85ab88a8..8da83f0a 100644
--- a/static/application.js
+++ b/static/application.js
@@ -1483,26 +1483,29 @@ function chunkOnBeforeInput(event) {
 	if(buildChunkSetFromNodeArray(getSelectedNodes()).size === 0) {
 		var s = rangy.getSelection();
 		var r = s.getRangeAt(0);
+		var rand = Math.random();
 		if(document.queryCommandSupported && document.execCommand && document.queryCommandSupported('insertHTML')) {
-			document.execCommand('insertHTML', false, '<span id="_EDITOR_SENTINEL_">|</span>');
+			document.execCommand('insertHTML', false, '<span id="_EDITOR_SENTINEL_' + rand + '_">|</span>');
 		} else {
 			var t = document.createTextNode('|');
 			var b = document.createElement('span');
-			b.id = "_EDITOR_SENTINEL_";
+			b.id = "_EDITOR_SENTINEL_" + rand + "_";
 			b.insertNode(t);
 			r.insertNode(b);
 		}
-		var sentinel = document.getElementById("_EDITOR_SENTINEL_");
-		if(sentinel.nextSibling && sentinel.nextSibling.tagName === "CHUNK") {
-			r.selectNodeContents(sentinel.nextSibling);
-			r.collapse(true);
-		} else if(sentinel.previousSibling && sentinel.previousSibling.tagName === "CHUNK") {
-			r.selectNodeContents(sentinel.previousSibling);
-			r.collapse(false);
-		}
-		s.removeAllRanges();
-		s.addRange(r);
-		sentinel.parentNode.removeChild(sentinel);
+		setTimeout(function() {
+			var sentinel = document.getElementById("_EDITOR_SENTINEL_" + rand + "_");
+			if(sentinel.nextSibling && sentinel.nextSibling.tagName === "CHUNK") {
+				r.selectNodeContents(sentinel.nextSibling);
+				r.collapse(true);
+			} else if(sentinel.previousSibling && sentinel.previousSibling.tagName === "CHUNK") {
+				r.selectNodeContents(sentinel.previousSibling);
+				r.collapse(false);
+			}
+			s.removeAllRanges();
+			s.addRange(r);
+			sentinel.parentNode.removeChild(sentinel);
+		}, 1);
 	}
 }
 
diff --git a/templates/index.html b/templates/index.html
index 80481543..f8b8f9bb 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -17,7 +17,7 @@
-
+

From 7fba1fd28af0c50e7cea38ea0ee12ab48a3bebf7 Mon Sep 17 00:00:00 2001
From: vfbd
Date: Mon, 26 Sep 2022 14:37:25 -0400
Subject: [PATCH 2/2] Fix tokenizer selection code

---
 aiserver.py        | 33 +++++++++++++++------------------
 tpu_mtj_backend.py | 33 +++++++++++++++------------------
 2 files changed, 30 insertions(+), 36 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 2742af83..79bab845 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -1660,15 +1660,14 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
         if(os.path.isdir(vars.custmodpth)):
             try:
                 tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
-            except Exception as e:
-                pass
-            try:
-                tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False)
                 except Exception as e:
-                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                    try:
+                        tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
+                    except Exception as e:
+                        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
             try:
                 model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", **lowmem)
             except Exception as e:
@@ -1676,15 +1675,14 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
         elif(os.path.isdir("models/{}".format(vars.model.replace('/', '_')))):
             try:
                 tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
-            except Exception as e:
-                pass
-            try:
-                tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False)
                 except Exception as e:
-                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                    try:
+                        tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
+                    except Exception as e:
+                        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
             try:
                 model = AutoModelForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", **lowmem)
             except Exception as e:
@@ -1705,15 +1703,14 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
 
             try:
                 tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
-            except Exception as e:
-                pass
-            try:
-                tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False)
                 except Exception as e:
-                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                    try:
+                        tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
+                    except Exception as e:
+                        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
             try:
                 model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem)
             except Exception as e:
diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py
index 0c6667a2..94801323 100644
--- a/tpu_mtj_backend.py
+++ b/tpu_mtj_backend.py
@@ -1333,15 +1333,14 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     if(os.path.isdir(vars.custmodpth)):
         try:
             tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
-        except Exception as e:
-            pass
-        try:
-            tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False)
         except Exception as e:
             try:
-                tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
+                tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
-                tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                try:
+                    tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
+                except Exception as e:
+                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
         try:
             model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
         except Exception as e:
@@ -1349,15 +1348,14 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     elif(os.path.isdir("models/{}".format(vars.model.replace('/', '_')))):
         try:
             tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
-        except Exception as e:
-            pass
-        try:
-            tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False)
         except Exception as e:
             try:
-                tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
+                tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
-                tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                try:
+                    tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
+                except Exception as e:
+                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
         try:
             model = AutoModelForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
         except Exception as e:
@@ -1365,15 +1363,14 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     else:
         try:
             tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
-        except Exception as e:
-            pass
-        try:
-            tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False)
         except Exception as e:
             try:
-                tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
+                tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
-                tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                try:
+                    tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
+                except Exception as e:
+                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
         try:
             model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
         except Exception as e:
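
The bug PATCH 2/2 fixes is visible in the removed lines: the first `except Exception as e:` block only did `pass`, and the `use_fast=False` attempt then ran unconditionally, overwriting the tokenizer even when the first, fast load had already succeeded. The patch nests each fallback inside the previous attempt's `except` block so it runs only on failure. The sketch below shows the same fallback chain as a standalone helper, using early returns in place of nesting; the helper name `load_tokenizer` and its `path`/`revision` parameters are illustrative and do not appear in the patch:

    from transformers import AutoTokenizer, GPT2TokenizerFast

    def load_tokenizer(path, revision=None):
        # 1. Preferred: a fast (Rust-backed) tokenizer for the checkpoint.
        try:
            return AutoTokenizer.from_pretrained(path, revision=revision, cache_dir="cache")
        except Exception:
            pass
        # 2. Fall back to the slow Python implementation. Each attempt
        #    runs only if the previous one raised, which is the behavior
        #    the patch's nested try/except blocks restore.
        try:
            return AutoTokenizer.from_pretrained(path, revision=revision, cache_dir="cache", use_fast=False)
        except Exception:
            pass
        # 3. Try GPT-2's tokenizer files from the same checkpoint.
        try:
            return GPT2TokenizerFast.from_pretrained(path, revision=revision, cache_dir="cache")
        except Exception:
            # 4. Last resort: the stock "gpt2" tokenizer from the Hub.
            return GPT2TokenizerFast.from_pretrained("gpt2", revision=revision, cache_dir="cache")

Early returns and nested `except` blocks are equivalent here; the patch keeps the nesting because the surrounding code assigns to a shared `tokenizer` variable inline rather than returning from a helper.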