diff --git a/aiserver.py b/aiserver.py
index 0f3a10c5..e61cc3b7 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -2491,15 +2491,14 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
         if(os.path.isdir(vars.custmodpth)):
             try:
                 tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
-            except Exception as e:
-                pass
-            try:
-                tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False)
                 except Exception as e:
-                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                    try:
+                        tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
+                    except Exception as e:
+                        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
             try:
                 model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", **lowmem)
             except Exception as e:
@@ -2509,15 +2508,14 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
         elif(os.path.isdir("models/{}".format(vars.model.replace('/', '_')))):
             try:
                 tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
-            except Exception as e:
-                pass
-            try:
-                tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False)
                 except Exception as e:
-                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                    try:
+                        tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
+                    except Exception as e:
+                        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
             try:
                 model = AutoModelForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", **lowmem)
             except Exception as e:
@@ -2540,15 +2538,14 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
 
             try:
                 tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
-            except Exception as e:
-                pass
-            try:
-                tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False)
                 except Exception as e:
-                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                    try:
+                        tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
+                    except Exception as e:
+                        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
             try:
                 model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem)
             except Exception as e:
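All three branches of load_model in aiserver.py (and the matching branches in tpu_mtj_backend.py further down) get the same fix: the old code caught the first AutoTokenizer failure, discarded it with pass, and then unconditionally re-ran the slow-tokenizer load, so a successful fast-tokenizer load could be overwritten or a failure could cascade. The new code nests the fallbacks so each slower or more generic loader runs only when the previous one actually raised. A minimal sketch of that fallback order, pulled out into a standalone helper; the name load_tokenizer_with_fallback and its signature are illustrative only and not part of the patch or the codebase:

from transformers import AutoTokenizer, GPT2TokenizerFast

def load_tokenizer_with_fallback(model_path, revision=None, cache_dir="cache"):
    """Illustrative helper mirroring the fallback order the patch enforces inline:
    fast AutoTokenizer -> slow AutoTokenizer -> local GPT-2 tokenizer -> stock "gpt2".
    Each step runs only if the previous one raised."""
    try:
        return AutoTokenizer.from_pretrained(model_path, revision=revision, cache_dir=cache_dir)
    except Exception:
        try:
            return AutoTokenizer.from_pretrained(model_path, revision=revision, cache_dir=cache_dir, use_fast=False)
        except Exception:
            try:
                return GPT2TokenizerFast.from_pretrained(model_path, revision=revision, cache_dir=cache_dir)
            except Exception:
                return GPT2TokenizerFast.from_pretrained("gpt2", revision=revision, cache_dir=cache_dir)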
GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") try: model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem) except Exception as e: diff --git a/static/application.js b/static/application.js index 93ce880c..6cdb531b 100644 --- a/static/application.js +++ b/static/application.js @@ -1646,26 +1646,29 @@ function chunkOnBeforeInput(event) { if(buildChunkSetFromNodeArray(getSelectedNodes()).size === 0) { var s = rangy.getSelection(); var r = s.getRangeAt(0); + var rand = Math.random(); if(document.queryCommandSupported && document.execCommand && document.queryCommandSupported('insertHTML')) { - document.execCommand('insertHTML', false, '|'); + document.execCommand('insertHTML', false, '|'); } else { var t = document.createTextNode('|'); var b = document.createElement('span'); - b.id = "_EDITOR_SENTINEL_"; + b.id = "_EDITOR_SENTINEL_" + rand + "_"; b.insertNode(t); r.insertNode(b); } - var sentinel = document.getElementById("_EDITOR_SENTINEL_"); - if(sentinel.nextSibling && sentinel.nextSibling.tagName === "CHUNK") { - r.selectNodeContents(sentinel.nextSibling); - r.collapse(true); - } else if(sentinel.previousSibling && sentinel.previousSibling.tagName === "CHUNK") { - r.selectNodeContents(sentinel.previousSibling); - r.collapse(false); - } - s.removeAllRanges(); - s.addRange(r); - sentinel.parentNode.removeChild(sentinel); + setTimeout(function() { + var sentinel = document.getElementById("_EDITOR_SENTINEL_" + rand + "_"); + if(sentinel.nextSibling && sentinel.nextSibling.tagName === "CHUNK") { + r.selectNodeContents(sentinel.nextSibling); + r.collapse(true); + } else if(sentinel.previousSibling && sentinel.previousSibling.tagName === "CHUNK") { + r.selectNodeContents(sentinel.previousSibling); + r.collapse(false); + } + s.removeAllRanges(); + s.addRange(r); + sentinel.parentNode.removeChild(sentinel); + }, 1); } } diff --git a/templates/index.html b/templates/index.html index b51fef10..253f6b8a 100644 --- a/templates/index.html +++ b/templates/index.html @@ -18,7 +18,7 @@ - + diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py index 79582867..9c1466f7 100644 --- a/tpu_mtj_backend.py +++ b/tpu_mtj_backend.py @@ -1352,15 +1352,14 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo if(os.path.isdir(vars.custmodpth)): try: tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") - except Exception as e: - pass - try: - tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False) except Exception as e: try: - tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False) except Exception as e: - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") + try: + tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") + except Exception as e: + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache") try: model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache") except Exception as e: @@ -1368,15 +1367,14 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo 
diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py
index 79582867..9c1466f7 100644
--- a/tpu_mtj_backend.py
+++ b/tpu_mtj_backend.py
@@ -1352,15 +1352,14 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     if(os.path.isdir(vars.custmodpth)):
         try:
             tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
-        except Exception as e:
-            pass
-        try:
-            tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False)
         except Exception as e:
             try:
-                tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
+                tokenizer = AutoTokenizer.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
-                tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                try:
+                    tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
+                except Exception as e:
+                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
         try:
             model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, revision=vars.revision, cache_dir="cache")
         except Exception as e:
@@ -1368,15 +1367,14 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     elif(os.path.isdir("models/{}".format(vars.model.replace('/', '_')))):
         try:
             tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
-        except Exception as e:
-            pass
-        try:
-            tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False)
         except Exception as e:
             try:
-                tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
+                tokenizer = AutoTokenizer.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
-                tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                try:
+                    tokenizer = GPT2TokenizerFast.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
+                except Exception as e:
+                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
         try:
             model = AutoModelForCausalLM.from_pretrained("models/{}".format(vars.model.replace('/', '_')), revision=vars.revision, cache_dir="cache")
         except Exception as e:
@@ -1384,15 +1382,14 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     else:
         try:
             tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
-        except Exception as e:
-            pass
-        try:
-            tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False)
         except Exception as e:
             try:
-                tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
+                tokenizer = AutoTokenizer.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
-                tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
+                try:
+                    tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
+                except Exception as e:
+                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", revision=vars.revision, cache_dir="cache")
         try:
             model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache")
         except Exception as e: