From bf665838e0dd7ddd49422559386f43dd7849ce4a Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 19 Jan 2023 08:06:40 -0500
Subject: [PATCH 01/10] Potential desync fix

---
 aiserver.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 6f30c1f0..96646939 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -8684,9 +8684,9 @@ def UI_2_redo(data):
 
 @logger.catch
 def UI_2_retry(data):
-    koboldai_vars.actions.clear_unused_options()
     if len(koboldai_vars.actions.get_current_options_no_edits()) == 0:
-        ignore = koboldai_vars.actions.pop(keep=False)
+        ignore = koboldai_vars.actions.pop(keep=True)
+    koboldai_vars.actions.clear_unused_options()
     koboldai_vars.lua_koboldbridge.feedback = None
     koboldai_vars.recentrng = koboldai_vars.recentrngm = None
     actionsubmit("", actionmode=koboldai_vars.actionmode)

From a0249d7ffab3cbfce45a0823cf36729a70cbd101 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 19 Jan 2023 12:35:55 -0500
Subject: [PATCH 02/10] Changed image return to show last image rather than
 returning null if the action doesn't have an image

---
 koboldai_settings.py | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/koboldai_settings.py b/koboldai_settings.py
index f633e0c3..5318e09a 100644
--- a/koboldai_settings.py
+++ b/koboldai_settings.py
@@ -2090,8 +2090,8 @@ class KoboldStoryRegister(object):
             if self.story_settings.gen_audio and self.koboldai_vars.experimental_features:
                 for i in reversed([-1]+list(self.actions.keys())):
                     self.gen_audio(i, overwrite=False)
-            else:
-                print("{} and {}".format(self.story_settings.gen_audio, self.koboldai_vars.experimental_features))
+            #else:
+            #    print("{} and {}".format(self.story_settings.gen_audio, self.koboldai_vars.experimental_features))
     
     def set_picture(self, action_id, filename, prompt):
         if action_id == -1:
@@ -2111,7 +2111,22 @@ class KoboldStoryRegister(object):
             filename = os.path.join(self.koboldai_vars.save_paths.generated_images, self.actions[action_id]['picture_filename'])
             prompt = self.actions[action_id]['picture_prompt']
         else:
-            return None, None
+            #Let's find the last picture if there is one
+            found = False
+            for i in reversed(range(-1, action_id)):
+                if i in self.actions and 'picture_filename' in self.actions[i]:
+                    filename = os.path.join(self.koboldai_vars.save_paths.generated_images, self.actions[i]['picture_filename'])
+                    prompt = self.actions[i]['picture_prompt']
+                    found = True
+                    break
+                elif i == -1:
+                    if self.story_settings.prompt_picture_filename == "":
+                        return None, None
+                    filename = os.path.join(self.koboldai_vars.save_paths.generated_images, self.story_settings.prompt_picture_filename)
+                    prompt = self.story_settings.prompt_picture_prompt
+                    found = True
+            if not found:
+                return None, None
         
         if os.path.exists(filename):
             return filename, prompt
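
The lookup that patch 02 adds is easier to see outside the class: starting from the current action, scan backwards to the most recent action that has a picture, fall back to the prompt picture at index -1, and only then give up. A minimal standalone sketch of that logic (the function wrapper and flattened parameters are illustrative; in the patch this lives on KoboldStoryRegister and reads self.actions and the story settings):

    import os

    def find_last_picture(actions, action_id, generated_dir,
                          prompt_picture_filename, prompt_picture_prompt):
        # Exact match: this action has its own picture.
        if action_id in actions and 'picture_filename' in actions[action_id]:
            entry = actions[action_id]
            return os.path.join(generated_dir, entry['picture_filename']), entry['picture_prompt']
        # Otherwise walk backwards through earlier actions, ending at -1 (the prompt).
        for i in reversed(range(-1, action_id)):
            if i in actions and 'picture_filename' in actions[i]:
                entry = actions[i]
                return os.path.join(generated_dir, entry['picture_filename']), entry['picture_prompt']
            if i == -1 and prompt_picture_filename:
                return os.path.join(generated_dir, prompt_picture_filename), prompt_picture_prompt
        return None, None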
From 5aca1420348ebcd936b908341f9ddc6198ea5644 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 19 Jan 2023 15:55:10 -0500
Subject: [PATCH 03/10] Added blank image. Shows when an action is selected
 and there is no action image for that action

---
 aiserver.py      |   4 +++-
 static/blank.png | Bin 0 -> 148 bytes
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 static/blank.png

diff --git a/aiserver.py b/aiserver.py
index 2ec094ee..50952239 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -10320,7 +10320,9 @@ def UI_2_action_image():
             filename,
             mimetype="image/jpeg")
     else:
-        return None
+        return send_file(
+            "static/blank.png",
+            mimetype="image/png")
 
 #==================================================================#
 # display messages if they have never been sent before on this install
diff --git a/static/blank.png b/static/blank.png
new file mode 100644
index 0000000000000000000000000000000000000000..994c3ba3244ffe4448c0b4bad32a7ce76eaa391f
GIT binary patch
literal 148
zcmeAS@N?(olHy`uVBq!ia0vp^OhC-Y!3HG1DAjiXDVAa<&kznEsNqQI0P;BtJR*yM
z>aT+^qm#z$3ZS55iEBhjaDG}zd16s2LwR|*US?i)adKios$PCk`s{Z$Qb0w5o-U3d
j95a*u{Qqyy3<3-yf0_OtyRdQ*P?EvZ)z4*}Q$iB}W=

Date: Mon, 30 Jan 2023 12:57:58 +0800
Subject: [PATCH 04/10] Adding CORS support to allow cross origin requests.
 Added new dependency: the flask-cors package is required.

---
 aiserver.py                  | 2 ++
 environments/huggingface.yml | 1 +
 environments/rocm.yml        | 1 +
 requirements.txt             | 1 +
 requirements_mtj.txt         | 1 +
 5 files changed, 6 insertions(+)

diff --git a/aiserver.py b/aiserver.py
index 9d0daabc..36e89adc 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -563,12 +563,14 @@ from flask_socketio import SocketIO, emit, join_room, leave_room
 from flask_socketio import emit as _emit
 from flask_session import Session
 from flask_compress import Compress
+from flask_cors import CORS
 from werkzeug.exceptions import HTTPException, NotFound, InternalServerError
 import secrets
 app = Flask(__name__, root_path=os.getcwd())
 app.secret_key = secrets.token_hex()
 app.config['SESSION_TYPE'] = 'filesystem'
 app.config['TEMPLATES_AUTO_RELOAD'] = True
+CORS(app)
 Compress(app)
 socketio = SocketIO(app, async_method="eventlet", manage_session=False, cors_allowed_origins='*', max_http_buffer_size=10_000_000)
 #socketio = SocketIO(app, async_method="eventlet", manage_session=False, cors_allowed_origins='*', max_http_buffer_size=10_000_000, logger=logger, engineio_logger=True)
diff --git a/environments/huggingface.yml b/environments/huggingface.yml
index 28c6c916..e8010f88 100644
--- a/environments/huggingface.yml
+++ b/environments/huggingface.yml
@@ -27,6 +27,7 @@ dependencies:
   - pip:
     - flask-cloudflared
     - flask-ngrok
+    - flask-cors
    - lupa==1.10
     - transformers==4.25.1
     - huggingface_hub>=0.10.1
diff --git a/environments/rocm.yml b/environments/rocm.yml
index de71a87c..55129cbd 100644
--- a/environments/rocm.yml
+++ b/environments/rocm.yml
@@ -26,6 +26,7 @@ dependencies:
     - torch==1.12.1+rocm5.1.1
     - flask-cloudflared
     - flask-ngrok
+    - flask-cors
     - lupa==1.10
     - transformers==4.25.1
     - huggingface_hub>=0.10.1
diff --git a/requirements.txt b/requirements.txt
index 175c3b21..f12faf0d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,7 @@ requests
 torch >= 1.9, < 1.13
 flask-cloudflared
 flask-ngrok
+flask-cors
 eventlet
 dnspython==2.2.1
 lupa==1.10
diff --git a/requirements_mtj.txt b/requirements_mtj.txt
index a24bb423..998895ad 100644
--- a/requirements_mtj.txt
+++ b/requirements_mtj.txt
@@ -13,6 +13,7 @@ flask
 Flask-SocketIO
 flask-cloudflared >= 0.0.5
 flask-ngrok
+flask-cors
 eventlet
 dnspython==2.2.1
 lupa==1.10
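
For context on patch 04: the one-argument CORS(app) call enables cross-origin requests for every route and origin, which is the broadest setting flask-cors offers. A minimal sketch (the scoped call in the comment is an option the library supports, not something this patch configures):

    from flask import Flask
    from flask_cors import CORS

    app = Flask(__name__)
    CORS(app)  # allow cross-origin requests from any origin, as the patch does

    # flask-cors can also be restricted per-route and per-origin, e.g.:
    # CORS(app, resources={r"/api/*": {"origins": "https://example.com"}})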
From c5ed78377628242f725947012cf6f8fd59f75da4 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Tue, 31 Jan 2023 08:01:58 -0500
Subject: [PATCH 05/10] Fix for chat mode name not syncing to server

---
 templates/settings flyout.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/templates/settings flyout.html b/templates/settings flyout.html
index b5307461..fbc0f955 100644
--- a/templates/settings flyout.html
+++ b/templates/settings flyout.html
@@ -103,7 +103,7 @@
-
+

From e6608d97fd4eb0afd8253fa725ebee2665f74b9d Mon Sep 17 00:00:00 2001
From: Llama <34464159+pi6am@users.noreply.github.com>
Date: Tue, 31 Jan 2023 09:41:36 -0800
Subject: [PATCH 06/10] Permit configuring KoboldAI horde bridge settings

The KoboldAI horde bridge is typically configured by copying
KoboldAI-Horde-Bridge/clientData_template.py to clientData.py and then
editing the worker name, api key, and other settings. However the
bridge spawned by the local KoboldAI server was always using hardcoded
"anonymous" worker settings.

To make it easier for people to use their actual API key and specify a
worker name, this change modifies the bridge startup to attempt to
import settings from clientData.py. If clientData.py doesn't exist then
we fall back to using the default settings, and if the user hasn't
edited the worker name then we also generate a randomized name.

Also clear the horde bridge pid when stopping the bridge so that it can
be restarted within the same session.
---
 koboldai_settings.py | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/koboldai_settings.py b/koboldai_settings.py
index 47246cc5..cdfb71a5 100644
--- a/koboldai_settings.py
+++ b/koboldai_settings.py
@@ -1333,11 +1333,29 @@ class system_settings(settings):
                 logger.info("Starting Horde bridge")
                 bridge = importlib.import_module("KoboldAI-Horde-Bridge.bridge")
                 self._horde_pid = bridge.kai_bridge()
-                threading.Thread(target=self._horde_pid.bridge, args=(1, "0000000000", f"Automated Instance #{random.randint(-100000000, 100000000)}", 'http://127.0.0.1:{}'.format(self.port), "http://koboldai.net", [])).run()
+                try:
+                    bridge_cd = importlib.import_module("KoboldAI-Horde-Bridge.clientData")
+                    cluster_url = bridge_cd.cluster_url
+                    kai_name = bridge_cd.kai_name
+                    if kai_name == "My Awesome Instance":
+                        kai_name = f"Automated Instance #{random.randint(-100000000, 100000000)}"
+                    api_key = bridge_cd.api_key
+                    priority_usernames = bridge_cd.priority_usernames
+                except:
+                    cluster_url = "http://koboldai.net"
+                    kai_name = f"Automated Instance #{random.randint(-100000000, 100000000)}"
+                    api_key = "0000000000"
+                    priority_usernames = []
+                # Always use the local URL & port
+                kai_url = f'http://127.0.0.1:{self.port}'
+
+                logger.info(f"Name: {kai_name} on {kai_url}")
+                threading.Thread(target=self._horde_pid.bridge, args=(1, api_key, kai_name, kai_url, cluster_url, priority_usernames)).run()
             else:
                 if self._horde_pid is not None:
                     logger.info("Killing Horde bridge")
                     self._horde_pid.stop()
+                    self._horde_pid = None
 
 class KoboldStoryRegister(object):
     def __init__(self, socketio, story_settings, koboldai_vars, tokenizer=None, sequence=[]):
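
The startup change in patch 06 is an instance of a common optional-config pattern: try to import a user-edited settings module, and fall back to safe defaults (plus a randomized worker name) when it is absent or incomplete. Reduced to a skeleton (the function wrapper is illustrative; the module path, template default, and fallback values are taken from the patch, and the broad except mirrors its bare except clause):

    import importlib
    import random

    def load_bridge_settings(port):
        try:
            cd = importlib.import_module("KoboldAI-Horde-Bridge.clientData")
            cluster_url = cd.cluster_url
            kai_name = cd.kai_name
            api_key = cd.api_key
            priority_usernames = cd.priority_usernames
            if kai_name == "My Awesome Instance":  # unedited template default
                kai_name = f"Automated Instance #{random.randint(-100000000, 100000000)}"
        except Exception:
            # clientData.py missing or malformed: anonymous defaults.
            cluster_url = "http://koboldai.net"
            kai_name = f"Automated Instance #{random.randint(-100000000, 100000000)}"
            api_key = "0000000000"
            priority_usernames = []
        # The worker URL is always the local server, regardless of config.
        kai_url = f"http://127.0.0.1:{port}"
        return api_key, kai_name, kai_url, cluster_url, priority_usernames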
From e555a70a38028a71e18d8eecdf781c86920babe9 Mon Sep 17 00:00:00 2001
From: Henk
Date: Tue, 31 Jan 2023 18:54:48 +0100
Subject: [PATCH 07/10] Arg Revision Workaround for TPU

---
 tpu_mtj_backend.py | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py
index 02754d95..d292de0e 100644
--- a/tpu_mtj_backend.py
+++ b/tpu_mtj_backend.py
@@ -1461,48 +1461,48 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     with torch_lazy_loader.use_lazy_torch_load(callback=callback, dematerialized_modules=True):
         if(os.path.isdir(koboldai_vars.custmodpth)):
             try:
-                tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
+                tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache")
                 except Exception as e:
                     try:
-                        tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache")
                     except Exception as e:
-                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache")
             try:
-                model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache")
+                model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache")
             except Exception as e:
-                model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache")
+                model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache")
         elif(os.path.isdir("models/{}".format(koboldai_vars.model.replace('/', '_')))):
             try:
-                tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
+                tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache")
                 except Exception as e:
                     try:
-                        tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache")
                     except Exception as e:
-                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache")
             try:
-                model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache")
+                model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache")
             except Exception as e:
-                model = GPTNeoForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache")
+                model = GPTNeoForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache")
         else:
             try:
-                tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
+                tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache")
                 except Exception as e:
                     try:
-                        tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache")
                     except Exception as e:
-                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache")
             try:
-                model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache")
+                model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache")
             except Exception as e:
-                model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache")
+                model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache")
 
     #network.state = network.move_xmap(network.state, np.zeros(cores_per_replica))
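
Every hunk in patch 07 is the same mechanical substitution inside a nested try/except loading cascade; patch 08 below reverts it because tpu_mtj_backend.py turns out not to have access to args. As a hypothetical refactor (not code from either patch), the cascade with the revision passed in explicitly would collapse to roughly:

    from transformers import (AutoModelForCausalLM, AutoTokenizer,
                              GPT2Tokenizer, GPTNeoForCausalLM)

    def load_with_fallbacks(path, revision=None, cache_dir="cache"):
        # Try progressively more forgiving tokenizer loaders.
        for loader, kwargs in ((AutoTokenizer.from_pretrained, {"use_fast": False}),
                               (AutoTokenizer.from_pretrained, {}),
                               (GPT2Tokenizer.from_pretrained, {})):
            try:
                tokenizer = loader(path, revision=revision, cache_dir=cache_dir, **kwargs)
                break
            except Exception:
                continue
        else:
            # Last resort mirrors the patches: fall back to the stock gpt2 tokenizer.
            tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=revision, cache_dir=cache_dir)
        try:
            model = AutoModelForCausalLM.from_pretrained(path, revision=revision, cache_dir=cache_dir)
        except Exception:
            model = GPTNeoForCausalLM.from_pretrained(path, revision=revision, cache_dir=cache_dir)
        return tokenizer, model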
From a80027384dcd0758de7fd5c9509a422d62c96cc2 Mon Sep 17 00:00:00 2001
From: Henk
Date: Tue, 31 Jan 2023 19:16:00 +0100
Subject: [PATCH 08/10] Revert 'Arg Revision Workaround for TPU'

Turns out that file doesn't have access to args, reverting. TPU
revision support will have to wait until we have the proper value
fixed.
---
 tpu_mtj_backend.py | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py
index d292de0e..02754d95 100644
--- a/tpu_mtj_backend.py
+++ b/tpu_mtj_backend.py
@@ -1461,48 +1461,48 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
     with torch_lazy_loader.use_lazy_torch_load(callback=callback, dematerialized_modules=True):
         if(os.path.isdir(koboldai_vars.custmodpth)):
             try:
-                tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache", use_fast=False)
+                tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache")
                 except Exception as e:
                     try:
-                        tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache")
                     except Exception as e:
-                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache")
             try:
-                model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache")
+                model = AutoModelForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache")
             except Exception as e:
-                model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=args.revision, cache_dir="cache")
+                model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache")
         elif(os.path.isdir("models/{}".format(koboldai_vars.model.replace('/', '_')))):
             try:
-                tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache", use_fast=False)
+                tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache")
                 except Exception as e:
                     try:
-                        tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache")
                     except Exception as e:
-                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache")
             try:
-                model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache")
+                model = AutoModelForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache")
             except Exception as e:
-                model = GPTNeoForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=args.revision, cache_dir="cache")
+                model = GPTNeoForCausalLM.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache")
         else:
             try:
-                tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache", use_fast=False)
+                tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
             except Exception as e:
                 try:
-                    tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache")
+                    tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache")
                 except Exception as e:
                     try:
-                        tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache")
                     except Exception as e:
-                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=args.revision, cache_dir="cache")
+                        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", revision=koboldai_vars.revision, cache_dir="cache")
             try:
-                model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache")
+                model = AutoModelForCausalLM.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache")
             except Exception as e:
-                model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.model, revision=args.revision, cache_dir="cache")
+                model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.model, revision=koboldai_vars.revision, cache_dir="cache")
 
     #network.state = network.move_xmap(network.state, np.zeros(cores_per_replica))

From 09200856958b6b0d587d0c9d5e85922da38619e4 Mon Sep 17 00:00:00 2001
From: Henk
Date: Tue, 31 Jan 2023 21:00:17 +0100
Subject: [PATCH 09/10] Experimental EOT Support

---
 aiserver.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 49a9ae16..edd3b646 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -6491,10 +6491,14 @@ def applyoutputformatting(txt, no_sentence_trimming=False, no_single_line=False)
     if len(txt) == 0:
         return txt
 
-    # Workaround for endoftext appearing in models that need it, you can supposedly do this directly with the tokenizer but it keeps showing up
-    # So for now since we only have two known end of text tokens and only one model that wishes to have its generation stopped this is easier
-    # If you see this and you wish to do a universal implementation for this, feel free just make sure to test it on all platforms - Henk
-    txt = txt.replace("<|endoftext|>", "")
+    # Handle <|endoftext|> for models that want this
+    # In the future it would be nice if we could extend this to all EOS models.
+    # However, since EOS detection may have unforeseen consequences, for now we hardcode <|endoftext|> until more can be tested
+    # - Henk
+    eotregex = re.compile(r'<\|endoftext\|>[.|\n|\W|\w]*')
+    txt = eotregex.sub('', txt)
+
+    # Cleanup stray </s>
     txt = txt.replace("</s>", "")
 
     # Use standard quotes and apostrophes
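
On the regex in patch 09: inside a character class, `.` and `|` are literal characters and \W plus \w together match everything (including newlines), so the pattern removes the first <|endoftext|> and all text after it; r'<\|endoftext\|>[\s\S]*' would be an equivalent spelling. A quick check with illustrative strings:

    import re

    # The patch's pattern: strips the first <|endoftext|> and everything after it.
    eotregex = re.compile(r'<\|endoftext\|>[.|\n|\W|\w]*')

    sample = "The story continues.<|endoftext|>trailing text\nthat should vanish"
    print(eotregex.sub('', sample))           # -> "The story continues."
    print(eotregex.sub('', "no token here"))  # unchanged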
From 3921ed12169706525f47ef92f53b6ff9f6f493fa Mon Sep 17 00:00:00 2001
From: Henk
Date: Wed, 1 Feb 2023 23:52:35 +0100
Subject: [PATCH 10/10] Lock CORS behind --host

---
 aiserver.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/aiserver.py b/aiserver.py
index 56689beb..78242c3b 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -570,7 +570,6 @@ app = Flask(__name__, root_path=os.getcwd())
 app.secret_key = secrets.token_hex()
 app.config['SESSION_TYPE'] = 'filesystem'
 app.config['TEMPLATES_AUTO_RELOAD'] = True
-CORS(app)
 Compress(app)
 socketio = SocketIO(app, async_method="eventlet", manage_session=False, cors_allowed_origins='*', max_http_buffer_size=10_000_000)
 #socketio = SocketIO(app, async_method="eventlet", manage_session=False, cors_allowed_origins='*', max_http_buffer_size=10_000_000, logger=logger, engineio_logger=True)
@@ -13259,6 +13258,8 @@ def run():
     general_startup()
     # Start flask & SocketIO
     logger.init("Flask", status="Starting")
+    if koboldai_vars.host:
+        CORS(app)
     Session(app)
     logger.init_ok("Flask", status="OK")
     logger.init("Webserver", status="Starting")
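
Taken together with patch 04, the net effect of patch 10 is that CORS(app) now runs only when koboldai_vars.host is truthy, i.e. when the server was deliberately started with --host; purely local instances keep the browser's same-origin protection. A minimal sketch of that gating under an assumed boolean flag:

    from flask import Flask
    from flask_cors import CORS

    def create_app(serve_remotely: bool) -> Flask:
        app = Flask(__name__)
        if serve_remotely:
            # Only relax the same-origin policy when the instance is
            # intentionally exposed, mirroring the --host check in the patch.
            CORS(app)
        return app

    app = create_app(serve_remotely=False)  # local-only: CORS stays off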