diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py
index 25c1ba32..ec69f66d 100644
--- a/tpu_mtj_backend.py
+++ b/tpu_mtj_backend.py
@@ -1116,10 +1116,10 @@ def load_model(path: str, model_type: str, badwordsids=koboldai_settings.badword
     thread_resources_env = maps.ResourceEnv(maps.Mesh(devices, ('dp', 'mp')), ())
     maps.thread_resources.env = thread_resources_env
     if initial_load:
-        logger.message(f"KoboldAI has finished loading and is available at the following link for UI 1: {cloudflare}")
-        logger.message(f"KoboldAI has finished loading and is available at the following link for UI 2: {cloudflare}/new_ui")
-        logger.message(f"KoboldAI has finished loading and is available at the following link for KoboldAI Lite: {cloudflare}/lite")
-        logger.message(f"KoboldAI has finished loading and is available at the following link for the API: {cloudflare}/api")
+        logger.message(f"KoboldAI has finished loading and is available at the following link for UI 1: {koboldai_vars.cloudflare_link}")
+        logger.message(f"KoboldAI has finished loading and is available at the following link for UI 2: {koboldai_vars.cloudflare_link}/new_ui")
+        logger.message(f"KoboldAI has finished loading and is available at the following link for KoboldAI Lite: {koboldai_vars.cloudflare_link}/lite")
+        logger.message(f"KoboldAI has finished loading and is available at the following link for the API: {koboldai_vars.cloudflare_link}/api")
 
     global badwords
     # These are the tokens that we don't want the AI to ever write