diff --git a/aiserver.py b/aiserver.py
index b63241e1..de9db2c6 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -10861,6 +10861,8 @@ def run():
                 # delay the display of this message until after that step
                 logger.message(f"KoboldAI has finished loading and is available at the following link for UI 1: {cloudflare}")
                 logger.message(f"KoboldAI has finished loading and is available at the following link for UI 2: {cloudflare}/new_ui")
+                logger.message(f"KoboldAI has finished loading and is available at the following link for KoboldAI Lite: {cloudflare}/lite")
+                logger.message(f"KoboldAI has finished loading and is available at the following link for the API: {cloudflare}/api")
         else:
             logger.init_ok("Webserver", status="OK")
             logger.message(f"Webserver has started, you can now connect to this machine at port: {port}")
diff --git a/tpu_mtj_backend.py b/tpu_mtj_backend.py
index 401d6ccf..25c1ba32 100644
--- a/tpu_mtj_backend.py
+++ b/tpu_mtj_backend.py
@@ -1116,8 +1116,10 @@ def load_model(path: str, model_type: str, badwordsids=koboldai_settings.badword
     thread_resources_env = maps.ResourceEnv(maps.Mesh(devices, ('dp', 'mp')), ())
     maps.thread_resources.env = thread_resources_env
     if initial_load:
-        logger.message(f"KoboldAI has finished loading and is available at the following link for UI 1: {koboldai_vars.cloudflare_link}")
-        logger.message(f"KoboldAI has finished loading and is available at the following link for UI 2: {koboldai_vars.cloudflare_link}/new_ui")
+        logger.message(f"KoboldAI has finished loading and is available at the following link for UI 1: {cloudflare}")
+        logger.message(f"KoboldAI has finished loading and is available at the following link for UI 2: {cloudflare}/new_ui")
+        logger.message(f"KoboldAI has finished loading and is available at the following link for KoboldAI Lite: {cloudflare}/lite")
+        logger.message(f"KoboldAI has finished loading and is available at the following link for the API: {cloudflare}/api")
 
     global badwords
     # These are the tokens that we don't want the AI to ever write
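
Both hunks emit the same four startup links. A minimal sketch, not part of this PR, of how the repeated logger.message calls could be shared between aiserver.py and tpu_mtj_backend.py; the helper name log_startup_links is hypothetical, and the endpoint paths are taken from the diff above:

def log_startup_links(logger, base_url: str) -> None:
    # One startup message per endpoint, mirroring the four links added in this diff.
    links = {
        "UI 1": base_url,
        "UI 2": f"{base_url}/new_ui",
        "KoboldAI Lite": f"{base_url}/lite",
        "the API": f"{base_url}/api",
    }
    for name, url in links.items():
        logger.message(f"KoboldAI has finished loading and is available at the following link for {name}: {url}")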