diff --git a/modeling/inference_models/horde/class.py b/modeling/inference_models/horde/class.py
index 8c05c688..336ea52f 100644
--- a/modeling/inference_models/horde/class.py
+++ b/modeling/inference_models/horde/class.py
@@ -39,9 +39,13 @@ class model_backend(InferenceModel):
         self.capabilties = ModelCapabilities(api_host=False)
 
     def is_valid(self, model_name, model_path, menu_path):
-        self.models = self.get_cluster_models()
-        logger.debug("Horde Models: {}".format(self.models))
-        return model_name == "CLUSTER" or model_name in [x['value'] for x in self.models]
+        try:
+            # Only query the Horde cluster when a real API key is configured;
+            # "0000000000" is the anonymous/default placeholder key.
+            # NOTE(review): use value equality (!=), not identity (`is not`) —
+            # `is not` against a str literal is a SyntaxWarning and its result
+            # depends on string interning, so the guard could silently misfire.
+            if utils.koboldai_vars.horde_api_key != "0000000000":
+                self.models = self.get_cluster_models()
+                logger.debug("Horde Models: {}".format(self.models))
+                return model_name == "CLUSTER" or model_name in [x['value'] for x in self.models]
+        except Exception:
+            # Treat any lookup/network failure as "model not valid" rather than
+            # crashing model selection. Narrowed from a bare `except:` so
+            # SystemExit/KeyboardInterrupt still propagate.
+            return False
 
     def get_requested_parameters(self, model_name, model_path, menu_path, parameters = {}):
         self.models = self.get_cluster_models()
@@ -108,13 +112,11 @@ class model_backend(InferenceModel):
         except:
             logger.init_err("KAI Horde Models", status="Failed")
             logger.error("Provided KoboldAI Horde URL unreachable")
-            emit('from_server', {'cmd': 'errmsg', 'data': "Provided KoboldAI Horde URL unreachable"})
             return
         if not req.ok:
             # Something went wrong, print the message and quit since we can't initialize an engine
             logger.init_err("KAI Horde Models", status="Failed")
             logger.error(req.json())
-            emit('from_server', {'cmd': 'errmsg', 'data': req.json()}, room="UI_1")
             return
         engines = req.json()