Hide exllama if not installed

Henk
2023-09-11 01:21:29 +02:00
parent fff28daafd
commit 2455e5d60c


@@ -1,5 +1,5 @@
 from __future__ import annotations
-
-import time, json
-import torch
-import requests
+try:
+    import time, json
+    import torch
+    import requests
@@ -28,9 +28,13 @@ from modeling.inference_model import (
-from modeling.tokenizer import GenericTokenizer
+    from modeling.tokenizer import GenericTokenizer
 
-from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
-from transformers import LlamaTokenizer
-from exllama.generator import ExLlamaGenerator
+    from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
+    from transformers import LlamaTokenizer
+    from exllama.generator import ExLlamaGenerator
+
+    load_failed = False
+except:
+    load_failed = True
 
 
 model_backend_type = "GPTQ"
 model_backend_name = "ExLlama"
@@ -101,10 +105,11 @@ class model_backend(InferenceModel):
             stopper_hooks=True,
             post_token_probs=False,
         )
+        self.disable = load_failed
 
     def is_valid(self, model_name, model_path, menu_path):
-        gptq_model, _ = load_model_gptq_settings(model_path)
         try:
+            gptq_model, _ = load_model_gptq_settings(model_path)
             self.model_config = self._load_config(model_name, model_path)
             return self.model_config and gptq_model
         except:
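
For context, the change applies a common optional-dependency guard: the backend's entire import block is wrapped in try/except, the outcome is recorded in load_failed, and the constructor copies that flag into self.disable so the backend list can hide ExLlama instead of crashing when the package is missing. Below is a minimal, self-contained sketch of the pattern, assuming a simplified stand-in for the real InferenceModel backend class; only load_failed, self.disable, and the exllama import come from the diff itself.

from __future__ import annotations

try:
    # Optional dependency: the exllama package may not be installed at all.
    from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
    load_failed = False
except Exception:
    # The actual commit uses a bare "except:", so any import-time error,
    # not just ImportError, marks the backend as unavailable.
    load_failed = True


class model_backend:
    def __init__(self) -> None:
        # The menu code checks .disable to decide whether to list this backend.
        self.disable = load_failed


# Usage: callers can simply skip disabled backends.
backend = model_backend()
if not backend.disable:
    print("ExLlama backend available")

The same idea motivates moving load_model_gptq_settings() inside the try block of is_valid(): if anything fails while probing the model folder, the method falls through to the except clause and reports "not valid" rather than raising an unhandled exception.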