Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-06-05 21:59:24 +02:00
Hide exllama if not installed
@@ -1,5 +1,5 @@
 from __future__ import annotations
-
+try:
     import time, json
     import torch
     import requests
@@ -28,9 +28,13 @@ from modeling.inference_model import (
 
     from modeling.tokenizer import GenericTokenizer
 
     from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
     from transformers import LlamaTokenizer
     from exllama.generator import ExLlamaGenerator
+    load_failed = False
+except:
+    load_failed = True
 
 model_backend_type = "GPTQ"
 model_backend_name = "ExLlama"
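The change is the standard optional-dependency guard: the module-level imports are wrapped in try/except so the backend file can always be imported, and the outcome is recorded in a load_failed flag instead of letting an ImportError abort startup. A minimal sketch of the same idea in isolation, with a hypothetical fancy_lib standing in for exllama:

    # Guarded import: the module stays importable even when the optional
    # dependency is missing; we only remember that the import failed.
    try:
        import fancy_lib  # hypothetical optional dependency

        load_failed = False
    except ImportError:
        load_failed = True


    def is_available() -> bool:
        """Report whether the optional backend can actually be used."""
        return not load_failed

The diff uses a bare except: rather than except ImportError:, which is the broader net: it also catches errors the dependency raises during its own initialization, at the cost of hiding unrelated failures.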
@@ -101,10 +105,11 @@ class model_backend(InferenceModel):
             stopper_hooks=True,
             post_token_probs=False,
         )
+        self.disable = load_failed
 
     def is_valid(self, model_name, model_path, menu_path):
+        try:
             gptq_model, _ = load_model_gptq_settings(model_path)
             self.model_config = self._load_config(model_name, model_path)
             return self.model_config and gptq_model
+        except:
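The second half wires the flag into the backend class: self.disable = load_failed hides the backend from the model picker when the import failed, and is_valid is wrapped in try/except so any error while probing a model directory downgrades to "not valid" rather than crashing model detection. A self-contained sketch of that shape, reusing the load_failed flag from the sketch above; the config.json probe is an assumption standing in for the real load_model_gptq_settings() and _load_config() helpers:

    import json
    import os


    class SketchBackend:
        """Hypothetical, simplified stand-in for the real backend class."""

        def __init__(self) -> None:
            # Hide the backend from selection menus if the import guard tripped.
            self.disable = load_failed

        def is_valid(self, model_name, model_path, menu_path):
            try:
                # Stand-in probe: just check that a readable config.json exists.
                with open(os.path.join(model_path, "config.json")) as f:
                    return bool(json.load(f))
            except Exception:
                # Any failure means "this model is not usable with this
                # backend", never a crash in the model-selection flow.
                return False

Catching broadly inside is_valid is defensible because the method is a yes/no probe; callers only need to know whether the backend can serve the model, not why it cannot.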