Mirror of https://github.com/KoboldAI/KoboldAI-Client.git (synced 2025-06-05 21:59:24 +02:00)
Hide exllama if not installed
@@ -1,36 +1,40 @@
 from __future__ import annotations
 
-import time, json
-import torch
-import requests
-import numpy as np
-from typing import List, Optional, Union
-import os
-import glob
-from pathlib import Path
-import re
-import warnings
-import gc
-
-import utils
-from logger import logger
-from modeling import warpers
-from modeling.warpers import Warper
-from modeling.stoppers import Stoppers
-from modeling.post_token_hooks import PostTokenHooks
-from modeling.inference_model import (
-    GenerationResult,
-    GenerationSettings,
-    InferenceModel,
-    ModelCapabilities,
-)
-
-from modeling.tokenizer import GenericTokenizer
-
-from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
-from transformers import LlamaTokenizer
-from exllama.generator import ExLlamaGenerator
-
+try:
+    import time, json
+    import torch
+    import requests
+    import numpy as np
+    from typing import List, Optional, Union
+    import os
+    import glob
+    from pathlib import Path
+    import re
+    import warnings
+    import gc
+
+    import utils
+    from logger import logger
+    from modeling import warpers
+    from modeling.warpers import Warper
+    from modeling.stoppers import Stoppers
+    from modeling.post_token_hooks import PostTokenHooks
+    from modeling.inference_model import (
+        GenerationResult,
+        GenerationSettings,
+        InferenceModel,
+        ModelCapabilities,
+    )
+
+    from modeling.tokenizer import GenericTokenizer
+
+    from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
+    from transformers import LlamaTokenizer
+    from exllama.generator import ExLlamaGenerator
+
+    load_failed = False
+except:
+    load_failed = True
 
 model_backend_type = "GPTQ"
 model_backend_name = "ExLlama"
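As a standalone illustration of the guard introduced above (a minimal sketch, not code from this repo): if any import of the optional exllama dependency fails, the module still loads and only records the failure. Note the commit itself uses a bare except:, which also swallows non-import errors; ImportError is the case it is guarding against.

try:
    # Optional dependency: only present when the user has installed exllama.
    from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig

    load_failed = False
except Exception:
    # Record the failure instead of raising, so the backend can be hidden later.
    load_failed = True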
@@ -101,10 +105,11 @@ class model_backend(InferenceModel):
             stopper_hooks=True,
             post_token_probs=False,
         )
+        self.disable = load_failed
 
     def is_valid(self, model_name, model_path, menu_path):
-        gptq_model, _ = load_model_gptq_settings(model_path)
         try:
+            gptq_model, _ = load_model_gptq_settings(model_path)
             self.model_config = self._load_config(model_name, model_path)
             return self.model_config and gptq_model
         except:
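The second hunk wires the flag into the backend: the constructor sets self.disable when the imports failed, and is_valid now calls load_model_gptq_settings inside the try block, so a missing or unreadable GPTQ config makes the backend report invalid instead of raising. A hypothetical consumer of such a disable flag might look like the sketch below (the Backend class and visible_backends helper are illustrative, not KoboldAI's actual API):

class Backend:
    def __init__(self, name: str, disable: bool):
        self.name = name
        self.disable = disable  # True when the optional dependency failed to import

def visible_backends(backends: list[Backend]) -> list[str]:
    # Skip backends whose dependencies are unavailable, hiding them from the menu.
    return [b.name for b in backends if not b.disable]

# With exllama missing, only the other backends remain selectable:
print(visible_backends([Backend("ExLlama", True), Backend("Huggingface", False)]))
# -> ['Huggingface']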