BnB dependency check

This commit is contained in:
Henk
2023-07-15 18:56:13 +02:00
parent 160effb9ea
commit c43d60772b

View File

@@ -11,6 +11,7 @@ from transformers import AutoModelForCausalLM, GPTNeoForCausalLM, GPT2LMHeadMode
import utils import utils
import modeling.lazy_loader as lazy_loader import modeling.lazy_loader as lazy_loader
import koboldai_settings import koboldai_settings
import importlib
from logger import logger from logger import logger
@@ -26,17 +27,21 @@ class model_backend(HFTorchInferenceModel):
def get_requested_parameters(self, model_name, model_path, menu_path, parameters=None):
    """Return the UI parameters for this backend, adding a "Use 4-bit" toggle
    only when the bitsandbytes package is installed.

    Args:
        model_name: Name of the model being configured (forwarded to super()).
        model_path: Filesystem path of the model (forwarded to super()).
        menu_path: Menu location requesting the parameters (forwarded to super()).
        parameters: Optional dict of pre-set parameters. NOTE(review): the
            original code ignores this and always forwards an empty dict to
            super(); that behavior is preserved here. Default changed from a
            shared mutable ``{}`` to ``None`` (never read, so no behavior change).

    Returns:
        list: The parameter descriptors from the superclass, plus the 4-bit
        toggle descriptor when bitsandbytes is importable.
    """
    # Local import: a bare `import importlib` does not guarantee the
    # `importlib.util` submodule is loaded (it stopped being implicitly
    # imported in Python 3.12), so import it explicitly where it is used.
    import importlib.util

    requested_parameters = super().get_requested_parameters(model_name, model_path, menu_path, parameters = {})
    # find_spec returns None when the package cannot be found; it does not
    # actually import bitsandbytes, so this check is cheap and side-effect free.
    if importlib.util.find_spec("bitsandbytes") is not None:
        requested_parameters.append({
            "uitype": "toggle",
            "unit": "bool",
            "label": "Use 4-bit",
            "id": "use_4_bit",
            "default": False,
            "tooltip": "Whether or not to use BnB's 4-bit mode",
            "menu_path": "Layers",
            "extra_classes": "",
            "refresh_model_inputs": False
        })
    else:
        # Best-effort degradation: the toggle is simply omitted from the UI.
        logger.warning("Bitsandbytes is not installed, you can not use Huggingface models in 4-bit")
    return requested_parameters
def set_input_parameters(self, parameters): def set_input_parameters(self, parameters):