+        self.quantization = parameters['quantization'] if 'quantization' in parameters else 'none'
"disk_layers" in vars(self) else 0, - "use_4_bit": self.use_4_bit, + "quantization": self.quantization, }, f, indent="", diff --git a/static/koboldai.js b/static/koboldai.js index 94ac6ce4..8b70dd6a 100644 --- a/static/koboldai.js +++ b/static/koboldai.js @@ -2011,7 +2011,7 @@ function load_model() { data = {} if (settings_area) { for (const element of settings_area.querySelectorAll(".model_settings_input:not(.hidden)")) { - var element_data = element.value; + var element_data = element.getAttribute("data_type") === "bool" ? element.checked : element.value; if ((element.tagName == "SELECT") && (element.multiple)) { element_data = []; for (var i=0, iLen=element.options.length; i