Merge remote-tracking branch 'upstream/united' into 4bit-plugin

0cc4m
2023-07-23 07:18:58 +02:00
8 changed files with 44 additions and 25 deletions


@@ -54,13 +54,14 @@ class model_backend(HFTorchInferenceModel):
         else:
             temp = {}
         requested_parameters.append({
-            "uitype": "toggle",
-            "unit": "bool",
-            "label": "Use 4-bit",
-            "id": "use_4_bit",
-            "default": temp['use_4_bit'] if 'use_4_bit' in temp else False,
-            "tooltip": "Whether or not to use BnB's 4-bit mode",
+            "uitype": "dropdown",
+            "unit": "text",
+            "label": "Quantization",
+            "id": "quantization",
+            "default": temp['quantization'] if 'quantization' in temp else 'none',
+            "tooltip": "Whether or not to use BnB's 4-bit or 8-bit mode",
             "menu_path": "Layers",
+            "children": [{'text': 'None', 'value':'none'},{'text': '4-bit', 'value': '4bit'}, {'text': '8-bit', 'value': '8bit'}],
             "extra_classes": "",
             "refresh_model_inputs": False
         })
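
The hunk above swaps the old boolean "Use 4-bit" toggle for a three-way "Quantization" dropdown. A minimal sketch (illustrative only, not part of the commit) of how the dropdown's children pair display text with the value that later arrives as parameters['quantization']:

# Illustrative sketch: the "children" entries map UI labels to the machine
# value the frontend sends back for the "quantization" parameter.
quantization_choices = [
    {'text': 'None',  'value': 'none'},
    {'text': '4-bit', 'value': '4bit'},
    {'text': '8-bit', 'value': '8bit'},
]

# e.g. the user picks the "4-bit" entry in the UI
selected = next(c['value'] for c in quantization_choices if c['text'] == '4-bit')
assert selected == '4bit'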
@@ -70,7 +71,7 @@ class model_backend(HFTorchInferenceModel):
     def set_input_parameters(self, parameters):
         super().set_input_parameters(parameters)
-        self.use_4_bit = parameters['use_4_bit'] if 'use_4_bit' in parameters else False
+        self.quantization = parameters['quantization'] if 'quantization' in parameters else False
 
     def _load(self, save_model: bool, initial_load: bool) -> None:
         utils.koboldai_vars.allowsp = True
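
Reading of the replaced assignment above (a note, not part of the commit): when no 'quantization' key is supplied, self.quantization is left as False rather than 'none'; False fails both string comparisons used further down, so loading still proceeds unquantized.

# Note (not from the commit): an absent key leaves the attribute as False,
# which behaves the same as 'none' in the string checks below.
quantization = False
assert quantization != "8bit" and quantization != "4bit"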
@@ -100,7 +101,15 @@ class model_backend(HFTorchInferenceModel):
"low_cpu_mem_usage": True,
}
if self.use_4_bit or utils.koboldai_vars.colab_arg:
if self.quantization == "8bit":
tf_kwargs.update({
"quantization_config":BitsAndBytesConfig(
load_in_8bit=True,
llm_int8_enable_fp32_cpu_offload=True
),
})
if self.quantization == "4bit" or utils.koboldai_vars.colab_arg:
tf_kwargs.update({
"quantization_config":BitsAndBytesConfig(
load_in_4bit=True,
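
For context, the same selection logic can be exercised outside the backend with stock transformers. A hedged sketch, assuming the dropdown values above; the model id and variable names are illustrative, not from the commit:

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization = "4bit"  # mirrors the dropdown values: 'none', '4bit', '8bit'

if quantization == "8bit":
    bnb_config = BitsAndBytesConfig(
        load_in_8bit=True,
        llm_int8_enable_fp32_cpu_offload=True,  # let unquantized leftovers sit on CPU in fp32
    )
elif quantization == "4bit":
    bnb_config = BitsAndBytesConfig(load_in_4bit=True)
else:
    bnb_config = None  # no quantization requested

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",            # illustrative model id
    quantization_config=bnb_config,
    low_cpu_mem_usage=True,
)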
@@ -119,6 +128,11 @@ class model_backend(HFTorchInferenceModel):
             # Also, lazy loader doesn't support GPT-2 models
             self.lazy_load = False
 
+        if self.model_type == "llama":
+            tf_kwargs.update({
+                "pretraining_tp": 1 # Workaround recommended by HF to fix their mistake on the config.json tuners adopted
+            })
+
         logger.debug(
             "lazy_load: {} hascuda: {} breakmodel: {} nobreakmode: {}".format(
                 self.lazy_load,
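
The added llama branch forces pretraining_tp back to 1, the workaround HF recommended for Llama models whose config.json shipped with pretraining_tp greater than 1, which routes the forward pass through a slower sliced-linear code path. A standalone sketch of the same override (model id is illustrative, not from the commit):

from transformers import AutoModelForCausalLM

# Extra kwargs that from_pretrained does not recognise are forwarded to the
# model config, which is how tf_kwargs applies the override in the hunk above.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",  # illustrative model id
    pretraining_tp=1,            # use the regular (non-sliced) linear layers
)
assert model.config.pretraining_tp == 1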
@@ -315,7 +329,7 @@ class model_backend(HFTorchInferenceModel):
"disk_layers": self.disk_layers
if "disk_layers" in vars(self)
else 0,
"use_4_bit": self.use_4_bit,
"quantization": self.quantization,
},
f,
indent="",