From efe268df605b3bb8f8ba4b2ac9fd161b672ef7e4 Mon Sep 17 00:00:00 2001
From: somebody
Date: Tue, 2 May 2023 20:14:10 -0500
Subject: [PATCH] Move overrides to better places

---
 modeling/inference_model.py           |  6 ------
 modeling/inference_models/hf.py       | 17 +++++++++++++++++
 modeling/inference_models/hf_torch.py |  9 ---------
 3 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/modeling/inference_model.py b/modeling/inference_model.py
index 8d0c5294..886c7e5e 100644
--- a/modeling/inference_model.py
+++ b/modeling/inference_model.py
@@ -197,12 +197,6 @@ class InferenceModel:
         Returns:
             AutoTokenizer: Tokenizer deemed fit for the location string. May be a fallback tokenizer.
         """
-        if utils.koboldai_vars.model_type == "xglm":
-            # Default to newline mode if using XGLM
-            utils.koboldai_vars.newlinemode = "s"
-        elif utils.koboldai_vars.model_type in ["opt", "bloom"]:
-            # Handle but don't convert newlines if using Fairseq models that have newlines trained in them
-            utils.koboldai_vars.newlinemode = "ns"
 
         std_kwargs = {"revision": utils.koboldai_vars.revision, "cache_dir": "cache"}
 
diff --git a/modeling/inference_models/hf.py b/modeling/inference_models/hf.py
index eac5284f..63c0a40d 100644
--- a/modeling/inference_models/hf.py
+++ b/modeling/inference_models/hf.py
@@ -32,6 +32,23 @@ class HFInferenceModel(InferenceModel):
             if utils.koboldai_vars.newlinemode == "n":
                 utils.koboldai_vars.badwordsids.append([self.tokenizer.eos_token_id])
 
+        # These are model specific tokenizer overrides if a model has bad defaults
+        if utils.koboldai_vars.model_type == "llama":
+            self.tokenizer.decode_with_prefix_space = True
+            self.tokenizer.add_bos_token = False
+        elif utils.koboldai_vars.model_type == "opt":
+            self.tokenizer._koboldai_header = self.tokenizer.encode("")
+            self.tokenizer.add_bos_token = False
+            self.tokenizer.add_prefix_space = False
+
+        # Change newline behavior to match model quirks
+        if utils.koboldai_vars.model_type == "xglm":
+            # Default to newline mode if using XGLM
+            utils.koboldai_vars.newlinemode = "s"
+        elif utils.koboldai_vars.model_type in ["opt", "bloom"]:
+            # Handle but don't convert newlines if using Fairseq models that have newlines trained in them
+            utils.koboldai_vars.newlinemode = "ns"
+
         return super()._post_load()
 
     def get_local_model_path(
diff --git a/modeling/inference_models/hf_torch.py b/modeling/inference_models/hf_torch.py
index 1997e7fe..49cdfc0f 100644
--- a/modeling/inference_models/hf_torch.py
+++ b/modeling/inference_models/hf_torch.py
@@ -132,15 +132,6 @@ class HFTorchInferenceModel(HFInferenceModel):
         if not utils.koboldai_vars.model_type:
             utils.koboldai_vars.model_type = m_self.get_model_type()
 
-        # These are model specific overrides if a model has bad defaults
-        if utils.koboldai_vars.model_type == "llama":
-            m_self.tokenizer.decode_with_prefix_space = True
-            m_self.tokenizer.add_bos_token = False
-        elif utils.koboldai_vars.model_type == "opt":
-            m_self.tokenizer._koboldai_header = m_self.tokenizer.encode("")
-            m_self.tokenizer.add_bos_token = False
-            m_self.tokenizer.add_prefix_space = False
-
         # Patch stopping_criteria
         class PTHStopper(StoppingCriteria):
             def __call__(
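
For context, a minimal self-contained sketch of the behavior this patch relies on: _post_load overrides placed on the shared HFInferenceModel base class run for every Hugging Face backend, whereas the old copy in HFTorchInferenceModel only ran on the torch path. The class names below mirror the patch, but the bodies are illustrative only; the SimpleNamespace tokenizer and the hard-coded model type are stand-ins for the real Hugging Face tokenizer and utils.koboldai_vars, not the project's actual code.

from types import SimpleNamespace

class InferenceModel:
    def _post_load(self):
        pass  # generic post-load hook

class HFInferenceModel(InferenceModel):
    def __init__(self, model_type):
        self.model_type = model_type  # stand-in for utils.koboldai_vars.model_type
        # Stand-in tokenizer carrying the attributes the patch touches for llama
        self.tokenizer = SimpleNamespace(
            add_bos_token=True, decode_with_prefix_space=False
        )

    def _post_load(self):
        # Tokenizer overrides now live on the shared HF base class (hf.py hunk),
        # so any subclass that calls super()._post_load() picks them up.
        if self.model_type == "llama":
            self.tokenizer.decode_with_prefix_space = True
            self.tokenizer.add_bos_token = False
        return super()._post_load()

class HFTorchInferenceModel(HFInferenceModel):
    def _post_load(self):
        # No torch-only copy of the overrides anymore (hf_torch.py hunk);
        # delegating to the base class is sufficient.
        return super()._post_load()

model = HFTorchInferenceModel("llama")
model._post_load()
assert model.tokenizer.add_bos_token is False  # override applied via the base class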