diff --git a/aiserver.py b/aiserver.py
index 40ff9c5a..34e2334a 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -8161,7 +8161,7 @@ class GenerationInputSchema(SamplerSettingsSchema):
     frmtrmblln: Optional[bool] = fields.Boolean(metadata={"description": "Output formatting option. When enabled, replaces all occurrences of two or more consecutive newlines in the output with one newline.\n\nIf `disable_output_formatting` is `true`, this defaults to `false` instead of the value in the KoboldAI GUI."})
     frmtrmspch: Optional[bool] = fields.Boolean(metadata={"description": "Output formatting option. When enabled, removes `#/@%{}+=~|\^<>` from the output.\n\nIf `disable_output_formatting` is `true`, this defaults to `false` instead of the value in the KoboldAI GUI."})
     singleline: Optional[bool] = fields.Boolean(metadata={"description": "Output formatting option. When enabled, removes everything after the first line of the output, including the newline.\n\nIf `disable_output_formatting` is `true`, this defaults to `false` instead of the value in the KoboldAI GUI."})
-    use_default_badwordids: bool = fields.Boolean(load_default=True, metadata={"description": "Ban tokens that commonly worsen the writing experience for continuous story writing"})
+    use_default_badwordsids: bool = fields.Boolean(load_default=True, metadata={"description": "Ban tokens that commonly worsen the writing experience for continuous story writing"})
     disable_input_formatting: bool = fields.Boolean(load_default=True, metadata={"description": "When enabled, all input formatting options default to `false` instead of the value in the KoboldAI GUI"})
     frmtadsnsp: Optional[bool] = fields.Boolean(metadata={"description": "Input formatting option. When enabled, adds a leading space to your input if there is no trailing whitespace at the end of the previous action.\n\nIf `disable_input_formatting` is `true`, this defaults to `false` instead of the value in the KoboldAI GUI."})
     quiet: Optional[bool] = fields.Boolean(metadata={"description": "When enabled, Generated output will not be displayed in the console."})
@@ -8313,7 +8313,7 @@ def _generate_text(body: GenerationInputSchema):
         "sampler_order": ("koboldai_vars", "sampler_order", None),
         "sampler_full_determinism": ("koboldai_vars", "full_determinism", None),
         "stop_sequence": ("koboldai_vars", "stop_sequence", None),
-        "use_default_badwordids": ("koboldai_vars", "use_default_badwordids", None),
+        "use_default_badwordsids": ("koboldai_vars", "use_default_badwordsids", None),
     }
     saved_settings = {}
     set_aibusy(1)
diff --git a/gensettings.py b/gensettings.py
index 8bb28513..9b69af43 100644
--- a/gensettings.py
+++ b/gensettings.py
@@ -400,7 +400,7 @@ gensettingstf = [
     "uitype": "toggle",
     "unit": "bool",
     "label": "Ban Bad Tokens",
-    "id": "setusedefaultbadwordids",
+    "id": "setusedefaultbadwordsids",
     "min": 0,
     "max": 1,
     "step": 1,
@@ -409,7 +409,7 @@
     "menu_path": "Settings",
     "sub_path": "Sampling",
     "classname": "model",
-    "name": "use_default_badwordids",
+    "name": "use_default_badwordsids",
     "ui_level": 0
     },
     {
diff --git a/koboldai_settings.py b/koboldai_settings.py
index 5598eb62..f447f3ee 100644
--- a/koboldai_settings.py
+++ b/koboldai_settings.py
@@ -693,7 +693,7 @@ class model_settings(settings):
         self._koboldai_vars = koboldai_vars
         self.alt_multi_gen = False
         self.bit_8_available = None
-        self.use_default_badwordids = True
+        self.use_default_badwordsids = True
         self.supported_gen_modes = []
 
     def reset_for_model_load(self):
diff --git a/modeling/inference_models/hf_torch.py b/modeling/inference_models/hf_torch.py
index 5e6e0a95..fcdd9fb9 100644
--- a/modeling/inference_models/hf_torch.py
+++ b/modeling/inference_models/hf_torch.py
@@ -330,17 +330,17 @@ class HFTorchInferenceModel(HFInferenceModel):
         if seed is not None:
             torch.manual_seed(seed)
 
-        if utils.koboldai_vars.use_default_badwordids:
-            self.active_badwordids = self.badwordsids + additional_bad_words_ids
+        if utils.koboldai_vars.use_default_badwordsids:
+            self.active_badwordsids = self.badwordsids + additional_bad_words_ids
         else:
             if additional_bad_words_ids:
-                self.active_badwordids = additional_bad_words_ids
+                self.active_badwordsids = additional_bad_words_ids
             else:
-                self.active_badwordids = None
+                self.active_badwordsids = None
 
         with torch.no_grad():
             start_time = time.time()
-            if self.active_badwordids: ## I know duplicating this is ugly, but HF checks if its present and accepts nothing but actual token bans if its there (Which I can't guarantee would be universal enough).... - Henk
+            if self.active_badwordsids: ## I know duplicating this is ugly, but HF checks if its present and accepts nothing but actual token bans if its there (Which I can't guarantee would be universal enough).... - Henk
                 genout = self.model.generate(
                     input_ids=gen_in,
                     do_sample=True,
@@ -348,7 +348,7 @@
                         len(prompt_tokens) + max_new, utils.koboldai_vars.max_length
                     ),
                     repetition_penalty=1.0,
-                    bad_words_ids=self.active_badwordids,
+                    bad_words_ids=self.active_badwordsids,
                     use_cache=True,
                     num_return_sequences=batch_count,
                 )
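
Note on the aiserver.py hunks: the renamed field is API-visible. Marshmallow uses the attribute name as the JSON key, so after this change clients must send `use_default_badwordsids` in the generation payload, and with marshmallow's default unknown=RAISE behavior a payload still using the old spelling is rejected rather than silently ignored (KoboldAI may configure unknown handling differently). A minimal sketch of that mechanism, using a plain marshmallow.Schema stand-in rather than the real GenerationInputSchema:

from marshmallow import Schema, fields

# Minimal sketch, not KoboldAI's real schema: it only demonstrates that the
# attribute name doubles as the JSON key, which is what makes this rename a
# client-facing change.
class GenerationInputSketch(Schema):
    prompt = fields.String(required=True)
    use_default_badwordsids = fields.Boolean(
        load_default=True,  # same default the real field declares
        metadata={"description": "Ban tokens that commonly worsen the writing experience"},
    )

schema = GenerationInputSketch()
print(schema.load({"prompt": "Hi"}))
# {'prompt': 'Hi', 'use_default_badwordsids': True}
print(schema.load({"prompt": "Hi", "use_default_badwordsids": False}))
# {'prompt': 'Hi', 'use_default_badwordsids': False}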
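
Note on the modeling/inference_models/hf_torch.py hunks: beyond the rename, the surrounding logic only hands `bad_words_ids` to generate() when there is a real, non-empty list of token bans, because (per the inline comment) Hugging Face validates the argument whenever it is present. The duplicated generate() call that the comment apologizes for can be avoided by assembling the keyword arguments in a dict and adding `bad_words_ids` conditionally. A runnable sketch of that pattern; the model name, the default ban list, and the extra banned strings are illustrative stand-ins, not KoboldAI's actual defaults:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in model
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Stand-ins for self.badwordsids and additional_bad_words_ids; each entry is
# a list of token ids that generate() will refuse to emit as a sequence.
default_badwordsids = [[tokenizer.eos_token_id]]
additional_bad_words_ids = tokenizer(["[", "***"], add_special_tokens=False).input_ids

use_default_badwordsids = True  # mirrors utils.koboldai_vars.use_default_badwordsids
if use_default_badwordsids:
    active_badwordsids = default_badwordsids + additional_bad_words_ids
else:
    active_badwordsids = additional_bad_words_ids or None

gen_in = tokenizer("Once upon a time", return_tensors="pt").input_ids

with torch.no_grad():
    gen_kwargs = dict(
        input_ids=gen_in,
        do_sample=True,
        max_length=48,
        repetition_penalty=1.0,
        use_cache=True,
        num_return_sequences=1,
    )
    # Add bad_words_ids only when there are real bans, so generate() never
    # receives an empty or None value for the argument.
    if active_badwordsids:
        gen_kwargs["bad_words_ids"] = active_badwordsids
    genout = model.generate(**gen_kwargs)

print(tokenizer.decode(genout[0], skip_special_tokens=True))

Both forms behave the same; the kwargs dict just keeps a single call site instead of two nearly identical generate() calls.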