Mirror of https://github.com/KoboldAI/KoboldAI-Client.git
Merge pull request #414 from one-some/submit-ctx-menu
Submit context menu
@@ -34,6 +34,7 @@ from modeling.stoppers import Stoppers
 from modeling.post_token_hooks import PostTokenHooks
 from modeling.inference_models.hf import HFInferenceModel
 from modeling.inference_model import (
+    GenerationMode,
     GenerationResult,
     GenerationSettings,
     ModelCapabilities,
@@ -253,7 +254,10 @@ class HFTorchInferenceModel(HFInferenceModel):
         assert kwargs.pop("logits_warper", None) is not None
         kwargs["logits_warper"] = KoboldLogitsWarperList()

-        if utils.koboldai_vars.newlinemode in ["s", "ns"]:
+        if (
+            utils.koboldai_vars.newlinemode in ["s", "ns"]
+            and not m_self.gen_state["allow_eos"]
+        ):
             kwargs["eos_token_id"] = -1
             kwargs.setdefault("pad_token_id", 2)
         return new_sample.old_sample(self, *args, **kwargs)
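The hunk above stops forcing eos_token_id to -1 (an id the model never emits) when the generation state allows EOS. A minimal standalone sketch of that gating logic follows; build_sample_kwargs is a hypothetical name for illustration, not a function from the codebase, though "newlinemode" and "allow_eos" match the diff.

# Sketch of the EOS-gating logic above. Only "newlinemode" and
# "allow_eos" come from the diff; everything else is illustrative.
from typing import Any, Dict


def build_sample_kwargs(
    newlinemode: str,
    gen_state: Dict[str, Any],
    **kwargs: Any,
) -> Dict[str, Any]:
    # In the "s"/"ns" newline modes, EOS is normally suppressed by
    # pointing eos_token_id at -1, an id the model never produces.
    # The new allow_eos flag skips that suppression so generation
    # can terminate naturally on the real EOS token.
    if newlinemode in ["s", "ns"] and not gen_state.get("allow_eos", False):
        kwargs["eos_token_id"] = -1
        kwargs.setdefault("pad_token_id", 2)
    return kwargs


# Usage: with allow_eos set, eos_token_id is left untouched.
assert "eos_token_id" not in build_sample_kwargs("s", {"allow_eos": True})
assert build_sample_kwargs("s", {"allow_eos": False})["eos_token_id"] == -1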
@@ -604,3 +608,9 @@ class HFTorchInferenceModel(HFInferenceModel):
             self.breakmodel = False
             self.usegpu = False
             return
+
+    def get_supported_gen_modes(self) -> List[GenerationMode]:
+        # Declaring UNTIL_EOS support changes the torch patch so EOS is no longer banned.
+        return super().get_supported_gen_modes() + [
+            GenerationMode.UNTIL_EOS
+        ]
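The new override advertises the extra mode on top of whatever the base class reports, so each backend only lists what it adds. A self-contained sketch of that pattern is below; the base-class body and the enum values other than UNTIL_EOS are illustrative assumptions, not taken from the repository.

# Sketch of the capability-advertising pattern used above; the base
# class body and the STANDARD/FOREVER values are assumptions.
from enum import Enum
from typing import List


class GenerationMode(Enum):
    STANDARD = "standard"
    FOREVER = "forever"
    UNTIL_EOS = "until_eos"


class InferenceModel:
    def get_supported_gen_modes(self) -> List[GenerationMode]:
        # Baseline capability every backend shares.
        return [GenerationMode.STANDARD]


class HFTorchInferenceModel(InferenceModel):
    def get_supported_gen_modes(self) -> List[GenerationMode]:
        # Torch backends can stop banning EOS, so they additionally
        # support running until the model emits its EOS token.
        return super().get_supported_gen_modes() + [GenerationMode.UNTIL_EOS]


print(HFTorchInferenceModel().get_supported_gen_modes())
# [<GenerationMode.STANDARD: 'standard'>, <GenerationMode.UNTIL_EOS: 'until_eos'>]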