Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-06-05 21:59:24 +02:00.
HF 4.33.1
This commit is contained in:
@@ -19,7 +19,6 @@ dependencies:
  - bleach=4.1.0
  - pip
  - git=2.35.1
  - sentencepiece
  - protobuf
  - marshmallow>=3.13
  - apispec-webframeworks
@@ -32,9 +31,9 @@ dependencies:
  - flask-ngrok
  - flask-cors
  - lupa==1.10
  - transformers==4.32.1
  - transformers[sentencepiece]==4.33.1
  - huggingface_hub==0.16.4
  - optimum==1.12.0
  - optimum[onnx]==1.12.0
  - safetensors==0.3.3
  - accelerate==0.21.0
  - git+https://github.com/VE-FORBRYDERNE/mkultra
@@ -15,7 +15,6 @@ dependencies:
  - bleach=4.1.0
  - pip
  - git=2.35.1
  - sentencepiece
  - protobuf
  - marshmallow>=3.13
  - apispec-webframeworks
@@ -31,9 +30,9 @@ dependencies:
  - flask-ngrok
  - flask-cors
  - lupa==1.10
  - transformers==4.32.1
  - transformers[sentencepiece]==4.33.1
  - huggingface_hub==0.16.4
  - optimum==1.12.0
  - optimum[onnx]==1.12.0
  - safetensors==0.3.3
  - accelerate==0.20.3
  - git+https://github.com/VE-FORBRYDERNE/mkultra
@@ -15,7 +15,6 @@ dependencies:
  - bleach=4.1.0
  - pip
  - git=2.35.1
  - sentencepiece
  - protobuf
  - marshmallow>=3.13
  - apispec-webframeworks
@@ -30,9 +29,9 @@ dependencies:
  - flask-ngrok
  - flask-cors
  - lupa==1.10
  - transformers==4.32.1
  - transformers[sentencepiece]==4.33.1
  - huggingface_hub==0.16.4
  - optimum==1.12.0
  - optimum[onnx]==1.12.0
  - safetensors==0.3.3
  - accelerate==0.21.0
  - git+https://github.com/VE-FORBRYDERNE/mkultra
@@ -230,6 +230,8 @@ class HFInferenceModel(InferenceModel):
    def _post_load(self) -> None:
        self.badwordsids = koboldai_settings.badwordsids_default
        self.model_type = str(self.model_config.model_type)
        self.model.use_cache = True # Workaround for models that accidentally uploaded with False

        # These are model specific tokenizer overrides if a model has bad defaults
        if self.model_type == "llama":
            # Note: self.tokenizer is a GenericTokenizer, and self.tokenizer.tokenizer is the actual LlamaTokenizer
@@ -1,6 +1,6 @@
transformers==4.32.1
transformers[sentencepiece]==4.33.1
huggingface_hub==0.16.4
optimum==1.12.0
optimum[onnx]==1.12.0
safetensors==0.3.3
Flask==2.2.3
Flask-SocketIO==5.3.2
@@ -15,7 +15,6 @@ dnspython==2.2.1
lupa==1.10
markdown
bleach==4.1.0
sentencepiece
protobuf
accelerate==0.21.0
flask-session==0.4.0
Reference in New Issue
Block a user