mirror of
https://github.com/KoboldAI/KoboldAI-Client.git
synced 2025-06-05 21:59:24 +02:00
Working tortoise install script
This commit is contained in:
50
install_tortiose_tts.bat
Normal file
50
install_tortiose_tts.bat
Normal file
@@ -0,0 +1,50 @@
|
||||
@echo off
REM Install tortoise-tts (and its pinned dependency set) into the KoboldAI
REM runtime environment on Windows. Reads loader.settings to decide how the
REM bundled miniconda3 runtime was installed (1 = K: drive map, 2 = subfolder,
REM 3 = B: drive map), activates that environment, then pip-installs tortoise.
cd /D %~dp0

:Isolation
REM Strip any inherited conda/Python state so only the bundled runtime is used.
call conda deactivate 2>NUL
set Path=%windir%\system32;%windir%;C:\Windows\System32\Wbem;%windir%\System32\WindowsPowerShell\v1.0\;%windir%\System32\OpenSSH\
SET CONDA_SHLVL=
SET PYTHONNOUSERSITE=1
SET PYTHONPATH=

rmdir /S /Q flask_session 2>NUL

TITLE KoboldAI - Server
SET /P M=<loader.settings
REM Quote both sides: an empty/absent loader.settings would otherwise make the
REM unquoted form (IF %M%==1) a batch syntax error and abort the script.
IF "%M%"=="1" GOTO drivemap
IF "%M%"=="2" GOTO subfolder
IF "%M%"=="3" GOTO drivemap_B

:subfolder
ECHO Runtime launching in subfolder mode
call miniconda3\condabin\activate
GOTO install

:drivemap
ECHO Runtime launching in K: drive mode
subst /D K: >nul
subst K: miniconda3 >nul
call K:\python\condabin\activate
GOTO install

:drivemap_B
ECHO Runtime launching in B: drive mode
subst /D B: >nul
subst B: miniconda3 >nul
call B:\python\condabin\activate
GOTO install

:install
REM Shared install tail (was duplicated per branch; the subfolder copy had
REM drifted and was missing the requirements.txt reinstall that the drivemap
REM branches perform - deduplicating keeps all three modes consistent).
REM --no-dependencies: the transitive deps are listed explicitly so pip cannot
REM pull versions that conflict with KoboldAI's pinned environment.
pip install git+https://github.com/neonbjb/tortoise-tts progressbar inflect librosa rotary-embedding-torch unidecode lazy_loader llvmlite numba joblib decorator audioread msgpack pooch scikit-learn soundfile soxr platformdirs threadpoolctl pydantic-core annotated-types pydantic --no-dependencies
pip install torchaudio --index-url https://download.pytorch.org/whl/cu118
REM Re-apply the project's pinned requirements in case the steps above moved anything.
pip install -r requirements.txt --no-dependencies
REM Keep the console open so the user can inspect the pip output.
cmd /k
pause
exit
|
4
install_tortiose_tts.sh
Executable file
4
install_tortiose_tts.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
#!/bin/bash
# Install tortoise-tts into the KoboldAI micromamba runtime environment,
# then re-apply the project's pinned requirements.
RUNTIME="bin/micromamba run -r runtime -n koboldai"
$RUNTIME pip install git+https://github.com/neonbjb/tortoise-tts OmegaConf deepspeed
$RUNTIME pip install torchaudio --index-url https://download.pytorch.org/whl/cu118
$RUNTIME pip install -r requirements.txt --no-dependencies
|
@@ -21,6 +21,7 @@ queue = None
|
||||
multi_story = False
|
||||
global enable_whitelist
|
||||
enable_whitelist = False
|
||||
slow_tts_message_shown = False
|
||||
|
||||
if importlib.util.find_spec("tortoise") is not None:
|
||||
from tortoise import api
|
||||
@@ -2119,10 +2120,14 @@ class KoboldStoryRegister(object):
|
||||
|
||||
def create_wave_slow(self, make_audio_queue_slow):
|
||||
import pydub
|
||||
global slow_tts_message_shown
|
||||
sample_rate = 24000
|
||||
speaker = 'train_daws'
|
||||
if importlib.util.find_spec("tortoise") is None and not slow_tts_message_shown:
|
||||
logger.info("Disabling slow (and higher quality) tts as it's not installed")
|
||||
slow_tts_message_shown=True
|
||||
if self.tortoise is None and importlib.util.find_spec("tortoise") is not None:
|
||||
self.tortoise=api.TextToSpeech(use_deepspeed=os.environ.get('deepspeed', "true").lower()=="true", kv_cache=os.environ.get('kv_cache', "true").lower()=="true", half=True)
|
||||
self.tortoise=api.TextToSpeech(use_deepspeed=os.environ.get('deepspeed', "false").lower()=="true", kv_cache=os.environ.get('kv_cache', "true").lower()=="true", half=True)
|
||||
|
||||
if importlib.util.find_spec("tortoise") is not None:
|
||||
voice_samples, conditioning_latents = load_voices([speaker])
|
||||
|
@@ -351,7 +351,6 @@ class HFTorchInferenceModel(HFInferenceModel):
|
||||
bad_words_ids=self.active_badwordsids,
|
||||
use_cache=True,
|
||||
num_return_sequences=batch_count,
|
||||
pad_token_id=self.tokenizer.eos_token_id,
|
||||
)
|
||||
else:
|
||||
genout = self.model.generate(
|
||||
|
Reference in New Issue
Block a user