Merge pull request #179 from henk717/united

1.19.2
Commit f2077b8e58 by henk717, 2022-11-20 16:26:03 +01:00 (committed by GitHub)
18 changed files with 1184 additions and 73 deletions


@@ -1,7 +1,7 @@
#!/usr/bin/python3
#==================================================================#
# KoboldAI
# Version: 1.19.1
# Version: 1.19.2
# By: The KoboldAI Community
#==================================================================#
@@ -125,6 +125,7 @@ model_menu = {
["NSFW Models", "nsfwlist", "", True],
["Untuned OPT", "optlist", "", True],
["Untuned GPT-Neo/J", "gptneolist", "", True],
["Untuned Pythia", "pythialist", "", True],
["Untuned Fairseq Dense", "fsdlist", "", True],
["Untuned Bloom", "bloomlist", "", True],
["Untuned XGLM", "xglmlist", "", True],
@@ -154,6 +155,7 @@ model_menu = {
["OPT Nerys 6B V2 (Hybrid)", "KoboldAI/OPT-6B-nerys-v2", "16GB", False],
["Janeway FSD 6.7B", "KoboldAI/fairseq-dense-6.7B-Janeway", "16GB", False],
["Janeway Neo 6B", "KoboldAI/GPT-J-6B-Janeway", "16GB", False],
["Qilin Lit 6B (SFW)", "rexwang8/qilin-lit-6b", "16GB", False],
["Janeway Neo 2.7B", "KoboldAI/GPT-Neo-2.7B-Janeway", "8GB", False],
["Janeway FSD 2.7B", "KoboldAI/fairseq-dense-2.7B-Janeway", "8GB", False],
["Nerys FSD 2.7B (Hybrid)", "KoboldAI/fairseq-dense-2.7B-Nerys", "8GB", False],
@@ -183,12 +185,31 @@ model_menu = {
],
'gptneolist': [
["GPT-NeoX 20B", "EleutherAI/gpt-neox-20b", "64GB", False],
["Pythia 13B (NeoX, Same dataset)", "EleutherAI/pythia-13b", "32GB", False],
["GPT-J 6B", "EleutherAI/gpt-j-6B", "16GB", False],
["GPT-Neo 2.7B", "EleutherAI/gpt-neo-2.7B", "8GB", False],
["GPT-Neo 1.3B", "EleutherAI/gpt-neo-1.3B", "6GB", False],
["Pythia 800M (NeoX, Same dataset)", "EleutherAI/pythia-800m", "4GB", False],
["Pythia 350M (NeoX, Same dataset)", "EleutherAI/pythia-350m", "2GB", False],
["GPT-Neo 125M", "EleutherAI/gpt-neo-125M", "2GB", False],
["Return to Main Menu", "mainmenu", "", True],
],
'pythialist': [
["Pythia 13B Deduped", "EleutherAI/pythia-13b-deduped", "32GB", False],
["Pythia 13B", "EleutherAI/pythia-13b", "32GB", False],
["Pythia 6.7B Deduped", "EleutherAI/pythia-6.7b-deduped", "16GB", False],
["Pythia 6.7B", "EleutherAI/pythia-6.7b", "16GB", False],
["Pythia 1.3B Deduped", "EleutherAI/pythia-1.3b-deduped", "6GB", False],
["Pythia 1.3B", "EleutherAI/pythia-1.3b", "6GB", False],
["Pythia 800M", "EleutherAI/pythia-800m", "4GB", False],
["Pythia 350M Deduped", "EleutherAI/pythia-350m-deduped", "2GB", False],
["Pythia 350M", "EleutherAI/pythia-350m", "2GB", False],
["Pythia 125M Deduped", "EleutherAI/pythia-125m-deduped", "2GB", False],
["Pythia 125M", "EleutherAI/pythia-125m", "2GB", False],
["Pythia 19M Deduped", "EleutherAI/pythia-19m-deduped", "1GB", False],
["Pythia 19M", "EleutherAI/pythia-19m", "1GB", False],
["Return to Main Menu", "mainmenu", "", True],
],
'gpt2list': [
["GPT-2 XL", "gpt2-xl", "6GB", False],
["GPT-2 Large", "gpt2-large", "4GB", False],
@@ -452,6 +473,7 @@ def emit(*args, **kwargs):
return _emit(*args, **kwargs)
except AttributeError:
return socketio.emit(*args, **kwargs)
utils.emit = emit
# marshmallow/apispec setup
from apispec import APISpec
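The new `utils.emit = emit` line shares this wrapper with the `utils` module, so helper code there can report progress to connected clients without importing `aiserver`. A hedged sketch of the fallback pattern itself (simplified, assuming Flask-SocketIO; the request-bound `_emit` fails with AttributeError when called outside a client request, e.g. from a background thread):

from flask_socketio import SocketIO, emit as _emit

socketio = SocketIO()

def emit(*args, **kwargs):
    # Prefer the request-scoped emitter; fall back to the server-wide
    # one when there is no active client request to bind to.
    try:
        return _emit(*args, **kwargs)
    except AttributeError:
        return socketio.emit(*args, **kwargs)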
@@ -756,6 +778,12 @@ def getmodelname():
modelname = vars.model
return modelname
#==================================================================#
# Get hidden size from model
#==================================================================#
def get_hidden_size_from_model(model):
return model.get_input_embeddings().embedding_dim
#==================================================================#
# Breakmodel configuration functions
#==================================================================#
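`get_hidden_size_from_model` is promoted here to module scope (its duplicate inside `load_model` is deleted further down). It reads the hidden size off the input embedding matrix, whose width equals the model's hidden dimension in standard transformer LMs. A hedged usage sketch with the helper above in scope (assumes the transformers package; the model name is just an example from the menus above):

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
print(get_hidden_size_from_model(model))  # embedding_dim == hidden size; 768 for this model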
@@ -873,7 +901,7 @@ def device_config(config):
print(f"{colors.RED}Please enter an integer between -1 and {n_layers}.{colors.END}")
logger.init_ok("Final device configuration:", status="Info")
device_list(n_layers)
device_list(n_layers, primary=breakmodel.primary_device)
# If all layers are on the same device, use the old GPU generation mode
while(len(breakmodel.gpu_blocks) and breakmodel.gpu_blocks[-1] == 0):
@@ -989,7 +1017,7 @@ def loadmodelsettings():
if("nobreakmodel" in js):
vars.nobreakmodel = js["nobreakmodel"]
if("sampler_order" in js):
sampler_order = vars.sampler_order
sampler_order = js["sampler_order"]
if(len(sampler_order) < 7):
sampler_order = [6] + sampler_order
vars.sampler_order = sampler_order
@@ -1127,7 +1155,7 @@ def processsettings(js):
if("andepth" in js):
vars.andepth = js["andepth"]
if("sampler_order" in js):
sampler_order = vars.sampler_order
sampler_order = js["sampler_order"]
if(len(sampler_order) < 7):
sampler_order = [6] + sampler_order
vars.sampler_order = sampler_order
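This hunk and the matching one in `loadmodelsettings()` above fix the same bug: the code checked for `sampler_order` in the loaded JSON but then copied the value from `vars` instead of `js`, so a saved sampler order was never actually applied. The `[6] + sampler_order` step upgrades orders saved before a seventh sampler (ID 6) existed. A standalone sketch of the corrected logic:

def load_sampler_order(js: dict, current: list) -> list:
    if "sampler_order" not in js:
        return current                        # nothing saved; keep defaults
    sampler_order = js["sampler_order"]       # previously read vars.sampler_order (the bug)
    if len(sampler_order) < 7:
        sampler_order = [6] + sampler_order   # prepend the new sampler ID
    return sampler_order

# An old six-sampler settings file gets upgraded on load:
assert load_sampler_order({"sampler_order": [0, 1, 2, 3, 4, 5]}, []) == [6, 0, 1, 2, 3, 4, 5]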
@@ -1354,6 +1382,8 @@ def general_startup(override_args=None):
args = parser.parse_args(shlex.split(os.environ["KOBOLDAI_ARGS"]))
else:
args = parser.parse_args()
utils.args = args
set_logger_verbosity(args.verbosity)
quiesce_logger(args.quiesce)
@@ -1790,7 +1820,9 @@ def patch_transformers():
if not args.no_aria2:
utils.aria2_hook(pretrained_model_name_or_path, **kwargs)
return old_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
PreTrainedModel.from_pretrained = new_from_pretrained
if(not hasattr(PreTrainedModel, "_kai_patched")):
PreTrainedModel.from_pretrained = new_from_pretrained
PreTrainedModel._kai_patched = True
if(hasattr(modeling_utils, "get_checkpoint_shard_files")):
old_get_checkpoint_shard_files = modeling_utils.get_checkpoint_shard_files
def new_get_checkpoint_shard_files(pretrained_model_name_or_path, index_filename, *args, **kwargs):
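`patch_transformers()` and `load_model()` (below) can both run more than once per process; without a guard, each pass would wrap the already-wrapped `from_pretrained` and stack the aria2 download hook. The `_kai_patched` sentinel makes the patch idempotent. The pattern in isolation, with a hypothetical class standing in for `PreTrainedModel`:

class Loader:                                    # hypothetical stand-in
    @classmethod
    def from_pretrained(cls, name):
        return f"weights for {name}"

old_from_pretrained = Loader.from_pretrained     # already bound to the class

def new_from_pretrained(cls, name):
    # pre-download hook would run here (the real code calls utils.aria2_hook)
    return old_from_pretrained(name)

if not hasattr(Loader, "_kai_patched"):          # safe to execute repeatedly
    Loader.from_pretrained = classmethod(new_from_pretrained)
    Loader._kai_patched = True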
@@ -2424,9 +2456,6 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
return lazy_load_callback
def get_hidden_size_from_model(model):
return model.get_input_embeddings().embedding_dim
def maybe_low_cpu_mem_usage() -> Dict[str, Any]:
if(packaging.version.parse(transformers_version) < packaging.version.parse("4.11.0")):
logger.warning(f"Please upgrade to transformers 4.11.0 for lower RAM usage. You have transformers {transformers_version}.")
@@ -2668,7 +2697,9 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
if not args.no_aria2:
utils.aria2_hook(pretrained_model_name_or_path, **kwargs)
return old_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
PreTrainedModel.from_pretrained = new_from_pretrained
if(not hasattr(PreTrainedModel, "_kai_patched")):
PreTrainedModel.from_pretrained = new_from_pretrained
PreTrainedModel._kai_patched = True
if(hasattr(modeling_utils, "get_checkpoint_shard_files")):
old_get_checkpoint_shard_files = modeling_utils.get_checkpoint_shard_files
def new_get_checkpoint_shard_files(pretrained_model_name_or_path, index_filename, *args, **kwargs):
@@ -2914,7 +2945,7 @@ def lua_startup():
except lupa.LuaError as e:
print(colors.RED + "ERROR!" + colors.END)
vars.lua_koboldbridge.obliterate_multiverse()
logger.debug('LUA ERROR: ' + str(e).replace("\033", ""))
logger.error('LUA ERROR: ' + str(e).replace("\033", ""))
logger.warning("Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.")
exit(1)
logger.init_ok("LUA bridge", status="OK")
@@ -3055,6 +3086,8 @@ def lua_compute_context(submission, entries, folders, kwargs):
force_use_txt=True,
scan_story=kwargs["scan_story"] if kwargs["scan_story"] != None else True,
)
if kwargs["include_anote"] is not None and not kwargs["include_anote"]:
anotetxt = ""
txt, _, _ = calcsubmitbudget(
len(actions),
winfo,
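The new `include_anote` kwarg is deliberately tri-state: an omitted value (`nil` from Lua, `None` here) keeps the author's note in the context, and only an explicit `false` blanks `anotetxt` before the budget calculation. A minimal sketch of that check:

def filter_anote(anotetxt: str, kwargs: dict) -> str:
    include_anote = kwargs.get("include_anote")
    if include_anote is not None and not include_anote:
        return ""          # explicitly disabled by the userscript
    return anotetxt        # absent or true: default behaviour

assert filter_anote("[Author's note: ...]", {}) == "[Author's note: ...]"
assert filter_anote("[Author's note: ...]", {"include_anote": False}) == ""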
@@ -3470,7 +3503,7 @@ def execute_inmod():
vars.lua_running = False
emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True)
sendUSStatItems()
logger.debug('LUA ERROR: ' + str(e).replace("\033", ""))
logger.error('LUA ERROR: ' + str(e).replace("\033", ""))
logger.warning("Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.")
set_aibusy(0)
@@ -3487,7 +3520,7 @@ def execute_outmod():
vars.lua_running = False
emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True)
sendUSStatItems()
logger.debug('LUA ERROR: ' + str(e).replace("\033", ""))
logger.error('LUA ERROR: ' + str(e).replace("\033", ""))
logger.warning("Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.")
set_aibusy(0)
if(vars.lua_koboldbridge.resend_settings_required):
@@ -4907,7 +4940,7 @@ def generate(txt, minimum, maximum, found_entries=None):
vars.lua_running = False
emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True)
sendUSStatItems()
logger.debug('LUA ERROR: ' + str(e).replace("\033", ""))
logger.error('LUA ERROR: ' + str(e).replace("\033", ""))
logger.warning("Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.")
else:
emit('from_server', {'cmd': 'errmsg', 'data': 'Error occurred during generator call; please check console.'}, broadcast=True)
@@ -5415,7 +5448,7 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None):
vars.lua_running = False
emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error; please check console.'}, broadcast=True)
sendUSStatItems()
logger.debug('LUA ERROR: ' + str(e).replace("\033", ""))
logger.error('LUA ERROR: ' + str(e).replace("\033", ""))
logger.warning("Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.")
else:
emit('from_server', {'cmd': 'errmsg', 'data': 'Error occurred during generator call; please check console.'}, broadcast=True)


@@ -70,10 +70,18 @@
"Model = \"Nerys 2.7B\" #@param [\"Nerys 2.7B\", \"AID 2.7B\", \"Erebus 2.7B\", \"Janeway 2.7B\", \"Picard 2.7B\", \"Horni LN 2.7B\", \"Horni 2.7B\", \"Shinen 2.7B\", \"OPT 2.7B\", \"Fairseq Dense 2.7B\", \"Neo 2.7B\"] {allow-input: true}\n",
"Version = \"Official\" #@param [\"Official\", \"United\"] {allow-input: true}\n",
"Provider = \"Localtunnel\" #@param [\"Localtunnel\", \"Cloudflare\"]\n",
"use_google_drive = True #@param {type:\"boolean\"}\n",
"\n",
"!nvidia-smi\n",
"from google.colab import drive\n",
"drive.mount('/content/drive/')\n",
"if use_google_drive:\n",
" drive.mount('/content/drive/')\n",
"else:\n",
" import os\n",
" if not os.path.exists(\"/content/drive\"):\n",
" os.mkdir(\"/content/drive\")\n",
" if not os.path.exists(\"/content/drive/MyDrive/\"):\n",
" os.mkdir(\"/content/drive/MyDrive/\")\n",
"\n",
"if Model == \"Nerys 2.7B\":\n",
" Model = \"KoboldAI/fairseq-dense-2.7B-Nerys\"\n",


@@ -69,6 +69,7 @@
"Model = \"Nerys 13B V2\" #@param [\"Nerys 13B V2\", \"Erebus 13B\", \"Janeway 13B\", \"Shinen 13B\", \"Skein 20B\", \"Erebus 20B\", \"Skein 6B\", \"Janeway 6B\", \"Adventure 6B\", \"Shinen 6B\", \"Lit V2 6B\", \"Lit 6B\", \"NeoX 20B\", \"OPT 13B\", \"Fairseq Dense 13B\", \"GPT-J-6B\"] {allow-input: true}\n",
"Version = \"Official\" #@param [\"Official\", \"United\"] {allow-input: true}\n",
"Provider = \"Localtunnel\" #@param [\"Localtunnel\", \"Cloudflare\"]\n",
"use_google_drive = True #@param {type:\"boolean\"}\n",
"\n",
"import os\n",
"try:\n",
@@ -79,7 +80,14 @@
" raise RuntimeError(\"⚠You can not run this notebook without the TPU accelerator, go to Runtime->Sessions, terminate your session and then try again.⚠️\")\n",
"print('Now we will need your Google Drive to store settings and saves, you must login with the same account you used for Colab.')\n",
"from google.colab import drive\n",
"drive.mount('/content/drive/')\n",
"if use_google_drive:\n",
" drive.mount('/content/drive/')\n",
"else:\n",
" import os\n",
" if not os.path.exists(\"/content/drive\"):\n",
" os.mkdir(\"/content/drive\")\n",
" if not os.path.exists(\"/content/drive/MyDrive/\"):\n",
" os.mkdir(\"/content/drive/MyDrive/\")\n",
"\n",
"if Model == \"Janeway 13B\":\n",
" Model = \"KoboldAI/fairseq-dense-13B-Janeway\"\n",


@@ -1,5 +1,7 @@
@echo off
cd /D %~dp0
SET CONDA_SHLVL=
TITLE CMD for KoboldAI Runtime
SET /P M=<loader.settings
IF %M%==1 GOTO drivemap


@@ -1,26 +0,0 @@
name: koboldai
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- colorama
- flask-socketio
- flask-session
- pytorch
- cudatoolkit=11.1
- tensorflow-gpu
- python=3.8.*
- eventlet
- markdown
- bleach=4.1.0
- pip
- git=2.35.1
- marshmallow>=3.13
- apispec-webframeworks
- loguru
- pip:
- git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3-rp-b
- flask-cloudflared
- flask-ngrok
- lupa==1.10


@@ -20,6 +20,8 @@ dependencies:
- marshmallow>=3.13
- apispec-webframeworks
- loguru
- termcolor
- psutil
- pip:
- flask-cloudflared
- flask-ngrok
@@ -27,3 +29,4 @@ dependencies:
- transformers>=4.20.1
- huggingface_hub>=0.10.1
- accelerate
- git+https://github.com/VE-FORBRYDERNE/mkultra


@@ -1,25 +0,0 @@
name: koboldai-ft
channels:
- conda-forge
- defaults
dependencies:
- colorama
- flask-socketio
- flask-session
- python=3.8.*
- eventlet
- markdown
- bleach=4.1.0
- pip
- git=2.35.1
- marshmallow>=3.13
- apispec-webframeworks
- loguru
- pip:
- --find-links https://download.pytorch.org/whl/rocm4.2/torch_stable.html
- torch
- torchvision==0.11.1
- flask-cloudflared
- git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3-rp-b
- flask-ngrok
- lupa==1.10


@@ -17,6 +17,8 @@ dependencies:
- marshmallow>=3.13
- apispec-webframeworks
- loguru
- termcolor
- psutil
- pip:
- --extra-index-url https://download.pytorch.org/whl/rocm5.1.1
- torch
@@ -27,3 +29,4 @@ dependencies:
- transformers>=4.20.1
- huggingface_hub>=0.10.1
- accelerate
- git+https://github.com/VE-FORBRYDERNE/mkultra


@@ -86,7 +86,7 @@ def uspath(filename):
def getstoryfiles():
list = []
for file in listdir("stories"):
if file.endswith(".json"):
if file.endswith(".json") and not file.endswith(".v2.json"):
ob = {}
ob["name"] = file.replace(".json", "")
f = open("stories/"+file, "r")


@@ -8,6 +8,7 @@ echo.
Reg add "HKLM\SYSTEM\CurrentControlSet\Control\FileSystem" /v "LongPathsEnabled" /t REG_DWORD /d "1" /f 2>nul
cd /D %~dp0
SET CONDA_SHLVL=
if exist miniconda3\ (
echo Delete existing installation?


@@ -1,5 +1,9 @@
@echo off
cd /D %~dp0
SET CONDA_SHLVL=
rmdir /S /Q flask_session
TITLE KoboldAI - Server
SET /P M=<loader.settings
IF %M%==1 GOTO drivemap

prompt_tuner.py (new file, 1082 lines)

File diff suppressed because it is too large.


@@ -17,3 +17,5 @@ flask-session
marshmallow>=3.13
apispec-webframeworks
loguru
termcolor
git+https://github.com/VE-FORBRYDERNE/mkultra


@@ -1148,6 +1148,9 @@ def load_model(path: str, driver_version="tpu_driver0.1_dev20210607", hf_checkpo
if param not in params:
params[param] = default_params[param]
# Use an optimization that will allow us to avoid one extra transpose operation
params["transposed_linear"] = True
# Load tokenizer
if vars.model == "TPUMeshTransformerGPTNeoX":
tokenizer = Tokenizer.from_file(os.path.join(path, "20B_tokenizer.json"))
@@ -1305,8 +1308,9 @@
tensor /= params["cores_per_replica"]
if "vocab_pad" in transforms:
tensor = torch.nn.functional.pad(tensor, (0,) * (tensor.ndim * 2 - 1) + (params["n_vocab_padding"],))
if "no_transpose" not in transforms and tensor.ndim == 2:
tensor = tensor.T
# We don't need to transpose linear module weights anymore because MTJ will do it for us if `transposed_linear` is set to True in the config
#if "no_transpose" not in transforms and tensor.ndim == 2:
# tensor = tensor.T
tensor.unsqueeze_(0)
if tensor.dtype is torch.float16 or tensor.dtype is torch.float32:
tensor = tensor.bfloat16()
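Setting `params["transposed_linear"] = True` tells the MTJ backend to expect linear weights in pre-transposed layout, which is why the per-tensor transpose above is now commented out: transposing once at load time is mathematically identical to transposing on every forward pass. A small PyTorch check of that equivalence (torch is already used by this file):

import torch

x = torch.randn(2, 8)        # activations
w = torch.randn(4, 8)        # linear weight stored as (out_features, in_features)
y_per_call = x @ w.T         # transpose repeated on every call
w_pre = w.T.contiguous()     # transpose once, when loading the checkpoint
y_preloaded = x @ w_pre      # no further transposes needed
assert torch.allclose(y_per_call, y_preloaded)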


@@ -1,5 +1,7 @@
@echo off
cd /d %~dp0
SET CONDA_SHLVL=
TITLE KoboldAI - Updater
SET /P M=<loader.settings
IF %M%==1 GOTO drivemap


@@ -500,6 +500,7 @@
<li>kwargs? (<code>table&lt;string, any&gt;</code>): Table of optional keyword arguments from the following list. Defaults to <code>{}</code>.
<ul>
<li>scan_story? (<code>boolean</code>): Whether or not to scan the past few actions of the story for world info keys in addition to the submission like how world info normally behaves. If this is set to <code>false</code>, only the <code>submission</code> is scanned for world info keys. Defaults to <code>true</code>.</li>
<li>include_anote? (<code>boolean</code>): Whether to include the author's note in the story. Defaults to <code>true</code>, pass <code>false</code> to suppress including the author's note.</li>
</ul>
</li>
</ul>
@@ -574,6 +575,7 @@
<li>kwargs? (<code>table&lt;string, any&gt;</code>): Table of optional keyword arguments from the following list. Defaults to <code>{}</code>.
<ul>
<li>scan_story? (<code>boolean</code>): Whether or not to scan the past few actions of the story for world info keys in addition to the submission like how world info normally behaves. If this is set to <code>false</code>, only the <code>submission</code> is scanned for world info keys. Defaults to <code>true</code>.</li>
<li>include_anote? (<code>boolean</code>): Whether to include the author's note in the story. Defaults to <code>true</code>, pass <code>false</code> to suppress including the author's note.</li>
</ul>
</li>
</ul>
@@ -687,6 +689,7 @@
<li>kwargs? (<code>table&lt;string, any&gt;</code>): Table of optional keyword arguments from the following list. Defaults to <code>{}</code>.
<ul>
<li>scan_story? (<code>boolean</code>): Whether or not to scan the past few actions of the story for world info keys in addition to the submission like how world info normally behaves. If this is set to <code>false</code>, only the <code>submission</code> is scanned for world info keys. Defaults to <code>true</code>.</li>
<li>include_anote? (<code>boolean</code>): Whether to include the author's note in the story. Defaults to <code>true</code>, pass <code>false</code> to suppress including the author's note.</li>
</ul>
</li>
</ul>


@@ -538,6 +538,7 @@ Computes the context that would be sent to the generator with the user's current
* entries? (`KoboldWorldInfoEntry|table<any, KoboldWorldInfoEntry>`): A `KoboldWorldInfoEntry` or table thereof that indicates an allowed subset of world info entries to include in the context. Defaults to all world info entries.
* kwargs? (`table<string, any>`): Table of optional keyword arguments from the following list. Defaults to `{}`.
* scan_story? (`boolean`): Whether or not to scan the past few actions of the story for world info keys in addition to the submission like how world info normally behaves. If this is set to `false`, only the `submission` is scanned for world info keys. Defaults to `true`.
* include_anote? (`boolean`): Whether to include the author's note in the story. Defaults to `true`, pass `false` to suppress including the author's note.
### Returns
@@ -636,6 +637,7 @@ The same as calling `kobold.worldinfo:compute_context()` with this world info en
* submission (`string`): String to use as simulated user's input after being formatted by input formatting.
* kwargs? (`table<string, any>`): Table of optional keyword arguments from the following list. Defaults to `{}`.
* scan_story? (`boolean`): Whether or not to scan the past few actions of the story for world info keys in addition to the submission like how world info normally behaves. If this is set to `false`, only the `submission` is scanned for world info keys. Defaults to `true`.
* include_anote? (`boolean`): Whether to include the author's note in the story. Defaults to `true`, pass `false` to suppress including the author's note.
### Returns
@@ -819,6 +821,7 @@ Unlike `kobold.worldinfo:compute_context()`, this function doesn't include world
* entries? (`KoboldWorldInfoEntry|table<any, KoboldWorldInfoEntry>`): A `KoboldWorldInfoEntry` or table thereof that indicates an allowed subset of world info entries to include in the context. Entries that are not inside of the folder are still not included. Defaults to all world info entries in the folder.
* kwargs? (`table<string, any>`): Table of optional keyword arguments from the following list. Defaults to `{}`.
* scan_story? (`boolean`): Whether or not to scan the past few actions of the story for world info keys in addition to the submission like how world info normally behaves. If this is set to `false`, only the `submission` is scanned for world info keys. Defaults to `true`.
* include_anote? (`boolean`): Whether to include the author's note in the story. Defaults to `true`, pass `false` to suppress including the author's note.
### Returns


@@ -27,6 +27,7 @@ except ImportError:
HAS_ACCELERATE = False
vars = None
args = None
num_shards: Optional[int] = None
current_shard = 0
from_pretrained_model_name = ""
@@ -40,6 +41,8 @@ named_buffers: Optional[List[tuple]] = None
default_sampler_order = [6, 0, 1, 2, 3, 4, 5]
emit = None
#==================================================================#
# Decorator to prevent a function's actions from being run until
# at least x seconds have passed without the function being called
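The decorator this banner describes is not shown in the hunk; a common way to implement such a debounce uses `threading.Timer`, as in this sketch of the general technique (an illustration, not necessarily the exact code in `utils.py`):

import threading

def debounce(wait_seconds):
    # Delay fn until wait_seconds pass with no further calls; each new
    # call cancels the pending one and restarts the countdown.
    def decorator(fn):
        timer = None
        def debounced(*args, **kwargs):
            nonlocal timer
            if timer is not None:
                timer.cancel()
            timer = threading.Timer(wait_seconds, fn, args=args, kwargs=kwargs)
            timer.start()
        return debounced
    return decorator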
@@ -198,6 +201,7 @@ def _download_with_aria2(aria2_config: str, total_length: int, directory: str =
pass
import transformers
aria2_port = 6799 if vars is None else vars.aria2_port
lengths = {}
s = requests.Session()
s.mount("http://", requests.adapters.HTTPAdapter(max_retries=requests.adapters.Retry(total=120, backoff_factor=1)))
@@ -208,9 +212,9 @@
with tempfile.NamedTemporaryFile("w+b", delete=False) as f:
f.write(aria2_config)
f.flush()
p = subprocess.Popen(["aria2c", "-x", "10", "-s", "10", "-j", "10", "--enable-rpc=true", f"--rpc-secret={secret}", "--rpc-listen-port", str(vars.aria2_port), "--disable-ipv6", "--file-allocation=trunc", "--allow-overwrite", "--auto-file-renaming=false", "-d", directory, "-i", f.name, "-U", transformers.file_utils.http_user_agent(user_agent)] + (["-c"] if not force_download else []) + ([f"--header='Authorization: Bearer {use_auth_token}'"] if use_auth_token else []), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
p = subprocess.Popen(["aria2c", "-x", "10", "-s", "10", "-j", "10", "--enable-rpc=true", f"--rpc-secret={secret}", "--rpc-listen-port", str(aria2_port), "--disable-ipv6", "--file-allocation=trunc", "--allow-overwrite", "--auto-file-renaming=false", "-d", directory, "-i", f.name, "-U", transformers.file_utils.http_user_agent(user_agent)] + (["-c"] if not force_download else []) + ([f"--header='Authorization: Bearer {use_auth_token}'"] if use_auth_token else []), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
while p.poll() is None:
r = s.post(f"http://localhost:{vars.aria2_port}/jsonrpc", json={"jsonrpc": "2.0", "id": "kai", "method": "aria2.tellActive", "params": [f"token:{secret}"]}).json()["result"]
r = s.post(f"http://localhost:{aria2_port}/jsonrpc", json={"jsonrpc": "2.0", "id": "kai", "method": "aria2.tellActive", "params": [f"token:{secret}"]}).json()["result"]
if not r:
s.close()
if bar is not None:
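Switching the two reads from `vars.aria2_port` to the module-level `aria2_port` (6799 when `vars` is `None`) lets `_download_with_aria2` work outside the main server, presumably for the new standalone `prompt_tuner.py`, which never assigns `utils.vars`. The fallback in isolation:

def resolve_aria2_port(vars_obj) -> int:
    # 6799 mirrors the hardcoded default used above when no server vars exist.
    return 6799 if vars_obj is None else vars_obj.aria2_port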
@@ -602,4 +606,4 @@ def get_missing_module_names(model: PreTrainedModel, names: List[str]) -> List[s
else:
recurse(c[1], head=name + ".")
recurse(model)
return missing_names
return missing_names