Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-06-05 21:59:24 +02:00.
Merge pull request #385 from one-some/accelerate-offloading: "Accelerate offloading".
This commit is contained in:
@@ -1525,6 +1525,7 @@ def general_startup(override_args=None):
     print(f"Allowed IPs: {allowed_ips}")

     if args.cpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = "None"
+        koboldai_vars.use_colab_tpu = False
         koboldai_vars.hascuda = False
         koboldai_vars.usegpu = False
@@ -10811,7 +10812,7 @@ def run():
     Session(app)
     logger.init_ok("Flask", status="OK")
     logger.init("Webserver", status="Starting")
-    patch_transformers()
+    patch_transformers(use_tpu=koboldai_vars.use_colab_tpu)

     # Start Flask/SocketIO (Blocking, so this must be last method!)
     port = args.port if "port" in args and args.port is not None else 5000
@@ -10908,7 +10909,7 @@ else:
     logger.init("Flask", status="Starting")
     Session(app)
     logger.init_ok("Flask", status="OK")
-    patch_transformers()
+    patch_transformers(use_tpu=koboldai_vars.use_colab_tpu)
     startup(command_line_backend)
     koboldai_settings.port = args.port if "port" in args and args.port is not None else 5000
     print("{0}\nServer started in WSGI mode!{1}".format(colors.GREEN, colors.END), flush=True)
Reference in New Issue
Block a user