diff --git a/aiserver.py b/aiserver.py
index 3a3079d4..a8a3adaa 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -179,7 +179,7 @@ class vars:
     recentrngm   = None    # If a new random game was recently generated without Submitting after, this is the memory used (as a string), otherwise this is None
     useprompt    = False   # Whether to send the full prompt with every submit action
     breakmodel   = False   # For GPU users, whether to use both system RAM and VRAM to conserve VRAM while offering speedup compared to CPU-only
-    bmsupported  = False   # Whether the breakmodel option is supported (GPT-Neo/GPT-J only, currently)
+    bmsupported  = False   # Whether the breakmodel option is supported (GPT-Neo/GPT-J/XGLM only, currently)
     nobreakmodel = False   # Something specifically requested Breakmodel to be disabled (For example a models config)
     smandelete   = False   # Whether stories can be deleted from inside the browser
     smanrename   = False   # Whether stories can be renamed from inside the browser
@@ -542,7 +542,7 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly", "TPUMeshTransforme
     loadmodelsettings()
     print("{0}Looking for GPU support...{1}".format(colors.PURPLE, colors.END), end="")
     vars.hascuda = torch.cuda.is_available()
-    vars.bmsupported = vars.model_type in ("gpt_neo", "gptj") and not vars.nobreakmodel
+    vars.bmsupported = vars.model_type in ("gpt_neo", "gptj", "xglm") and not vars.nobreakmodel
     if(args.breakmodel is not None and args.breakmodel):
         print("WARNING: --breakmodel is no longer supported. Breakmodel mode is now automatically enabled when --breakmodel_gpulayers is used (see --help for details).", file=sys.stderr)
     if(args.breakmodel_layers is not None):
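
For reviewers: the widened gate keys off the Hugging Face `model_type` string, so XGLM checkpoints now report breakmodel support. Below is a minimal sketch of the same check in isolation, assuming the `transformers` library; the `breakmodel_supported` helper and the example checkpoint are illustrative, not part of this patch.

```python
# Sketch of the model_type gate, isolated from aiserver.py (illustrative only).
from transformers import AutoConfig

# Mirrors the tuple added in the diff above.
BREAKMODEL_TYPES = ("gpt_neo", "gptj", "xglm")

def breakmodel_supported(model_path: str, nobreakmodel: bool = False) -> bool:
    # AutoConfig reads only config.json (no model weights), so this check
    # is cheap; config.model_type is the architecture key transformers uses.
    config = AutoConfig.from_pretrained(model_path)
    return config.model_type in BREAKMODEL_TYPES and not nobreakmodel

# Example: an XGLM checkpoint is now eligible for breakmodel.
# breakmodel_supported("facebook/xglm-564M")  # -> True
```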