commit 7ab39bac0f

aiserver.py (58 changed lines):

@@ -351,6 +351,8 @@ class vars:
     lazy_load = True # Whether or not to use torch_lazy_loader.py for transformers models in order to reduce CPU memory usage
     use_colab_tpu = os.environ.get("COLAB_TPU_ADDR", "") != "" or os.environ.get("TPU_NAME", "") != "" # Whether or not we're in a Colab TPU instance or Kaggle TPU instance and are going to use the TPU rather than the CPU
     revision = None
+    output_streaming = False
+    token_stream_queue = [] # Queue for the token streaming

 utils.vars = vars

@@ -800,6 +802,7 @@ def savesettings():
     js["fulldeterminism"] = vars.full_determinism
     js["autosave"] = vars.autosave
     js["welcome"] = vars.welcome
+    js["output_streaming"] = vars.output_streaming

     if(vars.seed_specified):
         js["seed"] = vars.seed

@@ -911,6 +914,8 @@ def processsettings(js):
         vars.newlinemode = js["newlinemode"]
     if("welcome" in js):
         vars.welcome = js["welcome"]
+    if("output_streaming" in js):
+        vars.output_streaming = js["output_streaming"]

     if("seed" in js):
         vars.seed = js["seed"]

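Note that savesettings() writes output_streaming unconditionally, while processsettings() guards the read with an "in" check: settings files saved before this change still load cleanly, and the class-level default (output_streaming = False) applies whenever the key is absent.
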
@@ -943,12 +948,20 @@ def processsettings(js):

 def check_for_sp_change():
     while(True):
-        time.sleep(0.1)
+        time.sleep(0.05)

         if(vars.sp_changed):
             with app.app_context():
                 emit('from_server', {'cmd': 'spstatitems', 'data': {vars.spfilename: vars.spmeta} if vars.allowsp and len(vars.spfilename) else {}}, namespace=None, broadcast=True)
             vars.sp_changed = False

+        if(vars.output_streaming and vars.token_stream_queue):
+            # If emit blocks, waiting for it to complete before clearing could
+            # introduce a race condition that drops tokens.
+            queued_tokens = list(vars.token_stream_queue)
+            vars.token_stream_queue.clear()
+            socketio.emit("from_server", {"cmd": "streamtoken", "data": queued_tokens}, namespace=None, broadcast=True)
+
 socketio.start_background_task(check_for_sp_change)

 def spRequest(filename):

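Aside on the new streaming branch above: the queue is snapshotted and cleared before socketio.emit runs, not after. A minimal standalone sketch of that snapshot-then-clear idiom (illustrative names, plain threads standing in for socketio's background task):

import threading, time

token_queue = []              # filled by the generation thread

def emit(batch):              # stand-in for socketio.emit; may block
    time.sleep(0.2)
    print("".join(batch), end="", flush=True)

def drain_loop():
    while True:
        time.sleep(0.05)
        if token_queue:
            batch = list(token_queue)  # snapshot the queue...
            token_queue.clear()        # ...and clear it *before* emitting;
                                       # clearing only after a blocking emit()
                                       # would erase tokens appended meanwhile
            emit(batch)

threading.Thread(target=drain_loop, daemon=True).start()
for ch in "streaming demo\n":          # pretend these are generated tokens
    token_queue.append(ch)
    time.sleep(0.03)
time.sleep(0.5)                        # let the drain loop flush the tail
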
@@ -1044,6 +1057,7 @@ def general_startup(override_args=None):
     parser.add_argument("--no_aria2", action='store_true', default=False, help="Prevents KoboldAI from using aria2 to download huggingface models more efficiently, in case aria2 is causing you issues")
     parser.add_argument("--lowmem", action='store_true', help="Extra Low Memory loading for the GPU, slower but memory does not peak to twice the usage")
     parser.add_argument("--savemodel", action='store_true', help="Saves the model to the models folder even if --colab is used (Allows you to save models to Google Drive)")
+    parser.add_argument("--customsettings", help="Preloads arguments from json file. You only need to provide the location of the json file. Use customsettings.json template file. It can be renamed if you wish so that you can store multiple configurations. Leave any settings you want as default as null. Any values you wish to set need to be in double quotation marks")
     #args: argparse.Namespace = None
     if "pytest" in sys.modules and override_args is None:
         args = parser.parse_args([])

@@ -1057,6 +1071,14 @@ def general_startup(override_args=None):
     else:
         args = parser.parse_args()

+    if args.customsettings:
+        f = open (args.customsettings)
+        importedsettings = json.load(f)
+        for items in importedsettings:
+            if importedsettings[items] is not None:
+                setattr(args, items, importedsettings[items])
+        f.close()
+
     vars.model = args.model;
     vars.revision = args.revision

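The --customsettings block overlays every non-null key of the JSON file onto the parsed argparse namespace via setattr, so the file can name any long option. An equivalent standalone sketch (hypothetical file path, and a context manager in place of the explicit close):

import argparse, json

parser = argparse.ArgumentParser()
parser.add_argument("--model")
parser.add_argument("--port")
args = parser.parse_args([])

with open("customsettings.json") as f:   # hypothetical path
    for key, value in json.load(f).items():
        if value is not None:            # null in the JSON means "keep the default"
            setattr(args, key, value)
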
@@ -1533,6 +1555,27 @@ def patch_transformers():
     new_init.old_init = transformers.generation_logits_process.NoBadWordsLogitsProcessor.__init__
     transformers.generation_logits_process.NoBadWordsLogitsProcessor.__init__ = new_init

+    class TokenStreamer(StoppingCriteria):
+        # A StoppingCriteria is used here because it seems to run after
+        # everything has been evaluated score-wise.
+        def __init__(self, tokenizer):
+            self.tokenizer = tokenizer
+
+        def __call__(
+            self,
+            input_ids: torch.LongTensor,
+            scores: torch.FloatTensor,
+            **kwargs,
+        ) -> bool:
+            # Do not intermingle multiple generations' outputs!
+            if(vars.numseqs > 1):
+                return False
+
+            tokenizer_text = utils.decodenewlines(tokenizer.decode(input_ids[0, -1]))
+
+            vars.token_stream_queue.append(tokenizer_text)
+            return False
+
     # Sets up dynamic world info scanner
     class DynamicWorldInfoScanCriteria(StoppingCriteria):

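TokenStreamer never halts generation: it exploits the fact that a StoppingCriteria is invoked once per generated token to copy each new token into vars.token_stream_queue for the background drain loop. A minimal standalone sketch of the same trick, passing the criteria list straight to generate() rather than patching _get_stopping_criteria as the commit does ("gpt2" is only a placeholder model):

import torch
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          StoppingCriteria, StoppingCriteriaList)

tokenizer = AutoTokenizer.from_pretrained("gpt2")    # placeholder
model = AutoModelForCausalLM.from_pretrained("gpt2")

class PrintStreamer(StoppingCriteria):
    # Called once per step; always returns False, so it only observes
    # input_ids growing and never actually stops generation.
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        print(tokenizer.decode(input_ids[0, -1]), end="", flush=True)
        return False

ids = tokenizer("The quick brown", return_tensors="pt").input_ids
model.generate(ids, max_new_tokens=20,
               stopping_criteria=StoppingCriteriaList([PrintStreamer()]))
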
@@ -1587,7 +1630,10 @@ def patch_transformers():
                 tokenizer=tokenizer,
                 excluded_world_info=self.kai_scanner_excluded_world_info,
             )
+            token_streamer = TokenStreamer(tokenizer=tokenizer)
+
             stopping_criteria.insert(0, self.kai_scanner)
+            stopping_criteria.insert(0, token_streamer)
             return stopping_criteria
         transformers.generation_utils.GenerationMixin._get_stopping_criteria = new_get_stopping_criteria

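Both lines use insert(0, ...) and run in source order, so token_streamer ends up at the head of the list, ahead of kai_scanner. The ordering plausibly matters: in the transformers versions of this era, StoppingCriteriaList evaluates its members with any(...), which short-circuits on the first True, so putting the streamer first lets it record the token produced on the very step where the world-info scanner decides to stop.
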
@@ -2689,6 +2735,7 @@ def lua_has_setting(setting):
         "rmspch",
         "adsnsp",
         "singleline",
+        "output_streaming"
     )

 #==================================================================#

@@ -2720,6 +2767,7 @@ def lua_get_setting(setting):
     if(setting in ("frmtrmspch", "rmspch")): return vars.formatoptns["frmttrmspch"]
     if(setting in ("frmtadsnsp", "adsnsp")): return vars.formatoptns["frmtadsnsp"]
     if(setting in ("frmtsingleline", "singleline")): return vars.formatoptns["singleline"]
+    if(setting == "output_streaming"): return vars.output_streaming

 #==================================================================#
 # Set the setting with the given name if it exists

@@ -2756,6 +2804,7 @@ def lua_set_setting(setting, v):
     if(setting in ("frmtrmspch", "rmspch")): vars.formatoptns["frmttrmspch"] = v
     if(setting in ("frmtadsnsp", "adsnsp")): vars.formatoptns["frmtadsnsp"] = v
     if(setting in ("frmtsingleline", "singleline")): vars.formatoptns["singleline"] = v
+    if(setting == "output_streaming"): vars.output_streaming = v

 #==================================================================#
 # Get contents of memory

@@ -3468,6 +3517,10 @@ def get_message(msg):
         vars.full_determinism = msg['data']
         settingschanged()
         refresh_settings()
+    elif(msg['cmd'] == 'setoutputstreaming'):
+        vars.output_streaming = msg['data']
+        settingschanged()
+        refresh_settings()
     elif(not vars.host and msg['cmd'] == 'importwi'):
         wiimportrequest()
     elif(msg['cmd'] == 'debug'):

@@ -4665,6 +4718,7 @@ def refresh_settings():
     emit('from_server', {'cmd': 'updatefrmtrmspch', 'data': vars.formatoptns["frmtrmspch"]}, broadcast=True)
     emit('from_server', {'cmd': 'updatefrmtadsnsp', 'data': vars.formatoptns["frmtadsnsp"]}, broadcast=True)
     emit('from_server', {'cmd': 'updatesingleline', 'data': vars.formatoptns["singleline"]}, broadcast=True)
+    emit('from_server', {'cmd': 'updateoutputstreaming', 'data': vars.output_streaming}, broadcast=True)

     # Allow toggle events again
     emit('from_server', {'cmd': 'allowtoggle', 'data': True}, broadcast=True)

@@ -5838,7 +5892,7 @@ def importgame():
 def importAidgRequest(id):
     exitModes()

-    urlformat = "https://prompts.aidg.club/api/"
+    urlformat = "https://aetherroom.club/api/"
     req = requests.get(urlformat+id)

     if(req.status_code == 200):

New file (the customsettings JSON template referenced by --customsettings above; 1 added line):

@@ -0,0 +1 @@
+{"aria2_port":null, "breakmodel":null, "breakmodel_disklayers":null, "breakmodel_gpulayers":null, "breakmodel_layers":null, "colab":null, "configname":null, "cpu":null, "host":null, "localtunnel":null, "lowmem":null, "model":null, "ngrok":null, "no_aria2":null, "noaimenu":null, "nobreakmodel":null, "override_delete":null, "override_rename":null, "path":null, "port":null, "quiet":null, "remote":null, "revision":null, "savemodel":null, "unblock":null}

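Every key in the template is null, which the loader above treats as "leave the argparse default alone"; to pin a value, replace the null (for example, "port": "5001" -- a hypothetical override, and per the help text, values go in double quotation marks).
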
gensettings.py:

@@ -251,7 +251,18 @@ gensettingstf = [
     "step": 1,
     "default": 0,
     "tooltip": "Show debug info"
-    }
+    },
+    {
+    "uitype": "toggle",
+    "unit": "bool",
+    "label": "Token Streaming",
+    "id": "setoutputstreaming",
+    "min": 0,
+    "max": 1,
+    "step": 1,
+    "default": 0,
+    "tooltip": "Shows outputs to you as they are made. Does not work with more than one gens per action."
+    },
 ]

 gensettingsik =[{

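The new toggle's id, setoutputstreaming, is what ties the feature together: the browser sends {'cmd': 'setoutputstreaming'} (handled in get_message above), refresh_settings echoes updateoutputstreaming back to sync the checkbox, and the client-side streamtoken handler below checks the same checkbox before rendering a preview.
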
@@ -415,9 +426,9 @@ formatcontrols = [{
     "tooltip": "Remove special characters (@,#,%,^, etc)"
     },
     {
-    "label": "Add sentence spacing",
+    "label": "Automatic spacing",
     "id": "frmtadsnsp",
-    "tooltip": "If the last action ended with punctuation, add a space to the beginning of the next action."
+    "tooltip": "Add spaces automatically if needed"
     },
     {
     "label": "Single Line",

install_requirements.bat:

@@ -44,8 +44,7 @@ copy loader.settings B:\loader.settings
 copy disconnect-kobold-drive.bat B:\disconnect-kobold-drive.bat
 B:
 umamba.exe create -r B:\python\ -n base
-IF %B%==1 umamba.exe install --no-shortcuts -r B:\python\ -n base -f "%~dp0\environments\huggingface.yml" -y --always-copy
-IF %B%==2 umamba.exe install --no-shortcuts -r B:\python\ -n base -f "%~dp0\environments\finetuneanon.yml" -y --always-copy
+umamba.exe install --no-shortcuts -r B:\python\ -n base -f "%~dp0\environments\huggingface.yml" -y --always-copy
 umamba.exe -r B:\ clean -a -y
 rd B:\Python\pkgs /S /Q
 subst B: /d

@@ -57,8 +56,7 @@ echo 2 > loader.settings
 SET TEMP=%~DP0MINICONDA3
 SET TMP=%~DP0MINICONDA3
 umamba.exe create -r miniconda3\ -n base
-IF %B%==1 umamba.exe install --no-shortcuts -r miniconda3 -n base -f environments\huggingface.yml -y --always-copy
-IF %B%==2 umamba.exe install --no-shortcuts -r miniconda3 -n base -f environments\finetuneanon.yml -y --always-copy
+umamba.exe install --no-shortcuts -r miniconda3 -n base -f environments\huggingface.yml -y --always-copy
 umamba.exe clean -a -y
 rd miniconda3\Python\pkgs /S /Q
 pause

static/application.js:

@@ -78,6 +78,7 @@ var rs_accept;
 var rs_close;
 var seqselmenu;
 var seqselcontents;
+var stream_preview;

 var storyname = null;
 var memorymode = false;

@@ -103,6 +104,7 @@ var gamestate = "";
 var gamesaved = true;
 var modelname = null;
 var model = "";
+var ignore_stream = false;

 // This is true iff [we're in macOS and the browser is Safari] or [we're in iOS]
 var using_webkit_patch = true;

@@ -888,6 +890,7 @@ function formatChunkInnerText(chunk) {
 }

 function dosubmit(disallow_abort) {
+	ignore_stream = false;
 	submit_start = Date.now();
 	var txt = input_text.val().replace(/\u00a0/g, " ");
 	if((disallow_abort || gamestate !== "wait") && !memorymode && !gamestarted && ((!adventure || !action_mode) && txt.trim().length == 0)) {

@@ -902,6 +905,7 @@ function dosubmit(disallow_abort) {
 }

 function _dosubmit() {
+	ignore_stream = false;
 	var txt = submit_throttle.txt;
 	var disallow_abort = submit_throttle.disallow_abort;
 	submit_throttle = null;

@@ -2082,6 +2086,15 @@ function unbindGametext() {
 	gametext_bound = false;
 }
+
+function endStream() {
+	// Clear stream, the real text is about to be displayed.
+	ignore_stream = true;
+	if (stream_preview) {
+		stream_preview.remove();
+		stream_preview = null;
+	}
+}

 function update_gpu_layers() {
 	var gpu_layers
 	gpu_layers = 0;

@@ -2258,6 +2271,21 @@ $(document).ready(function(){
 				active_element.focus();
 			})();
 			$("body").addClass("connected");
+		} else if (msg.cmd == "streamtoken") {
+			// Sometimes the stream_token messages will come in too late, after
+			// we have received the full text. This leads to some stray tokens
+			// appearing after the output. To combat this, we only allow tokens
+			// to be displayed after requesting and before receiving text.
+			if (ignore_stream) return;
+			if (!$("#setoutputstreaming")[0].checked) return;
+
+			if (!stream_preview) {
+				stream_preview = document.createElement("span");
+				game_text.append(stream_preview);
+			}
+
+			stream_preview.innerText += msg.data.join("");
+			scrollToBottom();
 		} else if(msg.cmd == "updatescreen") {
 			var _gamestarted = gamestarted;
 			gamestarted = msg.gamestarted;

@@ -2307,7 +2335,11 @@ $(document).ready(function(){
 			} else if (!empty_chunks.has(index.toString())) {
 				// Append at the end
 				unbindGametext();
-				var lc = game_text[0].lastChild;
+
+				// game_text can contain things other than chunks (stream
+				// preview), so we use querySelector to get the last chunk.
+				var lc = game_text[0].querySelector("chunk:last-of-type");
+
 				if(lc.tagName === "CHUNK" && lc.lastChild !== null && lc.lastChild.tagName === "BR") {
 					lc.removeChild(lc.lastChild);
 				}

@@ -2323,7 +2355,11 @@ $(document).ready(function(){
 			var element = game_text.children('#n' + index);
 			if(element.length) {
 				unbindGametext();
-				if((element[0].nextSibling === null || element[0].nextSibling.nodeType !== 1 || element[0].nextSibling.tagName !== "CHUNK") && element[0].previousSibling !== null && element[0].previousSibling.tagName === "CHUNK") {
+				if(
+					(element[0].nextSibling === null || element[0].nextSibling.nodeType !== 1 || element[0].nextSibling.tagName !== "CHUNK")
+					&& element[0].previousSibling !== null
+					&& element[0].previousSibling.tagName === "CHUNK"
+				) {
 					element[0].previousSibling.appendChild(document.createElement("br"));
 				}
 				element.remove(); // Remove the chunk

@@ -2333,6 +2369,7 @@ $(document).ready(function(){
 		} else if(msg.cmd == "setgamestate") {
 			// Enable or Disable buttons
 			if(msg.data == "ready") {
+				endStream();
 				enableSendBtn();
 				enableButtons([button_actmem, button_actwi, button_actback, button_actfwd, button_actretry]);
 				hideWaitAnimation();

@@ -2519,6 +2556,9 @@ $(document).ready(function(){
 		} else if(msg.cmd == "updatesingleline") {
 			// Update toggle state
 			$("#singleline").prop('checked', msg.data).change();
+		} else if(msg.cmd == "updateoutputstreaming") {
+			// Update toggle state
+			$("#setoutputstreaming").prop('checked', msg.data).change();
 		} else if(msg.cmd == "allowtoggle") {
 			// Allow toggle change states to propagate
 			allowtoggle = msg.data;

@@ -2914,6 +2954,7 @@ $(document).ready(function(){
 	});

 	button_actretry.on("click", function(ev) {
+		ignore_stream = false;
 		hideMessage();
 		socket.send({'cmd': 'retry', 'chatname': chatmode ? chat_name.val() : undefined, 'data': ''});
 		hidegenseqs();

@@ -3160,6 +3201,7 @@ $(document).ready(function(){
 	});

 	rs_accept.on("click", function(ev) {
+		ignore_stream = false;
 		hideMessage();
 		socket.send({'cmd': 'rndgame', 'memory': $("#rngmemory").val(), 'data': topic.val()});
 		hideRandomStoryPopup();

@@ -3233,6 +3275,32 @@ $(document).ready(function(){
 			return true;
 		}
 	});
+
+	// Shortcuts
+	$(window).keydown(function (ev) {
+		// Only ctrl prefixed (for now)
+		if (!ev.ctrlKey) return;
+
+		let handled = true;
+		switch (ev.key) {
+			// Ctrl+Z - Back
+			case "z":
+				button_actback.click();
+				break;
+			// Ctrl+Y - Forward
+			case "y":
+				button_actfwd.click();
+				break;
+			// Ctrl+E - Retry
+			case "e":
+				button_actretry.click();
+				break;
+			default:
+				handled = false;
+		}
+
+		if (handled) ev.preventDefault();
+	});
 });

templates/index.html:

@@ -76,7 +76,7 @@
 					<div class="dropdown-menu">
 						<a class="dropdown-item" href="#" id="btn_import">AI Dungeon Adventure</a>
 						<a class="dropdown-item" href="#" id="btn_importwi">AI Dungeon World Info</a>
-						<a class="dropdown-item" href="#" id="btn_impaidg">aidg.club Prompt</a>
+						<a class="dropdown-item" href="#" id="btn_impaidg">aetherroom.club Prompt</a>
 					</div>
 				</li>
 				<li class="nav-item">

@@ -233,7 +233,7 @@
 			<div class="popuptitletext">Enter the Prompt Number</div>
 		</div>
 		<div class="aidgpopuplistheader">
-			(4-digit number at the end of aidg.club URL)
+			(4-digit number at the end of aetherroom.club URL)
 		</div>
 		<div class="aidgpopupcontent">
 			<input class="form-control" type="text" placeholder="Prompt Number" id="aidgpromptnum">

utils.py (2 changed lines):

@@ -210,7 +210,7 @@ def aria2_hook(pretrained_model_name_or_path: str, force_download=False, cache_d
     def is_cached(url):
         try:
             transformers.file_utils.get_from_cache(url, cache_dir=cache_dir, local_files_only=True)
-        except FileNotFoundError:
+        except (FileNotFoundError, transformers.file_utils.EntryNotFoundError):
             return False
         return True
     while True: # Try to get the huggingface.co URL of the model's pytorch_model.bin or pytorch_model.bin.index.json file

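The widened except presumably covers transformers versions where get_from_cache signals a cache miss with transformers.file_utils.EntryNotFoundError (a 404-style miss for a file the repo simply does not have, such as pytorch_model.bin on a sharded model) rather than FileNotFoundError; either way, the is_cached probe now reports "not cached" instead of letting the exception escape the aria2 download hook.
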