ebolam
2022-10-11 10:59:17 -04:00
5 changed files with 553 additions and 52 deletions

View File

@@ -2163,6 +2163,9 @@ def patch_transformers():
scores: torch.FloatTensor,
**kwargs,
) -> bool:
if not koboldai_vars.inference_config.do_core:
return False
koboldai_vars.generated_tkns += 1
if (
@@ -5006,7 +5009,7 @@ def calcsubmit(txt):
# Send it!
ikrequest(subtxt)
def core_generate(text: list, min: int, max: int, found_entries: set):
def core_generate(text: list, min: int, max: int, found_entries: set, is_core: bool = False):
# This generation function is tangled with koboldai_vars intentionally. It
# is meant for the story and nothing else.
@@ -5065,6 +5068,7 @@ def core_generate(text: list, min: int, max: int, found_entries: set):
batch_count=numseqs,
# Real max length is handled by CoreStopper.
bypass_hf_maxlength=True,
is_core=True,
)
genout = result.encoded
@@ -5192,9 +5196,11 @@ def raw_generate(
do_dynamic_wi: bool = False,
batch_count: int = 1,
bypass_hf_maxlength: bool = False,
generation_settings: Optional[dict] = None
generation_settings: Optional[dict] = None,
is_core: bool = False
) -> GenerationResult:
koboldai_vars.inference_config.do_core = is_core
gen_settings = GenerationSettings(*(generation_settings or {}))
model_functions = {
@@ -6140,6 +6146,18 @@ def applyoutputformatting(txt):
if(koboldai_vars.singleline or koboldai_vars.chatmode):
txt = utils.singlelineprocessing(txt, koboldai_vars)
for sub in koboldai_vars.substitutions:
if not sub["enabled"]:
continue
i = 0
while sub["trueTarget"] in txt or sub["target"] in txt:
i += 1
if i > 1000:
logger.error("[substitutions] Infinite recursion :^(")
break
txt = txt.replace(sub["trueTarget"], sub["substitution"])
txt = txt.replace(sub["target"], sub["substitution"])
return txt
#==================================================================#
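For reference, a minimal standalone sketch of the substitution pass added above, with hypothetical sample data (the "(tm)" rule is borrowed from the commented-out defaults in the settings hunk below); trueTarget is the client-computed form of the target after other enabled substitutions have already been applied.

# Illustrative sketch only; mirrors the applyoutputformatting loop above.
substitutions = [
    {"target": "(tm)", "trueTarget": "(tm)", "substitution": "™", "enabled": True},
]

def apply_substitutions(txt: str) -> str:
    for sub in substitutions:
        if not sub["enabled"]:
            continue
        i = 0
        while sub["trueTarget"] in txt or sub["target"] in txt:
            i += 1
            if i > 1000:
                # Guard against rules whose substitution re-introduces the target
                break
            txt = txt.replace(sub["trueTarget"], sub["substitution"])
            txt = txt.replace(sub["target"], sub["substitution"])
    return txt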
@@ -8501,6 +8519,11 @@ def UI_2_phrase_bias_update(biases):
koboldai_vars.biases = biases
@socketio.on("substitution_update")
@logger.catch
def UI_2_substitutions_update(substitutions):
koboldai_vars.substitutions = substitutions
#==================================================================#
# Event triggered to relay a message

View File

@@ -1,5 +1,6 @@
from dataclasses import dataclass
import os, re, time, threading, json, pickle, base64, copy, tqdm, datetime, sys
from typing import Union
from io import BytesIO
from flask import has_request_context, session
from flask_socketio import SocketIO, join_room, leave_room
@@ -180,6 +181,18 @@ class koboldai_vars(object):
def reset_model(self):
self._model_settings.reset_for_model_load()
def get_token_representation(self, text: Union[str, list, None]) -> list:
if not self.tokenizer or not text:
return []
if isinstance(text, str):
encoded = self.tokenizer.encode(text)
else:
encoded = text
# TODO: This might be inefficient; should we cache some of this?
return [[token, self.tokenizer.decode(token)] for token in encoded]
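One possible answer to the TODO above, sketched with hypothetical names (nothing here exists in the codebase): cache per-token decodes in a small dict keyed by token id and clear it whenever the tokenizer changes, since get_token_representation decodes every token individually.

# Hypothetical helper, a sketch only: per-token decode cache.
class TokenDecodeCache:
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer
        self._cache = {}

    def decode(self, token: int) -> str:
        # Decode each token id at most once for the current tokenizer.
        if token not in self._cache:
            self._cache[token] = self.tokenizer.decode(token)
        return self._cache[token]

    def clear(self):
        # Call on model/tokenizer reload so stale decodes are dropped.
        self._cache.clear()

# Usage sketch: [[t, cache.decode(t)] for t in encoded]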
def calc_ai_text(self, submitted_text="", return_text=False):
#start_time = time.time()
if self.alt_gen:
@@ -198,7 +211,7 @@ class koboldai_vars(object):
# TODO: We may want to replace the "text" variable with a list-type
# class of context blocks, the class having a __str__ function.
if self.sp_length > 0:
context.append({"type": "soft_prompt", "text": f"<{self.sp_length} tokens of Soft Prompt.>", "tokens": self.sp_length})
context.append({"type": "soft_prompt", "text": f"<{self.sp_length} tokens of Soft Prompt.>", "tokens": [-1] * self.sp_length})
# Header is never used?
# if koboldai_vars.model not in ("Colab", "API", "OAI") and self.tokenizer._koboldai_header:
# context.append({"type": "header", "text": f"{len(self.tokenizer._koboldai_header})
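The TODO above (swapping the flat text string for a list of context blocks that know how to render themselves) could look roughly like this; a sketch only, with hypothetical names mirroring the dicts appended to context below.

from dataclasses import dataclass, field
from typing import List

@dataclass
class ContextBlock:
    type: str  # "memory", "world_info", "action", ...
    text: str
    tokens: List[list] = field(default_factory=list)  # [[token_id, decoded_token], ...]

    def __str__(self) -> str:
        return self.text

# "".join(str(block) for block in blocks) would then rebuild the prompt text.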
@@ -208,11 +221,16 @@ class koboldai_vars(object):
#Add memory
memory_length = self.max_memory_length if self.memory_length > self.max_memory_length else self.memory_length
memory_text = self.memory
memory_encoded = None
if memory_length+used_tokens <= token_budget:
if self.tokenizer is not None and self.memory_length > self.max_memory_length:
memory_text = self.tokenizer.decode(self.tokenizer.encode(self.memory)[-self.max_memory_length-1:])
if self.tokenizer is not None and self.memory_length > self.max_memory_length:
memory_encoded = self.tokenizer.encode(self.memory)[-self.max_memory_length-1:]
memory_text = self.tokenizer.decode(memory_encoded)
if not memory_encoded and self.tokenizer:
memory_encoded = self.tokenizer.encode(memory_text)
context.append({"type": "memory", "text": memory_text, "tokens": memory_length})
context.append({"type": "memory", "text": memory_text, "tokens": self.get_token_representation(memory_encoded)})
text += memory_text
#Add constant world info entries to memory
@@ -223,7 +241,11 @@ class koboldai_vars(object):
used_world_info.append(wi['uid'])
self.worldinfo_v2.set_world_info_used(wi['uid'])
wi_text = wi['content']
context.append({"type": "world_info", "text": wi_text, "tokens": wi['token_length']})
context.append({
"type": "world_info",
"text": wi_text,
"tokens": self.get_token_representation(wi_text),
})
text += wi_text
@@ -268,7 +290,7 @@ class koboldai_vars(object):
used_tokens+=0 if wi['token_length'] is None else wi['token_length']
used_world_info.append(wi['uid'])
wi_text = wi['content']
context.append({"type": "world_info", "text": wi_text, "tokens": wi['token_length']})
context.append({"type": "world_info", "text": wi_text, "tokens": self.get_token_representation(wi_text)})
text += wi_text
self.worldinfo_v2.set_world_info_used(wi['uid'])
@@ -288,31 +310,50 @@ class koboldai_vars(object):
game_context = []
authors_note_final = self.authornotetemplate.replace("<|>", self.authornote)
used_all_tokens = False
for action in range(len(self.actions)):
self.actions.set_action_in_ai(action, used=False)
for i in range(len(action_text_split)-1, -1, -1):
if action_text_split[i][3] or action_text_split[i][1] == [-1]:
#We've hit an item we've already included or items that are only prompt. Stop
for action in action_text_split[i][1]:
if action >= 0:
self.actions.set_action_in_ai(action)
break;
break
if len(action_text_split) - i - 1 == self.andepth and self.authornote != "":
game_text = "{}{}".format(authors_note_final, game_text)
game_context.insert(0, {"type": "authors_note", "text": authors_note_final, "tokens": self.authornote_length})
length = 0 if self.tokenizer is None else len(self.tokenizer.encode(action_text_split[i][0]))
game_context.insert(0, {"type": "authors_note", "text": authors_note_final, "tokens": self.get_token_representation(authors_note_final)})
encoded_action = [] if not self.tokenizer else self.tokenizer.encode(action_text_split[i][0])
length = len(encoded_action)
if length+used_tokens <= token_budget and not used_all_tokens:
used_tokens += length
selected_text = action_text_split[i][0]
action_text_split[i][3] = True
game_text = "{}{}".format(selected_text, game_text)
if action_text_split[i][1] == [self.actions.action_count+1]:
game_context.insert(0, {"type": "submit", "text": selected_text, "tokens": length, "action_ids": action_text_split[i][1]})
game_context.insert(0, {
"type": "submit",
"text": selected_text,
"tokens": self.get_token_representation(encoded_action),
"action_ids": action_text_split[i][1]
})
else:
game_context.insert(0, {"type": "action", "text": selected_text, "tokens": length, "action_ids": action_text_split[i][1]})
game_context.insert(0, {
"type": "action",
"text": selected_text,
"tokens": self.get_token_representation(encoded_action),
"action_ids": action_text_split[i][1]
})
for action in action_text_split[i][1]:
if action >= 0:
self.actions.set_action_in_ai(action)
#Now we need to check for used world info entries
for wi in self.worldinfo_v2:
if wi['uid'] not in used_world_info:
@@ -336,12 +377,13 @@ class koboldai_vars(object):
used_tokens+=0 if wi['token_length'] is None else wi['token_length']
used_world_info.append(wi['uid'])
wi_text = wi["content"]
encoded_wi = self.tokenizer.encode(wi_text)
if method == 1:
text = "{}{}".format(wi_text, game_text)
context.insert(0, {"type": "world_info", "text": wi_text, "tokens": wi['token_length']})
context.insert(0, {"type": "world_info", "text": wi_text, "tokens": self.get_token_representation(encoded_wi)})
else:
game_text = "{}{}".format(wi_text, game_text)
game_context.insert(0, {"type": "world_info", "text": wi_text, "tokens": wi['token_length']})
game_context.insert(0, {"type": "world_info", "text": wi_text, "tokens": self.get_token_representation(encoded_wi)})
self.worldinfo_v2.set_world_info_used(wi['uid'])
else:
used_all_tokens = True
@@ -350,11 +392,11 @@ class koboldai_vars(object):
#if we don't have enough actions to get to author's note depth then we just add it right before the game text
if len(action_text_split) < self.andepth and self.authornote != "":
game_text = "{}{}".format(authors_note_final, game_text)
game_context.insert(0, {"type": "authors_note", "text": authors_note_final, "tokens": authornote_length})
game_context.insert(0, {"type": "authors_note", "text": authors_note_final, "tokens": self.get_token_representation(authors_note_final)})
if self.useprompt:
text += prompt_text
context.append({"type": "prompt", "text": prompt_text, "tokens": prompt_length})
context.append({"type": "prompt", "text": prompt_text, "tokens": self.get_token_representation(prompt_text)})
elif not used_all_tokens:
prompt_length = 0
prompt_text = ""
@@ -392,12 +434,12 @@ class koboldai_vars(object):
used_tokens+=0 if wi['token_length'] is None else wi['token_length']
used_world_info.append(wi['uid'])
wi_text = wi['content']
context.append({"type": "world_info", "text": wi_text, "tokens": wi['token_length']})
context.append({"type": "world_info", "text": wi_text, "tokens": self.get_token_representation(wi_text)})
text += wi_text
self.worldinfo_v2.set_world_info_used(wi['uid'])
text += prompt_text
context.append({"type": "prompt", "text": prompt_text, "tokens": prompt_length})
context.append({"type": "prompt", "text": prompt_text, "tokens": self.get_token_representation(prompt_text)})
self.prompt_in_ai = True
else:
self.prompt_in_ai = False
@@ -723,6 +765,14 @@ class story_settings(settings):
self.revisions = []
self.picture = "" #base64 of the image shown for the story
self.picture_prompt = "" #Prompt used to create picture
self.substitutions = [
{"target": "--", "substitution": "", "enabled": False},
{"target": "---", "substitution": "", "enabled": False},
{"target": "...", "substitution": "", "enabled": False},
# {"target": "(c)", "substitution": "©", "enabled": False},
# {"target": "(r)", "substitution": "®", "enabled": False},
# {"target": "(tm)", "substitution": "™", "enabled": False},
]
#must be at bottom
self.no_save = False #Temporary disable save (doesn't save with the file)
@@ -1001,6 +1051,7 @@ class system_settings(settings):
do_dynamic_wi: bool = False
# Genamt stopping is mostly tied to Dynamic WI
stop_at_genamt: bool = False
do_core: bool = True
self.inference_config = _inference_config()
self._koboldai_var = koboldai_var

View File

@@ -1860,6 +1860,10 @@ body {
height: 100%;
flex-grow: 1;
padding: 0px 10px;
/* HACK: This is a visually ugly hack to avoid cutting off token tooltips on
the first line. */
padding-top: 15px;
}
.context-symbol {
@@ -1874,10 +1878,30 @@ body {
font-family: monospace;
}
.context-block:hover {
.context-token {
position: relative;
background-color: inherit;
}
.context-token:hover {
outline: 1px solid gray;
}
.context-token:hover::after {
content: attr(token-id);
position: absolute;
top: -120%;
left: 50%;
transform: translateX(-50%);
padding: 0px 2px;
background-color: rgba(0, 0, 0, 0.6);
pointer-events: none;
z-index: 9999999;
}
.context-sp {background-color: var(--context_colors_soft_prompt);}
.context-prompt {background-color: var(--context_colors_prompt);}
.context-wi {background-color: var(--context_colors_world_info);}
@@ -2259,6 +2283,80 @@ body {
margin-right: 5px;
}
/* Substitutions */
#Substitutions > .helpicon {
margin-left: 5px;
align-self: auto;
}
#substitution-header {
display: flex;
width: 100%;
justify-content: space-around;
}
#substitution-container {
width: 100%;
}
.substitution-card, #new-sub-card {
display: flex;
column-gap: 5px;
height: 30px;
width: 100%;
padding: 1px 0px;
}
.substitution-card > .card-section {
display: flex;
}
.substitution-card > .card-left > .material-icons-outlined {
margin-left: -5px;
margin-right: 5px;
color: gray;
}
.substitution-card > .card-section > input {
flex-grow: 1;
min-width: 0;
}
.substitution-card > * {
min-width: 0;
}
.substitution-card input {
padding-left: 2px;
border-color: var(--setting_background);
}
#new-sub-card {
display: flex;
justify-content: center;
background-color: var(--setting_background);
border-radius: 2px;
padding: 1px;
margin: 3px 0px;
}
.true-t {
display: none;
}
.true-t + label::before {
content: "edit_off";
color: gray;
margin-left: 3px;
}
.true-t:checked + label::before {
content: "edit";
color: white;
}
/*---------------------------------- Global ------------------------------------------------*/
.hidden {
display: none;
@@ -2553,30 +2651,33 @@ input[type='range'] {
/*Tooltip based on attribute*/
[tooltip] {
cursor: pointer;
display: inline-block;
line-height: 1;
position: relative;
cursor: pointer;
display: inline-block;
line-height: 1;
position: relative;
}
[tooltip]::after {
background-color: rgba(51, 51, 51, 0.9);
border-radius: 0.3rem;
color: #fff;
content: attr(tooltip);
font-size: 1rem;
font-size: 85%;
font-weight: normal;
line-height: 1.15rem;
opacity: 0;
padding: 0.25rem 0.5rem;
position: absolute;
text-align: center;
text-transform: none;
transition: opacity 0.2s;
visibility: hidden;
white-space: nowrap;
z-index: 1;
background-color: rgba(51, 51, 51, 0.9);
border-radius: 0.3rem;
color: #fff;
content: attr(tooltip);
font-size: 1rem;
font-size: 85%;
font-weight: normal;
line-height: 1.15rem;
opacity: 0;
padding: 0.25rem 0.5rem;
position: absolute;
text-align: center;
text-transform: none;
transition: opacity 0.2s;
visibility: hidden;
white-space: nowrap;
z-index: 9999;
pointer-events: none;
}
@media (max-width: 767px) {
[tooltip].tooltip::before {
display: none;

View File

@@ -33,6 +33,7 @@ socket.on("request_prompt_config", configurePrompt);
socket.on("log_message", function(data){process_log_message(data);});
socket.on("debug_message", function(data){console.log(data);});
socket.on("scratchpad_response", recieveScratchpadResponse);
socket.on("scratchpad_response", recieveScratchpadResponse);
//socket.onAny(function(event_name, data) {console.log({"event": event_name, "class": data.classname, "data": data});});
var presets = {};
@@ -596,6 +597,9 @@ function var_changed(data) {
//Special Case for phrase biasing
} else if ((data.classname == 'story') && (data.name == 'biases')) {
do_biases(data);
//Special Case for substitutions
} else if ((data.classname == 'story') && (data.name == 'substitutions')) {
load_substitutions(data.value);
//Special Case for sample_order
} else if ((data.classname == 'model') && (data.name == 'sampler_order')) {
for (const [index, item] of data.value.entries()) {
@@ -2841,6 +2845,22 @@ function update_bias_slider_value(slider) {
slider.parentElement.parentElement.querySelector(".bias_slider_cur").textContent = slider.value;
}
function distortColor(rgb) {
// rgb are 0..255, NOT NORMALIZED!!!!!!
const brightnessTamperAmplitude = 0.1;
const pseudoHue = 12;
let brightnessDistortion = Math.random() * (255 * brightnessTamperAmplitude);
rgb = rgb.map(x => x + brightnessDistortion);
// Cheap hack to imitate hue rotation
rgb = rgb.map(x => x + (Math.random() * pseudoHue * 2) - pseudoHue);
// Clamp and round
rgb = rgb.map(x => Math.round(Math.max(0, Math.min(255, x))));
return rgb;
}
function update_context(data) {
$(".context-block").remove();
@@ -2858,7 +2878,6 @@ function update_context(data) {
}
for (const entry of data) {
//console.log(entry);
let contextClass = "context-" + ({
soft_prompt: "sp",
prompt: "prompt",
@@ -2869,14 +2888,27 @@ function update_context(data) {
submit: 'submit'
}[entry.type]);
let el = document.createElement("span");
el.classList.add("context-block");
el.classList.add(contextClass);
el.innerText = entry.text;
el.title = entry.tokens + " tokens";
let el = $e(
"span",
$el("#context-container"),
{classes: ["context-block", contextClass]}
);
el.innerHTML = el.innerHTML.replaceAll("<br>", '<span class="material-icons-outlined context-symbol">keyboard_return</span>');
let rgb = window.getComputedStyle(el)["background-color"].match(/(\d+), (\d+), (\d+)/).slice(1, 4).map(Number);
for (const [tokenId, token] of entry.tokens) {
let tokenColor = distortColor(rgb);
tokenColor = "#" + (tokenColor.map((x) => x.toString(16)).join(""));
let tokenEl = $e("span", el, {
classes: ["context-token"],
"token-id": tokenId === -1 ? "Soft" : tokenId,
innerText: token,
"style.backgroundColor": tokenColor,
});
tokenEl.innerHTML = tokenEl.innerHTML.replaceAll("<br>", '<span class="material-icons-outlined context-symbol">keyboard_return</span>');
}
document.getElementById("context-container").appendChild(el);
switch (entry.type) {
@@ -3833,6 +3865,43 @@ async function loadKoboldData(data, filename) {
}
}
function readLoreCard(file) {
// "naidata"
const magicNumber = new Uint8Array([0x6e, 0x61, 0x69, 0x64, 0x61, 0x74, 0x61]);
let filename = file.name;
let reader = new FileReader();
reader.readAsArrayBuffer(file);
reader.addEventListener("load", function() {
let bin = new Uint8Array(reader.result);
// naidata is prefixed with magic number
let offset = bin.findIndex(function(item, possibleIndex, array) {
for (let i=0;i<magicNumber.length;i++) {
if (bin[i + possibleIndex] !== magicNumber[i]) return false;
}
return true;
});
if (offset === -1) throw Error("Couldn't find offset!");
let lengthBytes = bin.slice(offset - 8, offset - 4);
let length = 0;
for (const byte of lengthBytes) {
length = (length << 8) + byte;
}
let binData = bin.slice(offset + 8, offset + length);
// Encoded in base64
let data = atob(new TextDecoder().decode(binData));
let j = JSON.parse(data);
loadNAILorebook(j, filename);
})
}
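The same extraction, expressed as a rough Python sketch to make the naidata layout explicit (the layout is inferred from the JavaScript above, not from a NovelAI spec): the payload follows the 7-byte "naidata" magic number, its length is read big-endian from the four bytes starting 8 bytes before the magic, and the payload itself is base64-encoded JSON.

import base64
import json

NAIDATA_MAGIC = b"naidata"

def read_lorecard(png_bytes: bytes) -> dict:
    # Sketch mirroring readLoreCard above; offsets are taken from that code.
    offset = png_bytes.find(NAIDATA_MAGIC)
    if offset == -1:
        raise ValueError("naidata magic number not found")
    length = int.from_bytes(png_bytes[offset - 8:offset - 4], "big")
    payload = png_bytes[offset + 8:offset + length]
    return json.loads(base64.b64decode(payload))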
async function processDroppedFile(file) {
let extension = /.*\.(.*)/.exec(file.name)[1];
console.log("file is", file)
@@ -3840,10 +3909,9 @@ async function processDroppedFile(file) {
switch (extension) {
case "png":
// TODO: Support NovelAI's image lorebook cards. The format for those
// is base64-encoded JSON under a TXT key called "naidata".
console.warn("TODO: NAI LORECARDS");
return;
// NovelAI lorecard, a png with a lorebook file embedded inside it.
readLoreCard(file);
break;
case "json":
// KoboldAI file
data = JSON.parse(await file.text());
@@ -4673,6 +4741,243 @@ process_cookies();
updateTitle();
})();
/* Substitution */
let load_substitutions;
[load_substitutions] = (function() {
// TODO: Don't allow multiple substitutions for one target
// Defaults
let substitutions = [];
const substitutionContainer = $el("#substitution-container");
let charMap = [];
function getTrueTarget(bareBonesTarget) {
// If -- has already been converted to the 2-dash, we build a "true target" so that a 2-dash followed by - still triggers the 3-dash substitution.
if (!bareBonesTarget) return bareBonesTarget;
let tries = 0;
let whatWeGot = bareBonesTarget;
// eehhhh this kinda sucks but it's the best I can think of at the moment
while (true) {
// Sanity check; never 100% cpu!
tries++;
if (tries > 2000) {
alert("Some Substitution shenanigans are afoot; please send the developers your substitutions!");
throw Error("Substitution shenanigans!")
return;
}
let escape = true;
for (const c of substitutions) {
if (c.target === bareBonesTarget) continue;
if (!c.enabled) continue;
if (whatWeGot.includes(c.target)) {
whatWeGot = whatWeGot.replaceAll(c.target, c.substitution);
escape = false;
break;
}
}
if (escape) break;
}
return whatWeGot;
}
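A concrete example of the chaining the comment above describes, with hypothetical dash rules (the shipped defaults have empty substitutions and are disabled): if "--" is already replaced by the 2-dash, then by the time the user has typed "---" the text actually contains the 2-dash plus "-", so that combined string is the trueTarget the "---" rule has to match.

# Hypothetical values; target/substitution/enabled come from the server,
# trueTarget is derived client-side by getTrueTarget().
subs = [
    {"target": "--",  "substitution": "–", "enabled": True, "trueTarget": "--"},  # "–" = en dash (2-dash)
    {"target": "---", "substitution": "—", "enabled": True, "trueTarget": "–-"},  # "—" = em dash (3-dash)
]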
function getSubstitutionIndex(cardElement) {
for (const i in substitutions) {
if (substitutions[i].card === cardElement) {
return i
}
}
throw Error("Didn't find substitution!");
}
function getDuplicateCards(target) {
let duplicates = [];
for (const c of substitutions) {
if (c.target === target) duplicates.push(c.card);
}
console.log(duplicates)
return duplicates.length > 1 ? duplicates : [];
}
function makeCard(c) {
// How do we differentiate -- and ---? Convert stuff!
let card = $e("div", substitutionContainer, {classes: ["substitution-card"]});
let leftContainer = $e("div", card, {classes: ["card-section", "card-left"]});
let deleteIcon = $e("span", leftContainer, {classes: ["material-icons-outlined", "cursor"], innerText: "clear"});
let targetInput = $e("input", leftContainer, {classes: ["target"], value: c.target});
let rightContainer = $e("div", card, {classes: ["card-section"]});
let substitutionInput = $e("input", rightContainer, {classes: ["target"], value: c.substitution});
// HACK
let checkboxId = "sbcb" + Math.round(Math.random() * 9999).toString();
let enabledCheckbox = $e("input", rightContainer, {id: checkboxId, classes: ["true-t"], type: "checkbox", checked: c.enabled});
let initCheckTooltip = c.enabled ? "Enabled" : "Disabled";
// HACK: We don't use in-house tooltip as it's cut off by container :(
let enabledVisual = $e("label", rightContainer, {for: checkboxId, "title": initCheckTooltip, classes: ["material-icons-outlined"]});
targetInput.addEventListener("change", function() {
let card = this.parentElement.parentElement;
let i = getSubstitutionIndex(card);
substitutions[i].target = this.value;
// Don't do a full rebake
substitutions[i].trueTarget = getTrueTarget(this.value);
for (const duplicateCard of getDuplicateCards(this.value)) {
if (duplicateCard === card) continue;
console.log("DUPE", duplicateCard)
substitutions.splice(getSubstitutionIndex(duplicateCard), 1);
duplicateCard.remove();
}
rebuildCharMap();
updateSubstitutions();
});
substitutionInput.addEventListener("change", function() {
let card = this.parentElement.parentElement;
let i = getSubstitutionIndex(card);
substitutions[i].substitution = this.value;
// No rebaking at all is needed, that all hinges on target value; not edited here.
updateSubstitutions();
});
deleteIcon.addEventListener("click", function() {
let card = this.parentElement.parentElement;
// Find and remove from substitution array
substitutions.splice(getSubstitutionIndex(card), 1);
updateSubstitutions();
rebakeSubstitutions();
card.remove();
});
enabledCheckbox.addEventListener("change", function() {
let card = this.parentElement.parentElement;
let i = getSubstitutionIndex(card);
console.log(this.checked)
substitutions[i].enabled = this.checked;
enabledVisual.setAttribute("title", this.checked ? "Enabled" : "Disabled")
rebakeSubstitutions();
updateSubstitutions();
});
return card;
}
function updateSubstitutions() {
let subs = substitutions.map(x => ({target: x.target, substitution: x.substitution, trueTarget: x.trueTarget, enabled: x.enabled}));
socket.emit("substitution_update", subs);
}
function rebakeSubstitutions() {
for (const c of substitutions) {
c.trueTarget = getTrueTarget(c.target);
}
rebuildCharMap();
}
function rebuildCharMap() {
charMap = [];
for (const c of substitutions) {
if (!c.enabled) continue;
for (const char of c.target) {
if (!charMap.includes(char)) charMap.push(char)
}
}
}
const newCardButton = $el("#new-sub-card");
newCardButton.addEventListener("click", function() {
let c = {target: "", substitution: "", enabled: true}
substitutions.push(c);
c.card = makeCard(c);
newCardButton.scrollIntoView();
});
// Event handler on input
// TODO: Apply to all of gametext
const inputText = $el("#input_text");
inputText.addEventListener("keydown", function(event) {
if (event.ctrlKey) return;
if (!charMap.includes(event.key)) return;
let caretPosition = inputText.selectionStart;
// We don't have to worry about special keys due to charMap (hopefully)
let futureValue = inputText.value.slice(0, caretPosition) + event.key + inputText.value.slice(caretPosition);
for (const c of substitutions) {
if (!c.target) continue;
if (!c.enabled) continue;
let t = c.trueTarget;
let preCaretPosition = caretPosition - t.length + 1;
let bit = futureValue.slice(caretPosition - t.length + 1, caretPosition + 1)
if (bit === t) {
// We're doing it!!!!
event.preventDefault();
// Assemble the new text value
let before = inputText.value.slice(0, caretPosition - t.length + 1);
let after = inputText.value.slice(caretPosition);
let newText = before + c.substitution + after;
inputText.value = newText;
// Move cursor back after setting text
let sLength = c.substitution.length;
inputText.selectionStart = preCaretPosition + sLength;
inputText.selectionEnd = preCaretPosition + sLength;
break;
}
}
});
let firstLoad = true;
function load_substitutions(miniSubs) {
// HACK: Does the same "replace all on load" thing that WI does; tab
// support is broken and overall that kinda sucks. Would be nice to
// make a robust system for syncing multiple entries.
console.log("load", miniSubs)
$(".substitution-card").remove();
// we only get target, trueTarget, and such
for (const c of miniSubs) {
if (!c.trueTarget) c.trueTarget = getTrueTarget(c.target);
//if (!c.enabled) c.enabled = false;
c.card = makeCard(c);
}
substitutions = miniSubs;
rebuildCharMap();
// We build trueTarget on the client, and it's not initialized on the server because I'm lazy.
// May want to do that on the server in the future.
if (firstLoad) updateSubstitutions();
firstLoad = false;
}
return [load_substitutions];
})();
/* -- Shortcuts -- */
document.addEventListener("keydown", function(event) {

View File

@@ -412,6 +412,27 @@
<input type=checkbox class="setting_item_input" data-size="mini" data-onstyle="success" data-toggle="toggle">
</div>
</div>
<div class="collapsable_header" onclick="toggle_setting_category(this);">
<h4 style="width:var(--flyout_menu_width);"><span class="material-icons-outlined cursor">expand_more</span> Substitutions</h4>
</div>
<div class="setting_tile_area" id="Substitutions">
<span class="help_text">Automatically replaces phrases that you or the AI insert.</span>
<span class="helpicon material-icons-outlined" title="Can be used to help you insert special characters or automatically correct the AI. The pencil button toggles if a substitution is active or not.">help_icon</span>
<div id="substitution-header" class="noselect">
<b>Replace</b> <b>With</b>
</div>
<div id="substitution-container"></div>
<div id="new-sub-card" class="cursor" title="Add Substitution">
<span class="material-icons-outlined">
add
</span>
</div>
</div>
</div>
<div id="settings_footer" class="settings_footer">
<span>Execution Time: <span id="Execution Time"></span></span> |