Mirror of https://github.com/KoboldAI/KoboldAI-Client.git (synced 2025-02-17 12:10:49 +01:00)

Commit 579e85820c: Resolve merge conflict

aiserver.py (56)
@@ -1097,27 +1097,28 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Re
     else:
         vars.lazy_load = False

-    # Temporary fix for XGLM positional embedding issues until
-    # https://github.com/huggingface/transformers/issues/15736
-    # is resolved
-    try:
-        from transformers.models.xglm.modeling_xglm import XGLMSinusoidalPositionalEmbedding
-    except ImportError:
-        pass
-    else:
-        @torch.no_grad()
-        def new_forward(self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0):
-            bsz, seq_len = inputs_embeds.size()[:-1]
-            input_shape = inputs_embeds.size()[:-1]
-            sequence_length = input_shape[1]
-            position_ids = torch.arange(
-                past_key_values_length + self.padding_idx + 1, past_key_values_length + sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
-            ).unsqueeze(0).expand(input_shape).contiguous()
-            max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
-            if max_pos > self.weights.size(0):
-                self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
-            return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
-        XGLMSinusoidalPositionalEmbedding.forward = new_forward
+    # Some versions of transformers 4.17.0.dev0 are affected by
+    # https://github.com/huggingface/transformers/issues/15736
+    # This is a workaround for those versions of transformers.
+    if(transformers_version == "4.17.0.dev0"):
+        try:
+            from transformers.models.xglm.modeling_xglm import XGLMSinusoidalPositionalEmbedding
+        except ImportError:
+            pass
+        else:
+            @torch.no_grad()
+            def new_forward(self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0):
+                bsz, seq_len = inputs_embeds.size()[:-1]
+                input_shape = inputs_embeds.size()[:-1]
+                sequence_length = input_shape[1]
+                position_ids = torch.arange(
+                    past_key_values_length + self.padding_idx + 1, past_key_values_length + sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+                ).unsqueeze(0).expand(input_shape).contiguous()
+                max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
+                if max_pos > self.weights.size(0):
+                    self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
+                return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
+            XGLMSinusoidalPositionalEmbedding.forward = new_forward

 # Patch transformers to use our soft prompt
 def patch_causallm(cls):
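The new guard compares against a bare transformers_version string that is not defined inside this hunk. A minimal sketch of how it is presumably made available near the top of aiserver.py (an assumption, not shown in this diff):

# Assumption: transformers_version is the installed transformers version string,
# aliased roughly like this near the top of aiserver.py.
from transformers import __version__ as transformers_version

if transformers_version == "4.17.0.dev0":
    # Only the affected 4.17.0.dev0 pre-release builds get the
    # XGLMSinusoidalPositionalEmbedding.forward monkey-patch shown above.
    pass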
@@ -2877,7 +2878,7 @@ def actionback():
         vars.recentback = True
         remove_story_chunk(last_key + 1)
         #for the redo to not get out of whack, need to reset the max # in the actions sequence
-        vars.actions.set_next_id(vars.actions.get_last_key())
+        vars.actions.set_next_id(last_key)
     elif(len(vars.genseqs) == 0):
         emit('from_server', {'cmd': 'errmsg', 'data': "Cannot delete the prompt."})
     else:
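A rough sketch of the intent behind this change, assuming last_key is captured from the register earlier in actionback() before the chunk is removed (those surrounding lines are not part of this hunk and are shown here only for illustration):

# Assumed shape of the surrounding code in actionback():
last_key = vars.actions.get_last_key()   # key captured before the chunk is removed
vars.recentback = True
remove_story_chunk(last_key + 1)
# Reset the id counter from the saved value instead of re-querying the register,
# whose last key is no longer the same once the chunk has been removed.
vars.actions.set_next_id(last_key)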
@@ -2890,10 +2891,9 @@ def actionredo():
         genout = [{"generated_text": item['Text']} for item in vars.actions_metadata[vars.actions.get_last_key()+1]['Alternative Text'] if (item["Previous Selection"]==True)]
         if len(genout) > 0:
             genout = genout + [{"generated_text": item['Text']} for item in vars.actions_metadata[vars.actions.get_last_key()+1]['Alternative Text'] if (item["Pinned"]==True) and (item["Previous Selection"]==False)]

             if len(genout) == 1:
                 vars.actions_metadata[vars.actions.get_last_key()+1]['Alternative Text'] = [item for item in vars.actions_metadata[vars.actions.get_last_key()+1]['Alternative Text'] if (item["Previous Selection"]!=True)]
-                genresult(genout[0]['generated_text'], flash=True)
+                genresult(genout[0]['generated_text'], flash=True, ignore_formatting=True)
             else:
                 # Store sequences in memory until selection is made
                 vars.genseqs = genout
@@ -2901,6 +2901,7 @@ def actionredo():

                 # Send sequences to UI for selection
                 genout = [[item['Text'], "redo"] for item in vars.actions_metadata[vars.actions.get_last_key()+1]['Alternative Text'] if (item["Previous Selection"]==True)]

                 emit('from_server', {'cmd': 'genseqs', 'data': genout}, broadcast=True)
     else:
         emit('from_server', {'cmd': 'popuperror', 'data': "There's nothing to undo"}, broadcast=True)
@@ -3281,12 +3282,13 @@ def generate(txt, minimum, maximum, found_entries=None):
 #==================================================================#
 # Deal with a single return sequence from generate()
 #==================================================================#
-def genresult(genout, flash=True):
+def genresult(genout, flash=True, ignore_formatting=False):
     if not vars.quiet:
         print("{0}{1}{2}".format(colors.CYAN, genout, colors.END))

     # Format output before continuing
-    genout = applyoutputformatting(genout)
+    if not ignore_formatting:
+        genout = applyoutputformatting(genout)

     vars.lua_koboldbridge.feedback = genout
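A short usage sketch of the new flag, based only on the call sites visible in this diff (the double-formatting rationale is an inference, not stated in the commit message):

# Normal generation path: raw model output still goes through applyoutputformatting().
genresult(genout[0]["generated_text"])

# Redo path in actionredo(): the stored alternative text was already formatted when it
# was first shown, so it opts out to avoid running the output filters a second time.
genresult(genout[0]["generated_text"], flash=True, ignore_formatting=True)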
@@ -4708,8 +4710,6 @@ def loadRequest(loadpath, filename=None):
     emit('from_server', {'cmd': 'hidegenseqs', 'data': ''}, broadcast=True)
     print("{0}Story loaded from {1}!{2}".format(colors.GREEN, filename, colors.END))

-    print([k for k in vars.actions])
-    print([k for k in vars.actions_metadata])
     send_debug()

 #==================================================================#
@@ -5120,7 +5120,7 @@ def send_debug():
         except:
             pass
         try:
-            debug_info = "{}Actions: {}\n".format(debug_info, vars.actions.get_last_key())
+            debug_info = "{}Actions: {}\n".format(debug_info, [k for k in vars.actions])
         except:
             pass
         try:
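This appears to pair with the loadRequest() hunk above: the two ad-hoc key-list prints are gone, and the debug string built by send_debug() now reports the full list of action keys instead of only the last key.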
(The following two hunks are from the Colab launch shell script in this repository; its filename is not visible in this mirror view.)

@@ -48,7 +48,7 @@ function launch
     exit 0
   else
     cd /content/KoboldAI-Client
-    echo "Launching KoboldAI with the following options : python3 aiserver.py$model$kmpath$configname$ngrok --remote --override_delete --override_rename"
+    echo "Launching KoboldAI with the following options : python3 aiserver.py$model$kmpath$configname$ngrok --colab"
     python3 aiserver.py$model$kmpath$configname$ngrok --colab
     exit
   fi
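The echoed command now matches what is actually executed on the next line, which already launches aiserver.py with --colab rather than the stale --remote --override_delete --override_rename flags.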
@@ -151,7 +151,7 @@ if [ "$init" != "skip" ]; then
     ln -s /content/drive/MyDrive/KoboldAI/userscripts/ userscripts
     ln -s /content/drive/MyDrive/KoboldAI/models/ models

-    if [ "$model" == " --model TPUMeshTransformerGPTJ" ]; then
+    if [ -n "${COLAB_TPU_ADDR+set}" ]; then
       pip install -r requirements_mtj.txt
     else
       pip install -r requirements.txt
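In Bash, "${COLAB_TPU_ADDR+set}" expands to the string "set" whenever COLAB_TPU_ADDR is defined (even if empty) and to nothing otherwise, so the -n test now keys the TPU requirements install off whether the Colab runtime actually exposes a TPU address, instead of off the specific --model TPUMeshTransformerGPTJ argument.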
(This hunk is from the module that defines KoboldStoryRegister; its filename is not visible in this mirror view.)

@@ -19,10 +19,16 @@ class KoboldStoryRegister(collections.OrderedDict):
         return self.popitem()[1]

     def get_first_key(self) -> int:
-        return next(iter(self))
+        if len(self) == 0:
+            return -1
+        else:
+            return next(iter(self))

     def get_last_key(self) -> int:
-        return next(reversed(self))
+        if len(self) == 0:
+            return -1
+        else:
+            return next(reversed(self))

     def __getitem__(self, k: int) -> str:
         return super().__getitem__(k)
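A minimal before/after sketch of the behavioral change, using the class as defined above; dict-style insertion is assumed to work as it does for the underlying OrderedDict:

story = KoboldStoryRegister()

story.get_first_key()   # old code: raises StopIteration on an empty register; new code: returns -1
story.get_last_key()    # old code: raises StopIteration; new code: returns -1

# Assumption: plain OrderedDict-style insertion is how chunks end up in the register.
story[0] = "Once upon a time"
story.get_first_key()   # 0
story.get_last_key()    # 0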