Mirror of https://github.com/KoboldAI/KoboldAI-Client.git (synced 2025-06-05 21:59:24 +02:00)
Use eventlet instead of gevent-websocket
aiserver.py (62 lines changed)
@@ -6,7 +6,12 @@
 #==================================================================#
 
 # External packages
+import eventlet
+eventlet.monkey_patch()
 import os
+os.environ['EVENTLET_THREADPOOL_SIZE'] = '1'
+from eventlet import tpool
+
 from os import path, getcwd
 import re
 import tkinter as tk
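Ordering is the point of this hunk: eventlet.monkey_patch() must run before anything else imports the standard-library modules it patches (socket, threading, select, time), which is why it lands at the very top of aiserver.py, ahead of even the os import. Setting EVENTLET_THREADPOOL_SIZE before tpool is first used matters too, since the pool size is read when the worker pool is created. A minimal sketch of what the patch does (assuming eventlet is installed):

    import eventlet
    eventlet.monkey_patch()  # must precede imports of socket-using modules

    import socket

    # After patching, the stdlib socket class is eventlet's cooperative
    # version, so blocking I/O yields to other green threads instead of
    # stalling the whole server.
    print(socket.socket)  # prints eventlet's green socket class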
@@ -559,7 +564,7 @@ from flask import Flask, render_template, Response, request
 from flask_socketio import SocketIO, emit
 app = Flask(__name__)
 app.config['SECRET KEY'] = 'secret!'
-socketio = SocketIO(app)
+socketio = SocketIO(app, async_method="eventlet")
 print("{0}OK!{1}".format(colors.GREEN, colors.END))
 
 # Start transformers and create pipeline
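One detail worth double-checking: flask_socketio's documented keyword for pinning the backend is async_mode, not the async_method spelling used here. In practice the server still ends up on eventlet, because flask_socketio auto-selects eventlet whenever the package is importable. A sketch with the documented spelling (parameter name per the flask_socketio docs; exact behavior of unknown keywords is version-specific and an assumption):

    from flask import Flask
    from flask_socketio import SocketIO

    app = Flask(__name__)
    # async_mode is the documented parameter; omitting it entirely also
    # works, since an installed eventlet is auto-detected first.
    socketio = SocketIO(app, async_mode="eventlet")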
@@ -990,8 +995,8 @@ def load_lua_scripts():
 
     try:
         vars.lua_koboldbridge.obliterate_multiverse()
-        vars.lua_koboldbridge.load_corescript("default.lua")
-        vars.lua_koboldbridge.load_userscripts(filenames, modulenames, descriptions)
+        tpool.execute(vars.lua_koboldbridge.load_corescript, "default.lua")
+        tpool.execute(vars.lua_koboldbridge.load_userscripts, filenames, modulenames, descriptions)
     except lupa.LuaError as e:
         vars.lua_koboldbridge.obliterate_multiverse()
         if(vars.serverstarted):
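tpool.execute(fn, *args, **kwargs) runs fn on one of eventlet's native worker threads and hands the result back to the calling green thread, which yields while it waits. With EVENTLET_THREADPOOL_SIZE set to 1 above, offloaded calls also run strictly one at a time, which presumably shields the shared Lua state from concurrent access. A minimal sketch (blocking_work is illustrative, not from the codebase):

    import eventlet
    eventlet.monkey_patch()

    from eventlet import tpool

    def blocking_work(x, y):
        # Stands in for a call into a C extension (such as lupa) that
        # would block the hub without ever yielding.
        return x + y

    # Runs in a native OS thread; this green thread resumes with the result.
    print(tpool.execute(blocking_work, 2, 3))  # 5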
@@ -1293,7 +1298,7 @@ def lua_is_custommodel():
 def execute_inmod():
     vars.lua_logname = ...
     try:
-        vars.lua_koboldbridge.execute_inmod()
+        tpool.execute(vars.lua_koboldbridge.execute_inmod)
     except lupa.LuaError as e:
         vars.lua_koboldbridge.obliterate_multiverse()
         emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error, please check console.'}, broadcast=True)
@@ -1307,7 +1312,7 @@ def execute_genmod():
 
 def execute_outmod():
     try:
-        vars.lua_koboldbridge.execute_outmod()
+        tpool.execute(vars.lua_koboldbridge.execute_outmod)
     except lupa.LuaError as e:
         vars.lua_koboldbridge.obliterate_multiverse()
         emit('from_server', {'cmd': 'errmsg', 'data': 'Lua script error, please check console.'}, broadcast=True)
@@ -1315,6 +1320,9 @@ def execute_outmod():
         print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr)
         print("{0}{1}{2}".format(colors.YELLOW, "Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.", colors.END), file=sys.stderr)
     set_aibusy(0)
+    if(vars.lua_koboldbridge.resend_settings_required):
+        vars.lua_koboldbridge.resend_settings_required = False
+        lua_resend_settings()
 
 #==================================================================#
 # Lua runtime startup
@@ -1348,7 +1356,6 @@ bridged = {
     "has_setting": lua_has_setting,
     "get_setting": lua_get_setting,
     "set_setting": lua_set_setting,
-    "resend_settings": lua_resend_settings,
     "set_chunk": lua_set_chunk,
     "get_modeltype": lua_get_modeltype,
     "get_modelbackend": lua_get_modelbackend,
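Read together with the execute_outmod() hunk above and the bridge.lua hunk below, this moves the settings-resend across the language boundary: instead of Lua calling back into Python through bridged.resend_settings(), the Lua side now only sets koboldbridge.resend_settings_required, and Python calls lua_resend_settings() after the script returns, presumably so the socket emits it triggers happen on the green thread rather than inside a tpool worker.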
@@ -2157,23 +2164,8 @@ def calcsubmit(txt):
 #==================================================================#
 # Send text to generator and deal with output
 #==================================================================#
-def generate(txt, minimum, maximum, found_entries=None):
-    if(found_entries is None):
-        found_entries = set()
-    found_entries = tuple(found_entries.copy() for _ in range(vars.numseqs))
-
-    print("{0}Min:{1}, Max:{2}, Txt:{3}{4}".format(colors.YELLOW, minimum, maximum, txt, colors.END))
-
-    # Store context in memory to use it for comparison with generated content
-    vars.lastctx = txt
-
-    # Clear CUDA cache if using GPU
-    if(vars.hascuda and (vars.usegpu or vars.breakmodel)):
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    # Submit input text to generator
-    try:
+
+def _generate(txt, minimum, maximum, found_entries):
     gen_in = tokenizer.encode(txt, return_tensors="pt", truncation=True).long()
     if(vars.sp is not None):
         soft_tokens = torch.arange(
@@ -2251,6 +2243,27 @@ def generate(txt, minimum, maximum, found_entries=None):
     model.kai_scanner_head_length = encoded.shape[-1]
     numseqs = 1
 
+    return genout, already_generated
+
+
+def generate(txt, minimum, maximum, found_entries=None):
+    if(found_entries is None):
+        found_entries = set()
+    found_entries = tuple(found_entries.copy() for _ in range(vars.numseqs))
+
+    print("{0}Min:{1}, Max:{2}, Txt:{3}{4}".format(colors.YELLOW, minimum, maximum, txt, colors.END))
+
+    # Store context in memory to use it for comparison with generated content
+    vars.lastctx = txt
+
+    # Clear CUDA cache if using GPU
+    if(vars.hascuda and (vars.usegpu or vars.breakmodel)):
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    # Submit input text to generator
+    try:
+        genout, already_generated = tpool.execute(_generate, txt, minimum, maximum, found_entries)
     except Exception as e:
         if(issubclass(type(e), lupa.LuaError)):
             vars.lua_koboldbridge.obliterate_multiverse()
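This pair of hunks is the core of the change: generate() keeps its cheap bookkeeping (logging, vars.lastctx, CUDA cache maintenance) on the green thread and ships the entire blocking transformers call, now isolated in _generate(), to the thread pool, so websocket traffic keeps flowing while the model generates. The same split in miniature (names are illustrative, not from the codebase):

    import time
    from eventlet import tpool

    def _heavy(prompt):
        # Stand-in for the blocking model call that _generate() performs.
        time.sleep(1)
        return prompt.upper()

    def handle(prompt):
        # Fast bookkeeping stays on the green thread; only the slow part
        # is shipped to a native thread.
        return tpool.execute(_heavy, prompt)

    print(handle("hello"))  # HELLO, after roughly a second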
@@ -2448,7 +2461,8 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None):
             dtype=np.uint32
         )
 
-    genout = tpu_mtj_backend.infer(
+    genout = tpool.execute(
+        tpu_mtj_backend.infer,
         txt,
         gen_len = maximum-minimum+1,
         temp=vars.temp,
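The TPU path gets the same treatment as the Lua and transformers calls: tpu_mtj_backend.infer is handed to tpool.execute rather than invoked directly, with its arguments passed through unchanged.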
@@ -3813,4 +3827,4 @@ if __name__ == "__main__":
         webbrowser.open_new('http://localhost:5000')
         print("{0}\nServer started!\nYou may now connect with a browser at http://127.0.0.1:5000/{1}".format(colors.GREEN, colors.END))
         vars.serverstarted = True
-        socketio.run(app)
+        socketio.run(app, port=5000)
bridge.lua

@@ -1679,9 +1679,6 @@ return function(_python, _bridged)
             end
         end
     end
-    if koboldbridge.resend_settings_required then
-        bridged.resend_settings()
-    end
     koboldbridge.userstate = nil
     return r
 end
environments/*.yml (four conda environment files)

@@ -10,11 +10,11 @@ dependencies:
   - cudatoolkit=11.1
   - tensorflow-gpu
   - python=3.8.*
-  - gevent-websocket
+  - eventlet
   - pip
   - git
   - pip:
     - git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3-rp-b
     - flask-cloudflared
     - flask-ngrok
-    - lupa
+    - lupa==1.10
@@ -11,10 +11,10 @@ dependencies:
   - python=3.8.*
   - cudatoolkit=11.1
   - transformers
-  - gevent-websocket
+  - eventlet
   - pip
   - git
   - pip:
     - flask-cloudflared
     - flask-ngrok
-    - lupa
+    - lupa==1.10
@@ -6,7 +6,7 @@ dependencies:
   - colorama
   - flask-socketio
   - python=3.8.*
-  - gevent-websocket
+  - eventlet
   - pip
   - git
   - pip:
@@ -16,4 +16,4 @@ dependencies:
     - flask-cloudflared
     - git+https://github.com/finetuneanon/transformers@gpt-neo-localattention3-rp-b
     - flask-ngrok
-    - lupa
+    - lupa==1.10
@@ -8,7 +8,7 @@ dependencies:
   - colorama
   - flask-socketio
   - python=3.8.*
-  - gevent-websocket
+  - eventlet
   - pip
   - git
   - pip:
@@ -17,4 +17,4 @@ dependencies:
     - torchvision==0.11.1
     - flask-cloudflared
     - flask-ngrok
-    - lupa
+    - lupa==1.10
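All four environment files make the same pair of dependency changes: gevent-websocket is swapped for eventlet, and lupa is pinned to 1.10 instead of floating to the latest release.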
static/application.js

@@ -1619,7 +1619,7 @@ $(document).ready(function(){
     seqselcontents = $("#seqselcontents");
 
     // Connect to SocketIO server
-    socket = io.connect(window.document.origin);
+    socket = io.connect(window.document.origin, {transports: ['websocket', 'polling']});
 
     socket.on('from_server', function(msg) {
         if(msg.cmd == "connected") {
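Passing transports explicitly makes the socket.io client attempt a direct websocket connection first and fall back to HTTP long-polling, instead of the default of starting with polling and upgrading.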
templates/index.html

@@ -7,7 +7,7 @@
 <script src="static/jquery-3.6.0.min.js"></script>
 <script src="static/jquery-ui.sortable.min.js"></script>
 <script src="static/socket.io.min.js"></script>
-<script src="static/application.js?ver=1.16.4e"></script>
+<script src="static/application.js?ver=1.16.4f"></script>
 <script src="static/bootstrap.min.js"></script>
 <script src="static/bootstrap-toggle.min.js"></script>
 <script src="static/rangy-core.min.js"></script>
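The ?ver=1.16.4e to ?ver=1.16.4f bump is cache busting: the changed query string forces browsers to re-fetch the modified application.js rather than serve a stale cached copy.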