OpenAI support (by @CncAnon1)

Author: SillyLossy
Date: 2023-03-21 23:31:26 +02:00
parent bd47521ed6
commit e6413d0905
9 changed files with 1280 additions and 225 deletions

package-lock.json (generated)

@@ -8,6 +8,8 @@
"name": "TavernAI",
"version": "1.1.0",
"dependencies": {
"@dqbd/tiktoken": "^1.0.2",
"axios": "^1.3.4",
"cookie-parser": "^1.4.6",
"cors": "^2.8.5",
"csrf-csrf": "^2.2.3",
@@ -28,6 +30,11 @@
"TavernAI": "server.js"
}
},
"node_modules/@dqbd/tiktoken": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@dqbd/tiktoken/-/tiktoken-1.0.2.tgz",
"integrity": "sha512-AjGTBRWsMoVmVeN55NLyupyM8TNamOUBl6tj5t/leLDVup3CFGO9tVagNL1jf3GyZLkWZSTmYVbPQ/M2LEcNzw=="
},
"node_modules/@jimp/bmp": {
"version": "0.22.7",
"resolved": "https://registry.npmjs.org/@jimp/bmp/-/bmp-0.22.7.tgz",
@@ -468,6 +475,21 @@
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
},
"node_modules/asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
},
"node_modules/axios": {
"version": "1.3.4",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.3.4.tgz",
"integrity": "sha512-toYm+Bsyl6VC5wSkfkbbNB6ROv7KY93PEBBL6xyDczaIHasAiv4wPqQ/c4RjoQzipxRD2W5g21cOqQulZ7rHwQ==",
"dependencies": {
"follow-redirects": "^1.15.0",
"form-data": "^4.0.0",
"proxy-from-env": "^1.1.0"
}
},
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
@@ -609,6 +631,17 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"dependencies": {
"delayed-stream": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
@@ -737,6 +770,14 @@
"node": ">=8"
}
},
"node_modules/delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/depd": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
@@ -909,6 +950,19 @@
}
}
},
"node_modules/form-data": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"mime-types": "^2.1.12"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/forwarded": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
@@ -1513,6 +1567,11 @@
"node": ">= 0.10"
}
},
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
},
"node_modules/qs": {
"version": "6.11.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
@@ -1951,6 +2010,11 @@
}
},
"dependencies": {
"@dqbd/tiktoken": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@dqbd/tiktoken/-/tiktoken-1.0.2.tgz",
"integrity": "sha512-AjGTBRWsMoVmVeN55NLyupyM8TNamOUBl6tj5t/leLDVup3CFGO9tVagNL1jf3GyZLkWZSTmYVbPQ/M2LEcNzw=="
},
"@jimp/bmp": {
"version": "0.22.7",
"resolved": "https://registry.npmjs.org/@jimp/bmp/-/bmp-0.22.7.tgz",
@@ -2281,6 +2345,21 @@
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
},
"asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
},
"axios": {
"version": "1.3.4",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.3.4.tgz",
"integrity": "sha512-toYm+Bsyl6VC5wSkfkbbNB6ROv7KY93PEBBL6xyDczaIHasAiv4wPqQ/c4RjoQzipxRD2W5g21cOqQulZ7rHwQ==",
"requires": {
"follow-redirects": "^1.15.0",
"form-data": "^4.0.0",
"proxy-from-env": "^1.1.0"
}
},
"balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
@@ -2380,6 +2459,14 @@
"get-intrinsic": "^1.0.2"
}
},
"combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"requires": {
"delayed-stream": "~1.0.0"
}
},
"concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
@@ -2475,6 +2562,11 @@
"resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz",
"integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og=="
},
"delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="
},
"depd": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
@@ -2612,6 +2704,16 @@
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz",
"integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA=="
},
"form-data": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
"requires": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"mime-types": "^2.1.12"
}
},
"forwarded": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
@@ -3068,6 +3170,11 @@
}
}
},
"proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
},
"qs": {
"version": "6.11.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",

package.json

@@ -1,5 +1,7 @@
{
"dependencies": {
"@dqbd/tiktoken": "^1.0.2",
"axios": "^1.3.4",
"cookie-parser": "^1.4.6",
"cors": "^2.8.5",
"csrf-csrf": "^2.2.3",

public/OpenAI Settings — new preset file (its values match the fallback defaults in loadOpenAISettings(), so this is presumably the "Default" preset listed in the new dropdown)

@@ -0,0 +1,5 @@
{
"temperature": 0.9,
"frequency_penalty": 0.7,
"presence_penalty": 0.7
}

public/index.html

@@ -92,6 +92,7 @@
<option value="kobold">KoboldAI</option>
<option value="textgenerationwebui">Text generation web UI</option>
<option value="novel">NovelAI</option>
<option value="openai">OpenAI</option>
</select>
</div>
<div id="kobold_api" style="position: relative;"> <!-- shows the kobold settings -->
@@ -176,11 +177,30 @@
<input id="api_button_textgenerationwebui" class="menu_button" type="submit" value="Connect">
<img id="api_loading_textgenerationwebui" src="img/load.svg">
</form>
<div id="online_status4">
<div id="online_status_indicator4"></div>
<div id="online_status_text4">Not connected</div>
<div class="online_status4">
<div class="online_status_indicator4"></div>
<div class="online_status_text4">Not connected</div>
</div>
</div>
<div id="openai_api" style="display: none;position: relative;">
<form action="javascript:void(null);" method="post" enctype="multipart/form-data">
<h4>API key </h4>
<input id="api_key_openai" name="api_key_openai" class="text_pole" maxlength="500" size="35" value=""
autocomplete="off">
<input id="api_button_openai" class="menu_button" type="submit" value="Connect">
<img id="api_loading_openai" src="img/load.svg">
</form>
<div class="online_status4">
<div class="online_status_indicator4"></div>
<div class="online_status_text4">No connection...</div>
</div>
<h4>Preset settings</h4>
<h5>Selecting settings</h5>
<select id="settings_perset_openai" class="option_select_right_menu">
<option value="gui">Default</option>
</select>
</div>
</div>
</div>
@@ -227,7 +247,7 @@
</div>
</div>
</div>
<div id="ai-config-button" class="drawer" style="z-index:3002;">
<div class="drawer-toggle drawer-header">
<div class="drawer-icon icon-sliders closedIcon"></div>
@@ -242,14 +262,16 @@
Amount generation
</div>
<span id="amount_gen_counter" class="range-block-counter">select</span>
<input type="range" class="range-block-range" id="amount_gen" name="volume" min="16" max="512" step="1">
<input type="range" class="range-block-range" id="amount_gen" name="volume" min="16"
max="512" step="1">
</div>
<div id="max_context_block" class="range-block">
<div class="range-block-title">
Context Size
</div>
<span id="max_context_counter" class="range-block-counter">select</span>
<input type="range" class="range-block-range" id="max_context" name="volume" min="512" max="2048" step="1">
<input type="range" class="range-block-range" id="max_context" name="volume" min="512"
max="2048" step="1">
</div>
</div>
</div>
@@ -310,7 +332,7 @@
</div>
<div class="range-block-title"></div>
Repetition Penalty Range
<div class="range-block-counter">
<span id="rep_pen_size_counter_novel">select</span>
</div>
@@ -327,7 +349,8 @@
<span id="temp_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="temp_textgenerationwebui" name="volume" min="0.1" max="2.0" step="0.01">
<input type="range" id="temp_textgenerationwebui" name="volume" min="0.1" max="2.0"
step="0.01">
</div>
</div>
<div class="range-block">
@@ -338,7 +361,8 @@
<span id="rep_pen_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="rep_pen_textgenerationwebui" name="volume" min="1" max="1.5" step="0.01">
<input type="range" id="rep_pen_textgenerationwebui" name="volume" min="1" max="1.5"
step="0.01">
</div>
</div>
<div class="range-block">
@@ -349,7 +373,67 @@
<span id="rep_pen_size_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="rep_pen_size_textgenerationwebui" name="volume" min="0" max="2048" step="1">
<input type="range" id="rep_pen_size_textgenerationwebui" name="volume" min="0"
max="2048" step="1">
</div>
</div>
</div>
<div id="range_block_openai">
<div class="range-block">
<div class="range-block-title">
OpenAI Context Size
</div>
<div class="range-block-counter">
<span id="openai_max_context_counter">select</span>
</div>
<div class="range-block-range">
<input type="range" id="openai_max_context" name="volume" min="512" max="4095" step="1">
</div>
</div>
<div class="range-block">
<div class="range-block-title">
OpenAI max response length (in tokens)
</div>
<div class="range-block-range">
<input type="number" id="openai_max_tokens" name="openai_max_tokens" class="text_pole"
min="50" max="1000">
</div>
</div>
<div class="range-block">
<div class="range-block-title">
Temperature
</div>
<div class="range-block-counter">
<span id="temp_counter_openai">select</span>
</div>
<div class="range-block-range">
<input type="range" id="temp_openai" name="volume" min="0" max="2.0" step="0.01">
</div>
</div>
<div class="range-block">
<div class="range-block-title">
Frequency Penalty
</div>
<div class="range-block-counter">
<span id="freq_pen_counter_openai">select</span>
</div>
<div class="range-block-range">
<input type="range" id="freq_pen_openai" name="volume" min="-2" max="2" step="0.01">
</div>
</div>
<div class="range-block">
<div class="range-block-title">
Presence Penalty
</div>
<div class="range-block-counter">
<span id="pres_pen_counter_openai">select</span>
</div>
<div class="range-block-range">
<input type="range" id="pres_pen_openai" name="volume" min="-2" max="2" step="0.01">
</div>
</div>
</div>
@@ -453,9 +537,9 @@
</div>
</div>
<div id="novel_api-settings">
***COMING SOON***
</div>
<div id="textgenerationwebui_api-settings">
<div class="range-block">
@@ -466,49 +550,123 @@
<span id="top_k_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="top_k_textgenerationwebui" name="volume" min="0" max="200" step="1">
<input type="range" id="top_k_textgenerationwebui" name="volume" min="0" max="200"
step="1">
</div>
<div class="range-block">
<div class="range-block-title">
Top P
</div>
<div class="range-block-counter">
<span id="top_p_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="top_p_textgenerationwebui" name="volume" min="0" max="1" step="0.1">
</div>
</div>
<div class="range-block">
<div class="range-block-title">
Top P
</div>
<div class="range-block">
<div class="range-block-title">
Typical P
</div>
<div class="range-block-counter">
<span id="typical_p_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="typical_p_textgenerationwebui" name="volume" min="0" max="1" step="0.1">
</div>
<div class="range-block-counter">
<span id="top_p_counter_textgenerationwebui">select</span>
</div>
<div class="range-block">
<div class="range-block-title">
Penalty Alpha
</div>
<div class="range-block-counter">
<span id="penalty_alpha_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="penalty_alpha_textgenerationwebui" name="volume" min="0" max="5" step="0.05" />
</div>
<div class="range-block-range">
<input type="range" id="top_p_textgenerationwebui" name="volume" min="0" max="1"
step="0.1">
</div>
</div>
<div class="range-block">
<div class="range-block-title">
Typical P
</div>
<div class="range-block-counter">
<span id="typical_p_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="typical_p_textgenerationwebui" name="volume" min="0" max="1"
step="0.1">
</div>
</div>
<div class="range-block">
<div class="range-block-title">
Penalty Alpha
</div>
<div class="range-block-counter">
<span id="penalty_alpha_counter_textgenerationwebui">select</span>
</div>
<div class="range-block-range">
<input type="range" id="penalty_alpha_textgenerationwebui" name="volume" min="0" max="5"
step="0.05" />
</div>
</div>
</div>
<div id="openai_settings">
<div class="range-block">
<label class="checkbox_label" for="nsfw_toggle">
<input id="nsfw_toggle" type="checkbox" checked>
NSFW Toggle
</label>
</div>
<div class="range-block">
<label title="NSFW block goes first in the resulting prompt" class="checkbox_label">
<input id="nsfw_first" type="checkbox" />
NSFW first
</label>
</div>
<!-- Currently broken -->
<div style="display: none" class="range-block">
<label title="Enables OpenAI completion streaming" class="checkbox_label" for="stream_toggle">
<input id="stream_toggle" type="checkbox" />
Streaming
</label>
</div>
<div class="range-block">
<label title="Blends definitions with model's knowledge" class="checkbox_label">
<input id="enhance_definitions" type="checkbox" />
Enhance Definitions
</label>
</div>
<div class="range-block">
<label for="wrap_in_quotes" title="Wrap user messages in quotes before sending" class="checkbox_label">
<input id="wrap_in_quotes" type="checkbox" />
Wrap in Quotes
</label>
</div>
<br>
<div class="range-block">
<div class="range-block-title">
Main prompt
</div>
<div class="range-block-counter">
The main prompt used to set the model behavior
</div>
<div class="range-block-range">
<textarea id="main_prompt_textarea" class="text_pole" name="main_prompt" rows="5"
placeholder=""></textarea>
</div>
</div>
<div class="range-block">
<div class="range-block-title">
NSFW prompt
</div>
<div class="range-block-counter">
Prompt that is used when the NSFW toggle is on
</div>
<div class="range-block-range">
<textarea id="nsfw_prompt_textarea" class="custom_textarea" name="nsfw_prompt" rows="5"
placeholder=""></textarea>
</div>
</div>
<div class="range-block">
<input id="save_prompts" class="menu_button" type="button" value="Save prompt settings">
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<div id="WI-SP-button" class="drawer" style="z-index:3003;">
<div class="drawer-toggle drawer-header">
<div class="drawer-icon icon-globe closedIcon "></div>

public/script.js

@@ -46,6 +46,18 @@ import {
disable_scenario_formatting,
} from "./scripts/power-user.js";
import {
setOpenAIMessageExamples,
setOpenAIMessages,
prepareOpenAIMessages,
sendOpenAIRequest,
loadOpenAISettings,
setOpenAIOnlineStatus,
generateOpenAIPromptCache,
oai_settings,
is_get_status_openai
} from "./scripts/openai.js";
import {
getNovelTier,
loadNovelPreset,
@@ -74,10 +86,13 @@ export {
select_rm_info,
setCharacterId,
setCharacterName,
setOnlineStatus,
checkOnlineStatus,
setEditedMessageId,
setSendButtonState,
selectRightMenuWithAnimation,
setRightTabSelectedClass,
messageFormating,
chat,
this_chid,
settings,
@@ -89,10 +104,12 @@ export {
token,
is_send_press,
api_server_textgenerationwebui,
count_view_mes,
default_avatar,
system_message_types,
talkativeness_default,
default_ch_mes,
saveChat,
}
// API OBJECT FOR EXTERNAL WIRING
@@ -343,15 +360,18 @@ function checkOnlineStatus() {
$("#online_status_text2").html("No connection...");
$("#online_status_indicator3").css("background-color", "red"); //Novel
$("#online_status_text3").html("No connection...");
$(".online_status_indicator4").css("background-color", "red"); //OAI / ooba
$(".online_status_text4").html("No connection...");
is_get_status = false;
is_get_status_novel = false;
setOpenAIOnlineStatus(false);
} else {
$("#online_status_indicator2").css("background-color", "green"); //kobold
$("#online_status_text2").html(online_status);
$("#online_status_indicator3").css("background-color", "green"); //novel
$("#online_status_text3").html(online_status);
$("#online_status_indicator4").css("background-color", "green"); //extensions api
$("#online_status_text4").html(online_status);
$(".online_status_indicator4").css("background-color", "green"); //OAI / ooba
$(".online_status_text4").html(online_status);
}
}
@@ -426,7 +446,7 @@ async function getStatus() {
},
});
} else {
if (is_get_status_novel != true) {
if (is_get_status_novel != true && is_get_status_openai != true) {
online_status = "no_connection";
}
}
@@ -951,6 +971,7 @@ async function Generate(type, automatic_trigger, force_name2) {//encode("dsfs").
else if (type !== "swipe") {
chat.length = chat.length - 1;
count_view_mes -= 1;
openai_msgs.pop();
$('#chat').children().last().hide(500, function () {
$(this).remove();
});
@@ -1038,6 +1059,11 @@ async function Generate(type, automatic_trigger, force_name2) {//encode("dsfs").
let mesExamplesArray = mesExamples.split(/<START>/gi).slice(1).map(block => `<START>\n${block.trim()}\n`);
if (main_api === 'openai') {
setOpenAIMessages(chat);
setOpenAIMessageExamples(mesExamplesArray);
}
if (is_pygmalion) {
storyString += appendToStoryString(charDescription, disable_description_formatting ? '' : name2 + "'s Persona: ");
storyString += appendToStoryString(charPersonality, disable_personality_formatting ? '' : 'Personality: ');
@@ -1190,6 +1216,10 @@ async function Generate(type, automatic_trigger, force_name2) {//encode("dsfs").
generatedPromtCache += cycleGenerationPromt;
if (generatedPromtCache.length == 0) {
if (main_api === 'openai') {
generateOpenAIPromptCache(charPersonality, topAnchorDepth, anchorTop, anchorBottom);
}
console.log('generating prompt');
chatString = "";
arrMes = arrMes.reverse();
@@ -1417,6 +1447,8 @@ async function Generate(type, automatic_trigger, force_name2) {//encode("dsfs").
"order": this_settings.order
};
}
var generate_url = '';
if (main_api == 'kobold') {
generate_url = '/generate';
@@ -1426,180 +1458,182 @@ async function Generate(type, automatic_trigger, force_name2) {//encode("dsfs").
generate_url = '/generate_novelai';
}
console.log('rungenerate calling API');
jQuery.ajax({
type: 'POST', //
url: generate_url, //
data: JSON.stringify(generate_data),
beforeSend: function () {
//$('#create_button').attr('value','Creating...');
},
cache: false,
dataType: "json",
contentType: "application/json",
success: function (data) {
//console.log('generation success');
tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
//console.log('Tokens requested in total: '+tokens_already_generated);
//$("#send_textarea").focus();
//$("#send_textarea").removeAttr('disabled');
is_send_press = false;
if (!data.error) {
//const getData = await response.json();
var getMessage = "";
if (main_api == 'kobold') {
getMessage = data.results[0].text;
} else if (main_api == 'textgenerationwebui') {
getMessage = data.data[0];
if (getMessage == null || data.error) {
callPopup('<h3>Got empty response from Text generation web UI. Try restarting the API with recommended options.</h3>', 'text');
return;
}
getMessage = getMessage.substring(finalPromt.length);
} else if (main_api == 'novel') {
getMessage = data.output;
if (main_api == 'openai') {
let prompt = prepareOpenAIMessages(name2, storyString);
sendOpenAIRequest(prompt).then(onSuccess).catch(onError);
}
else {
jQuery.ajax({
type: 'POST', //
url: generate_url, //
data: JSON.stringify(generate_data),
beforeSend: function () {
//$('#create_button').attr('value','Creating...');
},
cache: false,
dataType: "json",
contentType: "application/json",
success: onSuccess,
error: onError
}); //end of "if not data error"
}
function onSuccess(data) {
tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
is_send_press = false;
if (!data.error) {
//const getData = await response.json();
var getMessage = "";
if (main_api == 'kobold') {
getMessage = data.results[0].text;
} else if (main_api == 'textgenerationwebui') {
getMessage = data.data[0];
if (getMessage == null || data.error) {
callPopup('<h3>Got empty response from Text generation web UI. Try restarting the API with recommended options.</h3>', 'text');
return;
}
if (collapse_newlines) {
getMessage = collapseNewlines(getMessage);
}
//Pygmalion run again // to make it continue generating so long as it's under max_amount and hasn't signaled
// an end to the character's response via typing "You:" or adding "<endoftext>"
if (is_pygmalion) {
if_typing_text = false;
message_already_generated += getMessage;
promptBias = '';
//console.log('AI Response so far: '+message_already_generated);
if (message_already_generated.indexOf('You:') === -1 && //if there is no 'You:' in the response msg
message_already_generated.indexOf('<|endoftext|>') === -1 && //if there is no <endoftext> stamp in the response msg
tokens_already_generated < parseInt(amount_gen) && //if the gen'd msg is less than the max response length..
getMessage.length > 0) { //if we actually have gen'd text at all...
runGenerate(getMessage);
console.log('returning to make pyg generate again'); //generate again with the 'GetMessage' argument..
return;
}
getMessage = message_already_generated;
}
//Formatting
getMessage = $.trim(getMessage);
if (is_pygmalion) {
getMessage = getMessage.replace(new RegExp('<USER>', "g"), name1);
getMessage = getMessage.replace(new RegExp('<BOT>', "g"), name2);
getMessage = getMessage.replace(new RegExp('You:', "g"), name1 + ':');
}
if (getMessage.indexOf(name1 + ":") != -1) {
getMessage = getMessage.substr(0, getMessage.indexOf(name1 + ":"));
}
if (getMessage.indexOf('<|endoftext|>') != -1) {
getMessage = getMessage.substr(0, getMessage.indexOf('<|endoftext|>'));
}
// clean-up group message from excessive generations
if (selected_group) {
getMessage = cleanGroupMessage(getMessage);
}
let this_mes_is_name = true;
if (getMessage.indexOf(name2 + ":") === 0) {
getMessage = getMessage.replace(name2 + ':', '');
getMessage = getMessage.trimStart();
} else {
this_mes_is_name = false;
}
if (force_name2) this_mes_is_name = true;
//getMessage = getMessage.replace(/^\s+/g, '');
if (getMessage.length > 0) {
if (chat[chat.length - 1]['swipe_id'] === undefined ||
chat[chat.length - 1]['is_user']) { type = 'normal'; }
if (type === 'swipe') {
chat[chat.length - 1]['swipes'][chat[chat.length - 1]['swipes'].length] = getMessage;
if (chat[chat.length - 1]['swipe_id'] === chat[chat.length - 1]['swipes'].length - 1) {
//console.log(getMessage);
chat[chat.length - 1]['mes'] = getMessage;
// console.log('runGenerate calls addOneMessage for swipe');
addOneMessage(chat[chat.length - 1], 'swipe');
} else {
chat[chat.length - 1]['mes'] = getMessage;
}
is_send_press = false;
} else {
console.log('entering chat update routine for non-swipe post');
is_send_press = false;
chat[chat.length] = {};
chat[chat.length - 1]['name'] = name2;
chat[chat.length - 1]['is_user'] = false;
chat[chat.length - 1]['is_name'] = this_mes_is_name;
chat[chat.length - 1]['send_date'] = humanizedDateTime();
getMessage = $.trim(getMessage);
chat[chat.length - 1]['mes'] = getMessage;
if (selected_group) {
console.log('entering chat update for groups');
let avatarImg = 'img/fluffy.png';
if (characters[this_chid].avatar != 'none') {
avatarImg = `characters/${characters[this_chid].avatar}?${Date.now()}`;
}
chat[chat.length - 1]['is_name'] = true;
chat[chat.length - 1]['force_avatar'] = avatarImg;
}
//console.log('runGenerate calls addOneMessage');
addOneMessage(chat[chat.length - 1]);
$("#send_but").css("display", "inline");
$("#loading_mes").css("display", "none");
}
} else {
// regenerate with character speech reinforced
// to make sure we leave on swipe type while also adding the name2 appendage
const newType = type == "swipe" ? "swipe" : "force_name2";
Generate(newType, automatic_trigger = false, force_name2 = true);
}
} else {
$("#send_but").css("display", "inline");
$("#loading_mes").css("display", "none");
//console.log('runGenerate calling showSwipeBtns');
showSwipeButtons();
getMessage = getMessage.substring(finalPromt.length);
} else if (main_api == 'novel') {
getMessage = data.output;
}
if (main_api == 'openai') {
getMessage = data;
}
console.log('/savechat called by /Generate');
if (collapse_newlines) {
getMessage = collapseNewlines(getMessage);
}
//Pygmalion run again // to make it continue generating so long as it's under max_amount and hasn't signaled
// an end to the character's response via typing "You:" or adding "<endoftext>"
if (is_pygmalion) {
if_typing_text = false;
message_already_generated += getMessage;
promptBias = '';
//console.log('AI Response so far: '+message_already_generated);
if (message_already_generated.indexOf('You:') === -1 && //if there is no 'You:' in the response msg
message_already_generated.indexOf('<|endoftext|>') === -1 && //if there is no <endoftext> stamp in the response msg
tokens_already_generated < parseInt(amount_gen) && //if the gen'd msg is less than the max response length..
getMessage.length > 0) { //if we actually have gen'd text at all...
runGenerate(getMessage);
console.log('returning to make pyg generate again'); //generate again with the 'GetMessage' argument..
return;
}
getMessage = message_already_generated;
}
//Formatting
getMessage = $.trim(getMessage);
if (is_pygmalion) {
getMessage = getMessage.replace(/<USER>/g, name1);
getMessage = getMessage.replace(/<BOT>/g, name2);
getMessage = getMessage.replace(/You:/g, name1 + ':');
}
if (getMessage.indexOf(name1 + ":") != -1) {
getMessage = getMessage.substr(0, getMessage.indexOf(name1 + ":"));
}
if (getMessage.indexOf('<|endoftext|>') != -1) {
getMessage = getMessage.substr(0, getMessage.indexOf('<|endoftext|>'));
}
// clean-up group message from excessive generations
if (selected_group) {
saveGroupChat(selected_group);
} else {
saveChat();
getMessage = cleanGroupMessage(getMessage);
}
//let final_message_length = encode(JSON.stringify(getMessage)).length;
//console.log('AI Response: +'+getMessage+ '('+final_message_length+' tokens)');
let this_mes_is_name = true;
if (getMessage.indexOf(name2 + ":") === 0) {
getMessage = getMessage.replace(name2 + ':', '');
getMessage = getMessage.trimStart();
} else {
this_mes_is_name = false;
}
if (force_name2) this_mes_is_name = true;
//getMessage = getMessage.replace(/^\s+/g, '');
if (getMessage.length > 0) {
if (chat[chat.length - 1]['swipe_id'] === undefined ||
chat[chat.length - 1]['is_user']) { type = 'normal'; }
if (type === 'swipe') {
chat[chat.length - 1]['swipes'][chat[chat.length - 1]['swipes'].length] = getMessage;
if (chat[chat.length - 1]['swipe_id'] === chat[chat.length - 1]['swipes'].length - 1) {
//console.log(getMessage);
chat[chat.length - 1]['mes'] = getMessage;
// console.log('runGenerate calls addOneMessage for swipe');
addOneMessage(chat[chat.length - 1], 'swipe');
} else {
chat[chat.length - 1]['mes'] = getMessage;
}
is_send_press = false;
} else {
console.log('entering chat update routine for non-swipe post');
is_send_press = false;
chat[chat.length] = {};
chat[chat.length - 1]['name'] = name2;
chat[chat.length - 1]['is_user'] = false;
chat[chat.length - 1]['is_name'] = this_mes_is_name;
chat[chat.length - 1]['send_date'] = humanizedDateTime();
getMessage = $.trim(getMessage);
chat[chat.length - 1]['mes'] = getMessage;
if (selected_group) {
console.log('entering chat update for groups');
let avatarImg = 'img/fluffy.png';
if (characters[this_chid].avatar != 'none') {
avatarImg = `characters/${characters[this_chid].avatar}?${Date.now()}`;
}
chat[chat.length - 1]['is_name'] = true;
chat[chat.length - 1]['force_avatar'] = avatarImg;
}
//console.log('runGenerate calls addOneMessage');
addOneMessage(chat[chat.length - 1]);
$("#send_but").css("display", "inline");
$("#loading_mes").css("display", "none");
}
} else {
// regenerate with character speech reinforced
// to make sure we leave on swipe type while also adding the name2 appendage
const newType = type == "swipe" ? "swipe" : "force_name2";
Generate(newType, automatic_trigger = false, force_name2 = true);
}
} else {
$("#send_but").css("display", "inline");
//console.log('runGenerate calling showSwipeBtns pt. 2');
$("#loading_mes").css("display", "none");
//console.log('runGenerate calling showSwipeBtns');
showSwipeButtons();
$("#loading_mes").css("display", "none");
$('.mes_edit:last').show();
},
error: function (jqXHR, exception) {
$("#send_textarea").removeAttr('disabled');
is_send_press = false;
$("#send_but").css("display", "inline");
$("#loading_mes").css("display", "none");
console.log(exception);
console.log(jqXHR);
}
console.log('/savechat called by /Generate');
if (selected_group) {
saveGroupChat(selected_group);
} else {
saveChat();
}
//let final_message_length = encode(JSON.stringify(getMessage)).length;
//console.log('AI Response: +'+getMessage+ '('+final_message_length+' tokens)');
$("#send_but").css("display", "inline");
//console.log('runGenerate calling showSwipeBtns pt. 2');
showSwipeButtons();
$("#loading_mes").css("display", "none");
$('.mes_edit:last').show();
};
function onError(jqXHR, exception) {
$("#send_textarea").removeAttr('disabled');
is_send_press = false;
$("#send_but").css("display", "inline");
$("#loading_mes").css("display", "none");
console.log(exception);
console.log(jqXHR);
};
}); //end of "if not data error"
} //rungenerate ends
} else { //generate's primary loop ends, after this is error handling for no-connection or safety-id
if (this_chid == undefined || this_chid == 'invalid-safety-id') {
@@ -1628,6 +1662,10 @@ function setCharacterName(value) {
name2 = value;
}
function setOnlineStatus(value) {
online_status = value;
}
function setEditedMessageId(value) {
this_edit_mes_id = value;
}
@@ -1797,7 +1835,15 @@ function changeMainAPI() {
maxContextElem: $("#max_context_block"),
amountGenElem: $("#amount_gen_block"),
softPromptElem: $("#softprompt_block")
}
},
"openai": {
apiSettings: $("#openai_settings"),
apiConnector: $("#openai_api"),
apiRanges: $("#range_block_openai"),
maxContextElem: $("#max_context_block"),
amountGenElem: $("#amount_gen_block"),
softPromptElem: $("#softprompt_block"),
},
};
console.log('--- apiElements--- ');
//console.log(apiElements);
@@ -1810,6 +1856,9 @@ function changeMainAPI() {
apiObj.apiConnector.css("display", isCurrentApi ? "block" : "none");
apiObj.apiRanges.css("display", isCurrentApi ? "block" : "none");
// Hide common settings for OpenAI
$("#common-gen-settings-block").css("display", isCurrentApi && apiName !== "openai" ? "block" : "none");
if (isCurrentApi && apiName === "kobold") {
console.log("enabling SP for kobold");
$("#softprompt_block").css("display", "block");
@@ -1989,6 +2038,9 @@ async function getSettings(type) {
//Novel
loadNovelSettings(settings);
// OpenAI
loadOpenAISettings(data, settings);
//Enable GUI deference settings if GUI is selected for Kobold
if (main_api === "kobold") {
if (preset_settings == "gui") {
@@ -2086,6 +2138,7 @@ async function saveSettings(type) {
swipes: swipes,
...nai_settings,
...kai_settings,
...oai_settings,
}),
beforeSend: function () {
//console.log('saveSettings() -- active_character -- '+active_character);
@@ -2253,7 +2306,7 @@ async function getStatusNovel() {
},
});
} else {
if (is_get_status != true) {
if (is_get_status != true && is_get_status_openai != true) {
online_status = "no_connection";
}
}
@@ -3591,6 +3644,7 @@ $(document).ready(function () {
is_pygmalion = false;
is_get_status = false;
is_get_status_novel = false;
setOpenAIOnlineStatus(false);
online_status = "no_connection";
clearSoftPromptsList();
checkOnlineStatus();

public/scripts/RossAscends-mods.js

@@ -20,6 +20,7 @@ import {
import { LoadLocal, SaveLocal, ClearLocal, CheckLocal, LoadLocalBool } from "./f-localStorage.js";
import { selected_group, is_group_generating } from "./group-chats.js";
import { oai_settings } from "./openai.js";
var NavToggle = document.getElementById("nav-toggle");
var PanelPin = document.getElementById("rm_button_panel_pin");
@@ -233,6 +234,12 @@ function RA_autoconnect() {
RA_AC_retries = 1;
}
break;
case 'openai':
if (oai_settings.api_key_openai) {
$("#api_button_openai").click();
retry_delay = 100;
RA_AC_retries = 1;
}
}
if (!connection_made) {

public/scripts/openai.js (new file)

@@ -0,0 +1,575 @@
/*
* CODE FOR OPENAI SUPPORT
* By CncAnon (@CncAnon1)
*/
import {
saveSettingsDebounced,
addOneMessage,
messageFormating,
substituteParams,
count_view_mes,
saveChat,
checkOnlineStatus,
setOnlineStatus,
token,
// chat, name1 and name2 are referenced below but were missing from this import list
// (chat appears in script.js's export list; name1/name2 are assumed to be exported too):
chat,
name1,
name2,
} from "../script.js";
import {
pin_examples,
} from "./power-user.js";
export {
is_get_status_openai,
openai_msgs,
oai_settings,
loadOpenAISettings,
setOpenAIMessages,
setOpenAIMessageExamples,
generateOpenAIPromptCache,
prepareOpenAIMessages,
sendOpenAIRequest,
setOpenAIOnlineStatus,
}
let openai_msgs = [];
let openai_msgs_example = [];
let is_get_status_openai = false;
let is_api_button_press_openai = false;
const default_main_prompt = "Write {{char}}'s next reply in a fictional chat between {{char}} and {{user}}. Write 1 reply only in internet RP style, italicize actions, and avoid quotation marks. Use markdown. Be proactive, creative, and drive the plot and conversation forward. Write at least 1 paragraph, up to 4. Always stay in character and avoid repetition.";
const default_nsfw_prompt = "NSFW/Smut is allowed. Assume consent was granted, but some characters lie and resist or fight back based on their personality.";
const oai_settings = {
preset_settings_openai: 'Default',
api_key_openai: '',
temp_openai: 1.0,
freq_pen_openai: 0,
pres_pen_openai: 0,
stream_openai: true,
openai_max_context: 4095,
openai_max_tokens: 300,
nsfw_toggle: true,
enhance_definitions: false,
wrap_in_quotes: false,
nsfw_first: false,
main_prompt: default_main_prompt,
nsfw_prompt: default_nsfw_prompt,
};
let openai_setting_names;
let openai_settings;
function setOpenAIOnlineStatus(value) {
is_get_status_openai = value;
}
function setOpenAIMessages(chat) {
let j = 0;
// clean openai msgs
openai_msgs = [];
for (let i = chat.length - 1; i >= 0; i--) {
// first greeting message
if (j == 0) {
chat[j]['mes'] = substituteParams(chat[j]['mes']);
}
let role = chat[j]['is_user'] ? 'user' : 'assistant';
let content = chat[j]['mes'];
// Apply the "wrap in quotes" option
if (role == 'user' && oai_settings.wrap_in_quotes) content = `"${content}"`;
openai_msgs[i] = { "role": role, "content": content };
j++;
}
}
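For illustration only (not part of the commit), here is roughly what this mapping produces; the chat entries and the user name are hypothetical:
// Hypothetical two-message chat; substituteParams() has replaced {{user}}/{{char}} in the greeting:
const chat = [
    { is_user: false, mes: "Hello, Anon!" }, // character's greeting (oldest message)
    { is_user: true, mes: "Hi there." },     // user's reply (newest message)
];
setOpenAIMessages(chat);
// openai_msgs now holds the chat newest-first:
// [ { role: "user", content: "Hi there." },        // quoted as "\"Hi there.\"" if wrap_in_quotes is on
//   { role: "assistant", content: "Hello, Anon!" } ]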
function setOpenAIMessageExamples(mesExamplesArray) {
// get a nice array of all blocks of all example messages = array of arrays (important!)
openai_msgs_example = [];
for (let item of mesExamplesArray) {
// remove <START> {Example Dialogue:} and replace \r\n with just \n
let replaced = item.replace(/<START>/i, "{Example Dialogue:}").replace(/\r/gm, '');
let parsed = parseExampleIntoIndividual(replaced);
// add to the example message blocks array
openai_msgs_example.push(parsed);
}
}
function generateOpenAIPromptCache(charPersonality, topAnchorDepth, anchorTop, anchorBottom) {
openai_msgs = openai_msgs.reverse();
let is_add_personality = false;
openai_msgs.forEach(function (msg, i, arr) {//For added anchors and others
let item = msg["content"];
if (i === openai_msgs.length - topAnchorDepth && count_view_mes >= topAnchorDepth && !is_add_personality) {
is_add_personality = true;
if ((anchorTop != "" || charPersonality != "")) {
if (anchorTop != "") charPersonality += ' ';
item = `[${name2} is ${charPersonality}${anchorTop}]\n${item}`;
}
}
if (i >= openai_msgs.length - 1 && count_view_mes > 8 && $.trim(item).substr(0, (name1 + ":").length) == name1 + ":") {//For add anchor in end
item = anchorBottom + "\n" + item;
}
msg["content"] = item;
openai_msgs[i] = msg;
});
}
function parseExampleIntoIndividual(messageExampleString) {
let result = []; // array of msgs
let tmp = messageExampleString.split("\n");
let cur_msg_lines = [];
let in_user = false;
let in_bot = false;
// small helper so the user/bot flush logic isn't repeated
function add_msg(name, role) {
// join different newlines (we split them by \n and join by \n)
// remove char name
// strip to remove extra spaces
let parsed_msg = cur_msg_lines.join("\n").replace(name + ":", "").trim();
result.push({ "role": role, "content": parsed_msg });
cur_msg_lines = [];
}
// skip first line as it'll always be "This is how {bot name} should talk"
for (let i = 1; i < tmp.length; i++) {
let cur_str = tmp[i];
// if it's the user message, switch into user mode and out of bot mode
// yes, repeated code, but I don't care
if (cur_str.indexOf(name1 + ":") === 0) {
in_user = true;
// we were in the bot mode previously, add the message
if (in_bot) {
add_msg(name2, "assistant");
}
in_bot = false;
} else if (cur_str.indexOf(name2 + ":") === 0) {
in_bot = true;
// we were in the user mode previously, add the message
if (in_user) {
add_msg(name1, "user");
}
in_user = false;
}
// push the current line into the current message array only after checking for presence of user/bot
cur_msg_lines.push(cur_str);
}
// Special case for last message in a block because we don't have a new message to trigger the switch
if (in_user) {
add_msg(name1, "user");
} else if (in_bot) {
add_msg(name2, "assistant");
}
return result;
}
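A sketch of what the parser returns for one example block; the names and the dialogue are hypothetical:
// With name1 = "Anon" and name2 = "Aqua":
const block = "{Example Dialogue:}\nAnon: How are you?\nAqua: *smiles* Great!";
// parseExampleIntoIndividual(block) returns:
// [ { role: "user", content: "How are you?" },
//   { role: "assistant", content: "*smiles* Great!" } ]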
function prepareOpenAIMessages(name2, storyString) {
let this_max_context = oai_settings.openai_max_context;
let nsfw_toggle_prompt = "";
let enhance_definitions_prompt = "";
if (oai_settings.nsfw_toggle) {
nsfw_toggle_prompt = oai_settings.nsfw_prompt;
} else {
nsfw_toggle_prompt = "Avoid writing an NSFW/Smut reply. Creatively write around NSFW/Smut scenarios in character.";
}
// Experimental but kinda works
if (oai_settings.enhance_definitions) {
enhance_definitions_prompt = "If you have more knowledge of " + name2 + ", add to the character's lore and personality to enhance them but keep the Character Sheet's definitions absolute.";
}
let whole_prompt = [];
// If it's toggled, NSFW prompt goes first.
if (oai_settings.nsfw_first) {
whole_prompt = [nsfw_toggle_prompt, oai_settings.main_prompt, enhance_definitions_prompt, "\n\n", storyString]
}
else {
whole_prompt = [oai_settings.main_prompt, nsfw_toggle_prompt, enhance_definitions_prompt, "\n\n", storyString]
}
// Join by a space and replace placeholders with real user/char names
storyString = substituteParams(whole_prompt.join(" "))
let prompt_msg = { "role": "system", "content": storyString }
let examples_tosend = [];
let openai_msgs_tosend = [];
// todo: static value, maybe include in the initial context calculation
let new_chat_msg = { "role": "system", "content": "[Start a new chat]" };
let start_chat_count = countTokens([new_chat_msg]);
let total_count = countTokens([prompt_msg], true) + start_chat_count;
// The user wants to always have all example messages in the context
if (pin_examples) {
// first we send *all* example messages
// we don't check their token size: if the pinned examples alone exceed the context,
// the user shouldn't have selected this option (a warning might be worth adding later)
for (const element of openai_msgs_example) {
// get the current example block with multiple user/bot messages
let example_block = element;
// add the first message from the user to tell the model that it's a new dialogue
// TODO: instead of role user content use role system name example_user
// message from the user so the model doesn't confuse the context (maybe, I just think that this should be done)
if (example_block.length != 0) {
examples_tosend.push(new_chat_msg);
}
for (const example of example_block) {
// add all the messages from the example
examples_tosend.push(example);
}
}
total_count += countTokens(examples_tosend);
// go from newest message to oldest, because we want to delete the older ones from the context
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
let item_count = countTokens(item);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
openai_msgs_tosend.push(item);
total_count += item_count;
}
else {
// early break since if we still have more messages, they just won't fit anyway
break;
}
}
} else {
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
let item_count = countTokens(item);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
openai_msgs_tosend.push(item);
total_count += item_count;
}
else {
// early break since if we still have more messages, they just won't fit anyway
break;
}
}
console.log(total_count);
for (const example of openai_msgs_example) {
// get the current example block with multiple user/bot messages
let example_block = example;
for (let k = 0; k < example_block.length; k++) {
if (example_block.length == 0) { continue; }
let example_count = countTokens(example_block[k]);
// add all the messages from the example
if ((total_count + example_count + start_chat_count) < (this_max_context - oai_settings.openai_max_tokens)) {
if (k == 0) {
examples_tosend.push(new_chat_msg);
total_count += start_chat_count;
}
examples_tosend.push(example_block[k]);
total_count += example_count;
}
else { break; }
}
}
}
// reverse the messages array because we had the newest at the top to remove the oldest,
// now we want proper order
openai_msgs_tosend.reverse();
openai_msgs_tosend = [prompt_msg, ...examples_tosend, new_chat_msg, ...openai_msgs_tosend]
console.log("We're sending this:")
console.log(openai_msgs_tosend);
console.log(`Calculated the total context to be ${total_count} tokens`);
return openai_msgs_tosend;
}
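The returned array is flat and in proper chronological order; a sketch of its shape, reusing the hypothetical chat and example block from above:
// [ { role: "system", content: "<main prompt + NSFW prompt + definitions + storyString>" },
//   { role: "system", content: "[Start a new chat]" },   // inserted before each example block
//   { role: "user", content: "How are you?" },           // example messages, while they fit
//   { role: "assistant", content: "*smiles* Great!" },
//   { role: "system", content: "[Start a new chat]" },   // separates examples from the real chat
//   { role: "assistant", content: "Hello, Anon!" },      // chat history, oldest first
//   { role: "user", content: "Hi there." } ]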
async function sendOpenAIRequest(openai_msgs_tosend) {
const generate_data = {
"messages": openai_msgs_tosend,
// TODO: add a setting for choosing a custom model
"model": "gpt-3.5-turbo-0301",
"temperature": parseFloat(oai_settings.temp_openai),
"frequency_penalty": parseFloat(oai_settings.freq_pen_openai),
"presence_penalty": parseFloat(oai_settings.pres_pen_openai),
"max_tokens": oai_settings.openai_max_tokens,
"stream": oai_settings.stream_openai,
};
const generate_url = '/generate_openai';
const streaming = oai_settings.stream_openai;
const last_view_mes = count_view_mes;
const response = await fetch(generate_url, {
method: 'POST',
body: JSON.stringify(generate_data),
headers: {
'Content-Type': 'application/json',
"X-CSRF-Token": token,
}
});
const data = await response.json();
if (data.error) {
throw new Error(data);
}
return data.choices[0]["message"]["content"];
}
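Taken together with prepareOpenAIMessages(), the whole client-side flow reduces to two calls; this is how Generate() in script.js wires it up:
// Inside Generate(), when main_api === 'openai':
let prompt = prepareOpenAIMessages(name2, storyString); // assemble the messages array
sendOpenAIRequest(prompt)                               // POST /generate_openai
    .then(onSuccess)                                    // onSuccess receives the reply text as `data`
    .catch(onError);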
// Unused
function onStream(e, resolve, reject, last_view_mes) {
let end = false;
if (!oai_settings.stream_openai)
return;
let response = e.currentTarget.response;
if (response == "{\"error\":true}") {
reject('', 'error');
}
let eventList = response.split("\n");
let getMessage = "";
for (let event of eventList) {
if (!event.startsWith("data"))
continue;
if (event == "data: [DONE]") {
chat[chat.length - 1]['mes'] = getMessage;
$("#send_but").css("display", "block");
$("#loading_mes").css("display", "none");
saveChat();
end = true;
break;
}
let data = JSON.parse(event.substring(6));
// the first and last messages are undefined, protect against that
getMessage += data.choices[0]["delta"]["content"] || "";
}
if ($("#chat").children().filter(`[mesid="${last_view_mes}"]`).length == 0) {
chat[chat.length] = {};
chat[chat.length - 1]['name'] = name2;
chat[chat.length - 1]['is_user'] = false;
chat[chat.length - 1]['is_name'] = false;
chat[chat.length - 1]['send_date'] = Date.now();
chat[chat.length - 1]['mes'] = "";
addOneMessage(chat[chat.length - 1]);
}
let messageText = messageFormating($.trim(getMessage), name1);
$("#chat").children().filter(`[mesid="${last_view_mes}"]`).children('.mes_block').children('.mes_text').html(messageText);
let $textchat = $('#chat');
$textchat.scrollTop($textchat[0].scrollHeight);
if (end) {
resolve();
}
}
function countTokens(messages, full = false) {
if (!Array.isArray(messages)) {
messages = [messages];
}
let token_count = -1;
jQuery.ajax({
async: false,
type: 'POST', //
url: '/tokenize_openai', //
data: JSON.stringify(messages),
dataType: "json",
contentType: "application/json",
success: function (data) {
token_count = data.token_count;
}
});
if (!full) token_count -= 2;
return token_count;
}
function loadOpenAISettings(data, settings) {
if (settings.api_key_openai != undefined) {
oai_settings.api_key_openai = settings.api_key_openai;
$("#api_key_openai").val(oai_settings.api_key_openai);
}
openai_setting_names = data.openai_setting_names;
openai_settings = data.openai_settings;
openai_settings.forEach(function (item, i, arr) {
openai_settings[i] = JSON.parse(item);
});
$("#settings_perset_openai").empty();
let arr_holder = {};
openai_setting_names.forEach(function (item, i, arr) {
arr_holder[item] = i;
$('#settings_perset_openai').append(`<option value=${i}>${item}</option>`);
});
openai_setting_names = arr_holder;
oai_settings.preset_settings_openai = settings.preset_settings_openai;
$(`#settings_perset_openai option[value=${openai_setting_names[oai_settings.preset_settings_openai]}]`).attr('selected', true);
oai_settings.temp_openai = settings.temp_openai ?? 0.9;
oai_settings.freq_pen_openai = settings.freq_pen_openai ?? 0.7;
oai_settings.pres_pen_openai = settings.pres_pen_openai ?? 0.7;
oai_settings.stream_openai = settings.stream_openai ?? true;
oai_settings.openai_max_context = settings.openai_max_context ?? 4095;
oai_settings.openai_max_tokens = settings.openai_max_tokens ?? 300;
if (settings.nsfw_toggle !== undefined) oai_settings.nsfw_toggle = !!settings.nsfw_toggle;
if (settings.keep_example_dialogue !== undefined) oai_settings.keep_example_dialogue = !!settings.keep_example_dialogue;
if (settings.enhance_definitions !== undefined) oai_settings.enhance_definitions = !!settings.enhance_definitions;
if (settings.wrap_in_quotes !== undefined) oai_settings.wrap_in_quotes = !!settings.wrap_in_quotes;
if (settings.nsfw_first !== undefined) oai_settings.nsfw_first = !!settings.nsfw_first;
$('#stream_toggle').prop('checked', oai_settings.stream_openai);
$('#openai_max_context').val(oai_settings.openai_max_context);
$('#openai_max_context_counter').html(`${oai_settings.openai_max_context} Tokens`);
$('#openai_max_tokens').val(oai_settings.openai_max_tokens);
$('#nsfw_toggle').prop('checked', oai_settings.nsfw_toggle);
$('#keep_example_dialogue').prop('checked', oai_settings.keep_example_dialogue);
$('#enhance_definitions').prop('checked', oai_settings.enhance_definitions);
$('#wrap_in_quotes').prop('checked', oai_settings.wrap_in_quotes);
$('#nsfw_first').prop('checked', oai_settings.nsfw_first);
if (settings.main_prompt !== undefined) oai_settings.main_prompt = settings.main_prompt;
if (settings.nsfw_prompt !== undefined) oai_settings.nsfw_prompt = settings.nsfw_prompt;
$('#main_prompt_textarea').val(oai_settings.main_prompt);
$('#nsfw_prompt_textarea').val(oai_settings.nsfw_prompt);
$('#temp_openai').val(oai_settings.temp_openai);
$('#temp_counter_openai').text(Number(oai_settings.temp_openai).toFixed(2));
$('#freq_pen_openai').val(oai_settings.freq_pen_openai);
$('#freq_pen_counter_openai').text(Number(oai_settings.freq_pen_openai).toFixed(2));
$('#pres_pen_openai').val(oai_settings.pres_pen_openai);
$('#pres_pen_counter_openai').text(Number(oai_settings.pres_pen_openai).toFixed(2));
}
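For reference, a minimal sketch of the two arguments this function expects, based on the /getsettings route added to server.js below; the concrete values are hypothetical:
// `data` carries the preset files read from public/OpenAI Settings (raw JSON strings),
// `settings` is the parsed contents of public/settings.json:
const data = {
    openai_setting_names: ['Default'],
    openai_settings: ['{"temperature":0.9,"frequency_penalty":0.7,"presence_penalty":0.7}'],
};
const settings = { preset_settings_openai: 'Default', temp_openai: 0.9, api_key_openai: '' };
loadOpenAISettings(data, settings);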
async function getStatusOpen() {
if (is_get_status_openai) {
let data = { key: oai_settings.api_key_openai };
jQuery.ajax({
type: 'POST', //
url: '/getstatus_openai', //
data: JSON.stringify(data),
beforeSend: function () { },
cache: false,
dataType: "json",
contentType: "application/json",
success: function (data) {
if (!('error' in data))
setOnlineStatus('Valid');
resultCheckStatusOpen();
},
error: function (jqXHR, exception) {
setOnlineStatus('no_connection');
console.log(exception);
console.log(jqXHR);
resultCheckStatusOpen();
}
});
} else {
setOnlineStatus('no_connection');
}
}
function resultCheckStatusOpen() {
is_api_button_press_openai = false;
checkOnlineStatus();
$("#api_loading_openai").css("display", 'none');
$("#api_button_openai").css("display", 'inline-block');
}
$(document).ready(function () {
$(document).on('input', '#temp_openai', function () {
oai_settings.temp_openai = $(this).val();
$('#temp_counter_openai').text(Number($(this).val()).toFixed(2));
saveSettingsDebounced();
});
$(document).on('input', '#freq_pen_openai', function () {
oai_settings.freq_pen_openai = $(this).val();
$('#freq_pen_counter_openai').text(Number($(this).val()).toFixed(2));
saveSettingsDebounced();
});
$(document).on('input', '#pres_pen_openai', function () {
oai_settings.pres_pen_openai = $(this).val();
$('#pres_pen_counter_openai').text(Number($(this).val()).toFixed(2));
saveSettingsDebounced();
});
$(document).on('input', '#openai_max_context', function () {
oai_settings.openai_max_context = parseInt($(this).val());
$('#openai_max_context_counter').text(`${$(this).val()} Tokens`);
saveSettingsDebounced();
});
$(document).on('input', '#openai_max_tokens', function () {
oai_settings.openai_max_tokens = parseInt($(this).val());
saveSettingsDebounced();
});
$('#stream_toggle').change(function () {
oai_settings.stream_openai = !!$('#stream_toggle').prop('checked');
saveSettingsDebounced();
});
$('#nsfw_toggle').change(function () {
oai_settings.nsfw_toggle = !!$('#nsfw_toggle').prop('checked');
saveSettingsDebounced();
});
$('#enhance_definitions').change(function () {
oai_settings.enhance_definitions = !!$('#enhance_definitions').prop('checked');
saveSettingsDebounced();
});
$('#wrap_in_quotes').change(function () {
oai_settings.wrap_in_quotes = !!$('#wrap_in_quotes').prop('checked');
saveSettingsDebounced();
});
$('#nsfw_first').change(function () {
oai_settings.nsfw_first = !!$('#nsfw_first').prop('checked');
saveSettingsDebounced();
});
$("#settings_perset_openai").change(function () {
oai_settings.preset_settings_openai = $('#settings_perset_openai').find(":selected").text();
const preset = openai_settings[openai_setting_names[oai_settings.preset_settings_openai]];
oai_settings.temp_openai = preset.temperature;
oai_settings.freq_pen_openai = preset.frequency_penalty;
oai_settings.pres_pen_openai = preset.presence_penalty;
// probably not needed
$('#temp_counter_openai').text(oai_settings.temp_openai);
$('#freq_pen_counter_openai').text(oai_settings.freq_pen_openai);
$('#pres_pen_counter_openai').text(oai_settings.pres_pen_openai);
$('#temp_openai').val(oai_settings.temp_openai).trigger('input');
$('#freq_pen_openai').val(oai_settings.freq_pen_openai).trigger('input');
$('#pres_pen_openai').val(oai_settings.pres_pen_openai).trigger('input');
saveSettingsDebounced();
});
$("#api_button_openai").click(function () {
if ($('#api_key_openai').val() != '') {
$("#api_loading_openai").css("display", 'inline-block');
$("#api_button_openai").css("display", 'none');
oai_settings.api_key_openai = $.trim($('#api_key_openai').val());
saveSettingsDebounced();
is_get_status_openai = true;
is_api_button_press_openai = true;
getStatusOpen();
}
});
});

public/style.css

@@ -788,23 +788,14 @@ img[src*="user-slash-solid.svg"] {
#api_button:hover,
#api_button_novel:hover,
#api_button_textgenerationwebui {
#api_button_textgenerationwebui:hover {
background-color: green;
}
#api_loading,
#api_loading_textgenerationwebui {
img[src="img/load.svg"] {
width: 25px;
height: 25px;
display: none;
}
#api_loading_novel {
width: 25px;
height: 25px;
display: none;
}
#rm_characters_block {
@@ -1237,15 +1228,14 @@ input[type=search]:focus::-webkit-search-cancel-button {
}
/* ------ online status indicators and texts. 2 = KoboldAI, 3 = NovelAI, 4 = OpenAI / Text generation web UI ----------*/
#online_status2,
#online_status4 {
.online_status4 {
opacity: 0.5;
margin-top: 2px;
margin-bottom: 15px;
}
#online_status_indicator2,
#online_status_indicator4 {
.online_status_indicator4 {
border-radius: 7px;
width: 14px;
height: 14px;
@@ -1254,7 +1244,7 @@ input[type=search]:focus::-webkit-search-cancel-button {
}
#online_status_text2,
#online_status_text4 {
.online_status_text4 {
margin-left: 4px;
display: inline-block;
}

server.js

@@ -29,11 +29,15 @@ const autorun = config.autorun;
const enableExtensions = config.enableExtensions;
const listen = config.listen;
const axios = require('axios');
const tiktoken = require('@dqbd/tiktoken');
var Client = require('node-rest-client').Client;
var client = new Client();
var api_server = "http://0.0.0.0:5000";
var api_novelai = "https://api.novelai.net";
let api_openai = "https://api.openai.com/v1";
var main_api = "kobold";
var response_get_story;
@@ -51,6 +55,10 @@ var response_getstatus_novel;
var response_getlastversion;
var api_key_novel;
let response_generate_openai;
let response_getstatus_openai;
let api_key_openai;
//RossAscends: Added function to format dates used in files and chat timestamps to a humanized format.
//Mostly I wanted this to be for file names, but couldn't figure out exactly where the filename save code was as everything seemed to be connected.
//During testing, this performs the same as previous date.now() structure.
@@ -93,6 +101,7 @@ const directories = {
backgrounds: 'public/backgrounds',
novelAI_Settings: 'public/NovelAI Settings',
koboldAI_Settings: 'public/KoboldAI Settings',
openAI_Settings: 'public/OpenAI Settings',
};
// CSRF Protection //
@@ -851,6 +860,8 @@ app.post('/getsettings', jsonParser, (request, response) => { //Wintermute's cod
const koboldai_setting_names = [];
const novelai_settings = [];
const novelai_setting_names = [];
const openai_settings = [];
const openai_setting_names = [];
const settings = fs.readFileSync('public/settings.json', 'utf8', (err, data) => {
if (err) return response.sendStatus(500);
@@ -909,6 +920,30 @@ app.post('/getsettings', jsonParser, (request, response) => { //Wintermute's cod
novelai_setting_names.push(item.replace(/\.[^/.]+$/, ''));
});
//OpenAI
const files3 = fs
.readdirSync('public/OpenAI Settings')
.sort(
(a, b) =>
new Date(fs.statSync(`public/OpenAI Settings/${b}`).mtime) -
new Date(fs.statSync(`public/OpenAI Settings/${a}`).mtime)
);
files3.forEach(item => {
const file3 = fs.readFileSync(
`public/OpenAI Settings/${item}`,
'utf8',
(err, data) => {
if (err) return response.sendStatus(500);
return data;
}
);
openai_settings.push(file3);
openai_setting_names.push(item.replace(/\.[^/.]+$/, ''));
});
response.send({
settings,
koboldai_settings,
@@ -916,6 +951,8 @@ app.post('/getsettings', jsonParser, (request, response) => { //Wintermute's cod
world_names,
novelai_settings,
novelai_setting_names,
openai_settings,
openai_setting_names,
enable_extensions: enableExtensions,
});
});
@@ -1550,6 +1587,126 @@ app.post('/deletegroup', jsonParser, async (request, response) => {
return response.send({ ok: true });
});
/* OpenAI */
app.post("/getstatus_openai", jsonParser, function(request, response_getstatus_openai = response){
if(!request.body) return response_getstatus_openai.sendStatus(400);
api_key_openai = request.body.key;
const args = {
headers: { "Authorization": "Bearer "+api_key_openai}
};
client.get(api_openai+"/models",args, function (data, response) {
if(response.statusCode == 200){
console.log(data);
response_getstatus_openai.send(data);
}
if(response.statusCode == 401){
console.log('Access Token is incorrect.');
response_getstatus_openai.send({error: true});
}
if(response.statusCode == 500 || response.statusCode == 501 || response.statusCode == 503 || response.statusCode == 507){
console.log(data);
response_getstatus_openai.send({error: true});
}
}).on('error', function (err) {
response_getstatus_openai.send({error: true});
});
});
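A sketch of the contract this route exposes; getStatusOpen() above uses jQuery.ajax, so the fetch call and the key value here are illustrative only:
const res = await fetch('/getstatus_openai', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'X-CSRF-Token': token }, // token as used elsewhere in this commit
    body: JSON.stringify({ key: 'sk-...' }), // hypothetical API key
});
const data = await res.json();
// valid key: OpenAI's GET /v1/models payload, passed through verbatim
// invalid key or upstream failure: { error: true }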
app.post("/generate_openai", jsonParser, function(request, response_generate_openai){
if(!request.body) return response_generate_openai.sendStatus(400);
console.log(request.body);
const config = {
method: 'post',
url: api_openai + '/chat/completions',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + api_key_openai
},
data: {
"messages": request.body.messages,
"model": request.body.model,
"temperature": request.body.temperature,
"max_tokens": request.body.max_tokens,
"stream": request.body.stream,
"presence_penalty": request.body.presence_penalty,
"frequency_penalty": request.body.frequency_penalty,
"stop": request.body.stop,
"logit_bias": request.body.logit_bias
}
};
if (request.body.stream)
config.responseType = 'stream';
axios(config)
.then(function (response) {
if (response.status <= 299) {
if (request.body.stream) {
console.log("Streaming request in progress")
response.data.pipe(response_generate_openai);
response.data.on('end', function () {
console.log("Streaming request finished");
response_generate_openai.end();
});
} else {
console.log(response.data);
response_generate_openai.send(response.data);
}
} else if (response.status == 400) {
console.log('Validation error');
response_generate_openai.send({ error: true });
} else if (response.status == 401) {
console.log('Access Token is incorrect');
response_generate_openai.send({ error: true });
} else if (response.status == 402) {
console.log('An active subscription is required to access this endpoint');
response_generate_openai.send({ error: true });
} else if (response.status == 500 || response.status == 409) {
if (request.body.stream) {
response.data.on('data', chunk => {
console.log(chunk.toString());
});
} else {
console.log(response.data);
}
response_generate_openai.send({ error: true });
}
})
.catch(function (error) {
if(error.response){
if (request.body.stream) {
error.response.data.on('data', chunk => {
console.log(chunk.toString());
});
} else {
console.log(error.response.data);
}
}
response_generate_openai.send({ error: true });
});
});
const turbo_encoder = tiktoken.get_encoding("cl100k_base");
app.post("/tokenize_openai", jsonParser, function(request, response_tokenize_openai = response){
if(!request.body) return response_tokenize_openai.sendStatus(400);
let num_tokens = 0;
for (var msg of request.body) {
num_tokens += 4;
for (const [key, value] of Object.entries(msg)) {
num_tokens += turbo_encoder.encode(value).length;
if (key == "name") {
num_tokens += -1;
}
}
}
num_tokens += 2;
response_tokenize_openai.send({"token_count": num_tokens});
});
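The arithmetic here (+4 per message, -1 when a name field stands in for the role, +2 to prime the assistant's reply) follows OpenAI's published token-counting guidance for gpt-3.5-turbo-0301. An illustrative call, mirroring what countTokens() in openai.js sends:
const res = await fetch('/tokenize_openai', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify([{ role: 'user', content: 'Hello!' }]),
});
const { token_count } = await res.json();
// token_count ≈ 4 (message overhead) + tokens('user') + tokens('Hello!') + 2 (reply priming)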
// ** REST CLIENT ASYNC WRAPPERS **
function deleteAsync(url, args) {
return new Promise((resolve, reject) => {