Various improvements to stability and token counting.

maver 2023-06-19 19:26:38 +02:00
parent 2c36fbdc1d
commit b8b3dd4caf
2 changed files with 73 additions and 28 deletions

View File

@ -72,6 +72,7 @@ function PromptManagerModule() {
this.listElement = null;
this.activeCharacter = null;
this.tokenHandler = null;
this.tokenCache = 0;
this.error = null;
this.tryGenerate = () => { };
@ -221,10 +222,12 @@ PromptManagerModule.prototype.init = function (moduleConfiguration, serviceSetti
// Trigger re-render when token settings are changed
document.getElementById('openai_max_context').addEventListener('change', (event) => {
this.serviceSettings.openai_max_context = event.target.value;
if (this.activeCharacter) this.render();
});
document.getElementById('openai_max_tokens').addEventListener('change', (event) => {
this.serviceSettings.openai_max_tokens = event.target.value;
if (this.activeCharacter) this.render();
});
@ -351,6 +354,7 @@ PromptManagerModule.prototype.sanitizeServiceSettings = function () {
this.serviceSettings.prompt_manager_settings = Object.assign({}, defaultPromptManagerSettings);
}
// Assign an identifier to any prompt that doesn't have one yet; skip null entries
this.serviceSettings.prompts.forEach((prompt => prompt && (prompt.identifier = prompt.identifier || this.getUuidv4())));
};
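The prompt && guard tolerates null entries in the prompts array, which would otherwise throw when the identifier is assigned. A minimal sketch of the pattern, assuming a runtime with crypto.randomUUID() as a stand-in for the module's getUuidv4 helper:

// Sketch: assign missing identifiers while skipping null entries.
// crypto.randomUUID() stands in for the module's getUuidv4().
const prompts = [{ identifier: 'main' }, null, { content: 'no id yet' }];
prompts.forEach((prompt) => prompt && (prompt.identifier = prompt.identifier || crypto.randomUUID()));
// prompts[1] is skipped; prompts[2] now has a generated identifier.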
@ -501,7 +505,9 @@ PromptManagerModule.prototype.getPromptIndexById = function (identifier) {
PromptManagerModule.prototype.preparePrompt = function (prompt) {
const groupMembers = this.getActiveGroupCharacters();
if (0 < groupMembers.length) return { role: prompt.role || 'system', content: substituteParams(prompt.content ?? '', null, null, groupMembers.join(', ')) };
const preparedPrompt = new Prompt(prompt);
preparedPrompt.content = substituteParams(prompt.content);
return preparedPrompt;
}
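preparePrompt now substitutes parameters on a Prompt copy instead of writing the substituted text back into the object it was handed, so the stored prompt keeps its raw template between generations. A small sketch of the difference, with simplified stand-ins for Prompt and substituteParams:

// Sketch: substitute on a copy so the source prompt keeps its template.
// Prompt and substituteParams are simplified stand-ins here.
class Prompt { constructor({ role, content, identifier }) { Object.assign(this, { role, content, identifier }); } }
const substituteParams = (text) => text.replace('{{user}}', 'Alice');

const stored = { role: 'system', content: 'Greet {{user}}.', identifier: 'greeting' };
const prepared = new Prompt(stored);
prepared.content = substituteParams(stored.content);
// stored.content is still 'Greet {{user}}.'; prepared.content is 'Greet Alice.'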
@ -563,6 +569,8 @@ PromptManagerModule.prototype.populateTokenHandler = function(messageCollection)
messageCollection.getCollection().forEach((message) => {
counts[message.identifier] = message.getTokens();
});
this.tokenCache = this.tokenHandler.getTotal();
}
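The running total is snapshotted into this.tokenCache whenever the token handler is repopulated after a generation, so later re-renders (for example after a settings change) can show the last real total without reaching into a token handler that may not have been filled yet. A trivial sketch of the idea:

// Sketch: sum once when the counts change, read the cached total elsewhere.
const counts = { main: 120, chatHistory: 900 };
const tokenCache = Object.values(counts).reduce((sum, n) => sum + n, 0); // 1020
console.log(tokenCache); // render code reads the cache instead of re-summing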
// Empties, then re-assembles the container containing the prompt list.
@ -579,7 +587,7 @@ PromptManagerModule.prototype.renderPromptManager = function () {
</div>
`;
const activeTokenInfo = `<span class="tooltip fa-solid fa-info-circle" title="Including tokens from hidden prompts"></span>`;
const totalActiveTokens = this.tokenCache;
promptManagerDiv.insertAdjacentHTML('beforeend', `
<div class="range-block-title" data-i18n="Prompts">
@ -654,6 +662,8 @@ PromptManagerModule.prototype.renderPromptManagerListItems = function () {
`;
this.getPromptsForCharacter(this.activeCharacter).forEach(prompt => {
if (!prompt) return;
const advancedEnabled = this.serviceSettings.prompt_manager_settings.showAdvancedSettings;
let draggableEnabled = true;
if (prompt.system_prompt && !advancedEnabled) draggableEnabled = false;
@ -670,13 +680,22 @@ PromptManagerModule.prototype.renderPromptManagerListItems = function () {
const markerClass = prompt.marker ? `${prefix}prompt_manager_marker` : '';
const tokens = this.tokenHandler?.getCounts()[prompt.identifier] ?? 0;
// Warn the user if the chat history gets less than 40% of the token budget
// The warning is only evaluated once at least 90% of the token budget is used up
let warningClass = '';
let warningTitle = '';
const tokenBudget = this.serviceSettings.openai_max_context - this.serviceSettings.openai_max_tokens;
const tokenThreshold = tokenBudget * 0.9;
if (this.tokenCache >= tokenThreshold &&
'chatHistory' === prompt.identifier) {
const warningThreshold = tokenBudget * 0.40;
const dangerThreshold = tokenBudget * 0.20;
if (tokens <= dangerThreshold) {
warningClass = 'fa-solid tooltip fa-triangle-exclamation text_danger';
warningTitle = 'Very little of your chat history is being sent; consider deactivating some other prompts.';
} else if (tokens <= warningThreshold) {
warningClass = 'fa-solid tooltip fa-triangle-exclamation text_warning';
warningTitle = 'Only a few messages\' worth of chat history is being sent.';
}
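Worked numbers for the new thresholds, assuming an illustrative 4096-token context with 300 tokens reserved for the response (the real values come from the user's settings):

// Illustrative numbers only.
const openai_max_context = 4096;
const openai_max_tokens = 300;               // reserved for the response
const tokenBudget = openai_max_context - openai_max_tokens; // 3796
const tokenThreshold = tokenBudget * 0.9;    // 3416.4: warnings only kick in past this usage
const warningThreshold = tokenBudget * 0.40; // 1518.4: chatHistory at or below this warns
const dangerThreshold = tokenBudget * 0.20;  // 759.2: at or below this shows the danger style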

View File

@ -370,31 +370,33 @@ function formatWorldInfo(value) {
/**
* Populates the chat history of the conversation.
*
* @param {PromptCollection} prompts - Collection of prompts, indexed by their identifiers.
* @param {ChatCompletion} chatCompletion - An instance of ChatCompletion class that will be populated with the prompts.
*/
function populateChatHistory(prompts, chatCompletion) {
// Chat History
chatCompletion.add(new MessageCollection('chatHistory'), prompts.index('chatHistory'));
const mainChat = selected_group ? '[Start a new group chat. Group members: ${names}]' : '[Start a new Chat]';
const mainChatMessage = new Message('system', mainChat, 'newMainChat');
chatCompletion.reserveBudget(mainChatMessage);
// Insert chat messages as long as there is budget available
[...openai_msgs].reverse().every((prompt, index) => {
const chatMessage = new Message(prompt.role, prompt.content, 'chatHistory-' + index);
if (chatCompletion.canAfford(chatMessage)) chatCompletion.insertAtStart(chatMessage, 'chatHistory');
else return false;
return true;
});
chatCompletion.freeBudget(mainChatMessage);
chatCompletion.insertAtStart(mainChatMessage, 'chatHistory');
}
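The rewritten populateChatHistory reserves the chat-start message's tokens up front, fills the history newest-first until the budget runs out, then releases the reservation and prepends the chat-start message, which is therefore always guaranteed to fit. A self-contained sketch of the same strategy, with string length standing in for token counts:

// Sketch of reserve / fill newest-first / free / prepend.
let budget = 50;
const cost = (msg) => msg.length;        // stand-in for getTokens()
const header = '[Start a new Chat]';
const history = ['oldest', 'middle message', 'newest message'];

budget -= cost(header);                  // reserveBudget(header)
const kept = [];
[...history].reverse().every((msg) => {  // walk newest to oldest
if (cost(msg) > budget) return false;    // stop at the first unaffordable message
kept.unshift(msg);                       // insertAtStart keeps chronological order
budget -= cost(msg);
return true;
});
budget += cost(header);                  // freeBudget(header)
kept.unshift(header);                    // the reserved tokens cover this insert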
/**
* This function populates the dialogue examples in the conversation.
*
* @param {PromptCollection} prompts - Collection of prompts, indexed by their identifiers.
* @param {ChatCompletion} chatCompletion - An instance of ChatCompletion class that will be populated with the prompts.
*/
function populateDialogueExamples(prompts, chatCompletion) {
@ -532,7 +534,9 @@ async function prepareOpenAIMessages({
} = {}) {
const prompts = promptManager.getPromptCollection();
const chatCompletion = new ChatCompletion();
const userSettings = promptManager.serviceSettings;
chatCompletion.setTokenBudget(userSettings.openai_max_context, userSettings.openai_max_tokens);
if (power_user.console_log_prompts) chatCompletion.enableLogging();
@ -555,12 +559,13 @@ async function prepareOpenAIMessages({
// Tavern Extras - Summary
const summary = extensionPrompts['1_memory'];
if (summary && summary.content) mappedPrompts.push({role: 'system', content: summary.content, identifier: 'summary'});
// Author's Note
const authorsNote = extensionPrompts['2_floating_prompt'];
if (authorsNote && authorsNote.content) mappedPrompts.push({role: 'system', content: authorsNote.content, identifier: 'authorsNote'});
// Create prompt objects and substitute markers
mappedPrompts.forEach((prompt) => {
const newPrompt = promptManager.preparePrompt(prompt);
const markerIndex = prompts.index(prompt.identifier);
@ -570,7 +575,7 @@ async function prepareOpenAIMessages({
});
// Allow subscribers to manipulate the prompts object
eventSource.emit(event_types.OAI_BEFORE_CHATCOMPLETION, prompts);
try {
populateChatCompletion(prompts, chatCompletion, {bias, quietPrompt, type});
@ -583,10 +588,10 @@ async function prepareOpenAIMessages({
chatCompletion.log('Unexpected error:');
chatCompletion.log(error);
}
} finally {
promptManager.populateTokenHandler(chatCompletion.getMessages());
}
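Moving populateTokenHandler into a finally block means the token display is refreshed even when population throws (for example when the token budget is exceeded), instead of silently keeping stale counts. A trivial sketch of the guarantee:

// Sketch: finally runs on both the success and the error path.
try {
throw new Error('token budget exceeded');
} catch (error) {
console.log('handled:', error.message);
} finally {
console.log('token counts refreshed either way');
}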
const chat = chatCompletion.getChat();
openai_messages_count = chat.filter(x => x.role === "user" || x.role === "assistant").length;
@ -1209,8 +1214,12 @@ class ChatCompletion {
return this.messages;
}
setTokenBudget(context, response) {
console.log(`Context size: ${context}`);
console.log(`Response size: ${response}`);
this.tokenBudget = context - response;
console.log(`Token budget: ${this.tokenBudget}`);
}
@ -1230,13 +1239,23 @@ class ChatCompletion {
return this;
}
insertAtStart(message, identifier) {
this.insert(message, identifier, 'start');
}
insertAtEnd(message, identifier) {
this.insert(message, identifier, 'end');
}
insert(message, identifier, position = 'end') {
this.validateMessage(message);
this.checkTokenBudget(message, message.identifier);
const index = this.findMessageIndex(identifier);
if (message.content) {
if ('start' === position) this.messages.collection[index].collection.unshift(message);
else if ('end' === position) this.messages.collection[index].collection.push(message);
this.decreaseTokenBudgetBy(message.getTokens());
this.log(`Inserted ${message.identifier} into ${identifier}. Remaining tokens: ${this.tokenBudget}`);
}
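insert() is now position-aware, with insertAtStart and insertAtEnd as thin wrappers; the default 'end' keeps existing call sites behaving exactly as before. A reduced sketch of the dispatch on a plain array:

// Sketch of the position parameter on a plain array collection.
const collection = ['b'];
function insertAt(item, position = 'end') {
if ('start' === position) collection.unshift(item);
else if ('end' === position) collection.push(item);
}
insertAt('c');          // insertAtEnd equivalent:  ['b', 'c']
insertAt('a', 'start'); // insertAtStart equivalent: ['a', 'b', 'c']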
@ -1299,6 +1318,13 @@ class ChatCompletion {
}
}
reserveBudget(message) { this.decreaseTokenBudgetBy(message.getTokens()); }
freeBudget(message) { this.increaseTokenBudgetBy(message.getTokens()); }
increaseTokenBudgetBy(tokens) {
this.tokenBudget += tokens;
}
decreaseTokenBudgetBy(tokens) {
this.tokenBudget -= tokens;
}
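reserveBudget and freeBudget are deliberately symmetric: a reservation earmarks a message's tokens without inserting it, and it must be freed before the message is actually inserted, because insert() deducts the tokens itself. This is exactly the dance populateChatHistory performs above; skipping the freeBudget call would count the message twice. The arithmetic, as a runnable sketch with illustrative numbers:

// Budget bookkeeping around a deferred insert (10-token message, 100-token budget).
let tokenBudget = 100;
const headerTokens = 10;
tokenBudget -= headerTokens; // reserveBudget: 90 left for everything else
tokenBudget += headerTokens; // freeBudget: reservation released, back to 100
tokenBudget -= headerTokens; // insert() deducts once more: 90, counted exactly once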