import { Metadata } from "@models/Metadata.model";
import { Prompt } from "@models/Prompt.model";
import { Intents, ModifyInvestigationIntents } from "@typez/intent/enums";
import { getLogger } from "@utils/asyncLocalStorage";
import yamlModule from "js-yaml";
import { AIProcessor } from "../helpers/ai_processor";
import { AssistantIntentDetectionFormat, AssistantModifyInvestigationIntentDetectionFormat, AssistantEditInvestigationFieldFormat, AssistantSummarizedMessageFormat, AssistantAddInvestigationFieldFormat, AssistantRemoveInvestigationFieldFormat, AssistantUnitDetectionFormat, AssistantLessonDetectionFormat, } from "../schemas/intentProcessor.validation";
/**
* Intent Processor Service
* @category Services
*/
export class IntentProcessorService {
// Shared AI processor used by every detection method to issue LLM requests.
aiProcessor;
/**
 * Constructor
 * Creates the AIProcessor instance reused across all detection calls.
 * @category Services
 */
constructor() {
    this.aiProcessor = new AIProcessor();
}
/**
* Detects the intent to determine which process should be executed.
* @category Services
* @param {string} message - The user's message from which the lesson must be detected.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @returns {Promise<Intents>} The detected intent based on the user's message.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async detectIntent(message, history) {
const logger = getLogger();
try {
logger.info(`Started to detecting intent for message: ${message}`);
const prompt = await Prompt.findOne({ name: "detect_intent" });
if (!prompt) {
throw new Error("Intent detection prompt not found.");
}
// const messages = [{ role: "system", content: prompt.template }];
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
// messages.push({ role: "user", content: message });
const promptTemplate = prompt.template.replace("{history}", JSON.stringify(chatHistory) || "-");
let retry = 0;
let response = null;
let intent;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantIntentDetectionFormat));
if (Array.isArray(response)) {
response = response[0];
}
if (response.intent === undefined ||
response.intent === null ||
!Object.values(Intents).includes(response.intent)) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to detect an intent: ${JSON.stringify(response)}. Message: ${message}`);
}
intent = response.intent;
logger.info(`Detected intent is: ${intent}`);
return intent;
}
catch (error) {
logger.error({ error: error instanceof Error ? error.message : "Unknown error" }, `Failed to detect intent for message: ${message}.`);
throw error;
}
}
/**
* Detects the intent to determine which modifications should be made to the current investigation.
* @category Services
* @param {string} message - The user's message from which the lesson must be detected.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @param {ICreateInvestigationDto | null} investigation - The current investigation.
* @returns {Promise<IModifyInvestigationIntentDetectionFormat>} The detected intent based on the user's message for modifying the investigation.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async detectModifyInvestigationIntent(message, history, investigation) {
const logger = getLogger();
try {
logger.info(`Started to detecting intent of modify investigation for message: ${message}`);
const prompt = await Prompt.findOne({ name: "detect_modify_investigation_intent" });
if (!prompt) {
throw new Error("Modify investigation's intent detection prompt not found.");
}
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
// messages.push({ role: "user", content: message });
const promptTemplate = prompt.template
.replace("{investigation}", JSON.stringify(investigation))
.replace("{history}", JSON.stringify(chatHistory) || "-");
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantModifyInvestigationIntentDetectionFormat));
if (response.response === undefined ||
response.response === null ||
("intents" in response.response &&
!response.response.intents.every((v) => Object.values(ModifyInvestigationIntents).includes(v)))) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Filed to detect investigation modification intent: ${JSON.stringify(response)}.`);
}
logger.info(`Response for detecting investigation modification intent is: ${JSON.stringify(response)}`);
return response;
}
catch (error) {
logger.error({ error: error instanceof Error ? error.message : "Unknown error" }, `Failed to detect investigation modification intent for message: ${message}.`);
throw error;
}
}
/**
* Detects a message that user wants to implement changes in the investigation.
* @category Services
* @param {string} message - The user's message from which the message about editing fields must be detected.
* @param {string} history - The chat history between the user and the assistant.
* @param {string} investigation - The provided investigation with yaml format.
* @param {string} investigationMetadata - The metadata of the investigation.
* @returns {Promise<IDetectEditableFieldsMessageFormat>} The response containing the message that user wants to implement changes in the investigation.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async detectMessageOfEditableFields(message, history, investigation, investigationMetadata) {
const logger = getLogger();
try {
logger.info(`Started to detect a message that user wants to implement changes in the investigation for message: ${message}`);
const prompt = await Prompt.findOne({ name: "detect_message_of_editable_fields" });
if (!prompt) {
throw new Error("detect_message_of_editable_fields prompt not found.");
}
const promptTemplate = prompt.template
.replace("{investigation}", investigation)
.replace("{guide}", investigationMetadata || "")
.replace("{history}", history || "-");
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantEditInvestigationFieldFormat));
if (response.response === undefined || response.response === null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to detect a message that user wants to implement changes in the investigation for message: ${message}. Response is: ${JSON.stringify(response)}.`);
}
logger.info(`Response for detect a message that user wants to implement changes in the investigation is: ${JSON.stringify(response)}`);
return response;
}
catch (error) {
logger.error({
message: `Failed to detect a message that user wants to implement changes in the investigation for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects which fields must be edited in the current investigation only in text fields by requesting the LLM.
* @category Services
* @param {string} message - The user's message.
* @param {ICreateInvestigationDto} investigation - The provided investigation without steps and objects with yaml format.
* @param {string} investigationMetadata - The metadata of the investigation.
* @returns {Promise<IDetectEditableFieldsFormat>} The response containing editable fields that must be done.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async detectEditableFieldsFromInvestigationTextFields(message, investigation, investigationMetadata) {
const logger = getLogger();
try {
logger.info(`Started to detect editable text fields in the investigation for message: ${message}`);
const prompt = await Prompt.findOne({ name: "detect_editable_plain_fields" });
if (!prompt) {
throw new Error("detect_editable_plain_fields prompt not found.");
}
const promptTemplate = prompt.template
.replace("{investigation}", JSON.stringify(investigation))
.replace("{guide}", investigationMetadata)
.replace("{userMessage}", message);
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantEditInvestigationFieldFormat));
if (response.response === undefined ||
(response.response != "-" && response.changes === undefined)) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to detect editable text fields in the investigation for message: ${message}. Response is: ${JSON.stringify(response)}.`);
}
logger.info(`Response for detect editable text fields in the investigation is: ${JSON.stringify(response)}`);
return response;
}
catch (error) {
logger.error({
message: `Failed to detect editable text fields in the investigation for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects which fields must be edited in the objects of the current investigation by requesting the LLM.
* @category Services
* @param {string} message - The user's message.
* @param {ICreateInvestigationDto} investigation - The provided investigation containing only objects with yaml format.
* @param {string} investigationMetadata - The metadata of the investigation.
* @returns {Promise<IDetectEditableFieldsFormat>} The response containing editable fields that must be done.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async detectEditableFieldsFromInvestigationObjects(message, investigation, investigationMetadata) {
const logger = getLogger();
try {
logger.info(`Started to detect editable objects in the investigation for message: ${message}`);
const prompt = await Prompt.findOne({ name: "detect_editable_objects_fields" });
if (!prompt) {
throw new Error("detect_editable_objects_fields prompt not found.");
}
const promptTemplate = prompt.template
.replace("{investigation}", JSON.stringify(investigation))
.replace("{guide}", investigationMetadata)
.replace("{userMessage}", message);
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantEditInvestigationFieldFormat));
if (response.response === undefined ||
(response.response != "-" && response.changes === undefined)) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to detect editable objects in the investigation for message: ${message}. Response is: ${JSON.stringify(response)}.`);
}
logger.info(`Response for detect editable objects in the investigation is: ${JSON.stringify(response)}`);
return response;
}
catch (error) {
logger.error({
message: `Failed to detect editable objects in the investigation for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects which fields must be edited in the steps of the current investigation by requesting the LLM.
* @category Services
* @param {string} message - The user's message.
* @param {ICreateInvestigationDto} investigation - The provided investigation containing only steps with yaml format.
* @param {string} investigationMetadata - The metadata of the investigation.
* @returns {Promise<IDetectEditableFieldsFormat>} The response containing editable fields that must be done.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async detectEditableFieldsFromInvestigationSteps(message, investigation, investigationMetadata) {
const logger = getLogger();
try {
logger.info(`Started to detect editable steps in the investigation for message: ${message}`);
const prompt = await Prompt.findOne({ name: "detect_editable_steps_fields" });
if (!prompt) {
throw new Error("detect_editable_steps_fields prompt not found.");
}
const promptTemplate = prompt.template
.replace("{investigation}", JSON.stringify(investigation))
.replace("{guide}", investigationMetadata)
.replace("{userMessage}", message);
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantEditInvestigationFieldFormat));
if (response.response === undefined ||
(response.response != "-" && response.changes === undefined)) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to detect editable steps in the investigation for message: ${message}. Response is: ${JSON.stringify(response)}.`);
}
logger.info(`Response for detect editable steps in the investigation is: ${JSON.stringify(response)}`);
return response;
}
catch (error) {
logger.error({
message: `Failed to detect editable steps in the investigation for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects which fields must be edited in the current investigation by requesting the LLM.
* @category Services
* @param {string} message - The user's message from which the lesson must be detected.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @param {ICreateInvestigationDto} investigation - The current investigation.
* @returns {Promise<IEditInvestigationFieldFormat>} The response containing the fields that must be edited.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async editInvestigationField(message, history, investigation) {
const logger = getLogger();
try {
logger.info(`Started to detect editing fields for message: ${message}`);
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
const yaml = yamlModule;
const unit = investigation?.unitNumberAndTitle?.split(":")[0]?.split(" ")[1];
const lesson = investigation?.lessonNumberAndTitle?.split(":")[0]?.split(" ")[1] || "0";
const lessonNumber = parseInt(lesson, 10);
const metadata = (await Metadata.findOne({
unit: unit,
lessonNumber: Number.isNaN(lessonNumber) ? undefined : lessonNumber,
}));
let investigationMetadataYaml;
if (metadata) {
const investigationMetadataObject = metadata.toObject({
versionKey: false,
transform: (doc, ret) => {
delete ret._id;
return ret;
},
});
investigationMetadataYaml = yaml.dump(investigationMetadataObject);
}
else {
investigationMetadataYaml = " ";
}
const detectedMessage = await this.detectMessageOfEditableFields(message, yaml.dump(chatHistory), yaml.dump(investigation), investigationMetadataYaml);
if ("question" in detectedMessage.response) {
return detectedMessage;
}
const { objects, steps, ...investigationTextFields } = investigation;
const editableFieldsResponses = await Promise.all([
await this.detectEditableFieldsFromInvestigationTextFields(detectedMessage.response.message, investigationTextFields, investigationMetadataYaml),
await this.detectEditableFieldsFromInvestigationObjects(detectedMessage.response.message, { objects: objects }, investigationMetadataYaml),
await this.detectEditableFieldsFromInvestigationSteps(detectedMessage.response.message, { steps: steps }, investigationMetadataYaml),
]);
let editableFields = {};
let changes = [];
for (const editableFieldsResponse of editableFieldsResponses) {
if (editableFieldsResponse.response != "-") {
Object.assign(editableFields, editableFieldsResponse.response ?? {});
changes.push(editableFieldsResponse.changes || "");
}
}
const responseChanges = await this.summarizeMessages(changes);
const response = {
response: {
editableFields: editableFields,
changes: responseChanges,
},
};
logger.info(`Response for editing fields detection is: ${JSON.stringify(response)}`);
return response;
}
catch (error) {
logger.error({
message: `Failed to detect editing fields for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects which fields must be added to the current investigation by requesting the LLM.
* @category Services
* @param {string} message - The user's message from which the lesson must be detected.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @param {ICreateInvestigationDto | null | undefined} investigation - The current investigation.
* @returns {Promise<IAddInvestigationFieldFormat>} The response containing the data for fields that must be added.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async addInvestigationField(message, history, investigation) {
const logger = getLogger();
try {
logger.info(`Started to detect adding fields for message: ${message}`);
const prompt = await Prompt.findOne({ name: "add_investigation_fields" });
if (!prompt) {
throw new Error("Add investigation field detection prompt not found.");
}
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
const yaml = yamlModule;
const unit = investigation?.unitNumberAndTitle?.split(":")[0]?.split(" ")[1];
const lesson = investigation?.lessonNumberAndTitle?.split(":")[0]?.split(" ")[1] || "0";
const lessonNumber = parseInt(lesson, 10);
const metadata = (await Metadata.findOne({
unit: unit,
lessonNumber: Number.isNaN(lessonNumber) ? undefined : lessonNumber,
}));
let investigationMetadataYaml;
if (metadata) {
const investigationMetadataObject = metadata.toObject({
versionKey: false,
transform: (doc, ret) => {
delete ret._id;
return ret;
},
});
investigationMetadataYaml = yaml.dump(investigationMetadataObject);
}
else {
investigationMetadataYaml = " ";
}
const promptTemplate = prompt.template
.replace("{investigation}", JSON.stringify(investigation))
.replace("{history}", yaml.dump(chatHistory) || "-")
.replace("{guide}", investigationMetadataYaml);
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantAddInvestigationFieldFormat));
if (response.response === undefined || response.response === null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to detect adding fields for message: ${message}. Response is: ${JSON.stringify(response)}.`);
}
logger.info(`Response for adding fields detection is: ${JSON.stringify(response)}`);
return response;
}
catch (error) {
logger.error({
message: `Failed to detect adding fields for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects which fields must be removed from the current investigation by requesting the LLM.
* @category Services
* @param {string} message - The user's message from which the lesson must be detected.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @param {ICreateInvestigationDto | null | undefined} investigation - The current investigation.
* @returns {Promise<IRemoveInvestigationFieldFormat>} The response containing the fields whose values must be removed.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async removeInvestigationField(message, history, investigation) {
const logger = getLogger();
try {
logger.info(`Started to detect removing fields for message: ${message}`);
const prompt = await Prompt.findOne({ name: "remove_investigation_fields" });
if (!prompt) {
throw new Error("Remove investigation field detection prompt not found.");
}
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
const promptTemplate = prompt.template
.replace("{investigation}", JSON.stringify(investigation))
.replace("{history}", JSON.stringify(chatHistory) || "-");
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantRemoveInvestigationFieldFormat));
if (response.response === undefined || response.response === null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to detect removing fields for message: ${message}. Response is: ${JSON.stringify(response)}.`);
}
logger.info(`Response for removing fields detection is: ${JSON.stringify(response)}`);
return response;
}
catch (error) {
logger.error({
message: `Failed to detect removing fields for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Summarizes multiple messages into a single message.
* @category Services
* @param {string[]} summarizingMessages - The messages from which the lesson must be detected.
* @returns {Promise<string>} The summarized message.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async summarizeMessages(summarizingMessages) {
const logger = getLogger();
try {
logger.info(`Started to summarizing messages: ${summarizingMessages.toString()}`);
const prompt = await Prompt.findOne({ name: "summarizing_message" });
if (!prompt) {
throw new Error("Message summarizing prompt not found.");
}
const promptTemplate = prompt.template.replace("{messages}", summarizingMessages.toString());
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantSummarizedMessageFormat));
if (response.summarizedMessage === undefined || response.summarizedMessage === null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to summarizing message for messages: ${JSON.stringify(summarizingMessages)}. Response is: ${JSON.stringify(response)}.`);
}
logger.info(`Summarized message is: ${JSON.stringify(response)}`);
return response.summarizedMessage;
}
catch (error) {
logger.error({
message: `Failed to summarizing fields for messages: ${JSON.stringify(summarizingMessages)}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects the unit of an investigation based on a user message.
* @category Services
* @param {string} message - The user's message from which the lesson must be detected.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @returns {Promise<IUnitDetectionFormat>} - The detected unit response.
* @throws {Error} If something goes wrong when requesting the LLM.
*/
async detectUnitFromMessage(message, history) {
const logger = getLogger();
try {
logger.info(`Started to detecting unit: ${message}`);
const prompt = await Prompt.findOne({ name: "detect_unit_from_message" });
if (!prompt) {
throw new Error("Unit detection prompt not found.");
}
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
const promptTemplate = prompt.template.replace("{history}", JSON.stringify(chatHistory) || "-");
logger.info(`Detect unit messages prompt is: ${promptTemplate}`);
let retry = 0;
let unitDetectionResponse = null;
while (retry < 3) {
unitDetectionResponse = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantUnitDetectionFormat));
if (unitDetectionResponse.question === undefined ||
unitDetectionResponse.question === null ||
unitDetectionResponse.unit === undefined ||
unitDetectionResponse.unit === null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !unitDetectionResponse) {
throw new Error(`Failed to detect unit from message: ${message}. Response is: ${JSON.stringify(unitDetectionResponse)}.`);
}
logger.info(`Response for unit detection is: ${JSON.stringify(unitDetectionResponse)}`);
return unitDetectionResponse;
}
catch (error) {
logger.error({
message: `Failed to detect unit from message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects the lesson containing investigation metadata based on a user message.
* @category Services
* @param {string} message - The user's message from which the lesson must be detected.
* @param {string} lessonData - Data containing information about all lessons for the detected lesson.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @returns {Promise<ILessonDetectionFormat>} - The detected lesson response.
* @throws {Error} If something goes wrong when requesting the LLM.
*/
async detectLessonWithInvestigationFromMessage(message, lessonData, history) {
const logger = getLogger();
try {
logger.info(`Started to detecting lesson with investigation: ${message}`);
const prompt = await Prompt.findOne({
name: "detect_lesson_with_investigation_from_message",
});
if (!prompt) {
throw new Error("Lesson with investigation detection prompt not found.");
}
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
const promptTemplate = prompt.template
.replace("{lessonAndNames}", lessonData)
.replace("{history}", JSON.stringify(chatHistory) || "-");
logger.info(`Detect lesson with investigation prompt is: ${promptTemplate}`);
let retry = 0;
let lessonDetectionResponse = null;
while (retry < 3) {
lessonDetectionResponse = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantLessonDetectionFormat));
if (lessonDetectionResponse.question === undefined ||
lessonDetectionResponse.question === null ||
lessonDetectionResponse.lesson === undefined ||
lessonDetectionResponse.lesson === null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !lessonDetectionResponse) {
throw new Error(`Failed to detect lesson with investigation: ${JSON.stringify(lessonDetectionResponse)}. Message: ${message}`);
}
logger.info(`Detected lesson with investigation is: ${JSON.stringify(lessonDetectionResponse)}`);
return lessonDetectionResponse;
}
catch (error) {
logger.error({
message: `Failed to detect lesson with investigation for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects the lesson not containing investigation metadata based on a user message.
* @category Services
* @param {string} message - The user's message from which the lesson must be detected.
* @param {string} lessonData - Data containing information about all lessons for the detected lesson.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @returns {Promise<ILessonDetectionFormat>} - The detected lesson response.
* @throws {Error} If something goes wrong when requesting the LLM.
*/
async detectLessonWithoutInvestigationFromMessage(message, lessonData, history) {
const logger = getLogger();
try {
logger.info(`Started to detecting lesson without investigation: ${message}`);
const prompt = await Prompt.findOne({
name: "detect_lesson_without_investigation_from_message",
});
if (!prompt) {
throw new Error("Lesson without investigation detection prompt not found.");
}
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
const promptTemplate = prompt.template
.replace("{lessonAndNames}", lessonData)
.replace("{history}", JSON.stringify(chatHistory) || "-");
logger.info(`Detect lesson without investigation prompt is: ${promptTemplate}`);
let retry = 0;
let lessonDetectionResponse = null;
while (retry < 3) {
lessonDetectionResponse = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantLessonDetectionFormat));
if (lessonDetectionResponse.question === undefined ||
lessonDetectionResponse.question === null ||
lessonDetectionResponse.lesson === undefined ||
lessonDetectionResponse.lesson === null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !lessonDetectionResponse) {
throw new Error(`Failed to detect lesson with investigation: ${JSON.stringify(lessonDetectionResponse)}. Message: ${message}`);
}
logger.info(`Detected lesson with investigation is: ${JSON.stringify(lessonDetectionResponse)}`);
return lessonDetectionResponse;
}
catch (error) {
logger.error({
message: `Failed to detect lesson with investigation for message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Detects if user change exists in the user message.
* @category Services
* @param {string} message - The user's message from which the existing user changes will be detected.
* @param {FormattedHistory} history - The chat history between the user and the assistant.
* @returns {Promise<boolean>} True if user change exists in the message, otherwise false.
* @throws {Error} If a required prompt is missing or something goes wrong when requesting the LLM.
*/
async detectUserChangeExist(message, history) {
const logger = getLogger();
try {
logger.info(`Started to detecting user changes in the message: ${message}`);
const prompt = await Prompt.findOne({ name: "detect_user_change_exist" });
if (!prompt) {
throw new Error("detect_user_change_exist prompt not found.");
}
const yaml = yamlModule;
const promptTemplate = prompt.template
.replace("{userMessage}", message)
.replace("{history}", yaml.dump(history));
let retry = 0;
let response = null;
while (retry < 3) {
response = (await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantSummarizedMessageFormat));
if (response.isUserChangeExists === undefined || response.isUserChangeExists === null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Failed to detect user changes for message: ${JSON.stringify(message)}. Response is: ${JSON.stringify(response)}.`);
}
logger.info(`Is user changes detected in the message is: ${JSON.stringify(response)}`);
if (typeof response.isUserChangeExists === "string") {
return response.isUserChangeExists !== "false";
}
return response.isUserChangeExists;
}
catch (error) {
logger.error({
message: `Failed to detect user changes for message: ${JSON.stringify(message)}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
}