import { AIProcessor } from "@helpers/ai_processor";
import { formatHistory } from "@helpers/history";
import { Prompt } from "@models/Prompt.model";
import { IntentProcessorService } from "@services/intentProcessor.service";
import { Intents } from "@typez/intent/enums";
import { getLogger } from "@utils/asyncLocalStorage";
import { getLessonData, getUnitNames } from "@utils/lessonsFactory";
import { AssistantDefaultResponseFormat } from "schemas/assistant.validation";
import { ClassificatorService } from "./classificator.service";
import { convertInvestigationFromModel } from "../helpers/investigation";
/**
* Assistant Service
* @category Services
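*
* Usage sketch (illustrative only; the `message`, `chatHistory`, `investigation` and
* `lessonWithoutInvestigation` values and the surrounding request-handling code are
* assumptions, not defined in this file):
* @example
* const assistant = new AssistantService();
* const reply = await assistant.processMessage(message, chatHistory, investigation, lessonWithoutInvestigation);
* console.log(reply.intent, reply.message);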
*/
export class AssistantService {
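// Collaborator services: LLM access, response classification, and intent detection (created in the constructor).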
aiProcessor;
classificator;
intentProcessor;
/**
* Constructor
* @category Services
*/
constructor() {
this.aiProcessor = new AIProcessor();
this.classificator = new ClassificatorService();
this.intentProcessor = new IntentProcessorService();
}
/**
* Default message processor, used when the user is not asking to generate an investigation (the ANOTHER_INTENT and GET_INVESTIGATION_INFO cases).
* @category Services
* @param {string} message - The user's message.
* @param {IFormattedHistory[]} history - The chat history between the user and the assistant.
* @param {string} lessonDataWithInvestigation - Lesson data (including its investigation) for the unit detected from the user's message, if any.
* @param {ICreateInvestigationDto} investigation - The current investigation, if it exists.
* @returns {Promise<string | null>} The AI Assistant's response.
* @throws {Error} If the default prompt is missing or the LLM request fails.
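* @example
* // Illustrative only; `assistantService`, `formattedHistory`, `lessonData` and
* // `investigationDto` are hypothetical values supplied by the caller, not by this file.
* const text = await assistantService.defaultMessageProcessor("What does this unit cover?", formattedHistory, lessonData, investigationDto);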
*/
async defaultMessageProcessor(message, history, lessonDataWithInvestigation, investigation) {
const logger = getLogger();
try {
logger.info("Default processor is working...");
const prompt = await Prompt.findOne({ name: "default_prompt" });
if (!prompt) {
throw new Error("Default message processing prompt not found.");
}
let chatHistory = [];
if (history) {
chatHistory.push(...history);
}
chatHistory.push({ role: "user", content: message });
const unitAndNames = await getUnitNames();
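// Fill the prompt template placeholders with the current investigation, lesson data, unit names, and chat history.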
const promptTemplate = prompt.template
.replace("{investigation}", JSON.stringify(investigation) || "")
.replace("{unitData}", JSON.stringify(lessonDataWithInvestigation) || "")
.replace("{unitAndNames}", unitAndNames || "")
.replace("{history}", JSON.stringify(chatHistory) || "");
let retry = 0;
let response = null;
while (retry < 3) {
response = await this.aiProcessor.fetchLLMResponse(promptTemplate, AssistantDefaultResponseFormat);
if (response?.responseToUser == null) {
retry += 1;
}
else {
break;
}
}
if (retry === 3 || !response) {
throw new Error(`Default processor failed to respond to message: ${message}. Response was: ${JSON.stringify(response)}.`);
}
logger.info(`Default processor response for message: ${message} is: ${JSON.stringify(response)}.`);
return response.responseToUser;
}
catch (error) {
logger.error({
message: `Default processor failed to respond to message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
/**
* Processes messages sent by the user.
* @category Services
* @param {string} message - The user's message; the intent (and, when needed, the lesson) is detected from it.
* @param {IChatHistory[]} chatHistory - The chat history between the user and the assistant.
* @param {IInvestigation} investigation - The current investigation, if it exists.
* @param {string} lessonWithoutInvestigation - Lesson data that does not contain an investigation.
* @returns {Promise<IAssistantResponseFormat>} The processed response to the user's message.
* @throws {Error} If something goes wrong when processing the message.
*/
async processMessage(message, chatHistory, investigation, lessonWithoutInvestigation) {
try {
const formattedHistory = formatHistory(chatHistory);
const intent = await this.intentProcessor.detectIntent(message, formattedHistory);
let classificatorResponse;
let assistantResponse;
let investigationDto;
if (investigation) {
investigationDto = convertInvestigationFromModel(investigation);
}
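// Route by detected intent: general conversation, investigation info requests, or everything else via the classificator.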
if (intent === Intents.ANOTHER_INTENT) {
const resp = await this.intentProcessor.detectUnitFromMessage(message, formattedHistory);
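// The unit detector uses "-" as a sentinel: a real question is sent back to the user, and a real unit triggers a lesson lookup.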
if (resp.question != "-") {
return { message: resp.question, intent: intent };
}
let lessonDataWithInvestigation;
if (resp.unit != "-") {
lessonDataWithInvestigation = await getLessonData(resp.unit);
}
else {
lessonDataWithInvestigation = null;
}
const messageResponse = await this.defaultMessageProcessor(message, formattedHistory, lessonDataWithInvestigation, investigationDto);
assistantResponse = { message: messageResponse, intent: intent };
}
else if (intent === Intents.GET_INVESTIGATION_INFO) {
const messageResponse = await this.defaultMessageProcessor(message, formattedHistory, "-", investigationDto);
assistantResponse = { message: messageResponse, intent: intent };
}
else {
const classificatorData = {
message: message,
intent: intent,
history: formattedHistory,
lessonWithoutInvestigation: lessonWithoutInvestigation,
};
if (investigationDto) {
classificatorData.investigationModel = investigation;
classificatorData.investigationDto = investigationDto;
}
classificatorResponse = await this.classificator.process(classificatorData);
assistantResponse = {
message: classificatorResponse.message,
metadata: classificatorResponse.data,
intent: intent,
lessonWithoutInvestigation: classificatorResponse.lessonWithoutInvestigation,
};
}
return assistantResponse;
}
catch (error) {
getLogger().error({
message: `Failed to process message: ${message}.`,
error: error instanceof Error ? error.message : "Unknown error",
});
throw error;
}
}
}