Source

helpers/ai_processor.js

import { env } from "@config/env";
import { globalLogger } from "@config/logger";
import OpenAI from "openai";
import Replicate from "replicate";
import Together from "together-ai";
/**
 * Utility wrapper around different LLM providers used by the platform.
 * @category Helpers
 */
export class AIProcessor {
    together;
    replicate;
    openai;
    llm;
    /**
     * Instantiate provider SDK clients from configuration.
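     * @example
     * // Hypothetical usage: assumes TOGETHER_API_KEY and OPENAI_API_KEY are set in
     * // the validated env config and REPLICATE_API_TOKEN in process.env.
     * const processor = new AIProcessor();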
     */
    constructor() {
        this.together = new Together({
            apiKey: env.TOGETHER_API_KEY,
        });
        // The Replicate client reads REPLICATE_API_TOKEN from the environment by default.
        this.replicate = new Replicate();
        this.openai = new OpenAI({
            apiKey: env.OPENAI_API_KEY,
        });
        /* Prefer TogetherAI when its API key is configured; otherwise fall back to OpenAI. */
        this.llm = env.TOGETHER_API_KEY ? this.together : this.openai;
    }
    /**
     * Creates a chat completion request to TogetherAI with a timeout.
     * @param {string} model Name of the LLM model to use for completion.
     * @param {string} prompt Prompt sent to the LLM.
     * @param {Record<string, unknown>} response_format Expected JSON shape of the response (currently unused; the request only asks for `type: "json_object"`).
     * @param {number} timeoutMs Maximum time in milliseconds before the request is aborted.
     * @returns {Promise<IAssistantBaseFormat>} Parsed LLM response.
     * @throws {Error} When the request fails, parsing fails, or the timeout elapses.
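     * @example
     * // Illustrative call; the model name and timeout are placeholder values.
     * const parsed = await processor.createTogetherCompletionWithTimeout(
     *     "meta-llama/Llama-3.3-70B-Instruct-Turbo",
     *     "Respond with a JSON object containing a `summary` field.",
     *     {},
     *     60_000,
     * );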
     */
    async createTogetherCompletionWithTimeout(model, prompt, response_format, timeoutMs) {
        const controller = new AbortController();
        const timeout = setTimeout(() => controller.abort(), timeoutMs);
        try {
            // const jsonSchema = zodToJsonSchema(response_format, { target: 'openAi' })
            globalLogger.info("Started to process chat completion using TogetherAI!");
            const completion = await this.together.chat.completions.create({
                model: model,
                messages: [{ role: "system", content: prompt }],
                // response_format: zodResponseFormat(response_format, response_format_schema_name),
                response_format: {
                    type: "json_object",
                    // schema: response_format,
                },
                temperature: env.OPENAI_MODEL_TEMPERATURE,
            }, {
                signal: controller.signal,
            });
            const content = completion.choices?.[0]?.message?.content;
            if (content === null || content === undefined) {
                throw new Error("Completion response contained no message content.");
            }
            // Extract the first JSON object from the raw response text.
            const match = content.match(/{.*}/s);
            if (!match) {
                throw new Error("Failed to match a JSON object in the completion response.");
            }
            return JSON.parse(match[0]);
        }
        finally {
            clearTimeout(timeout);
        }
    }
    /**
     * Creates a chat completion request to Replicate with a timeout.
     * @param {string} model Name of the LLM model to use for completion.
     * @param {string} prompt Prompt sent to the LLM.
     * @param {Record<string, unknown>} response_format Expected JSON shape of the response (currently unused by this provider).
     * @param {number} timeoutMs Maximum time in milliseconds before the request is aborted.
     * @returns {Promise<IAssistantBaseFormat>} Parsed LLM response.
     * @throws {Error} When the request fails, parsing fails, or the timeout elapses.
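     * @example
     * // Illustrative call; the model argument is currently ignored in favor of
     * // "deepseek-ai/deepseek-v3" (see the implementation below).
     * const parsed = await processor.createReplicateCompletionWithTimeout(
     *     "deepseek-ai/deepseek-v3",
     *     "Wrap your JSON answer in a ```json code fence.",
     *     {},
     *     120_000,
     * );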
     */
    async createReplicateCompletionWithTimeout(model, prompt, response_format, timeoutMs) {
        const controller = new AbortController();
        const timeout = setTimeout(() => controller.abort(), timeoutMs);
        try {
            // const jsonSchema = zodToJsonSchema(response_format, { target: 'openAi' })
            globalLogger.info("Started to process chat completion using ReplicateAI!");
            const input = {
                prompt: prompt,
                max_tokens: 4096,
            };
            const output = (await this.replicate.run("deepseek-ai/deepseek-v3", {
                input,
            }));
            // `replicate.run` returns an array of string chunks for text models;
            // join them into the full response string.
            const content = Array.isArray(output) ? output.join("") : "";
            if (!content) {
                throw new Error("Completion response contained no content.");
            }
            // Prefer a ```json fenced block, then a <format> tag; if neither is
            // present, assume the whole response is raw JSON.
            let match = content.match(/(?<=```json\s*)([\s\S]*?)(?=```)/g);
            if (!match) {
                match = content.match(/<format>\s*([\s\S]*?)\s*<\/format>/g);
                if (!match) {
                    return JSON.parse(content);
                }
            }
            // Extract JSON from first '{' to last '}'
            const matchedContent = match[0];
            const firstBrace = matchedContent.indexOf("{");
            const lastBrace = matchedContent.lastIndexOf("}");
            if (firstBrace !== -1 && lastBrace !== -1 && lastBrace > firstBrace) {
                const jsonString = matchedContent.substring(firstBrace, lastBrace + 1);
                return JSON.parse(jsonString);
            }
            return JSON.parse(matchedContent);
        }
        finally {
            clearTimeout(timeout);
        }
    }
    /**
     * Send a request to a configured LLM provider and retrieve the parsed response.
     * @param {string} messages Prompt or conversation payload sent to the LLM.
     * @param {(Record<string, unknown>)} responseFormat Expected JSON shape of the response.
     * @param {string|null|undefined} llmModel Optional LLM model override.
     * @param {number} retry Number of the current retry attempt.
     * @returns {Promise<IAssistantBaseFormat>} Parsed LLM response.
     * @throws {Error} When all retry attempts fail or the response is invalid.
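     * @example
     * // Illustrative usage; the prompt and shape hint are placeholder values.
     * const result = await processor.fetchLLMResponse(
     *     "Summarize the article below as JSON with `title` and `summary` fields.",
     *     { title: "string", summary: "string" },
     *     null,
     * );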
     */
    async fetchLLMResponse(messages, responseFormat, llmModel, retry = 0) {
        if (!this.llm) {
            throw new Error("LLM API is not configured. Please provide your API key.");
        }
        try {
            const model = llmModel || env.OPENAI_MODEL;
            globalLogger.info(`Using LLM model: ${model}`);
            globalLogger.info(`Sending messages to LLM: ${JSON.stringify(messages)}`);
            let completion;
            // The first attempt goes to Replicate; retries fall back to TogetherAI.
            if (retry < 1) {
                completion = await this.createReplicateCompletionWithTimeout(model, messages, responseFormat, 600_000);
            }
            else {
                completion = await this.createTogetherCompletionWithTimeout(model, messages, responseFormat, 600_000);
            }
            // Treat degenerate responses such as `[true]` or `undefined` as failures
            // so the catch block below can retry.
            if ((Array.isArray(completion) && completion.length === 1 && completion[0] === true) ||
                completion === undefined) {
                throw new Error(`The response is ${String(completion)}. Retrying LLM request.`);
            }
            return completion;
        }
        catch (error) {
            globalLogger.error(String(error));
            if (retry < 3) {
                // Wait two seconds before retrying.
                await new Promise((resolve) => setTimeout(resolve, 2000));
                globalLogger.warn("Preparing to retry LLM request.");
                return await this.fetchLLMResponse(messages, responseFormat, llmModel, retry + 1);
            }
            /* Give up after the initial attempt plus three retries */
            throw new Error("Unable to create a completion");
        }
    }
}
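
A minimal usage sketch, assuming an `@helpers` import alias that mirrors the `@config` alias used above; the prompt and response-shape hint are placeholders.

import { AIProcessor } from "@helpers/ai_processor";

const processor = new AIProcessor();
// The first attempt runs against Replicate; retries fall back to TogetherAI
// with a two-second pause between attempts (see fetchLLMResponse above).
const response = await processor.fetchLLMResponse(
    "Extract the key facts from the text below as a JSON object.",
    { facts: "string[]" },
    null,
);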