joel memories: add tool-calling support and memory extraction to the OpenRouter AI provider

This commit is contained in:
2026-02-01 17:55:21 +01:00
parent c13ffc93c0
commit 0c0efa645a
22 changed files with 2463 additions and 304 deletions

View File

@@ -3,15 +3,21 @@
*/
import OpenAI from "openai";
import type { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources/chat/completions";
import { config } from "../../core/config";
import { createLogger } from "../../core/logger";
import type { AiProvider, AiResponse, AskOptions, MessageStyle } from "./types";
import type { AiProvider, AiResponse, AskOptions, AskWithToolsOptions, MessageStyle } from "./types";
import { JOEL_TOOLS, MEMORY_EXTRACTION_TOOLS, type ToolCall, type ToolContext } from "./tools";
import { executeTools } from "./tool-handlers";
// Module-scoped logger tagged with this provider's name
const logger = createLogger("AI:OpenRouter");
// Style classification options — the set of styles the classifier may pick from
const STYLE_OPTIONS: MessageStyle[] = ["story", "snarky", "insult", "explicit", "helpful"];
// Maximum tool call iterations to prevent infinite loops
// (each iteration is one round-trip: model requests tools → tools run → model re-reads)
const MAX_TOOL_ITERATIONS = 5;
export class OpenRouterProvider implements AiProvider {
private client: OpenAI;
@@ -61,6 +67,148 @@ export class OpenRouterProvider implements AiProvider {
}
}
/**
* Generate a response with tool calling support
* The AI can call tools (like looking up memories) during response generation
*/
async askWithTools(options: AskWithToolsOptions): Promise<AiResponse> {
const { prompt, systemPrompt, context, maxTokens, temperature } = options;
const messages: ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt },
{ role: "user", content: prompt },
];
let iterations = 0;
while (iterations < MAX_TOOL_ITERATIONS) {
iterations++;
try {
const completion = await this.client.chat.completions.create({
model: config.ai.model,
messages,
tools: JOEL_TOOLS,
tool_choice: "auto",
max_tokens: maxTokens ?? config.ai.maxTokens,
temperature: temperature ?? config.ai.temperature,
});
const choice = completion.choices[0];
const message = choice?.message;
if (!message) {
logger.warn("No message in completion");
return { text: "" };
}
// Check if the AI wants to call tools
if (message.tool_calls && message.tool_calls.length > 0) {
logger.debug("AI requested tool calls", {
count: message.tool_calls.length,
tools: message.tool_calls.map(tc => tc.function.name)
});
// Add the assistant's message with tool calls
messages.push(message);
// Parse and execute tool calls
const toolCalls: ToolCall[] = message.tool_calls.map((tc) => ({
id: tc.id,
name: tc.function.name,
arguments: JSON.parse(tc.function.arguments || "{}"),
}));
const results = await executeTools(toolCalls, context);
// Add tool results as messages
for (let i = 0; i < toolCalls.length; i++) {
messages.push({
role: "tool",
tool_call_id: toolCalls[i].id,
content: results[i].result,
});
}
// Continue the loop to get the AI's response after tool execution
continue;
}
// No tool calls - we have a final response
const text = message.content ?? "";
logger.debug("AI response generated", {
iterations,
textLength: text.length
});
return { text: text.slice(0, 1900) };
} catch (error: unknown) {
logger.error("Failed to generate response with tools", error);
throw error;
}
}
logger.warn("Max tool iterations reached");
return { text: "I got stuck in a loop thinking about that..." };
}
/**
 * Analyze a message to extract memorable information.
 *
 * Best-effort: asks the classification model to inspect one Discord message
 * and, via the memory-extraction tools, persist anything worth remembering
 * about the author. Failures are logged and swallowed — extraction must
 * never disrupt normal message handling.
 *
 * @param message    raw text of the Discord message to analyze
 * @param authorName display name used in the analysis prompt
 * @param context    tool context (carries the author's Discord user ID)
 */
async extractMemories(
  message: string,
  authorName: string,
  context: ToolContext
): Promise<void> {
  const systemPrompt = `You are analyzing a Discord message to determine if it contains any memorable or useful information about the user "${authorName}".
Look for:
- Personal information (name, age, location, job, hobbies)
- Preferences (likes, dislikes, favorites)
- Embarrassing admissions or confessions
- Strong opinions or hot takes
- Achievements or accomplishments
- Relationships or social information
- Recurring patterns or habits
If you find something worth remembering, use the extract_memory tool. Only extract genuinely interesting or useful information - don't save trivial things.
The user's Discord ID is: ${context.userId}`;

  try {
    const completion = await this.client.chat.completions.create({
      model: config.ai.classificationModel,
      messages: [
        { role: "system", content: systemPrompt },
        { role: "user", content: `Analyze this message for memorable content:\n\n"${message}"` },
      ],
      tools: MEMORY_EXTRACTION_TOOLS,
      tool_choice: "auto",
      max_tokens: 200,
      temperature: 0.3,
    });

    // Nothing worth saving — the model made no tool calls.
    const rawCalls = completion.choices[0]?.message?.tool_calls ?? [];
    if (rawCalls.length === 0) {
      return;
    }

    // Decode the requested calls and run them (persists the memories).
    const calls: ToolCall[] = rawCalls.map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      arguments: JSON.parse(tc.function.arguments || "{}"),
    }));
    await executeTools(calls, context);

    logger.debug("Memory extraction complete", {
      extracted: calls.length,
      authorName
    });
  } catch (error) {
    // Don't throw - memory extraction is non-critical
    logger.error("Memory extraction failed", error);
  }
}
/**
* Classify a message to determine the appropriate response style
*/