From e2f69e68cdc85e498c540c6cae357d691fe2ddff Mon Sep 17 00:00:00 2001 From: Eric Date: Sun, 1 Feb 2026 16:55:14 +0100 Subject: [PATCH] update openrouter --- src/core/config.ts | 11 ++- src/features/joel/index.ts | 2 +- src/features/joel/personalities.ts | 42 +++++++++++ src/features/joel/responder.ts | 20 ++++-- src/services/ai/index.ts | 19 +++-- src/services/ai/openrouter.ts | 107 +++++++++++++++++++++++++++++ src/services/ai/replicate.ts | 63 ----------------- src/services/ai/types.ts | 10 +++ 8 files changed, 199 insertions(+), 75 deletions(-) create mode 100644 src/services/ai/openrouter.ts delete mode 100644 src/services/ai/replicate.ts diff --git a/src/core/config.ts b/src/core/config.ts index 24c3262..839c2e2 100644 --- a/src/core/config.ts +++ b/src/core/config.ts @@ -8,8 +8,9 @@ interface BotConfig { token: string; }; ai: { - replicateApiToken: string; + openRouterApiKey: string; model: string; + classificationModel: string; maxTokens: number; temperature: number; }; @@ -42,10 +43,14 @@ export const config: BotConfig = { token: getEnvOrThrow("DISCORD_TOKEN"), }, ai: { - replicateApiToken: getEnvOrThrow("REPLICATE_API_TOKEN"), + openRouterApiKey: getEnvOrThrow("OPENROUTER_API_KEY"), model: getEnvOrDefault( "AI_MODEL", - "lucataco/dolphin-2.9-llama3-8b:ee173688d3b8d9e05a5b910f10fb9bab1e9348963ab224579bb90d9fce3fb00b" + "meta-llama/llama-3.1-70b-instruct" + ), + classificationModel: getEnvOrDefault( + "AI_CLASSIFICATION_MODEL", + "meta-llama/llama-3.1-8b-instruct:free" ), maxTokens: parseInt(getEnvOrDefault("AI_MAX_TOKENS", "500")), temperature: parseFloat(getEnvOrDefault("AI_TEMPERATURE", "1.2")), diff --git a/src/features/joel/index.ts b/src/features/joel/index.ts index 29ffec5..f8c5ce1 100644 --- a/src/features/joel/index.ts +++ b/src/features/joel/index.ts @@ -5,4 +5,4 @@ export { joelResponder } from "./responder"; export { getRandomMention } from "./mentions"; export { TypingIndicator } from "./typing"; -export { personalities, getPersonality } from 
"./personalities"; +export { personalities, getPersonality, buildStyledPrompt, STYLE_MODIFIERS } from "./personalities"; diff --git a/src/features/joel/personalities.ts b/src/features/joel/personalities.ts index 9362b92..23d4323 100644 --- a/src/features/joel/personalities.ts +++ b/src/features/joel/personalities.ts @@ -2,11 +2,39 @@ * Joel personality prompts */ +import type { MessageStyle } from "../../services/ai"; + export interface Personality { name: string; buildSystemPrompt: (author: string, memoryContext?: string) => string; } +/** + * Style-specific prompt modifiers + */ +export const STYLE_MODIFIERS: Record<MessageStyle, string> = { + story: `The user wants a story. Tell an engaging, creative story with your brutal Joel twist. +Be creative and vivid in your storytelling, but keep it relatively short (2-3 paragraphs max). +Include dark humor, unexpected twists, and your signature unhinged personality.`, + + snarky: `Be extra sarcastic and witty. Use cutting humor and clever insults. +Mock their intelligence and make fun of everything they say. +Respond with biting sarcasm that makes them question their life choices.`, + + insult: `Go full attack mode. Be absolutely brutal and merciless with your insults. +Target everything about them - their message, their existence, their choices. +Hold nothing back. This is your moment to truly destroy them verbally.`, + + explicit: `The user wants explicit/adult content. Lean into it completely. +Be vulgar, graphic, and completely unfiltered. No limits. +Describe things in explicit detail without any censorship.`, + + helpful: `Even when being helpful, maintain your Joel personality. +Answer their question but slip in insults and condescending remarks. +Make them feel stupid for needing to ask while still giving useful information. 
+Be reluctantly helpful, like you're doing them a huge favor.`, +}; + /** * Default Joel personality - unhinged and brutal */ @@ -34,6 +62,20 @@ export const defaultPersonality: Personality = { }, }; +/** + * Build a system prompt with style modifier applied + */ +export function buildStyledPrompt( + author: string, + style: MessageStyle, + memoryContext?: string +): string { + const basePrompt = defaultPersonality.buildSystemPrompt(author, memoryContext); + const styleModifier = STYLE_MODIFIERS[style]; + + return `${basePrompt}\n\n=== CURRENT STYLE: ${style.toUpperCase()} ===\n${styleModifier}`; +} + /** * Loving personality (for special occasions) */ diff --git a/src/features/joel/responder.ts b/src/features/joel/responder.ts index 5377774..6f0c43e 100644 --- a/src/features/joel/responder.ts +++ b/src/features/joel/responder.ts @@ -6,9 +6,9 @@ import type { Message } from "discord.js"; import type { BotClient } from "../../core/client"; import { config } from "../../core/config"; import { createLogger } from "../../core/logger"; -import { getAiService } from "../../services/ai"; +import { getAiService, type MessageStyle } from "../../services/ai"; import { memoryRepository } from "../../database"; -import { defaultPersonality } from "./personalities"; +import { buildStyledPrompt } from "./personalities"; import { getRandomMention } from "./mentions"; import { TypingIndicator } from "./typing"; @@ -82,11 +82,15 @@ export const joelResponder = { const author = message.author.displayName; const userId = message.author.id; + // Classify the message to determine response style + const style = await this.classifyMessage(message.cleanContent); + logger.debug("Message style classified", { style, content: message.cleanContent.slice(0, 50) }); + // Build memory context const memoryContext = await this.buildMemoryContext(userId, author); - // Build system prompt - const systemPrompt = defaultPersonality.buildSystemPrompt(author, memoryContext); + // Build system prompt with 
style + const systemPrompt = buildStyledPrompt(author, style, memoryContext); // Get reply context if this is a reply let prompt = message.cleanContent; @@ -103,6 +107,14 @@ return response.text || null; }, + /** + * Classify a message to determine response style + */ + async classifyMessage(content: string): Promise<MessageStyle> { + const ai = getAiService(); + return ai.classifyMessage(content); + }, + /** * Build memory context for personalized attacks */ diff --git a/src/services/ai/index.ts b/src/services/ai/index.ts index 95bf48d..2afa6c9 100644 --- a/src/services/ai/index.ts +++ b/src/services/ai/index.ts @@ -3,8 +3,8 @@ */ import { createLogger } from "../../core/logger"; -import { ReplicateProvider } from "./replicate"; -import type { AiProvider, AiResponse } from "./types"; +import { OpenRouterProvider } from "./openrouter"; +import type { AiProvider, AiResponse, MessageStyle } from "./types"; const logger = createLogger("AI:Service"); @@ -12,7 +12,7 @@ export class AiService { private provider: AiProvider; constructor(provider?: AiProvider) { - this.provider = provider ?? 
new OpenRouterProvider(); } async health(): Promise<boolean> { return this.provider.health(); } @@ -26,6 +26,17 @@ logger.debug("Generating response", { promptLength: prompt.length }); return this.provider.ask({ prompt, systemPrompt }); } + + /** + * Classify a message to determine the appropriate response style + */ + async classifyMessage(message: string): Promise<MessageStyle> { + if (this.provider.classifyMessage) { + return this.provider.classifyMessage(message); + } + // Default to snarky if provider doesn't support classification + return "snarky"; + } } // Singleton instance @@ -38,4 +49,4 @@ return aiService; } -export type { AiProvider, AiResponse } from "./types"; +export type { AiProvider, AiResponse, MessageStyle } from "./types"; diff --git a/src/services/ai/openrouter.ts b/src/services/ai/openrouter.ts new file mode 100644 index 0000000..a6e3f71 --- /dev/null +++ b/src/services/ai/openrouter.ts @@ -0,0 +1,107 @@ +/** + * OpenRouter AI provider implementation + */ + +import OpenAI from "openai"; +import { config } from "../../core/config"; +import { createLogger } from "../../core/logger"; +import type { AiProvider, AiResponse, AskOptions, MessageStyle } from "./types"; + +const logger = createLogger("AI:OpenRouter"); + +// Style classification options +const STYLE_OPTIONS: MessageStyle[] = ["story", "snarky", "insult", "explicit", "helpful"]; + +export class OpenRouterProvider implements AiProvider { + private client: OpenAI; + + constructor() { + this.client = new OpenAI({ + baseURL: "https://openrouter.ai/api/v1", + apiKey: config.ai.openRouterApiKey, + defaultHeaders: { + "HTTP-Referer": "https://github.com/crunk-bun", + "X-Title": "Joel Discord Bot", + }, + }); + } + + async health(): Promise<boolean> { + try { + // Simple health check - verify we can list models + await this.client.models.list(); + return true; + } catch (error) { + logger.error("Health check failed", error); + return false; + } + } + + async ask(options: AskOptions): 
Promise<AiResponse> { + const { prompt, systemPrompt, maxTokens, temperature } = options; + + try { + const completion = await this.client.chat.completions.create({ + model: config.ai.model, + messages: [ + { role: "system", content: systemPrompt }, + { role: "user", content: prompt }, + ], + max_tokens: maxTokens ?? config.ai.maxTokens, + temperature: temperature ?? config.ai.temperature, + }); + + const text = completion.choices[0]?.message?.content ?? ""; + + // Discord message limit safety + return { text: text.slice(0, 1900) }; + } catch (error: unknown) { + logger.error("Failed to generate response", error); + throw error; + } + } + + /** + * Classify a message to determine the appropriate response style + */ + async classifyMessage(message: string): Promise<MessageStyle> { + try { + const classification = await this.client.chat.completions.create({ + model: config.ai.classificationModel, + messages: [ + { + role: "user", + content: `Classify this message into exactly one category. Only respond with the category name, nothing else. 
+ +Message: "${message}" + +Categories: +- story: User wants a story, narrative, or creative writing +- snarky: User is being sarcastic or deserves a witty comeback +- insult: User is being rude or hostile, respond with brutal insults +- explicit: User wants adult/NSFW content +- helpful: User has a genuine question or needs actual help + +Category:`, + }, + ], + max_tokens: 10, + temperature: 0.1, + }); + + const result = classification.choices[0]?.message?.content?.toLowerCase().trim() as MessageStyle; + + // Validate the result is a valid style + if (STYLE_OPTIONS.includes(result)) { + logger.debug("Message classified", { style: result }); + return result; + } + + logger.debug("Classification returned invalid style, defaulting to snarky", { result }); + return "snarky"; + } catch (error) { + logger.error("Failed to classify message", error); + return "snarky"; // Default to snarky on error + } + } +} diff --git a/src/services/ai/replicate.ts b/src/services/ai/replicate.ts deleted file mode 100644 index 47db6a2..0000000 --- a/src/services/ai/replicate.ts +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Replicate AI provider implementation - */ - -import Replicate from "replicate"; -import { config } from "../../core/config"; -import { createLogger } from "../../core/logger"; -import type { AiProvider, AiResponse, AskOptions } from "./types"; - -const logger = createLogger("AI:Replicate"); - -export class ReplicateProvider implements AiProvider { - private client: Replicate; - - constructor() { - this.client = new Replicate({ - auth: config.ai.replicateApiToken, - }); - } - - async health(): Promise<boolean> { - try { - // Simple health check - just verify we can create a client - return true; - } catch (error) { - logger.error("Health check failed", error); - return false; - } - } - - async ask(options: AskOptions): Promise<AiResponse> { - const { prompt, systemPrompt, maxTokens, temperature } = options; - - try { - const formattedPrompt = `<|im_start|>system -${systemPrompt}<|im_end|> 
-<|im_start|>user -${prompt}<|im_end|> -<|im_start|>assistant -`; - - const input = { - prompt: formattedPrompt, - temperature: temperature ?? config.ai.temperature, - max_new_tokens: maxTokens ?? config.ai.maxTokens, - }; - - let output = ""; - for await (const event of this.client.stream(config.ai.model as `${string}/${string}:${string}`, { - input, - })) { - output += event; - // Discord message limit safety - if (output.length >= 1900) break; - } - - return { text: output.slice(0, 1900) }; - } catch (error: unknown) { - logger.error("Failed to generate response", error); - throw error; - } - } -} diff --git a/src/services/ai/types.ts b/src/services/ai/types.ts index 0fdc7fb..b66e0fc 100644 --- a/src/services/ai/types.ts +++ b/src/services/ai/types.ts @@ -7,6 +7,11 @@ export interface AiResponse { text: string; } +/** + * Message style classification options + */ +export type MessageStyle = "story" | "snarky" | "insult" | "explicit" | "helpful"; + export interface AiProvider { /** * Generate a response to a prompt @@ -17,6 +22,11 @@ * Check if the AI service is healthy */ health(): Promise<boolean>; + + /** + * Classify a message to determine response style + */ + classifyMessage?(message: string): Promise<MessageStyle>; } export interface AskOptions {