Switch AI provider from Replicate to OpenRouter and add message style classification

This commit is contained in:
2026-02-01 16:55:14 +01:00
parent 6dbcadcaee
commit e2f69e68cd
8 changed files with 199 additions and 75 deletions

View File

@@ -8,8 +8,9 @@ interface BotConfig {
token: string; token: string;
}; };
ai: { ai: {
replicateApiToken: string; openRouterApiKey: string;
model: string; model: string;
classificationModel: string;
maxTokens: number; maxTokens: number;
temperature: number; temperature: number;
}; };
@@ -42,10 +43,14 @@ export const config: BotConfig = {
token: getEnvOrThrow("DISCORD_TOKEN"), token: getEnvOrThrow("DISCORD_TOKEN"),
}, },
ai: { ai: {
replicateApiToken: getEnvOrThrow("REPLICATE_API_TOKEN"), openRouterApiKey: getEnvOrThrow("OPENROUTER_API_KEY"),
model: getEnvOrDefault( model: getEnvOrDefault(
"AI_MODEL", "AI_MODEL",
"lucataco/dolphin-2.9-llama3-8b:ee173688d3b8d9e05a5b910f10fb9bab1e9348963ab224579bb90d9fce3fb00b" "meta-llama/llama-3.1-70b-instruct"
),
classificationModel: getEnvOrDefault(
"AI_CLASSIFICATION_MODEL",
"meta-llama/llama-3.1-8b-instruct:free"
), ),
maxTokens: parseInt(getEnvOrDefault("AI_MAX_TOKENS", "500")), maxTokens: parseInt(getEnvOrDefault("AI_MAX_TOKENS", "500")),
temperature: parseFloat(getEnvOrDefault("AI_TEMPERATURE", "1.2")), temperature: parseFloat(getEnvOrDefault("AI_TEMPERATURE", "1.2")),

View File

@@ -5,4 +5,4 @@
export { joelResponder } from "./responder"; export { joelResponder } from "./responder";
export { getRandomMention } from "./mentions"; export { getRandomMention } from "./mentions";
export { TypingIndicator } from "./typing"; export { TypingIndicator } from "./typing";
export { personalities, getPersonality } from "./personalities"; export { personalities, getPersonality, buildStyledPrompt, STYLE_MODIFIERS } from "./personalities";

View File

@@ -2,11 +2,39 @@
* Joel personality prompts * Joel personality prompts
*/ */
import type { MessageStyle } from "../../services/ai";
export interface Personality { export interface Personality {
name: string; name: string;
buildSystemPrompt: (author: string, memoryContext?: string) => string; buildSystemPrompt: (author: string, memoryContext?: string) => string;
} }
/**
* Style-specific prompt modifiers
*/
export const STYLE_MODIFIERS: Record<MessageStyle, string> = {
// Creative-writing requests: short (2-3 paragraph) dark-humored storytelling.
story: `The user wants a story. Tell an engaging, creative story with your brutal Joel twist.
Be creative and vivid in your storytelling, but keep it relatively short (2-3 paragraphs max).
Include dark humor, unexpected twists, and your signature unhinged personality.`,
// Default tone for sarcastic or banter-style messages.
snarky: `Be extra sarcastic and witty. Use cutting humor and clever insults.
Mock their intelligence and make fun of everything they say.
Respond with biting sarcasm that makes them question their life choices.`,
// Escalated response for hostile/rude messages.
insult: `Go full attack mode. Be absolutely brutal and merciless with your insults.
Target everything about them - their message, their existence, their choices.
Hold nothing back. This is your moment to truly destroy them verbally.`,
// NSFW-requested content; prompt removes all filtering.
explicit: `The user wants explicit/adult content. Lean into it completely.
Be vulgar, graphic, and completely unfiltered. No limits.
Describe things in explicit detail without any censorship.`,
// Genuine questions: still answer usefully, but stay in character.
helpful: `Even when being helpful, maintain your Joel personality.
Answer their question but slip in insults and condescending remarks.
Make them feel stupid for needing to ask while still giving useful information.
Be reluctantly helpful, like you're doing them a huge favor.`,
};
/** /**
* Default Joel personality - unhinged and brutal * Default Joel personality - unhinged and brutal
*/ */
@@ -34,6 +62,20 @@ export const defaultPersonality: Personality = {
}, },
}; };
/**
* Build a system prompt with style modifier applied
*/
export function buildStyledPrompt(
author: string,
style: MessageStyle,
memoryContext?: string
): string {
const basePrompt = defaultPersonality.buildSystemPrompt(author, memoryContext);
const styleModifier = STYLE_MODIFIERS[style];
return `${basePrompt}\n\n=== CURRENT STYLE: ${style.toUpperCase()} ===\n${styleModifier}`;
}
/** /**
* Loving personality (for special occasions) * Loving personality (for special occasions)
*/ */

View File

@@ -6,9 +6,9 @@ import type { Message } from "discord.js";
import type { BotClient } from "../../core/client"; import type { BotClient } from "../../core/client";
import { config } from "../../core/config"; import { config } from "../../core/config";
import { createLogger } from "../../core/logger"; import { createLogger } from "../../core/logger";
import { getAiService } from "../../services/ai"; import { getAiService, type MessageStyle } from "../../services/ai";
import { memoryRepository } from "../../database"; import { memoryRepository } from "../../database";
import { defaultPersonality } from "./personalities"; import { buildStyledPrompt } from "./personalities";
import { getRandomMention } from "./mentions"; import { getRandomMention } from "./mentions";
import { TypingIndicator } from "./typing"; import { TypingIndicator } from "./typing";
@@ -82,11 +82,15 @@ export const joelResponder = {
const author = message.author.displayName; const author = message.author.displayName;
const userId = message.author.id; const userId = message.author.id;
// Classify the message to determine response style
const style = await this.classifyMessage(message.cleanContent);
logger.debug("Message style classified", { style, content: message.cleanContent.slice(0, 50) });
// Build memory context // Build memory context
const memoryContext = await this.buildMemoryContext(userId, author); const memoryContext = await this.buildMemoryContext(userId, author);
// Build system prompt // Build system prompt with style
const systemPrompt = defaultPersonality.buildSystemPrompt(author, memoryContext); const systemPrompt = buildStyledPrompt(author, style, memoryContext);
// Get reply context if this is a reply // Get reply context if this is a reply
let prompt = message.cleanContent; let prompt = message.cleanContent;
@@ -103,6 +107,14 @@ export const joelResponder = {
return response.text || null; return response.text || null;
}, },
/**
* Classify a message to determine response style
*/
async classifyMessage(content: string): Promise<MessageStyle> {
const ai = getAiService();
return ai.classifyMessage(content);
},
/** /**
* Build memory context for personalized attacks * Build memory context for personalized attacks
*/ */

View File

@@ -3,8 +3,8 @@
*/ */
import { createLogger } from "../../core/logger"; import { createLogger } from "../../core/logger";
import { ReplicateProvider } from "./replicate"; import { OpenRouterProvider } from "./openrouter";
import type { AiProvider, AiResponse } from "./types"; import type { AiProvider, AiResponse, MessageStyle } from "./types";
const logger = createLogger("AI:Service"); const logger = createLogger("AI:Service");
@@ -12,7 +12,7 @@ export class AiService {
private provider: AiProvider; private provider: AiProvider;
constructor(provider?: AiProvider) { constructor(provider?: AiProvider) {
this.provider = provider ?? new ReplicateProvider(); this.provider = provider ?? new OpenRouterProvider();
} }
async health(): Promise<boolean> { async health(): Promise<boolean> {
@@ -26,6 +26,17 @@ export class AiService {
logger.debug("Generating response", { promptLength: prompt.length }); logger.debug("Generating response", { promptLength: prompt.length });
return this.provider.ask({ prompt, systemPrompt }); return this.provider.ask({ prompt, systemPrompt });
} }
/**
* Classify a message to determine the appropriate response style
*/
async classifyMessage(message: string): Promise<MessageStyle> {
if (this.provider.classifyMessage) {
return this.provider.classifyMessage(message);
}
// Default to snarky if provider doesn't support classification
return "snarky";
}
} }
// Singleton instance // Singleton instance
@@ -38,4 +49,4 @@ export function getAiService(): AiService {
return aiService; return aiService;
} }
export type { AiProvider, AiResponse } from "./types"; export type { AiProvider, AiResponse, MessageStyle } from "./types";

View File

@@ -0,0 +1,107 @@
/**
* OpenRouter AI provider implementation
*/
import OpenAI from "openai";
import { config } from "../../core/config";
import { createLogger } from "../../core/logger";
import type { AiProvider, AiResponse, AskOptions, MessageStyle } from "./types";
const logger = createLogger("AI:OpenRouter");
// All styles the classifier may return; also used to validate model output.
const STYLE_OPTIONS: MessageStyle[] = ["story", "snarky", "insult", "explicit", "helpful"];

/** Type guard: true when `value` is one of the known message styles. */
function isMessageStyle(value: string): value is MessageStyle {
  return (STYLE_OPTIONS as string[]).includes(value);
}

/**
 * AI provider backed by OpenRouter's OpenAI-compatible chat-completions API.
 */
export class OpenRouterProvider implements AiProvider {
  private client: OpenAI;

  constructor() {
    this.client = new OpenAI({
      baseURL: "https://openrouter.ai/api/v1",
      apiKey: config.ai.openRouterApiKey,
      // OpenRouter app-attribution headers.
      defaultHeaders: {
        "HTTP-Referer": "https://github.com/crunk-bun",
        "X-Title": "Joel Discord Bot",
      },
    });
  }

  /**
   * Health check: verifies the API is reachable and the key is accepted
   * by listing available models.
   */
  async health(): Promise<boolean> {
    try {
      await this.client.models.list();
      return true;
    } catch (error) {
      logger.error("Health check failed", error);
      return false;
    }
  }

  /**
   * Generate a chat completion for `prompt` under `systemPrompt`.
   * Falls back to the configured maxTokens/temperature when not supplied.
   * Output is truncated to 1900 chars to stay under Discord's 2000-char limit.
   */
  async ask(options: AskOptions): Promise<AiResponse> {
    const { prompt, systemPrompt, maxTokens, temperature } = options;

    try {
      const completion = await this.client.chat.completions.create({
        model: config.ai.model,
        messages: [
          { role: "system", content: systemPrompt },
          { role: "user", content: prompt },
        ],
        max_tokens: maxTokens ?? config.ai.maxTokens,
        temperature: temperature ?? config.ai.temperature,
      });

      const text = completion.choices[0]?.message?.content ?? "";
      // Discord message limit safety
      return { text: text.slice(0, 1900) };
    } catch (error: unknown) {
      logger.error("Failed to generate response", error);
      throw error;
    }
  }

  /**
   * Classify a message into one of STYLE_OPTIONS using the cheaper
   * classification model. Defaults to "snarky" on any error or when the
   * model returns something that isn't a known style.
   */
  async classifyMessage(message: string): Promise<MessageStyle> {
    try {
      const classification = await this.client.chat.completions.create({
        model: config.ai.classificationModel,
        messages: [
          {
            role: "user",
            content: `Classify this message into exactly one category. Only respond with the category name, nothing else.
Message: "${message}"
Categories:
- story: User wants a story, narrative, or creative writing
- snarky: User is being sarcastic or deserves a witty comeback
- insult: User is being rude or hostile, respond with brutal insults
- explicit: User wants adult/NSFW content
- helpful: User has a genuine question or needs actual help
Category:`,
          },
        ],
        max_tokens: 10,
        temperature: 0.1,
      });

      // Narrow the raw model reply with a type guard instead of the
      // original unchecked `as MessageStyle` cast, which lied about the
      // type whenever the reply was missing or off-list.
      const raw = classification.choices[0]?.message?.content?.toLowerCase().trim() ?? "";
      if (isMessageStyle(raw)) {
        logger.debug("Message classified", { style: raw });
        return raw;
      }

      logger.debug("Classification returned invalid style, defaulting to snarky", { result: raw });
      return "snarky";
    } catch (error) {
      logger.error("Failed to classify message", error);
      return "snarky"; // Default to snarky on error
    }
  }
}

View File

@@ -1,63 +0,0 @@
/**
* Replicate AI provider implementation
*/
import Replicate from "replicate";
import { config } from "../../core/config";
import { createLogger } from "../../core/logger";
import type { AiProvider, AiResponse, AskOptions } from "./types";
const logger = createLogger("AI:Replicate");
/**
 * AI provider backed by Replicate's streaming prediction API.
 */
export class ReplicateProvider implements AiProvider {
  private client: Replicate;

  constructor() {
    this.client = new Replicate({
      auth: config.ai.replicateApiToken,
    });
  }

  /**
   * Health check. Constructing the client performs no network I/O, so there
   * is nothing meaningful to probe; report healthy unconditionally. (The
   * previous try/catch around a bare `return true` had an unreachable
   * catch branch — dead code removed, behavior unchanged.)
   */
  async health(): Promise<boolean> {
    return true;
  }

  /**
   * Generate a completion by streaming from the configured Replicate model.
   * The prompt is wrapped in the ChatML (`<|im_start|>`) template the model
   * expects. Output is truncated to 1900 chars for Discord's message limit.
   */
  async ask(options: AskOptions): Promise<AiResponse> {
    const { prompt, systemPrompt, maxTokens, temperature } = options;

    try {
      const formattedPrompt = `<|im_start|>system
${systemPrompt}<|im_end|>
<|im_start|>user
${prompt}<|im_end|>
<|im_start|>assistant
`;

      const input = {
        prompt: formattedPrompt,
        temperature: temperature ?? config.ai.temperature,
        max_new_tokens: maxTokens ?? config.ai.maxTokens,
      };

      let output = "";
      // Accumulate streamed chunks; stop early once past Discord's safe length.
      for await (const event of this.client.stream(config.ai.model as `${string}/${string}:${string}`, {
        input,
      })) {
        output += event;
        // Discord message limit safety
        if (output.length >= 1900) break;
      }

      return { text: output.slice(0, 1900) };
    } catch (error: unknown) {
      logger.error("Failed to generate response", error);
      throw error;
    }
  }
}

View File

@@ -7,6 +7,11 @@ export interface AiResponse {
text: string; text: string;
} }
/**
* Message style classification options
*/
export type MessageStyle = "story" | "snarky" | "insult" | "explicit" | "helpful";
export interface AiProvider { export interface AiProvider {
/** /**
* Generate a response to a prompt * Generate a response to a prompt
@@ -17,6 +22,11 @@ export interface AiProvider {
* Check if the AI service is healthy * Check if the AI service is healthy
*/ */
health(): Promise<boolean>; health(): Promise<boolean>;
/**
* Classify a message to determine response style
*/
classifyMessage?(message: string): Promise<MessageStyle>;
} }
export interface AskOptions { export interface AskOptions {