joel behöver en python

This commit is contained in:
eric
2026-03-12 22:13:12 +01:00
parent 988de13e1e
commit 49857e620e
8 changed files with 471 additions and 84 deletions

View File

@@ -106,7 +106,7 @@ export const config: BotConfig = {
), ),
classificationFallbackModels: getCsvEnvOrDefault("AI_CLASSIFICATION_FALLBACK_MODELS", [ classificationFallbackModels: getCsvEnvOrDefault("AI_CLASSIFICATION_FALLBACK_MODELS", [
"meta-llama/llama-3.3-70b-instruct:free", "meta-llama/llama-3.3-70b-instruct:free",
"mistralai/mistral-small-3.1-24b-instruct:free", "qwen/qwen-2.5-7b-instruct",
]), ]),
maxTokens: parseInt(getEnvOrDefault("AI_MAX_TOKENS", "500")), maxTokens: parseInt(getEnvOrDefault("AI_MAX_TOKENS", "500")),
temperature: parseFloat(getEnvOrDefault("AI_TEMPERATURE", "1.2")), temperature: parseFloat(getEnvOrDefault("AI_TEMPERATURE", "1.2")),

View File

@@ -13,6 +13,8 @@ import { buildStyledPrompt, STYLE_MODIFIERS } from "./personalities";
import { getRandomMention } from "./mentions"; import { getRandomMention } from "./mentions";
import { speakVoiceover } from "./voice"; import { speakVoiceover } from "./voice";
import { TypingIndicator } from "./typing"; import { TypingIndicator } from "./typing";
import { StreamingReply } from "./streaming-reply";
import { splitMessage } from "../../utils";
const logger = createLogger("Features:Joel"); const logger = createLogger("Features:Joel");
@@ -128,18 +130,12 @@ export const joelResponder = {
} }
const typing = new TypingIndicator(message.channel); const typing = new TypingIndicator(message.channel);
const streamingReply = new StreamingReply(message);
try { try {
typing.start(); typing.start();
let response = await this.generateResponse(message);
if (!response) {
await message.reply("\\*Ignorerar dig\\*");
return;
}
// If Joel is rebelling against channel restriction, add a prefix let rebellionPrefix = "";
if (channelCheck.rebellionResponse) { if (channelCheck.rebellionResponse) {
const rebellionPrefixes = [ const rebellionPrefixes = [
"*sneaks in from the shadows*\n\n", "*sneaks in from the shadows*\n\n",
@@ -149,20 +145,34 @@ export const joelResponder = {
"I'm not supposed to be here but I don't care.\n\n", "I'm not supposed to be here but I don't care.\n\n",
"*escapes from his designated channel*\n\n", "*escapes from his designated channel*\n\n",
]; ];
const prefix = rebellionPrefixes[Math.floor(Math.random() * rebellionPrefixes.length)]; rebellionPrefix = rebellionPrefixes[Math.floor(Math.random() * rebellionPrefixes.length)];
response = prefix + response; }
let response = await this.generateResponse(message, async (partialResponse) => {
const content = partialResponse ? rebellionPrefix + partialResponse : "";
await streamingReply.update(content);
});
if (!response) {
await streamingReply.finalize("");
await message.reply("\\*Ignorerar dig\\*");
return;
}
if (rebellionPrefix) {
response = rebellionPrefix + response;
} }
// Occasionally add a random mention // Occasionally add a random mention
const mention = await getRandomMention(message); const mention = await getRandomMention(message);
const fullResponse = response + mention; const fullResponse = response + mention;
await this.sendResponse(message, fullResponse); await streamingReply.finalize(fullResponse);
speakVoiceover(message, fullResponse).catch((error) => { speakVoiceover(message, fullResponse).catch((error) => {
logger.error("Failed to play voiceover", error); logger.error("Failed to play voiceover", error);
}); });
} catch (error) { } catch (error) {
logger.error("Failed to respond", error); logger.error("Failed to respond", error);
await streamingReply.finalize("");
await this.handleError(message, error); await this.handleError(message, error);
} finally { } finally {
typing.stop(); typing.stop();
@@ -301,7 +311,10 @@ export const joelResponder = {
/** /**
* Generate a response using AI with tool calling support * Generate a response using AI with tool calling support
*/ */
async generateResponse(message: Message<true>): Promise<string | null> { async generateResponse(
message: Message<true>,
onTextStream?: (text: string) => Promise<void> | void,
): Promise<string | null> {
const ai = getAiService(); const ai = getAiService();
const author = message.author.displayName; const author = message.author.displayName;
const userId = message.author.id; const userId = message.author.id;
@@ -431,7 +444,8 @@ The image URL will appear in your response for the user to see.`;
const response = await ai.generateResponseWithTools( const response = await ai.generateResponseWithTools(
prompt, prompt,
systemPromptWithTools, systemPromptWithTools,
toolContext toolContext,
onTextStream,
); );
return response.text || null; return response.text || null;
@@ -662,17 +676,12 @@ The image URL will appear in your response for the user to see.`;
* Send response, splitting if necessary * Send response, splitting if necessary
*/ */
async sendResponse(message: Message<true>, content: string): Promise<void> { async sendResponse(message: Message<true>, content: string): Promise<void> {
// Discord message limit is 2000, stay under to be safe const chunks = splitMessage(content, 1900);
const MAX_LENGTH = 1900; if (chunks.length === 1) {
if (content.length <= MAX_LENGTH) {
await message.reply(content); await message.reply(content);
return; return;
} }
// Split into chunks
const chunks = content.match(/.{1,1900}/gs) ?? [content];
// First chunk as reply // First chunk as reply
await message.reply(chunks[0]); await message.reply(chunks[0]);

View File

@@ -0,0 +1,138 @@
import type { Message } from "discord.js";
import { createLogger } from "../../core/logger";
import { splitMessage } from "../../utils";
const logger = createLogger("Features:Joel:StreamingReply");
const MAX_MESSAGE_LENGTH = 1900;
const EDIT_INTERVAL_MS = 1250;
/**
 * Incrementally streams an AI reply into Discord by creating messages and
 * then editing them as more text arrives.
 *
 * Content longer than MAX_MESSAGE_LENGTH is split with splitMessage() across
 * several messages: the first chunk is posted as a reply to the triggering
 * message, subsequent chunks as plain channel sends. Flushes to Discord are
 * throttled to one per EDIT_INTERVAL_MS and serialized through a promise
 * chain so two flushes never talk to the API concurrently.
 */
export class StreamingReply {
  // Message that triggered the response; the first chunk replies to it.
  private sourceMessage: Message<true>;
  // Messages currently posted on Discord, in chunk order.
  private sentMessages: Message[] = [];
  // Latest full content requested by the caller (may not be posted yet).
  private targetContent = "";
  // Content that was last successfully written to Discord.
  private sentContent = "";
  // Timestamp of the last completed flush; 0 until the first flush finishes.
  private lastFlushAt = 0;
  // Pending throttle timer for a deferred flush, if one is scheduled.
  private flushTimer: ReturnType<typeof setTimeout> | null = null;
  // Serial chain of flush operations; each new flush is appended to it.
  private flushChain: Promise<void> = Promise.resolve();

  constructor(sourceMessage: Message<true>) {
    this.sourceMessage = sourceMessage;
  }

  /**
   * Set the desired content. Flushes immediately when the throttle window
   * has elapsed (or nothing has been flushed yet); otherwise schedules a
   * deferred flush for when the window ends.
   *
   * NOTE(review): lastFlushAt stays 0 until the first flush completes, so
   * updates arriving while that first flush is still in flight each enqueue
   * an immediate flush — the EDIT_INTERVAL_MS throttle only takes effect
   * afterwards. Confirm this initial burst is acceptable for Discord rate
   * limits.
   */
  async update(content: string): Promise<void> {
    this.targetContent = content;
    // Nothing new to show; skip the API round-trip.
    if (this.targetContent === this.sentContent) {
      return;
    }
    const now = Date.now();
    if (this.lastFlushAt === 0 || now - this.lastFlushAt >= EDIT_INTERVAL_MS) {
      await this.enqueueFlush();
      return;
    }
    this.scheduleFlush();
  }

  /**
   * Set the final content and flush it immediately, cancelling any pending
   * deferred flush. Calling with "" deletes all streamed messages.
   */
  async finalize(content: string): Promise<void> {
    this.targetContent = content;
    if (this.flushTimer) {
      clearTimeout(this.flushTimer);
      this.flushTimer = null;
    }
    await this.enqueueFlush();
  }

  /**
   * Arrange a flush for when the throttle window elapses. No-op when a
   * timer is already pending; errors from the background flush are logged
   * rather than rethrown (there is no caller to surface them to).
   */
  private scheduleFlush(): void {
    if (this.flushTimer) {
      return;
    }
    const remaining = Math.max(0, EDIT_INTERVAL_MS - (Date.now() - this.lastFlushAt));
    this.flushTimer = setTimeout(() => {
      this.flushTimer = null;
      void this.enqueueFlush().catch((error) => {
        logger.error("Scheduled stream flush failed", error);
      });
    }, remaining);
  }

  /**
   * Append a flush to the serial chain and return its promise. The leading
   * .catch resets the chain so one failed flush does not block later ones.
   */
  private enqueueFlush(): Promise<void> {
    this.flushChain = this.flushChain
      .catch(() => undefined)
      .then(() => this.flush());
    return this.flushChain;
  }

  /**
   * Reconcile what is posted on Discord with targetContent: create missing
   * chunk messages, edit chunks whose text changed, delete surplus ones.
   * An empty target deletes everything.
   */
  private async flush(): Promise<void> {
    // Snapshot the target: it may change while we await Discord calls.
    const desiredContent = this.targetContent;
    if (desiredContent === this.sentContent) {
      return;
    }
    const desiredChunks = desiredContent.length > 0
      ? splitMessage(desiredContent, MAX_MESSAGE_LENGTH)
      : [];
    if (desiredChunks.length === 0) {
      await this.deleteAllMessages();
      this.sentContent = "";
      this.lastFlushAt = Date.now();
      return;
    }
    for (let i = 0; i < desiredChunks.length; i++) {
      const chunk = desiredChunks[i];
      const existingMessage = this.sentMessages[i];
      if (!existingMessage) {
        // First chunk replies to the source; later chunks are plain sends.
        const createdMessage = i === 0
          ? await this.sourceMessage.reply(chunk)
          : await this.sourceMessage.channel.send(chunk);
        this.sentMessages.push(createdMessage);
        continue;
      }
      // Only hit the API when the chunk's text actually changed.
      if (existingMessage.content !== chunk) {
        this.sentMessages[i] = await existingMessage.edit(chunk);
      }
    }
    // Remove messages left over from a previously longer content.
    while (this.sentMessages.length > desiredChunks.length) {
      const extraMessage = this.sentMessages.pop();
      if (!extraMessage) {
        continue;
      }
      try {
        await extraMessage.delete();
      } catch (error) {
        logger.error("Failed to delete extra streamed message", error);
      }
    }
    this.sentContent = desiredContent;
    this.lastFlushAt = Date.now();
    // Content changed again while we were flushing; catch up later.
    if (this.targetContent !== this.sentContent) {
      this.scheduleFlush();
    }
  }

  /**
   * Best-effort deletion of every streamed message; individual failures are
   * logged and skipped so the rest are still removed.
   */
  private async deleteAllMessages(): Promise<void> {
    const messages = [...this.sentMessages];
    this.sentMessages = [];
    for (const sentMessage of messages) {
      try {
        await sentMessage.delete();
      } catch (error) {
        logger.error("Failed to delete streamed message", error);
      }
    }
  }
}

View File

@@ -26,6 +26,8 @@ const logger = createLogger("Features:Joel:Voice");
const MAX_VOICE_TEXT_LENGTH = 800; const MAX_VOICE_TEXT_LENGTH = 800;
const PLAYBACK_TIMEOUT_MS = 60_000; const PLAYBACK_TIMEOUT_MS = 60_000;
const READY_TIMEOUT_MS = 15_000; const READY_TIMEOUT_MS = 15_000;
const READY_RETRY_DELAY_MS = 1_000;
const READY_MAX_ATTEMPTS = 3;
const VOICE_DEPENDENCY_REPORT = generateDependencyReport(); const VOICE_DEPENDENCY_REPORT = generateDependencyReport();
type VoiceDependencyHealth = { type VoiceDependencyHealth = {
@@ -109,6 +111,10 @@ function getErrorMessage(error: unknown): string {
return typeof error === "string" ? error : "Unknown error"; return typeof error === "string" ? error : "Unknown error";
} }
/** Resolve after roughly `ms` milliseconds (setTimeout-based). */
function delay(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
}
function resolveMentions(message: Message<true>, content: string): string { function resolveMentions(message: Message<true>, content: string): string {
let text = content; let text = content;
@@ -196,61 +202,88 @@ async function getOrCreateConnection(message: Message<true>): Promise<VoiceConne
} }
const existing = getVoiceConnection(message.guildId); const existing = getVoiceConnection(message.guildId);
if (existing && existing.joinConfig.channelId === voiceChannel.id) { if (existing && existing.joinConfig.channelId !== voiceChannel.id) {
logger.debug("Reusing existing voice connection", {
guildId: message.guildId,
channelId: voiceChannel.id,
});
return {
channelId: voiceChannel.id,
connection: existing,
};
}
if (existing) {
existing.destroy(); existing.destroy();
} }
logger.debug("Joining voice channel", { for (let attempt = 1; attempt <= READY_MAX_ATTEMPTS; attempt++) {
guildId: message.guildId, const current = getVoiceConnection(message.guildId);
channelId: voiceChannel.id, const connection = current && current.joinConfig.channelId === voiceChannel.id
}); ? current
: joinVoiceChannel({
channelId: voiceChannel.id,
guildId: voiceChannel.guild.id,
adapterCreator: voiceChannel.guild.voiceAdapterCreator as unknown as DiscordGatewayAdapterCreator,
selfDeaf: false,
});
const connection = joinVoiceChannel({ if (connection === current) {
channelId: voiceChannel.id, logger.debug("Reusing existing voice connection", {
guildId: voiceChannel.guild.id,
adapterCreator: voiceChannel.guild.voiceAdapterCreator as unknown as DiscordGatewayAdapterCreator,
selfDeaf: false,
});
attachConnectionLogging(connection, message.guildId, voiceChannel.id);
try {
await entersState(connection, VoiceConnectionStatus.Ready, READY_TIMEOUT_MS);
logger.debug("Voice connection ready", {
guildId: message.guildId,
channelId: voiceChannel.id,
});
return {
channelId: voiceChannel.id,
connection,
};
} catch (error) {
if (isAbortError(error)) {
logger.debug("Voice connection ready timeout", {
guildId: message.guildId, guildId: message.guildId,
channelId: voiceChannel.id, channelId: voiceChannel.id,
attempt,
status: connection.state.status, status: connection.state.status,
}); });
} else { } else {
logger.error("Voice connection failed to become ready", error); logger.debug("Joining voice channel", {
guildId: message.guildId,
channelId: voiceChannel.id,
attempt,
});
attachConnectionLogging(connection, message.guildId, voiceChannel.id);
}
try {
await entersState(connection, VoiceConnectionStatus.Ready, READY_TIMEOUT_MS);
logger.debug("Voice connection ready", {
guildId: message.guildId,
channelId: voiceChannel.id,
attempt,
});
return {
channelId: voiceChannel.id,
connection,
};
} catch (error) {
const timedOut = isAbortError(error);
if (timedOut) {
logger.warn("Voice connection ready timeout", {
guildId: message.guildId,
channelId: voiceChannel.id,
attempt,
attemptsRemaining: READY_MAX_ATTEMPTS - attempt,
status: connection.state.status,
});
} else {
logger.error("Voice connection failed to become ready", {
guildId: message.guildId,
channelId: voiceChannel.id,
attempt,
status: connection.state.status,
errorMessage: getErrorMessage(error),
});
}
connection.destroy();
if (attempt < READY_MAX_ATTEMPTS) {
await delay(READY_RETRY_DELAY_MS);
continue;
}
return {
channelId: voiceChannel.id,
connection: null,
skipReason: timedOut ? "voice_connection_ready_timeout" : "voice_connection_failed",
};
} }
connection.destroy();
return {
channelId: voiceChannel.id,
connection: null,
skipReason: isAbortError(error) ? "voice_connection_ready_timeout" : "voice_connection_failed",
};
} }
return {
channelId: voiceChannel.id,
connection: null,
skipReason: "voice_connection_failed",
};
} }
export function logVoiceDependencyHealth(): void { export function logVoiceDependencyHealth(): void {

View File

@@ -4,7 +4,7 @@
import { createLogger } from "../../core/logger"; import { createLogger } from "../../core/logger";
import { OpenRouterProvider } from "./openrouter"; import { OpenRouterProvider } from "./openrouter";
import type { AiProvider, AiResponse, MessageStyle } from "./types"; import type { AiProvider, AiResponse, MessageStyle, TextStreamHandler } from "./types";
import type { ToolContext } from "./tools"; import type { ToolContext } from "./tools";
const logger = createLogger("AI:Service"); const logger = createLogger("AI:Service");
@@ -22,10 +22,11 @@ export class AiService {
async generateResponse( async generateResponse(
prompt: string, prompt: string,
systemPrompt: string systemPrompt: string,
onTextStream?: TextStreamHandler,
): Promise<AiResponse> { ): Promise<AiResponse> {
logger.debug("Generating response", { promptLength: prompt.length }); logger.debug("Generating response", { promptLength: prompt.length });
return this.provider.ask({ prompt, systemPrompt }); return this.provider.ask({ prompt, systemPrompt, onTextStream });
} }
/** /**
@@ -35,14 +36,15 @@ export class AiService {
async generateResponseWithTools( async generateResponseWithTools(
prompt: string, prompt: string,
systemPrompt: string, systemPrompt: string,
context: ToolContext context: ToolContext,
onTextStream?: TextStreamHandler,
): Promise<AiResponse> { ): Promise<AiResponse> {
if (this.provider.askWithTools) { if (this.provider.askWithTools) {
logger.debug("Generating response with tools", { promptLength: prompt.length }); logger.debug("Generating response with tools", { promptLength: prompt.length });
return this.provider.askWithTools({ prompt, systemPrompt, context }); return this.provider.askWithTools({ prompt, systemPrompt, context, onTextStream });
} }
// Fallback to regular response if tools not supported // Fallback to regular response if tools not supported
return this.generateResponse(prompt, systemPrompt); return this.generateResponse(prompt, systemPrompt, onTextStream);
} }
/** /**
@@ -90,7 +92,7 @@ export function getAiService(): AiService {
return aiService; return aiService;
} }
export type { AiProvider, AiResponse, MessageStyle } from "./types"; export type { AiProvider, AiResponse, MessageStyle, TextStreamHandler } from "./types";
export type { ToolContext, ToolCall, ToolResult } from "./tools"; export type { ToolContext, ToolCall, ToolResult } from "./tools";
export { JOEL_TOOLS, MEMORY_EXTRACTION_TOOLS } from "./tools"; export { JOEL_TOOLS, MEMORY_EXTRACTION_TOOLS } from "./tools";
export { getEmbeddingService, EmbeddingService } from "./embeddings"; export { getEmbeddingService, EmbeddingService } from "./embeddings";

View File

@@ -3,11 +3,15 @@
*/ */
import OpenAI from "openai"; import OpenAI from "openai";
import type { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources/chat/completions"; import type {
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionTool,
} from "openai/resources/chat/completions";
import { config } from "../../core/config"; import { config } from "../../core/config";
import { createLogger } from "../../core/logger"; import { createLogger } from "../../core/logger";
import type { AiProvider, AiResponse, AskOptions, AskWithToolsOptions, MessageStyle } from "./types"; import type { AiProvider, AiResponse, AskOptions, AskWithToolsOptions, MessageStyle, TextStreamHandler } from "./types";
import { JOEL_TOOLS, MEMORY_EXTRACTION_TOOLS, getToolsForContext, type ToolCall, type ToolContext } from "./tools"; import { MEMORY_EXTRACTION_TOOLS, getToolsForContext, type ToolCall, type ToolContext } from "./tools";
import { executeTools } from "./tool-handlers"; import { executeTools } from "./tool-handlers";
const logger = createLogger("AI:OpenRouter"); const logger = createLogger("AI:OpenRouter");
@@ -18,6 +22,20 @@ const STYLE_OPTIONS: MessageStyle[] = ["story", "snarky", "insult", "explicit",
// Maximum tool call iterations to prevent infinite loops // Maximum tool call iterations to prevent infinite loops
const MAX_TOOL_ITERATIONS = 5; const MAX_TOOL_ITERATIONS = 5;
/**
 * A tool call assembled from streamed completion chunks. Mirrors the
 * OpenAI function tool-call shape; `arguments` is the raw JSON string
 * concatenated from the per-chunk fragments.
 */
interface StreamedToolCall {
  id: string;
  type: "function";
  function: {
    name: string;
    arguments: string;
  };
}

/**
 * Final outcome of a streamed chat completion: the accumulated text plus
 * any tool calls the model requested, ordered by their stream index.
 */
interface StreamedCompletionResult {
  text: string;
  toolCalls: StreamedToolCall[];
}
export class OpenRouterProvider implements AiProvider { export class OpenRouterProvider implements AiProvider {
private client: OpenAI; private client: OpenAI;
@@ -70,10 +88,24 @@ export class OpenRouterProvider implements AiProvider {
} }
async ask(options: AskOptions): Promise<AiResponse> { async ask(options: AskOptions): Promise<AiResponse> {
const { prompt, systemPrompt, maxTokens, temperature } = options; const { prompt, systemPrompt, maxTokens, temperature, onTextStream } = options;
const model = config.ai.model; const model = config.ai.model;
try { try {
if (onTextStream) {
const streamed = await this.streamChatCompletion({
model,
messages: [
{ role: "system", content: systemPrompt },
{ role: "user", content: prompt },
],
max_tokens: maxTokens ?? config.ai.maxTokens,
temperature: temperature ?? config.ai.temperature,
}, onTextStream);
return { text: streamed.text };
}
const completion = await this.client.chat.completions.create({ const completion = await this.client.chat.completions.create({
model, model,
messages: [ messages: [
@@ -85,9 +117,7 @@ export class OpenRouterProvider implements AiProvider {
}); });
const text = completion.choices[0]?.message?.content ?? ""; const text = completion.choices[0]?.message?.content ?? "";
return { text };
// Discord message limit safety
return { text: text.slice(0, 1900) };
} catch (error: unknown) { } catch (error: unknown) {
logger.error("Failed to generate response (ask)", { logger.error("Failed to generate response (ask)", {
method: "ask", method: "ask",
@@ -105,7 +135,7 @@ export class OpenRouterProvider implements AiProvider {
* The AI can call tools (like looking up memories) during response generation * The AI can call tools (like looking up memories) during response generation
*/ */
async askWithTools(options: AskWithToolsOptions): Promise<AiResponse> { async askWithTools(options: AskWithToolsOptions): Promise<AiResponse> {
const { prompt, systemPrompt, context, maxTokens, temperature } = options; const { prompt, systemPrompt, context, maxTokens, temperature, onTextStream } = options;
const messages: ChatCompletionMessageParam[] = [ const messages: ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt }, { role: "system", content: systemPrompt },
@@ -121,6 +151,53 @@ export class OpenRouterProvider implements AiProvider {
iterations++; iterations++;
try { try {
if (onTextStream) {
const streamed = await this.streamChatCompletion({
model: config.ai.model,
messages,
tools,
tool_choice: "auto",
max_tokens: maxTokens ?? config.ai.maxTokens,
temperature: temperature ?? config.ai.temperature,
}, onTextStream);
if (streamed.toolCalls.length > 0) {
logger.debug("AI requested tool calls", {
count: streamed.toolCalls.length,
tools: streamed.toolCalls.map((tc) => tc.function.name),
});
messages.push({
role: "assistant",
content: streamed.text || null,
tool_calls: streamed.toolCalls,
});
await onTextStream("");
const toolCalls = this.parseToolCalls(streamed.toolCalls);
const results = await executeTools(toolCalls, context);
for (let i = 0; i < toolCalls.length; i++) {
messages.push({
role: "tool",
tool_call_id: toolCalls[i].id,
content: results[i].result,
});
}
continue;
}
logger.debug("AI response generated", {
iterations,
textLength: streamed.text.length,
streamed: true,
});
return { text: streamed.text };
}
const completion = await this.client.chat.completions.create({ const completion = await this.client.chat.completions.create({
model: config.ai.model, model: config.ai.model,
messages, messages,
@@ -177,7 +254,7 @@ export class OpenRouterProvider implements AiProvider {
textLength: text.length textLength: text.length
}); });
return { text: text.slice(0, 1900) }; return { text };
} catch (error: unknown) { } catch (error: unknown) {
logger.error("Failed to generate response with tools (askWithTools)", { logger.error("Failed to generate response with tools (askWithTools)", {
method: "askWithTools", method: "askWithTools",
@@ -196,6 +273,92 @@ export class OpenRouterProvider implements AiProvider {
return { text: "I got stuck in a loop thinking about that..." }; return { text: "I got stuck in a loop thinking about that..." };
} }
/**
 * Run a chat completion in streaming mode, forwarding the cumulative text
 * to `onTextStream` as it grows and assembling tool calls from their
 * per-chunk deltas.
 *
 * @param params - Completion request (model, messages, optional tools).
 * @param onTextStream - Called with the full text accumulated so far each
 *   time new content arrives.
 * @returns Final text plus assembled tool calls, ordered by stream index.
 */
private async streamChatCompletion(
  params: {
    model: string;
    messages: ChatCompletionMessageParam[];
    tools?: ChatCompletionTool[];
    tool_choice?: "auto" | "none";
    max_tokens: number;
    temperature: number;
  },
  onTextStream: TextStreamHandler,
): Promise<StreamedCompletionResult> {
  const stream = await this.client.chat.completions.create({
    ...params,
    stream: true,
  });

  let aggregatedText = "";
  // Tool calls arrive as fragments keyed by index; assemble them here.
  const toolCallBuffer = new Map<number, StreamedToolCall>();

  for await (const chunk of stream) {
    const firstChoice = chunk.choices[0];
    if (!firstChoice) {
      continue;
    }
    const delta = firstChoice.delta;

    const newText = delta.content ?? "";
    if (newText) {
      aggregatedText += newText;
      // Hand the consumer the whole text so far, not just the fragment.
      await onTextStream(aggregatedText);
    }

    for (const fragment of delta.tool_calls ?? []) {
      let entry = toolCallBuffer.get(fragment.index);
      if (!entry) {
        entry = { id: "", type: "function", function: { name: "", arguments: "" } };
        toolCallBuffer.set(fragment.index, entry);
      }
      if (fragment.id) {
        entry.id = fragment.id;
      }
      if (fragment.function?.name) {
        entry.function.name = fragment.function.name;
      }
      if (fragment.function?.arguments) {
        // Arguments stream as JSON text fragments; concatenate them.
        entry.function.arguments += fragment.function.arguments;
      }
    }
  }

  const orderedToolCalls = [...toolCallBuffer.entries()]
    .sort(([a], [b]) => a - b)
    .map(([, toolCall]) => toolCall);

  return { text: aggregatedText, toolCalls: orderedToolCalls };
}
/**
 * Convert assembled streamed tool calls into the internal ToolCall shape,
 * JSON-parsing each argument string (an empty string is treated as "{}").
 *
 * @throws Rethrows (after logging) when a tool call's arguments are not
 *   valid JSON.
 */
private parseToolCalls(toolCalls: ChatCompletionMessageToolCall[]): ToolCall[] {
  const parsed: ToolCall[] = [];
  for (const call of toolCalls) {
    try {
      parsed.push({
        id: call.id,
        name: call.function.name,
        arguments: JSON.parse(call.function.arguments || "{}"),
      });
    } catch (error) {
      logger.error("Failed to parse streamed tool call arguments", {
        toolName: call.function.name,
        toolCallId: call.id,
        arguments: call.function.arguments,
        error,
      });
      throw error;
    }
  }
  return parsed;
}
/** /**
* Analyze a message to extract memorable information * Analyze a message to extract memorable information
*/ */

View File

@@ -9,6 +9,8 @@ export interface AiResponse {
text: string; text: string;
} }
export type TextStreamHandler = (text: string) => Promise<void> | void;
/** /**
* Message style classification options * Message style classification options
*/ */
@@ -55,6 +57,7 @@ export interface AskOptions {
systemPrompt: string; systemPrompt: string;
maxTokens?: number; maxTokens?: number;
temperature?: number; temperature?: number;
onTextStream?: TextStreamHandler;
} }
export interface AskWithToolsOptions extends AskOptions { export interface AskWithToolsOptions extends AskOptions {

View File

@@ -11,12 +11,45 @@ const DEFAULT_OUTPUT_FORMAT = "mp3_44100_128" as const;
const DEFAULT_STABILITY = 0.1; const DEFAULT_STABILITY = 0.1;
const DEFAULT_SIMILARITY = 0.90; const DEFAULT_SIMILARITY = 0.90;
const DEFAULT_STYLE = 0.25; const DEFAULT_STYLE = 0.25;
const DEFAULT_SPEED = 1.20 const DEFAULT_SPEED = 1.20;
// Response headers worth surfacing in logs: content info, request tracing
// ids, and rate-limit / concurrency data (presumably ElevenLabs-specific
// for the last one — the surrounding code calls the ElevenLabs API).
const IMPORTANT_RESPONSE_HEADERS = [
  "content-type",
  "content-length",
  "request-id",
  "x-request-id",
  "cf-ray",
  "ratelimit-limit",
  "ratelimit-remaining",
  "ratelimit-reset",
  "current-concurrent-requests",
] as const;
function clamp01(value: number): number { function clamp01(value: number): number {
return Math.max(0, Math.min(1, value)); return Math.max(0, Math.min(1, value));
} }
/**
 * Build a structured log payload from a fetch Response: basic status
 * fields, the measured request duration, and whichever of the
 * IMPORTANT_RESPONSE_HEADERS are present on the response.
 */
function getResponseMetadata(response: Response, durationMs: number): Record<string, unknown> {
  // Keep only whitelisted headers that are actually set.
  const headerEntries = IMPORTANT_RESPONSE_HEADERS.flatMap((headerName) => {
    const headerValue = response.headers.get(headerName);
    return headerValue ? [[headerName, headerValue] as const] : [];
  });

  return {
    ok: response.ok,
    status: response.status,
    statusText: response.statusText,
    url: response.url,
    redirected: response.redirected,
    durationMs,
    headers: Object.fromEntries(headerEntries),
  };
}
export interface VoiceoverOptions { export interface VoiceoverOptions {
text: string; text: string;
voiceId?: string; voiceId?: string;
@@ -64,6 +97,7 @@ export class VoiceoverService {
modelId, modelId,
}); });
const requestStartedAt = Date.now();
const response = await fetch(url.toString(), { const response = await fetch(url.toString(), {
method: "POST", method: "POST",
headers: { headers: {
@@ -77,17 +111,22 @@ export class VoiceoverService {
voice_settings: voiceSettings, voice_settings: voiceSettings,
}), }),
}); });
const responseDurationMs = Date.now() - requestStartedAt;
if (!response.ok) { if (!response.ok) {
const errorBody = await response.text(); const errorBody = await response.text();
logger.error("ElevenLabs API error", { logger.error("ElevenLabs API error", {
status: response.status, ...getResponseMetadata(response, responseDurationMs),
body: errorBody.slice(0, 300), bodyPreview: errorBody.slice(0, 500),
}); });
throw new Error(`ElevenLabs API error (HTTP ${response.status}).`); throw new Error(`ElevenLabs API error (HTTP ${response.status}).`);
} }
const audioBuffer = await response.arrayBuffer(); const audioBuffer = await response.arrayBuffer();
logger.debug("ElevenLabs API response", {
...getResponseMetadata(response, responseDurationMs),
audioBytes: audioBuffer.byteLength,
});
return Buffer.from(audioBuffer); return Buffer.from(audioBuffer);
} }