import { SlashCommandBuilder, AttachmentBuilder } from 'discord.js';
import fs from 'fs/promises';
import path from 'path';
import axios from 'axios';

/**
 * Split long text into Discord-safe chunks without breaking mid-line.
 * @param {string} text
 * @param {number} max - Maximum characters per chunk (Discord allows 2000).
 * @returns {string[]}
 */
function splitLongMessage(text, max = 2000) {
  const lines = text.split('\n');
  const chunks = [];
  let chunk = '';
  for (const line of lines) {
    let next = line + '\n';
    // A single line longer than the limit would otherwise produce an
    // oversized chunk, so hard-split it as a fallback.
    while (next.length > max) {
      if (chunk) {
        chunks.push(chunk);
        chunk = '';
      }
      chunks.push(next.slice(0, max));
      next = next.slice(max);
    }
    if (chunk.length + next.length > max) {
      chunks.push(chunk);
      chunk = '';
    }
    chunk += next;
  }
  if (chunk) chunks.push(chunk);
  return chunks;
}
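
// Example (illustrative only, not part of the original file): with a 12-char
// limit, whole lines are grouped until the next line would overflow.
//   splitLongMessage('aaaa\nbbbb\ncccc', 12)
//   // => ['aaaa\nbbbb\n', 'cccc\n']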

/**
 * Handle 'generate_image' function calls for slash commands.
 * Returns true if the call was handled (image sent or an error reported),
 * false if there was no image function call.
 */
async function handleImageInteraction(client, interaction, resp, cfg) {
  const calls = Array.isArray(resp.output) ? resp.output : [];
  const fn = calls.find(o => o.type === 'function_call' && o.name === 'generate_image');
  if (!fn?.arguments) return false;
  client.logger.debug(`Image function args: ${fn.arguments}`);
  let args;
  try { args = JSON.parse(fn.arguments); } catch { return false; }
  if (!args.prompt?.trim()) {
    // Ephemerality is fixed when the interaction is deferred; editReply
    // accepts no `ephemeral` option, so none is passed here.
    await interaction.editReply({ content: 'Cannot generate image: empty prompt.' });
    return true;
  }
  // Map the requested aspect ratio onto a DALL-E 3 image size.
  let size;
  switch (args.aspect) {
    case 'landscape': size = '1792x1024'; break;
    case 'portrait': size = '1024x1792'; break;
    case 'square':
    default: size = '1024x1024'; break;
  }
  const quality = ['standard', 'hd'].includes(args.quality)
    ? args.quality
    : cfg.imageGeneration.defaultQuality;
  try {
    const imgRes = await client.openai.images.generate({ model: 'dall-e-3', prompt: args.prompt, quality, size, n: 1 });
    const url = imgRes.data?.[0]?.url;
    if (!url) throw new Error('No image URL');
    // Download the generated image and persist it locally.
    const dl = await axios.get(url, { responseType: 'arraybuffer' });
    const buf = Buffer.from(dl.data);
    const filename = `${interaction.user.id}-${Date.now()}.png`;
    const dir = cfg.imageGeneration.imageSavePath || './images';
    await fs.mkdir(dir, { recursive: true });
    const filePath = path.join(dir, filename);
    await fs.writeFile(filePath, buf);
    client.logger.info(`Saved image: ${filePath}`);
    const attachment = new AttachmentBuilder(buf, { name: filename });
    await interaction.editReply({ content: args.prompt, files: [attachment] });
    // Feed the tool output back to the model so the cached conversation
    // records that the image was generated.
    try {
      const convKey = interaction.channelId;
      const toolOutputItem = {
        type: 'function_call_output',
        call_id: fn.call_id,
        output: JSON.stringify({ url }),
      };
      const recapBody = {
        model: cfg.defaultModel,
        instructions: client.responsesSystemPrompt,
        previous_response_id: resp.id,
        input: [toolOutputItem],
        max_output_tokens: Math.min(100, cfg.defaultMaxTokens),
        temperature: cfg.defaultTemperature,
      };
      const recapResp = await client.openai.responses.create(recapBody);
      client.pb?.cache?.set(convKey, recapResp.id, Math.floor(cfg.conversationExpiry / 1000));
      // Responses API usage is reported as input/output/total tokens.
      const recapTokens = recapResp.usage?.total_tokens ?? recapResp.usage?.output_tokens ?? 0;
      if (client.scorekeeper && recapTokens > 0) {
        client.scorekeeper.addOutput(interaction.guildId, interaction.user.id, recapTokens)
          .catch(e => client.logger.error(`Scorekeeper error: ${e.message}`));
      }
    } catch (err) {
      client.logger.error(`Recap failed: ${err.message}`);
    }
    return true;
  } catch (err) {
    client.logger.error(`Image generation error: ${err.message}`);
    await interaction.editReply({ content: `Image generation error: ${err.message}` });
    return true;
  }
}
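
// For reference, the output item consumed above carries (at least) the fields
// read by this handler; the values shown are illustrative placeholders:
// {
//   type: 'function_call',
//   name: 'generate_image',
//   call_id: 'call_...',
//   arguments: '{"prompt":"...","aspect":"landscape","quality":"hd"}',
// }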

/**
 * /query slash command: send a custom AI query using the Responses API.
 * Options:
 *   prompt    - Required string: the text to send to the AI.
 *   ephemeral - Optional boolean: send the response ephemerally (default: true).
 */
export const commands = [
  {
    data: new SlashCommandBuilder()
      .setName('query')
      .setDescription('Send a custom AI query')
      .addStringOption(opt =>
        opt.setName('prompt')
          .setDescription('Your query text')
          .setRequired(true)
      )
      .addBooleanOption(opt =>
        opt.setName('ephemeral')
          .setDescription('Receive an ephemeral response')
          .setRequired(false)
      ),
    async execute(interaction, client) {
      const cfg = client.config.responses;
      // Enforce minimum score to use /query
      try {
        const scoreData = await client.scorekeeper.getScore(interaction.guildId, interaction.user.id);
        if (scoreData.totalScore < cfg.minScore) {
          return interaction.reply({
            content: `You need a score of at least ${cfg.minScore} to use /query. Your current score is ${scoreData.totalScore.toFixed(2)}.`,
            ephemeral: true
          });
        }
      } catch (err) {
        client.logger.error(`Error checking score: ${err.message}`);
        return interaction.reply({ content: 'Error verifying your score. Please try again later.', ephemeral: true });
      }
      const prompt = interaction.options.getString('prompt');
      const ephemeral = interaction.options.getBoolean('ephemeral') ?? true;
      await interaction.deferReply({ ephemeral });

      // Determine channel/thread key for context
      const key = interaction.channelId;
      // Initialize per-channel lock map
      const lockMap = client._responseLockMap || (client._responseLockMap = new Map());
      // Get last pending promise for this key
      const last = lockMap.get(key) || Promise.resolve();
      // Handler to run in sequence
      const handler = async () => {
        // Read previous response ID
        const previous = client.pb?.cache?.get(key);
        // Build request body
        const body = {
          model: cfg.defaultModel,
          instructions: client.responsesSystemPrompt,
          input: prompt,
          previous_response_id: previous,
          max_output_tokens: cfg.defaultMaxTokens,
          temperature: cfg.defaultTemperature,
        };
        // Assemble enabled tools
        const tools = [];
        if (cfg.tools?.imageGeneration) {
          tools.push({
            type: 'function',
            name: 'generate_image',
            description: 'Generate an image with a given prompt, aspect, and quality.',
            parameters: {
              type: 'object',
              properties: {
                prompt: { type: 'string' },
                aspect: { type: 'string', enum: ['square', 'portrait', 'landscape'] },
                quality: { type: 'string', enum: ['standard', 'hd'] },
              },
              // Strict mode requires every property to appear in `required`
              // and additionalProperties: false.
              required: ['prompt', 'aspect', 'quality'],
              additionalProperties: false,
            },
            strict: true,
          });
        }
        if (cfg.tools?.webSearch) {
          tools.push({ type: 'web_search_preview' });
        }
        if (tools.length) body.tools = tools;

        // Call AI
        let resp;
        try {
          resp = await client.openai.responses.create(body);
          // Award output tokens. The Responses API reports usage as
          // input_tokens / output_tokens / total_tokens.
          const tokens = resp.usage?.total_tokens ?? resp.usage?.output_tokens ?? 0;
          if (client.scorekeeper && tokens > 0) {
            client.scorekeeper.addOutput(interaction.guildId, interaction.user.id, tokens)
              .catch(e => client.logger.error(`Scorekeeper error: ${e.message}`));
          }
        } catch (err) {
          client.logger.error(`AI error in /query: ${err.message}`);
          // Ephemerality was fixed at deferReply; editReply takes no such option.
          return interaction.editReply({ content: 'Error generating response.' });
        }

        // Cache response ID if not a function call
        const isFuncCall = Array.isArray(resp.output) && resp.output.some(o => o.type === 'function_call');
        if (!isFuncCall && resp.id && cfg.conversationExpiry) {
          client.pb?.cache?.set(key, resp.id, Math.floor(cfg.conversationExpiry / 1000));
        }

        // Handle image function call if present
        if (await handleImageInteraction(client, interaction, resp, cfg)) {
          return;
        }
        // Send text reply chunks
        const text = resp.output_text?.trim() || '';
        if (!text) {
          return interaction.editReply({ content: 'No response generated.' });
        }
        const chunks = splitLongMessage(text, 2000);
        for (let i = 0; i < chunks.length; i++) {
          if (i === 0) {
            await interaction.editReply({ content: chunks[i] });
          } else {
            await interaction.followUp({ content: chunks[i], ephemeral });
          }
        }
      };
      // Chain the handler after the last pending request so replies in this
      // channel are processed in order.
      const next = last.then(handler).catch(err => client.logger.error(`Queued /query error for ${key}: ${err.message}`));
      lockMap.set(key, next);
      await next;
      // Drop the entry once the chain has drained so the map does not grow
      // without bound.
      if (lockMap.get(key) === next) lockMap.delete(key);
    }
  }
];
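
// Illustrative wiring (assumed; this project's actual loader may differ): a
// command loader would typically register each `data` payload with Discord
// and route matching interactions to execute():
//
//   await client.application.commands.set(commands.map(c => c.data.toJSON()));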