/**
 * Slash command module for '/query'.
 * Defines and handles the /query command via the OpenAI Responses API,
 * including optional image generation function calls.
 */
import { MessageFlags } from 'discord-api-types/v10';
import { SlashCommandBuilder, AttachmentBuilder, PermissionFlagsBits } from 'discord.js';
import fs from 'fs/promises';
import path from 'path';
import axios from 'axios';

/**
 * Split long text into chunks safe for Discord messaging.
 * @param {string} text - Text to split.
 * @param {number} max - Max characters per chunk.
 * @returns {string[]} Array of message chunks.
 */
function splitLongMessage(text, max = 2000) {
  const lines = text.split('\n');
  const chunks = [];
  let chunk = '';
  for (const line of lines) {
    const next = line + '\n';
    if (chunk.length + next.length > max) {
      if (chunk) chunks.push(chunk);
      chunk = '';
    }
    // A single line longer than the limit must be hard-split, or the chunk
    // would exceed Discord's message cap.
    if (next.length > max) {
      for (let i = 0; i < line.length; i += max) {
        chunks.push(line.slice(i, i + max));
      }
      continue;
    }
    chunk += next;
  }
  if (chunk) chunks.push(chunk);
  return chunks;
}
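// Illustrative behaviour (not executed): a long multi-line response is split on
// line boundaries so each piece fits in one Discord message.
//
//   const chunks = splitLongMessage(veryLongText, 2000);
//   chunks.every(c => c.length <= 2000); // => true
//   // chunks[0] goes through editReply(); the rest go through followUp().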
/**
 * Handle an OpenAI 'generate_image' function call in a slash-command interaction.
 * Downloads the generated images, saves them locally, and replies with attachments.
 * Replies here edit the deferred reply, so they inherit its visibility (ephemeral or not).
 * @param {object} client - Discord client instance.
 * @param {import('discord.js').ChatInputCommandInteraction} interaction - The slash command interaction.
 * @param {object} resp - OpenAI Responses API response.
 * @param {object} cfg - Module configuration.
 * @returns {Promise<boolean>} True if a function call was handled.
 */
async function handleImageInteraction(client, interaction, resp, cfg) {
  const calls = Array.isArray(resp.output) ? resp.output : [];
  const fn = calls.find(o => o.type === 'function_call' && o.name === 'generate_image');
  if (!fn?.arguments) return false;

  client.logger.debug(`Image function args: ${fn.arguments}`);
  let args;
  try {
    args = JSON.parse(fn.arguments);
  } catch {
    return false;
  }
  if (!args.prompt?.trim()) {
    await interaction.editReply({ content: 'Cannot generate image: empty prompt.' });
    return true;
  }

  // Always use the image model defined in config.
  const model = cfg.imageGeneration.defaultModel;
  const promptText = args.prompt;

  // Determine the number of images (1-10); DALL·E 3 only supports 1.
  let count = 1;
  if (args.n != null) {
    const nVal = typeof args.n === 'number' ? args.n : parseInt(args.n, 10);
    if (!Number.isNaN(nVal)) count = nVal;
  }
  count = Math.max(1, Math.min(10, count)); // clamp
  if (model === 'dall-e-3') count = 1;

  const size = args.size || 'auto';

  // Determine quality based on config and model constraints.
  let quality = args.quality || cfg.imageGeneration.defaultQuality;
  if (model === 'gpt-image-1') {
    if (!['low', 'medium', 'high', 'auto'].includes(quality)) quality = 'auto';
  } else if (model === 'dall-e-2') {
    quality = 'standard';
  } else if (model === 'dall-e-3') {
    if (!['standard', 'hd', 'auto'].includes(quality)) quality = 'standard';
  }

  const background = args.background;
  const moderation = args.moderation;
  const outputFormat = args.output_format;
  const compression = args.output_compression;
  const style = args.style;
  const user = args.user || interaction.user.id;

  try {
    // Build the generation parameters.
    const genParams = { model, prompt: promptText, n: count, size, quality, user };

    // response_format is supported by the DALL·E models, not gpt-image-1.
    if (model !== 'gpt-image-1' && args.response_format) {
      genParams.response_format = args.response_format;
    }
    // gpt-image-1 supports background, moderation, output_format, and output_compression.
    if (model === 'gpt-image-1') {
      if (background) genParams.background = background;
      if (moderation) genParams.moderation = moderation;
      if (outputFormat) {
        genParams.output_format = outputFormat;
        // Compression only applies to JPEG and WEBP output.
        if (['jpeg', 'webp'].includes(outputFormat) && typeof compression === 'number') {
          genParams.output_compression = compression;
        }
      }
    }
    // dall-e-3 supports style.
    if (model === 'dall-e-3' && style) {
      genParams.style = style;
    }

    // Generate images via the OpenAI Images API.
    const imgRes = await client.openai.images.generate(genParams);
    const images = imgRes.data || [];
    if (!images.length) throw new Error('No images generated');

    // Ensure the save directory exists.
    const dir = cfg.imageGeneration?.imageSavePath || './images';
    await fs.mkdir(dir, { recursive: true });

    const attachments = [];
    const outputs = []; // raw image payloads (collected but not otherwise used)

    // Process each generated image.
    for (let i = 0; i < images.length; i++) {
      const img = images[i];
      let buffer;
      let ext = outputFormat || 'png';
      if (img.b64_json) {
        buffer = Buffer.from(img.b64_json, 'base64');
        outputs.push({ b64_json: img.b64_json });
      } else if (img.url) {
        const dl = await axios.get(img.url, { responseType: 'arraybuffer' });
        buffer = Buffer.from(dl.data);
        const parsed = path.extname(img.url.split('?')[0]).replace(/^[.]/, '');
        if (parsed) ext = parsed;
        outputs.push({ url: img.url });
      } else {
        throw new Error('No image data');
      }
      const filename = `${interaction.user.id}-${Date.now()}-${i}.${ext}`;
      const filePath = path.join(dir, filename);
      await fs.writeFile(filePath, buffer);
      client.logger.info(`Saved image: ${filePath}`);
      attachments.push(new AttachmentBuilder(buffer, { name: filename }));
    }

    // Reply with attachments.
    await interaction.editReply({ content: promptText, files: attachments });
    return true;
  } catch (err) {
    client.logger.error(`Image generation error: ${err.message}`);
    await interaction.editReply({ content: `Image generation error: ${err.message}` });
    return true;
  }
}
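// For reference, a 'generate_image' call arrives from the Responses API as an output
// item shaped roughly like this (illustrative values, not a real response):
//
//   {
//     type: 'function_call',
//     name: 'generate_image',
//     call_id: 'call_abc123',
//     arguments: '{"prompt":"a watercolor fox","n":1,"size":"auto","quality":"auto","user":"123"}'
//   }
//
// handleImageInteraction() parses the `arguments` JSON string and maps it onto the
// Images API parameters.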
/**
 * /query slash command: send a custom AI query using the Responses API.
 * Options:
 *   prompt    - Required string: the text to send to the AI.
 *   ephemeral - Optional boolean: send the response ephemerally (default: true).
 */
export const commands = [
  {
    data: new SlashCommandBuilder()
      .setName('query')
      .setDescription('Send a custom AI query')
      .addStringOption(opt =>
        opt.setName('prompt')
          .setDescription('Your query text')
          .setRequired(true)
      )
      .addBooleanOption(opt =>
        opt.setName('ephemeral')
          .setDescription('Receive an ephemeral response')
          .setRequired(false)
      ),
    async execute(interaction, client) {
      const cfg = client.config.responses;

      // Enforce a minimum score to use /query if the scorekeeper is enabled.
      if (client.scorekeeper) {
        try {
          const isAdmin = interaction.member?.permissions?.has(PermissionFlagsBits.Administrator);
          const scoreData = await client.scorekeeper.getScore(interaction.guildId, interaction.user.id);
          if (!isAdmin && scoreData.totalScore < cfg.minScore) {
            return interaction.reply({
              content: `You need an I/O score of at least ${cfg.minScore} to use /query. Your current I/O score is ${scoreData.totalScore.toFixed(2)}.`,
              flags: MessageFlags.Ephemeral
            });
          }
        } catch (err) {
          client.logger.error(`[cmd:query] Error checking score: ${err.message}`);
          return interaction.reply({ content: 'Error verifying your score. Please try again later.', flags: MessageFlags.Ephemeral });
        }
      }

      const prompt = interaction.options.getString('prompt');
      const flag = interaction.options.getBoolean('ephemeral');
      client.logger.info(`[cmd:query] Prompt received from ${interaction.user.id}, length=${prompt.length}`);
      const ephemeral = flag !== null ? flag : true;
      await interaction.deferReply({ ephemeral });

      // Determine the channel/thread key for conversation context.
      const key = interaction.channelId;
      // Initialize the per-channel lock map.
      const lockMap = client._responseLockMap || (client._responseLockMap = new Map());
      // Get the last pending promise for this key.
      const last = lockMap.get(key) || Promise.resolve();

      // Handler to run in sequence.
      const handler = async () => {
        // Read the previous response ID for conversation threading.
        const previous = client.pb?.cache?.get(key);
        // Build the request body.
        const body = {
          model: cfg.defaultModel,
          instructions: client.responsesPrompt,
          input: prompt,
          previous_response_id: previous,
          max_output_tokens: cfg.defaultMaxTokens,
          temperature: cfg.defaultTemperature,
        };

        // Assemble enabled tools.
        const tools = [];
        if (cfg.tools?.imageGeneration) {
          const model = cfg.imageGeneration.defaultModel;
          // Allowed sizes per model.
          let sizeEnum;
          switch (model) {
            case 'gpt-image-1': sizeEnum = ['auto', '1024x1024', '1536x1024', '1024x1536']; break;
            case 'dall-e-2': sizeEnum = ['256x256', '512x512', '1024x1024']; break;
            case 'dall-e-3': sizeEnum = ['auto', '1024x1024', '1792x1024', '1024x1792']; break;
            default: sizeEnum = ['auto', '1024x1024'];
          }
          // Quality options per model.
          let qualityEnum;
          switch (model) {
            case 'gpt-image-1': qualityEnum = ['auto', 'low', 'medium', 'high']; break;
            case 'dall-e-2': qualityEnum = ['standard']; break;
            case 'dall-e-3': qualityEnum = ['auto', 'standard', 'hd']; break;
            default: qualityEnum = ['auto', 'standard'];
          }
          // Build schema properties dynamically.
          const properties = {
            prompt: { type: 'string', description: 'Text description of desired image(s).' },
            n: { type: 'number', description: 'Number of images to generate.' },
            size: { type: 'string', enum: sizeEnum, description: 'Image size.' },
            quality: { type: 'string', enum: qualityEnum, description: 'Image quality.' },
            user: { type: 'string', description: 'Unique end-user identifier.' }
          };
          if (model !== 'gpt-image-1') {
            properties.response_format = { type: 'string', enum: ['url', 'b64_json'], description: 'Format of returned images.' };
          }
          if (model === 'gpt-image-1') {
            properties.background = { type: 'string', enum: ['transparent', 'opaque', 'auto'], description: 'Background transparency.' };
            properties.moderation = { type: 'string', enum: ['low', 'auto'], description: 'Content moderation level.' };
            properties.output_format = { type: 'string', enum: ['png', 'jpeg', 'webp'], description: 'Output image format.' };
            properties.output_compression = { type: 'number', description: 'Compression level (0-100).' };
          }
          if (model === 'dall-e-3') {
            properties.style = { type: 'string', enum: ['vivid', 'natural'], description: 'Style option for dall-e-3.' };
          }
          // Strict function schemas require every property to appear in `required`.
          const required = ['prompt', 'n', 'size', 'quality', 'user'];
          if (model !== 'gpt-image-1') required.push('response_format');
          if (model === 'gpt-image-1') required.push('background', 'moderation', 'output_format', 'output_compression');
          if (model === 'dall-e-3') required.push('style');

          tools.push({
            type: 'function',
            name: 'generate_image',
            description: `Generate images using model ${model} with requested parameters.`,
            parameters: { type: 'object', properties, required, additionalProperties: false },
            strict: true,
          });
        }
        if (cfg.tools?.webSearch) {
          tools.push({ type: 'web_search_preview' });
        }
        if (tools.length) body.tools = tools;
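        // The assembled request ends up shaped roughly like this (illustrative
        // sketch; the model name is whatever cfg.defaultModel holds):
        //
        //   {
        //     model: 'gpt-4.1',
        //     instructions: '...system prompt...',
        //     input: 'the user prompt',
        //     previous_response_id: 'resp_abc123', // threads the conversation, if cached
        //     max_output_tokens: 1024,
        //     temperature: 0.7,
        //     tools: [{ type: 'function', name: 'generate_image', ... },
        //             { type: 'web_search_preview' }]
        //   }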
        // Call the AI.
        let resp;
        try {
          resp = await client.openai.responses.create(body);
          // Award tokens to the scorekeeper; the Responses API reports
          // input_tokens / output_tokens / total_tokens in `usage`.
          const tokens = resp.usage?.total_tokens ?? resp.usage?.output_tokens ?? 0;
          if (client.scorekeeper && tokens > 0) {
            client.scorekeeper.addOutput(interaction.guildId, interaction.user.id, tokens)
              .catch(e => client.logger.error(`Scorekeeper error: ${e.message}`));
          }
        } catch (err) {
          client.logger.error(`AI error in /query: ${err.message}`);
          return interaction.editReply({ content: 'Error generating response.' });
        }

        // Cache the response ID unless the model issued a function call.
        const isFuncCall = Array.isArray(resp.output) && resp.output.some(o => o.type === 'function_call');
        if (!isFuncCall && resp.id && cfg.conversationExpiry) {
          client.pb?.cache?.set(key, resp.id, Math.floor(cfg.conversationExpiry / 1000));
        }

        // Handle an image function call if present.
        if (await handleImageInteraction(client, interaction, resp, cfg)) {
          return;
        }

        // Send the text reply in chunks.
        const text = resp.output_text?.trim() || '';
        if (!text) {
          return interaction.editReply({ content: 'No response generated.' });
        }
        const chunks = splitLongMessage(text, 2000);
        for (let i = 0; i < chunks.length; i++) {
          if (i === 0) {
            await interaction.editReply({ content: chunks[i] });
          } else {
            await interaction.followUp({ content: chunks[i], ephemeral });
          }
        }
      };

      // Chain the handler after the last pending request for this channel and await it,
      // so concurrent /query calls in one channel run in order.
      const next = last
        .then(handler)
        .catch(err => client.logger.error(`Queued /query error for ${key}: ${err.message}`));
      lockMap.set(key, next);
      await next;
    }
  }
];
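// Illustrative wiring from a command loader (the loader itself is not part of this
// module, and its shape here is an assumption):
//
//   import { commands } from './query.js';
//   for (const cmd of commands) {
//     await client.application.commands.create(cmd.data.toJSON());
//     // an interactionCreate handler would then route matching interactions to cmd.execute()
//   }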