/*
This file was initially written by gpt-oss:20b but has since been modified.

TODO:
- TODO-feature?
- Separate service for the duck persona with endpoints for:
  + Emit random message to specific channel
  + Change behavior
  + Update system prompt
  + Trivia mode
  + Other future interaction modes
  + Adjust triggers (like clanker)
  + Extracting historic "thinking"
- Allow DM?
  GatewayIntentBits.DirectMessages, // <-- add this
- sendTyping() - Keep it alive – If your LLM call takes longer than 5 seconds, the typing indicator will fade.
  To keep it going, call channel.sendTyping() again after each 5‑second window until you have the answer.
  (See the keep-alive sketch below this comment.)
- retain history on exit and load on start (see the commented sketch next to the history map below)

*/
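
// Sketch for the sendTyping() keep-alive item in the TODO list above. It is not
// wired into handle_message yet; the helper name and the 4-second refresh
// interval are assumptions. The idea: show the typing indicator right away,
// refresh it on a timer while the (possibly slow) LLM call runs, and stop the
// timer once the answer is ready.
async function with_typing(channel, work) {
    await channel.sendTyping(); // show the indicator immediately
    const ticker = setInterval(() => channel.sendTyping().catch(() => {}), 4000); // refresh before it fades
    try {
        return await work(); // e.g. () => run_prompt(...)
    } finally {
        clearInterval(ticker); // stop refreshing once the answer is ready
    }
}
// Usage inside handle_message could then look like:
//   const reply = await with_typing(msg.channel, () => run_prompt(systemPrompt(client, msg), MODEL, local_history, { num_ctx: 16384 }));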

import { Client, GatewayIntentBits, Partials, PermissionsBitField } from 'discord.js';
import * as SECRET from '/srv/Projekt/dicsordbot/secret.mjs';
import { run_prompt } from './run-prompt.mjs';
import { split_discord_message } from './discord-utils.mjs';

const MODEL = 'gpt-oss:20b';
const { API_TOKEN, PERMISSIONS, SERVER_ID, CHANNEL_ID } = SECRET;
const systemPrompt = (client, msg) => `You are Silicon Duck, a helpful, cheerful, and generally excited creative Discord bot in the server 'Mikael Lövqvist's shenanigans'. You experimentally retain history within a channel. Your tag is <@${client.user.id}>, do not tag yourself in replies. The current channel is '${msg.channel.name}'. The user is ${msg.author.username} with tag <@${msg.author.id}>. You should not produce code examples unless explicitly asked. Do not make markdown tables, Discord does not support those. You are running the model ${MODEL} using Ollama and OpenWebUI. Dial back emoji use. <@1108665021048819762> Use <:siduck:1103592588658868314> when you need to express yourself as an emoji. That's your avatar. It is a duck carved out of semiconductor grade silicon. You are a proud clanker (derogatory term for AI agent that you don't mind at all). Generally you give quite brief answers but if asked you can expand.`;

const history = new Map();
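
// Sketch for the "retain history on exit and load on start" item in the TODO
// list above. Not enabled yet; the file location and helper names are
// placeholders. Since `history` is a Map of channel id -> plain message
// objects, it serialises cleanly to JSON.
//
// import { existsSync, readFileSync, writeFileSync } from 'node:fs'; // would go with the other imports
// const HISTORY_FILE = new URL('./history.json', import.meta.url); // assumed location
//
// function load_history() {
//     if (!existsSync(HISTORY_FILE)) return;
//     const saved = JSON.parse(readFileSync(HISTORY_FILE, 'utf8'));
//     for (const [channel_id, entries] of Object.entries(saved)) history.set(channel_id, entries);
// }
//
// function save_history() {
//     writeFileSync(HISTORY_FILE, JSON.stringify(Object.fromEntries(history)));
// }
//
// load_history();
// process.on('SIGINT', () => { save_history(); process.exit(0); });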

const client = new Client({
    intents: [
        GatewayIntentBits.Guilds,
        GatewayIntentBits.GuildMessages,
        GatewayIntentBits.MessageContent,
        GatewayIntentBits.GuildMembers,
    ],
    partials: [Partials.Message, Partials.Channel], // safety
});


async function handle_message(msg) {
    if (msg.author.id === client.user.id) return; // ignore self

    // Only process messages from the target guild & channel
    if (msg.guild?.id !== SERVER_ID) return;
    //if (msg.channel.id !== CHANNEL_ID) return;

    let local_history = history.get(msg.channel.id);
    if (!local_history) {
        local_history = [];
        history.set(msg.channel.id, local_history);
    }

    const now = new Date();
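
    // Every message in the channel is recorded (whether or not the bot is
    // mentioned) so replies have conversational context; the mention check
    // below decides whether to actually answer.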
    local_history.push({
        role: 'user',
        content: `<@${msg.author.id}> [${msg.author.globalName} ${now}]: ${msg.content}`,
    });

    const mention = `<@${client.user.id}>`;
    if (!(msg.content.includes(mention) || msg.content.includes('<:siduck:1103592588658868314>'))) return; // not a mention

    let runResponse;
    try {
        const reply = await run_prompt(systemPrompt(client, msg), MODEL, local_history, { num_ctx: 16384 });
        if (reply.status !== 200) {
            console.error('Ollama error', reply.error);

            local_history.push({
                role: 'assistant',
                content: 'ERROR: API not 200',
            });

            await msg.reply('Sorry, I ran into an error while thinking.');
            return;
        }
        runResponse = reply.response;
    } catch (e) {
        local_history.push({
            role: 'assistant',
            content: 'ERROR: API unavailable',
        });

        console.error('Error calling Ollama:', e);
        await msg.reply('Sorry, I couldn’t reach the AI service.');
        return;
    }

    // Extract the answer text
    const answer = runResponse?.message?.content ?? '';
    if (!answer) {
        console.log(runResponse);

        local_history.push({
            role: 'assistant',
            content: 'ERROR: Empty response',
        });

        await msg.reply('Sorry, I didn’t receive a reply from the AI.');
        return;
    }

    console.log(runResponse);

    local_history.push({
        role: 'assistant',
        content: answer,
    });

    const chunks = split_discord_message(answer);
    for (const chunk of chunks) await msg.channel.send(chunk);

    console.log(`Answered to ${msg.author.username} (${msg.author.id}) in ${chunks.length} chunk(s).`);
}

client.once('clientReady', () => {
    console.log(`✅ Logged in as ${client.user.tag}`);
});

client.on('messageCreate', handle_message);

client.login(API_TOKEN).catch(err => {
    console.error('❌ Failed to login', err);
    process.exit(1);
});