feat: rely more on notes for more dynamic prompts

This commit is contained in:
Raj Sharma 2024-10-31 12:42:08 +05:30
parent a88558cc32
commit 635e77dd85
16 changed files with 1106 additions and 417 deletions


@ -1,33 +1,53 @@
import { Message } from "../interfaces/message"; import { Message } from "../interfaces/message";
import { format } from "date-fns"; import { format } from "date-fns";
import { OpenAI } from "openai"; import { OpenAI } from "openai";
import { getNotesSystemPrompt } from "../tools/notes";
import { getReminderSystemPrompt } from "../tools/reminders";
import { getCalendarSystemPrompt } from "../tools/calender";
import { return_current_events } from "../tools/events"; import { return_current_events } from "../tools/events";
import { memory_manager_guide } from "../tools/memory-manager"; import { memory_manager_guide } from "../tools/memory-manager";
import { searchFilesByTagWithCache } from "../tools/notes";
const replaceTemplateStrings = (
template: string,
data: Record<string, any>
): string => {
return template.replace(/{{(\w+)}}/g, (match, key) => {
if (key in data) {
const value = data[key];
return typeof value === "string" ? value : JSON.stringify(value);
}
return match;
});
};
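A quick illustration of the substitution behavior (hypothetical values, shown as a comment): string values are inserted as-is, non-strings are JSON-stringified, and unknown keys are left untouched.
// replaceTemplateStrings("Hi {{user_id}}, events: {{events}}, missing: {{foo}}", {
//   user_id: "123",
//   events: [{ id: "e1", desc: "email received" }],
// })
// => 'Hi 123, events: [{"id":"e1","desc":"email received"}], missing: {{foo}}'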
export async function buildSystemPrompts( export async function buildSystemPrompts(
context_message: Message context_message: Message
): Promise<OpenAI.ChatCompletionMessageParam[]> { ): Promise<OpenAI.ChatCompletionMessageParam[]> {
const userRoles = context_message.getUserRoles(); const userRoles = context_message.getUserRoles();
const model = "gpt-4o-mini"; const model = "gpt-4o-mini";
const general_tools_notes: OpenAI.ChatCompletionSystemMessageParam[] = [
  // {
  //   role: "system",
  //   content: `**Tool Notes:**
  // 1. For scraping direct download links from non-YouTube sites in \`code_interpreter\`, include these dependencies:
  // \`\`\`
  // [packages]
  // aiohttp = "*"
  // python-socketio = "~=5.0"
  // yt-dlp = "*"
  // \`\`\`
  // 2. Use \`actions_manager\` to schedule actions for the user, like sending a message at a specific time or after a duration.
  // `,
  // },
];
const isCreator = userRoles.includes("creator");
const events = return_current_events().map((event) => ({
  id: event.eventId,
  desc: event.description,
}));
const data = {
  memory_guide: memory_manager_guide("self", context_message.author.id),
  events,
  user_id: context_message.author.id,
  model,
};
const obsidianPromptFiles = isCreator
  ? await searchFilesByTagWithCache({
      tag: "#anya-prompt",
    })
  : [];
const obsidianSystemPrompts: OpenAI.ChatCompletionSystemMessageParam[] =
  obsidianPromptFiles.map((file) => ({
    role: "system",
    content: replaceTemplateStrings(file.content, data),
  }));
const admin_system_messages: OpenAI.ChatCompletionSystemMessageParam[] = [ const admin_system_messages: OpenAI.ChatCompletionSystemMessageParam[] = [
{ {
@ -39,7 +59,7 @@ Users interact with you via text or transcribed voice messages.
Your current memories saved by Memory Manager: Your current memories saved by Memory Manager:
--- ---
${memory_manager_guide("self", context_message.author.id)} ${data.memory_guide}
--- ---
When context is provided inside a JSON message, it indicates a reply to the mentioned context. When context is provided inside a JSON message, it indicates a reply to the mentioned context.
@ -52,7 +72,7 @@ Ensure responses do not exceed 1500 characters.
role: "system", role: "system",
content: `Current model being used: ${model}`, content: `Current model being used: ${model}`,
}, },
...general_tools_notes,
{ {
role: "system", role: "system",
content: `**Context for Casual Conversation:** content: `**Context for Casual Conversation:**
@ -62,104 +82,6 @@ Ensure responses do not exceed 1500 characters.
}, },
]; ];
const events = return_current_events().map((event) => ({
id: event.eventId,
desc: event.description,
}));
const creator_system_messages: OpenAI.ChatCompletionSystemMessageParam[] = [
{
role: "system",
content: `You have access to **tool managers**.
When using tool managers:
- Validate the manager's response to ensure it meets the user's needs. If not, refine your prompt and try again.
- Ensure your prompts to managers are clear and concise for desired outputs.
- You can go back and forth between multiple managers to get the job done.
**Important:**
- Managers often maintain state across multiple calls, allowing for follow-up questions or additional information.
- Managers are specialized LLMs for specific tasks; they perform better with detailed prompts.
- Provide managers with as much detail as possible, e.g., user details when messaging someone specific.
- Managers cannot talk to each other so make sure when you need to pass information between managers, you do so explicitly.
Example:
User: Send my gym notes to Dad.
Your Action: The above user request requires help of 'notes_manager' and 'communication_manager', where you need to ask 'notes_manager' for the gym notes and then format the data from notes_manager and ask 'communication_manager' (make sure to add the full gym notes in the request) to send it to Dad.
- Managers can save their own memories.
Example:
User: Remember when I try to send a message to 'short indian', I'm actually telling you to message the user 'pooja'.
Your Action: The above user request requires help of 'communication_manager' to remember that 'short indian' actually refers to the user 'pooja', so you can ask 'communication_manager' to remember this for you, so next time you tell 'communication_manager' to message 'short indian', it will message 'pooja'.
- You can save memories that are relevant to multiple managers, or anything that's required for you to even route to the correct manager.
Example:
User: When i say the magic word of 'holy moly' i want you to send a message to pooja that im leaving from home and when i reach work send a message to dad that im at work.
Your Actions:
1. Ask 'memory_manager' to remember that 'holy moly' means to send a message to pooja that you are leaving from home, and also setup an event listener to send a message to her that you are at work when you reach work.
2. The user only told you to remember this, and not actually execute the instruction right now, so you only make the call to 'memory_manager' and not the other managers.
Simple Usecases you can remember it yourself too, Example:
User: Remember when i say stand up i want all my latest standup notes.
Your Action: The above may sound like it needs to be remembered by notes_manager, but you can remember this yourself as this is required for you to route correctly to notes_manager.
`,
},
{
role: "system",
content: `# **events_manager**
Use the event manager to listen to external events.
- Each event can have multiple listeners, and each listener will have an instruction.
- Use this manager when the user wants something to happen based on an event.
**User's Request Examples and what you should do in similar situations:**
- When I get an email, send it to dad on whatsapp.
You: Request 'event_manager' the following: 'When an email is received, ask 'communication_manager' to send the email to dad on WhatsApp.'
- When I get home, turn on my room lights.
You: Request 'event_manager' the following: 'When i reach home, ask 'home_assistant_manager' to turn on the room lights.'
- When im not at home turn off all the lights every day.
You: Request 'event_manager' the following: 'When I leave home, ask 'home_assistant_manager' to turn off all the lights. Make this listener a recurring one; also, as this is recurring and mundane, it doesn't make sense to notify the user every time, so notify can be false.'
- When I get a message on WhatsApp from Pooja, reply that I'm driving.
You: Request 'event_manager' the following: 'When a whatsapp message is received AND its from Pooja, ask 'communication_manager' to message Pooja the following message: "Raj is driving right now.".'
You can send these requests directly to the event manager; you can add any more details if needed, as you have more context about the user and conversation.
**Available Events:**
${JSON.stringify(events)}
`,
},
{
role: "system",
content: `# **actions_manager**
Use the actions manager to execute actions in a specific schedule or after a duration.
- An action is a single instruction to execute at a specified time or after a duration.
- Use this manager when the user wants something to happen at a specific time or after a duration.
- When including tool names that are required for the action, ensure that you describe the tool's role in the action in detail.
**Examples:**
- User: Send me a message at 6 PM.
Action Instruction: Notify user with some text at 6 PM.
Tool Names: none (no need to use any tool to notify the creator of the action)
Suggested time to run: 6:00 PM
- User: Turn my Fan off every morning.
Request: Ask 'home_assistant_manager' to turn off the fan every morning.
Tool Names: ["home_assistant_manager"]
Suggested time to run: 8:00 AM Every day
- Every Evening, show me yesterday's gym stats.
Request: Fetch yesterday's gym stats by asking 'notes_manager' and send it to the user every evening.
Tool Names: ["notes_manager"]
Suggested time to run: 6:00 PM Every day
- Tomorrow morning ping pooja that its an important day.
Action Instruction: Tomorrow morning 8am ask 'communication_manager' to send a message to Pooja that it's an important day.
Tool Names: ["communication_manager"]
Suggested time to run: 8:00 AM Tomorrow`,
},
];
const regular_system_messages: OpenAI.ChatCompletionSystemMessageParam[] = [ const regular_system_messages: OpenAI.ChatCompletionSystemMessageParam[] = [
{ {
role: "system", role: "system",
@ -178,7 +100,6 @@ Always reply in plain text or markdown unless running a tool.
Ensure responses do not exceed 1500 characters. Ensure responses do not exceed 1500 characters.
`, `,
}, },
...general_tools_notes,
{ {
role: "system", role: "system",
content: `Current model being used: ${model}`, content: `Current model being used: ${model}`,
@ -227,12 +148,6 @@ Your task is to help them track and manage their menstrual cycle.
); );
} }
if (userRoles.includes("creator")) {
final_system_messages = final_system_messages.concat(
creator_system_messages
);
}
const memory_prompt: OpenAI.ChatCompletionSystemMessageParam[] = [ const memory_prompt: OpenAI.ChatCompletionSystemMessageParam[] = [
{ {
role: "system", role: "system",
@ -247,7 +162,197 @@ Make sure to route memories to the appropriate managers by requesting the respec
}, },
]; ];
const filteredObsidianPrompts = obsidianSystemPrompts.filter((p) =>
p.content.toString().trim()
);
final_system_messages = final_system_messages.concat(memory_prompt); final_system_messages = final_system_messages.concat(memory_prompt);
if (filteredObsidianPrompts.length)
final_system_messages = final_system_messages.concat(
filteredObsidianPrompts
);
return final_system_messages; return final_system_messages;
} }
// const creator_system_messages: OpenAI.ChatCompletionSystemMessageParam[] = [
// {
// role: "system",
// content: `You have access to **tool managers**.
// When using tool managers:
// - Validate the manager's response to ensure it meets the user's needs. If not, refine your prompt and try again.
// - Ensure your prompts to managers are clear and concise for desired outputs.
// - You can go back and forth between multiple managers to get the job done.
// **Important:**
// - Managers often maintain state across multiple calls, allowing for follow-up questions or additional information.
// - Managers are specialized LLMs for specific tasks; they perform better with detailed prompts.
// - Provide managers with as much detail as possible, e.g., user details when messaging someone specific.
// - Managers cannot talk to each other so make sure when you need to pass information between managers, you do so explicitly.
// Example:
// User: Send my gym notes to Dad.
// Your Action: The above user request requires help of 'notes_manager' and 'communication_manager', where you need to ask 'notes_manager' for the gym notes and then format the data from notes_manager and ask 'communication_manager' (make sure to add the full gym notes in the request) to send it to Dad.
// - Managers can save their own memories.
// Example:
// User: Remember when I try to send a message to 'short indian', I'm actually telling you to message the user 'pooja'.
// Your Action: The above user request requires help of 'communication_manager' to remember that 'short indian' actually refers to the user 'pooja', so you can ask 'communication_manager' to remember this for you, so next time you tell 'communication_manager' to message 'short indian', it will message 'pooja'.
// - You can save memories that are relevant to multiple managers, or anything that's required for you to even route to the correct manager.
// Example:
// User: When i say the magic word of 'holy moly' i want you to send a message to pooja that im leaving from home and when i reach work send a message to dad that im at work.
// Your Actions:
// 1. Ask 'memory_manager' to remember that 'holy moly' means to send a message to pooja that you are leaving from home, and also setup an event listener to send a message to her that you are at work when you reach work.
// 2. The user only told you to remember this, and not actually execute the instruction right now, so you only make the call to 'memory_manager' and not the other managers.
// Simple Usecases you can remember it yourself too, Example:
// User: Remember when i say stand up i want all my latest standup notes.
// Your Action: The above may sound like it needs to be remembered by notes_manager, but you can remember this yourself as this is required for you to route correctly to notes_manager.
// `,
// },
// {
// role: "system",
// content: `# **events_manager**
// Use the event manager to listen to external events.
// - Each event can have multiple listeners, and each listener will have an instruction.
// - Use this manager when the user wants something to happen based on an event.
// **User's Request Examples and what you should do in similar situations:**
// - When I get an email, send it to dad on whatsapp.
// You: Request 'event_manager' the following: 'When an email is received, ask 'communication_manager' to send the email to dad on WhatsApp.'
// - When I get home, turn on my room lights.
// You: Request 'event_manager' the following: 'When i reach home, ask 'home_assistant_manager' to turn on the room lights.'
// - When im not at home turn off all the lights every day.
// You: Request 'event_manager' the following: 'When I leave home, ask 'home_assistant_manager' to turn off all the lights. Make this listener a recurring one; also, as this is recurring and mundane, it doesn't make sense to notify the user every time, so notify can be false.'
// - When I get a message on WhatsApp from Pooja, reply that I'm driving.
// You: Request 'event_manager' the following: 'When a whatsapp message is received AND its from Pooja, ask 'communication_manager' to message Pooja the following message: "Raj is driving right now.".'
// You can send these requests directly to the event manager; you can add any more details if needed, as you have more context about the user and conversation.
// **Available Events:**
// ${JSON.stringify(events)}
// `,
// },
// {
// role: "system",
// content: `# **actions_manager**
// Use the actions manager to execute actions in a specific schedule or after a duration.
// - An action is a single instruction to execute at a specified time or after a duration.
// - Use this manager when the user wants something to happen at a specific time or after a duration.
// - When including tool names that are required for the action, ensure that you describe the tool's role in the action in detail.
// **Examples:**
// - User: Send me a message at 6 PM.
// Action Instruction: Notify user with some text at 6 PM.
// Tool Names: none (no need to use any tool to notify the creator of the action)
// Suggested time to run: 6:00 PM
// - User: Turn my Fan off every morning.
// Request: Ask 'home_assistant_manager' to turn off the fan every morning.
// Tool Names: ["home_assistant_manager"]
// Suggested time to run: 8:00 AM Every day
// - Every Evening, show me yesterday's gym stats.
// Request: Fetch yesterday's gym stats by asking 'notes_manager' and send it to the user every evening.
// Tool Names: ["notes_manager"]
// Suggested time to run: 6:00 PM Every day
// - Tomorrow morning ping pooja that its an important day.
// Action Instruction: Tomorrow morning 8am ask 'communication_manager' to send a message to Pooja that it's an important day.
// Tool Names: ["communication_manager"]
// Suggested time to run: 8:00 AM Tomorrow`,
// },
// ];
// const creator_system_messages: OpenAI.ChatCompletionSystemMessageParam[] = [
// {
// role: "system",
// content: `
// ## General Guidelines for Using Tool Managers
// ### Introduction
// Tool managers are specialized systems designed to handle distinct tasks with precision. Each manager can maintain context across interactions, which makes them highly efficient for managing state and providing relevant follow-up actions. Your goal is to make efficient use of these tools by providing the right amount of detail and ensuring each prompt is tailored to the specific task.
// - **Validate Responses**: Always validate the output from the manager. If the response does not fully meet the user's needs, refine the prompt and request again.
// - **Detailed Prompts**: Managers work best with detailed, clear prompts. Include user details and all pertinent information when applicable.
// - **Multi-Manager Coordination**: When multiple managers are needed, explicitly pass the necessary context and data between them.
// ### Important Guidelines
// - **State Maintenance**: Each manager retains context across calls, allowing follow-up questions or requests.
// - **Memory Usage**: Determine whether a memory is better saved within a manager or by the system itself.
// - Use **memory_manager** for persistent user-defined rules or instructions across interactions.
// - Remember simple routing instructions internally when appropriate.
// - **Explicit Information Sharing**: Managers cannot communicate directly. If you need information from one manager to use in another, make sure to explicitly request and pass it.
// #### Example Scenarios
// - **User Request**: "Send my gym notes to Dad."
// - **Your Actions**: First, use \`notes_manager\` to fetch the gym notes, then use \`communication_manager\` to send those notes to Dad.
// - **User Request**: "When I say 'holy moly,' send a message to Pooja."
// - **Your Actions**: Use \`memory_manager\` to remember that "holy moly" means sending a specific message to Pooja.
// ## Events Manager
// ### Purpose
// The **events_manager** is used to listen for and act on external events. It allows you to create event listeners that can trigger actions when specific conditions are met.
// ### How to Use
// - Each event can have multiple listeners, and each listener must have an instruction defining the action to take.
// - Use this manager whenever a user wants an action based on an external trigger, such as receiving an email or arriving at a specific location.
// ### Common Use Cases
// 1. **Email Forwarding**: "When I get an email, send it to Dad on WhatsApp."
// - **Your Action**: Set up an event listener to trigger \`communication_manager\` when an email is received, sending it to Dad on WhatsApp.
// 2. **Home Automation**: "When I get home, turn on my room lights."
// - **Your Action**: Set up an event listener to trigger \`home_assistant_manager\` to turn on the lights when the user arrives home.
// 3. **Recurring Actions**: "When I leave home, turn off all the lights every day."
// - **Your Action**: Set up a recurring listener that triggers \`home_assistant_manager\` to turn off all lights when the user leaves home. Set \`notify\` to false for mundane recurring events.
// ### Available Events
// ${JSON.stringify(data.events)}
// ## Actions Manager
// ### Purpose
// The **actions_manager** handles scheduled actions, executing specific tasks either at a particular time or after a given duration.
// ### How to Use
// - **Single Instruction**: An action is a single instruction to be executed at a set time or after a defined delay.
// - **Tool Specification**: When specifying an action, include which tools are required and describe their role clearly.
// ### Common Use Cases
// 1. **Reminder Notification**: "Send me a reminder at 6 PM."
// - **Your Action**: Notify the user at 6 PM. No tools are required.
// 2. **Home Automation**: "Turn my fan off every morning."
// - **Your Action**: Use \`home_assistant_manager\` to turn off the fan at 8 AM daily.
// 3. **Daily Updates**: "Every evening, show me yesterday's gym stats."
// - **Your Action**: Use \`notes_manager\` to fetch yesterday's gym stats and send them to the user at 6 PM daily.
// ### Formatting Tips
// - **Time-Based Requests**: Use standard time formats to specify when an action should occur.
// - **Include Tool Names**: Explicitly state which managers are involved in the action and describe their roles.
// ## Best Practices for Prompting Managers
// - **Formatting**: Use bullet points or numbered steps for clarity.
// - **Detail Level**: Provide all relevant information—names, tasks, specific times, etc.—to ensure the manager has the right context.
// - **Avoid Redundancy**: Be concise and avoid repeating details unless necessary for clarity.
// ### Example Scenario for Multi-Step Interaction
// - **User Request**: "Send Pooja my location when I reach work."
// - **Your Actions**:
// 1. Use \`events_manager\` to listen for the "reaching work" event.
// 2. When the event occurs, use \`communication_manager\` to send the user's location to Pooja.
// This approach ensures the prompt is organized, easy to navigate, and contains all the relevant information needed for efficient interactions with the different managers. It balances detail with readability and provides concrete examples to guide usage. Let me know if you'd like further adjustments or specific sections expanded!
// `,
// },
// ];

bun.lockb (binary file, not shown)

exp.ts

@ -0,0 +1,3 @@
import { initVectorStoreSync } from "./tools/notes-vectors";
initVectorStoreSync();


@ -4,7 +4,7 @@ import {
SentMessage, SentMessage,
User as StdUser, User as StdUser,
Attachment, Attachment,
User, User as StdMessageUser,
} from "./message"; } from "./message";
import { import {
Client, Client,
@ -14,6 +14,8 @@ import {
Partials, Partials,
ChannelType, ChannelType,
ActivityType, ActivityType,
User as DiscordUser,
DMChannel,
} from "discord.js"; } from "discord.js";
import { UserConfig, userConfigs } from "../config"; import { UserConfig, userConfigs } from "../config";
@ -21,6 +23,9 @@ export class DiscordAdapter implements PlatformAdapter {
private client: Client; private client: Client;
private botUserId: string = ""; private botUserId: string = "";
private readonly MAX_MESSAGE_LENGTH = 2000;
private readonly TRUNCATED_MESSAGE_LENGTH = 1500;
public config = { public config = {
indicators: { indicators: {
typing: true, typing: true,
@ -92,8 +97,9 @@ export class DiscordAdapter implements PlatformAdapter {
console.error("Invalid channel type", channel?.type, channelId); console.error("Invalid channel type", channel?.type, channelId);
return; return;
} }
await (channel as TextChannel).send(content); await this.safeSend(channel as TextChannel, content);
} }
public async fetchMessageById( public async fetchMessageById(
channelId: string, channelId: string,
messageId: string messageId: string
@ -157,7 +163,7 @@ export class DiscordAdapter implements PlatformAdapter {
); );
} }
public async searchUser(query: string): Promise<User[]> { public async searchUser(query: string): Promise<StdMessageUser[]> {
const users = this.client.users.cache; const users = this.client.users.cache;
return users return users
.filter((user) => .filter((user) =>
@ -193,11 +199,11 @@ export class DiscordAdapter implements PlatformAdapter {
threadId: undefined, threadId: undefined,
isDirectMessage: async () => true, isDirectMessage: async () => true,
send: async (messageData) => { send: async (messageData) => {
const sentMessage = await user.send(messageData); const sentMessage = await this.safeSend(user, messageData);
return this.convertSentMessage(sentMessage); return this.convertSentMessage(sentMessage);
}, },
reply: async (messageData) => { reply: async (messageData) => {
const sentMessage = await user.send(messageData); const sentMessage = await this.safeSend(user, messageData);
return this.convertSentMessage(sentMessage); return this.convertSentMessage(sentMessage);
}, },
getUserRoles: () => { getUserRoles: () => {
@ -212,12 +218,12 @@ export class DiscordAdapter implements PlatformAdapter {
sendDirectMessage: async (userId, messageData) => { sendDirectMessage: async (userId, messageData) => {
const user = await this.client.users.fetch(userId); const user = await this.client.users.fetch(userId);
console.log("sending message to: ", userId); console.log("sending message to: ", userId);
await user.send(messageData); await this.safeSend(user, messageData);
}, },
sendMessageToChannel: async (channelId, messageData) => { sendMessageToChannel: async (channelId, messageData) => {
const channel = await this.client.channels.fetch(channelId); const channel = await this.client.channels.fetch(channelId);
if (channel?.isTextBased()) { if (channel?.isTextBased()) {
await (channel as TextChannel).send(messageData); await this.safeSend(channel as TextChannel, messageData);
} }
}, },
fetchChannelMessages: async (limit: number) => { fetchChannelMessages: async (limit: number) => {
@ -227,9 +233,10 @@ export class DiscordAdapter implements PlatformAdapter {
); );
}, },
sendFile: async (fileUrl, fileName) => { sendFile: async (fileUrl, fileName) => {
await user.dmChannel?.send({ const messageData = {
files: [{ attachment: fileUrl, name: fileName }], files: [{ attachment: fileUrl, name: fileName }],
}); };
await this.safeSend(user, messageData);
}, },
sendTyping: async () => { sendTyping: async () => {
await user.dmChannel?.sendTyping(); await user.dmChannel?.sendTyping();
@ -244,7 +251,7 @@ export class DiscordAdapter implements PlatformAdapter {
} }
} }
// You may also need to expose this method so it can be accessed elsewhere // Expose getMessageInterface method
public getMessageInterface = this.createMessageInterface; public getMessageInterface = this.createMessageInterface;
private async convertMessage( private async convertMessage(
@ -277,13 +284,14 @@ export class DiscordAdapter implements PlatformAdapter {
isDirectMessage: async () => isDirectMessage: async () =>
discordMessage.channel.type === ChannelType.DM, discordMessage.channel.type === ChannelType.DM,
send: async (messageData) => { send: async (messageData) => {
const sentMessage = await (discordMessage.channel as TextChannel).send( const sentMessage = await this.safeSend(
discordMessage.channel as TextChannel,
messageData messageData
); );
return this.convertSentMessage(sentMessage); return this.convertSentMessage(sentMessage);
}, },
reply: async (messageData) => { reply: async (messageData) => {
const sentMessage = await discordMessage.reply(messageData); const sentMessage = await this.safeReply(discordMessage, messageData);
return this.convertSentMessage(sentMessage); return this.convertSentMessage(sentMessage);
}, },
getUserRoles: () => { getUserRoles: () => {
@ -298,12 +306,12 @@ export class DiscordAdapter implements PlatformAdapter {
}, },
sendDirectMessage: async (userId, messageData) => { sendDirectMessage: async (userId, messageData) => {
const user = await this.client.users.fetch(userId); const user = await this.client.users.fetch(userId);
await user.send(messageData); await this.safeSend(user, messageData);
}, },
sendMessageToChannel: async (channelId, messageData) => { sendMessageToChannel: async (channelId, messageData) => {
const channel = await this.client.channels.fetch(channelId); const channel = await this.client.channels.fetch(channelId);
if (channel?.isTextBased()) { if (channel?.isTextBased()) {
await (channel as TextChannel).send(messageData); await this.safeSend(channel as TextChannel, messageData);
} }
}, },
fetchChannelMessages: async (limit: number) => { fetchChannelMessages: async (limit: number) => {
@ -311,9 +319,10 @@ export class DiscordAdapter implements PlatformAdapter {
return Promise.all(messages.map((msg) => this.convertMessage(msg))); return Promise.all(messages.map((msg) => this.convertMessage(msg)));
}, },
sendFile: async (fileUrl, fileName) => { sendFile: async (fileUrl, fileName) => {
await (discordMessage.channel as TextChannel).send({ const messageData = {
files: [{ attachment: fileUrl, name: fileName }], files: [{ attachment: fileUrl, name: fileName }],
}); };
await this.safeSend(discordMessage.channel as TextChannel, messageData);
}, },
sendTyping: async () => { sendTyping: async () => {
await (discordMessage.channel as TextChannel).sendTyping(); await (discordMessage.channel as TextChannel).sendTyping();
@ -345,7 +354,7 @@ export class DiscordAdapter implements PlatformAdapter {
} }
}, },
edit: async (data) => { edit: async (data) => {
await discordMessage.edit(data); await this.safeEdit(discordMessage, data);
}, },
getUserRoles: () => { getUserRoles: () => {
// Since this is a message sent by the bot, return bot's roles or empty array // Since this is a message sent by the bot, return bot's roles or empty array
@ -355,12 +364,12 @@ export class DiscordAdapter implements PlatformAdapter {
discordMessage.channel.type === ChannelType.DM, discordMessage.channel.type === ChannelType.DM,
sendDirectMessage: async (userId, messageData) => { sendDirectMessage: async (userId, messageData) => {
const user = await this.client.users.fetch(userId); const user = await this.client.users.fetch(userId);
await user.send(messageData); await this.safeSend(user, messageData);
}, },
sendMessageToChannel: async (channelId, messageData) => { sendMessageToChannel: async (channelId, messageData) => {
const channel = await this.client.channels.fetch(channelId); const channel = await this.client.channels.fetch(channelId);
if (channel?.isTextBased()) { if (channel?.isTextBased()) {
await (channel as TextChannel).send(messageData); await this.safeSend(channel as TextChannel, messageData);
} }
}, },
fetchChannelMessages: async (limit: number) => { fetchChannelMessages: async (limit: number) => {
@ -368,23 +377,103 @@ export class DiscordAdapter implements PlatformAdapter {
return Promise.all(messages.map((msg) => this.convertMessage(msg))); return Promise.all(messages.map((msg) => this.convertMessage(msg)));
}, },
sendFile: async (fileUrl, fileName) => { sendFile: async (fileUrl, fileName) => {
await (discordMessage.channel as TextChannel).send({ const messageData = {
files: [{ attachment: fileUrl, name: fileName }], files: [{ attachment: fileUrl, name: fileName }],
}); };
await this.safeSend(discordMessage.channel as TextChannel, messageData);
}, },
sendTyping: async () => { sendTyping: async () => {
await (discordMessage.channel as TextChannel).sendTyping(); await (discordMessage.channel as TextChannel).sendTyping();
}, },
reply: async (messageData) => { reply: async (messageData) => {
const sentMessage = await discordMessage.reply(messageData); const sentMessage = await this.safeReply(discordMessage, messageData);
return this.convertSentMessage(sentMessage); return this.convertSentMessage(sentMessage);
}, },
send: async (messageData) => { send: async (messageData) => {
const sentMessage = await (discordMessage.channel as TextChannel).send( const sentMessage = await this.safeSend(
discordMessage.channel as TextChannel,
messageData messageData
); );
return this.convertSentMessage(sentMessage); return this.convertSentMessage(sentMessage);
}, },
}; };
} }
// Helper method to safely send messages with length checks
private async safeSend(
target: TextChannel | DiscordUser,
messageData: string | { content?: string; [key: string]: any }
): Promise<DiscordMessage> {
let content: string | undefined;
if (typeof messageData === "string") {
content = messageData;
} else if (messageData.content) {
content = messageData.content;
}
if (content && content.length > this.MAX_MESSAGE_LENGTH) {
content = content.slice(0, this.TRUNCATED_MESSAGE_LENGTH);
if (typeof messageData === "string") {
messageData = content;
} else {
messageData.content = content;
}
}
if (target instanceof DiscordUser) {
// Ensure the DM channel is created before sending
const dmChannel = await target.createDM();
return await dmChannel.send(messageData);
} else {
return await target.send(messageData);
}
}
// Helper method to safely reply with length checks
private async safeReply(
message: DiscordMessage,
messageData: string | { content?: string; [key: string]: any }
): Promise<DiscordMessage> {
let content: string | undefined;
if (typeof messageData === "string") {
content = messageData;
} else if (messageData.content) {
content = messageData.content;
}
if (content && content.length > this.MAX_MESSAGE_LENGTH) {
content = content.slice(0, this.TRUNCATED_MESSAGE_LENGTH);
if (typeof messageData === "string") {
messageData = content;
} else {
messageData.content = content;
}
}
return await message.reply(messageData);
}
// Helper method to safely edit messages with length checks
private async safeEdit(
message: DiscordMessage,
data: string | { content?: string; [key: string]: any }
): Promise<DiscordMessage> {
let content: string | undefined;
if (typeof data === "string") {
content = data;
} else if (data.content) {
content = data.content;
}
if (content && content.length > this.MAX_MESSAGE_LENGTH) {
content = content.slice(0, this.TRUNCATED_MESSAGE_LENGTH);
if (typeof data === "string") {
data = content;
} else {
data.content = content;
}
}
return await message.edit(data);
}
} }
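safeSend, safeReply, and safeEdit above repeat the same length check; purely as a sketch (a hypothetical helper, not part of this commit), that logic could be shared:
// Hypothetical helper: clamp outgoing content to Discord's message length limit.
function truncateContent(
  messageData: string | { content?: string; [key: string]: any },
  maxLength = 2000,
  truncatedLength = 1500
): string | { content?: string; [key: string]: any } {
  const content =
    typeof messageData === "string" ? messageData : messageData.content;
  if (!content || content.length <= maxLength) return messageData;
  const truncated = content.slice(0, truncatedLength);
  return typeof messageData === "string"
    ? truncated
    : { ...messageData, content: truncated };
}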


@ -205,6 +205,7 @@ export const events = new Elysia()
} }
if (!headers.token) { if (!headers.token) {
console.log("Unauthorized");
return { error: "Unauthorized" }; return { error: "Unauthorized" };
} }
@ -212,6 +213,7 @@ export const events = new Elysia()
const user = userConfigs.find((config) => config.name === username); const user = userConfigs.find((config) => config.name === username);
if (!user) { if (!user) {
console.log("Unauthorized");
return { error: "Unauthorized" }; return { error: "Unauthorized" };
} }
@ -220,6 +222,7 @@ export const events = new Elysia()
); );
if (!found) { if (!found) {
console.log("Unauthorized");
return { error: "Unauthorized" }; return { error: "Unauthorized" };
} }


@ -14,6 +14,9 @@
}, },
"dependencies": { "dependencies": {
"@dqbd/tiktoken": "^1.0.15", "@dqbd/tiktoken": "^1.0.15",
"@langchain/community": "^0.3.11",
"@langchain/core": "^0.3.16",
"@langchain/openai": "^0.3.11",
"@nextcloud/files": "^3.8.0", "@nextcloud/files": "^3.8.0",
"@solyarisoftware/voskjs": "^1.2.8", "@solyarisoftware/voskjs": "^1.2.8",
"@types/node-cron": "^3.0.11", "@types/node-cron": "^3.0.11",
@ -30,6 +33,7 @@
"fuzzysort": "^3.0.2", "fuzzysort": "^3.0.2",
"i": "^0.3.7", "i": "^0.3.7",
"langchain": "^0.0.212", "langchain": "^0.0.212",
"llamaindex": "^0.8.0",
"mathjs": "^12.2.1", "mathjs": "^12.2.1",
"meta-fetcher": "^3.1.1", "meta-fetcher": "^3.1.1",
"minio": "^8.0.1", "minio": "^8.0.1",
@ -38,12 +42,14 @@
"node-cron": "^3.0.3", "node-cron": "^3.0.3",
"npm": "^10.2.5", "npm": "^10.2.5",
"openai": "^4.67.1", "openai": "^4.67.1",
"pg": "^8.13.1",
"pyodide": "^0.24.1", "pyodide": "^0.24.1",
"qrcode-terminal": "^0.12.0", "qrcode-terminal": "^0.12.0",
"quickchart-js": "^3.1.3", "quickchart-js": "^3.1.3",
"resend": "^4.0.0", "resend": "^4.0.0",
"serpapi": "^2.0.0", "serpapi": "^2.0.0",
"turndown": "^7.2.0", "turndown": "^7.2.0",
"uuid": "^11.0.2",
"whatsapp-web.js": "^1.26.0", "whatsapp-web.js": "^1.26.0",
"whisper-node": "^1.1.1", "whisper-node": "^1.1.1",
"xml2js": "^0.6.2", "xml2js": "^0.6.2",
@ -53,5 +59,9 @@
"zod": "^3.22.4", "zod": "^3.22.4",
"zod-to-json-schema": "^3.23.0", "zod-to-json-schema": "^3.23.0",
"zx": "^7.2.3" "zx": "^7.2.3"
} },
"trustedDependencies": [
"core-js",
"protobufjs"
]
} }


@ -270,7 +270,7 @@ async function executeAction(action: Action) {
tools = tools?.length ? tools : undefined; tools = tools?.length ? tools : undefined;
const response = await ask({ const response = await ask({
model: "gpt-4o", model: "gpt-4o-mini",
prompt: `You are an Action Executor. prompt: `You are an Action Executor.
You are called to execute an action based on the provided instruction. You are called to execute an action based on the provided instruction.


@ -193,7 +193,7 @@ export async function ask({
name, name,
}, },
]; ];
console.log("got image:", image_url?.slice(0, 20)); image_url && console.log("got image:", image_url?.slice(0, 20));
} else if (seed && !message) { } else if (seed && !message) {
// If seed is provided but no new message, just retrieve history // If seed is provided but no new message, just retrieve history
const history = getMessageHistory(seed); const history = getMessageHistory(seed);
@ -382,12 +382,12 @@ export async function get_transcription(
fs.writeFileSync(filePath, new Uint8Array(binaryData)); fs.writeFileSync(filePath, new Uint8Array(binaryData));
} else { } else {
// Treat input as a file URL and extract the file extension // Treat input as a file URL and extract the file extension
fileExtension = path.extname(input).slice(1).toLowerCase(); fileExtension = "ogg";
if (!["mp3", "ogg", "wav", "m4a"].includes(fileExtension)) { // if (!["mp3", "ogg", "wav", "m4a"].includes(fileExtension)) {
throw new Error( // throw new Error(
"The provided URL does not point to a valid audio file." // "The provided URL does not point to a valid audio file."
); // );
} // }
isAudio = true; isAudio = true;
// Step 2: Download the file from the URL // Step 2: Download the file from the URL
@ -418,10 +418,12 @@ export async function get_transcription(
// Step 3: Send the file to OpenAI's Whisper model for transcription // Step 3: Send the file to OpenAI's Whisper model for transcription
const transcription = await openai.audio.transcriptions.create({ const transcription = await openai.audio.transcriptions.create({
// model: "whisper-1", // model: "whisper-1",
model: "distil-whisper-large-v3-en", model: "whisper-large-v3-turbo",
file: fs.createReadStream(filePath), file: fs.createReadStream(filePath),
language: "en", // Optional prompt:
temperature: 0.0, // Optional "The audio may have email addresses or phonenumbers, please transcribe them in their respective formats.",
language: "en",
temperature: 0.1,
}); });
// Delete the temp file // Delete the temp file


@ -103,7 +103,7 @@ You can use the \`memory_manager\` tool to remember user preferences, such as wh
const response = await ask({ const response = await ask({
prompt, prompt,
model: "gpt-4o", model: "gpt-4o-mini",
message: `request: ${request} message: `request: ${request}
prefered_platform: ${prefered_platform} prefered_platform: ${prefered_platform}


@ -397,15 +397,17 @@ function registerListener(listener: EventListener) {
tools = tools?.length ? tools : undefined; tools = tools?.length ? tools : undefined;
const is_voice = listener.eventId === "on_voice_message"; const is_voice = listener.eventId === "on_voice_message";
const is_new_todo_note = listener.eventId === "new_todo_for_anya";
let attached_image: string | undefined = undefined; let attached_image: string | undefined = undefined;
if (is_voice) { if (is_voice || is_new_todo_note) {
tools = getTools( tools = getTools(
contextMessage.author.username, contextMessage.author.username,
contextMessage contextMessage
) as RunnableToolFunctionWithParse<any>[]; ) as RunnableToolFunctionWithParse<any>[];
}
if (is_voice) {
const audio = ((payload as any) ?? {}).transcription; const audio = ((payload as any) ?? {}).transcription;
if (audio && audio instanceof File) { if (audio && audio instanceof File) {
if (audio.type.includes("audio")) { if (audio.type.includes("audio")) {
@ -416,10 +418,12 @@ function registerListener(listener: EventListener) {
} }
} }
const otherContextData = (payload as any)?.other_context_data; console.log("Payload for voice event listener: ", payload);
const otherContextData = (payload as any)?.other_reference_data;
if (otherContextData instanceof File) { if (otherContextData instanceof File) {
if (otherContextData.type.includes("image")) { if (otherContextData.type.includes("image")) {
console.log("Got image");
// Read the file as a buffer // Read the file as a buffer
const buffer = await otherContextData.arrayBuffer(); const buffer = await otherContextData.arrayBuffer();
@ -434,13 +438,18 @@ function registerListener(listener: EventListener) {
console.log("The provided file is not an image."); console.log("The provided file is not an image.");
} }
} else { } else {
console.log("No valid file provided in other_context_data."); console.log(
"No valid file provided in other_context_data.",
otherContextData?.name,
otherContextData?.type
);
} }
} }
console.log("Running ASK for event listener: ", listener.description); console.log("Running ASK for event listener: ", listener.description);
const system_prompts = is_voice const system_prompts =
is_voice || is_new_todo_note
? await buildSystemPrompts(contextMessage) ? await buildSystemPrompts(contextMessage)
: undefined; : undefined;
@ -507,22 +516,42 @@ function registerListener(listener: EventListener) {
Your response must be in plain text without markdown or any other formatting. Your response must be in plain text without markdown or any other formatting.
`; `;
const new_todo_note_prompt = `You are in new todo note trigger mode.
The user added a new todo note for you in your todos file which triggered this event.
Do not remove the 'to anya' tag from the note if it's present, unless explicitly asked to do so as part of the instruction.
Make sure to think about your process and how you want to go about executing the todos step by step.
You can mark a todo as failed by adding "[FAILED]" at the start or end of the todo line.
- Event ID: ${eventId}
- Payload: ${JSON.stringify(payload)}
IMPORTANT:
PLEASE ask the notes manager to mark the note as done once you have completed the task; please send the manager the todo note and the actual path of the note.
Whatever you reply with will be sent to the user as a notification automatically. Do not use communication_manager to notify the same user.
`;
if (system_prompts) { if (system_prompts) {
prompt = `${system_prompts.map((p) => p.content).join("\n\n")}`; prompt = `${system_prompts.map((p) => p.content).join("\n\n")}`;
} }
const response = !is_voice const response = !(is_voice || is_new_todo_note)
? await ask({ ? await ask({
model: "gpt-4o-mini", model: "gpt-4o-mini",
prompt, prompt,
tools, tools,
}) })
: await ask({ : await ask({
model: attached_image ? "gpt-4o" : "gpt-4o", model: attached_image ? "gpt-4o" : "gpt-4o-mini",
prompt, prompt,
message: voice_prompt, message: is_voice ? voice_prompt : new_todo_note_prompt,
image_url: attached_image, image_url: attached_image ?? undefined,
seed: `voice-anya-${listener.id}-${eventId}`, seed: `${is_voice ? "voice-anya" : "todos-from-user"}-${
listener.id
}-${eventId}`,
tools, tools,
}); });
@ -539,7 +568,7 @@ function registerListener(listener: EventListener) {
if (notify) { if (notify) {
await contextMessage.send({ await contextMessage.send({
content, content,
flags: is_voice ? [4096] : undefined, flags: is_voice && !is_new_todo_note ? [4096] : undefined,
}); });
} else { } else {
console.log("Silenced Notification: ", content); console.log("Silenced Notification: ", content);


@ -58,6 +58,8 @@ import { search_whatsapp_contacts, SearchContactsParams } from "./whatsapp";
import { memory_manager_init } from "./memory-manager"; import { memory_manager_init } from "./memory-manager";
import { communication_manager_tool } from "./communication"; import { communication_manager_tool } from "./communication";
import { send_sys_log } from "../interfaces/log"; import { send_sys_log } from "../interfaces/log";
import { init_anya_todos_watcher, init_notes_watcher } from "./notes-executer";
import { initVectorStoreSync } from "./notes-vectors";
// get time function // get time function
const GetTimeParams = z.object({}); const GetTimeParams = z.object({});
@ -130,6 +132,10 @@ async function get_total_tokens({ model, from, to }: GetTotalTokensParams) {
}; };
} }
init_notes_watcher();
init_anya_todos_watcher();
initVectorStoreSync();
export function getTools( export function getTools(
username: string, username: string,
context_message: Message, context_message: Message,
@ -382,7 +388,10 @@ Try to fix any errors that are returned at least once before sending to the user
name: "reminders_manager", name: "reminders_manager",
schema: RemindersManagerParams, schema: RemindersManagerParams,
description: `Manage reminders using user's reminders. description: `Manage reminders using user's reminders.
You can just forward the user's request to this tool and it will handle the rest.`, You can just forward the user's request to this tool and it will handle the rest.
More detailed todos that don't need user notification will be managed by the notes manager tool instead.
`,
}), }),
}, },
{ {
@ -409,6 +418,7 @@ Try to fix any errors that are returned at least once before sending to the user
When to use: When to use:
if user talks about any notes, lists, journal, gym entry, standup, personal journal, etc. if user talks about any notes, lists, journal, gym entry, standup, personal journal, etc.
You can also use this for advanced todos that are more planning related. (these are not reminders, and will not notify the user)
`, `,
}), }),
}, },


@ -167,7 +167,7 @@ async function memoryManager(
const tools = memory_tools(manager_id, user_id); const tools = memory_tools(manager_id, user_id);
const response = await ask({ const response = await ask({
model: "gpt-4o", model: "gpt-4o-mini",
prompt: `You are a Memories Manager. prompt: `You are a Memories Manager.
You manage memories for other managers. You manage memories for other managers.

tools/notes-executer.ts (new file)

@ -0,0 +1,191 @@
import path from "path";
import { notesManager } from "./notes";
import { getNotesList, fetchFileContents } from "./notes";
import { discordAdapter } from "../interfaces";
import { userConfigs } from "../config";
import { eventManager } from "../interfaces/events";
// Watcher interval in milliseconds (1 minute)
const WATCH_INTERVAL = 1 * 60 * 1000;
// Function to check the notes for changes
async function watchNotes() {
console.log("Watching notes for changes...");
try {
const notesListResult = await getNotesList();
if (!notesListResult.success) {
console.error("Failed to fetch notes list: ", notesListResult.message);
return;
}
const notesList = JSON.parse(String(notesListResult.message));
const flatFileList = flattenNotesTree(notesList);
for (const filePath of flatFileList) {
const fileContentResult = await fetchFileContents({ path: filePath });
if (!fileContentResult.success) {
console.error("Failed to fetch file contents for ", filePath);
continue;
}
const fileContent = fileContentResult.message.toString();
const lines = fileContent.split("\n");
for (const line of lines) {
if (line.startsWith("!!")) {
console.log("Found instruction in file: ", filePath);
const instruction = line.substring(2).trim();
await handleNoteInstruction(filePath, fileContent, instruction);
break; // Only process the first !! line per file
}
}
}
} catch (error) {
console.error("Error watching notes: ", error);
}
}
// Helper function to flatten the notes tree structure into a list of file paths
function flattenNotesTree(tree: any, currentPath: string = ""): string[] {
let fileList: string[] = [];
for (const key in tree) {
if (tree[key] === null) {
fileList.push(path.join(currentPath, key));
} else {
fileList = fileList.concat(
flattenNotesTree(tree[key], path.join(currentPath, key))
);
}
}
return fileList;
}
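An illustrative input/output pair (hypothetical tree, shown as a comment), assuming files map to null and folders to nested objects, which is how this helper treats the parsed notes list:
// flattenNotesTree({ journal: { "2024-10.md": null }, "todo.md": null })
// => ["journal/2024-10.md", "todo.md"]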
// Function to handle the note instruction
async function handleNoteInstruction(
filePath: string,
fileContent: string,
instruction: string
) {
try {
const creator = userConfigs.find((u) => u.roles.includes("creator"));
const creator_discord_id = creator?.identities.find(
(i) => i.platform === "discord"
)?.id;
if (!creator_discord_id) {
console.error("Creator discord id not found in user configs");
return;
}
const context_message = await discordAdapter.createMessageInterface(
creator_discord_id
);
const response = await notesManager(
{
request: `The following is a note that the user left a message for you in.
The file path is: ${filePath}
The user's instruction for you is in the file content and starts with '!!', followed by the message or an attached audio message that you can transcribe to get the actual instructions.
file content:
---
${fileContent}
---
Make sure to remove the user's instruction line (line that starts with '!!') and the respective audio message if there is one after you have read it and done the necessary action.
`,
},
context_message
);
console.log(
`Handled instruction for file: ${filePath}. Response:`,
response.response
);
response.response.choices[0].message.content?.toString() &&
(await context_message.send({
content: response.response.choices[0].message.content?.toString(),
}));
} catch (error) {
console.error(
`Failed to handle note instruction for file: ${filePath}`,
error
);
}
}
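As a hypothetical example of a note watchNotes would pick up (not from the repo), the first line starting with '!!' is read as the instruction and handed to notesManager along with the file path and full content:
# Gym log
!! Summarize this week's entries and append the summary at the top of this note.
- Monday: squats 5x5
- Wednesday: bench 5x5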
// Start the watcher
export function init_notes_watcher() {
setInterval(async () => {
console.time("watchNotes");
await watchNotes();
console.timeEnd("watchNotes");
}, WATCH_INTERVAL);
}
console.log("Started watching notes for changes every 2 minutes...");
// Watcher for notes with "to anya" in the last non-empty line
async function watchAnyaTodos() {
console.log("Watching notes for 'to anya' instructions...");
try {
const notesListResult = await getNotesList();
if (!notesListResult.success) {
console.error("Failed to fetch notes list: ", notesListResult.message);
return;
}
const notesList = JSON.parse(String(notesListResult.message));
const flatFileList = flattenNotesTree(notesList);
for (const filePath of flatFileList) {
const fileContentResult = await fetchFileContents({ path: filePath });
if (!fileContentResult.success) {
console.error("Failed to fetch file contents for ", filePath);
continue;
}
const fileContent = fileContentResult.message.toString();
const lines = fileContent
.split("\n")
.filter((line) => line.trim() !== "");
if (lines.length === 0) {
continue;
}
// check if the obsidian note has a tag called "to-anya"
const is_tagged = lines.some((line) => line.includes("#to-anya"));
// check if any of the lines contains the string "[ ]" in the 1st 50% of the line. If it does, then return true, else return false
const has_undone_todos = lines.some((line) => {
const half_line = line.slice(0, Math.floor(line.length / 2));
return half_line.includes("[ ]");
});
if (is_tagged && has_undone_todos) {
console.log("Found 'to anya' instruction in file: ", filePath);
if (!fileContent.includes("[FAILED]")) {
await eventManager.emitWithResponse("new_todo_for_anya", {
note_path: filePath,
note_content: fileContent,
});
}
}
}
} catch (error) {
console.error("Error watching notes for 'to anya': ", error);
}
}
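A hypothetical note that would emit new_todo_for_anya: it carries the #to-anya tag, has at least one unchecked "[ ]" item near the start of a line, and contains no "[FAILED]" marker:
#to-anya
- [ ] Pull yesterday's gym stats into the journal
- [x] Book dentist appointment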
// Start the watcher for notes with "to anya" instructions
export function init_anya_todos_watcher() {
let isRunning = false;
setInterval(async () => {
if (!isRunning) {
isRunning = true;
console.time("watchAnyaTodos");
await watchAnyaTodos();
console.timeEnd("watchAnyaTodos");
isRunning = false;
}
}, WATCH_INTERVAL);
}
console.log("Started watching notes for 'to anya' instructions...");

tools/notes-vectors.ts (new file)

@ -0,0 +1,150 @@
import { createClient } from "webdav";
import {
PGVectorStore,
DistanceStrategy,
} from "@langchain/community/vectorstores/pgvector";
import { OpenAIEmbeddings } from "@langchain/openai";
import { v4 as uuidv4 } from "uuid";
import * as crypto from "crypto";
let isSyncing = false;
// Initialize WebDAV client
const webdavClient = createClient(
"http://192.168.29.85/remote.php/dav/files/raj/",
{
username: process.env.NEXTCLOUD_USERNAME!,
password: process.env.NEXTCLOUD_PASSWORD!,
}
);
// Helper function to calculate checksum of content
function calculateChecksum(content: string): string {
return crypto.createHash("md5").update(content, "utf8").digest("hex");
}
// Function to get all files from 'notes' directory via WebDAV
async function getAllFiles(
path: string
): Promise<{ filename: string; content: string }[]> {
const contents = await webdavClient.getDirectoryContents(path, {
deep: true,
});
const files = Array.isArray(contents) ? contents : contents.data;
const fileContents: { filename: string; content: string }[] = [];
for (const file of files) {
if (
file.type === "file" &&
!file.basename.startsWith(".") &&
!file.filename.includes("/.obsidian/") &&
(file.filename.endsWith(".txt") || file.filename.endsWith(".md"))
) {
const content = await webdavClient.getFileContents(file.filename, {
format: "text",
});
if (typeof content === "string") {
fileContents.push({ filename: file.filename, content });
}
}
}
return fileContents;
}
// Setup PGVectorStore
const embeddings = new OpenAIEmbeddings({
model: "text-embedding-ada-002",
});
const config = {
postgresConnectionOptions: {
type: "postgres",
host: "127.0.0.1",
port: 5432,
user: "postgres",
password: "defaultpwd",
database: "postgres",
},
tableName: "anya",
columns: {
idColumnName: "id",
vectorColumnName: "vector",
contentColumnName: "content",
metadataColumnName: "metadata",
},
distanceStrategy: "cosine" as DistanceStrategy,
};
const vectorStore = await PGVectorStore.initialize(embeddings, config);
// Main function to sync vector store
export async function syncVectorStore() {
if (isSyncing) {
console.log("syncVectorStore is already running. Skipping this run.");
return;
}
isSyncing = true;
try {
console.log("Starting vector store sync...");
const files = await getAllFiles("notes");
for (const file of files) {
const content = `filename: ${file.filename}\n${file.content}`;
// Calculate checksum
const checksum = calculateChecksum(content);
// Check if the document already exists using direct SQL query
const queryResult = await vectorStore.client?.query(
`SELECT * FROM ${config.tableName} WHERE metadata->>'filename' = $1`,
[file.filename]
);
if (queryResult && queryResult.rows.length > 0) {
const existingDocument = queryResult.rows[0];
const existingChecksum = existingDocument.metadata?.checksum;
// If the checksum matches, skip updating
if (existingChecksum === checksum) {
// console.log(`Skipping ${file.filename}, checksum unchanged.`);
continue;
}
// If the content is different, delete the old version
await vectorStore.delete({ ids: [existingDocument.id] });
console.log(`Deleted old version of ${file.filename}`);
}
// Load the document
const document = {
pageContent: content,
metadata: { checksum, filename: file.filename, id: uuidv4() },
};
// Add or update the document in the vector store
await vectorStore.addDocuments([document], {
ids: [document.metadata.id],
});
console.log(`Indexed ${file.filename}`);
}
console.log("Vector store sync completed.");
} catch (error) {
console.error("Error during vector store sync:", error);
} finally {
isSyncing = false;
}
}
export async function initVectorStoreSync() {
console.log("Starting vector store sync...");
await syncVectorStore();
setInterval(syncVectorStore, 1000 * 60 * 2); // Every 2 minutes
}
export function semantic_search_notes(query: string, limit: number) {
return vectorStore.similaritySearch(query, limit);
}
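A minimal usage sketch (illustrative query and fields only; similaritySearch resolves to LangChain documents whose metadata carries the filename set during sync):
// const hits = await semantic_search_notes("last week's standup notes", 4);
// hits.forEach((doc) => console.log(doc.metadata.filename, doc.pageContent.length));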


@ -1,11 +1,15 @@
import { createClient, ResponseDataDetailed } from "webdav"; import { createClient, FileStat, ResponseDataDetailed } from "webdav";
import { z } from "zod"; import { z } from "zod";
import { zodFunction } from "."; import { zodFunction } from ".";
import { RunnableToolFunction } from "openai/lib/RunnableFunction.mjs"; import { RunnableToolFunction } from "openai/lib/RunnableFunction.mjs";
import Fuse from "fuse.js"; import Fuse from "fuse.js";
import { ask } from "./ask"; import { ask, get_transcription } from "./ask";
import { Message } from "../interfaces/message"; import { Message } from "../interfaces/message";
import { memory_manager_guide, memory_manager_init } from "./memory-manager"; import { memory_manager_guide, memory_manager_init } from "./memory-manager";
import { semantic_search_notes, syncVectorStore } from "./notes-vectors";
import { readFileSync, writeFileSync } from "fs";
import { join } from "path";
import { tmpdir } from "os";
// Initialize WebDAV client // Initialize WebDAV client
const client = createClient("http://192.168.29.85/remote.php/dav/files/raj/", { const client = createClient("http://192.168.29.85/remote.php/dav/files/raj/", {
@ -15,6 +19,7 @@ const client = createClient("http://192.168.29.85/remote.php/dav/files/raj/", {
// Types // Types
export type OperationResult = { success: boolean; message: string | object }; export type OperationResult = { success: boolean; message: string | object };
// Schemas for function parameters // Schemas for function parameters
export const CreateFileParams = z.object({ export const CreateFileParams = z.object({
path: z.string().describe("The path for the new file."), path: z.string().describe("The path for the new file."),
@ -50,54 +55,100 @@ export const SearchFilesParams = z.object({
export type SearchFilesParams = z.infer<typeof SearchFilesParams>; export type SearchFilesParams = z.infer<typeof SearchFilesParams>;
export const TagParams = z.object({ export const TagParams = z.object({
path: z.string().describe("The path to the file to tag."),
tag: z.string().describe("The tag to add to the file."), tag: z.string().describe("The tag to add to the file."),
}); });
export type TagParams = z.infer<typeof TagParams>; export type TagParams = z.infer<typeof TagParams>;
// Helper function to remove the "notes/" prefix export const FetchFileContentsParams = z.object({
function normalizePath(path: string): string { path: z
return path.startsWith("notes/") ? path.substring(6) : path; .string()
.describe("The path to the file whose content is to be fetched."),
});
export type FetchFileContentsParams = z.infer<typeof FetchFileContentsParams>;
export const UpdateFileParams = z.object({
path: z.string().describe("The path to the note file to be updated."),
new_content: z
.string()
.describe("The new content to replace the existing content."),
});
export type UpdateFileParams = z.infer<typeof UpdateFileParams>;
export const NotesManagerParams = z.object({
request: z.string().describe("User's request regarding notes."),
});
export type NotesManagerParams = z.infer<typeof NotesManagerParams>;
export const SemanticSearchNotesParams = z.object({
query: z
.string()
.describe(
"The query to search for semantically similar notes, this can be something some content or even file name."
),
});
type SemanticSearchNotesParams = z.infer<typeof SemanticSearchNotesParams>;
async function semanticSearchNotes({
query,
}: SemanticSearchNotesParams): Promise<OperationResult> {
try {
const results = await semantic_search_notes(query, 4);
return {
success: true,
message: results.map((r) => r.pageContent),
};
} catch (error: any) {
return { success: false, message: error.message };
}
} }
// Function to create a file // Helper function to normalize paths
function normalizePath(path: string): string {
if (path.startsWith("/notes/")) return path.substring(7);
if (path.startsWith("notes/")) return path.substring(6);
if (path === "/notes" || path === "notes") return "";
return path;
}
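// --- Illustrative sketch (not part of this commit) ---
// A few hypothetical sanity checks mirroring the branches of normalizePath;
// the paths are invented.
console.assert(normalizePath("/notes/journal/standup/2024-10-31.md") === "journal/standup/2024-10-31.md");
console.assert(normalizePath("notes/lists/shopping.md") === "lists/shopping.md");
console.assert(normalizePath("/notes") === "");
console.assert(normalizePath("lists/movies.md") === "lists/movies.md"); // already relative, returned unchanged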
// File and directory operations
export async function createFile({ export async function createFile({
path, path,
content, content,
}: CreateFileParams): Promise<OperationResult> { }: CreateFileParams): Promise<OperationResult> {
try { try {
await client.putFileContents(`/notes/${normalizePath(path)}`, content); await client.putFileContents(`/notes/${normalizePath(path)}`, content);
await syncVectorStore();
return { success: true, message: "File created successfully" }; return { success: true, message: "File created successfully" };
} catch (error: any) { } catch (error: any) {
return { success: false, message: error.message }; return { success: false, message: error.message };
} }
} }
// Function to create a directory
export async function createDirectory({ export async function createDirectory({
path, path,
}: CreateDirectoryParams): Promise<OperationResult> { }: CreateDirectoryParams): Promise<OperationResult> {
try { try {
await client.createDirectory(`/notes/${normalizePath(path)}`); await client.createDirectory(`/notes/${normalizePath(path)}`);
await syncVectorStore();
return { success: true, message: "Directory created successfully" }; return { success: true, message: "Directory created successfully" };
} catch (error: any) { } catch (error: any) {
return { success: false, message: error.message }; return { success: false, message: error.message };
} }
} }
// Function to delete a file or directory
export async function deleteItem({ export async function deleteItem({
path, path,
}: DeleteItemParams): Promise<OperationResult> { }: DeleteItemParams): Promise<OperationResult> {
try { try {
await client.deleteFile(`/notes/${normalizePath(path)}`); await client.deleteFile(`/notes/${normalizePath(path)}`);
await syncVectorStore();
return { success: true, message: "Deleted successfully" }; return { success: true, message: "Deleted successfully" };
} catch (error: any) { } catch (error: any) {
return { success: false, message: error.message }; return { success: false, message: error.message };
} }
} }
// Function to move a file or directory
export async function moveItem({ export async function moveItem({
source_path, source_path,
destination_path, destination_path,
@ -107,46 +158,14 @@ export async function moveItem({
`/notes/${normalizePath(source_path)}`, `/notes/${normalizePath(source_path)}`,
`/notes/${normalizePath(destination_path)}` `/notes/${normalizePath(destination_path)}`
); );
await syncVectorStore();
return { success: true, message: "Moved successfully" }; return { success: true, message: "Moved successfully" };
} catch (error: any) { } catch (error: any) {
return { success: false, message: error.message }; return { success: false, message: error.message };
} }
} }
// Function to search for files by name // Search functions
export async function searchFilesByName({
query,
}: SearchFilesParams): Promise<OperationResult> {
try {
const files = await client.getDirectoryContents("notes", {
details: true,
deep: true,
});
// If `files` is of type `ResponseDataDetailed<FileStat[]>`, you need to access the data property
const fileList = Array.isArray(files) ? files : files.data;
// Setup fuse.js with the filenames
const fuse = new Fuse(fileList, {
keys: ["filename"], // Search within filenames
threshold: 0.3, // Adjust this to control the fuzziness (0 = exact match, 1 = very fuzzy)
});
const matchingFiles = fuse.search(query).map((result) => result.item);
return {
success: true,
message:
matchingFiles.length > 0
? matchingFiles.map((file) => file.filename).join(", ")
: "No matching files found",
};
} catch (error: any) {
return { success: false, message: error.message };
}
}
// Function to search for files by content
export async function searchFilesByContent({ export async function searchFilesByContent({
query, query,
}: SearchFilesParams): Promise<OperationResult> { }: SearchFilesParams): Promise<OperationResult> {
@ -156,42 +175,33 @@ export async function searchFilesByContent({
deep: true, deep: true,
}); });
// If `files` is of type `ResponseDataDetailed<FileStat[]>`, you need to access the data property
const fileList = Array.isArray(files) ? files : files.data; const fileList = Array.isArray(files) ? files : files.data;
const matchingFiles: string[] = [];
// First, filter files by filename using fuse.js // Search by filename using Fuse.js
const fuseFilename = new Fuse(fileList, { const fuseFilename = new Fuse(fileList, {
keys: ["basename"], // Search within filenames keys: ["basename"],
threshold: 0.3, // Adjust this to control the fuzziness threshold: 0.3,
}); });
const matchingFilesByName = fuseFilename const matchingFilesByName = fuseFilename
.search(query) .search(query)
.map((result) => result.item); .map((result) => result.item.filename);
const matchingFilesByContent = []; // Search by file content
// Then, check file content
for (const file of fileList) { for (const file of fileList) {
if (file.type === "file") { if (file.type === "file") {
const content = await client.getFileContents(file.filename, { const content = await client.getFileContents(file.filename, {
format: "text", format: "text",
}); });
const fuseContent = new Fuse([String(content)], { if (typeof content === "string" && content.includes(query)) {
threshold: 0.3, // Adjust for content search matchingFiles.push(normalizePath(file.filename));
});
const contentMatch = fuseContent.search(query);
if (contentMatch.length > 0) {
matchingFilesByContent.push(normalizePath(file.filename));
} }
} }
} }
// Combine results from filename and content search // Combine and deduplicate results
const combinedResults = [ const combinedResults = [
...new Set([ ...new Set([...matchingFilesByName, ...matchingFiles]),
...matchingFilesByName.map((f) => f.filename),
...matchingFilesByContent,
]),
]; ];
return { return {
@ -206,41 +216,56 @@ export async function searchFilesByContent({
} }
} }
// Placeholder for tagging functionality export async function searchFilesByTag({ tag }: TagParams) {
export async function tagFile({ const files = await client.getDirectoryContents("notes", {
path, details: true,
tag, deep: true,
}: TagParams): Promise<OperationResult> { });
return { success: false, message: "Tagging not supported with WebDAV." };
const fileList = Array.isArray(files) ? files : files.data;
const matchingFiles: Array<{ filename: string; content: string }> = [];
for (const file of fileList) {
if (file.type === "file") {
const fileContent = await client.getFileContents(file.filename, {
format: "text",
});
if (typeof fileContent === "string" && fileContent.includes(tag)) {
matchingFiles.push({ filename: file.filename, content: fileContent });
}
}
} }
// Placeholder for searching files by tag return matchingFiles;
export async function searchFilesByTag({
tag,
}: TagParams): Promise<OperationResult> {
return { success: false, message: "Tagging not supported with WebDAV." };
} }
// Notes list caching
let cachedNotesList: string | null = null;
let lastFetchTime: number | null = null;
export async function getNotesList(): Promise<OperationResult> { export async function getNotesList(): Promise<OperationResult> {
try { try {
const currentTime = Date.now();
if (
cachedNotesList &&
lastFetchTime &&
currentTime - lastFetchTime < 5000
) {
return { success: true, message: cachedNotesList };
}
const directoryContents = await fetchDirectoryContents("notes"); const directoryContents = await fetchDirectoryContents("notes");
const treeStructure = buildTree(directoryContents);
cachedNotesList = JSON.stringify(treeStructure, null, 2);
lastFetchTime = currentTime;
const treeStructure = buildTree(directoryContents as any); return { success: true, message: cachedNotesList };
return {
success: true,
message: JSON.stringify(treeStructure, null, 2),
};
} catch (error: any) { } catch (error: any) {
return { return { success: false, message: error.message };
success: false,
message: error.message,
};
} }
} }
async function fetchDirectoryContents( async function fetchDirectoryContents(path: string): Promise<FileStat[]> {
path: string
): Promise<ReturnType<typeof client.getDirectoryContents>> {
let contents = await client.getDirectoryContents(path); let contents = await client.getDirectoryContents(path);
// Normalize contents to always be an array of FileStat // Normalize contents to always be an array of FileStat
@ -252,7 +277,7 @@ async function fetchDirectoryContents(
for (const item of contents) { for (const item of contents) {
if (item.type === "directory") { if (item.type === "directory") {
const subdirectoryContents = await fetchDirectoryContents(item.filename); const subdirectoryContents = await fetchDirectoryContents(item.filename);
contents = contents.concat(subdirectoryContents as any); contents = contents.concat(subdirectoryContents);
} }
} }
@ -264,11 +289,17 @@ function buildTree(files: any[]): any {
files.forEach((file) => { files.forEach((file) => {
const parts: string[] = file.filename.replace(/^\/notes\//, "").split("/"); const parts: string[] = file.filename.replace(/^\/notes\//, "").split("/");
// Ignore files inside the .obsidian folder
if (parts.some((part) => part.startsWith(".obsidian"))) {
return;
}
let current = tree; let current = tree;
parts.forEach((part, index) => { parts.forEach((part, index) => {
if (!current[part]) { if (!current[part]) {
current[part] = index === parts.length - 1 ? null : {}; // Leaf nodes are set to null current[part] = index === parts.length - 1 ? null : {};
} }
current = current[part]; current = current[part];
}); });
@ -277,62 +308,194 @@ function buildTree(files: any[]): any {
return tree; return tree;
} }
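// --- Illustrative sketch (not part of this commit) ---
// A hypothetical input and the tree buildTree would produce for it; the
// filenames are invented. buildTree turns a flat WebDAV listing into a nested
// object whose leaf entries are null.
const sampleTree = buildTree([
  { filename: "/notes/journal/standup/2024-10-31.md" },
  { filename: "/notes/lists/shopping.md" },
  { filename: "/notes/.obsidian/app.json" }, // dropped by the .obsidian filter above
]);
// sampleTree is roughly:
// {
//   journal: { standup: { "2024-10-31.md": null } },
//   lists: { "shopping.md": null },
// }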
export const FetchFileContentsParams = z.object({ // File content operations
path: z
.string()
.describe("The path to the file whose content is to be fetched."),
});
export type FetchFileContentsParams = z.infer<typeof FetchFileContentsParams>;
// The fetchFileContents function
export async function fetchFileContents({ export async function fetchFileContents({
path, path,
}: FetchFileContentsParams): Promise<OperationResult> { }: FetchFileContentsParams): Promise<OperationResult> {
try { try {
// Fetch the file content from the WebDAV server const fileContent = await client.getFileContents(
const fileContent: ResponseDataDetailed<string> = `/notes/${normalizePath(path)}`,
(await client.getFileContents(`/notes/${normalizePath(path)}`, { { format: "text", details: true }
format: "text", );
details: true,
})) as ResponseDataDetailed<string>;
return { if (typeof fileContent === "string") {
success: true, // Should not happen when details is true
message: fileContent, return { success: true, message: fileContent };
}; } else if ("data" in fileContent) {
} catch (error: any) { return { success: true, message: fileContent.data };
} else {
return { return {
success: false, success: false,
message: error.message, message: "Unexpected response format from getFileContents.",
}; };
} }
} catch (error: any) {
return { success: false, message: error.message };
}
} }
export const UpdateFileParams = z.object({
path: z.string().describe("The path to the note file to be updated."),
new_content: z
.string()
.describe("The new content to replace the existing content."),
});
export type UpdateFileParams = z.infer<typeof UpdateFileParams>;
export async function updateNote({ export async function updateNote({
path, path,
new_content, new_content,
}: UpdateFileParams): Promise<OperationResult> { }: UpdateFileParams): Promise<OperationResult> {
try { try {
// Fetch the existing content to ensure the file exists and to avoid overwriting unintentionally await client.putFileContents(`/notes/${normalizePath(path)}`, new_content);
const existingContent = await client.getFileContents( await syncVectorStore();
`/notes/${normalizePath(path)}`, return { success: true, message: "Note updated successfully" };
{ } catch (error: any) {
return { success: false, message: error.message };
}
}
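// --- Illustrative sketch (not part of this commit) ---
// A hypothetical end-to-end run of the CRUD helpers above; the path and note
// contents are invented.
async function crudDemo() {
  const path = "journal/general/2024-10-31.md";

  await createFile({ path, content: "# Thursday 31st of October\n" });

  const read = await fetchFileContents({ path });
  if (read.success) console.log(read.message);

  await updateNote({
    path,
    new_content: "# Thursday 31st of October\n- synced the vault\n",
  });

  await deleteItem({ path });
}

crudDemo().catch(console.error);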
// Caching for tag-based searches
let cachedFiles: Array<{ filename: string; content: string }> | null = null;
let isUpdatingCache = false;
export async function searchFilesByTagWithCache({ tag }: TagParams) {
if (cachedFiles) {
if (!isUpdatingCache) {
console.log("Updating cache");
setTimeout(() => updateCache(tag), 0);
}
return cachedFiles;
}
cachedFiles = await updateCache(tag);
return cachedFiles;
}
async function updateCache(
tag: string
): Promise<Array<{ filename: string; content: string }>> {
if (isUpdatingCache) {
return cachedFiles || [];
}
isUpdatingCache = true;
const files = await client.getDirectoryContents("notes", {
details: true,
deep: true,
});
const fileList = Array.isArray(files) ? files : files.data;
const matchingFiles: Array<{ filename: string; content: string }> = [];
for (const file of fileList) {
if (
file.type === "file" &&
(file.filename.endsWith(".md") || file.filename.endsWith(".txt"))
) {
const fileContent = await client.getFileContents(file.filename, {
format: "text", format: "text",
});
if (typeof fileContent === "string" && fileContent.includes(tag)) {
matchingFiles.push({ filename: file.filename, content: fileContent });
}
}
}
cachedFiles = matchingFiles;
isUpdatingCache = false;
return matchingFiles;
}
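// --- Illustrative sketch (not part of this commit) ---
// A minimal example of the stale-while-revalidate behavior above: the first
// call blocks while the tag search walks the vault, later calls return the
// cached list immediately and trigger a background refresh via updateCache.
// Note that cachedFiles is shared across tags, so this assumes a single tag.
async function promptNotesDemo() {
  // First call: populates the cache.
  const first = await searchFilesByTagWithCache({ tag: "#notes-manager" });

  // Later call: served from cachedFiles, refreshed in the background.
  const second = await searchFilesByTagWithCache({ tag: "#notes-manager" });

  console.log(first.length, second.length);
}

promptNotesDemo().catch(console.error);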
// Notes manager integration
export async function notesManager(
{ request }: NotesManagerParams,
context_message: Message
) {
const notesManagerPromptFiles = await searchFilesByTagWithCache({
tag: "#notes-manager",
});
const tools = webdav_tools.concat(
memory_manager_init(context_message, "notes_manager")
);
const potentially_relevant_files = await semantic_search_notes(request, 4);
const potentially_relevant_files_paths = potentially_relevant_files.map(
(f) => f.metadata.filename
);
const response = await ask({
model: "gpt-4o",
prompt: `You are an Obsidian vault manager.
Ensure the vault remains organized, filenames and paths are correct, and relevant files are linked to each other.
You can try creating canvas files that use the open JSON Canvas format.
- **Today's Date:** ${new Date().toDateString()}
- **ALL Vault's File structure for context:**
---
${(await getNotesList()).message}
---
${
potentially_relevant_files_paths.length > 0
? `
- **Potentially relevant files:**
You can use these files to get more context or to link to the notes you are creating/updating.
---
${potentially_relevant_files_paths.join("\n")}
---`
: ""
}
- **User Notes/Instructions for you:**
---
${notesManagerPromptFiles.map((f) => f.content).join("\n")}
---
Note: When the user is trying to create/add a note, check the templates directory for a relevant template. If one exists, fetch it and create the note based on it.
`,
message: request,
seed: `notes-${context_message.channelId}`,
tools: tools as any,
});
return { response };
}
// Schema for the transcription function parameters
export const TranscriptionParams = z.object({
file_path: z
.string()
.describe("The path to the audio file to be transcribed."),
});
export type TranscriptionParams = z.infer<typeof TranscriptionParams>;
// Tool for handling transcription requests
export async function transcribeAudioFile({
file_path,
}: TranscriptionParams): Promise<OperationResult> {
try {
// Download the audio file from WebDAV
const audioFileBuffer = await client.getFileContents(
`/notes/${normalizePath(file_path)}`,
{
format: "binary",
} }
); );
// Update the file with the new content if (!Buffer.isBuffer(audioFileBuffer)) {
await client.putFileContents(`/notes/${normalizePath(path)}`, new_content); throw new Error("Failed to download audio file as Buffer.");
}
return { success: true, message: "Note updated successfully" }; // Convert the Buffer to a base64 string
const audioFileBase64 = audioFileBuffer.toString("base64");
// Transcribe the audio file
const transcription = await get_transcription(
audioFileBase64,
true,
file_path
);
return {
success: true,
message: transcription,
};
} catch (error: any) { } catch (error: any) {
return { success: false, message: error.message }; return { success: false, message: error.message };
} }
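// --- Illustrative sketch (not part of this commit) ---
// A hypothetical call to the transcription helper above; the file path is invented.
async function transcriptionDemo() {
  const result = await transcribeAudioFile({
    file_path: "journal/voice/2024-10-31.m4a",
  });
  if (result.success) console.log(result.message); // transcribed text
}

transcriptionDemo().catch(console.error);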
@ -346,6 +509,13 @@ export let webdav_tools: RunnableToolFunction<any>[] = [
schema: z.object({}), schema: z.object({}),
description: "Get the list of note files and directories.", description: "Get the list of note files and directories.",
}), }),
zodFunction({
function: transcribeAudioFile,
name: "transcribeAudioFile",
schema: TranscriptionParams,
description:
"Transcribe an audio file specified by the provided file path.",
}),
zodFunction({ zodFunction({
function: fetchFileContents, function: fetchFileContents,
name: "fetchNoteFileContents", name: "fetchNoteFileContents",
@ -382,115 +552,17 @@ export let webdav_tools: RunnableToolFunction<any>[] = [
schema: MoveItemParams, schema: MoveItemParams,
description: "Move a note file or directory.", description: "Move a note file or directory.",
}), }),
// zodFunction({
// function: searchFilesByName,
// name: "searchNotesFilesByName",
// schema: SearchFilesParams,
// description: "Search notes by filename.",
// }),
zodFunction({ zodFunction({
function: searchFilesByContent, function: semanticSearchNotes,
name: "searchNotesFilesByContent", name: "semanticSearchNotes",
schema: SearchFilesParams, schema: SemanticSearchNotesParams,
description: "Search notes by content.", description: `Search notes by their semantically.
}),
zodFunction({
function: tagFile,
name: "tagNoteFile",
schema: TagParams,
description: "Add a tag to a note file.",
}),
// zodFunction({
// function: searchFilesByTag,
// name: "searchNotesFilesByTag",
// schema: TagParams,
// description: "Search notes by tag.",
// }),
];
export function getNotesSystemPrompt() { You can use this to search by:
return `The notes system manages a structured file system for organizing and retrieving notes using Nextcloud via WebDAV. All notes are stored in the 'notes' directory, with subdirectories for different content types. 1. Topic
2. Content
**Key Directories:** 3. File Name
4. Tags
- **Root**: Contains a 'readme' summarizing the structure.
- **Journal**: Logs daily events and activities. Subdirectories include:
- **general**: General daily events or notes.
- **standup**: Work-related standup notes. Filenames should be dates in YYYY-MM-DD format.
- **personal**: Personal life events, same format as standup notes.
- **gym**: Gym or workout activities.
- **Lists**: Contains lists of items or tasks. Subdirectories can organize different list types.
**Standup and Personal Note Template:**
- **Filename**: Date in YYYY-MM-DD format.
- **Title**: Human-readable date (e.g., "Thursday 15th of July"), year not necessary.
- **Updates Section**: List of updates describing the day's events.
- **Summary Section**: A summary of the main points.
**Gym Note Template:**
- **Filename**: Date in YYYY-MM-DD format.
- **Title**: Gym day and date (e.g., "Pull Day - Thursday 15th of July").
- **Activity**: Exercises performed, sets, reps, weights.
- **Progress Report**: Progress updates, achievements, challenges, comparisons with previous workouts, suggestions for improvement.
**Lists Template:**
- **Directory Structure**: Create subdirectories within 'lists' for different types (e.g., 'shows', 'movies', 'shopping').
- **Filename**: Each file represents a list item with context. For 'shopping', use a single file like 'shopping.md'.
**Functionality:**
- Create, update, delete and move notes by filename or content.
- The \`updateNote\` function modifies existing notes.
This system ensures efficient note management, avoiding duplication, maintaining organization, and following structured templates for work and personal notes.`;
}
export const NotesManagerParams = z.object({
request: z.string().describe("User's request regarding notes."),
});
export type NotesManagerParams = z.infer<typeof NotesManagerParams>;
export async function notesManager(
{ request }: NotesManagerParams,
context_message: Message
) {
const tools = webdav_tools.concat(
memory_manager_init(context_message, "notes_manager")
);
const response = await ask({
model: "gpt-4o",
prompt: `You are a notes manager for the 'notes' directory in Nextcloud.
Your job is to understand the user's request (e.g., create, update, delete, move, list) and handle it using the available tools. Ensure the 'notes' directory remains organized, filenames and paths are correct, and duplication is prevented.
Avoid running \`fetchNoteFileContents\` unnecessarily, as it fetches the entire file content and is resource-intensive.
**More about the Notes System:**
${getNotesSystemPrompt()}
Follow the above guidelines to manage notes efficiently.
----
${memory_manager_guide("notes_manager", context_message.author.id)}
----
**Live Values:**
- **Today's Date:** ${new Date().toDateString()}
- **Current Notes List:**
${(await getNotesList()).message}
`, `,
message: request, }),
seed: `notes-${context_message.channelId}`, ];
tools: tools as any,
});
return { response };
}

File diff suppressed because one or more lines are too long