A developer-first persistent memory system for LLM applications. Get up and running in minutes.
npm install aimemory-core
import { createAIMemory, EmbeddingFunction } from 'aimemory-core';
// Your embedding function (OpenAI, Cohere, etc.)
const embeddingFunction: EmbeddingFunction = async (text) => {
return await openai.embeddings.create({
model: 'text-embedding-3-small',
input: text,
}).then(res => res.data[0].embedding);
};
const brain = createAIMemory();
brain.setEmbeddingFunction(embeddingFunction);
// Remember facts with metadata
await brain.remember('User prefers dark mode', {
userId: 'user-123',
tags: ['preferences', 'ui'],
importance: 0.9,
});
// Semantic search with similarity threshold
const results = await brain.recall('user preferences', {
limit: 5,
threshold: 0.75,
});
// Build optimized context for LLM
const context = await brain.getContext('What does user prefer?');
console.log(context.messages);

// Semantic search with filters
const results = await brain.recall('user preferences', {
limit: 10,
threshold: 0.7,
types: ['fact', 'preference'],
tags: ['ui', 'theme'],
});
// Query by specific criteria
const facts = await brain.getMemoriesByType('fact');
const userMemories = await brain.getMemoriesByUser('user-123');

const brain = createAIMemory({
maxMemories: 10000,
defaultImportance: 0.5,
embeddingDimension: 1536,
// Context building configuration
context: {
maxTokens: 4000,
relevanceThreshold: 0.7,
memoryTypes: ['fact', 'preference', 'instruction'],
maxMemories: 10,
},
// Automatic cleanup settings
autoCleanup: {
enabled: true,
maxAge: 90 * 24 * 60 * 60 * 1000, // 90 days
minImportance: 0.3,
},
});

Ready to build? Explore the API reference for complete documentation of all available methods.