Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
87 changes: 87 additions & 0 deletions .github/workflows/user-isolation-test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
# CI workflow: builds the package, spins up an ephemeral OpenMemory instance
# in Docker, and runs the user-isolation test suite against it.
name: User Isolation Test

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  workflow_dispatch: # Allow manual runs

# Single source of truth for the test credentials and port, so the
# `docker run` step and the test step cannot drift out of sync.
# Hardcoded test key - safe because this is an ephemeral, isolated instance.
env:
  OM_PORT: "8080"
  OM_API_KEY: test-ci-key-for-isolation-testing

jobs:
  test-user-isolation:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Bun
        uses: oven-sh/setup-bun@v2
        with:
          bun-version: latest

      - name: Install dependencies
        run: bun install

      - name: Build package
        run: bun run build

      - name: Checkout OpenMemory
        uses: actions/checkout@v4
        with:
          repository: 'CaviraOSS/OpenMemory'
          path: 'openmemory-repo'

      - name: Build and Start OpenMemory
        run: |
          cd openmemory-repo/backend
          # Build the Docker image
          docker build -t openmemory-test:latest .
          # Run the container detached; port/key come from the job-level env block
          docker run -d \
            --name openmemory \
            -p "${OM_PORT}:${OM_PORT}" \
            -e OM_PORT="${OM_PORT}" \
            -e OM_API_KEY="${OM_API_KEY}" \
            -e OM_EMBEDDINGS=synthetic \
            -e OM_EMBED_MODE=simple \
            -e OM_VEC_DIM=256 \
            openmemory-test:latest

      - name: Wait for OpenMemory to be ready
        run: |
          echo "Waiting for OpenMemory to be healthy..."
          # Poll the health endpoint for up to 120s (60 attempts x 2s)
          for i in {1..60}; do
            if curl -f "http://localhost:${OM_PORT}/health" 2>/dev/null; then
              echo "✅ OpenMemory is ready!"
              exit 0
            fi
            echo "Waiting... attempt $i/60"
            sleep 2
          done
          # Startup failed: dump container logs for diagnosis, then fail the job
          echo "❌ OpenMemory failed to start"
          docker logs openmemory
          exit 1

      - name: Show OpenMemory logs (if startup succeeded)
        if: success()
        run: docker logs openmemory --tail 50

      - name: Run User Isolation Test
        env:
          OPENMEMORY_URL: http://localhost:${{ env.OM_PORT }}
          OPENMEMORY_API_KEY: ${{ env.OM_API_KEY }}
          # Note: ANTHROPIC_API_KEY not needed - test messages are tiny, no summarization triggered
        run: node tests/user-isolation.test.js

      - name: Show OpenMemory logs (if test failed)
        if: failure()
        run: docker logs openmemory

      - name: Cleanup
        if: always() # Runs even when earlier steps failed, so no container leaks
        run: |
          docker stop openmemory || true
          docker rm openmemory || true

5 changes: 3 additions & 2 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
{
"name": "infinite-memory",
"version": "0.1.3",
"version": "0.1.5-beta.3",
"description": "Infinite context windows for Claude via OpenMemory semantic retrieval",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"type": "module",
"scripts": {
"build": "tsc",
"dev": "tsc --watch",
"clean": "rm -rf dist"
"clean": "rm -rf dist",
"test:isolation": "node tests/user-isolation.test.js"
},
"keywords": [
"claude",
Expand Down
79 changes: 58 additions & 21 deletions src/ContextManager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -183,18 +183,19 @@ export class ContextManager {
const latestMessage = messages[messages.length - 1];
const queryText = extractSearchableText(latestMessage);

const matches = await this.openMemory.queryRelevant(
const { userMemories, assistantMemories } = await this.openMemory.queryRelevant(
context.conversationId,
context.userId,
queryText,
20 // Get top 20 candidates
20 // Get top 20 candidates total (split between user/assistant)
);

console.log(`🔍 [InfiniteMemory] Found ${matches.length} relevant memories`);
const totalMatches = userMemories.length + assistantMemories.length;
console.log(`🔍 [InfiniteMemory] Found ${totalMatches} relevant memories (${userMemories.length} from user, ${assistantMemories.length} from assistant)`);

// Use OpenMemory's processed content directly (summarized memories)
// No need to fetch from Supabase - the summaries are perfect for context
if (matches.length === 0) {
if (totalMatches === 0) {
console.log(
`📭 [InfiniteMemory] No retrieved memories, using recent only`
);
Expand All @@ -210,27 +211,41 @@ export class ContextManager {
};
}

// Token-aware limiting: only include matches that fit within budget
// Token-aware limiting: include matches from both user and assistant within budget
// Reserve space for recent messages + historical context
const remainingBudget = inputBudget - recentTokens;
const fittingMatches = [];
const fittingUserMemories = [];
const fittingAssistantMemories = [];
let totalContextTokens = 0;

for (const match of matches) {
// Interleave user and assistant memories by relevance
const allMemories = [
...userMemories.map(m => ({ ...m, role: 'user' as const })),
...assistantMemories.map(m => ({ ...m, role: 'assistant' as const })),
].sort((a, b) => b.score - a.score); // Sort by relevance score

for (const match of allMemories) {
const matchTokens = Math.ceil(match.content.length / 4);
// Rough estimate for JSON formatting overhead (~50 tokens per match)
const formattedTokens = matchTokens + 50;

if (totalContextTokens + formattedTokens <= remainingBudget) {
fittingMatches.push(match);
if (match.role === 'user') {
fittingUserMemories.push(match);
} else {
fittingAssistantMemories.push(match);
}
totalContextTokens += formattedTokens;
} else {
console.log(`⚠️ [InfiniteMemory] Stopping at ${fittingMatches.length}/${matches.length} matches to stay within budget`);
const totalFitting = fittingUserMemories.length + fittingAssistantMemories.length;
console.log(`⚠️ [InfiniteMemory] Stopping at ${totalFitting}/${allMemories.length} matches to stay within budget`);
break;
}
}

if (fittingMatches.length === 0) {
const totalFittingMatches = fittingUserMemories.length + fittingAssistantMemories.length;

if (totalFittingMatches === 0) {
console.log(
`📭 [InfiniteMemory] No memories fit within budget, using recent only`
);
Expand All @@ -246,43 +261,65 @@ export class ContextManager {
};
}

console.log(`📊 [InfiniteMemory] Using ${fittingMatches.length} memories (~${totalContextTokens.toLocaleString()} tokens) within budget`);
console.log(`📊 [InfiniteMemory] Using ${totalFittingMatches} memories (~${totalContextTokens.toLocaleString()} tokens) within budget (${fittingUserMemories.length} user, ${fittingAssistantMemories.length} assistant)`);

// Format memories as JSON objects for clear delineation
const memoryObjects = fittingMatches.map((match) => {
// Format user memories
const userMemoryObjects = fittingUserMemories.map((match) => {
const memoryObj: any = {
content: match.content,
relevance: match.score,
};

// Add timestamp if available
if (match.timestamp) {
memoryObj.timestamp_ms = match.timestamp;
}

return JSON.stringify(memoryObj, null, 2);
});

const historicalContext = `=== Relevant context from past conversations ===\nEach memory is a JSON object with timestamp_ms (Unix epoch), content, and relevance score.\nMore recent timestamps and higher relevance scores are more important.\n\n${memoryObjects.join('\n\n')}`;
// Format assistant memories
const assistantMemoryObjects = fittingAssistantMemories.map((match) => {
const memoryObj: any = {
content: match.content,
relevance: match.score,
};
if (match.timestamp) {
memoryObj.timestamp_ms = match.timestamp;
}
return JSON.stringify(memoryObj, null, 2);
});

// Build historical context with clear attribution
let historicalContext = `=== Relevant context from past conversations ===\n`;
historicalContext += `Each memory is a JSON object with timestamp_ms (Unix epoch), content, and relevance score.\n`;
historicalContext += `More recent timestamps and higher relevance scores are more important.\n\n`;

if (fittingUserMemories.length > 0) {
historicalContext += `=== What you told me ===\n`;
historicalContext += `${userMemoryObjects.join('\n\n')}\n\n`;
}

if (fittingAssistantMemories.length > 0) {
historicalContext += `=== What I told you ===\n`;
historicalContext += `${assistantMemoryObjects.join('\n\n')}`;
}

const contextTokens = Math.ceil(historicalContext.length / 4);

console.log(
`✅ [InfiniteMemory] Context built: ${fittingMatches.length} memories (${contextTokens.toLocaleString()} tokens) + ${recentCount} recent messages`
`✅ [InfiniteMemory] Context built: ${totalFittingMatches} memories (${contextTokens.toLocaleString()} tokens) + ${recentCount} recent messages`
);
console.log('📜 [InfiniteMemory] Historical context (sorted by relevance + recency):');
console.log('📜 [InfiniteMemory] Historical context:');
console.log('─'.repeat(80));
console.log(historicalContext);
console.log('─'.repeat(80));
console.log('💡 [InfiniteMemory] Note: OpenMemory uses temporal decay - recent memories are prioritized');
console.log('💡 [InfiniteMemory] Note: Memories separated by speaker for clear attribution');

return {
messages: recentMessages,
historicalContext,
metadata: {
estimatedTokens: recentTokens + contextTokens,
recentCount,
retrievedCount: fittingMatches.length,
retrievedCount: totalFittingMatches,
usedOpenMemory: true,
},
};
Expand Down
Loading