Skip to content

Commit 8243898

Browse files
committed
fix: hang — create the jupyter-mcp-server connection per request instead of eagerly at startup, avoiding async-context deadlock
1 parent 04a796d commit 8243898

File tree

3 files changed

+68
-83
lines changed

3 files changed

+68
-83
lines changed

jupyter_ai_agents/extension.py

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -108,16 +108,12 @@ def initialize_settings(self):
108108
token = self.serverapp.token
109109
self.log.info(f"Jupyter server URL: {base_url}")
110110

111-
# Create MCP server connection to jupyter-mcp-server
112-
self.log.info("Creating MCP server connection to jupyter-mcp-server...")
113-
mcp_server = create_mcp_server(base_url, token)
114-
self.log.info("MCP server connection created")
115-
116-
# Create chat agent with MCP server toolset
111+
# Create chat agent without eagerly attaching MCP server tools
112+
# We'll create the MCP connection per request to avoid async context issues
117113
default_model = config.get_default_model()
118114
self.log.info(f"Creating chat agent with model: {default_model}")
119-
agent = create_chat_agent(model=default_model, mcp_server=mcp_server)
120-
self.log.info("Chat agent created with MCP tools")
115+
agent = create_chat_agent(model=default_model, mcp_server=None)
116+
self.log.info("Chat agent created; MCP tools will be attached per request")
121117

122118
# Create MCP tool manager for additional MCP servers
123119
mcp_manager = MCPToolManager()
@@ -135,7 +131,8 @@ def initialize_settings(self):
135131
self.settings['chat_agent'] = agent
136132
self.settings['mcp_manager'] = mcp_manager
137133
self.settings['chat_config'] = config
138-
self.settings['jupyter_mcp_server'] = mcp_server
134+
self.settings['chat_base_url'] = base_url
135+
self.settings['chat_token'] = token
139136

140137
self.log.info("Jupyter AI Agents extension initialized successfully")
141138

jupyter_ai_agents/handlers/chat.py

Lines changed: 61 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414
from starlette.requests import Request
1515
from starlette.datastructures import Headers
1616

17+
from ..tools import create_mcp_server
18+
1719
logger = logging.getLogger(__name__)
1820

1921

@@ -80,81 +82,67 @@ async def post(self):
8082
self.finish(json.dumps({"error": "Chat agent not initialized"}))
8183
return
8284

83-
# Create request adapter (Starlette-compatible)
84-
tornado_request = TornadoRequestAdapter(self)
85-
86-
# Parse request body to extract model if specified
87-
try:
88-
body = await tornado_request.json()
89-
model = body.get('model') if isinstance(body, dict) else None
90-
except:
91-
model = None
92-
93-
# Get builtin tools (empty list - tools metadata is only for UI display)
94-
# The actual pydantic-ai tools are registered in the agent itself
95-
builtin_tools = []
96-
97-
# Create usage limits for the agent
98-
from pydantic_ai import UsageLimits
99-
usage_limits = UsageLimits(
100-
output_tokens_limit=5000,
101-
total_tokens_limit=100000,
102-
)
103-
104-
# Use VercelAIAdapter.dispatch_request (new API)
105-
# This is now a classmethod that takes the request and agent directly
106-
response = await VercelAIAdapter.dispatch_request(
107-
tornado_request,
108-
agent=agent,
109-
model=model,
110-
# usage=usage_limits,
111-
builtin_tools=builtin_tools,
112-
)
113-
114-
# Set headers from FastAPI response
115-
for key, value in response.headers.items():
116-
self.set_header(key, value)
117-
118-
# Stream the response body
119-
# FastAPI StreamingResponse has body_iterator
120-
# Wrap in try-except to catch cancel scope errors
121-
if hasattr(response, 'body_iterator'):
122-
try:
123-
async for chunk in response.body_iterator:
124-
"""
125-
# Filter out benign cancel scope errors from the stream
126-
# These are internal anyio errors that don't affect functionality
127-
if isinstance(chunk, bytes):
128-
chunk_str = chunk.decode('utf-8', errors='ignore')
129-
else:
130-
chunk_str = str(chunk)
131-
132-
# Skip chunks that contain cancel scope errors
133-
if 'cancel scope' in chunk_str.lower() and 'error' in chunk_str.lower():
134-
self.log.debug(f"Filtered out begin cancel scope error from stream")
135-
continue
136-
"""
85+
# Lazily create the MCP server connection for this request
86+
base_url = self.settings.get('chat_base_url')
87+
token = self.settings.get('chat_token')
88+
mcp_server = create_mcp_server(base_url, token)
13789

138-
# Write the chunk
139-
if isinstance(chunk, bytes):
140-
self.write(chunk)
141-
else:
142-
self.write(chunk.encode('utf-8') if isinstance(chunk, str) else chunk)
143-
await self.flush()
144-
except Exception as stream_error:
145-
# Log but don't crash - the stream might have completed successfully
146-
# Cancel scope errors often happen during cleanup after successful completion
147-
self.log.debug(f"Stream iteration completed with: {stream_error}")
148-
else:
149-
# Fallback for non-streaming response
150-
body = response.body
151-
if isinstance(body, bytes):
152-
self.write(body)
153-
else:
154-
self.write(body.encode('utf-8') if isinstance(body, str) else body)
90+
async with mcp_server:
91+
# Create request adapter (Starlette-compatible)
92+
tornado_request = TornadoRequestAdapter(self)
93+
94+
# Parse request body to extract model if specified
95+
try:
96+
body = await tornado_request.json()
97+
model = body.get('model') if isinstance(body, dict) else None
98+
except Exception:
99+
model = None
100+
101+
# Get builtin tools (empty list - tools metadata is only for UI display)
102+
# The actual pydantic-ai tools are registered in the agent itself
103+
builtin_tools = []
104+
105+
# Create usage limits for the agent
106+
usage_limits = UsageLimits(
107+
tool_calls_limit=5,
108+
output_tokens_limit=5000,
109+
total_tokens_limit=100000,
110+
)
111+
112+
# Use VercelAIAdapter.dispatch_request (new API)
113+
response = await VercelAIAdapter.dispatch_request(
114+
tornado_request,
115+
agent=agent,
116+
model=model,
117+
usage_limits=usage_limits,
118+
toolsets=[mcp_server],
119+
builtin_tools=builtin_tools,
120+
)
155121

156-
# Finish the response
157-
self.finish()
122+
# Set headers from FastAPI response
123+
for key, value in response.headers.items():
124+
self.set_header(key, value)
125+
126+
# Stream the response body
127+
if hasattr(response, 'body_iterator'):
128+
try:
129+
async for chunk in response.body_iterator:
130+
if isinstance(chunk, bytes):
131+
self.write(chunk)
132+
else:
133+
self.write(chunk.encode('utf-8') if isinstance(chunk, str) else chunk)
134+
await self.flush()
135+
except Exception as stream_error:
136+
self.log.debug(f"Stream iteration completed with: {stream_error}")
137+
else:
138+
body = response.body
139+
if isinstance(body, bytes):
140+
self.write(body)
141+
else:
142+
self.write(body.encode('utf-8') if isinstance(body, str) else body)
143+
144+
# Finish the response while MCP context is active
145+
self.finish()
158146

159147
except Exception as e:
160148
self.log.error(f"Error in chat handler: {e}", exc_info=True)

package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "@datalayer/jupyter-ai-agents",
3-
"version": "0.18.0",
3+
"version": "0.19.0",
44
"description": "Jupyter AI Agents.",
55
"keywords": [
66
"ai",

0 commit comments

Comments
 (0)