diff --git a/backend/api/config.py b/backend/api/config.py
deleted file mode 100644
index be8ec86..0000000
--- a/backend/api/config.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from pydantic_settings import BaseSettings, SettingsConfigDict
-
-
-class Settings(BaseSettings):
-    model_config = SettingsConfigDict(
-        env_file="/opt/.env",
-        env_ignore_empty=True,
-        extra="ignore",
-    )
-
-    model: str = "gpt-4o-mini-2024-07-18"
-    openai_api_key: str = ""
-    mcp_server_port: int = 8050
-
-    pg_url: str = "postgres://postgres"
-    pg_user: str = "postgres"
-    pg_pass: str = "postgres"
-
-
-settings = Settings()
diff --git a/backend/api/core/agent/orchestration.py b/backend/api/core/agent/orchestration.py
new file mode 100644
index 0000000..84a9b5a
--- /dev/null
+++ b/backend/api/core/agent/orchestration.py
@@ -0,0 +1,76 @@
+import functools
+
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_core.runnables.base import RunnableSequence
+from langchain_core.tools import StructuredTool
+from langchain_openai import ChatOpenAI
+from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
+from langgraph.graph import MessagesState, StateGraph
+from langgraph.graph.state import CompiledStateGraph
+from langgraph.prebuilt import ToolNode, tools_condition
+
+from api.core.agent.prompts import SYSTEM_PROMPT
+
+
+class State(MessagesState):
+    next: str
+
+
+def agent_factory(
+    llm: ChatOpenAI, tools: list[StructuredTool], system_prompt: str
+) -> RunnableSequence:
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            ("system", system_prompt),
+            MessagesPlaceholder(variable_name="messages"),
+        ]
+    )
+    if tools:
+        agent = prompt | llm.bind_tools(tools)
+    else:
+        agent = prompt | llm
+    return agent
+
+
+def agent_node_factory(
+    state: State,
+    agent: RunnableSequence,
+) -> State:
+    result = agent.invoke(state)
+    return dict(messages=[result])
+
+
+def graph_factory(
+    agent_node: functools.partial,
+    tools: list[StructuredTool],
+    checkpointer: AsyncPostgresSaver | None = None,
+    name: str = "agent_node",
+) -> CompiledStateGraph:
+    graph_builder = StateGraph(State)
+    graph_builder.add_node(name, agent_node)
+    graph_builder.add_node("tools", ToolNode(tools))
+
+    graph_builder.add_conditional_edges(name, tools_condition)
+    graph_builder.add_edge("tools", name)
+
+    graph_builder.set_entry_point(name)
+    graph = graph_builder.compile(checkpointer=checkpointer)
+    return graph
+
+
+def get_graph(
+    llm: ChatOpenAI,
+    tools: list[StructuredTool] = [],
+    system_prompt: str = SYSTEM_PROMPT,
+    name: str = "agent_node",
+    checkpointer: AsyncPostgresSaver | None = None,
+) -> CompiledStateGraph:
+    agent = agent_factory(llm, tools, system_prompt)
+    worker_node = functools.partial(agent_node_factory, agent=agent)
+    return graph_factory(worker_node, tools, checkpointer, name)
+
+
+def get_config():
+    return dict(
+        configurable=dict(thread_id="1"),
+    )
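
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): a minimal run of the factories
# above, with no tools and no checkpointer. The model name mirrors the
# default in config.py; OPENAI_API_KEY is assumed to be set.
import asyncio

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from api.core.agent.orchestration import get_config, get_graph


async def demo() -> None:
    llm = ChatOpenAI(model="gpt-4o-mini-2024-07-18", temperature=0)
    graph = get_graph(llm)  # no tools bound, so tools_condition routes to END
    state = dict(messages=[HumanMessage(content="Hello!")])
    result = await graph.ainvoke(state, get_config())
    print(result["messages"][-1].content)


asyncio.run(demo())
# ---------------------------------------------------------------------------
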
diff --git a/backend/api/core/agent/persistence.py b/backend/api/core/agent/persistence.py
new file mode 100644
index 0000000..419b49f
--- /dev/null
+++ b/backend/api/core/agent/persistence.py
@@ -0,0 +1,42 @@
+from contextlib import asynccontextmanager
+from typing import AsyncGenerator
+
+import psycopg
+import psycopg.errors
+from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
+from psycopg_pool import AsyncConnectionPool
+
+from api.core.logs import uvicorn
+
+
+@asynccontextmanager
+async def checkpointer_context(
+    conn_str: str,
+) -> AsyncGenerator[AsyncPostgresSaver]:
+    """
+    Async context manager that sets up and yields a LangGraph checkpointer.
+
+    Uses a psycopg async connection pool to initialize AsyncPostgresSaver.
+    Skips setup if the checkpointer is already configured.
+
+    Args:
+        conn_str (str): PostgreSQL connection string.
+
+    Yields:
+        AsyncPostgresSaver: The initialized checkpointer.
+    """
+    # NOTE: LangGraph AsyncPostgresSaver does not support SQLAlchemy ORM connections.
+    # A compatible psycopg connection is created via the connection pool instead.
+    async with AsyncConnectionPool(
+        conninfo=conn_str,
+        kwargs=dict(prepare_threshold=None),
+    ) as pool:
+        checkpointer = AsyncPostgresSaver(pool)
+        try:
+            await checkpointer.setup()
+        except (
+            psycopg.errors.DuplicateColumn,
+            psycopg.errors.ActiveSqlTransaction,
+        ):
+            uvicorn.warning("Skipping checkpointer setup — already configured.")
+        yield checkpointer
diff --git a/backend/api/core/agent/prompts/__init__.py b/backend/api/core/agent/prompts/__init__.py
new file mode 100644
index 0000000..32d33ba
--- /dev/null
+++ b/backend/api/core/agent/prompts/__init__.py
@@ -0,0 +1,9 @@
+import os
+
+
+def read_system_prompt():
+    with open(os.path.join(os.path.dirname(__file__), "system.md"), "r") as f:
+        return f.read()
+
+
+SYSTEM_PROMPT = read_system_prompt()
diff --git a/backend/api/core/agent/prompts/system.md b/backend/api/core/agent/prompts/system.md
new file mode 100644
index 0000000..64e0dbc
--- /dev/null
+++ b/backend/api/core/agent/prompts/system.md
@@ -0,0 +1 @@
+You are a helpful assistant.
diff --git a/backend/api/core/config.py b/backend/api/core/config.py
new file mode 100644
index 0000000..8fa4842
--- /dev/null
+++ b/backend/api/core/config.py
@@ -0,0 +1,33 @@
+from pydantic import PostgresDsn, computed_field
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+class Settings(BaseSettings):
+    model_config = SettingsConfigDict(
+        env_file="/opt/.env",
+        env_ignore_empty=True,
+        extra="ignore",
+    )
+
+    model: str = "gpt-4o-mini-2024-07-18"
+    openai_api_key: str = ""
+    mcp_server_port: int = 8050
+
+    postgres_dsn: PostgresDsn = (
+        "postgresql+psycopg://postgres:password@example.supabase.com:6543/postgres"
+    )
+
+    @computed_field
+    @property
+    def orm_conn_str(self) -> str:
+        return self.postgres_dsn.encoded_string()
+
+    @computed_field
+    @property
+    def checkpoint_conn_str(self) -> str:
+        # NOTE: LangGraph AsyncPostgresSaver has some issues
+        # with specifying the psycopg driver explicitly
+        return self.postgres_dsn.encoded_string().replace("+psycopg", "")
+
+
+settings = Settings()
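
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): how the two computed fields above
# derive both connection strings from a single DSN. Credentials and host are
# placeholders.
from pydantic import PostgresDsn, TypeAdapter

dsn = TypeAdapter(PostgresDsn).validate_python(
    "postgresql+psycopg://postgres:password@localhost:5432/postgres"
)
print(dsn.encoded_string())
# postgresql+psycopg://... (orm_conn_str: SQLAlchemy keeps the driver suffix)
print(dsn.encoded_string().replace("+psycopg", ""))
# postgresql://... (checkpoint_conn_str: AsyncPostgresSaver wants a plain URL)
# ---------------------------------------------------------------------------
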
diff --git a/backend/api/core/dependencies.py b/backend/api/core/dependencies.py
new file mode 100644
index 0000000..20218e7
--- /dev/null
+++ b/backend/api/core/dependencies.py
@@ -0,0 +1,49 @@
+from contextlib import asynccontextmanager
+from typing import Annotated, AsyncGenerator
+
+from fastapi import Depends
+from langchain_mcp_adapters.tools import load_mcp_tools
+from langchain_openai import ChatOpenAI
+from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
+
+from api.core.agent.persistence import checkpointer_context
+from api.core.config import settings
+from api.core.mcps import mcp_sse_client
+from api.core.models import Resource
+
+
+def get_llm() -> ChatOpenAI:
+    return ChatOpenAI(
+        streaming=True,
+        model=settings.model,
+        temperature=0,
+        api_key=settings.openai_api_key,
+        stream_usage=True,
+    )
+
+
+LLMDep = Annotated[ChatOpenAI, Depends(get_llm)]
+
+
+engine: AsyncEngine = create_async_engine(settings.orm_conn_str)
+
+
+def get_engine() -> AsyncEngine:
+    return engine
+
+
+EngineDep = Annotated[AsyncEngine, Depends(get_engine)]
+
+
+@asynccontextmanager
+async def setup_graph() -> AsyncGenerator[Resource]:
+    async with checkpointer_context(
+        settings.checkpoint_conn_str
+    ) as checkpointer:
+        async with mcp_sse_client() as session:
+            tools = await load_mcp_tools(session)
+            yield Resource(
+                checkpointer=checkpointer,
+                tools=tools,
+                session=session,
+            )
diff --git a/backend/api/core/logs.py b/backend/api/core/logs.py
new file mode 100644
index 0000000..3251c0a
--- /dev/null
+++ b/backend/api/core/logs.py
@@ -0,0 +1,7 @@
+from logging import getLogger
+
+from rich.pretty import pprint as print
+
+print  # facade: re-export rich's pprint as this module's print
+
+uvicorn = getLogger("uvicorn")
diff --git a/backend/api/core/mcps.py b/backend/api/core/mcps.py
new file mode 100644
index 0000000..cf4797b
--- /dev/null
+++ b/backend/api/core/mcps.py
@@ -0,0 +1,27 @@
+from contextlib import asynccontextmanager
+from typing import AsyncGenerator
+
+from mcp import ClientSession
+from mcp.client.sse import sse_client
+
+from api.core.config import settings
+
+
+@asynccontextmanager
+async def mcp_sse_client() -> AsyncGenerator[ClientSession]:
+    """
+    Creates and initializes an MCP client session over SSE.
+
+    Establishes an SSE connection to the MCP server and yields an initialized
+    `ClientSession` for communication.
+
+    Yields:
+        ClientSession: An initialized MCP client session.
+    """
+    async with sse_client(f"http://mcp:{settings.mcp_server_port}/sse") as (
+        read_stream,
+        write_stream,
+    ):
+        async with ClientSession(read_stream, write_stream) as session:
+            await session.initialize()
+            yield session
diff --git a/backend/api/core/models.py b/backend/api/core/models.py
new file mode 100644
index 0000000..338c50e
--- /dev/null
+++ b/backend/api/core/models.py
@@ -0,0 +1,13 @@
+from langchain_core.tools import StructuredTool
+from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
+from mcp import ClientSession
+from pydantic import BaseModel
+
+
+class Resource(BaseModel):
+    checkpointer: AsyncPostgresSaver
+    tools: list[StructuredTool]
+    session: ClientSession
+
+    class Config:
+        arbitrary_types_allowed = True
diff --git a/backend/api/dependencies.py b/backend/api/dependencies.py
deleted file mode 100644
index f2c3ea7..0000000
--- a/backend/api/dependencies.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from typing import Annotated, Iterable
-
-from config import settings
-from fastapi import Depends
-from langchain_openai import ChatOpenAI
-
-
-def llm_factory() -> ChatOpenAI:
-    llm = ChatOpenAI(
-        streaming=True,
-        model=settings.model,
-        temperature=0,
-        api_key=settings.openai_api_key,
-        stream_usage=True,
-    )
-    return llm
-
-
-def get_llm_session() -> Iterable[ChatOpenAI]:
-    yield llm_factory()
-
-
-LLMDep = Annotated[ChatOpenAI, Depends(get_llm_session)]
diff --git a/backend/api/main.py b/backend/api/main.py
index f280ded..78070e1 100644
--- a/backend/api/main.py
+++ b/backend/api/main.py
@@ -1,7 +1,8 @@
 from fastapi import FastAPI
 
-from api.routers import llms, mcps
+from api.routers import checkpoints, llms, mcps
 
 app = FastAPI(swagger_ui_parameters={"tryItOutEnabled": True})
 app.include_router(llms.router, prefix="/v1")
 app.include_router(mcps.router, prefix="/v1")
+app.include_router(checkpoints.router, prefix="/v1")
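
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the discovery step that
# setup_graph() performs, shown in isolation. Assumes the "mcp" hostname from
# mcps.py resolves (e.g. inside the compose network).
import asyncio

from langchain_mcp_adapters.tools import load_mcp_tools

from api.core.mcps import mcp_sse_client


async def list_agent_tools() -> None:
    async with mcp_sse_client() as session:
        tools = await load_mcp_tools(session)
        for tool in tools:
            print(f"{tool.name}: {tool.description}")


asyncio.run(list_agent_tools())
# ---------------------------------------------------------------------------
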
"langchain-mcp-adapters>=0.0.9", "langchain-openai==0.2.3", - "langchain-postgres==0.0.12", "langfuse==2.60.2", "langgraph==0.2.39", + "langgraph-checkpoint-postgres>=2.0.21", "mcp[cli]>=1.6.0", "prometheus-client==0.21.1", "psycopg[binary]==3.2.3", "pydantic-settings==2.6.0", "pypdf==5.1.0", "rich==13.9.4", + "sqlmodel>=0.0.24", "sse-starlette==2.1.3", ] diff --git a/backend/api/routers/checkpoints.py b/backend/api/routers/checkpoints.py new file mode 100644 index 0000000..2158c96 --- /dev/null +++ b/backend/api/routers/checkpoints.py @@ -0,0 +1,38 @@ +from fastapi import APIRouter +from sqlalchemy import text + +from api.core.dependencies import EngineDep +from api.core.logs import uvicorn + +TABLES = [ + "checkpoints", + "checkpoint_migrations", + "checkpoint_blobs", + "checkpoint_writes", +] +router = APIRouter(tags=["checkpoints"]) + + +@router.delete("/truncate") +async def truncate_checkpoints(engine: EngineDep): + """ + Truncates all checkpoint-related tables from LangGraph AsyncPostgresSaver. + + This operation removes all records from the following tables: + - checkpoints + - checkpoint_migrations + - checkpoint_blobs + - checkpoint_writes + + **Warning**: This action is irreversible and should be used with caution. Ensure proper backups are in place + before performing this operation. + """ + + async with engine.begin() as conn: + for table in TABLES: + await conn.execute(text(f"TRUNCATE TABLE {table};")) + uvicorn.info(f"Truncated table {table}") + return { + "status": "success", + "message": "All checkpoint-related tables truncated successfully.", + } diff --git a/backend/api/routers/llms.py b/backend/api/routers/llms.py index 625712e..b7035a8 100644 --- a/backend/api/routers/llms.py +++ b/backend/api/routers/llms.py @@ -1,22 +1,73 @@ from typing import AsyncGenerator +import psycopg.errors from fastapi import APIRouter +from langchain_core.messages import HumanMessage +from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver from sse_starlette.sse import EventSourceResponse from starlette.responses import Response -from api.dependencies import LLMDep +from api.core.agent.orchestration import get_config, get_graph +from api.core.dependencies import LLMDep, setup_graph +from api.core.logs import print, uvicorn router = APIRouter(tags=["chat"]) -async def stream( +@router.get("/chat/completions") +async def completions(query: str, llm: LLMDep) -> Response: + """ + Stream model completions as Server-Sent Events (SSE). + + This endpoint sends the model's responses in real-time as they are generated, + allowing for a continuous stream of data to the client. + """ + return EventSourceResponse(stream_completions(query, llm)) + + +@router.get("/chat/agent") +async def agent(query: str, llm: LLMDep) -> Response: + """Stream LangGraph completions as Server-Sent Events (SSE). + + This endpoint streams LangGraph-generated events in real-time, allowing the client + to receive responses as they are processed, useful for agent-based workflows. 
+ """ + return EventSourceResponse(stream_graph(query, llm)) + + +async def stream_completions( query: str, llm: LLMDep ) -> AsyncGenerator[dict[str, str], None]: async for chunk in llm.astream_events(query): yield dict(data=chunk) -@router.get("/chat/completions") -async def completions(query: str, llm: LLMDep) -> Response: - """Stream completions via Server Sent Events""" - return EventSourceResponse(stream(query, llm)) +async def checkpointer_setup(pool): + checkpointer = AsyncPostgresSaver(pool) + try: + await checkpointer.setup() + except ( + psycopg.errors.DuplicateColumn, + psycopg.errors.ActiveSqlTransaction, + ): + uvicorn.warning("Skipping checkpointer setup — already configured.") + return checkpointer + + +async def stream_graph( + query: str, + llm: LLMDep, +) -> AsyncGenerator[dict[str, str], None]: + async with setup_graph() as resource: + graph = get_graph( + llm, + tools=resource.tools, + checkpointer=resource.checkpointer, + ) + config = get_config() + events = dict(messages=[HumanMessage(content=query)]) + + async for event in graph.astream_events(events, config, version="v2"): + if event.get("event").endswith("end"): + print(event) + yield dict(data=event) diff --git a/backend/api/routers/mcps.py b/backend/api/routers/mcps.py index 1f25b43..c474af8 100644 --- a/backend/api/routers/mcps.py +++ b/backend/api/routers/mcps.py @@ -1,27 +1,14 @@ -from contextlib import asynccontextmanager from typing import Iterable -from config import settings from fastapi import APIRouter -from mcp import ClientSession, types -from mcp.client.sse import sse_client +from mcp import types +from api.core.mcps import mcp_sse_client from shared_mcp.models import ToolRequest router = APIRouter(prefix="/mcps", tags=["mcps"]) -@asynccontextmanager -async def mcp_sse_client(): - async with sse_client(f"http://mcp:{settings.mcp_server_port}/sse") as ( - read_stream, - write_stream, - ): - async with ClientSession(read_stream, write_stream) as session: - await session.initialize() - yield session - - @router.get("/list-tools") async def list_tools() -> Iterable[types.Tool]: """ diff --git a/backend/api/uv.lock b/backend/api/uv.lock index b787ee6..15724ba 100644 --- a/backend/api/uv.lock +++ b/backend/api/uv.lock @@ -88,16 +88,18 @@ dependencies = [ { name = "fastapi", extra = ["standard"] }, { name = "langchain" }, { name = "langchain-community" }, + { name = "langchain-mcp-adapters" }, { name = "langchain-openai" }, - { name = "langchain-postgres" }, { name = "langfuse" }, { name = "langgraph" }, + { name = "langgraph-checkpoint-postgres" }, { name = "mcp", extra = ["cli"] }, { name = "prometheus-client" }, { name = "psycopg", extra = ["binary"] }, { name = "pydantic-settings" }, { name = "pypdf" }, { name = "rich" }, + { name = "sqlmodel" }, { name = "sse-starlette" }, ] @@ -108,16 +110,18 @@ requires-dist = [ { name = "fastapi", extras = ["standard"], specifier = "==0.115.11" }, { name = "langchain", specifier = "==0.3.6" }, { name = "langchain-community", specifier = "==0.3.4" }, + { name = "langchain-mcp-adapters", specifier = ">=0.0.9" }, { name = "langchain-openai", specifier = "==0.2.3" }, - { name = "langchain-postgres", specifier = "==0.0.12" }, { name = "langfuse", specifier = "==2.60.2" }, { name = "langgraph", specifier = "==0.2.39" }, + { name = "langgraph-checkpoint-postgres", specifier = ">=2.0.21" }, { name = "mcp", extras = ["cli"], specifier = ">=1.6.0" }, { name = "prometheus-client", specifier = "==0.21.1" }, { name = "psycopg", extras = ["binary"], specifier = "==3.2.3" }, { name 
= "pydantic-settings", specifier = "==2.6.0" }, { name = "pypdf", specifier = "==5.1.0" }, { name = "rich", specifier = "==13.9.4" }, + { name = "sqlmodel", specifier = ">=0.0.24" }, { name = "sse-starlette", specifier = "==2.1.3" }, ] @@ -554,34 +558,30 @@ wheels = [ ] [[package]] -name = "langchain-openai" -version = "0.2.3" +name = "langchain-mcp-adapters" +version = "0.0.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, - { name = "openai" }, - { name = "tiktoken" }, + { name = "mcp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/41/31/82c8a33354dd0a59438973cfdfc771fde0df2c9fb8388e0c23dc36119959/langchain_openai-0.2.3.tar.gz", hash = "sha256:e142031704de1104735f503f76352c53b27ac0a2806466392993c4508c42bf0c", size = 42572, upload_time = "2024-10-18T15:27:26.933Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1a/48/dc5544f5b919b4ff9e736ec8db71217431c585c5c87acd3ab7558cc06cee/langchain_mcp_adapters-0.0.9.tar.gz", hash = "sha256:9ecd10fc420d98b3c14115bbca3174575e0a4ea29bd125ef39d11191a72ff1a1", size = 14827, upload_time = "2025-04-16T15:03:05.158Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/66/ea/dcc59d9b818a4d7f25d4d6b3018355a0e0243a351b1d4ef8b26ec107ee00/langchain_openai-0.2.3-py3-none-any.whl", hash = "sha256:f498c94817c980cb302439b95d3f3275cdf2743e022ee674692c75898523cf57", size = 49907, upload_time = "2024-10-18T15:27:25.214Z" }, + { url = "https://files.pythonhosted.org/packages/6f/24/3a4be149e8db15936533357f987b4b89c74c7f039427d6229679dbcc53b9/langchain_mcp_adapters-0.0.9-py3-none-any.whl", hash = "sha256:7c3dedd7830de826f418706c8a2fe388afcf8daf2037a1b39d1e065a5eacb082", size = 10065, upload_time = "2025-04-16T15:03:04.324Z" }, ] [[package]] -name = "langchain-postgres" -version = "0.0.12" +name = "langchain-openai" +version = "0.2.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, - { name = "numpy" }, - { name = "pgvector" }, - { name = "psycopg" }, - { name = "psycopg-pool" }, - { name = "sqlalchemy" }, + { name = "openai" }, + { name = "tiktoken" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/54/87/9c1363b2a39c369c2786fee287aeecc20fed2d98f40b9dab5a9ab208f5e6/langchain_postgres-0.0.12.tar.gz", hash = "sha256:fe44c8073345463720355c86b676c56fc867d5c0995066353f60f2a5d01d0d0d", size = 21476, upload_time = "2024-09-13T17:09:04.029Z" } +sdist = { url = "https://files.pythonhosted.org/packages/41/31/82c8a33354dd0a59438973cfdfc771fde0df2c9fb8388e0c23dc36119959/langchain_openai-0.2.3.tar.gz", hash = "sha256:e142031704de1104735f503f76352c53b27ac0a2806466392993c4508c42bf0c", size = 42572, upload_time = "2024-10-18T15:27:26.933Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/21/9f/00fdc9a1da7e856e9cc3705ae0155b8d7bf0e1317e5bc8fe1354944ebb3f/langchain_postgres-0.0.12-py3-none-any.whl", hash = "sha256:b3a8e8fa318ecea1874675ae2bed08fc320e7d4c4f65e016f95808d1164dad51", size = 21909, upload_time = "2024-09-13T17:09:02.791Z" }, + { url = "https://files.pythonhosted.org/packages/66/ea/dcc59d9b818a4d7f25d4d6b3018355a0e0243a351b1d4ef8b26ec107ee00/langchain_openai-0.2.3-py3-none-any.whl", hash = "sha256:f498c94817c980cb302439b95d3f3275cdf2743e022ee674692c75898523cf57", size = 49907, upload_time = "2024-10-18T15:27:25.214Z" }, ] [[package]] @@ -642,6 +642,21 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/bc/60/30397e8fd2b7dead3754aa79d708caff9dbb371f30b4cd21802c60f6b921/langgraph_checkpoint-2.0.24-py3-none-any.whl", hash = "sha256:3836e2909ef2387d1fa8d04ee3e2a353f980d519fd6c649af352676dc73d66b8", size = 42028, upload_time = "2025-04-02T22:47:33.017Z" }, ] +[[package]] +name = "langgraph-checkpoint-postgres" +version = "2.0.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langgraph-checkpoint" }, + { name = "orjson" }, + { name = "psycopg" }, + { name = "psycopg-pool" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/90/51/18138b116807180c97093890dd7955f0e68eabcba97d323a61275bab45b6/langgraph_checkpoint_postgres-2.0.21.tar.gz", hash = "sha256:921915fd3de534b4c84469f93d03046c1ef1f224e44629212b172ec3e9b72ded", size = 30371, upload_time = "2025-04-18T16:31:50.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/31/d5f4a7dd63dddfdb85209a3cbc1778b14bc0dddadb431e34938956f45e8c/langgraph_checkpoint_postgres-2.0.21-py3-none-any.whl", hash = "sha256:f0a50f2c1496778e00ea888415521bb2b7789a12052aa5ae54d82cf517b271e8", size = 39440, upload_time = "2025-04-18T16:31:48.838Z" }, +] + [[package]] name = "langgraph-sdk" version = "0.1.61" @@ -881,17 +896,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload_time = "2024-11-08T09:47:44.722Z" }, ] -[[package]] -name = "pgvector" -version = "0.2.5" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "numpy" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/29/bb/4686b1090a7c68fa367e981130a074dc6c1236571d914ffa6e05c882b59d/pgvector-0.2.5-py2.py3-none-any.whl", hash = "sha256:5e5e93ec4d3c45ab1fa388729d56c602f6966296e19deee8878928c6d567e41b", size = 9638, upload_time = "2024-02-07T19:35:03.8Z" }, -] - [[package]] name = "prometheus-client" version = "0.21.1" @@ -1244,6 +1248,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/7c/5fc8e802e7506fe8b55a03a2e1dab156eae205c91bee46305755e086d2e2/sqlalchemy-2.0.40-py3-none-any.whl", hash = "sha256:32587e2e1e359276957e6fe5dad089758bc042a971a8a09ae8ecf7a8fe23d07a", size = 1903894, upload_time = "2025-03-27T18:40:43.796Z" }, ] +[[package]] +name = "sqlmodel" +version = "0.0.24" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "sqlalchemy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/86/4b/c2ad0496f5bdc6073d9b4cef52be9c04f2b37a5773441cc6600b1857648b/sqlmodel-0.0.24.tar.gz", hash = "sha256:cc5c7613c1a5533c9c7867e1aab2fd489a76c9e8a061984da11b4e613c182423", size = 116780, upload_time = "2025-03-07T05:43:32.887Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/91/484cd2d05569892b7fef7f5ceab3bc89fb0f8a8c0cde1030d383dbc5449c/sqlmodel-0.0.24-py3-none-any.whl", hash = "sha256:6778852f09370908985b667d6a3ab92910d0d5ec88adcaf23dbc242715ff7193", size = 28622, upload_time = "2025-03-07T05:43:30.37Z" }, +] + [[package]] name = "sse-starlette" version = "2.1.3" diff --git a/envs/backend.env b/envs/backend.env index e570b8b..c439a50 100644 --- a/envs/backend.env +++ b/envs/backend.env @@ -1 +1,2 @@ OPENAI_API_KEY= +POSTGRES_DSN=