3. fastapi-mcp
from fastapi import FastAPI
from fastapi_mcp import FastApiMCP

app = FastAPI()
FastApiMCP(app).mount()  # the MCP service is live immediately!
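Because FastApiMCP introspects the app's existing routes, any documented endpoint becomes an MCP tool automatically. A minimal sketch under that assumption (the /add route, its docstring, and the default /mcp mount path are illustrative and may vary by fastapi-mcp version):

from fastapi import FastAPI
from fastapi_mcp import FastApiMCP

app = FastAPI()

@app.get("/add", operation_id="add")  # hypothetical endpoint exposed as an MCP tool
def add(a: int, b: int) -> int:
    """Add two integers."""  # route metadata feeds the tool description
    return a + b

FastApiMCP(app).mount()  # serves the MCP endpoint on the same app (default path /mcp)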
4. langchain
import asyncio
import pathlib
import sys
import typing as t
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.tools import BaseTool
from langchain_groq import ChatGroq
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp import MCPToolkit
async def run(tools: list[BaseTool], prompt: str) -> str:
    # Note: the original used Groq; for portability you can swap in OpenAI or another model
    # model = ChatGroq(model_name="llama-3.1-8b-instant", stop_sequences=None)
    # Assumes the OPENAI_API_KEY environment variable is set
    from langchain_openai import ChatOpenAI
    model = ChatOpenAI(model="gpt-4o-mini")  # example using an OpenAI model
    tools_map = {tool.name: tool for tool in tools}
    tools_model = model.bind_tools(tools)
    messages: list[BaseMessage] = [HumanMessage(prompt)]
    ai_message = t.cast(AIMessage, await tools_model.ainvoke(messages))
    messages.append(ai_message)
    for tool_call in ai_message.tool_calls:
        # LangChain tool names are usually lowercase
        selected_tool = tools_map[tool_call["name"].lower()]
        # Invoking a tool with the whole tool_call dict returns a ToolMessage
        # carrying the matching tool_call_id, which the model needs on the next turn
        tool_msg = await selected_tool.ainvoke(tool_call)
        messages.append(tool_msg)
    # Call the model again to get the final natural-language response
    return await (tools_model | StrOutputParser()).ainvoke(messages)
async def main(prompt: str) -> None:
    server_params = StdioServerParameters(
        command="npx",
        # Make sure the package is installed globally or resolvable by npx
        args=["-y", "@modelcontextprotocol/server-filesystem", str(pathlib.Path(__file__).parent.parent)],
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            toolkit = MCPToolkit(session=session)
            await toolkit.initialize()
            response = await run(toolkit.get_tools(), prompt)
            print(response)
if __name__ == "__main__":
    prompt = sys.argv[1] if len(sys.argv) > 1 else "Read and summarize the file ./readme.md"
    # Make sure a readme.md file exists in the current directory
    asyncio.run(main(prompt))
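Assuming the script above is saved as client.py, a typical setup and run looks like this (PyPI package names as of writing; verify against your environment):

pip install langchain-mcp langchain-openai mcp
export OPENAI_API_KEY=sk-...
python client.py "Read and summarize the file ./readme.md"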
5. Chainlit
import chainlit as cl
from mcp import ClientSession
@cl.on_mcp_connect
async def on_mcp_connect(connection, session: ClientSession):
    """Called when an MCP connection is established."""
    # Add any post-connection logic here
    print(f"MCP Connection '{connection.name}' established.")
    # Store the session in the user session for later use
    cl.user_session.set("mcp_session_" + connection.name, session)
    cl.user_session.set("mcp_connection_" + connection.name, connection)
@cl.on_mcp_disconnect
async def on_mcp_disconnect(name: str, session: ClientSession):
    """Called when an MCP connection is terminated."""
    # Add any post-disconnect logic here
    print(f"MCP Connection '{name}' disconnected.")
    # Remove the session from the user session
    cl.user_session.set("mcp_session_" + name, None)
    cl.user_session.set("mcp_connection_" + name, None)
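Once stored, the session can be used from an ordinary message handler. A minimal sketch, assuming a connection named "my-server"; the read_file tool and its arguments are illustrative and depend on whichever MCP server is connected:

@cl.on_message
async def on_message(message: cl.Message):
    # Retrieve the ClientSession stashed by on_mcp_connect ("my-server" is hypothetical)
    session: ClientSession = cl.user_session.get("mcp_session_my-server")
    if session is None:
        await cl.Message(content="No MCP connection available.").send()
        return
    print([t.name for t in (await session.list_tools()).tools])  # enumerate the server's tools
    result = await session.call_tool("read_file", {"path": "./readme.md"})  # illustrative call
    await cl.Message(content=str(result.content)).send()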
6. Agno MCP
import asyncio
import contextlib
import os
from textwrap import dedent

from dotenv import load_dotenv
# Import paths below follow recent Agno releases; verify against your installed version
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.mcp import MCPTools
from mcp import StdioServerParameters

load_dotenv()
env = os.environ.copy()  # assumes the environment carries the needed API keys
airbnb_server_params = StdioServerParameters(
    command="npx",
    args=["-y", "@openbnb/mcp-server-airbnb", "--ignore-robots-txt"],
    env=env,  # pass the environment through to the server process
)
maps_server_params = StdioServerParameters(
    command="npx", args=["-y", "@modelcontextprotocol/server-google-maps"], env=env
)

async def main() -> None:
    # Use AsyncExitStack to manage several async contexts at once
    async with contextlib.AsyncExitStack() as stack:
        # The original entered raw stdio_client contexts here; Agno's MCPTools
        # wrapper does the same while exposing the server's tools to an Agent
        airbnb_tools = await stack.enter_async_context(MCPTools(server_params=airbnb_server_params))
        maps_tools = await stack.enter_async_context(MCPTools(server_params=maps_server_params))
        # Create the Airbnb agent, passing the MCP toolkit as its tools
        airbnb_agent = Agent(
            name="Airbnb",
            role="Airbnb Agent",
            model=OpenAIChat(id="gpt-4o"),  # which LLM to use
            tools=[airbnb_tools],
            instructions=dedent("""\
                You are an agent that can find Airbnb listings for a given location.\
                """),
            add_datetime_to_instructions=True,
        )
        # ... (other agents, e.g. a Maps agent using maps_tools, can be defined the same way)

asyncio.run(main())
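To actually exercise the agent inside main(), Agno agents expose print_response/aprint_response helpers; a hedged one-liner to place inside the async with block (the prompt text is illustrative):

        await airbnb_agent.aprint_response("Find Airbnb listings in Lisbon for two adults")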
7. Upsonic
import os
from dotenv import load_dotenv
from upsonic import Task, Agent  # Direct is not used in this example
from upsonic.tools import MCPTool, Search  # import MCPTool and the built-in Search tool
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY not found in .env file")
# Upsonic usually picks the key up from the environment automatically,
# but setting it explicitly does no harm:
# os.environ["OPENAI_API_KEY"] = openai_api_key
# Define the MCP tool class
class HackerNewsMCP(MCPTool):
    # MCPTool needs a name and a description
    name = "HackerNewsMCP"
    description = "Fetches top stories from Hacker News via MCP server."
    # command and args define how to launch the MCP server
    command = "uvx"  # use uvx to launch a Python MCP server
    args = ["mcp-hn"]  # the MCP server package name
    # Assumes mcp-hn is an installed Python package that provides the MCP server;
    # install it first if needed: pip install mcp-hn (assuming such a package exists)
# Create the task
task = Task(
    "Analyze the top 5 HackerNews stories for today. Provide a brief summary of each story, "
    "identify any common themes or trends, and highlight which stories might be most relevant "
    "for someone interested in AI and software development.",
    tools=[HackerNewsMCP, Search]  # pass the MCP tool and the built-in Search tool to the task
)
# Create the agent
agent = Agent(
    "Tech News Analyst",
    # description is more commonly used than company_url/objective
    description="An AI agent analyzing tech news from HackerNews.",
    # company_url="https://news.ycombinator.com/",  # optional
    # company_objective="To provide insightful analysis of tech industry news and trends"  # optional
)
# Execute the task
print("Analyzing HackerNews stories...")
# agent.do(task) runs the task and returns the result
result = agent.do(task)
print(result)
# Alternatively, print_do prints the execution trace and the result directly:
# agent.print_do(task)
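Assuming the snippet is saved as news_analyst.py with an OPENAI_API_KEY in .env, running it looks like this (package names per their PyPI listings; verify locally):

pip install upsonic python-dotenv
python news_analyst.py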


