We'll use Qwen as the example. Qwen is also compatible with the OpenAI protocol, so after standing up an API proxy you can hit localhost and get streaming output. The full proxy code (main.py) is below:
```python
# main.py
from typing import List, Optional

import httpx
from fastapi import Depends, FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from starlette.responses import StreamingResponse

# --- 1. Configuration ---
API_KEY = "your Qwen API key -- apply for one on Alibaba Cloud Bailian"
BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"

# --- 2. Pydantic models (OpenAI API compatible) ---
# The proxy needs these to understand the request structure.
class ChatMessage(BaseModel):
    role: str
    content: str

class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    stream: Optional[bool] = False
    # Other OpenAI parameters...
    temperature: Optional[float] = 0.7

# --- 3. FastAPI app and proxy endpoint ---
app = FastAPI(title="OpenAI-Compatible Proxy")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Use a single httpx.AsyncClient as a reusable client (with HTTP/2 support
# available); this is the recommended way to make async API calls.
client = httpx.AsyncClient(base_url=BASE_URL)

# Placeholder dependency: the original post references verify_signature but
# its body was not included, so this stub accepts every request. Replace it
# with your own signature/auth check before deploying.
async def verify_signature(request: Request) -> None:
    pass

@app.post("/v1/chat/completions", dependencies=[Depends(verify_signature)])
async def chat_completions_proxy(request: Request):
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {API_KEY}",
        "Accept": request.headers.get("Accept", "text/event-stream"),
    }
    body = await request.body()

    # The correct endpoint path, relative to BASE_URL (not the base URL itself)
    endpoint_path = "/chat/completions"

    try:
        backend_req = client.build_request(
            method="POST",
            url=endpoint_path,
            headers=headers,
            content=body,
            timeout=300.0,
        )
        # Debugging tip: print exactly what will be sent to the backend
        print("--- Forwarding to Backend ---")
        print(f"URL: {backend_req.url}")
        print(f"Headers: {backend_req.headers}")
        print(f"Body: {backend_req.content.decode()}")
        print("---------------------------")

        backend_resp = await client.send(backend_req, stream=True)
    except httpx.RequestError as e:
        raise HTTPException(status_code=503, detail=f"Backend request failed: {e}")

    if backend_resp.status_code != 200:
        error_content = await backend_resp.aread()
        # Print the raw backend error to make debugging easier
        print(f"Backend Error ({backend_resp.status_code}): {error_content.decode()}")
        await backend_resp.aclose()
        raise HTTPException(
            status_code=backend_resp.status_code,  # pass the backend status code through
            detail=f"Backend returned an error: {error_content.decode()}",
        )

    return StreamingResponse(
        backend_resp.aiter_bytes(),
        status_code=backend_resp.status_code,
        media_type=backend_resp.headers.get("Content-Type"),
        background=backend_resp.aclose,
    )

# --- 4. Run the server ---
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("main:app", host="127.0.0.1", port=8100, reload=False)
```
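To confirm the proxy really streams end to end, here is a minimal client sketch using the official openai Python SDK. It assumes the proxy above is running on 127.0.0.1:8100 and that a model such as qwen-plus is available to your DashScope account (both are assumptions; adjust to your setup). The api_key passed to the client is a dummy value, since the proxy replaces the Authorization header with its own key.

```python
# client_test.py -- minimal streaming check against the local proxy.
# Assumes main.py is running on 127.0.0.1:8100 and that "qwen-plus"
# (a hypothetical choice) is a model your DashScope account can use.
from openai import OpenAI

client = OpenAI(
    base_url="http://127.0.0.1:8100/v1",  # the local proxy, not DashScope
    api_key="unused",  # any value works; the proxy injects the real key
)

stream = client.chat.completions.create(
    model="qwen-plus",
    messages=[{"role": "user", "content": "Briefly introduce yourself."}],
    stream=True,
)

# Print tokens as they arrive to verify true streaming behavior.
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()
```

If the output appears incrementally rather than all at once, the proxy is forwarding the SSE stream correctly.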
Reader replies
Is there front-end/back-end code like Doubao's desktop AI coding UI, with collapsible code blocks, streaming output, and a preview pane on the right?
Does Node.js have fast libraries for directory crawling and glob-style file matching?
In JS, how do you stream an AI answer with collapsible code blocks, where clicking to the right of a block previews the code?
How can an AI large model turn an article into an at-a-glance visual flowchart or chart image?
How do you write system prompts for a large model to generate HTML UI prototypes and PPT-style presentations?
How do you convert an RTSP live video stream into a WebSocket stream for viewing on an H5 page?
Why did Coze open-source its workflow agent Coze Studio?
How can a web page detect that it was opened from a bookmark?
How can Python implement PHP-style dynamic HTTP script request/response handling?
How can JS implement PHP-style dynamic HTTP script request/response handling?