The Complete Guide to Calling the ERNIE Bot (文心一言) API with Python: From Getting Started to Real-World Use
2025.09.17 10:18
Summary: This article explains in detail how to call the ERNIE Bot API from Python, covering environment setup, request construction, error handling, and typical application scenarios, with complete code samples and best practices.
一、Technical Preparation Before Calling the API
1.1 Installing the Core Dependencies
Calling the ERNIE Bot API requires the requests library for HTTP calls; the standard-library json module handles data parsing. Managing dependencies in a virtual environment is recommended:
python -m venv ernie_env
source ernie_env/bin/activate # Linux/Mac
ernie_env\Scripts\activate # Windows
pip install requests
For more complex scenarios, pydantic (data validation) and loguru (logging) are also worth adding, as sketched below.
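As an illustration of where those two libraries fit, here is a minimal sketch that validates the response shape with pydantic and logs mismatches with loguru. The field names result and usage are assumptions that mirror the fields read later in this article, not a documented schema.

from typing import Optional
from loguru import logger
from pydantic import BaseModel, ValidationError

class ChatResult(BaseModel):
    result: str                   # generated text (assumed field name)
    usage: Optional[dict] = None  # token accounting, if the API returns it

def parse_response(raw: dict) -> Optional[ChatResult]:
    """Validate the raw JSON dict; log and return None on a schema mismatch."""
    try:
        return ChatResult(**raw)
    except ValidationError as exc:
        logger.warning("Unexpected response shape: {}", exc)
        return None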
1.2 Understanding the Authentication Mechanism
The ERNIE Bot API uses two-factor authentication with an API Key and a Secret Key, both obtained from the Baidu AI Cloud (百度智能云) console. The keys must only be transmitted over HTTPS, and storing them in environment variables rather than hard-coding them is recommended (a sketch of the typical token-exchange flow follows the snippet below):
import os
os.environ['ERNIE_API_KEY'] = 'your_api_key'
os.environ['ERNIE_SECRET_KEY'] = 'your_secret_key'
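On Baidu AI Cloud, the API Key and Secret Key are typically exchanged for a short-lived access_token through the platform's OAuth endpoint before model calls are made. The sketch below follows that general pattern; the endpoint and parameter names come from Baidu's generic AI Cloud OAuth flow and should be checked against the current ERNIE Bot documentation, as should whether your endpoint expects this token or the header used in the examples below.

import os
import requests

def get_access_token() -> str:
    """Exchange API Key + Secret Key for a temporary access_token
    (standard Baidu AI Cloud OAuth flow; verify against the ERNIE docs)."""
    resp = requests.post(
        "https://aip.baidubce.com/oauth/2.0/token",
        params={
            "grant_type": "client_credentials",
            "client_id": os.environ["ERNIE_API_KEY"],
            "client_secret": os.environ["ERNIE_SECRET_KEY"],
        },
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()["access_token"]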
二、Implementing the HTTP Request
2.1 Basic Request Structure
The core request consists of the following elements:
- URL: https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions
- Headers: must include Content-Type: application/json
- Body: request parameters in JSON format
A complete request example:
import requests
import json
import os

def call_ernie_api(prompt, model="ernie-3.5-turbo"):
    url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
    headers = {
        'Content-Type': 'application/json',
        'X-BD-API-KEY': os.getenv('ERNIE_API_KEY')
    }
    data = {
        "messages": [{"role": "user", "content": prompt}],
        "model": model
    }
    try:
        response = requests.post(
            url,
            headers=headers,
            data=json.dumps(data),
            timeout=10
        )
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Request failed: {e}")
        return None
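A quick call to verify the setup (the result field used below is the field this article reads elsewhere; confirm it against the live response format):

reply = call_ernie_api("Briefly explain what an access token is")
if reply:
    print(reply.get("result", ""))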
2.2 Advanced Parameter Configuration
- Temperature (temperature): controls generation randomness (0.1-1.0)
- Maximum length (max_tokens): limits response length (default 2048)
- System message (system_message): sets behavioral guidelines for the model
A request example using these options:
def advanced_call(prompt, temperature=0.7, max_tokens=1024):
    data = {
        "messages": [
            {"role": "system", "content": "You are a professional technical assistant"},
            {"role": "user", "content": prompt}
        ],
        "model": "ernie-4.0-turbo",
        "temperature": temperature,
        "max_tokens": max_tokens
    }
    # The rest of the request/response handling is the same as call_ernie_api above...
三、Error Handling and Best Practices
3.1 Common Error Types
| Error code | Meaning | Resolution |
| --- | --- | --- |
| 401 | Authentication failure | Check that the API Key is valid |
| 429 | Rate limit exceeded | Apply exponential backoff |
| 500 | Server-side error | Retry up to 3 times, then report the issue |
3.2 Implementing a Retry Mechanism
from time import sleep
import random

def call_with_retry(prompt, max_retries=3):
    for attempt in range(max_retries):
        result = call_ernie_api(prompt)
        if result and 'error_code' not in result:
            return result
        # Exponential backoff with jitter, capped at 10 seconds
        wait_time = min(2 ** attempt + random.uniform(0, 1), 10)
        sleep(wait_time)
    return {"error": "Max retries exceeded"}
四、Typical Application Scenarios
4.1 Batch Text Generation
def batch_generate(prompts, batch_size=5):
    results = []
    for i in range(0, len(prompts), batch_size):
        batch = prompts[i:i+batch_size]
        # Sequential for clarity; see the threaded sketch below for a parallel
        # version (threading or asyncio both suit these I/O-bound calls)
        for prompt in batch:
            response = call_ernie_api(prompt)
            if response:
                results.append({
                    "prompt": prompt,
                    "response": response['result']
                })
    return results
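For the parallelism mentioned in the comment above, a thread pool is the simplest fit because each call is I/O bound. The sketch below reuses call_ernie_api and is meant as an illustration rather than a drop-in replacement; max_workers=5 is an arbitrary choice.

from concurrent.futures import ThreadPoolExecutor

def batch_generate_threaded(prompts, max_workers=5):
    """Run call_ernie_api concurrently for I/O-bound batches of prompts."""
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # pool.map preserves input order, so zip pairs prompts with their responses
        for prompt, response in zip(prompts, pool.map(call_ernie_api, prompts)):
            if response:
                results.append({
                    "prompt": prompt,
                    "response": response.get("result", ""),
                })
    return results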
4.2 Streaming Response Handling
For long-form generation, receiving the response as a stream is recommended (a concrete sketch follows the pseudocode below):
def stream_response(prompt):
    # Pseudocode: the API must support streaming for this to work
    chunks = []
    while True:
        chunk = receive_chunk()  # placeholder; depends on API streaming support
        if not chunk:
            break
        chunks.append(chunk)
        print(chunk, end='', flush=True)
    return ''.join(chunks)
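Many chat-completion style APIs stream results as server-sent events when a stream flag is set in the request body. The sketch below assumes a "stream": True parameter and data:-prefixed JSON chunks carrying a result field; all three details are assumptions to verify against the official streaming documentation.

import json
import os
import requests

def stream_ernie_response(prompt, model="ernie-3.5-turbo"):
    """Sketch of SSE-style streaming; the "stream" flag and the
    'data: ' chunk format are assumptions, not confirmed API behavior."""
    url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
    headers = {
        'Content-Type': 'application/json',
        'X-BD-API-KEY': os.getenv('ERNIE_API_KEY')
    }
    payload = {
        "messages": [{"role": "user", "content": prompt}],
        "model": model,
        "stream": True,  # assumed streaming switch
    }
    pieces = []
    with requests.post(url, headers=headers, json=payload, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue
            chunk = json.loads(line[len("data: "):])
            text = chunk.get("result", "")
            print(text, end="", flush=True)
            pieces.append(text)
    return "".join(pieces)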
五、Performance Optimization Strategies
5.1 Request Caching
from functools import lru_cache

@lru_cache(maxsize=100)
def cached_call(prompt):
    return call_ernie_api(prompt)
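Note that lru_cache keys the cache only on the function arguments, so if model or temperature can vary between calls they need to be part of cached_call's signature; the cache is also per-process and never expires entries by time, so a TTL-based cache may be a better fit for long-running services.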
5.2 Asynchronous Processing Architecture
import asyncio
import aiohttp
import json
import os

async def async_call(prompt, model="ernie-3.5-turbo"):
    url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
    headers = {
        'Content-Type': 'application/json',
        'X-BD-API-KEY': os.getenv('ERNIE_API_KEY')
    }
    data = {"messages": [{"role": "user", "content": prompt}], "model": model}
    async with aiohttp.ClientSession() as session:
        async with session.post(url, headers=headers, data=json.dumps(data)) as resp:
            return await resp.json()

# Example of a batched call
async def main():
    prompts = ["Question 1", "Question 2", "Question 3"]
    tasks = [async_call(p) for p in prompts]
    return await asyncio.gather(*tasks)
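To avoid tripping the 429 rate limit from section 三 when batches grow, the number of in-flight requests can be capped with an asyncio.Semaphore; the limit of 5 below is an arbitrary illustrative value.

async def bounded_main(prompts, limit=5):
    """Cap in-flight requests so large batches are less likely to hit 429 responses."""
    semaphore = asyncio.Semaphore(limit)

    async def guarded(prompt):
        async with semaphore:
            return await async_call(prompt)

    return await asyncio.gather(*(guarded(p) for p in prompts))

# results = asyncio.run(bounded_main(["Question 1", "Question 2", "Question 3"]))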
六、Security and Compliance Recommendations
In line with the earlier sections: keep the API Key and Secret Key in environment variables rather than in source code, transmit them over HTTPS only, and make sure any user data included in prompts complies with applicable data-security and privacy-protection regulations.
七、Complete Project Example
# ernie_bot_client.py
import os
import json
import requests
from typing import Optional, Dict, Any

class ErnieBotClient:
    def __init__(self, api_key: Optional[str] = None, secret_key: Optional[str] = None):
        self.api_key = api_key or os.getenv('ERNIE_API_KEY')
        self.secret_key = secret_key or os.getenv('ERNIE_SECRET_KEY')
        self.base_url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop"

    def _get_auth_headers(self) -> Dict[str, str]:
        return {
            'Content-Type': 'application/json',
            'X-BD-API-KEY': self.api_key
        }

    def chat_completion(
        self,
        prompt: str,
        model: str = "ernie-3.5-turbo",
        temperature: float = 0.7,
        max_tokens: int = 2048
    ) -> Optional[Dict[str, Any]]:
        """
        Single-turn chat interface.
        :param prompt: user input
        :param model: model name
        :param temperature: creativity parameter
        :param max_tokens: maximum generation length
        :return: API response, or None on failure
        """
        url = f"{self.base_url}/chat/completions"
        payload = {
            "messages": [{"role": "user", "content": prompt}],
            "model": model,
            "temperature": temperature,
            "max_tokens": max_tokens
        }
        try:
            response = requests.post(
                url,
                headers=self._get_auth_headers(),
                data=json.dumps(payload),
                timeout=15
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"API call failed: {e}")
            return None

# Usage example
if __name__ == "__main__":
    client = ErnieBotClient()
    response = client.chat_completion(
        prompt="Implement the quicksort algorithm in Python",
        temperature=0.3
    )
    if response:
        print("Generated result:", response.get('result', ''))
八、Frequently Asked Questions
Q1: How can API call costs be reduced?
- Use a more lightweight model (e.g. ernie-tiny)
- Keep prompts short
- Cache results
Q2: How can response speed be improved?
Within the techniques covered here: stream long outputs as they are generated (section 4.2), cache repeated requests (section 5.1), and issue independent calls concurrently with asyncio (section 5.2).
Q3: How is multi-turn conversation implemented?
class Conversation:
    def __init__(self):
        self.history = []

    def add_message(self, role, content):
        self.history.append({"role": role, "content": content})

    def get_response(self, prompt, client):
        self.add_message("user", prompt)
        # Flatten the recent history into a single prompt; a production client
        # would pass the full messages list as the API's context instead
        context = "Current conversation context:\n" + "\n".join(
            f"{msg['role']}: {msg['content']}"
            for msg in self.history[-5:]  # limit the context window
        )
        response = client.chat_completion(context)
        if response:
            self.add_message("assistant", response.get('result', ''))
        return response
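A hypothetical two-turn exchange using the helper above together with the ErnieBotClient from section 七:

conv = Conversation()
client = ErnieBotClient()
conv.get_response("What is exponential backoff?", client)
follow_up = conv.get_response("Show a short Python example of it", client)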
With systematic implementation and engineering optimization, calling the ERNIE Bot API from Python can power stable and efficient AI applications. Developers should choose model parameters and an architecture suited to their specific scenario, while strictly following data-security and privacy-protection requirements.