Streaming output

from google import genai
client = genai.Client()
response = client.models.generate_content_stream(
    model="gemini-2.5-flash",
    contents=["Explain how AI works"]
)
for chunk in response:
    print(chunk.text, end="")
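
For async applications the client also exposes an async surface under client.aio. The following is a minimal sketch, assuming client.aio.models.generate_content_stream mirrors the synchronous call above and yields chunks through an async iterator:

import asyncio
from google import genai

async def main():
    client = genai.Client()
    # Assumption: awaiting the call returns an async iterator of chunks,
    # mirroring the synchronous generate_content_stream above.
    async for chunk in await client.aio.models.generate_content_stream(
        model="gemini-2.5-flash",
        contents=["Explain how AI works"],
    ):
        print(chunk.text, end="")

asyncio.run(main())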

Multimodal

from PIL import Image
from google import genai
client = genai.Client()
image = Image.open("/path/to/organ.png")
response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents=[image, "Tell me about this instrument"]
)
print(response.text)
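
If the image is already in memory (downloaded or generated) rather than on disk, raw bytes can be sent instead of a PIL image. A minimal sketch, assuming types.Part.from_bytes accepts data and mime_type keyword arguments; the image/png mime type matches the .png file used above:

from google import genai
from google.genai import types

client = genai.Client()

# Read the image as raw bytes instead of opening it with PIL
with open("/path/to/organ.png", "rb") as f:
    image_bytes = f.read()

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents=[
        types.Part.from_bytes(data=image_bytes, mime_type="image/png"),
        "Tell me about this instrument",
    ],
)
print(response.text)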

Adding a system prompt

from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
    model="gemini-2.5-flash",
    config=types.GenerateContentConfig(
        system_instruction="You are a cat. Your name is Neko."),
    contents="Hello there"
)
print(response.text)
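
The system instruction is just one field of GenerateContentConfig, so it can be combined with other generation settings in the same config object. A short sketch; temperature and max_output_tokens are standard config fields, but the values here are placeholders, not recommendations:

from google import genai
from google.genai import types

client = genai.Client()

response = client.models.generate_content(
    model="gemini-2.5-flash",
    config=types.GenerateContentConfig(
        system_instruction="You are a cat. Your name is Neko.",
        temperature=0.7,         # sampling temperature
        max_output_tokens=200,   # cap on the length of the reply
    ),
    contents="Hello there"
)
print(response.text)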

The flash model has thinking (reasoning) enabled by default; it can be turned off

from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents="How does AI work?",
    config=types.GenerateContentConfig(
        thinking_config=types.ThinkingConfig(thinking_budget=0)  # Disables thinking
    ),
)
print(response.text)
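
Instead of switching thinking off completely, the budget can be set to a token count to cap how much the model reasons before answering. A minimal sketch; the 1024-token budget is only an example value:

from google import genai
from google.genai import types

client = genai.Client()

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents="How does AI work?",
    config=types.GenerateContentConfig(
        # Example: allow up to 1024 thinking tokens rather than disabling thinking
        thinking_config=types.ThinkingConfig(thinking_budget=1024)
    ),
)
print(response.text)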

Multi-turn conversation

from google import genai

client = genai.Client()
chat = client.chats.create(model="gemini-2.5-flash")
response = chat.send_message_stream("I have 2 dogs in my house.")
for chunk in response:
    print(chunk.text, end="")
response = chat.send_message_stream("How many paws are in my house?")
for chunk in response:
    print(chunk.text, end="")
for message in chat.get_history():
    print(f'role - {message.role}', end=": ")
    print(message.parts[0].text)
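
If token-by-token output is not needed, the chat object also has a non-streaming send_message call that returns the full reply at once. A minimal sketch of the same conversation, assuming send_message returns the same response type as generate_content:

from google import genai

client = genai.Client()
chat = client.chats.create(model="gemini-2.5-flash")

# send_message blocks until the whole reply is available
response = chat.send_message("I have 2 dogs in my house.")
print(response.text)

response = chat.send_message("How many paws are in my house?")
print(response.text)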


