import os
from dotenv import load_dotenv
from langchain_openai import AzureChatOpenAI
from langchain_core.messages import HumanMessage

# Load environment variables and set up the model
load_dotenv()
model = AzureChatOpenAI(
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
    openai_api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
)

# First turn
message = HumanMessage(content="I am Bob")
response = model.invoke([message])
print("Model's response:")
print(response.content)

# Second turn: a fresh invoke() call carries no memory of the first turn
message = HumanMessage(content="What's my name?")
response = model.invoke([message])
print("Model's response:")
print(response.content)
Model's response:
Hello Bob! It's nice to meet you. Is there anything I can help you with today?
Model's response:
I apologize, but I don't have any prior context or information about your name. Each interaction with me starts fresh, and I don't retain information from previous conversations. If you'd like me to know your name, you'll need to tell me in this current conversation. So, may I ask what your name is?
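Before reaching for a framework, the simplest workaround is to carry the history yourself. The following is a minimal sketch, assuming the same `model` configured above: keep the running conversation in a plain list and pass the whole list on every call, so the model sees the earlier turns.

# Minimal sketch (assumes the `model` configured above): accumulate the
# conversation in a list and send the full list on each call.
from langchain_core.messages import AIMessage, HumanMessage

history = [HumanMessage(content="I am Bob")]
first = model.invoke(history)
history.append(AIMessage(content=first.content))  # record the model's reply

history.append(HumanMessage(content="What's my name?"))
second = model.invoke(history)
print(second.content)  # the model can now answer from the accumulated history

Managing this list by hand quickly gets awkward (trimming, persistence, multiple users), which is what the LangGraph checkpointer in the next example handles for you.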
import os
from dotenv import load_dotenv
from langchain_openai import AzureChatOpenAI
from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph

# Load environment variables and set up the model
load_dotenv()
model = AzureChatOpenAI(
    model_name="gpt-4",
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
    openai_api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
)

# Define the conversation graph and attach memory
workflow = StateGraph(state_schema=MessagesState)

def call_model(state: MessagesState):
    # Invoke the model with the full message history stored in the graph state
    response = model.invoke(state["messages"])
    return {"messages": response}

workflow.add_edge(START, "model")
workflow.add_node("model", call_model)

memory = MemorySaver()
app = workflow.compile(checkpointer=memory)

# Run the conversation; the thread_id keys the stored history
config = {"configurable": {"thread_id": "tom"}}

# First turn
query = "Hi! I'm Bob."
input_messages = [HumanMessage(query)]
output = app.invoke({"messages": input_messages}, config)
output["messages"][-1].pretty_print()

# Second turn: the checkpointer replays the saved history for this thread
query = "What's my name?"
input_messages = [HumanMessage(query)]
output = app.invoke({"messages": input_messages}, config)
output["messages"][-1].pretty_print()
Human: Hi! I'm Bob.
AI: Hello Bob! It's nice to meet you. How can I assist you today?
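Because the checkpointer keys history by thread_id, each thread gets its own memory. The following is a short sketch, assuming the compiled `app` and `config` from above: a new thread_id starts from a blank slate, while reusing "tom" continues the conversation that already knows Bob.

# Sketch (assumes the compiled `app` above): histories are isolated per thread_id.
other_config = {"configurable": {"thread_id": "alice"}}
output = app.invoke(
    {"messages": [HumanMessage("What's my name?")]}, other_config
)
output["messages"][-1].pretty_print()  # this thread has no record of "Bob"

output = app.invoke(
    {"messages": [HumanMessage("What's my name?")]}, config
)
output["messages"][-1].pretty_print()  # the "tom" thread still remembers Bob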