import os

from dotenv import load_dotenv, dotenv_values
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings

# Get the directory of the current script
current_dir = os.path.dirname(os.path.abspath(__file__))
# Construct the path to the .env file in the same directory as rag.py
env_path = os.path.join(current_dir, ".env")
# Load environment variables from the .env file into the process
load_dotenv(env_path)
# Also load the .env file contents as a dictionary
env_vars = dotenv_values(env_path)
# Load the LangSmith tracing configuration from environment variables
LANGCHAIN_TRACING_V2 = os.getenv("LANGCHAIN_TRACING_V2", "false").lower() == "true"
LANGCHAIN_API_KEY = os.getenv("LANGCHAIN_API_KEY")
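For reference, the script reads the following keys from .env. This is a minimal sketch with placeholder values; the deployment names and API versions shown are assumptions, so substitute whatever your Azure resource actually uses:

# .env (placeholder values only; never commit real credentials)
AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/
AZURE_OPENAI_API_KEY=...
AZURE_OPENAI_DEPLOYMENT_NAME=gpt-4o
AZURE_OPENAI_API_VERSION=2024-02-01
AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-3-small
AZURE_OPENAI_EMBEDDING_API_VERSION=2024-02-01
LANGCHAIN_TRACING_V2=false
LANGCHAIN_API_KEY=...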
# A tiny in-memory corpus: five one-sentence pet descriptions in Chinese
documents = [
    Document(
        # "Dogs are great companions, known for their loyalty and friendliness."
        page_content="狗是很好的伴侣,以忠诚和友好而闻名。",
        metadata={"source": "mammal-pets-doc"},
    ),
    Document(
        # "Cats are independent pets that often prefer their own space."
        page_content="猫是独立的宠物,通常喜欢自己的空间。",
        metadata={"source": "mammal-pets-doc"},
    ),
    Document(
        # "Goldfish are a beginner-friendly pet that needs relatively simple care."
        page_content="金鱼是初学者喜欢的宠物,只需要相对简单的照顾。",
        metadata={"source": "fish-pets-doc"},
    ),
    Document(
        # "Parrots are intelligent birds capable of mimicking human speech."
        page_content="鹦鹉是聪明的鸟类,能够模仿人类说话。",
        metadata={"source": "bird-pets-doc"},
    ),
    Document(
        # "Rabbits are social animals that need plenty of room to hop around."
        page_content="兔子是社交动物,需要大量空间来跳跃。",
        metadata={"source": "mammal-pets-doc"},
    ),
]
# Set up Azure OpenAI embeddings.
# Note: embedding quality has a major impact on retrieval results;
# make sure the deployed model handles Chinese text well.
embeddings = AzureOpenAIEmbeddings(
    azure_endpoint=env_vars.get("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=env_vars.get("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"),
    openai_api_version=env_vars.get("AZURE_OPENAI_EMBEDDING_API_VERSION"),
    api_key=env_vars.get("AZURE_OPENAI_API_KEY"),
)

# Embed the documents and index them in a Chroma vector store
vectorstore = Chroma.from_documents(
    documents,
    embedding=embeddings,
)
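Before wiring up the retriever, it is worth sanity-checking the index directly, since this shows which document a query actually lands on. A minimal sketch (the query string here is just an example); with Chroma the returned score is a distance, so lower means more similar:

# Sanity check: which documents does a cat-related query retrieve, and how close are they?
for doc, score in vectorstore.similarity_search_with_score("猫喜欢独处吗?", k=2):
    print(f"{score:.4f}  {doc.page_content}")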
# Expose the vector store as a retriever that returns only the single best match
retriever = vectorstore.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 1},
)
# Chat model used to generate the final answer
model = AzureChatOpenAI(
    model_name="gpt-4o",
    azure_endpoint=env_vars.get("AZURE_OPENAI_ENDPOINT"),
    azure_deployment=env_vars.get("AZURE_OPENAI_DEPLOYMENT_NAME"),
    openai_api_version=env_vars.get("AZURE_OPENAI_API_VERSION"),
    api_key=env_vars.get("AZURE_OPENAI_API_KEY"),
)
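A one-line smoke test confirms the chat deployment is reachable before building the chain; LangChain chat models accept a bare string, which is wrapped as a human message:

# Optional: verify the Azure deployment responds at all
print(model.invoke("你好").content)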
template = """
根据提供的上下文回答这个问题。
问题: {question}
上下文:
{context}
回答:
"""
prompt = ChatPromptTemplate.from_template(template)
def rag_chain(question: str) -> str:
    # Retrieve the documents most relevant to the question
    retrieved_docs = retriever.invoke(question)
    # Join the retrieved documents into a single context string
    context = "\n".join(doc.page_content for doc in retrieved_docs)
    # Fill the prompt with the question and the retrieved context
    formatted_prompt = prompt.format(question=question, context=context)
    # Ask the model and return the text of its reply
    response = model.invoke(formatted_prompt)
    return response.content
# Example usage (the question asks, in Chinese: "What is the best pet?")
response = rag_chain("最好的宠物是什么?")
print(response)
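The same pipeline can also be written declaratively in LCEL. A minimal sketch, functionally equivalent to rag_chain above; format_docs is a helper introduced here, not part of the original script:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

def format_docs(docs):
    # Same context formatting as inside rag_chain
    return "\n".join(doc.page_content for doc in docs)

lcel_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

print(lcel_chain.invoke("最好的宠物是什么?"))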