news 2026/5/3 8:20:09

06-大语言模型(LLM)与应用——LangChain应用开发

作者头像

张小明

前端开发工程师

1.2k 24
文章封面图
06-大语言模型(LLM)与应用——LangChain应用开发

LangChain应用开发(Chain、Memory、Callback、RAG应用)

一、LangChain概述

1.1 为什么需要LangChain?

# Section 1.1: overview figure — LangChain's six core modules arranged
# as spokes around a central hub, followed by a summary of what the
# framework solves. (Reconstructed from whitespace-mangled source.)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, FancyBboxPatch  # used by later sections of the article
import warnings

warnings.filterwarnings('ignore')

print("=" * 60)
print("LangChain:构建大模型应用的框架")
print("=" * 60)

# LangChain core-component diagram.
fig, ax = plt.subplots(figsize=(12, 8))
ax.axis('off')

# Central hub node.
center = plt.Circle((0.5, 0.5), 0.12, color='lightcoral', ec='black')
ax.add_patch(center)
ax.text(0.5, 0.5, 'LangChain\n核心', ha='center', va='center',
        fontsize=10, fontweight='bold')

# Six module nodes: name -> (x, y) position in axes coordinates.
modules = {
    'Models': (0.15, 0.75),
    'Prompts': (0.85, 0.75),
    'Chains': (0.15, 0.25),
    'Agents': (0.85, 0.25),
    'Memory': (0.5, 0.85),
    'Indexes': (0.5, 0.15),
}
module_descs = {
    'Models': '模型接口\n(LLM/Chat/Embeddings)',
    'Prompts': '提示管理\n(模板/示例选择器)',
    'Chains': '链式调用\n(组合多个操作)',
    'Agents': '智能代理\n(自主决策执行)',
    'Memory': '记忆管理\n(对话历史)',
    'Indexes': '索引检索\n(文档/向量库)',
}
for module, (x, y) in modules.items():
    circle = plt.Circle((x, y), 0.1, color='lightblue', ec='black')
    ax.add_patch(circle)
    ax.text(x, y, module_descs[module], ha='center', va='center', fontsize=7)
    # Spoke connecting each module back to the central hub.
    ax.annotate('', xy=(x, y), xytext=(0.5, 0.5),
                arrowprops=dict(arrowstyle='-', color='gray', lw=1, alpha=0.5))

ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_title('LangChain六大核心模块', fontsize=14)
plt.tight_layout()
plt.show()

print("\n💡 LangChain解决的问题:")
print(" 1. 统一接口:标准化大模型调用")
print(" 2. 组件复用:避免重复开发")
print(" 3. 流程编排:轻松构建复杂应用")
print(" 4. 生态集成:与各种工具无缝对接")

二、Chain(链式调用)

2.1 基础Chain

def chain_basics():
    """Illustrate the simplest LLMChain pipeline.

    Draws a five-node flow diagram (input -> prompt template -> LLM ->
    output parsing -> result) and prints a runnable LangChain code
    snippet for the reader. Returns ``None``; output is the figure plus
    stdout text. (Reconstructed from whitespace-mangled source.)
    """
    print("\n" + "=" * 60)
    print("Chain:链式调用")
    print("=" * 60)

    fig, ax = plt.subplots(figsize=(12, 4))
    ax.axis('off')

    # LLMChain pipeline stages: (label, x-position on the axes).
    steps = [("输入", 0.1), ("Prompt\n模板", 0.3), ("LLM\n模型", 0.5),
             ("输出\n解析", 0.7), ("结果", 0.9)]
    for step, x in steps:
        circle = plt.Circle((x, 0.5), 0.08, color='lightblue', ec='black')
        ax.add_patch(circle)
        ax.text(x, 0.5, step, ha='center', va='center', fontsize=8)
        if x < 0.85:  # no arrow after the last node
            ax.annotate('', xy=(x + 0.18, 0.5), xytext=(x + 0.1, 0.5),
                        arrowprops=dict(arrowstyle='->', lw=2))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_title('LLMChain:最简单的链式调用', fontsize=12)
    plt.tight_layout()
    plt.show()

    # Example snippet shown to the reader (printed, not executed here).
    code = """
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

# 1. 创建Prompt模板
prompt = PromptTemplate(
    input_variables=["topic"],
    template="请用三句话解释什么是{topic}。"
)

# 2. 初始化模型
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)

# 3. 创建Chain
chain = LLMChain(llm=llm, prompt=prompt)

# 4. 运行
result = chain.run(topic="机器学习")
print(result)
"""
    print(code)

chain_basics()

2.2 多Chain组合

def chain_composition():
    """Show how SequentialChain pipes several LLMChains together.

    Draws a translate -> summarize -> sentiment flow diagram and prints
    a LangChain SequentialChain code snippet. Returns ``None``.
    (Reconstructed from whitespace-mangled source.)
    """
    print("\n" + "=" * 60)
    print("多Chain组合")
    print("=" * 60)

    fig, ax = plt.subplots(figsize=(12, 5))
    ax.axis('off')

    # SequentialChain stages: (label, x-position on the axes).
    steps = [("输入", 0.05), ("Chain1\n翻译", 0.25), ("Chain2\n摘要", 0.45),
             ("Chain3\n情感", 0.65), ("输出", 0.85)]
    for step, x in steps:
        circle = plt.Circle((x, 0.5), 0.1, color='lightgreen', ec='black')
        ax.add_patch(circle)
        ax.text(x, 0.5, step, ha='center', va='center', fontsize=7)
        if x < 0.75:  # no arrow after the last node
            ax.annotate('', xy=(x + 0.18, 0.5), xytext=(x + 0.1, 0.5),
                        arrowprops=dict(arrowstyle='->', lw=2))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_title('SequentialChain:串联多个Chain', fontsize=12)
    plt.tight_layout()
    plt.show()

    # Example snippet shown to the reader (printed, not executed here).
    code = """
from langchain.chains import SequentialChain

# 定义多个Chain
translate_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        input_variables=["text"],
        template="将以下文本翻译成英文: {text}"
    ),
    output_key="english_text"
)

summarize_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        input_variables=["english_text"],
        template="总结以下英文文本: {english_text}"
    ),
    output_key="summary"
)

sentiment_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        input_variables=["summary"],
        template="分析以下文本的情感: {summary}"
    ),
    output_key="sentiment"
)

# 组合Chain
overall_chain = SequentialChain(
    chains=[translate_chain, summarize_chain, sentiment_chain],
    input_variables=["text"],
    output_variables=["english_text", "summary", "sentiment"],
    verbose=True
)

result = overall_chain({"text": "这部电影非常精彩!"})
print(result)
"""
    print(code)

chain_composition()

三、Memory(记忆管理)

3.1 记忆类型

def memory_types():
    """Compare LangChain's four conversation-memory strategies.

    Renders a 2x2 panel of mock dialogues illustrating buffer, window,
    summary and vector-store memory, then prints a code snippet showing
    how each memory class is constructed. Returns ``None``.
    (Reconstructed from whitespace-mangled source; the embedded snippet
    was split across two scraped lines and is rejoined here.)
    """
    print("\n" + "=" * 60)
    print("Memory:对话记忆")
    print("=" * 60)

    fig, axes = plt.subplots(2, 2, figsize=(14, 10))

    # 1. BufferMemory: keeps the full transcript verbatim.
    ax1 = axes[0, 0]
    ax1.axis('off')
    ax1.set_title('ConversationBufferMemory', fontsize=10)
    ax1.text(0.1, 0.8, '保存所有对话', fontsize=8)
    ax1.text(0.1, 0.7, '用户: 我叫张三', fontsize=7)
    ax1.text(0.1, 0.62, 'AI: 你好张三!', fontsize=7)
    ax1.text(0.1, 0.54, '用户: 我喜欢编程', fontsize=7)
    ax1.text(0.1, 0.46, 'AI: 编程很有趣!', fontsize=7)
    ax1.text(0.1, 0.38, '用户: 我叫什么?', fontsize=7)
    ax1.text(0.1, 0.3, 'AI: 你叫张三', fontsize=7, color='green')

    # 2. WindowMemory: only the most recent K turns (faded = dropped).
    ax2 = axes[0, 1]
    ax2.axis('off')
    ax2.set_title('ConversationBufferWindowMemory', fontsize=10)
    ax2.text(0.1, 0.8, '只保留最近K轮', fontsize=8)
    ax2.text(0.1, 0.7, '用户: 我叫张三', fontsize=7, alpha=0.3)
    ax2.text(0.1, 0.62, 'AI: 你好张三!', fontsize=7, alpha=0.3)
    ax2.text(0.1, 0.54, '用户: 我喜欢编程', fontsize=7)
    ax2.text(0.1, 0.46, 'AI: 编程很有趣!', fontsize=7)
    ax2.text(0.1, 0.38, '用户: 我叫什么?', fontsize=7)
    ax2.text(0.1, 0.3, 'AI: 你叫张三', fontsize=7, color='green')

    # 3. SummaryMemory: compresses the history into a running summary.
    ax3 = axes[1, 0]
    ax3.axis('off')
    ax3.set_title('ConversationSummaryMemory', fontsize=10)
    ax3.text(0.1, 0.8, '总结压缩历史', fontsize=8)
    ax3.text(0.1, 0.7, '摘要: 用户叫张三,喜欢编程', fontsize=7)
    ax3.text(0.1, 0.62, '用户: 我叫什么?', fontsize=7)
    ax3.text(0.1, 0.54, 'AI: 你叫张三', fontsize=7, color='green')

    # 4. VectorStoreMemory: retrieves relevant memories by similarity.
    ax4 = axes[1, 1]
    ax4.axis('off')
    ax4.set_title('VectorStoreRetrieverMemory', fontsize=10)
    ax4.text(0.1, 0.8, '向量检索相关记忆', fontsize=8)
    ax4.text(0.1, 0.7, '存储: 张三-喜欢编程', fontsize=7)
    ax4.text(0.1, 0.62, '查询: 编程爱好', fontsize=7)
    ax4.text(0.1, 0.54, '检索: 张三-喜欢编程', fontsize=7, color='green')

    plt.suptitle('LangChain记忆类型', fontsize=14)
    plt.tight_layout()
    plt.show()

    # Example snippet shown to the reader (printed, not executed here).
    code = """
from langchain.memory import (
    ConversationBufferMemory,
    ConversationBufferWindowMemory,
    ConversationSummaryMemory,
    VectorStoreRetrieverMemory
)

# 1. BufferMemory
memory = ConversationBufferMemory()
memory.save_context({"input": "我叫张三"}, {"output": "你好张三!"})
memory.save_context({"input": "我喜欢编程"}, {"output": "编程很有趣!"})
print(memory.load_memory_variables({}))

# 2. WindowMemory(保留最近2轮)
window_memory = ConversationBufferWindowMemory(k=2)

# 3. SummaryMemory
summary_memory = ConversationSummaryMemory(llm=llm)

# 4. VectorStoreMemory
retriever = vectorstore.as_retriever(k=2)
vector_memory = VectorStoreRetrieverMemory(retriever=retriever)
vector_memory.save_context({"input": "我喜欢Python"}, {"output": "Python很好"})
"""
    print(code)

memory_types()

四、Callback(回调机制)

4.1 Callback使用

def callback_usage():
    """Print an example of LangChain's callback (monitoring/logging) API.

    Shows the built-in ``StdOutCallbackHandler``, a custom
    ``BaseCallbackHandler`` subclass with lifecycle hooks, and how to
    attach multiple callbacks to a chain. Returns ``None``; only prints
    the snippet. (Reconstructed from whitespace-mangled source.)
    """
    print("\n" + "=" * 60)
    print("Callback:监控和日志")
    print("=" * 60)

    # Example snippet shown to the reader (printed, not executed here).
    code = """
from langchain.callbacks import StdOutCallbackHandler
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import LLMChain

# 1. 内置回调
chain = LLMChain(
    llm=llm,
    prompt=prompt,
    callbacks=[StdOutCallbackHandler()]  # 打印到控制台
)

# 2. 自定义回调
class CustomCallbackHandler(BaseCallbackHandler):
    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM开始,提示词: {prompts}")

    def on_llm_end(self, response, **kwargs):
        print(f"LLM结束,响应: {response}")

    def on_chain_start(self, serialized, inputs, **kwargs):
        print(f"Chain开始,输入: {inputs}")

    def on_chain_end(self, outputs, **kwargs):
        print(f"Chain结束,输出: {outputs}")

    def on_tool_start(self, serialized, input_str, **kwargs):
        print(f"工具开始,输入: {input_str}")

    def on_tool_end(self, output, **kwargs):
        print(f"工具结束,输出: {output}")

# 3. 使用回调
chain = LLMChain(
    llm=llm,
    prompt=prompt,
    callbacks=[CustomCallbackHandler()]
)

# 4. 多回调
chain = LLMChain(
    llm=llm,
    prompt=prompt,
    callbacks=[StdOutCallbackHandler(), CustomCallbackHandler()]
)
"""
    print(code)

callback_usage()

五、RAG应用

5.1 完整RAG系统

def rag_system():
    """Walk through a complete LangChain RAG pipeline.

    Draws the load -> split -> embed/store -> retrieve -> generate flow
    diagram and prints a runnable RetrievalQA code snippet. Returns
    ``None``. (Reconstructed from whitespace-mangled source.)
    """
    print("\n" + "=" * 60)
    print("LangChain RAG系统")
    print("=" * 60)

    fig, ax = plt.subplots(figsize=(12, 6))
    ax.axis('off')

    # RAG pipeline stages: (label, x, y) on the axes.
    steps = [
        ("文档加载", 0.1, 0.7),
        ("文本分割", 0.3, 0.7),
        ("向量存储", 0.5, 0.7),
        ("检索", 0.7, 0.7),
        ("生成", 0.9, 0.7),
    ]
    for label, x, y in steps:
        circle = plt.Circle((x, y), 0.08, color='lightblue', ec='black')
        ax.add_patch(circle)
        ax.text(x, y, label, ha='center', va='center', fontsize=7)
        if x < 0.85:  # no arrow after the last node
            ax.annotate('', xy=(x + 0.18, y), xytext=(x + 0.1, y),
                        arrowprops=dict(arrowstyle='->', lw=1))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_title('RAG系统流程', fontsize=12)
    plt.tight_layout()
    plt.show()

    # Example snippet shown to the reader (printed, not executed here).
    code = """
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI

# 1. 加载文档
loader = TextLoader("knowledge.txt")
documents = loader.load()

# 2. 文档分割
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200
)
texts = text_splitter.split_documents(documents)

# 3. 创建向量库
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(
    documents=texts,
    embedding=embeddings
)

# 4. 创建检索器
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})

# 5. 创建问答链
qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-3.5-turbo"),
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True
)

# 6. 提问
query = "什么是LangChain?"
result = qa_chain({"query": query})
print(f"问题: {query}")
print(f"答案: {result['result']}")
print(f"来源: {result['source_documents']}")
"""
    print(code)

rag_system()

5.2 对话式RAG

def conversational_rag():
    """Print an example of a conversational (multi-turn) RAG chain.

    Shows ``ConversationalRetrievalChain`` combined with
    ``ConversationBufferMemory`` so follow-up questions can reference
    earlier turns. Returns ``None``; only prints the snippet.
    (Reconstructed from whitespace-mangled source.)
    """
    print("\n" + "=" * 60)
    print("对话式RAG")
    print("=" * 60)

    # Example snippet shown to the reader (printed, not executed here).
    code = """
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

# 创建对话式检索链
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=ChatOpenAI(model="gpt-3.5-turbo"),
    retriever=vectorstore.as_retriever(),
    memory=ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True
    ),
    verbose=True
)

# 多轮对话
result1 = qa_chain({"question": "什么是LangChain?"})
print(result1['answer'])

result2 = qa_chain({"question": "它有什么优点?"})
print(result2['answer'])
"""
    print(code)

conversational_rag()

六、总结

| 组件 | 功能 | 常用类 |
| --- | --- | --- |
| Chain | 流程编排 | LLMChain, SequentialChain |
| Memory | 记忆管理 | BufferMemory, SummaryMemory |
| Callback | 监控日志 | StdOutCallbackHandler |
| RAG | 检索增强 | RetrievalQA, ConversationalRetrievalChain |

LangChain最佳实践:

  1. 使用Pipeline封装流程
  2. 合理选择记忆类型
  3. 添加Callback监控
  4. RAG注意文档分块大小
版权声明: 本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若内容造成侵权/违法违规/事实不符,请联系邮箱:809451989@qq.com进行投诉反馈,一经查实,立即删除!
网站建设 2026/5/3 8:12:00

如何精准下载GitHub资源:DownGit的突破性解决方案完整指南

如何精准下载GitHub资源:DownGit的突破性解决方案完整指南 【免费下载链接】DownGit Create GitHub Resource Download Link 项目地址: https://gitcode.com/gh_mirrors/do/DownGit 你是否曾因只想下载GitHub仓库中的单个文件或特定目录而不得不克隆整个项目…

作者头像 李华
网站建设 2026/5/3 8:11:52

百度网盘直链解析工具:三步实现免客户端高速下载

百度网盘直链解析工具:三步实现免客户端高速下载 【免费下载链接】baidu-wangpan-parse 获取百度网盘分享文件的下载地址 项目地址: https://gitcode.com/gh_mirrors/ba/baidu-wangpan-parse 还在为百度网盘龟速下载而烦恼吗?百度网盘直链解析工具…

作者头像 李华
网站建设 2026/5/3 8:11:26

RDPWrap终极指南:免费解锁Windows远程桌面多用户并发连接

RDPWrap终极指南:免费解锁Windows远程桌面多用户并发连接 【免费下载链接】rdpwrap RDP Wrapper Library 项目地址: https://gitcode.com/gh_mirrors/rd/rdpwrap 你是否曾因Windows家庭版或专业版无法支持多用户同时远程连接而烦恼?想要在个人电脑…

作者头像 李华
网站建设 2026/5/3 8:10:25

猫抓浏览器扩展实战:3步掌握网页视频音频资源高效下载

猫抓浏览器扩展实战:3步掌握网页视频音频资源高效下载 【免费下载链接】cat-catch 猫抓 浏览器资源嗅探扩展 / cat-catch Browser Resource Sniffing Extension 项目地址: https://gitcode.com/GitHub_Trending/ca/cat-catch 你是否经常遇到这样的情况…

作者头像 李华
网站建设 2026/5/3 7:57:45

DoL-Lyra整合包:Degrees of Lewdity汉化版终极安装指南 [特殊字符]

DoL-Lyra整合包:Degrees of Lewdity汉化版终极安装指南 🎮 【免费下载链接】DOL-CHS-MODS Degrees of Lewdity 整合 项目地址: https://gitcode.com/gh_mirrors/do/DOL-CHS-MODS 想要体验Degrees of Lewdity汉化版却苦于复杂的MOD安装?…

作者头像 李华