LangChain是一个用于构建基于LLM的应用程序的框架,其核心模块包括:
| 组件 | 功能 | 实现类(Python) |
| --- | --- | --- |
| AgentExecutor | 运行Agent逻辑 | langchain.agents.AgentExecutor |
| Tool | 扩展外部能力 | langchain.tools.BaseTool |
| Memory | 历史记录存储 | langchain.memory.ConversationBufferMemory |
pip install langchain openai python-dotenv
创建 .env 文件存储 API 密钥:
OPENAI_API_KEY=sk-xxx
SERPAPI_API_KEY=xxx
from langchain.agents import AgentType, initialize_agent
from langchain.llms import OpenAI
from langchain.tools import Tool


def search(query: str) -> str:
    """Mock search tool: echoes the query back with a results prefix."""
    return "Results: " + query  # simulated search


# Register the mock search function as a LangChain tool.
tools = [
    Tool(
        name="Search",
        func=search,
        description="Useful for answering questions",
    )
]

# temperature=0 keeps the model's output deterministic for the demo.
llm = OpenAI(temperature=0)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent.run("What is LangChain?")
from langchain.tools import BaseTool


class CalculatorTool(BaseTool):
    """Tool that evaluates an arithmetic expression string and returns the result."""

    name = "Calculator"
    description = "Performs math calculations"

    def _run(self, expression: str) -> str:
        # SECURITY: eval() executes arbitrary Python. Acceptable only in a
        # trusted local demo — replace with ast.literal_eval or a real
        # expression parser before feeding it untrusted (e.g. LLM) input.
        try:
            return str(eval(expression))
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any evaluation error yields the
            # original fallback message.
            return "Invalid expression"


tools.append(CalculatorTool())
from langchain.memory import ConversationBufferMemory

# Buffer memory keyed as "chat_history" so the conversational agent
# can read prior turns from its prompt.
memory = ConversationBufferMemory(memory_key="chat_history")

agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
)
agent.run("Calculate 2+2")  # conversation history is retained across calls
from langchain.agents import AgentExecutor
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction


class CustomAgent(AgentExecutor):
    """AgentExecutor subclass overriding the planning step."""

    def plan(self, inputs):
        # Custom decision logic: always route the raw input to the Search tool.
        return AgentAction(tool="Search", tool_input=inputs["input"])


agent1 = CustomAgent(...)
agent2 = initialize_agent(...)
import org.python.util.PythonInterpreter;

/**
 * Drives a Python LangChain agent from Java via an embedded Jython interpreter.
 */
public class JythonAgent {

    public static void main(String[] args) {
        PythonInterpreter interpreter = new PythonInterpreter();
        interpreter.exec("from langchain.agents import load_tools");
        interpreter.exec("agent = load_tools(['serpapi'])");
        interpreter.exec("print(agent.run('What is Java?'))");
    }
}
<dependency>
    <groupId>io.github.langchain4j</groupId>
    <artifactId>langchain4j</artifactId>
    <version>0.1.0</version>
</dependency>
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.model.openai.OpenAiChatModel;

/**
 * Minimal LangChain4j example: declares a tool method and sends one prompt
 * to the OpenAI chat model.
 */
public class JavaAgent {

    @Tool("Searches the web")
    String search(String query) {
        return "Results for " + query;
    }

    public static void main(String[] args) {
        // NOTE(review): API key is hard-coded for the demo — load it from
        // configuration in real code.
        OpenAiChatModel chatModel = OpenAiChatModel.withApiKey("sk-xxx");
        JavaAgent agent = new JavaAgent();
        String answer = chatModel.generate("Search: LangChain4j");
        System.out.println(answer);
    }
}
/**
 * Spring REST endpoint that forwards a question straight to the OpenAI model.
 */
@RestController
public class AgentController {

    @PostMapping("/ask")
    public String ask(@RequestBody String question) {
        // NOTE(review): builds a new model (with a hard-coded key) per
        // request — inject a shared bean in production.
        OpenAiChatModel model = OpenAiChatModel.withApiKey("sk-xxx");
        return model.generate(question);
    }
}
from langchain.tools import MultimodalTool
from PIL import Image


class ImageAnalyzer(MultimodalTool):
    """Tool that reports the pixel dimensions of an image file."""

    def _run(self, image_path: str) -> str:
        # Open with Pillow and report the (width, height) tuple.
        img = Image.open(image_path)
        return f"Image size: {img.size}"


tools.append(ImageAnalyzer())
from langchain.experimental import AutoGPT

# Assemble an autonomous AutoGPT-style agent from the LLM, the tool list,
# and the conversation memory built earlier.
agent = AutoGPT.from_llm_and_tools(
    llm=OpenAI(temperature=0),
    tools=tools,
    memory=memory,
)
agent.run("Write a report about AI trends")
import redis
from langchain.agents import Agent


class DistributedAgent(Agent):
    """Agent that pushes messages to peers through Redis pub/sub."""

    def __init__(self, redis_client):
        # NOTE(review): does not call super().__init__() — confirm the Agent
        # base class tolerates skipping its initializer.
        self.redis = redis_client

    def send_message(self, agent_id: str, message: str):
        # Publish on a channel named after the target agent's id.
        self.redis.publish(agent_id, message)
import asyncio

from langchain.agents import AgentExecutor


async def async_agent_run():
    """Build an executor and await a single query via the async API."""
    agent = AgentExecutor(...)
    await agent.arun("Async query")


asyncio.run(async_agent_run())
from langchain.vectorstores import Pinecone
from langchain.embeddings import OpenAIEmbeddings

# Embed documents with OpenAI and index them in Pinecone.
# NOTE(review): `documents` is not defined in this snippet — it is assumed
# to be a list of LangChain Document objects loaded earlier.
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone.from_documents(
    documents,
    embeddings,
    index_name="my-index",
)
# Deployment running the LangChain agent script in a stock Python image.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: langchain-agent
spec:
  # A Deployment requires `selector` and a pod `template`; `containers`
  # cannot sit directly under spec (that layout is only valid for a bare
  # Pod) — the original manifest would be rejected by the API server.
  selector:
    matchLabels:
      app: langchain-agent
  template:
    metadata:
      labels:
        app: langchain-agent
    spec:
      containers:
        - name: agent
          image: python:3.9
          command: ["python", "/app/agent.py"]