import os
from langchain_deepseek import ChatDeepSeek
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
## --- API credentials ---
# NOTE(review): placeholder keys hardcoded in source — move to a .env file or
# secrets manager before real use. setdefault (instead of plain assignment)
# avoids clobbering real keys already exported in the environment.
os.environ.setdefault('DEEPSEEK_API_KEY', 'sk-')
os.environ.setdefault('TAVILY_API_KEY', 'tvly-dev-')

## create llm: streaming DeepSeek chat model, temperature=0 for deterministic output
model = ChatDeepSeek(model='deepseek-chat', temperature=0, streaming=True)

## create search tool: top-3 Tavily web-search results per query
search = TavilySearchResults(max_results=3)
tools = [search]

## create memory: in-process checkpointer (conversation state is lost on restart)
memory = MemorySaver()

## create ReAct agent wired to the model, tools, and checkpointer
agent_executor = create_react_agent(model, tools, checkpointer=memory)

## session config: thread_id keys the checkpointer's per-conversation history
config = {"configurable": {"thread_id": "example_session"}}
def ask_agent(question, thread_id=None):
    """Stream the agent's answer to *question*, pretty-printing each step.

    Args:
        question: The user's question, wrapped in a single HumanMessage.
        thread_id: Optional conversation thread for the checkpointer.
            Defaults to the module-level ``config`` session, so existing
            callers behave exactly as before.
    """
    # Reuse the shared session unless the caller asks for its own thread.
    run_config = config if thread_id is None else {"configurable": {"thread_id": thread_id}}
    for step in agent_executor.stream(
        {"messages": [HumanMessage(content=question)]},
        config=run_config,
        stream_mode="values",
    ):
        # stream_mode="values" yields the full state each step;
        # the last message is the newest (tool call, tool result, or answer).
        step['messages'][-1].pretty_print()
if __name__=="__main__":
ask_agent("what is the capital of China?")
# 文章评论 ("article comments") — stray page text from the copied source,
# not Python; left as a comment so the module imports without a NameError.