When I run this notebook: https://github.com/gkamradt/langchain-tutorials/blob/main/getting_started/Quickstart%20Guide.ipynb
Chains: Combine LLMs and prompts in multi-step workflows
`
#!pip install google-search-results
import os  # required below for os.environ; missing in the original snippet (NameError)

from langchain.agents import initialize_agent, load_tools
from langchain.llms import OpenAI

# Load the model (temperature=0 for deterministic output).
# NOTE(review): assumes OPENAI_API_KEY is already set in the environment — confirm.
llm = OpenAI(temperature=0)

# Load in some tools to use
os.environ["SERPAPI_API_KEY"] = ""  # TODO: fill in a real SerpAPI key
tools = load_tools(["serpapi", "llm-math"], llm=llm)

# Finally, let's initialize an agent with:
# 1. The tools
# 2. The language model
# 3. The type of agent we want to use.
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)

# See list of agent types in the LangChain docs.
# Now let's test it out!
agent.run("Who is the current leader of Japan? What is the largest prime number that is smaller than their age?")
`
Here is the output I get:
`
Entering new AgentExecutor chain...
I need to find out who the leader of Japan is and then calculate the largest prime number that is smaller than their age.
Action: Search
Action Input: "current leader of Japan"
Observation: Fumio Kishida
Thought: I need to find out the age of the leader of Japan
Action: Search
Action Input: "age of Fumio Kishida"
Observation: 65 years
Thought: I need to calculate the largest prime number that is smaller than 65
Action: Calculator
Action Input: 65
ValueError Traceback (most recent call last)
Cell In[18], line 2
1 # Now let's test it out!
----> 2 agent.run("Who is the current leader of Japan? What is the largest prime number that is smaller than their age?")
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/chains/base.py:213, in Chain.run(self, *args, **kwargs)
211 if len(args) != 1:
212 raise ValueError("`run` supports only one positional argument.")
--> 213 return self(args[0])[self.output_keys[0]]
215 if kwargs and not args:
216 return self(kwargs)[self.output_keys[0]]
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/chains/base.py:116, in Chain.__call__(self, inputs, return_only_outputs)
114 except (KeyboardInterrupt, Exception) as e:
115 self.callback_manager.on_chain_error(e, verbose=self.verbose)
--> 116 raise e
117 self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
118 return self.prep_outputs(inputs, outputs, return_only_outputs)
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/chains/base.py:113, in Chain.__call__(self, inputs, return_only_outputs)
107 self.callback_manager.on_chain_start(
108 {"name": self.__class__.__name__},
109 inputs,
110 verbose=self.verbose,
111 )
112 try:
--> 113 outputs = self._call(inputs)
114 except (KeyboardInterrupt, Exception) as e:
115 self.callback_manager.on_chain_error(e, verbose=self.verbose)
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/agents/agent.py:792, in AgentExecutor._call(self, inputs)
790 # We now enter the agent loop (until it returns something).
791 while self._should_continue(iterations, time_elapsed):
--> 792 next_step_output = self._take_next_step(
793 name_to_tool_map, color_mapping, inputs, intermediate_steps
794 )
795 if isinstance(next_step_output, AgentFinish):
796 return self._return(next_step_output, intermediate_steps)
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/agents/agent.py:695, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps)
693 tool_run_kwargs["llm_prefix"] = ""
694 # We then call the tool on the tool input to get an observation
--> 695 observation = tool.run(
696 agent_action.tool_input,
697 verbose=self.verbose,
698 color=color,
699 **tool_run_kwargs,
700 )
701 else:
702 tool_run_kwargs = self.agent.tool_run_logging_kwargs()
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/tools/base.py:107, in BaseTool.run(self, tool_input, verbose, start_color, color, **kwargs)
105 except (Exception, KeyboardInterrupt) as e:
106 self.callback_manager.on_tool_error(e, verbose=verbose_)
--> 107 raise e
108 self.callback_manager.on_tool_end(
109 observation, verbose=verbose_, color=color, name=self.name, **kwargs
110 )
111 return observation
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/tools/base.py:104, in BaseTool.run(self, tool_input, verbose, start_color, color, **kwargs)
102 try:
103 tool_args, tool_kwargs = _to_args_and_kwargs(tool_input)
--> 104 observation = self.run(*tool_args, **tool_kwargs)
105 except (Exception, KeyboardInterrupt) as e:
106 self.callback_manager.on_tool_error(e, verbose=verbose)
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/agents/tools.py:31, in Tool._run(self, *args, **kwargs)
29 def _run(self, *args: Any, **kwargs: Any) -> str:
30 """Use the tool."""
---> 31 return self.func(*args, **kwargs)
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/chains/base.py:213, in Chain.run(self, *args, **kwargs)
211 if len(args) != 1:
212 raise ValueError("`run` supports only one positional argument.")
--> 213 return self(args[0])[self.output_keys[0]]
215 if kwargs and not args:
216 return self(kwargs)[self.output_keys[0]]
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/chains/base.py:116, in Chain.__call__(self, inputs, return_only_outputs)
114 except (KeyboardInterrupt, Exception) as e:
115 self.callback_manager.on_chain_error(e, verbose=self.verbose)
--> 116 raise e
117 self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
118 return self.prep_outputs(inputs, outputs, return_only_outputs)
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/chains/base.py:113, in Chain.__call__(self, inputs, return_only_outputs)
107 self.callback_manager.on_chain_start(
108 {"name": self.__class__.__name__},
109 inputs,
110 verbose=self.verbose,
111 )
112 try:
--> 113 outputs = self._call(inputs)
114 except (KeyboardInterrupt, Exception) as e:
115 self.callback_manager.on_chain_error(e, verbose=self.verbose)
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/chains/llm_math/base.py:130, in LLMMathChain._call(self, inputs)
126 self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
127 llm_output = llm_executor.predict(
128 question=inputs[self.input_key], stop=["```output"]
129 )
--> 130 return self._process_llm_result(llm_output)
File /opt/miniconda3/envs/py38_langchain/lib/python3.8/site-packages/langchain/chains/llm_math/base.py:86, in LLMMathChain._process_llm_result(self, llm_output)
84 answer = "Answer: " + llm_output.split("Answer:")[-1]
85 else:
---> 86 raise ValueError(f"unknown format from LLM: {llm_output}")
87 return {self.output_key: answer}
ValueError: unknown format from LLM: This is not a math problem and cannot be translated into an expression that can be executed using Python's numexpr library.`