class ReActAgent(AgentRunner):
    """ReAct agent.

    Subclasses AgentRunner with a ReActAgentWorker.

    For the legacy implementation see:
    ```python
    from llama_index.core.agent.legacy.react.base import ReActAgent
    ```
    """

    def __init__(
        self,
        tools: Sequence[BaseTool],
        llm: LLM,
        memory: BaseMemory,
        max_iterations: int = 10,
        react_chat_formatter: Optional[ReActChatFormatter] = None,
        output_parser: Optional[ReActOutputParser] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        context: Optional[str] = None,
    ) -> None:
        """Init params."""
        # Fall back to the LLM's own callback manager when none is supplied.
        callback_manager = callback_manager or llm.callback_manager

        # `context` is a shortcut that builds a formatter; supplying both
        # would be ambiguous, so it is rejected outright.
        if context and react_chat_formatter:
            raise ValueError("Cannot provide both context and react_chat_formatter")
        if context:
            react_chat_formatter = ReActChatFormatter.from_context(context)

        step_engine = ReActAgentWorker.from_tools(
            tools=tools,
            tool_retriever=tool_retriever,
            llm=llm,
            max_iterations=max_iterations,
            react_chat_formatter=react_chat_formatter,
            output_parser=output_parser,
            callback_manager=callback_manager,
            verbose=verbose,
        )
        super().__init__(
            step_engine,
            memory=memory,
            llm=llm,
            callback_manager=callback_manager,
        )

    @classmethod
    def from_tools(
        cls,
        tools: Optional[List[BaseTool]] = None,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        llm: Optional[LLM] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        memory: Optional[BaseMemory] = None,
        memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
        max_iterations: int = 10,
        react_chat_formatter: Optional[ReActChatFormatter] = None,
        output_parser: Optional[ReActOutputParser] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        context: Optional[str] = None,
        **kwargs: Any,
    ) -> "ReActAgent":
        """Convenience constructor method from a set of BaseTools (Optional).

        NOTE: kwargs should have been exhausted by this point. In other words
        the various upstream components such as BaseSynthesizer (response
        synthesizer) or BaseRetriever should have picked up their respective
        kwargs in their constructions.

        Returns:
            ReActAgent
        """
        llm = llm or Settings.llm
        if callback_manager is not None:
            llm.callback_manager = callback_manager
        # Build a fresh memory from chat history when none is provided.
        memory = memory or memory_cls.from_defaults(
            chat_history=chat_history or [], llm=llm
        )
        return cls(
            tools=tools or [],
            tool_retriever=tool_retriever,
            llm=llm,
            memory=memory,
            max_iterations=max_iterations,
            react_chat_formatter=react_chat_formatter,
            output_parser=output_parser,
            callback_manager=callback_manager,
            verbose=verbose,
            context=context,
        )

    def _get_prompt_modules(self) -> PromptMixinType:
        """Get prompt modules."""
        return {"agent_worker": self.agent_worker}
Convenience constructor method from a set of BaseTools (Optional).
NOTE: kwargs should have been exhausted by this point. In other words
the various upstream components such as BaseSynthesizer (response synthesizer)
or BaseRetriever should have picked up off their respective kwargs in their
constructions.
@classmethod
def from_tools(
    cls,
    tools: Optional[List[BaseTool]] = None,
    tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
    llm: Optional[LLM] = None,
    chat_history: Optional[List[ChatMessage]] = None,
    memory: Optional[BaseMemory] = None,
    memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
    max_iterations: int = 10,
    react_chat_formatter: Optional[ReActChatFormatter] = None,
    output_parser: Optional[ReActOutputParser] = None,
    callback_manager: Optional[CallbackManager] = None,
    verbose: bool = False,
    context: Optional[str] = None,
    **kwargs: Any,
) -> "ReActAgent":
    """Build a ReActAgent from a set of BaseTools (Optional).

    NOTE: kwargs should have been exhausted by this point. In other words
    the various upstream components such as BaseSynthesizer (response
    synthesizer) or BaseRetriever should have picked up their respective
    kwargs in their constructions.

    Returns:
        ReActAgent
    """
    llm = llm or Settings.llm
    if callback_manager is not None:
        llm.callback_manager = callback_manager
    if memory is None:
        # No memory supplied: seed one from the (possibly empty) chat history.
        memory = memory_cls.from_defaults(chat_history=chat_history or [], llm=llm)
    return cls(
        tools=tools or [],
        tool_retriever=tool_retriever,
        llm=llm,
        memory=memory,
        max_iterations=max_iterations,
        react_chat_formatter=react_chat_formatter,
        output_parser=output_parser,
        callback_manager=callback_manager,
        verbose=verbose,
        context=context,
    )
class ReActAgentWorker(BaseAgentWorker):
    """ReAct Agent worker.

    Drives the ReAct loop for a single task: format the prompt, call the LLM,
    parse the thought/action output, invoke tools, and accumulate reasoning
    steps until a final answer is produced or max iterations is reached.
    """

    def __init__(
        self,
        tools: Sequence[BaseTool],
        llm: LLM,
        max_iterations: int = 10,
        react_chat_formatter: Optional[ReActChatFormatter] = None,
        output_parser: Optional[ReActOutputParser] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
    ) -> None:
        """Init params."""
        self._llm = llm
        self.callback_manager = callback_manager or llm.callback_manager
        self._max_iterations = max_iterations
        self._react_chat_formatter = react_chat_formatter or ReActChatFormatter()
        self._output_parser = output_parser or ReActOutputParser()
        self._verbose = verbose

        # Tools may come from a static list OR a retriever, never both.
        if len(tools) > 0 and tool_retriever is not None:
            raise ValueError("Cannot specify both tools and tool_retriever")
        elif len(tools) > 0:
            self._get_tools = lambda _: tools
        elif tool_retriever is not None:
            tool_retriever_c = cast(ObjectRetriever[BaseTool], tool_retriever)
            self._get_tools = lambda message: tool_retriever_c.retrieve(message)
        else:
            self._get_tools = lambda _: []

    @classmethod
    def from_tools(
        cls,
        tools: Optional[Sequence[BaseTool]] = None,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        llm: Optional[LLM] = None,
        max_iterations: int = 10,
        react_chat_formatter: Optional[ReActChatFormatter] = None,
        output_parser: Optional[ReActOutputParser] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> "ReActAgentWorker":
        """Convenience constructor method from a set of BaseTools (Optional).

        NOTE: kwargs should have been exhausted by this point. In other words
        the various upstream components such as BaseSynthesizer (response
        synthesizer) or BaseRetriever should have picked up their respective
        kwargs in their constructions.

        Returns:
            ReActAgentWorker
        """
        llm = llm or Settings.llm
        if callback_manager is not None:
            llm.callback_manager = callback_manager
        return cls(
            tools=tools or [],
            tool_retriever=tool_retriever,
            llm=llm,
            max_iterations=max_iterations,
            react_chat_formatter=react_chat_formatter,
            output_parser=output_parser,
            callback_manager=callback_manager,
            verbose=verbose,
        )

    def _get_prompts(self) -> PromptDictType:
        """Get prompts."""
        # TODO: the ReAct formatter does not explicitly specify PromptTemplate
        # objects, but wrap it in this to obey the interface
        sys_header = self._react_chat_formatter.system_header
        return {"system_prompt": PromptTemplate(sys_header)}

    def _update_prompts(self, prompts: PromptDictType) -> None:
        """Update prompts."""
        if "system_prompt" in prompts:
            sys_prompt = cast(PromptTemplate, prompts["system_prompt"])
            self._react_chat_formatter.system_header = sys_prompt.template

    def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep:
        """Initialize step from task."""
        sources: List[ToolOutput] = []
        current_reasoning: List[BaseReasoningStep] = []
        # temporary memory for new messages
        new_memory = ChatMemoryBuffer.from_defaults()

        # initialize task state
        task_state = {
            "sources": sources,
            "current_reasoning": current_reasoning,
            "new_memory": new_memory,
        }
        task.extra_state.update(task_state)

        return TaskStep(
            task_id=task.task_id,
            step_id=str(uuid.uuid4()),
            input=task.input,
            step_state={"is_first": True},
        )

    def get_tools(self, input: str) -> List[AsyncBaseTool]:
        """Get tools, wrapped so each one exposes an async interface."""
        return [adapt_to_async_tool(t) for t in self._get_tools(input)]

    def _extract_reasoning_step(
        self, output: ChatResponse, is_streaming: bool = False
    ) -> Tuple[str, List[BaseReasoningStep], bool]:
        """
        Extracts the reasoning step from the given output.

        This method parses the message content from the output, extracts the
        reasoning step, and determines whether the processing is complete. It
        also performs validation checks on the output and handles possible
        errors.

        Returns:
            Tuple of (message content, extracted reasoning steps, is_done).
        """
        if output.message.content is None:
            raise ValueError("Got empty message.")
        message_content = output.message.content
        current_reasoning = []
        try:
            reasoning_step = self._output_parser.parse(message_content, is_streaming)
        except BaseException as exc:
            raise ValueError(f"Could not parse output: {message_content}") from exc
        if self._verbose:
            print_text(f"{reasoning_step.get_content()}\n", color="pink")
        current_reasoning.append(reasoning_step)

        if reasoning_step.is_done:
            return message_content, current_reasoning, True

        reasoning_step = cast(ActionReasoningStep, reasoning_step)
        if not isinstance(reasoning_step, ActionReasoningStep):
            raise ValueError(f"Expected ActionReasoningStep, got {reasoning_step}")

        return message_content, current_reasoning, False

    def _process_actions(
        self,
        task: Task,
        tools: Sequence[AsyncBaseTool],
        output: ChatResponse,
        is_streaming: bool = False,
    ) -> Tuple[List[BaseReasoningStep], bool]:
        """Parse the LLM output and synchronously execute the chosen tool."""
        tools_dict: Dict[str, AsyncBaseTool] = {
            tool.metadata.get_name(): tool for tool in tools
        }
        _, current_reasoning, is_done = self._extract_reasoning_step(
            output, is_streaming
        )

        if is_done:
            return current_reasoning, True

        # call tool with input
        reasoning_step = cast(ActionReasoningStep, current_reasoning[-1])
        tool = tools_dict[reasoning_step.action]
        with self.callback_manager.event(
            CBEventType.FUNCTION_CALL,
            payload={
                EventPayload.FUNCTION_CALL: reasoning_step.action_input,
                EventPayload.TOOL: tool.metadata,
            },
        ) as event:
            try:
                tool_output = tool.call(**reasoning_step.action_input)
            except Exception as e:
                # Surface tool failures as observations instead of aborting
                # the whole step, so the LLM can recover.
                tool_output = ToolOutput(
                    content=f"Error: {e!s}",
                    tool_name=tool.metadata.name,
                    raw_input={"kwargs": reasoning_step.action_input},
                    raw_output=e,
                )
            event.on_end(payload={EventPayload.FUNCTION_OUTPUT: str(tool_output)})

        task.extra_state["sources"].append(tool_output)

        observation_step = ObservationReasoningStep(observation=str(tool_output))
        current_reasoning.append(observation_step)
        if self._verbose:
            print_text(f"{observation_step.get_content()}\n", color="blue")
        return current_reasoning, False

    async def _aprocess_actions(
        self,
        task: Task,
        tools: Sequence[AsyncBaseTool],
        output: ChatResponse,
        is_streaming: bool = False,
    ) -> Tuple[List[BaseReasoningStep], bool]:
        """Parse the LLM output and asynchronously execute the chosen tool."""
        # NOTE: use get_name() to match the sync _process_actions lookup.
        tools_dict = {tool.metadata.get_name(): tool for tool in tools}
        _, current_reasoning, is_done = self._extract_reasoning_step(
            output, is_streaming
        )

        if is_done:
            return current_reasoning, True

        # call tool with input
        reasoning_step = cast(ActionReasoningStep, current_reasoning[-1])
        tool = tools_dict[reasoning_step.action]
        with self.callback_manager.event(
            CBEventType.FUNCTION_CALL,
            payload={
                EventPayload.FUNCTION_CALL: reasoning_step.action_input,
                EventPayload.TOOL: tool.metadata,
            },
        ) as event:
            try:
                tool_output = await tool.acall(**reasoning_step.action_input)
            except Exception as e:
                tool_output = ToolOutput(
                    content=f"Error: {e!s}",
                    tool_name=tool.metadata.name,
                    raw_input={"kwargs": reasoning_step.action_input},
                    raw_output=e,
                )
            event.on_end(payload={EventPayload.FUNCTION_OUTPUT: str(tool_output)})

        task.extra_state["sources"].append(tool_output)

        observation_step = ObservationReasoningStep(observation=str(tool_output))
        current_reasoning.append(observation_step)
        if self._verbose:
            print_text(f"{observation_step.get_content()}\n", color="blue")
        return current_reasoning, False

    def _get_response(
        self,
        current_reasoning: List[BaseReasoningStep],
        sources: List[ToolOutput],
    ) -> AgentChatResponse:
        """Get response from reasoning steps."""
        if len(current_reasoning) == 0:
            raise ValueError("No reasoning steps were taken.")
        elif len(current_reasoning) == self._max_iterations:
            raise ValueError("Reached max iterations.")

        if isinstance(current_reasoning[-1], ResponseReasoningStep):
            response_step = cast(ResponseReasoningStep, current_reasoning[-1])
            response_str = response_step.response
        else:
            response_str = current_reasoning[-1].get_content()

        # TODO: add sources from reasoning steps
        return AgentChatResponse(response=response_str, sources=sources)

    def _get_task_step_response(
        self, agent_response: AGENT_CHAT_RESPONSE_TYPE, step: TaskStep, is_done: bool
    ) -> TaskStepOutput:
        """Get task step response."""
        if is_done:
            new_steps = []
        else:
            new_steps = [
                step.get_next_step(
                    step_id=str(uuid.uuid4()),
                    # NOTE: input is unused
                    input=None,
                )
            ]

        return TaskStepOutput(
            output=agent_response,
            task_step=step,
            is_last=is_done,
            next_steps=new_steps,
        )

    def _infer_stream_chunk_is_final(self, chunk: ChatResponse) -> bool:
        """Infers if a chunk from a live stream is the start of the final
        reasoning step. (i.e., and should eventually become
        ResponseReasoningStep — not part of this function's logic tho.).

        Args:
            chunk (ChatResponse): the current chunk stream to check

        Returns:
            bool: Boolean on whether the chunk is the start of the final response
        """
        latest_content = chunk.message.content
        if latest_content:
            # doesn't follow thought-action format
            if len(latest_content) > len("Thought") and not latest_content.startswith(
                "Thought"
            ):
                return True
            elif "Answer: " in latest_content:
                return True
        return False

    def _add_back_chunk_to_stream(
        self, chunk: ChatResponse, chat_stream: Generator[ChatResponse, None, None]
    ) -> Generator[ChatResponse, None, None]:
        """Helper method for adding back initial chunk stream of final response
        back to the rest of the chat_stream.

        Args:
            chunk (ChatResponse): the chunk to add back to the beginning of the
                chat_stream.

        Return:
            Generator[ChatResponse, None, None]: the updated chat_stream
        """
        updated_stream = chain.from_iterable(  # need to add back partial response chunk
            [
                unit_generator(chunk),
                chat_stream,
            ]
        )
        # use cast to avoid mypy issue with chain and Generator
        updated_stream_c: Generator[ChatResponse, None, None] = cast(
            Generator[ChatResponse, None, None], updated_stream
        )
        return updated_stream_c

    async def _async_add_back_chunk_to_stream(
        self, chunk: ChatResponse, chat_stream: AsyncGenerator[ChatResponse, None]
    ) -> AsyncGenerator[ChatResponse, None]:
        """Helper method for adding back initial chunk stream of final response
        back to the rest of the chat_stream.

        NOTE: this itself is not an async function.

        Args:
            chunk (ChatResponse): the chunk to add back to the beginning of the
                chat_stream.

        Return:
            AsyncGenerator[ChatResponse, None]: the updated async chat_stream
        """
        yield chunk
        async for item in chat_stream:
            yield item

    def _run_step(
        self,
        step: TaskStep,
        task: Task,
    ) -> TaskStepOutput:
        """Run step."""
        if step.input is not None:
            add_user_step_to_reasoning(
                step,
                task.extra_state["new_memory"],
                task.extra_state["current_reasoning"],
                verbose=self._verbose,
            )
        # TODO: see if we want to do step-based inputs
        tools = self.get_tools(task.input)

        input_chat = self._react_chat_formatter.format(
            tools,
            chat_history=task.memory.get() + task.extra_state["new_memory"].get_all(),
            current_reasoning=task.extra_state["current_reasoning"],
        )

        # send prompt
        chat_response = self._llm.chat(input_chat)
        # given react prompt outputs, call tools or return response
        reasoning_steps, is_done = self._process_actions(
            task, tools, output=chat_response
        )
        task.extra_state["current_reasoning"].extend(reasoning_steps)
        agent_response = self._get_response(
            task.extra_state["current_reasoning"], task.extra_state["sources"]
        )
        if is_done:
            task.extra_state["new_memory"].put(
                ChatMessage(content=agent_response.response, role=MessageRole.ASSISTANT)
            )

        return self._get_task_step_response(agent_response, step, is_done)

    async def _arun_step(
        self,
        step: TaskStep,
        task: Task,
    ) -> TaskStepOutput:
        """Run step."""
        if step.input is not None:
            add_user_step_to_reasoning(
                step,
                task.extra_state["new_memory"],
                task.extra_state["current_reasoning"],
                verbose=self._verbose,
            )
        # TODO: see if we want to do step-based inputs
        tools = self.get_tools(task.input)

        input_chat = self._react_chat_formatter.format(
            tools,
            chat_history=task.memory.get() + task.extra_state["new_memory"].get_all(),
            current_reasoning=task.extra_state["current_reasoning"],
        )
        # send prompt
        chat_response = await self._llm.achat(input_chat)
        # given react prompt outputs, call tools or return response
        reasoning_steps, is_done = await self._aprocess_actions(
            task, tools, output=chat_response
        )
        task.extra_state["current_reasoning"].extend(reasoning_steps)
        agent_response = self._get_response(
            task.extra_state["current_reasoning"], task.extra_state["sources"]
        )
        if is_done:
            task.extra_state["new_memory"].put(
                ChatMessage(content=agent_response.response, role=MessageRole.ASSISTANT)
            )

        return self._get_task_step_response(agent_response, step, is_done)

    def _run_step_stream(
        self,
        step: TaskStep,
        task: Task,
    ) -> TaskStepOutput:
        """Run step."""
        if step.input is not None:
            add_user_step_to_reasoning(
                step,
                task.extra_state["new_memory"],
                task.extra_state["current_reasoning"],
                verbose=self._verbose,
            )
        # TODO: see if we want to do step-based inputs
        tools = self.get_tools(task.input)

        input_chat = self._react_chat_formatter.format(
            tools,
            chat_history=task.memory.get() + task.extra_state["new_memory"].get_all(),
            current_reasoning=task.extra_state["current_reasoning"],
        )

        chat_stream = self._llm.stream_chat(input_chat)

        # iterate over stream, break out if is final answer after the "Answer: "
        full_response = ChatResponse(
            message=ChatMessage(content=None, role="assistant")
        )
        is_done = False
        for latest_chunk in chat_stream:
            full_response = latest_chunk
            is_done = self._infer_stream_chunk_is_final(latest_chunk)
            if is_done:
                break

        if not is_done:
            # given react prompt outputs, call tools or return response
            reasoning_steps, _ = self._process_actions(
                task, tools=tools, output=full_response, is_streaming=True
            )
            task.extra_state["current_reasoning"].extend(reasoning_steps)
            # use _get_response to return intermediate response
            agent_response: AGENT_CHAT_RESPONSE_TYPE = self._get_response(
                task.extra_state["current_reasoning"], task.extra_state["sources"]
            )
        else:
            # Get the response in a separate thread so we can yield the response
            response_stream = self._add_back_chunk_to_stream(
                chunk=latest_chunk, chat_stream=chat_stream
            )

            agent_response = StreamingAgentChatResponse(
                chat_stream=response_stream,
                sources=task.extra_state["sources"],
            )
            thread = Thread(
                target=agent_response.write_response_to_history,
                args=(task.extra_state["new_memory"],),
                kwargs={"on_stream_end_fn": partial(self.finalize_task, task)},
            )
            thread.start()

        return self._get_task_step_response(agent_response, step, is_done)

    async def _arun_step_stream(
        self,
        step: TaskStep,
        task: Task,
    ) -> TaskStepOutput:
        """Run step."""
        if step.input is not None:
            add_user_step_to_reasoning(
                step,
                task.extra_state["new_memory"],
                task.extra_state["current_reasoning"],
                verbose=self._verbose,
            )
        # TODO: see if we want to do step-based inputs
        tools = self.get_tools(task.input)

        input_chat = self._react_chat_formatter.format(
            tools,
            chat_history=task.memory.get() + task.extra_state["new_memory"].get_all(),
            current_reasoning=task.extra_state["current_reasoning"],
        )

        chat_stream = await self._llm.astream_chat(input_chat)

        # iterate over stream, break out if is final answer after the "Answer: "
        full_response = ChatResponse(
            message=ChatMessage(content=None, role="assistant")
        )
        is_done = False
        async for latest_chunk in chat_stream:
            full_response = latest_chunk
            is_done = self._infer_stream_chunk_is_final(latest_chunk)
            if is_done:
                break

        if not is_done:
            # given react prompt outputs, call tools or return response
            # NOTE: use the async tool path here; the sync _process_actions
            # would block the event loop via tool.call().
            reasoning_steps, _ = await self._aprocess_actions(
                task, tools=tools, output=full_response, is_streaming=True
            )
            task.extra_state["current_reasoning"].extend(reasoning_steps)
            # use _get_response to return intermediate response
            agent_response: AGENT_CHAT_RESPONSE_TYPE = self._get_response(
                task.extra_state["current_reasoning"], task.extra_state["sources"]
            )
        else:
            # Get the response in a separate thread so we can yield the response
            response_stream = self._async_add_back_chunk_to_stream(
                chunk=latest_chunk, chat_stream=chat_stream
            )

            agent_response = StreamingAgentChatResponse(
                achat_stream=response_stream,
                sources=task.extra_state["sources"],
            )
            # create task to write chat response to history
            asyncio.create_task(
                agent_response.awrite_response_to_history(
                    task.extra_state["new_memory"],
                    on_stream_end_fn=partial(self.finalize_task, task),
                )
            )
            # wait until response writing is done
            await agent_response._is_function_false_event.wait()

        return self._get_task_step_response(agent_response, step, is_done)

    @trace_method("run_step")
    def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
        """Run step."""
        return self._run_step(step, task)

    @trace_method("run_step")
    async def arun_step(
        self, step: TaskStep, task: Task, **kwargs: Any
    ) -> TaskStepOutput:
        """Run step (async)."""
        return await self._arun_step(step, task)

    @trace_method("run_step")
    def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
        """Run step (stream)."""
        # TODO: figure out if we need a different type for TaskStepOutput
        return self._run_step_stream(step, task)

    @trace_method("run_step")
    async def astream_step(
        self, step: TaskStep, task: Task, **kwargs: Any
    ) -> TaskStepOutput:
        """Run step (async stream)."""
        return await self._arun_step_stream(step, task)

    def finalize_task(self, task: Task, **kwargs: Any) -> None:
        """Finalize task, after all the steps are completed."""
        # add new messages to memory
        task.memory.set(task.memory.get_all() + task.extra_state["new_memory"].get_all())
        # reset new memory
        task.extra_state["new_memory"].reset()

    def set_callback_manager(self, callback_manager: CallbackManager) -> None:
        """Set callback manager."""
        # TODO: make this abstractmethod (right now will break some agent impls)
        self.callback_manager = callback_manager
Convenience constructor method from a set of BaseTools (Optional).
NOTE: kwargs should have been exhausted by this point. In other words
the various upstream components such as BaseSynthesizer (response synthesizer)
or BaseRetriever should have picked up off their respective kwargs in their
constructions.
@classmethod
def from_tools(
    cls,
    tools: Optional[Sequence[BaseTool]] = None,
    tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
    llm: Optional[LLM] = None,
    max_iterations: int = 10,
    react_chat_formatter: Optional[ReActChatFormatter] = None,
    output_parser: Optional[ReActOutputParser] = None,
    callback_manager: Optional[CallbackManager] = None,
    verbose: bool = False,
    **kwargs: Any,
) -> "ReActAgentWorker":
    """Build a ReActAgentWorker from a set of BaseTools (Optional).

    NOTE: kwargs should have been exhausted by this point. In other words
    the various upstream components such as BaseSynthesizer (response
    synthesizer) or BaseRetriever should have picked up their respective
    kwargs in their constructions.

    Returns:
        ReActAgentWorker
    """
    # Resolve the LLM from global settings when not explicitly given.
    resolved_llm = llm or Settings.llm
    if callback_manager is not None:
        resolved_llm.callback_manager = callback_manager
    return cls(
        tools=tools or [],
        tool_retriever=tool_retriever,
        llm=resolved_llm,
        max_iterations=max_iterations,
        react_chat_formatter=react_chat_formatter,
        output_parser=output_parser,
        callback_manager=callback_manager,
        verbose=verbose,
    )
def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep:
    """Seed per-task state and return the first TaskStep.

    Installs empty containers for tool outputs and reasoning steps, plus a
    scratch memory buffer for messages produced during this task.
    """
    sources: List[ToolOutput] = []
    current_reasoning: List[BaseReasoningStep] = []
    # temporary memory for new messages
    new_memory = ChatMemoryBuffer.from_defaults()

    # initialize task state
    task.extra_state.update(
        {
            "sources": sources,
            "current_reasoning": current_reasoning,
            "new_memory": new_memory,
        }
    )

    return TaskStep(
        task_id=task.task_id,
        step_id=str(uuid.uuid4()),
        input=task.input,
        step_state={"is_first": True},
    )
Source code in llama-index-core/llama_index/core/agent/react/step.py
634635636637638
@trace_method("run_step")
def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
    """Run step (stream): delegate to the streaming implementation."""
    # TODO: figure out if we need a different type for TaskStepOutput
    return self._run_step_stream(step, task)
Source code in llama-index-core/llama_index/core/agent/react/step.py
647648649650651652653654
def finalize_task(self, task: Task, **kwargs: Any) -> None:
    """Finalize task, after all the steps are completed.

    Flushes the task's scratch memory into the main memory, then clears it.
    """
    # add new messages to memory
    combined = task.memory.get_all() + task.extra_state["new_memory"].get_all()
    task.memory.set(combined)
    # reset new memory
    task.extra_state["new_memory"].reset()
Source code in llama-index-core/llama_index/core/agent/react/step.py
656657658659
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
    """Replace this worker's callback manager."""
    # TODO: make this abstractmethod (right now will break some agent impls)
    self.callback_manager = callback_manager
class ReActChatFormatter(BaseAgentChatFormatter):
    """ReAct chat formatter."""

    system_header: str = REACT_CHAT_SYSTEM_HEADER  # default
    context: str = ""  # not needed w/ default

    def format(
        self,
        tools: Sequence[BaseTool],
        chat_history: List[ChatMessage],
        current_reasoning: Optional[List[BaseReasoningStep]] = None,
    ) -> List[ChatMessage]:
        """Format chat history into list of ChatMessage."""
        current_reasoning = current_reasoning or []

        format_args = {
            "tool_desc": "\n".join(get_react_tool_descriptions(tools)),
            "tool_names": ", ".join([tool.metadata.get_name() for tool in tools]),
        }
        if self.context:
            format_args["context"] = self.context

        fmt_sys_header = self.system_header.format(**format_args)

        # format reasoning history as alternating user and assistant messages
        # where the assistant messages are thoughts and actions and the user
        # messages are observations
        reasoning_history = []
        for reasoning_step in current_reasoning:
            role = (
                MessageRole.USER
                if isinstance(reasoning_step, ObservationReasoningStep)
                else MessageRole.ASSISTANT
            )
            reasoning_history.append(
                ChatMessage(role=role, content=reasoning_step.get_content())
            )

        return [
            ChatMessage(role=MessageRole.SYSTEM, content=fmt_sys_header),
            *chat_history,
            *reasoning_history,
        ]

    @classmethod
    def from_defaults(
        cls,
        system_header: Optional[str] = None,
        context: Optional[str] = None,
    ) -> "ReActChatFormatter":
        """Create ReActChatFormatter from defaults."""
        if not system_header:
            # The context-aware header is only used when context was supplied.
            system_header = (
                REACT_CHAT_SYSTEM_HEADER
                if not context
                else CONTEXT_REACT_CHAT_SYSTEM_HEADER
            )

        return ReActChatFormatter(
            system_header=system_header,
            context=context or "",
        )

    @classmethod
    def from_context(cls, context: str) -> "ReActChatFormatter":
        """Create ReActChatFormatter from context.

        NOTE: deprecated
        """
        logger.warning(
            "ReActChatFormatter.from_context is deprecated, please use `from_defaults` instead."
        )
        return ReActChatFormatter.from_defaults(
            system_header=CONTEXT_REACT_CHAT_SYSTEM_HEADER, context=context
        )
def format(
    self,
    tools: Sequence[BaseTool],
    chat_history: List[ChatMessage],
    current_reasoning: Optional[List[BaseReasoningStep]] = None,
) -> List[ChatMessage]:
    """Render the system header, chat history, and reasoning trace as ChatMessages."""
    steps = current_reasoning or []

    format_args = {
        "tool_desc": "\n".join(get_react_tool_descriptions(tools)),
        "tool_names": ", ".join([tool.metadata.get_name() for tool in tools]),
    }
    if self.context:
        format_args["context"] = self.context

    fmt_sys_header = self.system_header.format(**format_args)

    # Reasoning history alternates: observations become user messages,
    # thoughts/actions become assistant messages.
    reasoning_history = [
        ChatMessage(
            role=(
                MessageRole.USER
                if isinstance(step, ObservationReasoningStep)
                else MessageRole.ASSISTANT
            ),
            content=step.get_content(),
        )
        for step in steps
    ]

    return [
        ChatMessage(role=MessageRole.SYSTEM, content=fmt_sys_header),
        *chat_history,
        *reasoning_history,
    ]
@classmethod
def from_defaults(
    cls,
    system_header: Optional[str] = None,
    context: Optional[str] = None,
) -> "ReActChatFormatter":
    """Create a ReActChatFormatter, choosing a header based on whether context is given."""
    if not system_header:
        if context:
            system_header = CONTEXT_REACT_CHAT_SYSTEM_HEADER
        else:
            system_header = REACT_CHAT_SYSTEM_HEADER

    return ReActChatFormatter(
        system_header=system_header,
        context=context or "",
    )
Source code in llama-index-core/llama_index/core/agent/react/formatter.py
118119120121122123124125126127128129130
@classmethod
def from_context(cls, context: str) -> "ReActChatFormatter":
    """Create a ReActChatFormatter from context.

    NOTE: deprecated — prefer `from_defaults`.
    """
    logger.warning(
        "ReActChatFormatter.from_context is deprecated, please use `from_defaults` instead."
    )
    return ReActChatFormatter.from_defaults(
        system_header=CONTEXT_REACT_CHAT_SYSTEM_HEADER,
        context=context,
    )