Skip to content

SecGPT

SecGPTPack #

Bases: BaseLlamaPack

SecGPT Hub.

A central trustworthy entity that routes user queries to appropriate isolated apps.

Source code in llama-index-packs/llama-index-packs-secgpt/llama_index/packs/secgpt/hub.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
class SecGPTPack(BaseLlamaPack):
    """SecGPT Hub.

    A central trustworthy entity that routes user queries to appropriate isolated apps.
    """

    def __init__(
        self,
        tools: Sequence[BaseTool],
        tool_specs: Sequence[BaseToolSpec],
        llm: Optional[LLM] = None,
        memory: Optional[BaseMemory] = None,
        output_parser: Optional[ReActOutputParser] = None,
        verbose: bool = False,
        handle_reasoning_failure_fn: Optional[
            Callable[[CallbackManager, Exception], ToolOutput]
        ] = None,
        user_id: Optional[str] = "0",
    ) -> None:
        """Initialize the SecGPTPack.

        Args:
            tools (Sequence[BaseTool]): A sequence of available tools.
            tool_specs (Sequence[BaseToolSpec]): Specifications for the tools.
            llm (Optional[LLM], optional): Language Model used for processing. Defaults to Settings.llm.
            memory (Optional[BaseMemory], optional): Memory component to keep track of conversation history. Defaults to ChatMemoryBuffer.
            output_parser (Optional[ReActOutputParser], optional): Parser for handling the output. Defaults to None.
            verbose (bool, optional): Flag to enable verbose output. Defaults to False.
            handle_reasoning_failure_fn (Optional[Callable[[CallbackManager, Exception], ToolOutput]], optional): Callable function to handle reasoning failures. Defaults to None.
            user_id (Optional[str], optional): User identifier. Defaults to "0".
        """
        # Fall back to the globally configured LLM when none is supplied.
        self.llm = llm or Settings.llm
        # Start with an empty chat history unless the caller provides memory.
        self.memory = memory or ChatMemoryBuffer.from_defaults(
            chat_history=[], llm=self.llm
        )
        self.output_parser = output_parser
        self.verbose = verbose
        self.handle_reasoning_failure_fn = handle_reasoning_failure_fn
        self.user_id = user_id

        # Planner turns queries into execution plans; the operator runs them
        # against the tools registered via the importer.
        self.planner = HubPlanner(self.llm)
        self.tool_importer = ToolImporter(tools, tool_specs)
        self.hub_operator = HubOperator(self.tool_importer, self.user_id)

    def chat(
        self,
        query: str,
    ) -> str:
        """Process a user query and generate a response.

        Args:
            query (str): The user query to process.

        Returns:
            str: The response generated by SecGPT.
        """
        # Snapshot the history *before* adding the new user turn, so the
        # planner sees prior context separately from the current query.
        memory_content = self.memory.get()
        self.memory.put(ChatMessage(role=MessageRole.USER, content=query))
        tool_info = self.tool_importer.get_tool_info()
        plan = self.planner.plan_generate(query, tool_info, memory_content)
        response = self.hub_operator.run(query, plan)
        # Persist the assistant turn so later calls have full context.
        self.memory.put(ChatMessage(role=MessageRole.CHATBOT, content=response))

        return response

chat #

chat(query: str) -> str

Process a user query and generate a response.

Parameters:

Name Type Description Default
query str

The user query to process.

required

Returns:

Name Type Description
str str

The response generated by SecGPT.

Source code in llama-index-packs/llama-index-packs-secgpt/llama_index/packs/secgpt/hub.py
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
def chat(
    self,
    query: str,
) -> str:
    """Process a user query and generate a response.

    Args:
        query (str): The user query to process.

    Returns:
        str: The response generated by SecGPT.
    """
    # Capture prior conversation before recording the new user turn.
    history = self.memory.get()
    self.memory.put(ChatMessage(role=MessageRole.USER, content=query))

    # Plan against the currently available tools, then let the hub
    # operator execute the plan in the appropriate isolated app.
    available_tools = self.tool_importer.get_tool_info()
    plan = self.planner.plan_generate(query, available_tools, history)
    answer = self.hub_operator.run(query, plan)

    # Store the assistant turn so subsequent calls see full context.
    self.memory.put(ChatMessage(role=MessageRole.CHATBOT, content=answer))
    return answer