Build Agentic RAG using a Vertex AI managed index¶
Author(s): Dave Wang
Install Libraries¶
In [ ]:
!pip install --upgrade google-cloud-aiplatform llama-index llama-index-vector-stores-vertexaivectorsearch llama-index-llms-vertex llama-index-llms-gemini llama-index-embeddings-vertex
In [ ]:
!pip install llama-index-indices-managed-vertexai
Restart current runtime¶
To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which will restart the current kernel.
In [ ]:
# Colab only
# Automatically restart kernel after installs so that your environment can access the new packages
import IPython

app = IPython.Application.instance()
app.kernel.do_shutdown(True)
Authenticate your notebook environment (Colab only)¶
If you are running this notebook on Google Colab, you will need to authenticate your environment. To do this, run the cell below. This step is not required if you are using Vertex AI Workbench.
In [ ]:
# Colab only
import sys

if "google.colab" in sys.modules:
    from google.colab import auth

    auth.authenticate_user()
In [ ]:
# If you're using a JupyterLab instance, uncomment and run the code below.
# !gcloud auth login
Import libraries¶
In [ ]:
# Import the modules needed
from pathlib import Path
from typing import List, Optional

from llama_index.core import (
    Settings,
    SimpleDirectoryReader,
    StorageContext,
    SummaryIndex,
    VectorStoreIndex,
)
from llama_index.core.agent import FunctionCallingAgent
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import TextNode
from llama_index.core.tools import FunctionTool, QueryEngineTool
from llama_index.core.vector_stores import FilterCondition
from llama_index.core.vector_stores.types import (
    FilterOperator,
    MetadataFilter,
    MetadataFilters,
)
from llama_index.embeddings.vertex import VertexTextEmbedding
from llama_index.llms.vertex import Vertex
Define Google Cloud project information and initialize Vertex AI¶
Initialize the Vertex AI SDK for Python for your project:
In [ ]:
# TODO: Set values as per your requirements
# Project and Storage Constants
PROJECT_ID = "<your project id>"
REGION = "us-central1"
GCS_BUCKET_NAME = "<your bucket name>"
GCS_BUCKET_URI = f"gs://{GCS_BUCKET_NAME}"
In [ ]:
from google.cloud import aiplatform

aiplatform.init(project=PROJECT_ID, location=REGION)
Download Sample Documents for Testing¶
In [ ]:
import requests

urls = [
    "https://openreview.net/pdf?id=VtmBAGCN7o",
    "https://openreview.net/pdf?id=6PmJoRfdaK",
    "https://openreview.net/pdf?id=LzPWWPAdY4",
    "https://openreview.net/pdf?id=VTF8yNQM66",
    "https://openreview.net/pdf?id=hSyW5go0v8",
    "https://openreview.net/pdf?id=9WD9KwssyT",
    "https://openreview.net/pdf?id=yV6fD7LYkF",
    "https://openreview.net/pdf?id=hnrB5YHoYu",
    "https://openreview.net/pdf?id=WbWtOYIzIK",
    "https://openreview.net/pdf?id=c5pwL0Soay",
    "https://openreview.net/pdf?id=TpD2aG1h0D",
]

papers = [
    "metagpt.pdf",
    "longlora.pdf",
    "loftq.pdf",
    "swebench.pdf",
    "selfrag.pdf",
    "zipformer.pdf",
    "values.pdf",
    "finetune_fair_diffusion.pdf",
    "knowledge_card.pdf",
    "metra.pdf",
    "vr_mcl.pdf",
]


def download_file(url, file_path):
    """Downloads a file from a given URL and saves it to the specified file path.

    Args:
        url: The URL of the file to download.
        file_path: The path to save the downloaded file.
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()  # Raise an exception for non-200 status codes
    with open(file_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:  # Filter out keep-alive new chunks
                f.write(chunk)
    print(f"Downloaded file from {url} to {file_path}")


for url, paper in zip(urls, papers):
    download_file(url, paper)
Enable async for the notebook¶
In [ ]:
import nest_asyncio

nest_asyncio.apply()
In [ ]:
from llama_index.indices.managed.vertexai import VertexAIIndex

# TODO(developer): Replace these values with your project information
project_id = PROJECT_ID
location = "us-central1"
In [ ]:
# Configure the embedding model
embed_model = VertexTextEmbedding(
    model_name="text-embedding-004",
    project=PROJECT_ID,
    location=REGION,
)

vertex_gemini = Vertex(
    model="gemini-1.5-pro-preview-0514",
    temperature=0,
    context_window=100000,
    additional_kwargs={},
)

# Set up the index/query process, i.e., the embedding model (and the LLM used for completions)
Settings.embed_model = embed_model
Settings.llm = vertex_gemini
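Optionally, run a quick sanity check to confirm the credentials and both models respond before building the index. This is a minimal sketch; `get_text_embedding` and `complete` are standard LlamaIndex interfaces, and the prompt text is just an illustration.

In [ ]:
# Optional sanity check: confirm the embedding model and LLM are wired up
embedding = embed_model.get_text_embedding("Hello, Vertex AI!")
print(f"Embedding dimension: {len(embedding)}")

completion = vertex_gemini.complete("Say hello in one short sentence.")
print(completion.text)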
In [ ]:
# Optional: If creating a new corpus
corpus_display_name = "my-corpus"
corpus_description = "Vertex AI Corpus for LlamaIndex"

# Create a corpus or provide an existing corpus ID
index = VertexAIIndex(
    project_id,
    location,
    corpus_display_name=corpus_display_name,
    corpus_description=corpus_description,
)
print(f"Newly created corpus name is {index.corpus_name}.")

# Upload a local file to the corpus
file_name = index.insert_file(
    file_path="longlora.pdf",
    metadata={
        "display_name": "long_lora",
        "description": "LongLoRA paper",
    },
)
In [ ]:
index.list_files()
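`list_files` returns the files registered in the managed corpus, so printing its result is a quick way to confirm the upload succeeded:

In [ ]:
# Confirm the upload by printing every file registered in the corpus
for f in index.list_files():
    print(f)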
Create LlamaIndex query engine and retriever¶
In [ ]:
# Querying
query_engine = index.as_query_engine()

# Retrieving
retriever = index.as_retriever()
In [ ]:
response = query_engine.query("What is LongLoRA?")
print(response)
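The response object also carries the retrieved context. Inspecting `response.source_nodes` (a standard LlamaIndex response attribute) shows which chunks grounded the answer, along with their similarity scores and metadata:

In [ ]:
# Show provenance: the chunks that grounded the answer
for node in response.source_nodes:
    print(node.score, node.metadata)
    print(node.text[:200])
    print("---")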
In [ ]:
nodes = retriever.retrieve("What is LongLoRA?")
for n in nodes:
    print(n.text)
Task 1: Router query engine¶
Create vector index¶
In [ ]:
# load documents
documents = SimpleDirectoryReader(input_files=["metagpt.pdf"]).load_data()
In [ ]:
# Add the document to the managed index
index.insert_file(
    file_path="metagpt.pdf",
    metadata={
        "display_name": "metagpt",
        "description": "metagpt",
    },
)
In [ ]:
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
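Before building the summary index, it can help to verify what the splitter produced. A minimal check using standard node accessors:

In [ ]:
# Verify the chunking: documents in vs. chunks out, plus a sample chunk
print(f"{len(documents)} documents -> {len(nodes)} nodes")
print(nodes[0].get_content()[:300])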
Create summary index¶
In [ ]:
summary_index = SummaryIndex(nodes)
Create query engines from the indexes¶
In [ ]:
summary_query_engine = summary_index.as_query_engine(
    response_mode="tree_summarize",
    use_async=True,
)
vector_query_engine = index.as_query_engine()
In [ ]:
summary_query_engine.query("What's the summary of the document?")
Create tools from query engines¶
In [ ]:
summary_tool = QueryEngineTool.from_defaults(
    query_engine=summary_query_engine,
    description=("Useful for summarization questions related to MetaGPT"),
)

vector_tool = QueryEngineTool.from_defaults(
    query_engine=vector_query_engine,
    description=("Useful for retrieving specific context from the MetaGPT paper."),
)
In [ ]:
from llama_index.core.query_engine.router_query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector

query_engine = RouterQueryEngine(
    selector=LLMSingleSelector.from_defaults(),
    query_engine_tools=[
        summary_tool,
        vector_tool,
    ],
    verbose=True,
)
In [ ]:
response = query_engine.query("What is the summary of the document?")
print(str(response))
In [ ]:
print(len(response.source_nodes))
In [ ]:
response = query_engine.query("How do agents share information with other agents?")
print(str(response))
Task 2: Tool calling¶
Create auto-retrieval tools with parameters¶
In [ ]:
query_engine = index.as_query_engine(
    similarity_top_k=2,
    filters=MetadataFilters.from_dicts([{"key": "page_label", "value": "2"}]),
)

response = query_engine.query(
    "What are some high-level results of MetaGPT?",
)
In [ ]:
summary_query_engine = summary_index.as_query_engine(
    response_mode="tree_summarize",
    use_async=True,
)

summary_tool = QueryEngineTool.from_defaults(
    query_engine=summary_query_engine,
    description=("Useful for summarization questions related to MetaGPT"),
)
In [ ]:
print(str(response))
Define auto-retrieval tools for function calling¶
In [ ]:
def vector_query(query: str, page_numbers: Optional[List[str]] = None) -> str:
    """Perform a vector search over an index.

    Args:
        query (str): the string query to be embedded.
        page_numbers (Optional[List[str]]): Filter by set of pages. Leave as None
            to perform a vector search over all pages. Otherwise, filter by
            the set of specified pages.
    """
    page_numbers = page_numbers or []
    metadata_dicts = [{"key": "page_label", "value": p} for p in page_numbers]

    query_engine = index.as_query_engine(
        similarity_top_k=2,
        filters=MetadataFilters.from_dicts(
            metadata_dicts, condition=FilterCondition.OR
        ),
    )
    response = query_engine.query(query)
    return response


vector_query_tool = FunctionTool.from_defaults(
    fn=vector_query,
    # name='vector_query'
)
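Before handing the tool to the LLM, you can invoke it directly to verify the metadata-filter plumbing. `FunctionTool.call` is the standard direct-invocation method; the page number here is just an illustration:

In [ ]:
# Directly invoke the tool to check it works before the LLM calls it
output = vector_query_tool.call(
    query="What are some high-level results of MetaGPT?", page_numbers=["2"]
)
print(output)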
In [ ]:
def summary_query(
    query: str,
) -> str:
    """Summarize the document.

    Args:
        query (str): the string query to be embedded.
    """
    summary_engine = summary_index.as_query_engine(
        response_mode="tree_summarize",
        use_async=True,
    )
    response = summary_engine.query(query)
    return response


summary_tool = FunctionTool.from_defaults(
    fn=summary_query,
    # name='summary_query'
)
In [ ]:
response = vertex_gemini.predict_and_call(
    [vector_query_tool, summary_tool],
    "What are the MetaGPT comparisons with ChatDev described on page 8?",
    verbose=True,
)
In [ ]:
response = vertex_gemini.predict_and_call(
    [summary_tool, vector_query_tool],
    "What is a summary of the paper?",
    verbose=True,
)
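`predict_and_call` returns an agent-style response; assuming the standard `AgentChatResponse` shape, its `sources` list records which tool the model selected:

In [ ]:
# Check which tool the LLM selected (assumes AgentChatResponse.sources)
for source in response.sources:
    print(source.tool_name)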
Task 3: Building an Agent Reasoning Loop¶
In [ ]:
# Utility that builds a vector-query tool and a summary tool for a given PDF
def get_doc_tools(
    file_path: str,
    name: str,
) -> tuple:
    """Get vector query and summary query tools from a document."""

    # Load the document and split it into chunks
    documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
    splitter = SentenceSplitter(chunk_size=1024)
    nodes = splitter.get_nodes_from_documents(documents)

    # Reuse the managed index and register this document with it
    vector_index = index
    vector_index.insert_file(
        file_path=file_path,
        metadata={
            "display_name": f"vector_index_{name}",
            "description": f"vector_index_{name}",
        },
    )
    summary_index = SummaryIndex(nodes)

    def vector_query(query: str, page_numbers: Optional[List[str]] = None) -> str:
        """Use to answer questions over the given paper.

        Useful if you have specific questions over the paper.
        Always leave page_numbers as None UNLESS there is a specific page you want to search for.

        Args:
            query (str): the string query to be embedded.
            page_numbers (Optional[List[str]]): Filter by set of pages. Leave as None
                to perform a vector search over all pages. Otherwise, filter by
                the set of specified pages.
        """
        page_numbers = page_numbers or []
        metadata_dicts = [{"key": "page_label", "value": p} for p in page_numbers]

        query_engine = vector_index.as_query_engine(
            similarity_top_k=2,
            filters=MetadataFilters.from_dicts(
                metadata_dicts, condition=FilterCondition.OR
            ),
        )
        response = query_engine.query(query)
        return response

    vector_query_tool = FunctionTool.from_defaults(
        name=f"vector_tool_{name}", fn=vector_query
    )

    def summary_query(
        query: str,
    ) -> str:
        """Summarize the document.

        Args:
            query (str): the string query to be embedded.
        """
        summary_engine = summary_index.as_query_engine(
            response_mode="tree_summarize",
            use_async=True,
        )
        response = summary_engine.query(query)
        return response

    summary_tool = FunctionTool.from_defaults(
        fn=summary_query, name=f"summary_tool_{name}"
    )

    return vector_query_tool, summary_tool
In [ ]:
vector_query_tool, summary_tool = get_doc_tools("metagpt.pdf", "metagpt")
In [ ]:
# Create Vertex AI client
vertex_gemini = Vertex(
    model="gemini-1.5-flash-preview-0514", context_window=100000
)
In [ ]:
# Create Agent
agent = FunctionCallingAgent.from_tools(
    [vector_query_tool, summary_tool], llm=vertex_gemini, verbose=True
)
In [ ]:
response = agent.query(
    "What are the agent roles in MetaGPT, "
    "and how do they communicate with each other?"
)
print(str(response))
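Because `FunctionCallingAgent` keeps conversation memory, you can also drive it turn by turn with `chat` rather than one-shot `query`; a short follow-up, for example:

In [ ]:
# Multi-turn interaction: chat preserves conversation memory across calls
response = agent.chat("Tell me about the evaluation results of MetaGPT.")
print(str(response))

response = agent.chat("Tell me more about how those results were measured.")
print(str(response))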
Task 4: Multi-document agent¶
In [ ]:
papers = [
    "metagpt.pdf",
    "longlora.pdf",
    "loftq.pdf",
    "swebench.pdf",
    "selfrag.pdf",
    "zipformer.pdf",
    "values.pdf",
    "knowledge_card.pdf",
    "metra.pdf",
]
In [ ]:
paper_to_tools_dict = {}
for paper in papers:
    print(f"Getting tools for paper: {paper}")
    vector_tool, summary_tool = get_doc_tools(paper, Path(paper).stem)
    paper_to_tools_dict[paper] = [vector_tool, summary_tool]
In [ ]:
all_tools = [t for paper in papers for t in paper_to_tools_dict[paper]]
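A quick count confirms the tool inventory, with two tools (vector + summary) per paper:

In [ ]:
# Expect 2 tools per paper: 9 papers -> 18 tools
print(len(all_tools))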
In [ ]:
# define an "object" index and retriever over these tools
from llama_index.core import VectorStoreIndex
from llama_index.core.objects import ObjectIndex

obj_index = ObjectIndex.from_objects(
    all_tools,
    index_cls=VectorStoreIndex,
)
In [ ]:
obj_retriever = obj_index.as_retriever(similarity_top_k=3)
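You can exercise the retriever directly: it indexes each tool's name and description, so a query returns the most relevant tool objects themselves rather than text chunks. For example:

In [ ]:
# The object retriever returns the tool objects most relevant to the query
tools = obj_retriever.retrieve("Tell me about the dataset used in SWE-Bench")
for tool in tools:
    print(tool.metadata.name)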
In [ ]:
agent = FunctionCallingAgent.from_tools(
    tool_retriever=obj_retriever,
    llm=vertex_gemini,
    system_prompt="""\
You are an agent designed to answer queries over a set of given papers.
Always use the tools provided to answer a question; do not rely on prior knowledge. Summarize your answer.\
""",
    verbose=True,
)
In [ ]:
response = agent.query(
    "Compare and contrast the LoRA papers (LongLoRA, LoftQ). "
    "Analyze the approach in each paper first."
)
print(str(response))
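When you are finished experimenting, you may want to remove the managed corpus so it does not keep accruing storage. A minimal cleanup sketch, assuming the `VertexAIIndex` integration exposes `delete_corpus` (left commented out because it is destructive):

In [ ]:
# Optional cleanup: delete the managed corpus when done (destructive!)
# Assumes VertexAIIndex exposes delete_corpus; uncomment to run.
# index.delete_corpus()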