Finetune Embeddings#
In this notebook, we show users how to finetune their own embedding models.
We go through three main sections:
Preparing the data (our generate_qa_embedding_pairs function makes this easy)
Finetuning the model (using our SentenceTransformersFinetuneEngine)
Evaluating the model on a validation knowledge corpus
Generate Corpus#
First, we create the corpus of text chunks by leveraging LlamaIndex to load some financial PDFs and parse/chunk them into plain text chunks.
import json
from llama_index import SimpleDirectoryReader
from llama_index.node_parser import SentenceSplitter
from llama_index.schema import MetadataMode
Download Data
!mkdir -p 'data/10k/'
!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/uber_2021.pdf' -O 'data/10k/uber_2021.pdf'
!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10k/lyft_2021.pdf' -O 'data/10k/lyft_2021.pdf'
TRAIN_FILES = ["./data/10k/lyft_2021.pdf"]
VAL_FILES = ["./data/10k/uber_2021.pdf"]
TRAIN_CORPUS_FPATH = "./data/train_corpus.json"
VAL_CORPUS_FPATH = "./data/val_corpus.json"
def load_corpus(files, verbose=False):
    if verbose:
        print(f"Loading files {files}")

    reader = SimpleDirectoryReader(input_files=files)
    docs = reader.load_data()
    if verbose:
        print(f"Loaded {len(docs)} docs")

    parser = SentenceSplitter()
    nodes = parser.get_nodes_from_documents(docs, show_progress=verbose)

    if verbose:
        print(f"Parsed {len(nodes)} nodes")

    return nodes
We do a very naive train/val split by having the Lyft corpus as the train dataset, and the Uber corpus as the val dataset.
train_nodes = load_corpus(TRAIN_FILES, verbose=True)
val_nodes = load_corpus(VAL_FILES, verbose=True)
Loading files ['./data/10k/lyft_2021.pdf']
Loaded 238 docs
Parsed 344 nodes
Loading files ['./data/10k/uber_2021.pdf']
Loaded 307 docs
Parsed 410 nodes
Generate synthetic queries#
Now, we use an LLM (gpt-3.5-turbo) to generate questions using each text chunk in the corpus as context.
Each pair of (generated question, text chunk used as context) becomes a datapoint in the finetuning dataset (either for training or evaluation).
from llama_index.finetuning import (
generate_qa_embedding_pairs,
EmbeddingQAFinetuneDataset,
)
import os
OPENAI_API_TOKEN = "sk-"
os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN
from llama_index.llms import OpenAI
train_dataset = generate_qa_embedding_pairs(
llm=OpenAI(model="gpt-3.5-turbo"), nodes=train_nodes
)
val_dataset = generate_qa_embedding_pairs(
llm=OpenAI(model="gpt-3.5-turbo"), nodes=val_nodes
)
train_dataset.save_json("train_dataset.json")
val_dataset.save_json("val_dataset.json")
100%|██████████| 344/344 [12:51<00:00, 2.24s/it]
100%|██████████| 410/410 [16:07<00:00, 2.36s/it]
# [Optional] Load
train_dataset = EmbeddingQAFinetuneDataset.from_json("train_dataset.json")
val_dataset = EmbeddingQAFinetuneDataset.from_json("val_dataset.json")
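As a quick sanity check, you can inspect a generated pair directly. The dataset exposes queries, corpus, and relevant_docs mappings (the same attributes the eval code below relies on); a minimal peek, assuming at least one pair was generated, might look like this:
# Inspect one generated (query, context) pair:
# queries maps query_id -> question, relevant_docs maps query_id -> [node_id],
# and corpus maps node_id -> text chunk
sample_query_id = next(iter(train_dataset.queries))
print(train_dataset.queries[sample_query_id])
relevant_node_id = train_dataset.relevant_docs[sample_query_id][0]
print(train_dataset.corpus[relevant_node_id][:200])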
Run Embedding Finetuning#
from llama_index.finetuning import SentenceTransformersFinetuneEngine
finetune_engine = SentenceTransformersFinetuneEngine(
train_dataset,
model_id="BAAI/bge-small-en",
model_output_path="test_model",
val_dataset=val_dataset,
)
finetune_engine.finetune()
embed_model = finetune_engine.get_finetuned_model()
embed_model
HuggingFaceEmbedding(model_name='test_model', embed_batch_size=10, callback_manager=<llama_index.callbacks.base.CallbackManager object at 0x2cc3d5cd0>, tokenizer_name='test_model', max_length=512, pooling=<Pooling.CLS: 'cls'>, normalize=True, query_instruction=None, text_instruction=None, cache_folder=None)
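The finetuned model is returned as a regular LlamaIndex embedding object, so you can embed text with it directly. As a quick smoke test (the query string below is just an arbitrary example):
# Embed an arbitrary piece of text with the finetuned model;
# get_text_embedding is part of LlamaIndex's base embedding interface
sample_embedding = embed_model.get_text_embedding("What was Lyft's revenue in 2021?")
print(len(sample_embedding))  # embedding dimensionality of the finetuned model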
Evaluate Finetuned Model#
In this section, we evaluate 3 different embedding models:
the proprietary OpenAI embedding,
the open source BAAI/bge-small-en, and
our finetuned embedding model.
We consider 2 evaluation approaches:
a simple custom hit rate metric, and
the InformationRetrievalEvaluator from sentence_transformers.
We show that finetuning on a synthetic (LLM-generated) dataset significantly improves upon an open-source embedding model.
from llama_index.embeddings import OpenAIEmbedding
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.schema import TextNode
from tqdm.notebook import tqdm
import pandas as pd
Define eval function#
Option 1: We use a simple hit rate metric for evaluation:
for each (query, relevant_doc) pair,
we retrieve top-k documents with the query, and
it's a hit if the results contain the relevant_doc.
This approach is very simple and intuitive, and we can apply it to both the proprietary OpenAI embedding as well as our open source and fine-tuned embedding models.
def evaluate(
    dataset,
    embed_model,
    top_k=5,
    verbose=False,
):
    corpus = dataset.corpus
    queries = dataset.queries
    relevant_docs = dataset.relevant_docs

    service_context = ServiceContext.from_defaults(embed_model=embed_model)
    nodes = [TextNode(id_=id_, text=text) for id_, text in corpus.items()]
    index = VectorStoreIndex(
        nodes, service_context=service_context, show_progress=True
    )
    retriever = index.as_retriever(similarity_top_k=top_k)

    eval_results = []
    for query_id, query in tqdm(queries.items()):
        retrieved_nodes = retriever.retrieve(query)
        retrieved_ids = [node.node.node_id for node in retrieved_nodes]
        expected_id = relevant_docs[query_id][0]
        is_hit = expected_id in retrieved_ids  # assume 1 relevant doc

        eval_result = {
            "is_hit": is_hit,
            "retrieved": retrieved_ids,
            "expected": expected_id,
            "query": query_id,
        }
        eval_results.append(eval_result)
    return eval_results
Option 2: We use the InformationRetrievalEvaluator from sentence_transformers.
This provides a more comprehensive suite of metrics, but we can only run it against the sentence_transformers-compatible models (the open source and finetuned models, not the OpenAI embedding model).
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers import SentenceTransformer
from pathlib import Path
def evaluate_st(
    dataset,
    model_id,
    name,
):
    corpus = dataset.corpus
    queries = dataset.queries
    relevant_docs = dataset.relevant_docs

    evaluator = InformationRetrievalEvaluator(
        queries, corpus, relevant_docs, name=name
    )
    model = SentenceTransformer(model_id)
    output_path = "results/"
    Path(output_path).mkdir(exist_ok=True, parents=True)
    return evaluator(model, output_path=output_path)
Run Evals#
OpenAI#
Note: this might take a few minutes to run since we have to embed the corpus and queries
ada = OpenAIEmbedding()
ada_val_results = evaluate(val_dataset, ada)
df_ada = pd.DataFrame(ada_val_results)
hit_rate_ada = df_ada["is_hit"].mean()
hit_rate_ada
0.8779904306220095
BAAI/bge-small-en#
bge = "local:BAAI/bge-small-en"
bge_val_results = evaluate(val_dataset, bge)
df_bge = pd.DataFrame(bge_val_results)
hit_rate_bge = df_bge["is_hit"].mean()
hit_rate_bge
0.7930622009569378
evaluate_st(val_dataset, "BAAI/bge-small-en", name="bge")
Finetuned#
finetuned = "local:test_model"
val_results_finetuned = evaluate(val_dataset, finetuned)
df_finetuned = pd.DataFrame(val_results_finetuned)
hit_rate_finetuned = df_finetuned["is_hit"].mean()
hit_rate_finetuned
evaluate_st(val_dataset, "test_model", name="finetuned")
Summary of Results#
Hit rate#
df_ada["model"] = "ada"
df_bge["model"] = "bge"
df_finetuned["model"] = "fine_tuned"
We can see that fine-tuning our small open-source embedding model drastically improves its retrieval quality (even approaching the quality of the proprietary OpenAI embedding)!
df_all = pd.concat([df_ada, df_bge, df_finetuned])
df_all.groupby("model")["is_hit"].mean()
InformationRetrievalEvaluator#
df_st_bge = pd.read_csv(
"results/Information-Retrieval_evaluation_bge_results.csv"
)
df_st_finetuned = pd.read_csv(
"results/Information-Retrieval_evaluation_finetuned_results.csv"
)
We can see that embedding finetuning improves performance consistently across the suite of eval metrics.
df_st_bge["model"] = "bge"
df_st_finetuned["model"] = "fine_tuned"
df_st_all = pd.concat([df_st_bge, df_st_finetuned])
df_st_all = df_st_all.set_index("model")
df_st_all
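To see the per-metric deltas at a glance, you can subtract the two rows of df_st_all (a small sketch; it assumes each model appears exactly once in the index):
# Positive values indicate metrics where the finetuned model beats the base model
(df_st_all.loc["fine_tuned"] - df_st_all.loc["bge"]).sort_values(ascending=False)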