BaseEmbedding

Bases: TransformComponent, DispatcherSpanMixin

Base class for embeddings.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| model_name | str | The name of the embedding model. | 'unknown' |
| embed_batch_size | int | The batch size for embedding calls. | 10 |
| callback_manager | CallbackManager | The callback manager for embedding events. | CallbackManager([]) |
| num_workers | Optional[int] | The number of workers to use for async embedding calls. | None |
Source code in llama-index-core/llama_index/core/base/embeddings/base.py
class BaseEmbedding(TransformComponent, DispatcherSpanMixin):
    """Base class for embeddings."""

    model_config = ConfigDict(
        protected_namespaces=("pydantic_model_",), arbitrary_types_allowed=True
    )
    model_name: str = Field(
        default="unknown", description="The name of the embedding model."
    )
    embed_batch_size: int = Field(
        default=DEFAULT_EMBED_BATCH_SIZE,
        description="The batch size for embedding calls.",
        gt=0,
        le=2048,
    )
    callback_manager: CallbackManager = Field(
        default_factory=lambda: CallbackManager([]), exclude=True
    )
    num_workers: Optional[int] = Field(
        default=None,
        description="The number of workers to use for async embedding calls.",
    )

    @field_validator("callback_manager")
    @classmethod
    def check_callback_manager(cls, v: CallbackManager) -> CallbackManager:
        if v is None:
            return CallbackManager([])
        return v

    @abstractmethod
    def _get_query_embedding(self, query: str) -> Embedding:
        """
        Embed the input query synchronously.

        Subclasses should implement this method. Reference get_query_embedding's
        docstring for more information.
        """

    @abstractmethod
    async def _aget_query_embedding(self, query: str) -> Embedding:
        """
        Embed the input query asynchronously.

        Subclasses should implement this method. Reference get_query_embedding's
        docstring for more information.
        """

    @dispatcher.span
    def get_query_embedding(self, query: str) -> Embedding:
        """
        Embed the input query.

        When embedding a query, depending on the model, a special instruction
        can be prepended to the raw query string. For example, "Represent the
        question for retrieving supporting documents: ". If you're curious,
        other examples of predefined instructions can be found in
        embeddings/huggingface_utils.py.
        """
        model_dict = self.to_dict()
        model_dict.pop("api_key", None)
        dispatcher.event(
            EmbeddingStartEvent(
                model_dict=model_dict,
            )
        )
        with self.callback_manager.event(
            CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
        ) as event:
            query_embedding = self._get_query_embedding(query)

            event.on_end(
                payload={
                    EventPayload.CHUNKS: [query],
                    EventPayload.EMBEDDINGS: [query_embedding],
                },
            )
        dispatcher.event(
            EmbeddingEndEvent(
                chunks=[query],
                embeddings=[query_embedding],
            )
        )
        return query_embedding

    @dispatcher.span
    async def aget_query_embedding(self, query: str) -> Embedding:
        """Get query embedding."""
        model_dict = self.to_dict()
        model_dict.pop("api_key", None)
        dispatcher.event(
            EmbeddingStartEvent(
                model_dict=model_dict,
            )
        )
        with self.callback_manager.event(
            CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
        ) as event:
            query_embedding = await self._aget_query_embedding(query)

            event.on_end(
                payload={
                    EventPayload.CHUNKS: [query],
                    EventPayload.EMBEDDINGS: [query_embedding],
                },
            )
        dispatcher.event(
            EmbeddingEndEvent(
                chunks=[query],
                embeddings=[query_embedding],
            )
        )
        return query_embedding

    def get_agg_embedding_from_queries(
        self,
        queries: List[str],
        agg_fn: Optional[Callable[..., Embedding]] = None,
    ) -> Embedding:
        """Get aggregated embedding from multiple queries."""
        query_embeddings = [self.get_query_embedding(query) for query in queries]
        agg_fn = agg_fn or mean_agg
        return agg_fn(query_embeddings)

    async def aget_agg_embedding_from_queries(
        self,
        queries: List[str],
        agg_fn: Optional[Callable[..., Embedding]] = None,
    ) -> Embedding:
        """Async get aggregated embedding from multiple queries."""
        query_embeddings = [await self.aget_query_embedding(query) for query in queries]
        agg_fn = agg_fn or mean_agg
        return agg_fn(query_embeddings)

    @abstractmethod
    def _get_text_embedding(self, text: str) -> Embedding:
        """
        Embed the input text synchronously.

        Subclasses should implement this method. Reference get_text_embedding's
        docstring for more information.
        """

    async def _aget_text_embedding(self, text: str) -> Embedding:
        """
        Embed the input text asynchronously.

        Subclasses can implement this method if there is a true async
        implementation. Reference get_text_embedding's docstring for more
        information.
        """
        # Default implementation just falls back on _get_text_embedding
        return self._get_text_embedding(text)

    def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
        """
        Embed the input sequence of text synchronously.

        Subclasses can implement this method if batch queries are supported.
        """
        # Default implementation just loops over _get_text_embedding
        return [self._get_text_embedding(text) for text in texts]

    async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
        """
        Embed the input sequence of text asynchronously.

        Subclasses can implement this method if batch queries are supported.
        """
        return await asyncio.gather(
            *[self._aget_text_embedding(text) for text in texts]
        )

    @dispatcher.span
    def get_text_embedding(self, text: str) -> Embedding:
        """
        Embed the input text.

        When embedding text, depending on the model, a special instruction
        can be prepended to the raw text string. For example, "Represent the
        document for retrieval: ". If you're curious, other examples of
        predefined instructions can be found in embeddings/huggingface_utils.py.
        """
        model_dict = self.to_dict()
        model_dict.pop("api_key", None)
        dispatcher.event(
            EmbeddingStartEvent(
                model_dict=model_dict,
            )
        )
        with self.callback_manager.event(
            CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
        ) as event:
            text_embedding = self._get_text_embedding(text)

            event.on_end(
                payload={
                    EventPayload.CHUNKS: [text],
                    EventPayload.EMBEDDINGS: [text_embedding],
                }
            )
        dispatcher.event(
            EmbeddingEndEvent(
                chunks=[text],
                embeddings=[text_embedding],
            )
        )
        return text_embedding

    @dispatcher.span
    async def aget_text_embedding(self, text: str) -> Embedding:
        """Async get text embedding."""
        model_dict = self.to_dict()
        model_dict.pop("api_key", None)
        dispatcher.event(
            EmbeddingStartEvent(
                model_dict=model_dict,
            )
        )
        with self.callback_manager.event(
            CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
        ) as event:
            text_embedding = await self._aget_text_embedding(text)

            event.on_end(
                payload={
                    EventPayload.CHUNKS: [text],
                    EventPayload.EMBEDDINGS: [text_embedding],
                }
            )
        dispatcher.event(
            EmbeddingEndEvent(
                chunks=[text],
                embeddings=[text_embedding],
            )
        )
        return text_embedding

    @dispatcher.span
    def get_text_embedding_batch(
        self,
        texts: List[str],
        show_progress: bool = False,
        **kwargs: Any,
    ) -> List[Embedding]:
        """Get a list of text embeddings, with batching."""
        cur_batch: List[str] = []
        result_embeddings: List[Embedding] = []

        queue_with_progress = enumerate(
            get_tqdm_iterable(texts, show_progress, "Generating embeddings")
        )

        model_dict = self.to_dict()
        model_dict.pop("api_key", None)
        for idx, text in queue_with_progress:
            cur_batch.append(text)
            if idx == len(texts) - 1 or len(cur_batch) == self.embed_batch_size:
                # flush
                dispatcher.event(
                    EmbeddingStartEvent(
                        model_dict=model_dict,
                    )
                )
                with self.callback_manager.event(
                    CBEventType.EMBEDDING,
                    payload={EventPayload.SERIALIZED: self.to_dict()},
                ) as event:
                    embeddings = self._get_text_embeddings(cur_batch)
                    result_embeddings.extend(embeddings)
                    event.on_end(
                        payload={
                            EventPayload.CHUNKS: cur_batch,
                            EventPayload.EMBEDDINGS: embeddings,
                        },
                    )
                dispatcher.event(
                    EmbeddingEndEvent(
                        chunks=cur_batch,
                        embeddings=embeddings,
                    )
                )
                cur_batch = []

        return result_embeddings

    @dispatcher.span
    async def aget_text_embedding_batch(
        self, texts: List[str], show_progress: bool = False
    ) -> List[Embedding]:
        """Asynchronously get a list of text embeddings, with batching."""
        num_workers = self.num_workers

        model_dict = self.to_dict()
        model_dict.pop("api_key", None)

        cur_batch: List[str] = []
        callback_payloads: List[Tuple[str, List[str]]] = []
        result_embeddings: List[Embedding] = []
        embeddings_coroutines: List[Coroutine] = []
        for idx, text in enumerate(texts):
            cur_batch.append(text)
            if idx == len(texts) - 1 or len(cur_batch) == self.embed_batch_size:
                # flush
                dispatcher.event(
                    EmbeddingStartEvent(
                        model_dict=model_dict,
                    )
                )
                event_id = self.callback_manager.on_event_start(
                    CBEventType.EMBEDDING,
                    payload={EventPayload.SERIALIZED: self.to_dict()},
                )
                callback_payloads.append((event_id, cur_batch))
                embeddings_coroutines.append(self._aget_text_embeddings(cur_batch))
                cur_batch = []

        # flatten the results of asyncio.gather, which is a list of embeddings lists
        nested_embeddings = []

        if num_workers and num_workers > 1:
            nested_embeddings = await run_jobs(
                embeddings_coroutines,
                show_progress=show_progress,
                workers=self.num_workers,
                desc="Generating embeddings",
            )
        else:
            if show_progress:
                try:
                    from tqdm.asyncio import tqdm_asyncio

                    nested_embeddings = await tqdm_asyncio.gather(
                        *embeddings_coroutines,
                        total=len(embeddings_coroutines),
                        desc="Generating embeddings",
                    )
                except ImportError:
                    nested_embeddings = await asyncio.gather(*embeddings_coroutines)
            else:
                nested_embeddings = await asyncio.gather(*embeddings_coroutines)

        result_embeddings = [
            embedding for embeddings in nested_embeddings for embedding in embeddings
        ]

        for (event_id, text_batch), embeddings in zip(
            callback_payloads, nested_embeddings
        ):
            dispatcher.event(
                EmbeddingEndEvent(
                    chunks=text_batch,
                    embeddings=embeddings,
                )
            )
            self.callback_manager.on_event_end(
                CBEventType.EMBEDDING,
                payload={
                    EventPayload.CHUNKS: text_batch,
                    EventPayload.EMBEDDINGS: embeddings,
                },
                event_id=event_id,
            )

        return result_embeddings

    def similarity(
        self,
        embedding1: Embedding,
        embedding2: Embedding,
        mode: SimilarityMode = SimilarityMode.DEFAULT,
    ) -> float:
        """Get embedding similarity."""
        return similarity(embedding1=embedding1, embedding2=embedding2, mode=mode)

    def __call__(self, nodes: Sequence[BaseNode], **kwargs: Any) -> Sequence[BaseNode]:
        embeddings = self.get_text_embedding_batch(
            [node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes],
            **kwargs,
        )

        for node, embedding in zip(nodes, embeddings):
            node.embedding = embedding

        return nodes

    async def acall(
        self, nodes: Sequence[BaseNode], **kwargs: Any
    ) -> Sequence[BaseNode]:
        embeddings = await self.aget_text_embedding_batch(
            [node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes],
            **kwargs,
        )

        for node, embedding in zip(nodes, embeddings):
            node.embedding = embedding

        return nodes
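
As a usage sketch: a custom embedding model only needs to implement the three abstract hooks (`_get_query_embedding`, `_aget_query_embedding`, `_get_text_embedding`); batching, callbacks, and events come from the base class. The class below is hypothetical, and its hash-derived vectors merely stand in for a real model call:

from llama_index.core.base.embeddings.base import BaseEmbedding, Embedding


class ToyHashEmbedding(BaseEmbedding):
    """Illustrative subclass; pseudo-vectors stand in for a real model."""

    # Redeclare the model_name field with a new default; add a toy dimension.
    model_name: str = "toy-hash"
    dim: int = 8

    def _vector(self, text: str) -> Embedding:
        # Derive a fixed-size vector from the text in place of a model call.
        h = hash(text)
        return [float((h >> (8 * i)) & 0xFF) / 255.0 for i in range(self.dim)]

    def _get_query_embedding(self, query: str) -> Embedding:
        return self._vector(query)

    async def _aget_query_embedding(self, query: str) -> Embedding:
        return self._get_query_embedding(query)

    def _get_text_embedding(self, text: str) -> Embedding:
        return self._vector(text)


embed_model = ToyHashEmbedding()
vector = embed_model.get_text_embedding("hello world")  # len(vector) == 8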

get_query_embedding

get_query_embedding(query: str) -> Embedding

Embed the input query.

When embedding a query, depending on the model, a special instruction can be prepended to the raw query string. For example, "Represent the question for retrieving supporting documents: ". If you're curious, other examples of predefined instructions can be found in embeddings/huggingface_utils.py.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
@dispatcher.span
def get_query_embedding(self, query: str) -> Embedding:
    """
    Embed the input query.

    When embedding a query, depending on the model, a special instruction
    can be prepended to the raw query string. For example, "Represent the
    question for retrieving supporting documents: ". If you're curious,
    other examples of predefined instructions can be found in
    embeddings/huggingface_utils.py.
    """
    model_dict = self.to_dict()
    model_dict.pop("api_key", None)
    dispatcher.event(
        EmbeddingStartEvent(
            model_dict=model_dict,
        )
    )
    with self.callback_manager.event(
        CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
    ) as event:
        query_embedding = self._get_query_embedding(query)

        event.on_end(
            payload={
                EventPayload.CHUNKS: [query],
                EventPayload.EMBEDDINGS: [query_embedding],
            },
        )
    dispatcher.event(
        EmbeddingEndEvent(
            chunks=[query],
            embeddings=[query_embedding],
        )
    )
    return query_embedding
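
A minimal call sketch, using MockEmbedding (the test model this module falls back to elsewhere) in place of a real provider:

from llama_index.core import MockEmbedding

embed_model = MockEmbedding(embed_dim=8)
# Any instruction prefix is applied inside the subclass's _get_query_embedding.
query_vector = embed_model.get_query_embedding("What is an embedding?")
assert len(query_vector) == 8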

aget_query_embedding async

aget_query_embedding(query: str) -> Embedding

Get query embedding.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
@dispatcher.span
async def aget_query_embedding(self, query: str) -> Embedding:
    """Get query embedding."""
    model_dict = self.to_dict()
    model_dict.pop("api_key", None)
    dispatcher.event(
        EmbeddingStartEvent(
            model_dict=model_dict,
        )
    )
    with self.callback_manager.event(
        CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
    ) as event:
        query_embedding = await self._aget_query_embedding(query)

        event.on_end(
            payload={
                EventPayload.CHUNKS: [query],
                EventPayload.EMBEDDINGS: [query_embedding],
            },
        )
    dispatcher.event(
        EmbeddingEndEvent(
            chunks=[query],
            embeddings=[query_embedding],
        )
    )
    return query_embedding

get_agg_embedding_from_queries

get_agg_embedding_from_queries(queries: List[str], agg_fn: Optional[Callable[..., Embedding]] = None) -> Embedding

Get aggregated embedding from multiple queries.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
def get_agg_embedding_from_queries(
    self,
    queries: List[str],
    agg_fn: Optional[Callable[..., Embedding]] = None,
) -> Embedding:
    """Get aggregated embedding from multiple queries."""
    query_embeddings = [self.get_query_embedding(query) for query in queries]
    agg_fn = agg_fn or mean_agg
    return agg_fn(query_embeddings)
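
When agg_fn is omitted, mean_agg averages the query embeddings element-wise; any callable mapping a list of embeddings to a single embedding works. A sketch with a hypothetical max-pooling aggregator (max_agg is not part of the library):

from llama_index.core import MockEmbedding


def max_agg(embeddings):
    # Element-wise max across the query embeddings (illustrative only).
    return [max(dims) for dims in zip(*embeddings)]


embed_model = MockEmbedding(embed_dim=8)
agg_vector = embed_model.get_agg_embedding_from_queries(
    ["first phrasing of the question", "second phrasing of the question"],
    agg_fn=max_agg,
)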

aget_agg_embedding_from_queries async

aget_agg_embedding_from_queries(queries: List[str], agg_fn: Optional[Callable[..., Embedding]] = None) -> Embedding

Async get aggregated embedding from multiple queries.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
async def aget_agg_embedding_from_queries(
    self,
    queries: List[str],
    agg_fn: Optional[Callable[..., Embedding]] = None,
) -> Embedding:
    """Async get aggregated embedding from multiple queries."""
    query_embeddings = [await self.aget_query_embedding(query) for query in queries]
    agg_fn = agg_fn or mean_agg
    return agg_fn(query_embeddings)

get_text_embedding

get_text_embedding(text: str) -> Embedding

Embed the input text.

When embedding text, depending on the model, a special instruction can be prepended to the raw text string. For example, "Represent the document for retrieval: ". If you're curious, other examples of predefined instructions can be found in embeddings/huggingface_utils.py.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
@dispatcher.span
def get_text_embedding(self, text: str) -> Embedding:
    """
    Embed the input text.

    When embedding text, depending on the model, a special instruction
    can be prepended to the raw text string. For example, "Represent the
    document for retrieval: ". If you're curious, other examples of
    predefined instructions can be found in embeddings/huggingface_utils.py.
    """
    model_dict = self.to_dict()
    model_dict.pop("api_key", None)
    dispatcher.event(
        EmbeddingStartEvent(
            model_dict=model_dict,
        )
    )
    with self.callback_manager.event(
        CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
    ) as event:
        text_embedding = self._get_text_embedding(text)

        event.on_end(
            payload={
                EventPayload.CHUNKS: [text],
                EventPayload.EMBEDDINGS: [text_embedding],
            }
        )
    dispatcher.event(
        EmbeddingEndEvent(
            chunks=[text],
            embeddings=[text_embedding],
        )
    )
    return text_embedding

aget_text_embedding async

aget_text_embedding(text: str) -> Embedding

Async get text embedding.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
@dispatcher.span
async def aget_text_embedding(self, text: str) -> Embedding:
    """Async get text embedding."""
    model_dict = self.to_dict()
    model_dict.pop("api_key", None)
    dispatcher.event(
        EmbeddingStartEvent(
            model_dict=model_dict,
        )
    )
    with self.callback_manager.event(
        CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
    ) as event:
        text_embedding = await self._aget_text_embedding(text)

        event.on_end(
            payload={
                EventPayload.CHUNKS: [text],
                EventPayload.EMBEDDINGS: [text_embedding],
            }
        )
    dispatcher.event(
        EmbeddingEndEvent(
            chunks=[text],
            embeddings=[text_embedding],
        )
    )
    return text_embedding

get_text_embedding_batch

get_text_embedding_batch(texts: List[str], show_progress: bool = False, **kwargs: Any) -> List[Embedding]

Get a list of text embeddings, with batching.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
@dispatcher.span
def get_text_embedding_batch(
    self,
    texts: List[str],
    show_progress: bool = False,
    **kwargs: Any,
) -> List[Embedding]:
    """Get a list of text embeddings, with batching."""
    cur_batch: List[str] = []
    result_embeddings: List[Embedding] = []

    queue_with_progress = enumerate(
        get_tqdm_iterable(texts, show_progress, "Generating embeddings")
    )

    model_dict = self.to_dict()
    model_dict.pop("api_key", None)
    for idx, text in queue_with_progress:
        cur_batch.append(text)
        if idx == len(texts) - 1 or len(cur_batch) == self.embed_batch_size:
            # flush
            dispatcher.event(
                EmbeddingStartEvent(
                    model_dict=model_dict,
                )
            )
            with self.callback_manager.event(
                CBEventType.EMBEDDING,
                payload={EventPayload.SERIALIZED: self.to_dict()},
            ) as event:
                embeddings = self._get_text_embeddings(cur_batch)
                result_embeddings.extend(embeddings)
                event.on_end(
                    payload={
                        EventPayload.CHUNKS: cur_batch,
                        EventPayload.EMBEDDINGS: embeddings,
                    },
                )
            dispatcher.event(
                EmbeddingEndEvent(
                    chunks=cur_batch,
                    embeddings=embeddings,
                )
            )
            cur_batch = []

    return result_embeddings
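
Texts accumulate until embed_batch_size is reached and the batch is flushed to _get_text_embeddings (with a final partial flush at the end), emitting one start/end event pair per batch. A sketch, again with MockEmbedding standing in for a real model:

from llama_index.core import MockEmbedding

# embed_batch_size=2 means five texts are embedded as batches of 2, 2, and 1.
embed_model = MockEmbedding(embed_dim=8, embed_batch_size=2)
vectors = embed_model.get_text_embedding_batch(
    ["a", "b", "c", "d", "e"],
    show_progress=False,
)
assert len(vectors) == 5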

aget_text_embedding_batch async

aget_text_embedding_batch(texts: List[str], show_progress: bool = False) -> List[Embedding]

Asynchronously get a list of text embeddings, with batching.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
@dispatcher.span
async def aget_text_embedding_batch(
    self, texts: List[str], show_progress: bool = False
) -> List[Embedding]:
    """Asynchronously get a list of text embeddings, with batching."""
    num_workers = self.num_workers

    model_dict = self.to_dict()
    model_dict.pop("api_key", None)

    cur_batch: List[str] = []
    callback_payloads: List[Tuple[str, List[str]]] = []
    result_embeddings: List[Embedding] = []
    embeddings_coroutines: List[Coroutine] = []
    for idx, text in enumerate(texts):
        cur_batch.append(text)
        if idx == len(texts) - 1 or len(cur_batch) == self.embed_batch_size:
            # flush
            dispatcher.event(
                EmbeddingStartEvent(
                    model_dict=model_dict,
                )
            )
            event_id = self.callback_manager.on_event_start(
                CBEventType.EMBEDDING,
                payload={EventPayload.SERIALIZED: self.to_dict()},
            )
            callback_payloads.append((event_id, cur_batch))
            embeddings_coroutines.append(self._aget_text_embeddings(cur_batch))
            cur_batch = []

    # flatten the results of asyncio.gather, which is a list of embeddings lists
    nested_embeddings = []

    if num_workers and num_workers > 1:
        nested_embeddings = await run_jobs(
            embeddings_coroutines,
            show_progress=show_progress,
            workers=self.num_workers,
            desc="Generating embeddings",
        )
    else:
        if show_progress:
            try:
                from tqdm.asyncio import tqdm_asyncio

                nested_embeddings = await tqdm_asyncio.gather(
                    *embeddings_coroutines,
                    total=len(embeddings_coroutines),
                    desc="Generating embeddings",
                )
            except ImportError:
                nested_embeddings = await asyncio.gather(*embeddings_coroutines)
        else:
            nested_embeddings = await asyncio.gather(*embeddings_coroutines)

    result_embeddings = [
        embedding for embeddings in nested_embeddings for embedding in embeddings
    ]

    for (event_id, text_batch), embeddings in zip(
        callback_payloads, nested_embeddings
    ):
        dispatcher.event(
            EmbeddingEndEvent(
                chunks=text_batch,
                embeddings=embeddings,
            )
        )
        self.callback_manager.on_event_end(
            CBEventType.EMBEDDING,
            payload={
                EventPayload.CHUNKS: text_batch,
                EventPayload.EMBEDDINGS: embeddings,
            },
            event_id=event_id,
        )

    return result_embeddings
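
Here the batch coroutines are created up front and awaited together: with num_workers greater than 1 they are throttled through run_jobs, otherwise they are gathered directly (via tqdm when show_progress is set). A sketch:

import asyncio

from llama_index.core import MockEmbedding


async def main() -> None:
    # num_workers=4 routes the per-batch coroutines through run_jobs.
    embed_model = MockEmbedding(embed_dim=8, embed_batch_size=2, num_workers=4)
    vectors = await embed_model.aget_text_embedding_batch(
        ["a", "b", "c", "d", "e"],
        show_progress=False,
    )
    assert len(vectors) == 5


asyncio.run(main())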

similarity

similarity(embedding1: Embedding, embedding2: Embedding, mode: SimilarityMode = SimilarityMode.DEFAULT) -> float

Get embedding similarity.

Source code in llama-index-core/llama_index/core/base/embeddings/base.py
def similarity(
    self,
    embedding1: Embedding,
    embedding2: Embedding,
    mode: SimilarityMode = SimilarityMode.DEFAULT,
) -> float:
    """Get embedding similarity."""
    return similarity(embedding1=embedding1, embedding2=embedding2, mode=mode)
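
A sketch comparing two vectors; SimilarityMode.DEFAULT is cosine similarity, and dot-product and Euclidean modes are also defined in this module:

from llama_index.core import MockEmbedding
from llama_index.core.base.embeddings.base import SimilarityMode

embed_model = MockEmbedding(embed_dim=8)
v1 = embed_model.get_text_embedding("first text")
v2 = embed_model.get_text_embedding("second text")
# Returns a float score; higher means more similar under the default mode.
score = embed_model.similarity(v1, v2, mode=SimilarityMode.DEFAULT)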

resolve_embed_model

resolve_embed_model(embed_model: Optional[EmbedType] = None, callback_manager: Optional[CallbackManager] = None) -> BaseEmbedding

Resolve embed model.

Source code in llama-index-core/llama_index/core/embeddings/utils.py
def resolve_embed_model(
    embed_model: Optional[EmbedType] = None,
    callback_manager: Optional[CallbackManager] = None,
) -> BaseEmbedding:
    """Resolve embed model."""
    from llama_index.core.settings import Settings

    try:
        from llama_index.core.bridge.langchain import Embeddings as LCEmbeddings
    except ImportError:
        LCEmbeddings = None  # type: ignore

    if embed_model == "default":
        if os.getenv("IS_TESTING"):
            embed_model = MockEmbedding(embed_dim=8)
            embed_model.callback_manager = callback_manager or Settings.callback_manager
            return embed_model

        try:
            from llama_index.embeddings.openai import (
                OpenAIEmbedding,
            )  # pants: no-infer-dep

            from llama_index.embeddings.openai.utils import (
                validate_openai_api_key,
            )  # pants: no-infer-dep

            embed_model = OpenAIEmbedding()
            validate_openai_api_key(embed_model.api_key)  # type: ignore
        except ImportError:
            raise ImportError(
                "`llama-index-embeddings-openai` package not found, "
                "please run `pip install llama-index-embeddings-openai`"
            )
        except ValueError as e:
            raise ValueError(
                "\n******\n"
                "Could not load OpenAI embedding model. "
                "If you intended to use OpenAI, please check your OPENAI_API_KEY.\n"
                "Original error:\n"
                f"{e!s}"
                "\nConsider using embed_model='local'.\n"
                "Visit our documentation for more embedding options: "
                "https://docs.llamaindex.ai/en/stable/module_guides/models/"
                "embeddings.html#modules"
                "\n******"
            )
    # for image multi-modal embeddings
    elif isinstance(embed_model, str) and embed_model.startswith("clip"):
        try:
            from llama_index.embeddings.clip import ClipEmbedding  # pants: no-infer-dep

            clip_model_name = (
                embed_model.split(":")[1] if ":" in embed_model else "ViT-B/32"
            )
            embed_model = ClipEmbedding(model_name=clip_model_name)
        except ImportError as e:
            raise ImportError(
                "`llama-index-embeddings-clip` package not found, "
                "please run `pip install llama-index-embeddings-clip` and `pip install git+https://github.com/openai/CLIP.git`"
            )

    if isinstance(embed_model, str):
        try:
            from llama_index.embeddings.huggingface import (
                HuggingFaceEmbedding,
            )  # pants: no-infer-dep

            splits = embed_model.split(":", 1)
            is_local = splits[0]
            model_name = splits[1] if len(splits) > 1 else None
            if is_local != "local":
                raise ValueError(
                    "embed_model must start with str 'local' or of type BaseEmbedding"
                )

            cache_folder = os.path.join(get_cache_dir(), "models")
            os.makedirs(cache_folder, exist_ok=True)

            embed_model = HuggingFaceEmbedding(
                model_name=model_name, cache_folder=cache_folder
            )
        except ImportError:
            raise ImportError(
                "`llama-index-embeddings-huggingface` package not found, "
                "please run `pip install llama-index-embeddings-huggingface`"
            )

    if LCEmbeddings is not None and isinstance(embed_model, LCEmbeddings):
        try:
            from llama_index.embeddings.langchain import (
                LangchainEmbedding,
            )  # pants: no-infer-dep

            embed_model = LangchainEmbedding(embed_model)
        except ImportError as e:
            raise ImportError(
                "`llama-index-embeddings-langchain` package not found, "
                "please run `pip install llama-index-embeddings-langchain`"
            )

    if embed_model is None:
        print("Embeddings have been explicitly disabled. Using MockEmbedding.")
        embed_model = MockEmbedding(embed_dim=1)

    assert isinstance(embed_model, BaseEmbedding)

    embed_model.callback_manager = callback_manager or Settings.callback_manager

    return embed_model
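
A sketch of the string shortcuts handled above. Per the source, "default" requires the llama-index-embeddings-openai package and a valid OPENAI_API_KEY, and "local:..." requires llama-index-embeddings-huggingface; the HuggingFace model name below is illustrative:

from llama_index.core.embeddings.utils import resolve_embed_model

# "local:<model>" resolves to a HuggingFaceEmbedding with that model name;
# bare "local" leaves the model name to the package default.
embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5")

# None explicitly disables embeddings and returns a MockEmbedding.
mock = resolve_embed_model(None)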