Index

Vector store index types.

VectorStoreQueryResult dataclass #

Vector store query result.

Parameters:

nodes (Sequence[BaseNode] | None), default None
similarities (List[float] | None), default None
ids (List[str] | None), default None
Source code in llama-index-core/llama_index/core/vector_stores/types.py
@dataclass
class VectorStoreQueryResult:
    """Vector store query result."""

    nodes: Optional[Sequence[BaseNode]] = None
    similarities: Optional[List[float]] = None
    ids: Optional[List[str]] = None
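
For illustration, a minimal sketch of constructing a result by hand (the node content and score below are made up):

from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import VectorStoreQueryResult

nodes = [TextNode(text="hello world", id_="node-1")]
result = VectorStoreQueryResult(
    nodes=nodes,
    similarities=[0.87],  # one score per node, in the same order as nodes
    ids=["node-1"],
)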

VectorStoreQueryMode #

Bases: str, Enum

Vector store query mode.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
class VectorStoreQueryMode(str, Enum):
    """Vector store query mode."""

    DEFAULT = "default"
    SPARSE = "sparse"
    HYBRID = "hybrid"
    TEXT_SEARCH = "text_search"
    SEMANTIC_HYBRID = "semantic_hybrid"

    # fit learners
    SVM = "svm"
    LOGISTIC_REGRESSION = "logistic_regression"
    LINEAR_REGRESSION = "linear_regression"

    # maximum marginal relevance
    MMR = "mmr"

FilterOperator #

Bases: str, Enum

Vector store filter operator.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
class FilterOperator(str, Enum):
    """Vector store filter operator."""

    # TODO add more operators
    EQ = "=="  # default operator (string, int, float)
    GT = ">"  # greater than (int, float)
    LT = "<"  # less than (int, float)
    NE = "!="  # not equal to (string, int, float)
    GTE = ">="  # greater than or equal to (int, float)
    LTE = "<="  # less than or equal to (int, float)
    IN = "in"  # In array (string or number)
    NIN = "nin"  # Not in array (string or number)
    ANY = "any"  # Contains any (array of strings)
    ALL = "all"  # Contains all (array of strings)
    TEXT_MATCH = "text_match"  # full text match (allows you to search for a specific substring, token or phrase within the text field)
    CONTAINS = "contains"  # metadata array contains value (string or number)
    IS_EMPTY = "is_empty"  # the field is not exist or empty (null or empty array)

FilterCondition #

Bases: str, Enum

Vector store filter conditions to combine different filters.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
class FilterCondition(str, Enum):
    """Vector store filter conditions to combine different filters."""

    # TODO add more conditions
    AND = "and"
    OR = "or"

MetadataFilter #

Bases: BaseModel

Comprehensive metadata filter for vector stores to support more operators.

Value uses Strict* types: int, float, and str are mutually coercible, and non-strict validation previously coerced all values to strings.

See: https://docs.pydantic.dev/latest/usage/types/#strict-types

Parameters:

key (str), required
value (StrictInt | StrictFloat | StrictStr | List[StrictStr] | List[StrictFloat] | List[StrictInt] | None), required
operator (FilterOperator), default FilterOperator.EQ
Source code in llama-index-core/llama_index/core/vector_stores/types.py
class MetadataFilter(BaseModel):
    r"""Comprehensive metadata filter for vector stores to support more operators.

    Value uses Strict* types: int, float, and str are mutually coercible, and
    non-strict validation previously coerced all values to strings.

    See: https://docs.pydantic.dev/latest/usage/types/#strict-types
    """

    key: str
    value: Optional[
        Union[
            StrictInt,
            StrictFloat,
            StrictStr,
            List[StrictStr],
            List[StrictFloat],
            List[StrictInt],
        ]
    ]
    operator: FilterOperator = FilterOperator.EQ

    @classmethod
    def from_dict(
        cls,
        filter_dict: Dict,
    ) -> "MetadataFilter":
        """Create MetadataFilter from dictionary.

        Args:
            filter_dict: Dict with key, value and operator.

        """
        return MetadataFilter.model_validate(filter_dict)
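
A sketch of both construction paths; because of the strict types, the value must already have the right Python type (2020 and "2020" are not interchangeable):

from llama_index.core.vector_stores.types import FilterOperator, MetadataFilter

direct = MetadataFilter(key="author", value="alice")  # operator defaults to EQ
parsed = MetadataFilter.from_dict({"key": "year", "value": 2020, "operator": ">="})
assert parsed.operator == FilterOperator.GTE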

from_dict classmethod #

from_dict(filter_dict: Dict) -> MetadataFilter

Create MetadataFilter from dictionary.

Parameters:

filter_dict (Dict), required. Dict with key, value and operator.
Source code in llama-index-core/llama_index/core/vector_stores/types.py
@classmethod
def from_dict(
    cls,
    filter_dict: Dict,
) -> "MetadataFilter":
    """Create MetadataFilter from dictionary.

    Args:
        filter_dict: Dict with key, value and operator.

    """
    return MetadataFilter.model_validate(filter_dict)

MetadataFilters #

Bases: BaseModel

Metadata filters for vector stores.

Parameters:

filters (List[Union[MetadataFilter, MetadataFilters]]), required
condition (FilterCondition | None), default FilterCondition.AND
Source code in llama-index-core/llama_index/core/vector_stores/types.py
class MetadataFilters(BaseModel):
    """Metadata filters for vector stores."""

    # Exact match filters and Advanced filters with operators like >, <, >=, <=, !=, etc.
    filters: List[Union[MetadataFilter, ExactMatchFilter, "MetadataFilters"]]
    # and/or such conditions for combining different filters
    condition: Optional[FilterCondition] = FilterCondition.AND

    @classmethod
    @deprecated(
        "`from_dict()` is deprecated. "
        "Please use `MetadataFilters(filters=.., condition='and')` directly instead."
    )
    def from_dict(cls, filter_dict: Dict) -> "MetadataFilters":
        """Create MetadataFilters from json."""
        filters = []
        for k, v in filter_dict.items():
            filter = MetadataFilter(key=k, value=v, operator=FilterOperator.EQ)
            filters.append(filter)
        return cls(filters=filters)

    @classmethod
    def from_dicts(
        cls,
        filter_dicts: List[Dict],
        condition: Optional[FilterCondition] = FilterCondition.AND,
    ) -> "MetadataFilters":
        """Create MetadataFilters from dicts.

        This takes in a list of individual MetadataFilter objects, along
        with the condition.

        Args:
            filter_dicts: List of dicts, each dict is a MetadataFilter.
            condition: FilterCondition to combine different filters.

        """
        return cls(
            filters=[
                MetadataFilter.from_dict(filter_dict) for filter_dict in filter_dicts
            ],
            condition=condition,
        )

    def legacy_filters(self) -> List[ExactMatchFilter]:
        """Convert MetadataFilters to legacy ExactMatchFilters."""
        filters = []
        for filter in self.filters:
            if (
                isinstance(filter, MetadataFilters)
                or filter.operator != FilterOperator.EQ
            ):
                raise ValueError(
                    "Vector Store only supports exact match filters. "
                    "Please use ExactMatchFilter or FilterOperator.EQ instead."
                )
            filters.append(ExactMatchFilter(key=filter.key, value=filter.value))
        return filters
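
Filters can nest, since a MetadataFilters may itself appear in the filters list. A sketch of "year >= 2020 AND (author == alice OR author == bob)":

from llama_index.core.vector_stores.types import (
    FilterCondition,
    FilterOperator,
    MetadataFilter,
    MetadataFilters,
)

filters = MetadataFilters(
    filters=[
        MetadataFilter(key="year", value=2020, operator=FilterOperator.GTE),
        MetadataFilters(
            filters=[
                MetadataFilter(key="author", value="alice"),
                MetadataFilter(key="author", value="bob"),
            ],
            condition=FilterCondition.OR,
        ),
    ],
    condition=FilterCondition.AND,
)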

from_dict classmethod #

from_dict(filter_dict: Dict) -> MetadataFilters

Create MetadataFilters from a JSON-style dict.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
@classmethod
@deprecated(
    "`from_dict()` is deprecated. "
    "Please use `MetadataFilters(filters=.., condition='and')` directly instead."
)
def from_dict(cls, filter_dict: Dict) -> "MetadataFilters":
    """Create MetadataFilters from json."""
    filters = []
    for k, v in filter_dict.items():
        filter = MetadataFilter(key=k, value=v, operator=FilterOperator.EQ)
        filters.append(filter)
    return cls(filters=filters)

from_dicts classmethod #

from_dicts(filter_dicts: List[Dict], condition: Optional[FilterCondition] = FilterCondition.AND) -> MetadataFilters

Create MetadataFilters from dicts.

This takes in a list of individual MetadataFilter objects, along with the condition.

Parameters:

filter_dicts (List[Dict]), required. List of dicts, each dict is a MetadataFilter.
condition (Optional[FilterCondition]), default FilterCondition.AND. FilterCondition to combine different filters.
Source code in llama-index-core/llama_index/core/vector_stores/types.py
@classmethod
def from_dicts(
    cls,
    filter_dicts: List[Dict],
    condition: Optional[FilterCondition] = FilterCondition.AND,
) -> "MetadataFilters":
    """Create MetadataFilters from dicts.

    This takes in a list of individual MetadataFilter objects, along
    with the condition.

    Args:
        filter_dicts: List of dicts, each dict is a MetadataFilter.
        condition: FilterCondition to combine different filters.

    """
    return cls(
        filters=[
            MetadataFilter.from_dict(filter_dict) for filter_dict in filter_dicts
        ],
        condition=condition,
    )
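
A sketch of the dict-based path, equivalent to building the two MetadataFilter objects by hand:

from llama_index.core.vector_stores.types import FilterCondition, MetadataFilters

filters = MetadataFilters.from_dicts(
    [
        {"key": "year", "value": 2020, "operator": ">="},
        {"key": "author", "value": "alice"},  # operator defaults to "=="
    ],
    condition=FilterCondition.OR,
)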

legacy_filters #

legacy_filters() -> List[ExactMatchFilter]

Convert MetadataFilters to legacy ExactMatchFilters.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
def legacy_filters(self) -> List[ExactMatchFilter]:
    """Convert MetadataFilters to legacy ExactMatchFilters."""
    filters = []
    for filter in self.filters:
        if (
            isinstance(filter, MetadataFilters)
            or filter.operator != FilterOperator.EQ
        ):
            raise ValueError(
                "Vector Store only supports exact match filters. "
                "Please use ExactMatchFilter or FilterOperator.EQ instead."
            )
        filters.append(ExactMatchFilter(key=filter.key, value=filter.value))
    return filters
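
legacy_filters only succeeds when every entry is a flat equality filter; nested filters or any non-EQ operator raise ValueError. A sketch:

from llama_index.core.vector_stores.types import MetadataFilter, MetadataFilters

flat = MetadataFilters(filters=[MetadataFilter(key="author", value="alice")])
legacy = flat.legacy_filters()  # [ExactMatchFilter(key="author", value="alice")]

ranged = MetadataFilters(
    filters=[MetadataFilter(key="year", value=2020, operator=">")]
)
# ranged.legacy_filters() would raise ValueError: only exact match is supported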

VectorStoreQuerySpec #

Bases: BaseModel

Schema for a structured request to a vector store (i.e. to be converted to a VectorStoreQuery).

Currently only used by VectorIndexAutoRetriever.

Parameters:

query (str), required
filters (List[MetadataFilter]), required
top_k (int | None), default None
Source code in llama-index-core/llama_index/core/vector_stores/types.py
class VectorStoreQuerySpec(BaseModel):
    """Schema for a structured request for vector store
    (i.e. to be converted to a VectorStoreQuery).

    Currently only used by VectorIndexAutoRetriever.
    """

    query: str
    filters: List[MetadataFilter]
    top_k: Optional[int] = None
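
In VectorIndexAutoRetriever the LLM emits this spec as structured output; a hand-built sketch:

from llama_index.core.vector_stores.types import MetadataFilter, VectorStoreQuerySpec

spec = VectorStoreQuerySpec(
    query="recent papers on retrieval",
    filters=[MetadataFilter(key="year", value=2023, operator=">=")],
    top_k=5,
)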

MetadataInfo #

Bases: BaseModel

Information about a metadata filter supported by a vector store.

Currently only used by VectorIndexAutoRetriever.

Parameters:

name (str), required
type (str), required
description (str), required
Source code in llama-index-core/llama_index/core/vector_stores/types.py
class MetadataInfo(BaseModel):
    """Information about a metadata filter supported by a vector store.

    Currently only used by VectorIndexAutoRetriever.
    """

    name: str
    type: str
    description: str

VectorStoreInfo #

Bases: BaseModel

Information about a vector store (content and supported metadata filters).

Currently only used by VectorIndexAutoRetriever.

Parameters:

metadata_info (List[MetadataInfo]), required
content_info (str), required
Source code in llama-index-core/llama_index/core/vector_stores/types.py
class VectorStoreInfo(BaseModel):
    """Information about a vector store (content and supported metadata filters).

    Currently only used by VectorIndexAutoRetriever.
    """

    metadata_info: List[MetadataInfo]
    content_info: str
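
A sketch describing a hypothetical store of research papers, the kind of description VectorIndexAutoRetriever passes to the LLM:

from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo

vector_store_info = VectorStoreInfo(
    content_info="Abstracts of machine learning research papers",
    metadata_info=[
        MetadataInfo(name="year", type="int", description="Publication year"),
        MetadataInfo(name="author", type="str", description="First author name"),
    ],
)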

VectorStoreQuery dataclass #

Vector store query.

Parameters:

query_embedding (List[float] | None), default None
similarity_top_k (int), default 1
doc_ids (List[str] | None), default None
node_ids (List[str] | None), default None
query_str (str | None), default None
output_fields (List[str] | None), default None
embedding_field (str | None), default None
mode (VectorStoreQueryMode), default VectorStoreQueryMode.DEFAULT
alpha (float | None), default None
filters (MetadataFilters | None), default None
mmr_threshold (float | None), default None
sparse_top_k (int | None), default None
hybrid_top_k (int | None), default None
Source code in llama-index-core/llama_index/core/vector_stores/types.py
@dataclass
class VectorStoreQuery:
    """Vector store query."""

    query_embedding: Optional[List[float]] = None
    similarity_top_k: int = 1
    doc_ids: Optional[List[str]] = None
    node_ids: Optional[List[str]] = None
    query_str: Optional[str] = None
    output_fields: Optional[List[str]] = None
    embedding_field: Optional[str] = None

    mode: VectorStoreQueryMode = VectorStoreQueryMode.DEFAULT

    # NOTE: only for hybrid search (0 for bm25, 1 for vector search)
    alpha: Optional[float] = None

    # metadata filters
    filters: Optional[MetadataFilters] = None

    # only for mmr
    mmr_threshold: Optional[float] = None

    # NOTE: currently only used by postgres hybrid search
    sparse_top_k: Optional[int] = None
    # NOTE: return top k results from hybrid search. similarity_top_k is used for dense search top k
    hybrid_top_k: Optional[int] = None
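
A sketch of a hybrid query; the embedding below is a placeholder, where a real query would use an embedding model's output:

from llama_index.core.vector_stores.types import (
    VectorStoreQuery,
    VectorStoreQueryMode,
)

query = VectorStoreQuery(
    query_embedding=[0.1, 0.2, 0.3],  # placeholder vector
    query_str="transformer architectures",
    mode=VectorStoreQueryMode.HYBRID,
    similarity_top_k=5,  # dense top k
    sparse_top_k=10,     # sparse/bm25 top k (backend-dependent)
    alpha=0.5,           # 0 = pure bm25, 1 = pure vector search
)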

VectorStore #

Bases: Protocol

Abstract vector store protocol.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
@runtime_checkable
class VectorStore(Protocol):
    """Abstract vector store protocol."""

    stores_text: bool
    is_embedding_query: bool = True

    @property
    def client(self) -> Any:
        """Get client."""
        ...

    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """Add nodes with embedding to vector store."""
        ...

    async def async_add(
        self,
        nodes: List[BaseNode],
        **kwargs: Any,
    ) -> List[str]:
        """
        Asynchronously add nodes with embeddings to the vector store.
        NOTE: this is not implemented for all vector stores. If not implemented,
        it will just call add synchronously.
        """
        return self.add(nodes)

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id."""
        ...

    async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using ref_doc_id.
        NOTE: this is not implemented for all vector stores. If not implemented,
        it will just call delete synchronously.
        """
        self.delete(ref_doc_id, **delete_kwargs)

    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """Query vector store."""
        ...

    async def aquery(
        self, query: VectorStoreQuery, **kwargs: Any
    ) -> VectorStoreQueryResult:
        """
        Asynchronously query vector store.
        NOTE: this is not implemented for all vector stores. If not implemented,
        it will just call query synchronously.
        """
        return self.query(query, **kwargs)

    def persist(
        self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
    ) -> None:
        return None
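
Because the protocol is decorated with @runtime_checkable, isinstance only checks that all protocol members are present (not their signatures), so any structurally matching class passes. A compressed, hypothetical sketch:

from typing import Any, List, Optional
import fsspec
from llama_index.core.schema import BaseNode
from llama_index.core.vector_stores.types import (
    VectorStore,
    VectorStoreQuery,
    VectorStoreQueryResult,
)

class TinyStore:  # no inheritance; matches the protocol member-for-member
    stores_text = True
    is_embedding_query = True

    @property
    def client(self) -> Any:
        return None

    def add(self, nodes: List[BaseNode], **kw: Any) -> List[str]:
        return [n.node_id for n in nodes]

    async def async_add(self, nodes: List[BaseNode], **kw: Any) -> List[str]:
        return self.add(nodes)

    def delete(self, ref_doc_id: str, **kw: Any) -> None:
        ...

    async def adelete(self, ref_doc_id: str, **kw: Any) -> None:
        self.delete(ref_doc_id)

    def query(self, query: VectorStoreQuery, **kw: Any) -> VectorStoreQueryResult:
        return VectorStoreQueryResult(nodes=[], similarities=[], ids=[])

    async def aquery(self, query: VectorStoreQuery, **kw: Any) -> VectorStoreQueryResult:
        return self.query(query)

    def persist(
        self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
    ) -> None:
        return None

assert isinstance(TinyStore(), VectorStore)  # structural (presence-only) check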

client property #

client: Any

Get client.

add #

add(nodes: List[BaseNode], **add_kwargs: Any) -> List[str]

Add nodes with embeddings to the vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
def add(
    self,
    nodes: List[BaseNode],
    **add_kwargs: Any,
) -> List[str]:
    """Add nodes with embedding to vector store."""
    ...

async_add async #

async_add(nodes: List[BaseNode], **kwargs: Any) -> List[str]

Asynchronously add nodes with embeddings to the vector store. NOTE: this is not implemented for all vector stores. If not implemented, it will just call add synchronously.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def async_add(
    self,
    nodes: List[BaseNode],
    **kwargs: Any,
) -> List[str]:
    """
    Asynchronously add nodes with embeddings to the vector store.
    NOTE: this is not implemented for all vector stores. If not implemented,
    it will just call add synchronously.
    """
    return self.add(nodes)

delete #

delete(ref_doc_id: str, **delete_kwargs: Any) -> None

Delete nodes using ref_doc_id.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Delete nodes using with ref_doc_id."""
    ...

adelete async #

adelete(ref_doc_id: str, **delete_kwargs: Any) -> None

Delete nodes using ref_doc_id. NOTE: this is not implemented for all vector stores. If not implemented, it will just call delete synchronously.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Delete nodes using ref_doc_id.
    NOTE: this is not implemented for all vector stores. If not implemented,
    it will just call delete synchronously.
    """
    self.delete(ref_doc_id, **delete_kwargs)

query #

query(query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult

Query vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
    """Query vector store."""
    ...

aquery async #

aquery(query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult

Asynchronously query vector store. NOTE: this is not implemented for all vector stores. If not implemented, it will just call query synchronously.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def aquery(
    self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
    """
    Asynchronously query vector store.
    NOTE: this is not implemented for all vector stores. If not implemented,
    it will just call query synchronously.
    """
    return self.query(query, **kwargs)

BasePydanticVectorStore #

Bases: BaseComponent, ABC

Abstract vector store protocol.

Parameters:

stores_text (bool), required
is_embedding_query (bool), default True
Source code in llama-index-core/llama_index/core/vector_stores/types.py
class BasePydanticVectorStore(BaseComponent, ABC):
    """Abstract vector store protocol."""

    model_config = ConfigDict(arbitrary_types_allowed=True)
    stores_text: bool
    is_embedding_query: bool = True

    @property
    @abstractmethod
    def client(self) -> Any:
        """Get client."""

    def get_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
    ) -> List[BaseNode]:
        """Get nodes from vector store."""
        raise NotImplementedError("get_nodes not implemented")

    async def aget_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
    ) -> List[BaseNode]:
        """Asynchronously get nodes from vector store."""
        return self.get_nodes(node_ids, filters)

    @abstractmethod
    def add(
        self,
        nodes: Sequence[BaseNode],
        **kwargs: Any,
    ) -> List[str]:
        """Add nodes to vector store."""

    async def async_add(
        self,
        nodes: Sequence[BaseNode],
        **kwargs: Any,
    ) -> List[str]:
        """
        Asynchronously add nodes to vector store.
        NOTE: this is not implemented for all vector stores. If not implemented,
        it will just call add synchronously.
        """
        return self.add(nodes, **kwargs)

    @abstractmethod
    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id."""

    async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using ref_doc_id.
        NOTE: this is not implemented for all vector stores. If not implemented,
        it will just call delete synchronously.
        """
        self.delete(ref_doc_id, **delete_kwargs)

    def delete_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
        **delete_kwargs: Any,
    ) -> None:
        """Delete nodes from vector store."""
        raise NotImplementedError("delete_nodes not implemented")

    async def adelete_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
        **delete_kwargs: Any,
    ) -> None:
        """Asynchronously delete nodes from vector store."""
        self.delete_nodes(node_ids, filters)

    def clear(self) -> None:
        """Clear all nodes from configured vector store."""
        raise NotImplementedError("clear not implemented")

    async def aclear(self) -> None:
        """Asynchronously clear all nodes from configured vector store."""
        self.clear()

    @abstractmethod
    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """Query vector store."""

    async def aquery(
        self, query: VectorStoreQuery, **kwargs: Any
    ) -> VectorStoreQueryResult:
        """
        Asynchronously query vector store.
        NOTE: this is not implemented for all vector stores. If not implemented,
        it will just call query synchronously.
        """
        return self.query(query, **kwargs)

    def persist(
        self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
    ) -> None:
        return None
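
Concrete integrations subclass this and implement at least client, add, delete, and query. A deliberately naive, hypothetical in-memory backend (brute-force dot-product ranking, no persistence) to show the shape:

from typing import Any, Dict, List, Sequence
from pydantic import PrivateAttr
from llama_index.core.schema import BaseNode
from llama_index.core.vector_stores.types import (
    BasePydanticVectorStore,
    VectorStoreQuery,
    VectorStoreQueryResult,
)

class InMemoryStore(BasePydanticVectorStore):
    """Toy backend: keeps nodes in a dict, scores by dot product."""

    stores_text: bool = True
    _nodes: Dict[str, BaseNode] = PrivateAttr(default_factory=dict)

    @property
    def client(self) -> Any:
        return None  # no external client for this toy store

    def add(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[str]:
        for node in nodes:
            self._nodes[node.node_id] = node
        return [node.node_id for node in nodes]

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        self._nodes = {
            nid: n for nid, n in self._nodes.items() if n.ref_doc_id != ref_doc_id
        }

    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        # Brute-force ranking; assumes query.query_embedding is set.
        scored = [
            (sum(a * b for a, b in zip(query.query_embedding, n.embedding)), n)
            for n in self._nodes.values()
            if n.embedding is not None
        ]
        scored.sort(key=lambda pair: pair[0], reverse=True)
        top = scored[: query.similarity_top_k]
        return VectorStoreQueryResult(
            nodes=[n for _, n in top],
            similarities=[s for s, _ in top],
            ids=[n.node_id for _, n in top],
        )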

client abstractmethod property #

client: Any

Get client.

get_nodes #

get_nodes(node_ids: Optional[List[str]] = None, filters: Optional[MetadataFilters] = None) -> List[BaseNode]

Get nodes from vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
def get_nodes(
    self,
    node_ids: Optional[List[str]] = None,
    filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
    """Get nodes from vector store."""
    raise NotImplementedError("get_nodes not implemented")

aget_nodes async #

aget_nodes(node_ids: Optional[List[str]] = None, filters: Optional[MetadataFilters] = None) -> List[BaseNode]

Asynchronously get nodes from vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def aget_nodes(
    self,
    node_ids: Optional[List[str]] = None,
    filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
    """Asynchronously get nodes from vector store."""
    return self.get_nodes(node_ids, filters)

add abstractmethod #

add(nodes: Sequence[BaseNode], **kwargs: Any) -> List[str]

Add nodes to vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
@abstractmethod
def add(
    self,
    nodes: Sequence[BaseNode],
    **kwargs: Any,
) -> List[str]:
    """Add nodes to vector store."""

async_add async #

async_add(nodes: Sequence[BaseNode], **kwargs: Any) -> List[str]

Asynchronously add nodes to vector store. NOTE: this is not implemented for all vector stores. If not implemented, it will just call add synchronously.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def async_add(
    self,
    nodes: Sequence[BaseNode],
    **kwargs: Any,
) -> List[str]:
    """
    Asynchronously add nodes to vector store.
    NOTE: this is not implemented for all vector stores. If not implemented,
    it will just call add synchronously.
    """
    return self.add(nodes, **kwargs)

delete abstractmethod #

delete(ref_doc_id: str, **delete_kwargs: Any) -> None

Delete nodes using ref_doc_id.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
@abstractmethod
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Delete nodes using with ref_doc_id."""

adelete async #

adelete(ref_doc_id: str, **delete_kwargs: Any) -> None

Delete nodes using ref_doc_id. NOTE: this is not implemented for all vector stores. If not implemented, it will just call delete synchronously.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Delete nodes using ref_doc_id.
    NOTE: this is not implemented for all vector stores. If not implemented,
    it will just call delete synchronously.
    """
    self.delete(ref_doc_id, **delete_kwargs)

delete_nodes #

delete_nodes(node_ids: Optional[List[str]] = None, filters: Optional[MetadataFilters] = None, **delete_kwargs: Any) -> None

Delete nodes from vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
def delete_nodes(
    self,
    node_ids: Optional[List[str]] = None,
    filters: Optional[MetadataFilters] = None,
    **delete_kwargs: Any,
) -> None:
    """Delete nodes from vector store."""
    raise NotImplementedError("delete_nodes not implemented")

adelete_nodes async #

adelete_nodes(node_ids: Optional[List[str]] = None, filters: Optional[MetadataFilters] = None, **delete_kwargs: Any) -> None

Asynchronously delete nodes from vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def adelete_nodes(
    self,
    node_ids: Optional[List[str]] = None,
    filters: Optional[MetadataFilters] = None,
    **delete_kwargs: Any,
) -> None:
    """Asynchronously delete nodes from vector store."""
    self.delete_nodes(node_ids, filters)

clear #

clear() -> None

Clear all nodes from configured vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
def clear(self) -> None:
    """Clear all nodes from configured vector store."""
    raise NotImplementedError("clear not implemented")

aclear async #

aclear() -> None

Asynchronously clear all nodes from configured vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def aclear(self) -> None:
    """Asynchronously clear all nodes from configured vector store."""
    self.clear()

query abstractmethod #

query(query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult

Query vector store.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
@abstractmethod
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
    """Query vector store."""

aquery async #

aquery(query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult

Asynchronously query vector store. NOTE: this is not implemented for all vector stores. If not implemented, it will just call query synchronously.

Source code in llama-index-core/llama_index/core/vector_stores/types.py
async def aquery(
    self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
    """
    Asynchronously query vector store.
    NOTE: this is not implemented for all vector stores. If not implemented,
    it will just call query synchronously.
    """
    return self.query(query, **kwargs)