Index

Evaluation modules.

BaseEvaluator #

Bases: PromptMixin

Base Evaluator class.

Source code in llama-index-core/llama_index/core/evaluation/base.py
class BaseEvaluator(PromptMixin):
    """Base Evaluator class."""

    def _get_prompt_modules(self) -> PromptMixinType:
        """Get prompt modules."""
        return {}

    def evaluate(
        self,
        query: Optional[str] = None,
        response: Optional[str] = None,
        contexts: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> EvaluationResult:
        """Run evaluation with query string, retrieved contexts,
        and generated response string.

        Subclasses can override this method to provide custom evaluation logic and
        take in additional arguments.
        """
        return asyncio_run(
            self.aevaluate(
                query=query,
                response=response,
                contexts=contexts,
                **kwargs,
            )
        )

    @abstractmethod
    async def aevaluate(
        self,
        query: Optional[str] = None,
        response: Optional[str] = None,
        contexts: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> EvaluationResult:
        """Run evaluation with query string, retrieved contexts,
        and generated response string.

        Subclasses can override this method to provide custom evaluation logic and
        take in additional arguments.
        """
        raise NotImplementedError

    def evaluate_response(
        self,
        query: Optional[str] = None,
        response: Optional[Response] = None,
        **kwargs: Any,
    ) -> EvaluationResult:
        """Run evaluation with query string and generated Response object.

        Subclasses can override this method to provide custom evaluation logic and
        take in additional arguments.
        """
        response_str: Optional[str] = None
        contexts: Optional[Sequence[str]] = None
        if response is not None:
            response_str = response.response
            contexts = [node.get_content() for node in response.source_nodes]

        return self.evaluate(
            query=query, response=response_str, contexts=contexts, **kwargs
        )

    async def aevaluate_response(
        self,
        query: Optional[str] = None,
        response: Optional[Response] = None,
        **kwargs: Any,
    ) -> EvaluationResult:
        """Run evaluation with query string and generated Response object.

        Subclasses can override this method to provide custom evaluation logic and
        take in additional arguments.
        """
        response_str: Optional[str] = None
        contexts: Optional[Sequence[str]] = None
        if response is not None:
            response_str = response.response
            contexts = [node.get_content() for node in response.source_nodes]

        return await self.aevaluate(
            query=query, response=response_str, contexts=contexts, **kwargs
        )
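
Example (illustrative sketch, not part of the library): a minimal concrete subclass that implements only aevaluate with a naive keyword-overlap check. The class name KeywordOverlapEvaluator and its scoring rule are invented here; the _get_prompts/_update_prompts stubs are included because PromptMixin also declares those hooks as abstract in current llama-index-core releases.

```python
from typing import Any, Optional, Sequence

from llama_index.core.evaluation import BaseEvaluator, EvaluationResult


class KeywordOverlapEvaluator(BaseEvaluator):
    """Toy evaluator: passes if any query keyword appears in the response."""

    def _get_prompts(self):
        # No prompts used by this toy evaluator (PromptMixin requires this hook).
        return {}

    def _update_prompts(self, prompts_dict) -> None:
        # Nothing to update for this toy evaluator.
        pass

    async def aevaluate(
        self,
        query: Optional[str] = None,
        response: Optional[str] = None,
        contexts: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> EvaluationResult:
        keywords = set((query or "").lower().split())
        hits = sum(1 for word in (response or "").lower().split() if word in keywords)
        return EvaluationResult(
            query=query,
            response=response,
            contexts=contexts,
            passing=hits > 0,
            score=float(hits),
            feedback=f"{hits} query keyword(s) found in the response.",
        )
```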

evaluate #

evaluate(query: Optional[str] = None, response: Optional[str] = None, contexts: Optional[Sequence[str]] = None, **kwargs: Any) -> EvaluationResult

Run evaluation with query string, retrieved contexts, and generated response string.

Subclasses can override this method to provide custom evaluation logic and take in additional arguments.

Source code in llama-index-core/llama_index/core/evaluation/base.py
def evaluate(
    self,
    query: Optional[str] = None,
    response: Optional[str] = None,
    contexts: Optional[Sequence[str]] = None,
    **kwargs: Any,
) -> EvaluationResult:
    """Run evaluation with query string, retrieved contexts,
    and generated response string.

    Subclasses can override this method to provide custom evaluation logic and
    take in additional arguments.
    """
    return asyncio_run(
        self.aevaluate(
            query=query,
            response=response,
            contexts=contexts,
            **kwargs,
        )
    )
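
Usage sketch (illustrative): evaluate simply wraps aevaluate with asyncio_run, so the synchronous and asynchronous calls below are equivalent. FaithfulnessEvaluator stands in for any BaseEvaluator subclass and assumes an LLM is configured (e.g. via Settings.llm).

```python
import asyncio

from llama_index.core.evaluation import FaithfulnessEvaluator

evaluator = FaithfulnessEvaluator()  # assumes a default LLM is configured

query = "What is the capital of France?"
response = "Paris is the capital of France."
contexts = ["Paris is the capital and most populous city of France."]

# Synchronous entry point (wraps aevaluate in an event loop).
result = evaluator.evaluate(query=query, response=response, contexts=contexts)

# Equivalent asynchronous entry point.
result = asyncio.run(
    evaluator.aevaluate(query=query, response=response, contexts=contexts)
)
print(result.passing, result.score)
```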

aevaluate abstractmethod async #

aevaluate(query: Optional[str] = None, response: Optional[str] = None, contexts: Optional[Sequence[str]] = None, **kwargs: Any) -> EvaluationResult

Run evaluation with query string, retrieved contexts, and generated response string.

Subclasses can override this method to provide custom evaluation logic and take in additional arguments.

Source code in llama-index-core/llama_index/core/evaluation/base.py
@abstractmethod
async def aevaluate(
    self,
    query: Optional[str] = None,
    response: Optional[str] = None,
    contexts: Optional[Sequence[str]] = None,
    **kwargs: Any,
) -> EvaluationResult:
    """Run evaluation with query string, retrieved contexts,
    and generated response string.

    Subclasses can override this method to provide custom evaluation logic and
    take in additional arguments.
    """
    raise NotImplementedError

evaluate_response #

evaluate_response(query: Optional[str] = None, response: Optional[Response] = None, **kwargs: Any) -> EvaluationResult

Run evaluation with query string and generated Response object.

Subclasses can override this method to provide custom evaluation logic and take in additional arguments.

Source code in llama-index-core/llama_index/core/evaluation/base.py
def evaluate_response(
    self,
    query: Optional[str] = None,
    response: Optional[Response] = None,
    **kwargs: Any,
) -> EvaluationResult:
    """Run evaluation with query string and generated Response object.

    Subclasses can override this method to provide custom evaluation logic and
    take in additional arguments.
    """
    response_str: Optional[str] = None
    contexts: Optional[Sequence[str]] = None
    if response is not None:
        response_str = response.response
        contexts = [node.get_content() for node in response.source_nodes]

    return self.evaluate(
        query=query, response=response_str, contexts=contexts, **kwargs
    )
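
Usage sketch (illustrative): the Response path pulls response.response and the source-node contents out of the object before delegating to evaluate. The index and ./data directory below are assumptions for the example; any query engine that returns a Response works.

```python
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.evaluation import RelevancyEvaluator

# Illustrative setup; any query engine that returns a Response object works.
documents = SimpleDirectoryReader("./data").load_data()
query_engine = VectorStoreIndex.from_documents(documents).as_query_engine()

query = "What did the author work on?"
response = query_engine.query(query)  # Response with .response and .source_nodes

evaluator = RelevancyEvaluator()
eval_result = evaluator.evaluate_response(query=query, response=response)
print(eval_result.passing, eval_result.feedback)
```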

aevaluate_response async #

aevaluate_response(query: Optional[str] = None, response: Optional[Response] = None, **kwargs: Any) -> EvaluationResult

Run evaluation with query string and generated Response object.

Subclasses can override this method to provide custom evaluation logic and take in additional arguments.

Source code in llama-index-core/llama_index/core/evaluation/base.py
async def aevaluate_response(
    self,
    query: Optional[str] = None,
    response: Optional[Response] = None,
    **kwargs: Any,
) -> EvaluationResult:
    """Run evaluation with query string and generated Response object.

    Subclasses can override this method to provide custom evaluation logic and
    take in additional arguments.
    """
    response_str: Optional[str] = None
    contexts: Optional[Sequence[str]] = None
    if response is not None:
        response_str = response.response
        contexts = [node.get_content() for node in response.source_nodes]

    return await self.aevaluate(
        query=query, response=response_str, contexts=contexts, **kwargs
    )

EvaluationResult #

Bases: BaseModel

Evaluation result.

Output of a BaseEvaluator.

Source code in llama-index-core/llama_index/core/evaluation/base.py
class EvaluationResult(BaseModel):
    """Evaluation result.

    Output of a BaseEvaluator.
    """

    query: Optional[str] = Field(default=None, description="Query string")
    contexts: Optional[Sequence[str]] = Field(
        default=None, description="Context strings"
    )
    response: Optional[str] = Field(default=None, description="Response string")
    passing: Optional[bool] = Field(
        default=None, description="Binary evaluation result (passing or not)"
    )
    feedback: Optional[str] = Field(
        default=None, description="Feedback or reasoning for the response"
    )
    score: Optional[float] = Field(default=None, description="Score for the response")
    pairwise_source: Optional[str] = Field(
        default=None,
        description=(
            "Used only for pairwise and specifies whether it is from original order of"
            " presented answers or flipped order"
        ),
    )
    invalid_result: bool = Field(
        default=False, description="Whether the evaluation result is an invalid one."
    )
    invalid_reason: Optional[str] = Field(
        default=None, description="Reason for invalid evaluation."
    )
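
Usage sketch (illustrative): a result is typically consumed by checking invalid_result first, then passing/score/feedback. The field values below are made up for the example.

```python
from llama_index.core.evaluation import EvaluationResult

result = EvaluationResult(
    query="What is the capital of France?",
    response="Paris.",
    passing=True,
    score=1.0,
    feedback="The response is supported by the retrieved context.",
)

if result.invalid_result:
    print(f"Invalid evaluation: {result.invalid_reason}")
elif result.passing:
    print(f"PASS (score={result.score}): {result.feedback}")
else:
    print(f"FAIL (score={result.score}): {result.feedback}")
```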

BatchEvalRunner #

Batch evaluation runner.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| evaluators | Dict[str, BaseEvaluator] | Dictionary of evaluators. | required |
| workers | int | Number of workers to use for parallelization. | 2 |
| show_progress | bool | Whether to show progress bars. | False |
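
Construction sketch (illustrative): the dictionary keys ("faithfulness", "relevancy") are arbitrary labels that become the keys of the returned results dict; the stock FaithfulnessEvaluator and RelevancyEvaluator are used here as representative evaluators.

```python
from llama_index.core.evaluation import (
    BatchEvalRunner,
    FaithfulnessEvaluator,
    RelevancyEvaluator,
)

runner = BatchEvalRunner(
    evaluators={
        "faithfulness": FaithfulnessEvaluator(),
        "relevancy": RelevancyEvaluator(),
    },
    workers=8,           # max concurrent evaluation jobs
    show_progress=True,  # show a progress bar while gathering results
)
```
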
Source code in llama-index-core/llama_index/core/evaluation/batch_runner.py
class BatchEvalRunner:
    """
    Batch evaluation runner.

    Args:
        evaluators (Dict[str, BaseEvaluator]): Dictionary of evaluators.
        workers (int): Number of workers to use for parallelization.
            Defaults to 2.
        show_progress (bool): Whether to show progress bars. Defaults to False.

    """

    def __init__(
        self,
        evaluators: Dict[str, BaseEvaluator],
        workers: int = 2,
        show_progress: bool = False,
    ):
        self.evaluators = evaluators
        self.workers = workers
        self.semaphore = asyncio.Semaphore(self.workers)
        self.show_progress = show_progress
        self.asyncio_mod = asyncio_module(show_progress=self.show_progress)

    def _format_results(
        self, results: List[Tuple[str, EvaluationResult]]
    ) -> Dict[str, List[EvaluationResult]]:
        """Format results."""
        # Format results
        results_dict: Dict[str, List[EvaluationResult]] = {
            name: [] for name in self.evaluators
        }
        for name, result in results:
            results_dict[name].append(result)

        return results_dict

    def _validate_and_clean_inputs(
        self,
        *inputs_list: Any,
    ) -> List[Any]:
        """
        Validate and clean input lists.

        Enforce that at least one of the inputs is not None.
        Make sure that all inputs have the same length.
        Make sure that None inputs are replaced with [None] * len(inputs).

        """
        assert len(inputs_list) > 0
        # first, make sure at least one of queries or response_strs is not None
        input_len: Optional[int] = None
        for inputs in inputs_list:
            if inputs is not None:
                input_len = len(inputs)
                break
        if input_len is None:
            raise ValueError("At least one item in inputs_list must be provided.")

        new_inputs_list = []
        for inputs in inputs_list:
            if inputs is None:
                new_inputs_list.append([None] * input_len)
            else:
                if len(inputs) != input_len:
                    raise ValueError("All inputs must have the same length.")
                new_inputs_list.append(inputs)
        return new_inputs_list

    def _validate_nested_eval_kwargs_types(
        self, eval_kwargs_lists: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Ensure eval kwargs are acceptable format.
            either a Dict[str, List] or a Dict[str, Dict[str, List]].

        Allows use of different kwargs (e.g. references) with different evaluators
            while keeping backwards compatibility for single evaluators

        """
        if not isinstance(eval_kwargs_lists, dict):
            raise ValueError(
                f"eval_kwargs_lists must be a dict. Got {eval_kwargs_lists}"
            )

        for evaluator, eval_kwargs in eval_kwargs_lists.items():
            if isinstance(eval_kwargs, list):
                # maintain backwards compatibility - for use with single evaluator
                eval_kwargs_lists[evaluator] = self._validate_and_clean_inputs(
                    eval_kwargs
                )[0]
            elif isinstance(eval_kwargs, dict):
                # for use with multiple evaluators
                for k in eval_kwargs:
                    v = eval_kwargs[k]
                    if not isinstance(v, list):
                        raise ValueError(
                            f"nested inner values in eval_kwargs must be a list. Got {evaluator}: {k}: {v}"
                        )
                    eval_kwargs_lists[evaluator][k] = self._validate_and_clean_inputs(
                        v
                    )[0]
            else:
                raise ValueError(
                    f"eval_kwargs must be a list or a dict. Got {evaluator}: {eval_kwargs}"
                )
        return eval_kwargs_lists

    def _get_eval_kwargs(
        self, eval_kwargs_lists: Dict[str, Any], idx: int
    ) -> Dict[str, Any]:
        """
        Get eval kwargs from eval_kwargs_lists at a given idx.

        Since eval_kwargs_lists is a dict of lists, we need to get the
        value at idx for each key.

        """
        return {k: v[idx] for k, v in eval_kwargs_lists.items()}

    async def aevaluate_response_strs(
        self,
        queries: Optional[List[str]] = None,
        response_strs: Optional[List[str]] = None,
        contexts_list: Optional[List[List[str]]] = None,
        **eval_kwargs_lists: Dict[str, Any],
    ) -> Dict[str, List[EvaluationResult]]:
        """
        Evaluate query, response pairs.

        This evaluates queries, responses, contexts as string inputs.
        Can supply additional kwargs to the evaluator in eval_kwargs_lists.

        Args:
            queries (Optional[List[str]]): List of query strings. Defaults to None.
            response_strs (Optional[List[str]]): List of response strings.
                Defaults to None.
            contexts_list (Optional[List[List[str]]]): List of context lists.
                Defaults to None.
            **eval_kwargs_lists (Dict[str, Any]): Dict of either dicts or lists
                of kwargs to pass to evaluator. Defaults to None.
                    multiple evaluators: {evaluator: {kwarg: [list of values]},...}
                    single evaluator:    {kwarg: [list of values]}

        """
        queries, response_strs, contexts_list = self._validate_and_clean_inputs(
            queries, response_strs, contexts_list
        )
        eval_kwargs_lists = self._validate_nested_eval_kwargs_types(eval_kwargs_lists)

        # boolean to check if using multi kwarg evaluator
        multi_kwargs = len(eval_kwargs_lists) > 0 and isinstance(
            next(iter(eval_kwargs_lists.values())), dict
        )

        # run evaluations
        eval_jobs = []
        for idx, query in enumerate(cast(List[str], queries)):
            response_str = cast(List, response_strs)[idx]
            contexts = cast(List, contexts_list)[idx]
            for name, evaluator in self.evaluators.items():
                if multi_kwargs:
                    # multi-evaluator - get appropriate runtime kwargs if present
                    kwargs = (
                        eval_kwargs_lists[name] if name in eval_kwargs_lists else {}
                    )
                else:
                    # single evaluator (maintain backwards compatibility)
                    kwargs = eval_kwargs_lists
                eval_kwargs = self._get_eval_kwargs(kwargs, idx)
                eval_jobs.append(
                    eval_worker(
                        self.semaphore,
                        evaluator,
                        name,
                        query=query,
                        response_str=response_str,
                        contexts=contexts,
                        eval_kwargs=eval_kwargs,
                    )
                )
        results = await self.asyncio_mod.gather(*eval_jobs)

        # Format results
        return self._format_results(results)

    async def aevaluate_responses(
        self,
        queries: Optional[List[str]] = None,
        responses: Optional[List[Response]] = None,
        **eval_kwargs_lists: Dict[str, Any],
    ) -> Dict[str, List[EvaluationResult]]:
        """
        Evaluate query, response pairs.

        This evaluates queries and response objects.

        Args:
            queries (Optional[List[str]]): List of query strings. Defaults to None.
            responses (Optional[List[Response]]): List of response objects.
                Defaults to None.
            **eval_kwargs_lists (Dict[str, Any]): Dict of either dicts or lists
                of kwargs to pass to evaluator. Defaults to None.
                    multiple evaluators: {evaluator: {kwarg: [list of values]},...}
                    single evaluator:    {kwarg: [list of values]}

        """
        queries, responses = self._validate_and_clean_inputs(queries, responses)
        eval_kwargs_lists = self._validate_nested_eval_kwargs_types(eval_kwargs_lists)

        # boolean to check if using multi kwarg evaluator
        multi_kwargs = len(eval_kwargs_lists) > 0 and isinstance(
            next(iter(eval_kwargs_lists.values())), dict
        )

        # run evaluations
        eval_jobs = []
        for idx, query in enumerate(cast(List[str], queries)):
            response = cast(List, responses)[idx]
            for name, evaluator in self.evaluators.items():
                if multi_kwargs:
                    # multi-evaluator - get appropriate runtime kwargs if present
                    kwargs = (
                        eval_kwargs_lists[name] if name in eval_kwargs_lists else {}
                    )
                else:
                    # single evaluator (maintain backwards compatibility)
                    kwargs = eval_kwargs_lists
                eval_kwargs = self._get_eval_kwargs(kwargs, idx)
                eval_jobs.append(
                    eval_response_worker(
                        self.semaphore,
                        evaluator,
                        name,
                        query=query,
                        response=response,
                        eval_kwargs=eval_kwargs,
                    )
                )
        results = await self.asyncio_mod.gather(*eval_jobs)

        # Format results
        return self._format_results(results)

    async def aevaluate_queries(
        self,
        query_engine: BaseQueryEngine,
        queries: Optional[List[str]] = None,
        **eval_kwargs_lists: Dict[str, Any],
    ) -> Dict[str, List[EvaluationResult]]:
        """
        Evaluate queries.

        Args:
            query_engine (BaseQueryEngine): Query engine.
            queries (Optional[List[str]]): List of query strings. Defaults to None.
            **eval_kwargs_lists (Dict[str, Any]): Dict of lists of kwargs to
                pass to evaluator. Defaults to None.

        """
        if queries is None:
            raise ValueError("`queries` must be provided")

        # gather responses
        response_jobs = []
        for query in queries:
            response_jobs.append(response_worker(self.semaphore, query_engine, query))
        responses = await self.asyncio_mod.gather(*response_jobs)

        return await self.aevaluate_responses(
            queries=queries,
            responses=responses,
            **eval_kwargs_lists,
        )

    def evaluate_response_strs(
        self,
        queries: Optional[List[str]] = None,
        response_strs: Optional[List[str]] = None,
        contexts_list: Optional[List[List[str]]] = None,
        **eval_kwargs_lists: Dict[str, Any],
    ) -> Dict[str, List[EvaluationResult]]:
        """
        Evaluate query, response pairs.

        Sync version of aevaluate_response_strs.

        """
        return asyncio_run(
            self.aevaluate_response_strs(
                queries=queries,
                response_strs=response_strs,
                contexts_list=contexts_list,
                **eval_kwargs_lists,
            )
        )

    def evaluate_responses(
        self,
        queries: Optional[List[str]] = None,
        responses: Optional[List[Response]] = None,
        **eval_kwargs_lists: Dict[str, Any],
    ) -> Dict[str, List[EvaluationResult]]:
        """
        Evaluate query, response objs.

        Sync version of aevaluate_responses.

        """
        return asyncio_run(
            self.aevaluate_responses(
                queries=queries,
                responses=responses,
                **eval_kwargs_lists,
            )
        )

    def evaluate_queries(
        self,
        query_engine: BaseQueryEngine,
        queries: Optional[List[str]] = None,
        **eval_kwargs_lists: Dict[str, Any],
    ) -> Dict[str, List[EvaluationResult]]:
        """
        Evaluate queries.

        Sync version of aevaluate_queries.

        """
        return asyncio_run(
            self.aevaluate_queries(
                query_engine=query_engine,
                queries=queries,
                **eval_kwargs_lists,
            )
        )

    def upload_eval_results(
        self,
        project_name: str,
        app_name: str,
        results: Dict[str, List[EvaluationResult]],
    ) -> None:
        """
        Upload the evaluation results to LlamaCloud.

        Args:
            project_name (str): The name of the project.
            app_name (str): The name of the app.
            results (Dict[str, List[EvaluationResult]]):
                The evaluation results, a mapping of metric name to a list of EvaluationResult objects.

        Examples:
            ```python
            results = batch_runner.evaluate_responses(...)

            batch_runner.upload_eval_results(
                project_name="my_project",
                app_name="my_app",
                results=results
            )
            ```
        """
        from llama_index.core.evaluation.eval_utils import upload_eval_results

        upload_eval_results(
            project_name=project_name, app_name=app_name, results=results
        )

aevaluate_response_strs async #

aevaluate_response_strs(queries: Optional[List[str]] = None, response_strs: Optional[List[str]] = None, contexts_list: Optional[List[List[str]]] = None, **eval_kwargs_lists: Dict[str, Any]) -> Dict[str, List[EvaluationResult]]

Evaluate query, response pairs.

This evaluates queries, responses, contexts as string inputs. Can supply additional kwargs to the evaluator in eval_kwargs_lists.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| queries | Optional[List[str]] | List of query strings. | None |
| response_strs | Optional[List[str]] | List of response strings. | None |
| contexts_list | Optional[List[List[str]]] | List of context lists. | None |
| **eval_kwargs_lists | Dict[str, Any] | Kwargs to pass to each evaluator, either per evaluator ({evaluator: {kwarg: [list of values]}, ...}) or for a single evaluator ({kwarg: [list of values]}). | {} |

Source code in llama-index-core/llama_index/core/evaluation/batch_runner.py
async def aevaluate_response_strs(
    self,
    queries: Optional[List[str]] = None,
    response_strs: Optional[List[str]] = None,
    contexts_list: Optional[List[List[str]]] = None,
    **eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
    """
    Evaluate query, response pairs.

    This evaluates queries, responses, contexts as string inputs.
    Can supply additional kwargs to the evaluator in eval_kwargs_lists.

    Args:
        queries (Optional[List[str]]): List of query strings. Defaults to None.
        response_strs (Optional[List[str]]): List of response strings.
            Defaults to None.
        contexts_list (Optional[List[List[str]]]): List of context lists.
            Defaults to None.
        **eval_kwargs_lists (Dict[str, Any]): Dict of either dicts or lists
            of kwargs to pass to evaluator. Defaults to None.
                multiple evaluators: {evaluator: {kwarg: [list of values]},...}
                single evaluator:    {kwarg: [list of values]}

    """
    queries, response_strs, contexts_list = self._validate_and_clean_inputs(
        queries, response_strs, contexts_list
    )
    eval_kwargs_lists = self._validate_nested_eval_kwargs_types(eval_kwargs_lists)

    # boolean to check if using multi kwarg evaluator
    multi_kwargs = len(eval_kwargs_lists) > 0 and isinstance(
        next(iter(eval_kwargs_lists.values())), dict
    )

    # run evaluations
    eval_jobs = []
    for idx, query in enumerate(cast(List[str], queries)):
        response_str = cast(List, response_strs)[idx]
        contexts = cast(List, contexts_list)[idx]
        for name, evaluator in self.evaluators.items():
            if multi_kwargs:
                # multi-evaluator - get appropriate runtime kwargs if present
                kwargs = (
                    eval_kwargs_lists[name] if name in eval_kwargs_lists else {}
                )
            else:
                # single evaluator (maintain backwards compatibility)
                kwargs = eval_kwargs_lists
            eval_kwargs = self._get_eval_kwargs(kwargs, idx)
            eval_jobs.append(
                eval_worker(
                    self.semaphore,
                    evaluator,
                    name,
                    query=query,
                    response_str=response_str,
                    contexts=contexts,
                    eval_kwargs=eval_kwargs,
                )
            )
    results = await self.asyncio_mod.gather(*eval_jobs)

    # Format results
    return self._format_results(results)
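
Usage sketch (illustrative): plain-string inputs must all have the same length (missing lists are padded with None internally). The single-query lists below are made up for the example.

```python
import asyncio

from llama_index.core.evaluation import BatchEvalRunner, FaithfulnessEvaluator

runner = BatchEvalRunner(evaluators={"faithfulness": FaithfulnessEvaluator()}, workers=4)

results = asyncio.run(
    runner.aevaluate_response_strs(
        queries=["What is the capital of France?"],
        response_strs=["Paris is the capital of France."],
        contexts_list=[["Paris is the capital and most populous city of France."]],
    )
)
# results maps evaluator name -> list of EvaluationResult, one per query
print(results["faithfulness"][0].passing)
```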

aevaluate_responses async #

aevaluate_responses(queries: Optional[List[str]] = None, responses: Optional[List[Response]] = None, **eval_kwargs_lists: Dict[str, Any]) -> Dict[str, List[EvaluationResult]]

Evaluate query, response pairs.

This evaluates queries and response objects.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| queries | Optional[List[str]] | List of query strings. | None |
| responses | Optional[List[Response]] | List of response objects. | None |
| **eval_kwargs_lists | Dict[str, Any] | Kwargs to pass to each evaluator, either per evaluator ({evaluator: {kwarg: [list of values]}, ...}) or for a single evaluator ({kwarg: [list of values]}). | {} |

Source code in llama-index-core/llama_index/core/evaluation/batch_runner.py
async def aevaluate_responses(
    self,
    queries: Optional[List[str]] = None,
    responses: Optional[List[Response]] = None,
    **eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
    """
    Evaluate query, response pairs.

    This evaluates queries and response objects.

    Args:
        queries (Optional[List[str]]): List of query strings. Defaults to None.
        responses (Optional[List[Response]]): List of response objects.
            Defaults to None.
        **eval_kwargs_lists (Dict[str, Any]): Dict of either dicts or lists
            of kwargs to pass to evaluator. Defaults to None.
                multiple evaluators: {evaluator: {kwarg: [list of values]},...}
                single evaluator:    {kwarg: [list of values]}

    """
    queries, responses = self._validate_and_clean_inputs(queries, responses)
    eval_kwargs_lists = self._validate_nested_eval_kwargs_types(eval_kwargs_lists)

    # boolean to check if using multi kwarg evaluator
    multi_kwargs = len(eval_kwargs_lists) > 0 and isinstance(
        next(iter(eval_kwargs_lists.values())), dict
    )

    # run evaluations
    eval_jobs = []
    for idx, query in enumerate(cast(List[str], queries)):
        response = cast(List, responses)[idx]
        for name, evaluator in self.evaluators.items():
            if multi_kwargs:
                # multi-evaluator - get appropriate runtime kwargs if present
                kwargs = (
                    eval_kwargs_lists[name] if name in eval_kwargs_lists else {}
                )
            else:
                # single evaluator (maintain backwards compatibility)
                kwargs = eval_kwargs_lists
            eval_kwargs = self._get_eval_kwargs(kwargs, idx)
            eval_jobs.append(
                eval_response_worker(
                    self.semaphore,
                    evaluator,
                    name,
                    query=query,
                    response=response,
                    eval_kwargs=eval_kwargs,
                )
            )
    results = await self.asyncio_mod.gather(*eval_jobs)

    # Format results
    return self._format_results(results)
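
Usage sketch (illustrative): the correctness={"reference": [...]} keyword routes the reference list only to the evaluator registered under the name "correctness"; a flat {kwarg: [list of values]} dict is the single-evaluator form kept for backwards compatibility. The Response objects are constructed directly here for brevity (import path per llama-index-core's response schema); in practice they usually come from a query engine.

```python
import asyncio

from llama_index.core.base.response.schema import Response
from llama_index.core.evaluation import (
    BatchEvalRunner,
    CorrectnessEvaluator,
    FaithfulnessEvaluator,
)

runner = BatchEvalRunner(
    evaluators={
        "faithfulness": FaithfulnessEvaluator(),
        "correctness": CorrectnessEvaluator(),
    },
    workers=4,
)

queries = ["What is the capital of France?"]
# Response objects are normally produced by a query engine; built directly here for brevity.
responses = [Response(response="Paris is the capital of France.")]

results = asyncio.run(
    runner.aevaluate_responses(
        queries=queries,
        responses=responses,
        # Nested form: only the evaluator named "correctness" receives the reference list.
        correctness={"reference": ["Paris is the capital of France."]},
    )
)
print(results["correctness"][0].score)
```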

aevaluate_queries async #

aevaluate_queries(query_engine: BaseQueryEngine, queries: Optional[List[str]] = None, **eval_kwargs_lists: Dict[str, Any]) -> Dict[str, List[EvaluationResult]]

Evaluate queries.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| query_engine | BaseQueryEngine | Query engine. | required |
| queries | Optional[List[str]] | List of query strings. | None |
| **eval_kwargs_lists | Dict[str, Any] | Dict of lists of kwargs to pass to the evaluator. | {} |

Source code in llama-index-core/llama_index/core/evaluation/batch_runner.py
async def aevaluate_queries(
    self,
    query_engine: BaseQueryEngine,
    queries: Optional[List[str]] = None,
    **eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
    """
    Evaluate queries.

    Args:
        query_engine (BaseQueryEngine): Query engine.
        queries (Optional[List[str]]): List of query strings. Defaults to None.
        **eval_kwargs_lists (Dict[str, Any]): Dict of lists of kwargs to
            pass to evaluator. Defaults to None.

    """
    if queries is None:
        raise ValueError("`queries` must be provided")

    # gather responses
    response_jobs = []
    for query in queries:
        response_jobs.append(response_worker(self.semaphore, query_engine, query))
    responses = await self.asyncio_mod.gather(*response_jobs)

    return await self.aevaluate_responses(
        queries=queries,
        responses=responses,
        **eval_kwargs_lists,
    )
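
End-to-end sketch (illustrative): aevaluate_queries runs every query through the engine first, then forwards the resulting Response objects to aevaluate_responses. The ./data corpus and the two queries are assumptions for the example.

```python
import asyncio

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.evaluation import (
    BatchEvalRunner,
    FaithfulnessEvaluator,
    RelevancyEvaluator,
)

# Illustrative corpus and engine; any BaseQueryEngine works.
documents = SimpleDirectoryReader("./data").load_data()
query_engine = VectorStoreIndex.from_documents(documents).as_query_engine()

runner = BatchEvalRunner(
    evaluators={
        "faithfulness": FaithfulnessEvaluator(),
        "relevancy": RelevancyEvaluator(),
    },
    workers=8,
    show_progress=True,
)

results = asyncio.run(
    runner.aevaluate_queries(
        query_engine=query_engine,
        queries=[
            "What did the author work on?",
            "Where did the author grow up?",
        ],
    )
)
passed = sum(1 for r in results["faithfulness"] if r.passing)
print(passed, "queries passed faithfulness")
```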

evaluate_response_strs #

evaluate_response_strs(queries: Optional[List[str]] = None, response_strs: Optional[List[str]] = None, contexts_list: Optional[List[List[str]]] = None, **eval_kwargs_lists: Dict[str, Any]) -> Dict[str, List[EvaluationResult]]

Evaluate query, response pairs.

Sync version of aevaluate_response_strs.

Source code in llama-index-core/llama_index/core/evaluation/batch_runner.py
def evaluate_response_strs(
    self,
    queries: Optional[List[str]] = None,
    response_strs: Optional[List[str]] = None,
    contexts_list: Optional[List[List[str]]] = None,
    **eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
    """
    Evaluate query, response pairs.

    Sync version of aevaluate_response_strs.

    """
    return asyncio_run(
        self.aevaluate_response_strs(
            queries=queries,
            response_strs=response_strs,
            contexts_list=contexts_list,
            **eval_kwargs_lists,
        )
    )

evaluate_responses #

evaluate_responses(queries: Optional[List[str]] = None, responses: Optional[List[Response]] = None, **eval_kwargs_lists: Dict[str, Any]) -> Dict[str, List[EvaluationResult]]

Evaluate query, response objs.

Sync version of aevaluate_responses.

Source code in llama-index-core/llama_index/core/evaluation/batch_runner.py
def evaluate_responses(
    self,
    queries: Optional[List[str]] = None,
    responses: Optional[List[Response]] = None,
    **eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
    """
    Evaluate query, response objs.

    Sync version of aevaluate_responses.

    """
    return asyncio_run(
        self.aevaluate_responses(
            queries=queries,
            responses=responses,
            **eval_kwargs_lists,
        )
    )

evaluate_queries #

evaluate_queries(query_engine: BaseQueryEngine, queries: Optional[List[str]] = None, **eval_kwargs_lists: Dict[str, Any]) -> Dict[str, List[EvaluationResult]]

Evaluate queries.

Sync version of aevaluate_queries.

Source code in llama-index-core/llama_index/core/evaluation/batch_runner.py
def evaluate_queries(
    self,
    query_engine: BaseQueryEngine,
    queries: Optional[List[str]] = None,
    **eval_kwargs_lists: Dict[str, Any],
) -> Dict[str, List[EvaluationResult]]:
    """
    Evaluate queries.

    Sync version of aevaluate_queries.

    """
    return asyncio_run(
        self.aevaluate_queries(
            query_engine=query_engine,
            queries=queries,
            **eval_kwargs_lists,
        )
    )
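
Sketch (illustrative): the sync wrappers just call asyncio_run on their async counterparts, so the previous example collapses to a plain call with no event-loop management; runner and query_engine are assumed to be set up as in the aevaluate_queries sketch above.

```python
# runner and query_engine as in the aevaluate_queries sketch above (assumed setup).
results = runner.evaluate_queries(
    query_engine=query_engine,
    queries=["What did the author work on?"],
)
print(results["relevancy"][0].passing)
```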

upload_eval_results #

upload_eval_results(project_name: str, app_name: str, results: Dict[str, List[EvaluationResult]]) -> None

Upload the evaluation results to LlamaCloud.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| project_name | str | The name of the project. | required |
| app_name | str | The name of the app. | required |
| results | Dict[str, List[EvaluationResult]] | The evaluation results, a mapping of metric name to a list of EvaluationResult objects. | required |

Examples:

results = batch_runner.evaluate_responses(...)

batch_runner.upload_eval_results(
    project_name="my_project",
    app_name="my_app",
    results=results
)
Source code in llama-index-core/llama_index/core/evaluation/batch_runner.py
def upload_eval_results(
    self,
    project_name: str,
    app_name: str,
    results: Dict[str, List[EvaluationResult]],
) -> None:
    """
    Upload the evaluation results to LlamaCloud.

    Args:
        project_name (str): The name of the project.
        app_name (str): The name of the app.
        results (Dict[str, List[EvaluationResult]]):
            The evaluation results, a mapping of metric name to a list of EvaluationResult objects.

    Examples:
        ```python
        results = batch_runner.evaluate_responses(...)

        batch_runner.upload_eval_results(
            project_name="my_project",
            app_name="my_app",
            results=results
        )
        ```
    """
    from llama_index.core.evaluation.eval_utils import upload_eval_results

    upload_eval_results(
        project_name=project_name, app_name=app_name, results=results
    )