
tea_tasting.metrics.base #

Base classes for metrics.

AggrCols #

Bases: NamedTuple

Columns to be aggregated for a metric analysis.

Attributes:

  • has_count (bool): If True, include the sample size.
  • mean_cols (Sequence[str]): Column names for calculation of sample means.
  • var_cols (Sequence[str]): Column names for calculation of sample variances.
  • cov_cols (Sequence[tuple[str, str]]): Pairs of column names for calculation of sample covariances.
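
For illustration, here is a minimal sketch of an AggrCols declaration for a hypothetical metric; the column names "revenue" and "orders" are made up for the example.

from tea_tasting.metrics.base import AggrCols

# Hypothetical metric that needs the sample size, the means and variances
# of "revenue" and "orders", and the covariance between the two columns.
aggr_cols = AggrCols(
    has_count=True,
    mean_cols=("revenue", "orders"),
    var_cols=("revenue", "orders"),
    cov_cols=(("revenue", "orders"),),
)
print(aggr_cols.mean_cols)  # ('revenue', 'orders')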

MetricBase #

Bases: ABC, Generic[R], ReprMixin

Base class for metrics.

analyze(data, control, treatment, variant) abstractmethod #

Analyze a metric in an experiment.

Parameters:

  • data (IntoFrame | Table): Experimental data. Required.
  • control (object): Control variant. Required.
  • treatment (object): Treatment variant. Required.
  • variant (str): Variant column name. Required.

Returns:

  • R: Analysis result.

Source code in src/tea_tasting/metrics/base.py
@abc.abstractmethod
def analyze(
    self,
    data: narwhals.typing.IntoFrame | ibis.expr.types.Table,
    control: object,
    treatment: object,
    variant: str,
) -> R:
    """Analyze a metric in an experiment.

    Args:
        data: Experimental data.
        control: Control variant.
        treatment: Treatment variant.
        variant: Variant column name.

    Returns:
        Analysis result.
    """

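As an illustration only, here is a minimal sketch of a MetricBase subclass. The metric, its result type, and the column names are hypothetical, and the sketch assumes eager in-memory data (for example, a Polars DataFrame) that Narwhals can wrap.

from typing import NamedTuple

import narwhals as nw
import polars as pl

import tea_tasting.metrics.base


class MeanResult(NamedTuple):
    """Hypothetical analysis result."""
    control_mean: float
    treatment_mean: float


class MeanDiff(tea_tasting.metrics.base.MetricBase[MeanResult]):
    """Hypothetical metric: compares the means of a value column."""

    def __init__(self, value: str) -> None:
        self.value = value

    def analyze(self, data, control, treatment, variant):
        # Assumes eager, in-memory data; an Ibis table would have to be
        # materialized first.
        frame = nw.from_native(data, eager_only=True)
        means = {
            var: frame.filter(nw.col(variant) == var)[self.value].mean()
            for var in (control, treatment)
        }
        return MeanResult(
            control_mean=means[control],
            treatment_mean=means[treatment],
        )


data = pl.DataFrame({
    "variant": [0, 0, 1, 1],
    "revenue": [10.0, 20.0, 15.0, 30.0],
})
print(MeanDiff("revenue").analyze(data, control=0, treatment=1, variant="variant"))
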
MetricBaseAggregated #

Bases: MetricBase[R], _HasAggrCols

Base class for metrics that are analyzed using aggregated statistics.

aggr_cols abstractmethod property #

Columns to be aggregated for an analysis.

analyze(data, control, treatment, variant=None) #

Analyze a metric in an experiment.

Parameters:

  • data (IntoFrame | Table | dict[object, Aggregates]): Experimental data. Required.
  • control (object): Control variant. Required.
  • treatment (object): Treatment variant. Required.
  • variant (str | None): Variant column name. Defaults to None.

Returns:

  • R: Analysis result.

Source code in src/tea_tasting/metrics/base.py
def analyze(
    self,
    data: narwhals.typing.IntoFrame | ibis.expr.types.Table | dict[
        object, tea_tasting.aggr.Aggregates],
    control: object,
    treatment: object,
    variant: str | None = None,
) -> R:
    """Analyze a metric in an experiment.

    Args:
        data: Experimental data.
        control: Control variant.
        treatment: Treatment variant.
        variant: Variant column name.

    Returns:
        Analysis result.
    """
    tea_tasting.utils.check_scalar(variant, "variant", typ=str | None)
    aggr = aggregate_by_variants(
        data,
        aggr_cols=self.aggr_cols,
        variant=variant,
    )
    return self.analyze_aggregates(
        control=aggr[control],
        treatment=aggr[treatment],
    )

analyze_aggregates(control, treatment) abstractmethod #

Analyze a metric in an experiment using aggregated statistics.

Parameters:

  • control (Aggregates): Control data. Required.
  • treatment (Aggregates): Treatment data. Required.

Returns:

  • R: Analysis result.

Source code in src/tea_tasting/metrics/base.py
@abc.abstractmethod
def analyze_aggregates(
    self,
    control: tea_tasting.aggr.Aggregates,
    treatment: tea_tasting.aggr.Aggregates,
) -> R:
    """Analyze metric in an experiment using aggregated statistics.

    Args:
        control: Control data.
        treatment: Treatment data.

    Returns:
        Analysis result.
    """

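A minimal sketch of a subclass, for illustration only. The metric, its result type, and the column name are hypothetical, and the sketch assumes that tea_tasting.aggr.Aggregates exposes a mean() accessor for the aggregated columns.

from typing import NamedTuple

import polars as pl

import tea_tasting.metrics.base


class LiftResult(NamedTuple):
    """Hypothetical analysis result."""
    control_mean: float
    treatment_mean: float
    rel_effect_size: float


class MeanLift(tea_tasting.metrics.base.MetricBaseAggregated[LiftResult]):
    """Hypothetical metric that only needs per-variant sample means."""

    def __init__(self, value: str) -> None:
        self.value = value

    @property
    def aggr_cols(self) -> tea_tasting.metrics.base.AggrCols:
        return tea_tasting.metrics.base.AggrCols(
            has_count=False,
            mean_cols=(self.value,),
            var_cols=(),
            cov_cols=(),
        )

    def analyze_aggregates(self, control, treatment):
        control_mean = control.mean(self.value)
        treatment_mean = treatment.mean(self.value)
        return LiftResult(
            control_mean=control_mean,
            treatment_mean=treatment_mean,
            rel_effect_size=treatment_mean / control_mean - 1,
        )


data = pl.DataFrame({
    "variant": [0, 0, 1, 1],
    "revenue": [10.0, 20.0, 15.0, 30.0],
})
print(MeanLift("revenue").analyze(data, control=0, treatment=1, variant="variant"))
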
MetricBaseGranular #

Bases: MetricBase[R], _HasCols

Base class for metrics that are analyzed using granular data.

cols abstractmethod property #

Columns to be fetched for an analysis.

analyze(data, control, treatment, variant=None) #

Analyze a metric in an experiment.

Parameters:

  • data (IntoFrame | Table | dict[object, Table]): Experimental data. Required.
  • control (object): Control variant. Required.
  • treatment (object): Treatment variant. Required.
  • variant (str | None): Variant column name. Defaults to None.

Returns:

  • R: Analysis result.

Source code in src/tea_tasting/metrics/base.py
def analyze(
    self,
    data: (
        narwhals.typing.IntoFrame |
        ibis.expr.types.Table |
        dict[object, pa.Table]
    ),
    control: object,
    treatment: object,
    variant: str | None = None,
) -> R:
    """Analyze a metric in an experiment.

    Args:
        data: Experimental data.
        control: Control variant.
        treatment: Treatment variant.
        variant: Variant column name.

    Returns:
        Analysis result.
    """
    tea_tasting.utils.check_scalar(variant, "variant", typ=str | None)
    dfs = read_granular(
        data,
        cols=self.cols,
        variant=variant,
    )
    return self.analyze_granular(
        control=dfs[control],
        treatment=dfs[treatment],
    )

analyze_granular(control, treatment) abstractmethod #

Analyze a metric in an experiment using granular data.

Parameters:

  • control (Table): Control data. Required.
  • treatment (Table): Treatment data. Required.

Returns:

  • R: Analysis result.

Source code in src/tea_tasting/metrics/base.py
@abc.abstractmethod
def analyze_granular(
    self,
    control: pa.Table,
    treatment: pa.Table,
) -> R:
    """Analyze metric in an experiment using granular data.

    Args:
        control: Control data.
        treatment: Treatment data.

    Returns:
        Analysis result.
    """

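A minimal sketch of a subclass, for illustration only; the metric, its result type, and the column name are hypothetical.

from typing import NamedTuple

import polars as pl
import pyarrow as pa
import pyarrow.compute as pc

import tea_tasting.metrics.base


class MedianResult(NamedTuple):
    """Hypothetical analysis result."""
    control_median: float
    treatment_median: float


class MedianDiff(tea_tasting.metrics.base.MetricBaseGranular[MedianResult]):
    """Hypothetical metric that needs granular (per-row) data."""

    def __init__(self, value: str) -> None:
        self.value = value

    @property
    def cols(self) -> tuple[str, ...]:
        # The only column to fetch for each variant.
        return (self.value,)

    def analyze_granular(self, control: pa.Table, treatment: pa.Table):
        return MedianResult(
            control_median=pc.approximate_median(control[self.value]).as_py(),
            treatment_median=pc.approximate_median(treatment[self.value]).as_py(),
        )


data = pl.DataFrame({
    "variant": [0, 0, 1, 1],
    "revenue": [10.0, 20.0, 15.0, 30.0],
})
print(MedianDiff("revenue").analyze(data, control=0, treatment=1, variant="variant"))
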
MetricPowerResults #

Bases: DictsReprMixin, UserList[P]

Power analysis results.

to_arrow() #

Convert the object to a PyArrow Table.

Source code in src/tea_tasting/utils.py
@_cache_method
def to_arrow(self) -> pa.Table:
    """Convert the object to a PyArrow Table."""
    return pa.Table.from_pylist(self.to_dicts())

to_dicts() #

Convert the results to a sequence of dictionaries.

Source code in src/tea_tasting/metrics/base.py
@tea_tasting.utils._cache_method
def to_dicts(self) -> tuple[dict[str, object], ...]:
    """Convert the results to a sequence of dictionaries."""
    return tuple((v if isinstance(v, dict) else v._asdict()) for v in self)
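
For illustration, a small sketch that wraps hypothetical power analysis results; the PowerResult tuple and its fields are assumptions made up for the example.

from typing import NamedTuple

import tea_tasting.metrics.base


class PowerResult(NamedTuple):
    """Hypothetical power analysis result."""
    power: float
    rel_effect_size: float
    n_obs: int


results = tea_tasting.metrics.base.MetricPowerResults([
    PowerResult(power=0.8, rel_effect_size=0.05, n_obs=20_000),
    PowerResult(power=0.9, rel_effect_size=0.06, n_obs=20_000),
])
print(results.to_dicts())  # a tuple of two dictionaries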

to_html(keys=None, formatter=get_and_format_num, *, max_rows=None, indent=None) #

Convert the object to HTML.

Default formatting rules:

  • If a name starts with "rel_" or equals "power", consider it a percentage value: round it to 2 significant digits, multiply it by 100, and add "%".
  • Round other values to 3 significant digits.
  • If a value is less than 0.001 or greater than or equal to 10_000_000, format it in exponential notation.
  • If a name ends with "_ci", consider it a confidence interval: look up the attributes "{name}_lower" and "{name}_upper", and format the interval as "[{lower_bound}, {upper_bound}]".

Parameters:

  • keys (Sequence[str] | None): Keys to convert. If a key is not defined in the dictionary, it's assumed to be None. Defaults to None.
  • formatter (Callable[[dict[str, object], str], str]): Custom formatter function. It should accept a dictionary of metric result attributes and an attribute name, and return a formatted attribute value. Defaults to get_and_format_num.
  • max_rows (int | None): Maximum number of rows to convert. If None, the default value will be used. If 0 or less, all rows will be converted. Defaults to None.
  • indent (str | None): Whitespace to insert for each indentation level. If None, do not indent. Defaults to None.

Returns:

  • str: A table with results rendered as HTML.

Source code in src/tea_tasting/utils.py
def to_html(
    self,
    keys: Sequence[str] | None = None,
    formatter: Callable[[dict[str, object], str], str] = get_and_format_num,
    *,
    max_rows: int | None = None,
    indent: str | None = None,
) -> str:
    """Convert the object to HTML.

    Default formatting rules:

    - If a name starts with `"rel_"` or equals to `"power"` consider it
        a percentage value. Round percentage values to 2 significant digits,
        multiply by `100` and add `"%"`.
    - Round other values to 3 significant values.
    - If value is less than `0.001` or is greater than or equal to `10_000_000`,
        format it in exponential presentation.
    - If a name ends with `"_ci"`, consider it a confidence interval.
        Look up for attributes `"{name}_lower"` and `"{name}_upper"`,
        and format the interval as `"[{lower_bound}, {upper_bound}]"`.

    Args:
        keys: Keys to convert. If a key is not defined in the dictionary
            it's assumed to be `None`.
        formatter: Custom formatter function. It should accept a dictionary
            of metric result attributes and an attribute name, and return
            a formatted attribute value.
        max_rows: Maximum number of rows to convert.
            If `None`, the default value will be used.
            If `0` or less, all rows will be converted.
        indent: Whitespace to insert for each indentation level. If `None`,
            do not indent.

    Returns:
        A table with results rendered as HTML.
    """
    if keys is None:
        keys = self.default_keys
    if max_rows is None:
        max_rows = self.default_max_rows

    table = ET.Element(
        "table",
        {"class": "dataframe", "style": "text-align: right;"},
    )
    thead = ET.SubElement(table, "thead")
    thead_tr = ET.SubElement(thead, "tr")
    for key in keys:
        th = ET.SubElement(thead_tr, "th")
        th.text = key
    tbody = ET.SubElement(table, "tbody")
    for pretty_dict in self.to_pretty_dicts(keys, formatter, max_rows=max_rows):
        tr = ET.SubElement(tbody, "tr")
        for key in keys:
            td = ET.SubElement(tr, "td")
            td.text = pretty_dict[key]
    if indent is not None:
        ET.indent(table, space=indent)
    return ET.tostring(table, encoding="unicode", method="html")
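
A short usage sketch, again with a hypothetical PowerResult tuple for the rows.

from typing import NamedTuple

import tea_tasting.metrics.base


class PowerResult(NamedTuple):
    """Hypothetical power analysis result."""
    power: float
    n_obs: int


results = tea_tasting.metrics.base.MetricPowerResults([
    PowerResult(power=0.8, n_obs=20_000),
    PowerResult(power=0.9, n_obs=26_000),
])
# Render only the two available keys, with two-space indentation.
print(results.to_html(keys=("power", "n_obs"), indent="  "))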

to_pandas() #

Convert the object to a Pandas DataFrame.

Source code in src/tea_tasting/utils.py
@_cache_method
def to_pandas(self) -> pd.DataFrame:
    """Convert the object to a Pandas DataFrame."""
    import pandas as pd
    return pd.DataFrame.from_records(self.to_dicts())

to_polars() #

Convert the object to a Polars DataFrame.

Source code in src/tea_tasting/utils.py
@_cache_method
def to_polars(self) -> pl.DataFrame:
    """Convert the object to a Polars DataFrame."""
    import polars as pl
    return pl.from_dicts(self.to_dicts())

to_pretty_dicts(keys=None, formatter=get_and_format_num, *, max_rows=None) #

Convert the object to a list of dictionaries with formatted values.

Default formatting rules:

  • If a name starts with "rel_" or equals "power", consider it a percentage value: round it to 2 significant digits, multiply it by 100, and add "%".
  • Round other values to 3 significant digits.
  • If a value is less than 0.001 or greater than or equal to 10_000_000, format it in exponential notation.
  • If a name ends with "_ci", consider it a confidence interval: look up the attributes "{name}_lower" and "{name}_upper", and format the interval as "[{lower_bound}, {upper_bound}]".

Parameters:

  • keys (Sequence[str] | None): Keys to convert. If a key is not defined in the dictionary, it's assumed to be None. Defaults to None.
  • formatter (Callable[[dict[str, object], str], str]): Custom formatter function. It should accept a dictionary of metric result attributes and an attribute name, and return a formatted attribute value. Defaults to get_and_format_num.
  • max_rows (int | None): Maximum number of rows to convert. If None, the default value will be used. If 0 or less, all rows will be converted. Defaults to None.

Returns:

  • list[dict[str, str]]: List of dictionaries with formatted values.

Source code in src/tea_tasting/utils.py
def to_pretty_dicts(
    self,
    keys: Sequence[str] | None = None,
    formatter: Callable[[dict[str, object], str], str] = get_and_format_num,
    *,
    max_rows: int | None = None,
) -> list[dict[str, str]]:
    """Convert the object to a list of dictionaries with formatted values.

    Default formatting rules:

    - If a name starts with `"rel_"` or equals to `"power"` consider it
        a percentage value. Round percentage values to 2 significant digits,
        multiply by `100` and add `"%"`.
    - Round other values to 3 significant values.
    - If value is less than `0.001` or is greater than or equal to `10_000_000`,
        format it in exponential presentation.
    - If a name ends with `"_ci"`, consider it a confidence interval.
        Look up for attributes `"{name}_lower"` and `"{name}_upper"`,
        and format the interval as `"[{lower_bound}, {upper_bound}]"`.

    Args:
        keys: Keys to convert. If a key is not defined in the dictionary
            it's assumed to be `None`.
        formatter: Custom formatter function. It should accept a dictionary
            of metric result attributes and an attribute name, and return
            a formatted attribute value.
        max_rows: Maximum number of rows to convert.
            If `None`, the default value will be used.
            If `0` or less, all rows will be converted.

    Returns:
        List of dictionaries with formatted values.
    """
    if keys is None:
        keys = self.default_keys
    if max_rows is None:
        max_rows = self.default_max_rows

    dicts = self.to_dicts()
    if max_rows <= 0 or len(dicts) <= max_rows:
        return [{key: formatter(data, key) for key in keys} for data in dicts]

    bottom = max_rows // 2
    top = max_rows - bottom
    return (
        [{key: formatter(data, key) for key in keys} for data in dicts[:top]] +
        [dict.fromkeys(keys, "…")] +
        [{key: formatter(data, key) for key in keys} for data in dicts[-bottom:]]
    )
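
A short usage sketch showing the truncation behavior, with a hypothetical PowerResult tuple.

from typing import NamedTuple

import tea_tasting.metrics.base


class PowerResult(NamedTuple):
    """Hypothetical power analysis result."""
    power: float
    n_obs: int


results = tea_tasting.metrics.base.MetricPowerResults([
    PowerResult(power=p, n_obs=n)
    for p, n in ((0.7, 15_000), (0.8, 20_000), (0.9, 26_000))
])
# With max_rows=2, the middle row is replaced by a single "…" row.
for row in results.to_pretty_dicts(keys=("power", "n_obs"), max_rows=2):
    print(row)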

to_string(keys=None, formatter=get_and_format_num, *, max_rows=None) #

Convert the object to a string.

Default formatting rules:

  • If a name starts with "rel_" or equals "power", consider it a percentage value: round it to 2 significant digits, multiply it by 100, and add "%".
  • Round other values to 3 significant digits.
  • If a value is less than 0.001 or greater than or equal to 10_000_000, format it in exponential notation.
  • If a name ends with "_ci", consider it a confidence interval: look up the attributes "{name}_lower" and "{name}_upper", and format the interval as "[{lower_bound}, {upper_bound}]".

Parameters:

  • keys (Sequence[str] | None): Keys to convert. If a key is not defined in the dictionary, it's assumed to be None. Defaults to None.
  • formatter (Callable[[dict[str, object], str], str]): Custom formatter function. It should accept a dictionary of metric result attributes and an attribute name, and return a formatted attribute value. Defaults to get_and_format_num.
  • max_rows (int | None): Maximum number of rows to convert. If None, the default value will be used. If 0 or less, all rows will be converted. Defaults to None.

Returns:

  • str: A table with results rendered as a string.

Source code in src/tea_tasting/utils.py
def to_string(
    self,
    keys: Sequence[str] | None = None,
    formatter: Callable[[dict[str, object], str], str] = get_and_format_num,
    *,
    max_rows: int | None = None,
) -> str:
    """Convert the object to a string.

    Default formatting rules:

    - If a name starts with `"rel_"` or equals to `"power"` consider it
        a percentage value. Round percentage values to 2 significant digits,
        multiply by `100` and add `"%"`.
    - Round other values to 3 significant values.
    - If value is less than `0.001` or is greater than or equal to `10_000_000`,
        format it in exponential presentation.
    - If a name ends with `"_ci"`, consider it a confidence interval.
        Look up for attributes `"{name}_lower"` and `"{name}_upper"`,
        and format the interval as `"[{lower_bound}, {upper_bound}]"`.

    Args:
        keys: Keys to convert. If a key is not defined in the dictionary
            it's assumed to be `None`.
        formatter: Custom formatter function. It should accept a dictionary
            of metric result attributes and an attribute name, and return
            a formatted attribute value.
        max_rows: Maximum number of rows to convert.
            If `None`, the default value will be used.
            If `0` or less, all rows will be converted.

    Returns:
        A table with results rendered as string.
    """
    if keys is None:
        keys = self.default_keys
    if max_rows is None:
        max_rows = self.default_max_rows

    pretty_dicts = self.to_pretty_dicts(keys, formatter, max_rows=max_rows)
    widths = {key: len(key) for key in keys}
    for pretty_dict in pretty_dicts:
        for key in keys:
            widths[key] = max(widths[key], len(pretty_dict[key]))

    sep = " "
    rows = [sep.join(key.rjust(widths[key]) for key in keys)]
    rows.extend(
        sep.join(pretty_dict[key].rjust(widths[key]) for key in keys)
        for pretty_dict in pretty_dicts
    )
    return "\n".join(rows)

with_defaults(*, keys=None, max_rows=None) #

Copies the object and sets the new default parameters.

Parameters:

  • keys (Sequence[str] | None): New default keys for the methods to_pretty_dicts, to_string, and to_html. Defaults to None.
  • max_rows (int | None): New default max_rows for the methods to_pretty_dicts, to_string, and to_html. Defaults to None.

Returns:

  • DictsReprMixinT: A copy of the object with the new default parameters.

Source code in src/tea_tasting/utils.py
def with_defaults(
    self: DictsReprMixinT,
    *,
    keys: Sequence[str] | None = None,
    max_rows: int | None = None,
) -> DictsReprMixinT:
    """Copies the object and sets the new default parameters.

    Args:
        keys: New default `keys` for the methods `to_pretty_dicts`, `to_string`,
            and `to_html`.
        max_rows: New default `max_rows` for the methods `to_pretty_dicts`,
            `to_string`, and `to_html`.

    Returns:
        A copy of the object with the new default keys.
    """
    new_instance = self.__class__.__new__(self.__class__)
    new_instance.__dict__.update(self.__dict__)
    new_instance._cache = None
    if keys is not None:
        new_instance.default_keys = keys
    if max_rows is not None:
        new_instance.default_max_rows = max_rows
    return new_instance

with_keys(keys) #

Copies the object and sets the new default keys.

Parameters:

  • keys (Sequence[str]): New default keys for the methods to_pretty_dicts, to_string, and to_html. Required.

Returns:

  • DictsReprMixinT: A copy of the object with the new default keys.

Source code in src/tea_tasting/utils.py
def with_keys(self: DictsReprMixinT, keys: Sequence[str]) -> DictsReprMixinT:
    """Copies the object and sets the new default `keys`.

    Args:
        keys: New default `keys` for the methods `to_pretty_dicts`, `to_string`,
            and `to_html`.

    Returns:
        A copy of the object with the new default `keys`.
    """
    return self.with_defaults(keys=keys)

with_max_rows(max_rows) #

Copies the object and sets the new default max_rows.

Parameters:

  • max_rows (int): New default max_rows for the methods to_pretty_dicts, to_string, and to_html. Required.

Returns:

  • DictsReprMixinT: A copy of the object with the new default max_rows.

Source code in src/tea_tasting/utils.py
def with_max_rows(self: DictsReprMixinT, max_rows: int) -> DictsReprMixinT:
    """Copies the object and sets the new default `max_rows`.

    Args:
        max_rows: New default `max_rows` for the methods `to_pretty_dicts`,
            `to_string`, and `to_html`.

    Returns:
        A copy of the object with the new default `max_rows`.
    """
    return self.with_defaults(max_rows=max_rows)
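
A short usage sketch of the with_defaults, with_keys, and with_max_rows helpers, with a hypothetical PowerResult tuple; note that they return copies and leave the original object unchanged.

from typing import NamedTuple

import tea_tasting.metrics.base


class PowerResult(NamedTuple):
    """Hypothetical power analysis result."""
    power: float
    effect_size: float
    n_obs: int


results = tea_tasting.metrics.base.MetricPowerResults([
    PowerResult(power=0.8, effect_size=1.5, n_obs=20_000),
])
# A copy that renders only two columns by default.
compact = results.with_keys(("power", "n_obs"))
print(compact.to_string())
print(results.with_max_rows(10) is results)  # False: with_* methods return copies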

PowerBase #

Bases: ABC, Generic[S], ReprMixin

Base class for the analysis of power.

solve_power(data, parameter='rel_effect_size') abstractmethod #

Solve for a parameter of the power of a test.

Parameters:

  • data (IntoFrame | Table): Sample data. Required.
  • parameter (Literal['power', 'effect_size', 'rel_effect_size', 'n_obs']): Parameter name. Defaults to 'rel_effect_size'.

Returns:

  • S: Power analysis result.

Source code in src/tea_tasting/metrics/base.py
@abc.abstractmethod
def solve_power(
    self,
    data: narwhals.typing.IntoFrame | ibis.expr.types.Table,
    parameter: Literal[
        "power", "effect_size", "rel_effect_size", "n_obs"] = "rel_effect_size",
) -> S:
    """Solve for a parameter of the power of a test.

    Args:
        data: Sample data.
        parameter: Parameter name.

    Returns:
        Power analysis result.
    """

PowerBaseAggregated #

Bases: PowerBase[S], _HasAggrCols

Base class for the analysis of power using aggregated statistics.

aggr_cols abstractmethod property #

Columns to be aggregated for an analysis.

solve_power(data, parameter='rel_effect_size') #

Solve for a parameter of the power of a test.

Parameters:

  • data (IntoFrame | Table | Aggregates): Sample data. Required.
  • parameter (Literal['power', 'effect_size', 'rel_effect_size', 'n_obs']): Parameter name. Defaults to 'rel_effect_size'.

Returns:

  • S: Power analysis result.

Source code in src/tea_tasting/metrics/base.py
def solve_power(
    self,
    data: (
        narwhals.typing.IntoFrame |
        ibis.expr.types.Table |
        tea_tasting.aggr.Aggregates
    ),
    parameter: Literal[
        "power", "effect_size", "rel_effect_size", "n_obs"] = "rel_effect_size",
) -> S:
    """Solve for a parameter of the power of a test.

    Args:
        data: Sample data.
        parameter: Parameter name.

    Returns:
        Power analysis result.
    """
    tea_tasting.utils.check_scalar(
        parameter,
        "parameter",
        in_={"power", "effect_size", "rel_effect_size", "n_obs"},
    )
    if not isinstance(data, tea_tasting.aggr.Aggregates):
        data = tea_tasting.aggr.read_aggregates(
            data=data,
            group_col=None,
            **self.aggr_cols._asdict(),
        )
    return self.solve_power_from_aggregates(data=data, parameter=parameter)

solve_power_from_aggregates(data, parameter='rel_effect_size') abstractmethod #

Solve for a parameter of the power of a test.

Parameters:

  • data (Aggregates): Sample data. Required.
  • parameter (Literal['power', 'effect_size', 'rel_effect_size', 'n_obs']): Parameter name. Defaults to 'rel_effect_size'.

Returns:

  • S: Power analysis result.

Source code in src/tea_tasting/metrics/base.py
@abc.abstractmethod
def solve_power_from_aggregates(
    self,
    data: tea_tasting.aggr.Aggregates,
    parameter: Literal[
        "power", "effect_size", "rel_effect_size", "n_obs"] = "rel_effect_size",
) -> S:
    """Solve for a parameter of the power of a test.

    Args:
        data: Sample data.
        parameter: Parameter name.

    Returns:
        Power analysis result.
    """

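For illustration only, a rough sketch of a PowerBaseAggregated subclass. The class, the column name, and the normal-approximation power formula are all made up for the example; the sketch assumes that tea_tasting.aggr.Aggregates exposes count(), mean(), and var() accessors, and it only handles solving for power.

from typing import Literal, NamedTuple

import polars as pl
import scipy.stats

import tea_tasting.aggr
import tea_tasting.metrics.base


class PowerResult(NamedTuple):
    """Hypothetical power analysis result."""
    power: float
    rel_effect_size: float
    n_obs: int


PowerResults = tea_tasting.metrics.base.MetricPowerResults[PowerResult]


class ZTestPower(tea_tasting.metrics.base.PowerBaseAggregated[PowerResults]):
    """Hypothetical power analysis of a two-sample z-test on a mean."""

    def __init__(
        self,
        value: str,
        rel_effect_size: float = 0.05,
        alpha: float = 0.05,
    ) -> None:
        self.value = value
        self.rel_effect_size = rel_effect_size
        self.alpha = alpha

    @property
    def aggr_cols(self) -> tea_tasting.metrics.base.AggrCols:
        return tea_tasting.metrics.base.AggrCols(
            has_count=True,
            mean_cols=(self.value,),
            var_cols=(self.value,),
            cov_cols=(),
        )

    def solve_power_from_aggregates(
        self,
        data: tea_tasting.aggr.Aggregates,
        parameter: Literal[
            "power", "effect_size", "rel_effect_size", "n_obs"] = "rel_effect_size",
    ) -> PowerResults:
        # Illustrative normal approximation with a 50/50 split;
        # only parameter="power" is handled in this sketch.
        n_obs = data.count()
        effect_size = self.rel_effect_size * data.mean(self.value)
        sem = (2 * data.var(self.value) / (n_obs / 2)) ** 0.5
        z = scipy.stats.norm.isf(self.alpha / 2)
        power = float(scipy.stats.norm.sf(z - effect_size / sem))
        return PowerResults([PowerResult(
            power=power,
            rel_effect_size=self.rel_effect_size,
            n_obs=n_obs,
        )])


data = pl.DataFrame({"revenue": [10.0, 20.0, 15.0, 30.0, 25.0, 5.0]})
print(ZTestPower("revenue").solve_power(data, parameter="power"))
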
aggregate_by_variants(data, aggr_cols, variant=None) #

Aggregate experimental data by variants.

Parameters:

  • data (IntoFrame | Table | dict[object, Aggregates]): Experimental data. Required.
  • aggr_cols (AggrCols): Columns to be aggregated. Required.
  • variant (str | None): Variant column name. Defaults to None.

Returns:

  • dict[object, Aggregates]: Experimental data as a dictionary of Aggregates.

Source code in src/tea_tasting/metrics/base.py
def aggregate_by_variants(
    data: (
        narwhals.typing.IntoFrame |
        ibis.expr.types.Table |
        dict[object, tea_tasting.aggr.Aggregates]
    ),
    aggr_cols: AggrCols,
    variant: str | None = None,
) ->  dict[object, tea_tasting.aggr.Aggregates]:
    """Aggregate experimental data by variants.

    Args:
        data: Experimental data.
        aggr_cols: Columns to be aggregated.
        variant: Variant column name.

    Returns:
        Experimental data as a dictionary of Aggregates.
    """
    if isinstance(data, dict):
        return data

    if variant is None:
        raise ValueError("The variant parameter is required but was not provided.")

    return tea_tasting.aggr.read_aggregates(
        data=data,
        group_col=variant,
        **aggr_cols._asdict(),
    )
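
A short usage sketch with a hypothetical "revenue" column; it assumes that Aggregates exposes a mean() accessor.

import polars as pl

import tea_tasting.metrics.base

data = pl.DataFrame({
    "variant": [0, 0, 1, 1],
    "revenue": [10.0, 20.0, 15.0, 30.0],
})
aggr = tea_tasting.metrics.base.aggregate_by_variants(
    data,
    aggr_cols=tea_tasting.metrics.base.AggrCols(
        has_count=True,
        mean_cols=("revenue",),
        var_cols=("revenue",),
        cov_cols=(),
    ),
    variant="variant",
)
# One Aggregates object per variant label.
print(sorted(aggr.keys()))
print(aggr[0].mean("revenue"), aggr[1].mean("revenue"))  # 15.0 22.5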

read_granular(data, cols=(), variant=None) #

Read granular experimental data.

Parameters:

  • data (IntoFrame | Table | dict[object, Table]): Experimental data. Required.
  • cols (Sequence[str]): Columns to read. Defaults to ().
  • variant (str | None): Variant column name. Defaults to None.

Returns:

  • Table | dict[object, Table]: Experimental data as a single PyArrow Table (if variant is None) or as a dictionary of PyArrow Tables keyed by variant.

Source code in src/tea_tasting/metrics/base.py
def read_granular(
    data: narwhals.typing.IntoFrame | ibis.expr.types.Table | dict[object, pa.Table],
    cols: Sequence[str] = (),
    variant: str | None = None,
) -> pa.Table | dict[object, pa.Table]:
    """Read granular experimental data.

    Args:
        data: Experimental data.
        cols: Columns to read.
        variant: Variant column name.

    Returns:
        Experimental data as a dictionary of PyArrow Tables.
    """
    if isinstance(data, dict):
        return data

    variant_cols = () if variant is None else (variant,)
    if isinstance(data, ibis.expr.types.Table):
        if len(cols) + len(variant_cols) > 0:
            data = data.select(*cols, *variant_cols)
        table = data.to_pyarrow()
    else:
        data = nw.from_native(data)
        if isinstance(data, nw.LazyFrame):
            data = data.collect()
        if len(cols) + len(variant_cols) > 0:
            data = data.select(*cols, *variant_cols)
        table = data.to_arrow()

    if variant is None:
        return table

    variant_array = table[variant]
    if len(cols) > 0:
        table = table.select(cols)
    return {
        var: table.filter(pc.equal(variant_array, pa.scalar(var)))  # type: ignore
        for var in variant_array.unique().to_pylist()
    }
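
A short usage sketch with a hypothetical "revenue" column.

import polars as pl

import tea_tasting.metrics.base

data = pl.DataFrame({
    "variant": [0, 0, 1],
    "revenue": [10.0, 20.0, 15.0],
})
# One PyArrow Table per variant label, containing only the requested columns.
tables = tea_tasting.metrics.base.read_granular(
    data, cols=("revenue",), variant="variant")
print(tables[0].num_rows, tables[1].num_rows)  # 2 1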