# tea_tasting.metrics.mean

Metrics for the analysis of means.

## Mean

```python
Mean(value, covariate=None, *, alternative=None, confidence_level=None, equal_var=None, use_t=None, alpha=None, ratio=None, power=None, effect_size=None, rel_effect_size=None, n_obs=None)
```

Bases: `RatioOfMeans`

Metric for the analysis of means.
Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `value` | `str` | Metric value column name. | *required* |
| `covariate` | `str \| None` | Metric covariate column name. | `None` |
| `alternative` | `Literal['two-sided', 'greater', 'less'] \| None` | Alternative hypothesis. | `None` |
| `confidence_level` | `float \| None` | Confidence level for the confidence interval. | `None` |
| `equal_var` | `bool \| None` | Defines whether equal variance is assumed. If `True`, the pooled variance is used for the calculation of the standard error of the difference between the two means. | `None` |
| `use_t` | `bool \| None` | Defines whether to use the Student's t-distribution (`True`) or the Normal distribution (`False`). | `None` |
| `alpha` | `float \| None` | Significance level. Only for the analysis of power. | `None` |
| `ratio` | `float \| int \| None` | Ratio of the number of observations in the treatment relative to the control. Only for the analysis of power. | `None` |
| `power` | `float \| None` | Statistical power. Only for the analysis of power. | `None` |
| `effect_size` | `float \| int \| Sequence[float \| int] \| None` | Absolute effect size. Difference between the two means. Only for the analysis of power. | `None` |
| `rel_effect_size` | `float \| Sequence[float] \| None` | Relative effect size. Difference between the two means, divided by the control mean. Only for the analysis of power. | `None` |
| `n_obs` | `int \| Sequence[int] \| None` | Number of observations in the control and in the treatment together. Only for the analysis of power. | `None` |
**Parameter defaults**

Defaults for the parameters `alpha`, `alternative`, `confidence_level`, `equal_var`, `n_obs`, `power`, `ratio`, and `use_t` can be changed using the `config_context` and `set_config` functions. See the Global configuration reference for details.
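For example, several of these defaults can be overridden at once within a context. This is a minimal sketch, assuming `config_context` is available at the package top level:

```python
import tea_tasting as tt

# Minimal sketch: metrics created inside the context pick up these defaults.
with tt.config_context(alternative="greater", equal_var=False):
    orders_per_user = tt.Mean("orders")
```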
Examples:

```python
import tea_tasting as tt

experiment = tt.Experiment(
    orders_per_user=tt.Mean("orders"),
    revenue_per_user=tt.Mean("revenue"),
)
data = tt.make_users_data(seed=42)
result = experiment.analyze(data)
print(result)
#> metric control treatment rel_effect_size rel_effect_size_ci pvalue
#> orders_per_user 0.530 0.573 8.0% [-2.0%, 19%] 0.118
#> revenue_per_user 5.24 5.73 9.3% [-2.4%, 22%] 0.123
```
With CUPED:
```python
experiment = tt.Experiment(
    orders_per_user=tt.Mean("orders", "orders_covariate"),
    revenue_per_user=tt.Mean("revenue", "revenue_covariate"),
)
data = tt.make_users_data(seed=42, covariates=True)
result = experiment.analyze(data)
print(result)
#> metric control treatment rel_effect_size rel_effect_size_ci pvalue
#> orders_per_user 0.523 0.581 11% [2.9%, 20%] 0.00733
#> revenue_per_user 5.12 5.85 14% [3.8%, 26%] 0.00675
```
Power analysis:
```python
data = tt.make_users_data(
    seed=42,
    sessions_uplift=0,
    orders_uplift=0,
    revenue_uplift=0,
    covariates=True,
)

orders_per_user = tt.Mean(
    "orders",
    "orders_covariate",
    n_obs=(10_000, 20_000),
)
print(orders_per_user.solve_power(data))  # Solve for effect size.
#> power effect_size rel_effect_size n_obs
#> 80% 0.0374 7.2% 10000
#> 80% 0.0264 5.1% 20000

orders_per_user = tt.Mean(
    "orders",
    "orders_covariate",
    rel_effect_size=0.05,
)
# Solve for the total number of observations.
print(orders_per_user.solve_power(data, "n_obs"))
#> power effect_size rel_effect_size n_obs
#> 80% 0.0260 5.0% 20733

orders_per_user = tt.Mean(
    "orders",
    "orders_covariate",
    rel_effect_size=0.1,
)
# Solve for power. Infer number of observations from the sample.
print(orders_per_user.solve_power(data, "power"))
#> power effect_size rel_effect_size n_obs
#> 69% 0.0519 10% 4000
```
Source code in src/tea_tasting/metrics/mean.py
### aggr_cols: AggrCols (property)

Columns to be aggregated for a metric analysis.
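As a quick illustration, the property can be inspected directly to see which aggregates a metric will request; the exact `AggrCols` contents depend on the tea-tasting version:

```python
import tea_tasting as tt

# Inspect the aggregates required by a metric with a CUPED covariate.
print(tt.Mean("orders", "orders_covariate").aggr_cols)
```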
### analyze(data, control, treatment, variant=None)

Analyze a metric in an experiment.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `data` | `DataFrame \| Table \| dict[Any, Aggregates]` | Experimental data. | *required* |
| `control` | `Any` | Control variant. | *required* |
| `treatment` | `Any` | Treatment variant. | *required* |
| `variant` | `str \| None` | Variant column name. | `None` |

Returns:

| Type | Description |
|---|---|
| `R` | Analysis result. |
Source code in src/tea_tasting/metrics/base.py
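A metric can also be analyzed directly, outside of an `Experiment`. The sketch below assumes the `variant` column with values `0` (control) and `1` (treatment), as produced by `make_users_data`:

```python
import tea_tasting as tt

data = tt.make_users_data(seed=42)
orders_per_user = tt.Mean("orders")
# Analyze a single metric, passing the name of the variant column.
result = orders_per_user.analyze(data, control=0, treatment=1, variant="variant")
print(result.pvalue)
```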
### analyze_aggregates(control, treatment)

Analyze a metric in an experiment using aggregated statistics.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `control` | `Aggregates` | Control data. | *required* |
| `treatment` | `Aggregates` | Treatment data. | *required* |

Returns:

| Type | Description |
|---|---|
| `MeanResult` | Analysis result. |
Source code in src/tea_tasting/metrics/mean.py
### solve_power(data, parameter='rel_effect_size')

Solve for a parameter of the power of a test.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `data` | `DataFrame \| Table \| Aggregates` | Sample data. | *required* |
| `parameter` | `Literal['power', 'effect_size', 'rel_effect_size', 'n_obs']` | Parameter name. | `'rel_effect_size'` |

Returns:

| Type | Description |
|---|---|
| `S` | Power analysis result. |
Source code in src/tea_tasting/metrics/base.py
### solve_power_from_aggregates(data, parameter='rel_effect_size')

Solve for a parameter of the power of a test.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `data` | `Aggregates` | Sample data. | *required* |
| `parameter` | `Literal['power', 'effect_size', 'rel_effect_size', 'n_obs']` | Parameter name. | `'rel_effect_size'` |

Returns:

| Type | Description |
|---|---|
| `MeanPowerResults` | Power analysis result. |
Source code in src/tea_tasting/metrics/mean.py
## MeanPowerResult

Bases: `NamedTuple`

Power analysis results.

Attributes:

| Name | Type | Description |
|---|---|---|
| `power` | `float` | Statistical power. |
| `effect_size` | `float` | Absolute effect size. Difference between the two means. |
| `rel_effect_size` | `float` | Relative effect size. Difference between the two means, divided by the control mean. |
| `n_obs` | `float` | Number of observations in the control and in the treatment together. |
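For illustration, individual records can be read field by field; this sketch assumes the result of `solve_power` is an iterable of `MeanPowerResult` records, one per requested number of observations, as suggested by the examples above:

```python
import tea_tasting as tt

data = tt.make_users_data(seed=42, covariates=True)
orders_per_user = tt.Mean("orders", "orders_covariate", n_obs=(10_000, 20_000))
# Assumption: the power analysis result is iterable, one record per n_obs.
for res in orders_per_user.solve_power(data):
    print(res.n_obs, res.effect_size, res.rel_effect_size)
```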
## MeanResult

Bases: `NamedTuple`

Result of the analysis of means.

Attributes:

| Name | Type | Description |
|---|---|---|
| `control` | `float` | Control mean. |
| `treatment` | `float` | Treatment mean. |
| `effect_size` | `float` | Absolute effect size. Difference between the two means. |
| `effect_size_ci_lower` | `float` | Lower bound of the absolute effect size confidence interval. |
| `effect_size_ci_upper` | `float` | Upper bound of the absolute effect size confidence interval. |
| `rel_effect_size` | `float` | Relative effect size. Difference between the two means, divided by the control mean. |
| `rel_effect_size_ci_lower` | `float` | Lower bound of the relative effect size confidence interval. |
| `rel_effect_size_ci_upper` | `float` | Upper bound of the relative effect size confidence interval. |
| `pvalue` | `float` | P-value. |
| `statistic` | `float` | Statistic (standardized effect size). |
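Since `MeanResult` is a named tuple, these fields can be read by attribute from a direct metric analysis. The sketch reuses the `make_users_data` sample and its `variant` column:

```python
import tea_tasting as tt

data = tt.make_users_data(seed=42)
res = tt.Mean("orders").analyze(data, control=0, treatment=1, variant="variant")
# Read named-tuple fields by attribute, or convert to a dict.
print(res.rel_effect_size, res.pvalue)
print(res._asdict())
```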
## RatioOfMeans

```python
RatioOfMeans(numer, denom=None, numer_covariate=None, denom_covariate=None, *, alternative=None, confidence_level=None, equal_var=None, use_t=None, alpha=None, ratio=None, power=None, effect_size=None, rel_effect_size=None, n_obs=None)
```

Bases: `MetricBaseAggregated[MeanResult]`, `PowerBaseAggregated[MeanPowerResults]`

Metric for the analysis of ratios of means.
Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `numer` | `str` | Numerator column name. | *required* |
| `denom` | `str \| None` | Denominator column name. | `None` |
| `numer_covariate` | `str \| None` | Covariate numerator column name. | `None` |
| `denom_covariate` | `str \| None` | Covariate denominator column name. | `None` |
| `alternative` | `Literal['two-sided', 'greater', 'less'] \| None` | Alternative hypothesis. | `None` |
| `confidence_level` | `float \| None` | Confidence level for the confidence interval. | `None` |
| `equal_var` | `bool \| None` | Defines whether equal variance is assumed. If `True`, the pooled variance is used for the calculation of the standard error of the difference between the two means. | `None` |
| `use_t` | `bool \| None` | Defines whether to use the Student's t-distribution (`True`) or the Normal distribution (`False`). | `None` |
| `alpha` | `float \| None` | Significance level. Only for the analysis of power. | `None` |
| `ratio` | `float \| int \| None` | Ratio of the number of observations in the treatment relative to the control. Only for the analysis of power. | `None` |
| `power` | `float \| None` | Statistical power. Only for the analysis of power. | `None` |
| `effect_size` | `float \| int \| Sequence[float \| int] \| None` | Absolute effect size. Difference between the two means. Only for the analysis of power. | `None` |
| `rel_effect_size` | `float \| Sequence[float] \| None` | Relative effect size. Difference between the two means, divided by the control mean. Only for the analysis of power. | `None` |
| `n_obs` | `int \| Sequence[int] \| None` | Number of observations in the control and in the treatment together. Only for the analysis of power. | `None` |
**Parameter defaults**

Defaults for the parameters `alpha`, `alternative`, `confidence_level`, `equal_var`, `n_obs`, `power`, `ratio`, and `use_t` can be changed using the `config_context` and `set_config` functions. See the Global configuration reference for details.
Examples:

```python
import tea_tasting as tt

experiment = tt.Experiment(
    orders_per_session=tt.RatioOfMeans("orders", "sessions"),
)
data = tt.make_users_data(seed=42)
result = experiment.analyze(data)
print(result)
#> metric control treatment rel_effect_size rel_effect_size_ci pvalue
#> orders_per_session 0.266 0.289 8.8% [-0.89%, 19%] 0.0762
```
With CUPED:
```python
experiment = tt.Experiment(
    orders_per_session=tt.RatioOfMeans(
        "orders",
        "sessions",
        "orders_covariate",
        "sessions_covariate",
    ),
)
data = tt.make_users_data(seed=42, covariates=True)
result = experiment.analyze(data)
print(result)
#> metric control treatment rel_effect_size rel_effect_size_ci pvalue
#> orders_per_session 0.262 0.293 12% [4.2%, 21%] 0.00229
```
Power analysis:
```python
data = tt.make_users_data(
    seed=42,
    sessions_uplift=0,
    orders_uplift=0,
    revenue_uplift=0,
    covariates=True,
)

orders_per_session = tt.RatioOfMeans(
    "orders",
    "sessions",
    "orders_covariate",
    "sessions_covariate",
    n_obs=(10_000, 20_000),
)
print(orders_per_session.solve_power(data))  # Solve for effect size.
#> power effect_size rel_effect_size n_obs
#> 80% 0.0177 6.8% 10000
#> 80% 0.0125 4.8% 20000

orders_per_session = tt.RatioOfMeans(
    "orders",
    "sessions",
    "orders_covariate",
    "sessions_covariate",
    rel_effect_size=0.05,
)
# Solve for the total number of observations.
print(orders_per_session.solve_power(data, "n_obs"))
#> power effect_size rel_effect_size n_obs
#> 80% 0.0130 5.0% 18515

orders_per_session = tt.RatioOfMeans(
    "orders",
    "sessions",
    "orders_covariate",
    "sessions_covariate",
    rel_effect_size=0.1,
)
# Solve for power. Infer number of observations from the sample.
print(orders_per_session.solve_power(data, "power"))
#> power effect_size rel_effect_size n_obs
#> 74% 0.0261 10% 4000
```
Source code in src/tea_tasting/metrics/mean.py
### aggr_cols: AggrCols (property)

Columns to be aggregated for a metric analysis.
### analyze(data, control, treatment, variant=None)

Analyze a metric in an experiment.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `data` | `DataFrame \| Table \| dict[Any, Aggregates]` | Experimental data. | *required* |
| `control` | `Any` | Control variant. | *required* |
| `treatment` | `Any` | Treatment variant. | *required* |
| `variant` | `str \| None` | Variant column name. | `None` |

Returns:

| Type | Description |
|---|---|
| `R` | Analysis result. |
Source code in src/tea_tasting/metrics/base.py
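As with `Mean`, a ratio metric can be analyzed directly; the sketch below again assumes the `variant` column with values `0` and `1`, as produced by `make_users_data`:

```python
import tea_tasting as tt

data = tt.make_users_data(seed=42)
orders_per_session = tt.RatioOfMeans("orders", "sessions")
# Analyze the ratio metric directly, outside of an Experiment.
result = orders_per_session.analyze(data, control=0, treatment=1, variant="variant")
print(result.rel_effect_size)
```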
### analyze_aggregates(control, treatment)

Analyze a metric in an experiment using aggregated statistics.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `control` | `Aggregates` | Control data. | *required* |
| `treatment` | `Aggregates` | Treatment data. | *required* |

Returns:

| Type | Description |
|---|---|
| `MeanResult` | Analysis result. |
Source code in src/tea_tasting/metrics/mean.py
### solve_power(data, parameter='rel_effect_size')

Solve for a parameter of the power of a test.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `data` | `DataFrame \| Table \| Aggregates` | Sample data. | *required* |
| `parameter` | `Literal['power', 'effect_size', 'rel_effect_size', 'n_obs']` | Parameter name. | `'rel_effect_size'` |

Returns:

| Type | Description |
|---|---|
| `S` | Power analysis result. |
Source code in src/tea_tasting/metrics/base.py
### solve_power_from_aggregates(data, parameter='rel_effect_size')

Solve for a parameter of the power of a test.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `data` | `Aggregates` | Sample data. | *required* |
| `parameter` | `Literal['power', 'effect_size', 'rel_effect_size', 'n_obs']` | Parameter name. | `'rel_effect_size'` |

Returns:

| Type | Description |
|---|---|
| `MeanPowerResults` | Power analysis result. |