# Imports assumed at the top of this test module; ERROR_THRESHOLD is the
# shared tolerance defined in the test suite's constants module.
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor

from causalml.inference.meta import (
    BaseRLearner,
    BaseSLearner,
    BaseTLearner,
    BaseXLearner,
)
from causalml.inference.tmle import TMLELearner
from causalml.metrics import ape, get_cumgain, gini

from .const import ERROR_THRESHOLD


def test_BaseRLearner_without_p(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )

    # the treatment effect column must be named 'tau' to match
    # treatment_effect_col='tau' below
    auuc_metrics = pd.DataFrame(
        {
            'cate_p': cate_p.flatten(),
            'W': treatment,
            'y': y,
            'tau': tau,
        }
    )

    cumgain = get_cumgain(
        auuc_metrics, outcome_col='y', treatment_col='W', treatment_effect_col='tau'
    )

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain['cate_p'].sum() > cumgain['Random'].sum()

# variant of the test above using the Gini coefficient as the CATE quality
# check; renamed so it doesn't shadow test_BaseRLearner_without_p
def test_BaseRLearner_without_p_gini(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > 0.5

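# For reference, hedged sketches of the two scalar metrics used above. The
# real implementations live in causalml.metrics (`ape`, `gini`) and may differ
# in detail; these are minimal illustrative equivalents, not the library's code.
def _ape_sketch(y_true, y_pred):
    """Absolute percentage error of a scalar estimate."""
    return np.abs((y_pred - y_true) / y_true)


def _gini_sketch(actual, pred):
    """Normalized Gini: how well `pred` ranks `actual` (1 = perfect order)."""

    def _g(a, p):
        order = np.argsort(p)[::-1]  # sort outcomes by prediction, descending
        cum = np.cumsum(np.asarray(a)[order]) / np.sum(a)
        n = len(a)
        return np.sum(cum) / n - (n + 1) / (2.0 * n)

    return _g(actual, pred) / _g(actual, actual)
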
def test_pandas_input(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    # convert to pandas types
    y = pd.Series(y)
    X = pd.DataFrame(X)
    treatment = pd.Series(treatment)

    try:
        learner = BaseSLearner(learner=LinearRegression())
        ate_p, lb, ub = learner.estimate_ate(
            X=X, treatment=treatment, y=y, return_ci=True
        )
    except AttributeError:
        assert False
    try:
        learner = BaseTLearner(learner=LinearRegression())
        ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    except AttributeError:
        assert False
    try:
        learner = BaseXLearner(learner=LinearRegression())
        ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    except AttributeError:
        assert False
    try:
        learner = BaseRLearner(learner=LinearRegression())
        ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    except AttributeError:
        assert False
    try:
        learner = TMLELearner(learner=LinearRegression())
        ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    except AttributeError:
        assert False

def test_BaseRLearner(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check that a pre-trained model reproduces the same ATE estimate
    ate_p_pt, lb_pt, ub_pt = learner.estimate_ate(
        X=X, treatment=treatment, y=y, p=e, pretrain=True
    )
    assert (ate_p_pt == ate_p) and (lb_pt == lb) and (ub_pt == ub)

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, treatment=treatment, y=y, p=e, return_ci=True, n_bootstraps=10
    )

    # the treatment effect column must be named "tau" to match
    # treatment_effect_col="tau" below
    auuc_metrics = pd.DataFrame(
        {
            "cate_p": cate_p.flatten(),
            "W": treatment,
            "y": y,
            "tau": tau,
        }
    )

    cumgain = get_cumgain(
        auuc_metrics, outcome_col="y", treatment_col="W", treatment_effect_col="tau"
    )

    # Check if the cumulative gain when using the model's prediction is
    # higher than it would be under random targeting
    assert cumgain["cate_p"].sum() > cumgain["Random"].sum()

    # basic test of using outcome_learner and effect_learner
    learner = BaseRLearner(
        learner=XGBRegressor(),
        outcome_learner=RandomForestRegressor(),
        effect_learner=RandomForestRegressor(),
    )
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, p=e)
    assert (ate_p >= lb) and (ate_p <= ub)
    # might need to look into the higher APE with the combined learners
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD * 5
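
# A minimal sketch of the `generate_regression_data` fixture these tests
# depend on. The actual fixture lives in conftest.py, and the mode/n/p/sigma
# values here are illustrative assumptions; it is left commented out so it
# does not override the real fixture. causalml.dataset.synthetic_data returns
# (y, X, treatment, tau, b, e) in the order unpacked above.
#
# import pytest
# from causalml.dataset import synthetic_data
#
#
# @pytest.fixture
# def generate_regression_data():
#     def _generate():
#         return synthetic_data(mode=1, n=1000, p=8, sigma=1.0)
#
#     return _generate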