def process(b: int):
    # sample bootstrap from batch logged bandit feedback
    boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(
        test_size=test_size, is_timeseries_split=True, random_state=b
    )
    # train an evaluation policy on the training set of the logged bandit feedback data
    counterfactual_policy.fit(
        context=boot_bandit_feedback["context"],
        action=boot_bandit_feedback["action"],
        reward=boot_bandit_feedback["reward"],
        pscore=boot_bandit_feedback["pscore"],
        position=boot_bandit_feedback["position"],
    )
    # make action selections (predictions) on the test set
    action_dist = counterfactual_policy.predict(
        context=boot_bandit_feedback["context_test"]
    )
    # estimate the policy value of the evaluation policy by IPW
    ipw = InverseProbabilityWeighting()
    return ipw.estimate_policy_value(
        reward=boot_bandit_feedback["reward_test"],
        action=boot_bandit_feedback["action_test"],
        position=boot_bandit_feedback["position_test"],
        pscore=boot_bandit_feedback["pscore_test"],
        action_dist=action_dist,
    )
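# NOTE: a minimal sketch (not part of the original script) of how `process` might be
# driven over bootstrap iterations in parallel with joblib; `n_boot_samples` and
# `n_jobs` are assumed to be defined by the surrounding script.
from joblib import Parallel, delayed

processed = Parallel(backend="multiprocessing", n_jobs=n_jobs)(
    [delayed(process)(b) for b in np.arange(n_boot_samples)]
)
ope_results = np.array(processed)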
def test_ipw_init_using_invalid_inputs(
    lambda_,
    use_estimated_pscore,
    err,
    description,
):
    with pytest.raises(err, match=f"{description}*"):
        _ = InverseProbabilityWeighting(
            lambda_=lambda_, use_estimated_pscore=use_estimated_pscore
        )
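# NOTE: a hypothetical example of how the test above could be parametrized; the
# concrete invalid values and expected message fragments are assumptions, not the
# project's actual cases. In the test suite, a decorator such as
# @pytest.mark.parametrize(
#     "lambda_, use_estimated_pscore, err, description", invalid_input_of_ipw_init
# )
# would sit directly above test_ipw_init_using_invalid_inputs.
invalid_input_of_ipw_init = [
    (-1.0, False, ValueError, "lambda_"),  # negative lambda_ assumed invalid
    ("1.0", False, TypeError, "lambda_"),  # non-numeric lambda_ assumed invalid
    (1.0, "True", TypeError, "use_estimated_pscore"),  # non-bool flag assumed invalid
]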
def test_ipw_using_random_evaluation_policy(
    synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray
) -> None:
    """Test the format of ipw variants using synthetic bandit data and random evaluation policy."""
    action_dist = random_action_dist
    # prepare input dict
    input_dict = {
        k: v
        for k, v in synthetic_bandit_feedback.items()
        if k in ["reward", "action", "pscore", "position"]
    }
    input_dict["action_dist"] = action_dist
    # ipw estimators can be used without estimated_rewards_by_reg_model
    for estimator in [ipw, snipw, ipw_tuning_mse, ipw_tuning_slope]:
        estimated_policy_value = estimator.estimate_policy_value(**input_dict)
        assert isinstance(
            estimated_policy_value, float
        ), f"invalid type response: {estimator}"
    # ipw with estimated pscore
    ipw_estimated_pscore = InverseProbabilityWeighting(use_estimated_pscore=True)
    snipw_estimated_pscore = SelfNormalizedInverseProbabilityWeighting(
        use_estimated_pscore=True
    )
    ipw_tuning_estimated_pscore = InverseProbabilityWeightingTuning(
        lambdas=[10, 1000], use_estimated_pscore=True
    )
    input_dict["estimated_pscore"] = input_dict["pscore"]
    del input_dict["pscore"]
    for estimator in [
        ipw_estimated_pscore,
        snipw_estimated_pscore,
        ipw_tuning_estimated_pscore,
    ]:
        estimated_policy_value = estimator.estimate_policy_value(**input_dict)
        assert isinstance(
            estimated_policy_value, float
        ), f"invalid type response: {estimator}"
    # removing necessary keys should raise a TypeError
    del input_dict["reward"]
    del input_dict["action"]
    for estimator in [ipw, snipw]:
        with pytest.raises(
            TypeError,
            match=re.escape(
                "estimate_policy_value() missing 2 required positional arguments: 'reward' and 'action'"
            ),
        ):
            _ = estimator.estimate_policy_value(**input_dict)
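# NOTE: a plausible sketch (an assumption, not the project's actual fixture code)
# of the module-level estimator instances referenced by the test above; in the
# real test module these would be defined before the test, and the
# `tuning_method` values are assumed here.
ipw = InverseProbabilityWeighting()
snipw = SelfNormalizedInverseProbabilityWeighting()
ipw_tuning_mse = InverseProbabilityWeightingTuning(
    lambdas=[10, 1000], tuning_method="mse"
)
ipw_tuning_slope = InverseProbabilityWeightingTuning(
    lambdas=[10, 1000], tuning_method="slope"
)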
# hyperparameters of the regression model used in model-dependent OPE estimators
with open("./conf/hyperparams.yaml", "rb") as f:
    hyperparams = yaml.safe_load(f)

base_model_dict = dict(
    logistic_regression=LogisticRegression,
    lightgbm=HistGradientBoostingClassifier,
    random_forest=RandomForestClassifier,
)

# compared OPE estimators
ope_estimators = [
    DirectMethod(),
    InverseProbabilityWeighting(),
    SelfNormalizedInverseProbabilityWeighting(),
    DoublyRobust(),
    SelfNormalizedDoublyRobust(),
    SwitchDoublyRobust(tau=1.0, estimator_name="switch-dr (tau=1)"),
    SwitchDoublyRobust(tau=100.0, estimator_name="switch-dr (tau=100)"),
    DoublyRobustWithShrinkage(lambda_=1.0, estimator_name="dr-os (lambda=1)"),
    DoublyRobustWithShrinkage(lambda_=100.0, estimator_name="dr-os (lambda=100)"),
]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="evaluate off-policy estimators with synthetic bandit data."
    )
    parser.add_argument(
        "--n_runs",
        type=int,
        default=1,
        help="number of simulations in the experiment.",
    )
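# NOTE: a minimal sketch (an assumption, not the script's actual code) of how the
# loaded hyperparameters and base_model_dict would typically be combined with obp's
# RegressionModel inside the experiment; `dataset`, `bandit_feedback`, `base_model`
# (a key such as "logistic_regression"), and a YAML keyed by model name are assumed.
from obp.ope import RegressionModel

regression_model = RegressionModel(
    n_actions=dataset.n_actions,
    action_context=dataset.action_context,
    base_model=base_model_dict[base_model](**hyperparams[base_model]),
)
estimated_rewards_by_reg_model = regression_model.fit_predict(
    context=bandit_feedback["context"],
    action=bandit_feedback["action"],
    reward=bandit_feedback["reward"],
    n_folds=3,  # cross-fitting
    random_state=12345,
)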
data_path = Path("../open_bandit_dataset")
obd = OpenBanditDataset(
    behavior_policy=behavior_policy, campaign=campaign, data_path=data_path
)
# hyperparameters for counterfactual policies
kwargs = dict(
    n_actions=obd.n_actions, len_list=obd.len_list, random_state=random_state
)
if counterfactual_policy == "bts":
    kwargs["alpha"] = production_prior_for_bts[campaign]["alpha"]
    kwargs["beta"] = production_prior_for_bts[campaign]["beta"]
    kwargs["batch_size"] = production_batch_size_for_bts[campaign]
policy = counterfactual_policy_dict[counterfactual_policy](**kwargs)
# compared OPE estimators
ope_estimators = [DirectMethod(), InverseProbabilityWeighting(), DoublyRobust()]
# a base ML model for the regression model used in Direct Method and Doubly Robust
base_model = CalibratedClassifierCV(LogisticRegression(**hyperparams))
# ground-truth policy value of the counterfactual policy,
# which is estimated with factual (observed) rewards (on-policy estimation)
ground_truth_policy_value = OpenBanditDataset.calc_on_policy_policy_value_estimate(
    behavior_policy=counterfactual_policy, campaign=campaign, data_path=data_path
)
evaluation_of_ope_results = {
    est.estimator_name: np.zeros(n_boot_samples) for est in ope_estimators
}
for b in np.arange(n_boot_samples):
    # sample bootstrap from batch logged bandit feedback
    boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(random_state=b)
    # run a counterfactual bandit algorithm on the logged bandit feedback data
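    # NOTE: a plausible sketch of the rest of the loop body (an assumption, not the
    # script's actual code): `action_dist` is assumed to come from running `policy`
    # on the bootstrap sample and `estimated_rewards_by_reg_model` from fitting
    # `base_model` via obp's RegressionModel; OffPolicyEvaluation is assumed to be
    # imported from obp.ope.
    ope = OffPolicyEvaluation(
        bandit_feedback=boot_bandit_feedback,
        ope_estimators=ope_estimators,
    )
    relative_ee_b = ope.evaluate_performance_of_estimators(
        ground_truth_policy_value=ground_truth_policy_value,
        action_dist=action_dist,
        estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        metric="relative-ee",
    )
    for estimator_name, relative_ee in relative_ee_b.items():
        evaluation_of_ope_results[estimator_name][b] = relative_ee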
def test_ipw_using_invalid_input_data(
    action_dist: np.ndarray,
    action: np.ndarray,
    reward: np.ndarray,
    pscore: np.ndarray,
    position: np.ndarray,
    use_estimated_pscore: bool,
    estimated_pscore: np.ndarray,
    description: str,
) -> None:
    # prepare ipw instances
    ipw = InverseProbabilityWeighting(use_estimated_pscore=use_estimated_pscore)
    snipw = SelfNormalizedInverseProbabilityWeighting(
        use_estimated_pscore=use_estimated_pscore
    )
    sgipw = SubGaussianInverseProbabilityWeighting(
        use_estimated_pscore=use_estimated_pscore
    )
    ipw_tuning = InverseProbabilityWeightingTuning(
        lambdas=[10, 1000], use_estimated_pscore=use_estimated_pscore
    )
    sgipw_tuning = SubGaussianInverseProbabilityWeightingTuning(
        lambdas=[0.01, 0.1], use_estimated_pscore=use_estimated_pscore
    )
    # every ipw variant should reject the invalid inputs in both
    # estimate_policy_value and estimate_interval
    for estimator in [ipw, snipw, ipw_tuning, sgipw, sgipw_tuning]:
        for method in [estimator.estimate_policy_value, estimator.estimate_interval]:
            with pytest.raises(ValueError, match=f"{description}*"):
                _ = method(
                    action_dist=action_dist,
                    action=action,
                    reward=reward,
                    pscore=pscore,
                    position=position,
                    estimated_pscore=estimated_pscore,
                )
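# NOTE: a hypothetical example (an assumption, not the project's actual test data)
# of the kind of parametrized case that would drive the test above: the reward
# array is one entry shorter than the action array, which the estimators' input
# validation is expected to reject; the `description` fragment is also assumed.
invalid_input_of_ipw = [
    (
        np.ones((4, 2, 1)) / 2,  # action_dist: uniform over 2 actions, 4 rounds
        np.zeros(4, dtype=int),  # action
        np.zeros(3, dtype=int),  # reward with a mismatched length
        np.ones(4) / 2,          # pscore
        np.zeros(4, dtype=int),  # position
        False,                   # use_estimated_pscore
        None,                    # estimated_pscore
        "Expected `action.shape[0]",  # assumed fragment of the error message
    ),
]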
def main(cfg: DictConfig) -> None:
    print(cfg)
    logger.info(f"The current working directory is {Path().cwd()}")
    start_time = time.time()
    logger.info("initializing experimental condition..")

    # compared OPE estimators
    lambdas = list(dict(cfg.estimator_hyperparams)["lambdas"])
    ope_estimators = [
        InverseProbabilityWeighting(estimator_name="IPW"),
        SelfNormalizedInverseProbabilityWeighting(estimator_name="SNIPW"),
        DirectMethod(estimator_name="DM"),
        DoublyRobust(estimator_name="DR"),
        SelfNormalizedDoublyRobust(estimator_name="SNDR"),
        SwitchDoublyRobustTuning(lambdas=lambdas, estimator_name="Switch-DR"),
        DoublyRobustWithShrinkageTuning(lambdas=lambdas, estimator_name="DRos"),
    ]

    # configurations
    n_seeds = cfg.setting.n_seeds
    sample_size = cfg.setting.sample_size
    reg_model = cfg.setting.reg_model
    campaign = cfg.setting.campaign
    behavior_policy = cfg.setting.behavior_policy
    test_size = cfg.setting.test_size
    is_timeseries_split = cfg.setting.is_timeseries_split
    n_folds = cfg.setting.n_folds
    obd_path = (
        Path().cwd().parents[5] / "open_bandit_dataset"
        if cfg.setting.is_full_obd
        else None
    )
    random_state = cfg.setting.random_state
    np.random.seed(random_state)

    # define dataset
    dataset_ts = OpenBanditDataset(
        behavior_policy="bts", campaign=campaign, data_path=obd_path
    )
    dataset_ur = OpenBanditDataset(
        behavior_policy="random", campaign=campaign, data_path=obd_path
    )

    # prepare logged bandit feedback and evaluation policies
    if behavior_policy == "random":
        if is_timeseries_split:
            bandit_feedback_ur = dataset_ur.obtain_batch_bandit_feedback(
                test_size=test_size,
                is_timeseries_split=True,
            )[0]
        else:
            bandit_feedback_ur = dataset_ur.obtain_batch_bandit_feedback()
        bandit_feedbacks = [bandit_feedback_ur]
        # obtain the ground-truth policy value
        ground_truth_ts = OpenBanditDataset.calc_on_policy_policy_value_estimate(
            behavior_policy="bts",
            campaign=campaign,
            data_path=obd_path,
            test_size=test_size,
            is_timeseries_split=is_timeseries_split,
        )
        # obtain action choice probabilities and define evaluation policies
        policy_ts = BernoulliTS(
            n_actions=dataset_ts.n_actions,
            len_list=dataset_ts.len_list,
            random_state=random_state,
            is_zozotown_prior=True,
            campaign=campaign,
        )
        action_dist_ts = policy_ts.compute_batch_action_dist(n_rounds=1000000)
        evaluation_policies = [(ground_truth_ts, action_dist_ts)]
    else:
        if is_timeseries_split:
            bandit_feedback_ts = dataset_ts.obtain_batch_bandit_feedback(
                test_size=test_size,
                is_timeseries_split=True,
            )[0]
        else:
            bandit_feedback_ts = dataset_ts.obtain_batch_bandit_feedback()
        bandit_feedbacks = [bandit_feedback_ts]
        # obtain the ground-truth policy value
        ground_truth_ur = OpenBanditDataset.calc_on_policy_policy_value_estimate(
            behavior_policy="random",
            campaign=campaign,
            data_path=obd_path,
            test_size=test_size,
            is_timeseries_split=is_timeseries_split,
        )
        # obtain action choice probabilities and define evaluation policies
        policy_ur = Random(
            n_actions=dataset_ur.n_actions,
            len_list=dataset_ur.len_list,
            random_state=random_state,
        )
        action_dist_ur = policy_ur.compute_batch_action_dist(n_rounds=1000000)
        evaluation_policies = [(ground_truth_ur, action_dist_ur)]

    # regression models used in OPE estimators
    hyperparams = dict(cfg.reg_model_hyperparams)[reg_model]
    regression_models = [reg_model_dict[reg_model](**hyperparams)]

    # define an evaluator class
    evaluator = InterpretableOPEEvaluator(
        random_states=np.arange(n_seeds),
        bandit_feedbacks=bandit_feedbacks,
        evaluation_policies=evaluation_policies,
        ope_estimators=ope_estimators,
        regression_models=regression_models,
    )

    # conduct an evaluation of OPE experiment
logger.info("experiment started") _ = evaluator.estimate_policy_value(sample_size=sample_size, n_folds_=n_folds) # calculate statistics mean = evaluator.calculate_mean(root=True) mean_scaled = evaluator.calculate_mean(scale=True, root=True) # save results of the evaluation of off-policy estimators log_path = Path("./outputs") log_path.mkdir(exist_ok=True, parents=True) # save root mse root_mse_df = DataFrame() root_mse_df["estimator"] = list(mean.keys()) root_mse_df["mean"] = list(mean.values()) root_mse_df["mean(scaled)"] = list(mean_scaled.values()) root_mse_df.to_csv(log_path / "root_mse.csv") # conduct pairwise t-tests se_df = DataFrame(evaluator.calculate_squared_error()) se_df = DataFrame(se_df.stack()).reset_index(1) se_df.rename(columns={"level_1": "estimators", 0: "se"}, inplace=True) nonparam_ttests = (pg.pairwise_ttests( data=se_df, dv="se", parametric=False, between="estimators", ).round(4).drop(["Contrast", "Parametric", "Paired"], axis=1)) nonparam_ttests.to_csv(log_path / "nonparam_ttests.csv") # save reg model metrics DataFrame(evaluator.reg_model_metrics).describe().to_csv( log_path / "reg_model_metrics.csv") # print result print(root_mse_df) experiment = f"{campaign}-{behavior_policy}-{sample_size}" elapsed_time = np.round((time.time() - start_time) / 60, 2) logger.info(f"finish experiment {experiment} in {elapsed_time}min")
    # sample bootstrap from batch logged bandit feedback
    boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(
        test_size=test_size, is_timeseries_split=True, random_state=b
    )
    # train an evaluation policy on the training set of the logged bandit feedback data
    evaluation_policy.fit(
        context=boot_bandit_feedback["context"],
        action=boot_bandit_feedback["action"],
        reward=boot_bandit_feedback["reward"],
        pscore=boot_bandit_feedback["pscore"],
        position=boot_bandit_feedback["position"],
    )
    # make action selections (predictions) on the test set
    action_dist = evaluation_policy.predict(
        context=boot_bandit_feedback["context_test"]
    )
    # estimate the policy value of the evaluation policy by IPW
    # and record it relative to the ground-truth policy value
    ipw = InverseProbabilityWeighting()
    ope_results[b] = (
        ipw.estimate_policy_value(
            reward=boot_bandit_feedback["reward_test"],
            action=boot_bandit_feedback["action_test"],
            position=boot_bandit_feedback["position_test"],
            pscore=boot_bandit_feedback["pscore_test"],
            action_dist=action_dist,
        )
        / ground_truth
    )
    print(f"{b+1}th iteration: {np.round((time.time() - start) / 60, 2)}min")

ope_results_dict = estimate_confidence_interval_by_bootstrap(
    samples=ope_results, random_state=random_state
)
ope_results_dict["mean(no-boot)"] = ope_results.mean()
ope_results_dict["std"] = np.std(ope_results, ddof=1)
ope_results_df = pd.DataFrame(ope_results_dict, index=["ipw"])
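# NOTE: a hypothetical follow-up (an assumption, not the script's actual code)
# showing how the relative policy-value estimates collected above might be
# inspected and persisted; `log_path` is an assumed output directory and Path is
# assumed to be imported from pathlib as in the other scripts.
print(ope_results_df)
log_path = Path("./logs")
log_path.mkdir(exist_ok=True, parents=True)
ope_results_df.to_csv(log_path / "ipw_relative_policy_value.csv")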