def fit(self, X, p, treatment, y, verbose=True): """Fit the treatment effect and outcome models of the R learner. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix p (np.ndarray or pd.Series or dict): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1) treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector """ X, treatment, y = convert_pd_to_np(X, treatment, y) check_treatment_vector(treatment, self.control_name) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() check_p_conditions(p, self.t_groups) if isinstance(p, (np.ndarray, pd.Series)): treatment_name = self.t_groups[0] p = {treatment_name: convert_pd_to_np(p)} elif isinstance(p, dict): p = { treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items() } self._classes = {group: i for i, group in enumerate(self.t_groups)} self.models_tau = { group: deepcopy(self.model_tau) for group in self.t_groups } self.vars_c = {} self.vars_t = {} if verbose: logger.info('generating out-of-fold CV outcome estimates') yhat = cross_val_predict(self.model_mu, X, y, cv=self.cv, method='predict_proba', n_jobs=-1)[:, 1] for group in self.t_groups: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] y_filt = y[mask] yhat_filt = yhat[mask] p_filt = p[group][mask] w = (treatment_filt == group).astype(int) if verbose: logger.info( 'training the treatment effect model for {} with R-loss'. format(group)) self.models_tau[group].fit(X_filt, (y_filt - yhat_filt) / (w - p_filt), sample_weight=(w - p_filt)**2) self.vars_c[group] = (y_filt[w == 0] - yhat_filt[w == 0]).var() self.vars_t[group] = (y_filt[w == 1] - yhat_filt[w == 1]).var()
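# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the R-loss fit performed
# for a single treatment group, spelled out with plain scikit-learn pieces on
# synthetic data. Names such as `tau_model` and `yhat_demo` are placeholders,
# not library API, and cross-validation settings are arbitrary.
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

rng = np.random.default_rng(0)
n = 2000
X_demo = rng.normal(size=(n, 5))
w_demo = rng.binomial(1, 0.5, size=n)                      # 1 = treated, 0 = control
logit = X_demo[:, 0] + 0.8 * w_demo * X_demo[:, 1]
y_demo = rng.binomial(1, 1 / (1 + np.exp(-logit)))         # binary outcome, as in this classifier variant

# Out-of-fold nuisance estimates: outcome probability m(X) and propensity e(X).
yhat_demo = cross_val_predict(GradientBoostingClassifier(), X_demo, y_demo,
                              cv=5, method='predict_proba')[:, 1]
p_demo = cross_val_predict(LogisticRegression(), X_demo, w_demo,
                           cv=5, method='predict_proba')[:, 1]

# R-loss: regress the pseudo-outcome (y - m(X)) / (w - e(X)) on X with weights (w - e(X))^2.
tau_model = GradientBoostingRegressor()
tau_model.fit(X_demo, (y_demo - yhat_demo) / (w_demo - p_demo),
              sample_weight=(w_demo - p_demo) ** 2)
cate_demo = tau_model.predict(X_demo)                      # estimated treatment effects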
def fit_predict(self, X, treatment, y, p=None, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, verbose=True): """Fit the treatment effect and outcome models of the R learner and predict treatment effects. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. return_ci (bool): whether to return confidence intervals n_bootstraps (int): number of bootstrap iterations bootstrap_size (int): number of samples per bootstrap verbose (bool): whether to output progress logs Returns: (numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment]. If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment], UB [n_samples, n_treatment] """ X, treatment, y = convert_pd_to_np(X, treatment, y) self.fit(X, treatment, y, p, verbose=verbose) te = self.predict(X) if p is None: p = self.propensity else: check_p_conditions(p, self.t_groups) if isinstance(p, (np.ndarray, pd.Series)): treatment_name = self.t_groups[0] p = {treatment_name: convert_pd_to_np(p)} elif isinstance(p, dict): p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()} if not return_ci: return te else: t_groups_global = self.t_groups _classes_global = self._classes model_mu_global = deepcopy(self.model_mu) models_tau_global = deepcopy(self.models_tau) te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)) logger.info('Bootstrap Confidence Intervals') for i in tqdm(range(n_bootstraps)): te_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size) te_bootstraps[:, :, i] = te_b te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2) te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2) # set member variables back to global (currently last bootstrapped outcome) self.t_groups = t_groups_global self._classes = _classes_global self.model_mu = deepcopy(model_mu_global) self.models_tau = deepcopy(models_tau_global) return (te, te_lower, te_upper)
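# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the percentile-bootstrap
# pattern behind the return_ci branch, with a plain regressor standing in for
# the meta-learner. Each iteration resamples rows, refits, and predicts on the
# original X; the CI comes from per-row percentiles. All names and data below
# are synthetic placeholders.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.default_rng(0)
n = 1000
X_demo = rng.normal(size=(n, 3))
y_demo = X_demo[:, 0] + rng.normal(size=n)

n_bootstraps, bootstrap_size, ate_alpha = 50, 500, 0.05
preds = np.zeros((n, n_bootstraps))

for b in range(n_bootstraps):
    idx = rng.choice(n, size=bootstrap_size, replace=True)   # resample rows with replacement
    model_b = GradientBoostingRegressor().fit(X_demo[idx], y_demo[idx])
    preds[:, b] = model_b.predict(X_demo)                    # always predict on the original X

lb = np.percentile(preds, (ate_alpha / 2) * 100, axis=1)
ub = np.percentile(preds, (1 - ate_alpha / 2) * 100, axis=1)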
def predict(self, X, p, treatment=None, y=None, return_components=False, verbose=True): """Predict treatment effects. Args: X (np.matrix): a feature matrix p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case or, a dictionary of treatment groups that map to propensity vectors of float (0,1) treatment (np.array, optional): a treatment vector y (np.array, optional): an optional outcome vector Returns: (numpy.ndarray): Predictions of treatment effects. """ check_p_conditions(p, self.t_groups) if isinstance(p, np.ndarray): treatment_name = self.t_groups[0] p = {treatment_name: p} te = np.zeros((X.shape[0], self.t_groups.shape[0])) dhat_cs = {} dhat_ts = {} for i, group in enumerate(self.t_groups): model_tau_c = self.models_tau_c[group] model_tau_t = self.models_tau_t[group] dhat_cs[group] = model_tau_c.predict(X) dhat_ts[group] = model_tau_t.predict(X) _te = (p[group] * dhat_cs[group] + (1 - p[group]) * dhat_ts[group]).reshape(-1, 1) te[:, i] = np.ravel(_te) if (y is not None) and (treatment is not None) and verbose: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] y_filt = y[mask] w = (treatment_filt == group).astype(int) yhat = np.zeros_like(y_filt, dtype=float) yhat[w == 0] = self.models_mu_c[group].predict_proba( X_filt[w == 0])[:, 1] yhat[w == 1] = self.models_mu_t[group].predict_proba( X_filt[w == 1])[:, 1] logger.info('Error metrics for group {}'.format(group)) classification_metrics(y_filt, yhat, w) if not return_components: return te else: return te, dhat_cs, dhat_ts
def predict(self, X, p, treatment=None, y=None, return_components=False, verbose=True):
    """Predict treatment effects.

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        p (np.ndarray or pd.Series or dict): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1)
        treatment (np.array or pd.Series, optional): a treatment vector
        y (np.array or pd.Series, optional): an outcome vector
        return_components (bool, optional): whether to return outcome for treatment and control separately
        verbose (bool, optional): whether to output progress logs

    Returns:
        (numpy.ndarray): Predictions of treatment effects.
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)

    check_p_conditions(p, self.t_groups)
    if isinstance(p, (np.ndarray, pd.Series)):
        treatment_name = self.t_groups[0]
        p = {treatment_name: convert_pd_to_np(p)}
    elif isinstance(p, dict):
        p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}

    te = np.zeros((X.shape[0], self.t_groups.shape[0]))
    dhat_cs = {}
    dhat_ts = {}

    for i, group in enumerate(self.t_groups):
        model_tau_c = self.models_tau_c[group]
        model_tau_t = self.models_tau_t[group]
        dhat_cs[group] = model_tau_c.predict(X)
        dhat_ts[group] = model_tau_t.predict(X)

        _te = (p[group] * dhat_cs[group] + (1 - p[group]) * dhat_ts[group]).reshape(-1, 1)
        te[:, i] = np.ravel(_te)

        if (y is not None) and (treatment is not None) and verbose:
            mask = (treatment == group) | (treatment == self.control_name)
            treatment_filt = treatment[mask]
            X_filt = X[mask]
            y_filt = y[mask]
            w = (treatment_filt == group).astype(int)

            yhat = np.zeros_like(y_filt, dtype=float)
            yhat[w == 0] = self.models_mu_c[group].predict(X_filt[w == 0])
            yhat[w == 1] = self.models_mu_t[group].predict(X_filt[w == 1])

            logger.info('Error metrics for group {}'.format(group))
            regression_metrics(y_filt, yhat, w)

    if not return_components:
        return te
    else:
        return te, dhat_cs, dhat_ts
def _format_p(p, t_groups): """Format propensity scores into a dictionary of {treatment group: propensity scores}. Args: p (np.ndarray, pd.Series, or dict): propensity scores t_groups (list): treatment group names. Returns: dict of {treatment group: propensity scores} """ check_p_conditions(p, t_groups) if isinstance(p, (np.ndarray, pd.Series)): treatment_name = t_groups[0] p = {treatment_name: convert_pd_to_np(p)} elif isinstance(p, dict): p = { treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items() } return p
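# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): what the normalization in
# _format_p amounts to for the two accepted input shapes, using plain
# numpy/pandas in place of the library helpers. Names below are placeholders.
import numpy as np
import pandas as pd

t_groups_demo = ['treatment_a']
p_array = pd.Series([0.4, 0.6, 0.5])                 # single-treatment input
p_dict = {'treatment_a': np.array([0.4, 0.6, 0.5])}  # already keyed by group

# An array/Series is wrapped into a dict keyed by the (only) treatment group;
# a dict just has its values converted to numpy arrays.
formatted_from_array = {t_groups_demo[0]: p_array.to_numpy()}
formatted_from_dict = {g: np.asarray(v) for g, v in p_dict.items()}
print(formatted_from_array, formatted_from_dict)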
def fit(self, X, treatment, y, p=None): """Fit the inference model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. """ X, treatment, y = convert_pd_to_np(X, treatment, y) check_treatment_vector(treatment, self.control_name) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() if p is None: logger.info('Generating propensity score') p = dict() p_model = dict() for group in self.t_groups: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] w_filt = (treatment_filt == group).astype(int) w = (treatment == group).astype(int) p[group], p_model[group] = compute_propensity_score( X=X_filt, treatment=w_filt, X_pred=X, treatment_pred=w) self.propensity_model = p_model self.propensity = p else: check_p_conditions(p, self.t_groups) if isinstance(p, (np.ndarray, pd.Series)): treatment_name = self.t_groups[0] p = {treatment_name: convert_pd_to_np(p)} elif isinstance(p, dict): p = { treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items() } self._classes = {group: i for i, group in enumerate(self.t_groups)} self.models_mu_c = { group: deepcopy(self.model_mu_c) for group in self.t_groups } self.models_mu_t = { group: deepcopy(self.model_mu_t) for group in self.t_groups } self.models_tau_c = { group: deepcopy(self.model_tau_c) for group in self.t_groups } self.models_tau_t = { group: deepcopy(self.model_tau_t) for group in self.t_groups } self.vars_c = {} self.vars_t = {} for group in self.t_groups: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] y_filt = y[mask] w = (treatment_filt == group).astype(int) # Train outcome models self.models_mu_c[group].fit(X_filt[w == 0], y_filt[w == 0]) self.models_mu_t[group].fit(X_filt[w == 1], y_filt[w == 1]) # Calculate variances and treatment effects var_c = (y_filt[w == 0] - self.models_mu_c[group].predict(X_filt[w == 0])).var() self.vars_c[group] = var_c var_t = (y_filt[w == 1] - self.models_mu_t[group].predict(X_filt[w == 1])).var() self.vars_t[group] = var_t # Train treatment models d_c = self.models_mu_t[group].predict( X_filt[w == 0]) - y_filt[w == 0] d_t = y_filt[w == 1] - self.models_mu_c[group].predict( X_filt[w == 1]) self.models_tau_c[group].fit(X_filt[w == 0], d_c) self.models_tau_t[group].fit(X_filt[w == 1], d_t)
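# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the X-learner steps that
# fit() and predict() implement for one treatment group, with plain
# scikit-learn models on synthetic data. All names below are placeholders and
# the propensity model is an arbitrary stand-in.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n = 2000
X_demo = rng.normal(size=(n, 5))
w_demo = rng.binomial(1, 0.5, size=n)
y_demo = X_demo[:, 0] + 0.5 * w_demo * X_demo[:, 1] + rng.normal(size=n)

# Stage 1: outcome models on the control and treated subsets.
mu_c = GradientBoostingRegressor().fit(X_demo[w_demo == 0], y_demo[w_demo == 0])
mu_t = GradientBoostingRegressor().fit(X_demo[w_demo == 1], y_demo[w_demo == 1])

# Stage 2: imputed treatment effects, as in fit() above.
d_c = mu_t.predict(X_demo[w_demo == 0]) - y_demo[w_demo == 0]   # for control units
d_t = y_demo[w_demo == 1] - mu_c.predict(X_demo[w_demo == 1])   # for treated units
tau_c = GradientBoostingRegressor().fit(X_demo[w_demo == 0], d_c)
tau_t = GradientBoostingRegressor().fit(X_demo[w_demo == 1], d_t)

# Stage 3: propensity-weighted combination, as in predict() above.
p_demo = LogisticRegression().fit(X_demo, w_demo).predict_proba(X_demo)[:, 1]
cate_demo = p_demo * tau_c.predict(X_demo) + (1 - p_demo) * tau_t.predict(X_demo)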
def estimate_ate(self, X, treatment, y, p=None, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000):
    """Estimate the Average Treatment Effect (ATE).

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
        bootstrap_ci (bool): whether to run bootstrap for confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap

    Returns:
        The mean and confidence interval (LB, UB) of the ATE estimate.
    """
    te, dhat_cs, dhat_ts = self.fit_predict(X, treatment, y, p, return_components=True)
    X, treatment, y = convert_pd_to_np(X, treatment, y)

    if p is None:
        p = self.propensity
    else:
        check_p_conditions(p, self.t_groups)
    if isinstance(p, np.ndarray):
        treatment_name = self.t_groups[0]
        p = {treatment_name: convert_pd_to_np(p)}
    elif isinstance(p, dict):
        p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}

    ate = np.zeros(self.t_groups.shape[0])
    ate_lb = np.zeros(self.t_groups.shape[0])
    ate_ub = np.zeros(self.t_groups.shape[0])

    for i, group in enumerate(self.t_groups):
        _ate = te[:, i].mean()

        mask = (treatment == group) | (treatment == self.control_name)
        treatment_filt = treatment[mask]
        w = (treatment_filt == group).astype(int)
        prob_treatment = float(sum(w)) / w.shape[0]

        dhat_c = dhat_cs[group][mask]
        dhat_t = dhat_ts[group][mask]
        p_filt = p[group][mask]

        # SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009.
        # "Recent Developments in the Econometrics of Program Evaluation." Journal of Economic Literature
        se = np.sqrt((self.vars_t[group] / prob_treatment
                      + self.vars_c[group] / (1 - prob_treatment)
                      + (p_filt * dhat_c + (1 - p_filt) * dhat_t).var())
                     / w.shape[0])

        _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
        _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)

        ate[i] = _ate
        ate_lb[i] = _ate_lb
        ate_ub[i] = _ate_ub

    if not bootstrap_ci:
        return ate, ate_lb, ate_ub
    else:
        t_groups_global = self.t_groups
        _classes_global = self._classes
        models_mu_c_global = deepcopy(self.models_mu_c)
        models_mu_t_global = deepcopy(self.models_mu_t)
        models_tau_c_global = deepcopy(self.models_tau_c)
        models_tau_t_global = deepcopy(self.models_tau_t)

        logger.info('Bootstrap Confidence Intervals for ATE')
        ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))

        for n in tqdm(range(n_bootstraps)):
            cate_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
            ate_bootstraps[:, n] = cate_b.mean()

        ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
        ate_upper = np.percentile(ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1)

        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.models_mu_c = deepcopy(models_mu_c_global)
        self.models_mu_t = deepcopy(models_mu_t_global)
        self.models_tau_c = deepcopy(models_tau_c_global)
        self.models_tau_t = deepcopy(models_tau_t_global)

        return ate, ate_lower, ate_upper
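# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the ATE standard error
# assembled as in the Imbens-Wooldridge lower-bound formula used above, with
# made-up inputs. `var_t`/`var_c` play the role of self.vars_t/self.vars_c and
# the arrays stand in for the filtered propensity scores and CATE components.
import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
n = 1000
var_t, var_c = 1.2, 0.9                  # residual variances of the outcome models
prob_treatment = 0.4                     # share of treated units in the filtered sample
p_filt = rng.uniform(0.2, 0.8, size=n)   # propensity scores
dhat_c = rng.normal(0.5, 0.3, size=n)    # CATE model trained on control units
dhat_t = rng.normal(0.5, 0.3, size=n)    # CATE model trained on treated units

se = np.sqrt((var_t / prob_treatment
              + var_c / (1 - prob_treatment)
              + (p_filt * dhat_c + (1 - p_filt) * dhat_t).var())
             / n)
ate, ate_alpha = 0.5, 0.05
lb = ate - se * norm.ppf(1 - ate_alpha / 2)
ub = ate + se * norm.ppf(1 - ate_alpha / 2)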
def fit(self, X, p, treatment, y, verbose=True): """Fit the treatment effect and outcome models of the R learner. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix p (np.ndarray or pd.Series or dict): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1) treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector """ check_treatment_vector(treatment, self.control_name) X, treatment, y = convert_pd_to_np(X, treatment, y) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() check_p_conditions(p, self.t_groups) if isinstance(p, np.ndarray): treatment_name = self.t_groups[0] p = {treatment_name: convert_pd_to_np(p)} elif isinstance(p, dict): p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()} self._classes = {group: i for i, group in enumerate(self.t_groups)} self.models_tau = {group: deepcopy(self.model_tau) for group in self.t_groups} self.vars_c = {} self.vars_t = {} if verbose: logger.info('generating out-of-fold CV outcome estimates') yhat = cross_val_predict(self.model_mu, X, y, cv=self.cv, n_jobs=-1) for group in self.t_groups: treatment_mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[treatment_mask] w = (treatment_filt == group).astype(int) X_filt = X[treatment_mask] y_filt = y[treatment_mask] yhat_filt = yhat[treatment_mask] p_filt = p[group][treatment_mask] if verbose: logger.info('training the treatment effect model for {} with R-loss'.format(group)) if self.early_stopping: X_train_filt, X_test_filt, y_train_filt, y_test_filt, yhat_train_filt, yhat_test_filt, \ w_train, w_test, p_train_filt, p_test_filt = train_test_split( X_filt, y_filt, yhat_filt, w, p_filt, test_size=self.test_size, random_state=self.random_state ) self.models_tau[group].fit(X=X_train_filt, y=(y_train_filt - yhat_train_filt) / (w_train - p_train_filt), sample_weight=(w_train - p_train_filt) ** 2, eval_set=[(X_test_filt, (y_test_filt - yhat_test_filt) / (w_test - p_test_filt))], sample_weight_eval_set=[(w_test - p_test_filt) ** 2], eval_metric=self.effect_learner_eval_metric, early_stopping_rounds=self.early_stopping_rounds, verbose=verbose) else: self.models_tau[group].fit(X_filt, (y_filt - yhat_filt) / (w - p_filt), sample_weight=(w - p_filt) ** 2, eval_metric=self.effect_learner_eval_metric) self.vars_c[group] = (y_filt[w == 0] - yhat_filt[w == 0]).var() self.vars_t[group] = (y_filt[w == 1] - yhat_filt[w == 1]).var()
def estimate_ate(self, X, p, treatment, y, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000):
    """Estimate the Average Treatment Effect (ATE).

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        p (np.ndarray or pd.Series or dict): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1)
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        bootstrap_ci (bool): whether to run bootstrap for confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap

    Returns:
        The mean and confidence interval (LB, UB) of the ATE estimate.
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)
    te = self.fit_predict(X, p, treatment, y)

    check_p_conditions(p, self.t_groups)
    if isinstance(p, np.ndarray):
        treatment_name = self.t_groups[0]
        p = {treatment_name: convert_pd_to_np(p)}
    elif isinstance(p, dict):
        p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}

    ate = np.zeros(self.t_groups.shape[0])
    ate_lb = np.zeros(self.t_groups.shape[0])
    ate_ub = np.zeros(self.t_groups.shape[0])

    for i, group in enumerate(self.t_groups):
        w = (treatment == group).astype(int)
        prob_treatment = float(sum(w)) / X.shape[0]
        _ate = te[:, i].mean()

        se = (np.sqrt((self.vars_t[group] / prob_treatment)
                      + (self.vars_c[group] / (1 - prob_treatment))
                      + te[:, i].var())
              / X.shape[0])

        _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
        _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)

        ate[i] = _ate
        ate_lb[i] = _ate_lb
        ate_ub[i] = _ate_ub

    if not bootstrap_ci:
        return ate, ate_lb, ate_ub
    else:
        t_groups_global = self.t_groups
        _classes_global = self._classes
        model_mu_global = deepcopy(self.model_mu)
        models_tau_global = deepcopy(self.models_tau)

        logger.info('Bootstrap Confidence Intervals for ATE')
        ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))

        for n in tqdm(range(n_bootstraps)):
            cate_b = self.bootstrap(X, p, treatment, y, size=bootstrap_size)
            ate_bootstraps[:, n] = cate_b.mean()

        ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
        ate_upper = np.percentile(ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1)

        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.model_mu = deepcopy(model_mu_global)
        self.models_tau = deepcopy(models_tau_global)

        return ate, ate_lower, ate_upper
def fit(self, X, treatment, y, p=None, verbose=True): """Fit the treatment effect and outcome models of the R learner. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. verbose (bool, optional): whether to output progress logs """ X, treatment, y = convert_pd_to_np(X, treatment, y) check_treatment_vector(treatment, self.control_name) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() if p is None: logger.info('Generating propensity score') p = dict() p_model = dict() for group in self.t_groups: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] w_filt = (treatment_filt == group).astype(int) w = (treatment == group).astype(int) p[group], p_model[group] = compute_propensity_score( X=X_filt, treatment=w_filt, X_pred=X, treatment_pred=w, cv=self.cv) self.propensity_model = p_model self.propensity = p else: check_p_conditions(p, self.t_groups) if isinstance(p, (np.ndarray, pd.Series)): treatment_name = self.t_groups[0] p = {treatment_name: convert_pd_to_np(p)} elif isinstance(p, dict): p = { treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items() } self._classes = {group: i for i, group in enumerate(self.t_groups)} self.models_tau = { group: deepcopy(self.model_tau) for group in self.t_groups } self.vars_c = {} self.vars_t = {} if verbose: logger.info('generating out-of-fold CV outcome estimates') yhat = cross_val_predict(self.model_mu, X, y, cv=self.cv, n_jobs=-1) for group in self.t_groups: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] y_filt = y[mask] yhat_filt = yhat[mask] p_filt = p[group][mask] w = (treatment_filt == group).astype(int) if verbose: logger.info( 'training the treatment effect model for {} with R-loss'. format(group)) self.models_tau[group].fit(X_filt, (y_filt - yhat_filt) / (w - p_filt), sample_weight=(w - p_filt)**2) self.vars_c[group] = (y_filt[w == 0] - yhat_filt[w == 0]).var() self.vars_t[group] = (y_filt[w == 1] - yhat_filt[w == 1]).var()
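# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): an out-of-fold propensity
# score of the kind the p=None branch produces, using a plain logistic
# regression as a stand-in for the library's propensity model and a generic
# clipping safeguard away from 0 and 1. All names and data are placeholders.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

rng = np.random.default_rng(0)
n = 2000
X_demo = rng.normal(size=(n, 5))
w_demo = rng.binomial(1, 1 / (1 + np.exp(-X_demo[:, 0])))   # confounded treatment assignment

p_demo = cross_val_predict(LogisticRegression(), X_demo, w_demo,
                           cv=5, method='predict_proba')[:, 1]
p_demo = np.clip(p_demo, 1e-6, 1 - 1e-6)                    # keep scores strictly inside (0, 1)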
def fit(self, X, treatment, y, p=None, seed=None): """Fit the inference model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. seed (int): random seed for cross-fitting """ X, treatment, y = convert_pd_to_np(X, treatment, y) check_treatment_vector(treatment, self.control_name) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() self._classes = {group: i for i, group in enumerate(self.t_groups)} # The estimator splits the data into 3 partitions for cross-fit on the propensity score estimation, # the outcome regression, and the treatment regression on the doubly robust estimates. The use of # the partitions is rotated so we do not lose on the sample size. cv = KFold(n_splits=3, shuffle=True, random_state=seed) split_indices = [index for _, index in cv.split(y)] self.models_mu_c = [ deepcopy(self.model_mu_c), deepcopy(self.model_mu_c), deepcopy(self.model_mu_c), ] self.models_mu_t = { group: [ deepcopy(self.model_mu_t), deepcopy(self.model_mu_t), deepcopy(self.model_mu_t), ] for group in self.t_groups } self.models_tau = { group: [ deepcopy(self.model_tau), deepcopy(self.model_tau), deepcopy(self.model_tau), ] for group in self.t_groups } if p is None: self.propensity = { group: np.zeros(y.shape[0]) for group in self.t_groups } for ifold in range(3): treatment_idx = split_indices[ifold] outcome_idx = split_indices[(ifold + 1) % 3] tau_idx = split_indices[(ifold + 2) % 3] treatment_treat, treatment_out, treatment_tau = ( treatment[treatment_idx], treatment[outcome_idx], treatment[tau_idx], ) y_out, y_tau = y[outcome_idx], y[tau_idx] X_treat, X_out, X_tau = X[treatment_idx], X[outcome_idx], X[ tau_idx] if p is None: logger.info("Generating propensity score") cur_p = dict() for group in self.t_groups: mask = (treatment_treat == group) | (treatment_treat == self.control_name) treatment_filt = treatment_treat[mask] X_filt = X_treat[mask] w_filt = (treatment_filt == group).astype(int) w = (treatment_tau == group).astype(int) cur_p[group], _ = compute_propensity_score( X=X_filt, treatment=w_filt, X_pred=X_tau, treatment_pred=w) self.propensity[group][tau_idx] = cur_p[group] else: cur_p = dict() if isinstance(p, (np.ndarray, pd.Series)): cur_p = {self.t_groups[0]: convert_pd_to_np(p[tau_idx])} else: cur_p = {g: prop[tau_idx] for g, prop in p.items()} check_p_conditions(cur_p, self.t_groups) logger.info("Generate outcome regressions") self.models_mu_c[ifold].fit( X_out[treatment_out == self.control_name], y_out[treatment_out == self.control_name], ) for group in self.t_groups: self.models_mu_t[group][ifold].fit( X_out[treatment_out == group], y_out[treatment_out == group]) logger.info("Fit pseudo outcomes from the DR formula") for group in self.t_groups: mask = (treatment_tau == group) | (treatment_tau == self.control_name) treatment_filt = treatment_tau[mask] X_filt = X_tau[mask] y_filt = y_tau[mask] w_filt = (treatment_filt == group).astype(int) p_filt = cur_p[group][mask] mu_t = self.models_mu_t[group][ifold].predict(X_filt) mu_c = self.models_mu_c[ifold].predict(X_filt) dr = ((w_filt - p_filt) / p_filt / (1 - p_filt) * (y_filt - mu_t * w_filt - mu_c * (1 - w_filt)) 
+ mu_t - mu_c) self.models_tau[group][ifold].fit(X_filt, dr)
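# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the doubly robust (AIPW)
# pseudo-outcome that the cross-fitted loop above regresses on X, written with
# plain scikit-learn models on synthetic data. Cross-fitting of the nuisance
# models is omitted for brevity; all names are placeholders.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n = 2000
X_demo = rng.normal(size=(n, 5))
w_demo = rng.binomial(1, 0.5, size=n)
y_demo = X_demo[:, 0] + 0.5 * w_demo * X_demo[:, 1] + rng.normal(size=n)

mu_c = GradientBoostingRegressor().fit(X_demo[w_demo == 0], y_demo[w_demo == 0]).predict(X_demo)
mu_t = GradientBoostingRegressor().fit(X_demo[w_demo == 1], y_demo[w_demo == 1]).predict(X_demo)
p_demo = np.clip(LogisticRegression().fit(X_demo, w_demo).predict_proba(X_demo)[:, 1], 1e-6, 1 - 1e-6)

# DR pseudo-outcome: inverse-propensity-weighted residual plus the outcome-model contrast.
dr = ((w_demo - p_demo) / (p_demo * (1 - p_demo))
      * (y_demo - mu_t * w_demo - mu_c * (1 - w_demo))
      + mu_t - mu_c)

tau_model = GradientBoostingRegressor().fit(X_demo, dr)
cate_demo = tau_model.predict(X_demo)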
def fit( self, X, assignment, treatment, y, p=None, pZ=None, seed=None, calibrate=True ): """Fit the inference model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix assignment (np.array or pd.Series): a (0,1)-valued assignment vector. The assignment is the instrumental variable that does not depend on unknown confounders. The assignment status influences treatment in a monotonic way, i.e. one can only be more likely to take the treatment if assigned. treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element corresponds to unassigned (assigned) units. Each is an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1). If None will run ElasticNetPropensityModel() to generate the propensity scores. pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1); if None will run ElasticNetPropensityModel() to generate the assignment probability score. seed (int): random seed for cross-fitting """ X, treatment, assignment, y = convert_pd_to_np(X, treatment, assignment, y) check_treatment_vector(treatment, self.control_name) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() self._classes = {group: i for i, group in enumerate(self.t_groups)} # The estimator splits the data into 3 partitions for cross-fit on the propensity score estimation, # the outcome regression, and the treatment regression on the doubly robust estimates. The use of # the partitions is rotated so we do not lose on the sample size. We do not cross-fit the assignment # score estimation as the assignment process is usually simple. 
cv = KFold(n_splits=3, shuffle=True, random_state=seed) split_indices = [index for _, index in cv.split(y)] self.models_mu_c = { group: [ deepcopy(self.model_mu_c), deepcopy(self.model_mu_c), deepcopy(self.model_mu_c), ] for group in self.t_groups } self.models_mu_t = { group: [ deepcopy(self.model_mu_t), deepcopy(self.model_mu_t), deepcopy(self.model_mu_t), ] for group in self.t_groups } self.models_tau = { group: [ deepcopy(self.model_tau), deepcopy(self.model_tau), deepcopy(self.model_tau), ] for group in self.t_groups } if p is None: self.propensity_1 = { group: np.zeros(y.shape[0]) for group in self.t_groups } # propensity scores for those assigned self.propensity_0 = { group: np.zeros(y.shape[0]) for group in self.t_groups } # propensity scores for those not assigned if pZ is None: self.propensity_assign, _ = compute_propensity_score( X=X, treatment=assignment, X_pred=X, treatment_pred=assignment, calibrate_p=calibrate, ) else: self.propensity_assign = pZ for ifold in range(3): treatment_idx = split_indices[ifold] outcome_idx = split_indices[(ifold + 1) % 3] tau_idx = split_indices[(ifold + 2) % 3] treatment_treat, treatment_out, treatment_tau = ( treatment[treatment_idx], treatment[outcome_idx], treatment[tau_idx], ) assignment_treat, assignment_out, assignment_tau = ( assignment[treatment_idx], assignment[outcome_idx], assignment[tau_idx], ) y_out, y_tau = y[outcome_idx], y[tau_idx] X_treat, X_out, X_tau = X[treatment_idx], X[outcome_idx], X[tau_idx] pZ_tau = self.propensity_assign[tau_idx] if p is None: logger.info("Generating propensity score") cur_p_1 = dict() cur_p_0 = dict() for group in self.t_groups: mask = (treatment_treat == group) | ( treatment_treat == self.control_name ) mask_1, mask_0 = mask & (assignment_treat == 1), mask & ( assignment_treat == 0 ) cur_p_1[group], _ = compute_propensity_score( X=X_treat[mask_1], treatment=(treatment_treat[mask_1] == group).astype(int), X_pred=X_tau, treatment_pred=(treatment_tau == group).astype(int), ) if (treatment_treat[mask_0] == group).sum() == 0: cur_p_0[group] = np.zeros(X_tau.shape[0]) else: cur_p_0[group], _ = compute_propensity_score( X=X_treat[mask_0], treatment=(treatment_treat[mask_0] == group).astype(int), X_pred=X_tau, treatment_pred=(treatment_tau == group).astype(int), ) self.propensity_1[group][tau_idx] = cur_p_1[group] self.propensity_0[group][tau_idx] = cur_p_0[group] else: cur_p_1 = dict() cur_p_0 = dict() if isinstance(p[0], (np.ndarray, pd.Series)): cur_p_0 = {self.t_groups[0]: convert_pd_to_np(p[0][tau_idx])} else: cur_p_0 = {g: prop[tau_idx] for g, prop in p[0].items()} check_p_conditions(cur_p_0, self.t_groups) if isinstance(p[1], (np.ndarray, pd.Series)): cur_p_1 = {self.t_groups[0]: convert_pd_to_np(p[1][tau_idx])} else: cur_p_1 = {g: prop[tau_idx] for g, prop in p[1].items()} check_p_conditions(cur_p_1, self.t_groups) logger.info("Generate outcome regressions") for group in self.t_groups: mask = (treatment_out == group) | (treatment_out == self.control_name) mask_1, mask_0 = mask & (assignment_out == 1), mask & ( assignment_out == 0 ) self.models_mu_c[group][ifold].fit(X_out[mask_0], y_out[mask_0]) self.models_mu_t[group][ifold].fit(X_out[mask_1], y_out[mask_1]) logger.info("Fit pseudo outcomes from the DR formula") for group in self.t_groups: mask = (treatment_tau == group) | (treatment_tau == self.control_name) treatment_filt = treatment_tau[mask] X_filt = X_tau[mask] y_filt = y_tau[mask] w_filt = (treatment_filt == group).astype(int) p_1_filt = cur_p_1[group][mask] p_0_filt = cur_p_0[group][mask] 
z_filt = assignment_tau[mask] pZ_filt = pZ_tau[mask] mu_t = self.models_mu_t[group][ifold].predict(X_filt) mu_c = self.models_mu_c[group][ifold].predict(X_filt) dr = ( z_filt * (y_filt - mu_t) / pZ_filt - (1 - z_filt) * (y_filt - mu_c) / (1 - pZ_filt) + mu_t - mu_c ) weight = ( z_filt * (w_filt - p_1_filt) / pZ_filt - (1 - z_filt) * (w_filt - p_0_filt) / (1 - pZ_filt) + p_1_filt - p_0_filt ) dr /= weight self.models_tau[group][ifold].fit(X_filt, dr, sample_weight=weight**2)
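# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the instrumented doubly
# robust pseudo-outcome and compliance weight used in the loop above, written
# in plain numpy with made-up nuisance estimates. Cross-fitting is omitted and
# all names are placeholders.
import numpy as np

rng = np.random.default_rng(0)
n = 1000
z = rng.binomial(1, 0.5, size=n)                 # assignment (instrument)
w = rng.binomial(1, np.where(z == 1, 0.8, 0.2))  # treatment, monotone in assignment
y = 1.0 * w + rng.normal(size=n)                 # outcome

pZ = np.full(n, 0.5)                             # assignment probability estimate
mu_t = np.full(n, 1.0)                           # E[Y | X, Z=1] estimate
mu_c = np.full(n, 0.2)                           # E[Y | X, Z=0] estimate
p_1 = np.full(n, 0.8)                            # P(W=1 | X, Z=1) estimate
p_0 = np.full(n, 0.2)                            # P(W=1 | X, Z=0) estimate

dr = z * (y - mu_t) / pZ - (1 - z) * (y - mu_c) / (1 - pZ) + mu_t - mu_c
weight = z * (w - p_1) / pZ - (1 - z) * (w - p_0) / (1 - pZ) + p_1 - p_0
pseudo_cate = dr / weight                        # regressed on X with sample_weight=weight**2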
def estimate_ate(
    self,
    X,
    assignment,
    treatment,
    y,
    p=None,
    pZ=None,
    bootstrap_ci=False,
    n_bootstraps=1000,
    bootstrap_size=10000,
    seed=None,
    calibrate=True,
):
    """Estimate the Average Treatment Effect (ATE) for compliers.

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        assignment (np.array or pd.Series): an assignment vector. The assignment is the
            instrumental variable that does not depend on unknown confounders. The assignment status
            influences treatment in a monotonic way, i.e. one can only be more likely to take the
            treatment if assigned.
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element
            corresponds to unassigned (assigned) units. Each is an array of propensity scores of
            float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to
            propensity vectors of float (0,1). If None will run ElasticNetPropensityModel() to
            generate the propensity scores.
        pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1);
            if None will run ElasticNetPropensityModel() to generate the assignment probability score.
        bootstrap_ci (bool): whether to run bootstrap for confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap
        seed (int): random seed for cross-fitting

    Returns:
        The mean and confidence interval (LB, UB) of the ATE estimate.
    """
    te, yhat_cs, yhat_ts = self.fit_predict(
        X,
        assignment,
        treatment,
        y,
        p,
        return_components=True,
        seed=seed,
        calibrate=calibrate,
    )
    X, assignment, treatment, y = convert_pd_to_np(X, assignment, treatment, y)

    if p is None:
        p = (self.propensity_0, self.propensity_1)
    else:
        check_p_conditions(p[0], self.t_groups)
        check_p_conditions(p[1], self.t_groups)

    if isinstance(p[0], (np.ndarray, pd.Series)):
        treatment_name = self.t_groups[0]
        p = (
            {treatment_name: convert_pd_to_np(p[0])},
            {treatment_name: convert_pd_to_np(p[1])},
        )
    elif isinstance(p[0], dict):
        p = (
            {
                treatment_name: convert_pd_to_np(_p)
                for treatment_name, _p in p[0].items()
            },
            {
                treatment_name: convert_pd_to_np(_p)
                for treatment_name, _p in p[1].items()
            },
        )

    ate = np.zeros(self.t_groups.shape[0])
    ate_lb = np.zeros(self.t_groups.shape[0])
    ate_ub = np.zeros(self.t_groups.shape[0])

    for i, group in enumerate(self.t_groups):
        _ate = te[:, i].mean()

        mask = (treatment == group) | (treatment == self.control_name)
        mask_1, mask_0 = mask & (assignment == 1), mask & (assignment == 0)
        Gamma = (treatment[mask_1] == group).mean() - (
            treatment[mask_0] == group
        ).mean()

        y_filt_1, y_filt_0 = y[mask_1], y[mask_0]
        yhat_0 = yhat_cs[group][mask_0]
        yhat_1 = yhat_ts[group][mask_1]
        treatment_filt_1, treatment_filt_0 = treatment[mask_1], treatment[mask_0]
        prob_treatment_1, prob_treatment_0 = (
            p[1][group][mask_1],
            p[0][group][mask_0],
        )
        w = (assignment[mask]).mean()

        part_1 = (
            (y_filt_1 - yhat_1).var()
            + _ate**2 * (treatment_filt_1 - prob_treatment_1).var()
            - 2 * _ate * (y_filt_1 * treatment_filt_1 - yhat_1 * prob_treatment_1).mean()
        )
        part_0 = (
            (y_filt_0 - yhat_0).var()
            + _ate**2 * (treatment_filt_0 - prob_treatment_0).var()
            - 2 * _ate * (y_filt_0 * treatment_filt_0 - yhat_0 * prob_treatment_0).mean()
        )
        part_2 = np.mean(
            (
                yhat_ts[group][mask]
                - yhat_cs[group][mask]
                - _ate * (p[1][group][mask] - p[0][group][mask])
            )
            ** 2
        )

        # SE formula is based on the lower bound formula (9) from Frölich, Markus. 2006.
        # "Nonparametric IV estimation of local average treatment effects with covariates."
        # Journal of Econometrics.
        se = np.sqrt((part_1 / w + part_0 / (1 - w)) + part_2) / Gamma

        _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
        _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)

        ate[i] = _ate
        ate_lb[i] = _ate_lb
        ate_ub[i] = _ate_ub

    if not bootstrap_ci:
        return ate, ate_lb, ate_ub
    else:
        t_groups_global = self.t_groups
        _classes_global = self._classes
        models_mu_c_global = deepcopy(self.models_mu_c)
        models_mu_t_global = deepcopy(self.models_mu_t)
        models_tau_global = deepcopy(self.models_tau)

        logger.info("Bootstrap Confidence Intervals for ATE")
        ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))

        for n in tqdm(range(n_bootstraps)):
            cate_b = self.bootstrap(
                X, assignment, treatment, y, p, pZ, size=bootstrap_size, seed=seed
            )
            ate_bootstraps[:, n] = cate_b.mean()

        ate_lower = np.percentile(
            ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1
        )
        ate_upper = np.percentile(
            ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1
        )

        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.models_mu_c = deepcopy(models_mu_c_global)
        self.models_mu_t = deepcopy(models_mu_t_global)
        self.models_tau = deepcopy(models_tau_global)

        return ate, ate_lower, ate_upper
def fit_predict(
    self,
    X,
    assignment,
    treatment,
    y,
    p=None,
    pZ=None,
    return_ci=False,
    n_bootstraps=1000,
    bootstrap_size=10000,
    return_components=False,
    verbose=True,
    seed=None,
    calibrate=True,
):
    """Fit the treatment effect and outcome models of the R learner and predict treatment effects.

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        assignment (np.array or pd.Series): a (0,1)-valued assignment vector. The assignment is the
            instrumental variable that does not depend on unknown confounders. The assignment status
            influences treatment in a monotonic way, i.e. one can only be more likely to take the
            treatment if assigned.
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element
            corresponds to unassigned (assigned) units. Each is an array of propensity scores of
            float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to
            propensity vectors of float (0,1). If None will run ElasticNetPropensityModel() to
            generate the propensity scores.
        pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1);
            if None will run ElasticNetPropensityModel() to generate the assignment probability score.
        return_ci (bool): whether to return confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap
        return_components (bool, optional): whether to return outcome for treatment and control separately
        verbose (bool): whether to output progress logs
        seed (int): random seed for cross-fitting

    Returns:
        (numpy.ndarray): Predictions of treatment effects for compliers, i.e. those individuals
            who take the treatment only if they are assigned. Output dim: [n_samples, n_treatment]
            If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
            UB [n_samples, n_treatment]
    """
    X, assignment, treatment, y = convert_pd_to_np(X, assignment, treatment, y)
    self.fit(X, assignment, treatment, y, p, seed, calibrate)

    if p is None:
        p = (self.propensity_0, self.propensity_1)
    else:
        check_p_conditions(p[0], self.t_groups)
        check_p_conditions(p[1], self.t_groups)

    if isinstance(p[0], (np.ndarray, pd.Series)):
        treatment_name = self.t_groups[0]
        p = (
            {treatment_name: convert_pd_to_np(p[0])},
            {treatment_name: convert_pd_to_np(p[1])},
        )
    elif isinstance(p[0], dict):
        p = (
            {
                treatment_name: convert_pd_to_np(_p)
                for treatment_name, _p in p[0].items()
            },
            {
                treatment_name: convert_pd_to_np(_p)
                for treatment_name, _p in p[1].items()
            },
        )

    if pZ is None:
        pZ = self.propensity_assign

    te = self.predict(
        X, treatment=treatment, y=y, return_components=return_components
    )

    if not return_ci:
        return te
    else:
        t_groups_global = self.t_groups
        _classes_global = self._classes
        models_mu_c_global = deepcopy(self.models_mu_c)
        models_mu_t_global = deepcopy(self.models_mu_t)
        models_tau_global = deepcopy(self.models_tau)
        te_bootstraps = np.zeros(
            shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)
        )

        logger.info("Bootstrap Confidence Intervals")
        for i in tqdm(range(n_bootstraps)):
            te_b = self.bootstrap(
                X, assignment, treatment, y, p, pZ, size=bootstrap_size, seed=seed
            )
            te_bootstraps[:, :, i] = te_b

        te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2)
        te_upper = np.percentile(
            te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2
        )

        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.models_mu_c = deepcopy(models_mu_c_global)
        self.models_mu_t = deepcopy(models_mu_t_global)
        self.models_tau = deepcopy(models_tau_global)

        return (te, te_lower, te_upper)
def estimate_ate(self, X, treatment, y, p, segment=None, return_ci=False): """Estimate the Average Treatment Effect (ATE). Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1) segment (np.array, optional): An optional segment vector of int. If given, the ATE and its CI will be estimated for each segment. return_ci (bool, optional): Whether to return confidence intervals Returns: (tuple): The ATE and its confidence interval (LB, UB) for each treatment, t and segment, s """ X, treatment, y = convert_pd_to_np(X, treatment, y) check_treatment_vector(treatment, self.control_name) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() check_p_conditions(p, self.t_groups) if isinstance(p, (np.ndarray, pd.Series)): treatment_name = self.t_groups[0] p = {treatment_name: convert_pd_to_np(p)} elif isinstance(p, dict): p = { treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items() } ate = [] ate_lb = [] ate_ub = [] for i, group in enumerate(self.t_groups): logger.info("Estimating ATE for group {}.".format(group)) w_group = (treatment == group).astype(int) p_group = p[group] if self.calibrate_propensity: logger.info("Calibrating propensity scores.") p_group = calibrate(p_group, w_group) yhat_c = np.zeros_like(y, dtype=float) yhat_t = np.zeros_like(y, dtype=float) if self.cv: for i_fold, (i_trn, i_val) in enumerate(self.cv.split(X, y), 1): logger.info( "Training an outcome model for CV #{}".format(i_fold)) self.model_tau.fit( np.hstack((X[i_trn], w_group[i_trn].reshape(-1, 1))), y[i_trn]) yhat_c[i_val] = self.model_tau.predict( np.hstack((X[i_val], np.zeros((len(i_val), 1))))) yhat_t[i_val] = self.model_tau.predict( np.hstack((X[i_val], np.ones((len(i_val), 1))))) else: self.model_tau.fit(np.hstack((X, w_group.reshape(-1, 1))), y) yhat_c = self.model_tau.predict( np.hstack((X, np.zeros((len(y), 1))))) yhat_t = self.model_tau.predict( np.hstack((X, np.ones((len(y), 1))))) if segment is None: logger.info("Training the TMLE learner.") _ate, se = simple_tmle(y, w_group, yhat_c, yhat_t, p_group) _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2) _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2) else: assert (segment.shape[0] == X.shape[0] and segment.ndim == 1), "Segment must be the 1-d np.array of int." segments = np.unique(segment) _ate = [] _ate_lb = [] _ate_ub = [] for s in sorted(segments): logger.info( "Training the TMLE learner for segment {}.".format(s)) filt = (segment == s) & (yhat_c < np.quantile(yhat_c, q=0.99)) _ate_s, se = simple_tmle( y[filt], w_group[filt], yhat_c[filt], yhat_t[filt], p_group[filt], ) _ate_lb_s = _ate_s - se * norm.ppf(1 - self.ate_alpha / 2) _ate_ub_s = _ate_s + se * norm.ppf(1 - self.ate_alpha / 2) _ate.append(_ate_s) _ate_lb.append(_ate_lb_s) _ate_ub.append(_ate_ub_s) ate.append(_ate) ate_lb.append(_ate_lb) ate_ub.append(_ate_ub) return np.array(ate), np.array(ate_lb), np.array(ate_ub)
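# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the counterfactual
# predictions yhat_c / yhat_t above come from a single outcome model that
# takes the treatment indicator as an extra feature column; predicting with
# that column forced to 0 or 1 gives the two potential-outcome estimates.
# Plain scikit-learn stand-ins and synthetic data below; the out-of-fold
# cross-validation used above is omitted for brevity.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.default_rng(0)
n = 2000
X_demo = rng.normal(size=(n, 5))
w_demo = rng.binomial(1, 0.5, size=n)
y_demo = X_demo[:, 0] + 0.5 * w_demo + rng.normal(size=n)

outcome_model = GradientBoostingRegressor()
outcome_model.fit(np.hstack((X_demo, w_demo.reshape(-1, 1))), y_demo)

yhat_c_demo = outcome_model.predict(np.hstack((X_demo, np.zeros((n, 1)))))  # everyone untreated
yhat_t_demo = outcome_model.predict(np.hstack((X_demo, np.ones((n, 1)))))   # everyone treated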