Example #1
from math import log

import numpy as np
from causallib.estimation import IPW
from sklearn.linear_model import LinearRegression, LogisticRegression


def calc_outcome_adaptive_lasso_single_lambda(A, Y, X, Lambda,
                                              gamma_convergence_factor):
    """Calculate ATE with the outcome adaptive lasso"""
    n = A.shape[0]  # number of samples
    # extract gamma according to Lambda and gamma_convergence_factor
    gamma = 2 * (1 + gamma_convergence_factor - log(Lambda, n))
    # fit regression from covariates X and exposure A to outcome Y
    lr = LinearRegression(fit_intercept=True).fit(
        np.hstack([A.values.reshape(-1, 1), X]), Y)
    # extract the coefficients of the covariates
    x_coefs = lr.coef_[1:]
    # calculate outcome adaptive penalization weights
    weights = (np.abs(x_coefs))**(-1 * gamma)
    # apply the penalization to the covariates themselves
    X_w = X / weights
    # fit logistic propensity score model from penalized covariates to the exposure
    ipw = IPW(LogisticRegression(solver='liblinear',
                                 penalty='l1',
                                 C=1 / Lambda),
              use_stabilized=False).fit(X_w, A)
    # compute inverse propensity weighting and calculate ATE
    weights = ipw.compute_weights(X_w, A)
    outcomes = ipw.estimate_population_outcome(X_w, A, Y, w=weights)
    effect = ipw.estimate_effect(outcomes[1], outcomes[0])
    return effect, x_coefs, weights
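
A rough usage sketch (not part of the original snippet): the data below are synthetic, and the values chosen for Lambda and gamma_convergence_factor are arbitrary, picked only to show the call signature.

import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
n = 1000
X = pd.DataFrame(rng.normal(size=(n, 5)), columns=[f"x{i}" for i in range(5)])
# treatment depends on x0; the outcome depends on treatment, x0 and x1
A = pd.Series((rng.uniform(size=n) < 1 / (1 + np.exp(-X["x0"]))).astype(int))
Y = pd.Series(1.5 * A + X["x0"] + 0.5 * X["x1"] + rng.normal(size=n))

effect, x_coefs, ipw_weights = calc_outcome_adaptive_lasso_single_lambda(
    A, Y, X, Lambda=0.1, gamma_convergence_factor=2)
print(effect)  # ATE estimate; the true effect in this simulation is 1.5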
Example #2
def calc_ate_vanilla_ipw(A, Y, X):
    """Estimate the ATE with plain IPW using an L1-regularized logistic propensity model."""
    ipw = IPW(LogisticRegression(solver='liblinear',
                                 penalty='l1',
                                 C=1e2,
                                 max_iter=500),
              use_stabilized=True).fit(X, A)
    # compute stabilized inverse propensity weights and the weighted mean potential outcomes
    weights = ipw.compute_weights(X, A)
    outcomes = ipw.estimate_population_outcome(X, A, Y, w=weights)
    # the effect is the difference between the treated and control mean outcomes
    effect = ipw.estimate_effect(outcomes[1], outcomes[0])
    return effect[0]
Example #3
def test_ipw_matches_causallib(linear_data_pandas):
    """IPWEstimator should reproduce the ATE computed directly with causallib's IPW."""
    # linear_data_pandas is a fixture yielding covariates w, treatment t and outcome y
    w, t, y = linear_data_pandas
    # reference estimate computed directly with causallib
    causallib_ipw = IPW(learner=LogisticRegression())
    causallib_ipw.fit(w, t)
    potential_outcomes = causallib_ipw.estimate_population_outcome(
        w, t, y, treatment_values=[0, 1])
    causallib_effect = causallib_ipw.estimate_effect(potential_outcomes[1],
                                                     potential_outcomes[0])[0]

    # the same estimate computed through the project's IPWEstimator wrapper
    ipw = IPWEstimator()
    ipw.fit(w, t, y)
    our_effect = ipw.estimate_ate()
    assert our_effect == causallib_effect
Example #4
class IPWEstimator(BaseEstimator):
    """Inverse probability weighting (IPW) estimator, wrapping causallib's IPW."""

    def __init__(self,
                 prop_score_model=LogisticRegression(),
                 trim_weights=False,
                 trim_eps=None,
                 stabilized=False):
        # fall back to the project-level TRIM_EPS default when trimming is requested
        # without an explicit threshold
        if trim_weights and trim_eps is None:
            trim_eps = TRIM_EPS
        self.ipw = IPW(learner=prop_score_model,
                       truncate_eps=trim_eps,
                       use_stabilized=stabilized)
        self.w = None
        self.t = None
        self.y = None

    def fit(self, w, t, y):
        w, t, y = to_pandas(w, t, y)
        self.ipw.fit(w, t)
        self.w = w
        self.t = t
        self.y = y

    def estimate_ate(self, t1=1, t0=0, w=None, t=None, y=None):
        w = self.w if w is None else w
        t = self.t if t is None else t
        y = self.y if y is None else y
        if w is None or t is None or y is None:
            raise RuntimeError(
                'Must run .fit(w, t, y) before running .estimate_ate()')
        w, t, y = to_pandas(w, t, y)
        mean_potential_outcomes = self.ipw.estimate_population_outcome(
            w, t, y, treatment_values=[t0, t1])
        ate_estimate = mean_potential_outcomes[1] - mean_potential_outcomes[0]
        # Use the estimate_effect() call below instead if you want to allow effects that are not differences
        # ate_estimate = self.ipw.estimate_effect(mean_potential_outcomes[1], mean_potential_outcomes[0])[0]
        return ate_estimate

    def ate_conf_int(self, percentile=.95) -> tuple:
        raise NotImplementedError
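
A rough usage sketch for the wrapper (not part of the original listing): the data are synthetic, and it assumes the project's to_pandas helper passes pandas inputs through unchanged.

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n = 500
w = pd.DataFrame(rng.normal(size=(n, 3)), columns=["w1", "w2", "w3"])
t = pd.Series((rng.uniform(size=n) < 0.5).astype(int))
y = pd.Series(2.0 * t + w["w1"] + rng.normal(size=n))

estimator = IPWEstimator(prop_score_model=LogisticRegression(max_iter=1000),
                         stabilized=True)
estimator.fit(w, t, y)
print(estimator.estimate_ate())  # should land close to the true effect of 2.0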