Example #1
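All examples below assume a common preamble. A minimal sketch follows; the exact module paths for the selective-inference helpers are assumptions and may need adjusting to the installed version of the package.

import functools

import numpy as np
import pandas as pd
from scipy.stats import norm as ndist
import regreg.api as rr

# Assumed locations within the selective-inference package; adjust as needed.
from selectinf.tests.instance import gaussian_instance
from selectinf.learning.core import normal_sampler, keras_fit, gbm_fit_sk
from selectinf.learning.utils import (full_model_inference,
                                      partial_model_inference,
                                      split_full_model_inference,
                                      lee_inference)

# Also assumed available from the same package: BHfilter, lasso_glmnet and
# sparse_mixture_learner; `generate` is a project-local wrapper around
# gaussian_instance in the original example files.
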
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000):

    # description of statistical problem

    X, y, truth = generate(n=n,
                           p=p,
                           s=s,
                           equicorrelated=False,
                           rho=0.5,
                           sigma=sigma,
                           signal=signal,
                           random_signs=True,
                           scale=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)

    def meta_algorithm(X, XTXi, resid, lam, sampler):
        # XTX is captured from the enclosing scope; it is defined below,
        # before full_model_inference first calls this function.
        p = XTX.shape[0]
        success = np.zeros(p)

        loss = rr.quadratic_loss((p,), Q=XTX)
        pen = rr.l1norm(p, lagrange=lam)

        scale = 0.
        noisy_S = sampler(scale=scale)  # deterministic with scale=0
        loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
        problem = rr.simple_problem(loss, pen)
        soln = problem.solve(max_its=100, tol=1.e-10)
        success += soln != 0
        return set(np.nonzero(success)[0])

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    lam = 4. * np.sqrt(n)
    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid,
                                            lam)

    # run selection algorithm

    return full_model_inference(X,
                                y,
                                truth,
                                selection_algorithm,
                                smooth_sampler,
                                success_params=(1, 1),
                                B=B,
                                fit_probability=keras_fit,
                                fit_args={
                                    'epochs': 10,
                                    'sizes': [100] * 5,
                                    'dropout': 0.,
                                    'activation': 'relu'
                                })
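
A hypothetical driver for this (or any of the following) simulate variants: full_model_inference is assumed to return a pandas DataFrame of per-variable results, or None when nothing is selected, so runs are pooled defensively.

import pandas as pd

# hypothetical driver: pool results over repeated simulations
dfs = []
for _ in range(50):
    df = simulate(B=2000)
    if df is not None:  # a run may select nothing
        dfs.append(df)

if dfs:
    results = pd.concat(dfs, ignore_index=True)
    print(results.describe())
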
Example #2
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000):

    # description of statistical problem

    X, y, truth = generate(n=n,
                           p=p,
                           s=s,
                           equicorrelated=False,
                           rho=0.5,
                           sigma=sigma,
                           signal=signal,
                           random_signs=True,
                           scale=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)

    def meta_algorithm(X, XTXi, resid, sampler):

        n, p = X.shape

        rho = 0.8
        S = sampler(scale=0.)  # deterministic with scale=0
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degen X
        Xnew = rho * X + np.sqrt(1 - rho**2) * np.random.standard_normal(
            X.shape)

        X_full = np.hstack([X, Xnew])
        beta_full = np.linalg.pinv(X_full).dot(ynew)
        winners = np.fabs(beta_full)[:p] > np.fabs(beta_full)[p:]
        return set(np.nonzero(winners)[0])

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm

    return full_model_inference(X,
                                y,
                                truth,
                                selection_algorithm,
                                smooth_sampler,
                                success_params=(8, 10),
                                B=B,
                                fit_probability=keras_fit,
                                fit_args={
                                    'epochs': 20,
                                    'sizes': [100] * 5,
                                    'dropout': 0.,
                                    'activation': 'relu'
                                })
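
The reconstruction `ynew = X.dot(XTXi).dot(S) + resid` used throughout deserves a note: with `scale=0.` the sampler returns the observed `S = X.T.dot(y)`, so `ynew` is the fitted values plus the residual, i.e. `y` itself; with a resampled `S` the same formula yields a synthetic response whose sufficient statistic equals the draw. A self-contained check of the noiseless case:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 5))
y = rng.standard_normal(50)

XTXi = np.linalg.inv(X.T @ X)
resid = y - X @ XTXi @ (X.T @ y)  # residual from the full least-squares fit

S = X.T @ y                   # observed sufficient statistic
ynew = X @ XTXi @ S + resid   # fitted values + residual
assert np.allclose(ynew, y)   # recovers y exactly when S is not resampled
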
Example #3
def simulate(n=2000,
             p=1000,
             s=10,
             signal=(0.5, 1),
             sigma=2,
             alpha=0.1,
             B=4000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)

    def meta_algorithm(X, XTXi, resid, sampler):

        S = sampler(scale=0.)  # deterministic with scale=0
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degen X
        G = lasso_glmnet(X, ynew, *[None] * 4)  # remaining positional args passed as None placeholders
        select = G.select()
        return set(select[0])

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm

    return full_model_inference(X,
                                y,
                                truth,
                                selection_algorithm,
                                smooth_sampler,
                                success_params=(1, 1),
                                B=B,
                                fit_probability=keras_fit,
                                fit_args={
                                    'epochs': 10,
                                    'sizes': [100] * 5,
                                    'dropout': 0.,
                                    'activation': 'relu'
                                })
Example #4
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000):

    # description of statistical problem

    X, y, truth = generate(n=n,
                           p=p, 
                           s=s,
                           equicorrelated=False,
                           rho=0.5, 
                           sigma=sigma,
                           signal=signal,
                           random_signs=True,
                           scale=False)[:3]


    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n-p)
                         
    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)

    def meta_algorithm(XTX, XTXi, dispersion, lam, sampler):
        # lam is threaded through for interface parity with the lasso variants
        # but is unused here: selection is by BH-filtered marginal Z-tests.
        scale = 0.
        noisy_S = sampler(scale=scale)  # deterministic with scale=0
        soln = XTXi.dot(noisy_S)
        solnZ = soln / (np.sqrt(np.diag(XTXi)) * np.sqrt(dispersion))
        pval = ndist.cdf(solnZ)
        pval = 2 * np.minimum(pval, 1 - pval)
        return set(BHfilter(pval, q=0.2))

    lam = 4. * np.sqrt(n)
    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, dispersion, lam)

    # run selection algorithm

    return full_model_inference(X,
                                y,
                                truth,
                                selection_algorithm,
                                smooth_sampler,
                                success_params=(1, 1),
                                B=B,
                                fit_probability=gbm_fit_sk,
                                fit_args={'n_estimators':2000})
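
BHfilter is assumed to implement the Benjamini-Hochberg step-up rule on the two-sided p-values; a minimal self-contained sketch of that rule, for reference:

import numpy as np

def bh_select(pvals, q=0.2):
    """Indices passing the Benjamini-Hochberg step-up rule at level q."""
    p = np.asarray(pvals)
    m = len(p)
    order = np.argsort(p)
    passing = np.nonzero(p[order] <= q * np.arange(1, m + 1) / m)[0]
    if len(passing) == 0:
        return np.array([], dtype=int)
    k = passing.max()              # largest sorted position meeting its threshold
    return np.sort(order[:k + 1])  # reject the k+1 smallest p-values
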
Example #5
def simulate(n=1000, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=2000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False,
                                    center=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)

    idx = np.random.choice(np.arange(n), int(n / 2), replace=False)

    def meta_algorithm(X, XTXi, resid, idx, sampler):

        n, p = X.shape

        S = sampler(scale=0.)  # deterministic with scale=0
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degen X

        G = lasso_glmnet(X[idx], ynew[idx], *[None] * 4)  # remaining positional args passed as None placeholders
        select = G.select()
        return set(select[0])

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid,
                                            idx)

    # run selection algorithm

    df = full_model_inference(X,
                              y,
                              truth,
                              selection_algorithm,
                              smooth_sampler,
                              success_params=(1, 1),
                              B=B,
                              fit_probability=keras_fit,
                              fit_args={
                                  'epochs': 20,
                                  'sizes': [100] * 5,
                                  'dropout': 0.,
                                  'activation': 'relu'
                              })

    if df is not None:

        observed_set = list(df['variable'])
        split_df = split_full_model_inference(X,
                                              y,
                                              idx,
                                              dispersion,
                                              truth,
                                              observed_set,
                                              alpha=alpha)

        df = pd.merge(df, split_df, on='variable')
        return df
Example #6
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=8000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)

    def meta_algorithm(XTX, XTXi, lam, sampler):

        p = XTX.shape[0]
        success = np.zeros(p)

        loss = rr.quadratic_loss((p, ), Q=XTX)
        pen = rr.l1norm(p, lagrange=lam)

        scale = 0.
        noisy_S = sampler(scale=scale)  # deterministic with scale=0
        loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
        problem = rr.simple_problem(loss, pen)
        soln = problem.solve(max_its=100, tol=1.e-10)
        success += soln != 0
        return tuple(sorted(np.nonzero(success)[0]))

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    lam = 4. * np.sqrt(n)
    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, lam)

    # run selection algorithm

    df = partial_model_inference(X,
                                 y,
                                 truth,
                                 selection_algorithm,
                                 smooth_sampler,
                                 fit_probability=gbm_fit_sk,
                                 fit_args={'n_estimators': 1000},
                                 success_params=(1, 1),
                                 B=B,
                                 alpha=alpha,
                                 learner_klass=sparse_mixture_learner)

    lee_df = lee_inference(X, y, lam, dispersion, truth, alpha=alpha)

    return pd.merge(df, lee_df, on='variable')
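
Both result frames are keyed by the selected variable index, so the merge aligns learned-selection and Lee et al. columns row by row. A toy illustration of the shape (all column names other than 'variable' are hypothetical):

import pandas as pd

df = pd.DataFrame({'variable': [3, 7], 'pivot': [0.41, 0.88]})
lee_df = pd.DataFrame({'variable': [3, 7], 'lee_pivot': [0.52, 0.73]})
print(pd.merge(df, lee_df, on='variable'))
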
Example #7
def simulate(n=2000,
             p=100,
             s=10,
             signal=(np.sqrt(2) * 0.5, np.sqrt(2) * 1),
             sigma=2,
             alpha=0.1,
             B=3000):

    # description of statistical problem

    X, y, truth = generate(n=n,
                           p=p,
                           s=s,
                           equicorrelated=False,
                           rho=0.5,
                           sigma=sigma,
                           signal=signal,
                           random_signs=True,
                           scale=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)

    def meta_algorithm(X, XTXi, resid, sampler):

        n, p = X.shape

        idx = np.random.choice(np.arange(n), int(n / 2), replace=False)
        S = sampler(scale=0.)  # deterministic with scale=0
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degen X
        Xidx, yidx = X[idx], ynew[idx]  # use the reconstructed response so the sampler draw drives selection
        rho = 0.8

        Xnew = rho * Xidx + np.sqrt(1 - rho**2) * np.random.standard_normal(
            Xidx.shape)

        X_full = np.hstack([Xidx, Xnew])
        beta_full = np.linalg.pinv(X_full).dot(yidx)
        winners = np.fabs(beta_full)[:p] > np.fabs(beta_full)[p:]
        return set(np.nonzero(winners)[0])

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm

    df = full_model_inference(X,
                              y,
                              truth,
                              selection_algorithm,
                              smooth_sampler,
                              success_params=(8, 10),
                              B=B,
                              fit_probability=keras_fit,
                              fit_args={
                                  'epochs': 20,
                                  'sizes': [100] * 5,
                                  'dropout': 0.,
                                  'activation': 'relu'
                              })

    if df is not None:

        idx2 = np.random.choice(np.arange(n), int(n / 2), replace=False)
        observed_set = list(df['variable'])
        split_df = split_full_model_inference(
            X,
            y,
            idx2,
            None,  # ignored dispersion
            truth,
            observed_set,
            alpha=alpha)

        df = pd.merge(df, split_df, on='variable')
        return df
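
Examples #2 and #7 build their decoy features as `Xnew = rho * X + np.sqrt(1 - rho**2) * Z` with standard-normal `Z`; for standardized columns each decoy then correlates with its original at roughly `rho` while carrying none of the signal, so a coefficient on a decoy beating its twin suggests noise. A quick empirical check:

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((5000, 3))
rho = 0.8
Xnew = rho * X + np.sqrt(1 - rho**2) * rng.standard_normal(X.shape)

for j in range(X.shape[1]):
    r = np.corrcoef(X[:, j], Xnew[:, j])[0, 1]
    print(f"column {j}: correlation {r:.3f}")  # each close to 0.8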