# Assumed imports -- a sketch. These simulate() variants follow the
# selection/selectinf "learning" examples; the helper locations are not
# shown in the original, so only widely-available packages are imported
# here. Helpers assumed to come from the surrounding learning framework:
# gaussian_instance, normal_sampler, split_sampler, keras_fit, probit_fit,
# logit_fit, repeat_selection, infer_full_target, full_model_inference,
# highdim_model_inference, liu_inference, BHfilter, lasso_glmnet,
# cv_glmnet_lam; X_full and boot_design are module-level globals in the
# data-based variants.
import functools

import numpy as np
import pandas as pd
from scipy.stats import norm as ndist
from sklearn.linear_model import lasso_path

import regreg.api as rr


def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=2000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(XTX, XTXi, lam, sampler):

        p = XTX.shape[0]
        success = np.zeros(p)

        loss = rr.quadratic_loss((p,), Q=XTX)
        pen = rr.l1norm(p, lagrange=lam)

        scale = 0.
        noisy_S = sampler(scale=scale)
        loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
        problem = rr.simple_problem(loss, pen)
        soln = problem.solve(max_its=100, tol=1.e-10)
        success += soln != 0
        return set(np.nonzero(success)[0])

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    lam = 4. * np.sqrt(n)
    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, lam)

    # run selection algorithm

    return full_model_inference(X,
                                y,
                                truth,
                                selection_algorithm,
                                splitting_sampler,
                                success_params=(1, 1),
                                B=B,
                                fit_probability=keras_fit,
                                fit_args={'epochs': 10,
                                          'sizes': [100] * 5,
                                          'dropout': 0.,
                                          'activation': 'relu'})
def simulate(n=200, p=50, s=5, signal=(0.5, 1), sigma=2, alpha=0.1, B=1000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False)[:3]

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(XTX, XTXi, dispersion, lam, sampler):

        # lam is kept for interface parity with the lasso variants but is
        # unused here: selection is by thresholding marginal Z-scores.
        noisy_S = sampler(scale=0.5)
        soln = XTXi.dot(noisy_S)
        solnZ = soln / (np.sqrt(np.diag(XTXi)) * np.sqrt(dispersion))
        return set(np.nonzero(np.fabs(solnZ) > 2.1)[0])

    lam = 4. * np.sqrt(n)
    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi,
                                            dispersion, lam)

    # run selection algorithm

    return full_model_inference(X,
                                y,
                                truth,
                                selection_algorithm,
                                splitting_sampler,
                                success_params=(5, 7),
                                B=B,
                                fit_probability=keras_fit,
                                fit_args={'epochs': 30,
                                          'sizes': [100, 100],
                                          'activation': 'relu'})
def simulate(n=200, p=100, s=10, signal=(1.5, 2), sigma=2, alpha=0.1, B=3000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(X, XTXi, resid, sampler):

        S = sampler(scale=0.)  # deterministic with scale=0
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degenerate X
        G = lasso_glmnet(X, ynew, *[None] * 4)
        select = G.select()
        return set(list(select[0]))

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm

    return full_model_inference(X,
                                y,
                                truth,
                                selection_algorithm,
                                splitting_sampler,
                                success_params=(1, 1),
                                B=B,
                                fit_probability=keras_fit,
                                fit_args={'epochs': 10,
                                          'sizes': [100] * 5,
                                          'dropout': 0.,
                                          'activation': 'relu'})
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000, seed=0):

    # description of statistical problem

    n, p = X_full.shape

    if boot_design:
        idx = np.random.choice(np.arange(n), n, replace=True)
        X = X_full[idx]  # bootstrap X to make it really an IID sample,
                         # i.e. don't condition on X throughout
        X += 0.1 * np.std(X) * np.random.standard_normal(X.shape)  # to make non-degenerate
    else:
        X = X_full.copy()

    X = X - np.mean(X, 0)[None, :]
    X = X / np.std(X, 0)[None, :]

    truth = np.zeros(p)
    truth[:s] = np.linspace(signal[0], signal[1], s)
    np.random.shuffle(truth)
    truth *= sigma / np.sqrt(n)

    y = X.dot(truth) + sigma * np.random.standard_normal(n)

    lam_min, lam_1se = cv_glmnet_lam(X.copy(), y.copy(), seed=seed)
    lam_min, lam_1se = n * lam_min, n * lam_1se

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(X, XTXi, resid, sampler):

        S = sampler.center.copy()
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degenerate X
        G = lasso_glmnet(X, ynew, *[None] * 4)
        select = G.select(seed=seed)
        return set(list(select[0]))

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm

    df = highdim_model_inference(X,
                                 y,
                                 truth,
                                 selection_algorithm,
                                 splitting_sampler,
                                 lam_min,
                                 sigma**2,  # dispersion assumed known for now
                                 success_params=(1, 1),
                                 B=B,
                                 fit_probability=keras_fit,
                                 fit_args={'epochs': 10,
                                           'sizes': [100] * 5,
                                           'dropout': 0.,
                                           'activation': 'relu'})

    if df is not None:
        liu_df = liu_inference(X,
                               y,
                               1.00001 * lam_min,
                               dispersion,
                               truth,
                               alpha=alpha,
                               approximate_inverse='BN')
        return pd.merge(df, liu_df, on='variable')
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=5000, seed=0):

    # description of statistical problem

    n, p = X_full.shape

    if boot_design:
        idx = np.random.choice(np.arange(n), n, replace=True)
        X = X_full[idx]  # bootstrap X to make it really an IID sample,
                         # i.e. don't condition on X throughout
        X += 0.1 * np.std(X) * np.random.standard_normal(X.shape)  # to make non-degenerate
    else:
        X = X_full.copy()

    X = X - np.mean(X, 0)[None, :]
    X = X / np.std(X, 0)[None, :]
    n, p = X.shape

    truth = np.zeros(p)
    truth[:s] = np.linspace(signal[0], signal[1], s)
    np.random.shuffle(truth)
    truth /= np.sqrt(n)
    truth *= sigma

    y = X.dot(truth) + sigma * np.random.standard_normal(n)

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(XTX, XTXi, lam, sampler):

        p = XTX.shape[0]
        success = np.zeros(p)

        loss = rr.quadratic_loss((p,), Q=XTX)
        pen = rr.l1norm(p, lagrange=lam)

        scale = 0.
        noisy_S = sampler(scale=scale)
        loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
        problem = rr.simple_problem(loss, pen)
        soln = problem.solve(max_its=100, tol=1.e-10)
        success += soln != 0
        return set(np.nonzero(success)[0])

    lam = 4. * np.sqrt(n)
    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, lam)

    # run selection algorithm

    df = full_model_inference(X,
                              y,
                              truth,
                              selection_algorithm,
                              splitting_sampler,
                              success_params=(1, 1),
                              B=B,
                              fit_probability=keras_fit,
                              fit_args={'epochs': 10,
                                        'sizes': [100] * 5,
                                        'dropout': 0.,
                                        'activation': 'relu'})

    if False:  # df is not None:
        liu_df = liu_inference(X,
                               y,
                               lam,
                               dispersion,
                               truth,
                               alpha=alpha)
        return pd.merge(df, liu_df, on='variable')
    else:
        return df
def simulate(n=1000, p=100, s=20, signal=(2, 4), sigma=2, alpha=0.1, B=2000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.1,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=True)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(XTX, XTXi, sampler):

        min_success = 6
        ntries = 10

        def _alpha_grid(X, y, center, XTX):
            n, p = X.shape
            alphas, coefs, _ = lasso_path(X, y, Xy=center, precompute=XTX)
            nselected = np.count_nonzero(coefs, axis=0)
            return alphas[nselected < np.sqrt(0.8 * p)]

        alpha_grid = _alpha_grid(X, y, sampler(scale=0.), XTX)
        success = np.zeros((p, alpha_grid.shape[0]))

        for _ in range(ntries):
            scale = 1.  # corresponds to sub-samples of 50%
            noisy_S = sampler(scale=scale)
            _, coefs, _ = lasso_path(X, y, Xy=noisy_S, precompute=XTX,
                                     alphas=alpha_grid)
            success += np.abs(np.sign(coefs))

        selected = np.apply_along_axis(lambda row: any(x > min_success for x in row),
                                       1, success)
        vars = set(np.nonzero(selected)[0])
        return vars

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi)

    # run selection algorithm

    return full_model_inference(X,
                                y,
                                truth,
                                selection_algorithm,
                                splitting_sampler,
                                success_params=(1, 1),
                                B=B,
                                fit_probability=keras_fit,
                                fit_args={'epochs': 10,
                                          'sizes': [100] * 5,
                                          'dropout': 0.,
                                          'activation': 'relu'})
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000, seed=0):

    # description of statistical problem

    n, p = X_full.shape

    if boot_design:
        idx = np.random.choice(np.arange(n), n, replace=True)
        X = X_full[idx]  # bootstrap X to make it really an IID sample,
                         # i.e. don't condition on X throughout
        X += 0.1 * np.std(X) * np.random.standard_normal(X.shape)  # to make non-degenerate
    else:
        X = X_full.copy()

    X = X - np.mean(X, 0)[None, :]
    X = X / np.std(X, 0)[None, :]
    n, p = X.shape

    truth = np.zeros(p)
    truth[:s] = np.linspace(signal[0], signal[1], s)
    np.random.shuffle(truth)
    truth /= np.sqrt(n)
    truth *= sigma

    y = X.dot(truth) + sigma * np.random.standard_normal(n)

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    print(dispersion, sigma**2)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(X, XTXi, resid, sampler):

        S = sampler(scale=0.5)  # noisy perturbation; scale=0 would be deterministic
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degenerate X
        G = lasso_glmnet(X, ynew, *[None] * 4)
        select = G.select(seed=seed)
        return set(list(select[0]))

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm

    df = full_model_inference(X,
                              y,
                              truth,
                              selection_algorithm,
                              splitting_sampler,
                              success_params=(6, 10),
                              B=B,
                              fit_probability=keras_fit,
                              fit_args={'epochs': 10,
                                        'sizes': [100] * 5,
                                        'dropout': 0.,
                                        'activation': 'relu'})
    return df
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000, seed=0):

    # description of statistical problem

    n, p = X_full.shape

    if boot_design:
        idx = np.random.choice(np.arange(n), n, replace=True)
        X = X_full[idx]  # bootstrap X to make it really an IID sample,
                         # i.e. don't condition on X throughout
        X += 0.1 * np.std(X) * np.random.standard_normal(X.shape)  # to make non-degenerate
    else:
        X = X_full.copy()

    X = X - np.mean(X, 0)[None, :]
    X = X / np.std(X, 0)[None, :]
    n, p = X.shape

    truth = np.zeros(p)
    truth[:s] = np.linspace(signal[0], signal[1], s)
    np.random.shuffle(truth)
    truth /= np.sqrt(n)
    truth *= sigma

    y = X.dot(truth) + sigma * np.random.standard_normal(n)

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    print(dispersion, sigma**2)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(XTX, XTXi, sampler):

        min_success = 6
        ntries = 10

        def _alpha_grid(X, y, center, XTX):
            n, p = X.shape
            alphas, coefs, _ = lasso_path(X.copy(), y.copy(), Xy=center.copy(),
                                          precompute=XTX.copy())
            nselected = np.count_nonzero(coefs, axis=0)
            return alphas[nselected < 20]

        alpha_grid = _alpha_grid(X, y, sampler.center, XTX)
        success = np.zeros((p, alpha_grid.shape[0]))

        for _ in range(ntries):
            scale = 1.  # corresponds to sub-samples of 50%
            noisy_S = sampler(scale=scale)
            _, coefs, _ = lasso_path(X, y, Xy=noisy_S, precompute=XTX,
                                     alphas=alpha_grid)
            success += np.abs(np.sign(coefs))

        selected = np.apply_along_axis(lambda row: any(x > min_success for x in row),
                                       1, success)
        return set(np.nonzero(selected)[0])

    # pass the Gram matrix XTX (p x p), which meta_algorithm expects as
    # its first argument (the design X has the wrong shape here)
    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi)

    # run selection algorithm

    df = full_model_inference(X,
                              y,
                              truth,
                              selection_algorithm,
                              splitting_sampler,
                              success_params=(6, 10),
                              B=B,
                              fit_probability=keras_fit,
                              fit_args={'epochs': 10,
                                        'sizes': [100] * 5,
                                        'dropout': 0.,
                                        'activation': 'relu'})
    return df
def simulate(n=1000, p=60, s=15, signal=3, sigma=2, alpha=0.1):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)
    splitting_sampler = split_sampler(X * y[:, None], covS / n)

    def meta_algorithm(XTX, XTXi, dispersion, sampler):

        min_success = 3
        ntries = 7
        p = XTX.shape[0]
        success = np.zeros(p)

        for _ in range(ntries):
            scale = 0.5
            frac = 1. / (scale**2 + 1.)
            noisy_S = sampler(scale=scale)
            noisy_beta = XTXi.dot(noisy_S)
            noisy_Z = noisy_beta / np.sqrt(dispersion * np.diag(XTXi) * frac)
            success += np.fabs(noisy_Z) > 2

        return set(np.nonzero(success >= min_success)[0])

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, dispersion)

    # run selection algorithm

    observed_set = selection_algorithm(splitting_sampler)

    # find the targets, based on the observed outcome:
    # this variant iterates over every selected variable

    pivots, covered, lengths, naive_lengths = [], [], [], []
    for idx in observed_set:
        print(idx, len(observed_set))
        true_target = truth[idx]

        (pivot,
         interval) = infer_full_target(selection_algorithm,
                                       observed_set,
                                       [idx],
                                       splitting_sampler,
                                       dispersion,
                                       hypothesis=[true_target],
                                       fit_probability=probit_fit,
                                       success_params=(1, 1),
                                       alpha=alpha,
                                       B=1000)[0][:2]

        pivots.append(pivot)
        covered.append((interval[0] < true_target) * (interval[1] > true_target))
        lengths.append(interval[1] - interval[0])

        target_sd = np.sqrt(dispersion * XTXi[idx, idx])
        naive_lengths.append(2 * ndist.ppf(1 - 0.5 * alpha) * target_sd)

    return pivots, covered, lengths, naive_lengths
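# Usage sketch for the list-returning variant above (an assumption, not part
# of the original scripts): if that simulate() is run standalone, collect its
# pivots across repeats and check them for uniformity via an empirical CDF.

def check_pivots(nsim=20):
    all_pivots = []
    for _ in range(nsim):
        pivots, covered, lengths, naive_lengths = simulate()
        all_pivots.extend(pivots)
    all_pivots = np.array(all_pivots)
    grid = np.linspace(0, 1, 101)
    ecdf = np.array([np.mean(all_pivots <= g) for g in grid])
    # for valid (uniform) pivots, the ECDF should track the 45-degree line
    print(np.max(np.abs(ecdf - grid)))  # Kolmogorov-Smirnov-type discrepancy
    return all_pivots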
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=2000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False)[:3]

    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(X, XTXi, resid, sampler):

        S = sampler(scale=0.)  # deterministic with scale=0
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degenerate X
        G = lasso_glmnet(X, ynew, *[None] * 4)
        select = G.select()
        return set(list(select[0]))

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm

    success_params = (1, 1)

    observed_set = repeat_selection(selection_algorithm, smooth_sampler,
                                    *success_params)

    # find the target, based on the observed outcome;
    # we just take the first target

    pivots, covered, lengths, pvalues = [], [], [], []
    lower, upper = [], []
    naive_pvalues, naive_pivots, naive_covered, naive_lengths = [], [], [], []

    targets = []
    observed_list = sorted(observed_set)
    np.random.shuffle(observed_list)

    for idx in observed_list[:1]:
        print("variable: ", idx, "total selected: ", len(observed_set))
        true_target = [truth[idx]]
        targets.extend(true_target)

        (pivot,
         interval,
         pvalue) = infer_full_target(selection_algorithm,
                                     observed_set,
                                     [idx],
                                     splitting_sampler,
                                     dispersion,
                                     hypothesis=true_target,
                                     fit_probability=probit_fit,
                                     success_params=success_params,
                                     alpha=alpha,
                                     B=B,
                                     single=True)[0][:3]

        pvalues.append(pvalue)
        pivots.append(pivot)
        covered.append((interval[0] < true_target[0]) * (interval[1] > true_target[0]))
        lengths.append(interval[1] - interval[0])

        target_sd = np.sqrt(dispersion * XTXi[idx, idx])
        observed_target = np.squeeze(XTXi[idx].dot(X.T.dot(y)))
        quantile = ndist.ppf(1 - 0.5 * alpha)
        naive_interval = (observed_target - quantile * target_sd,
                          observed_target + quantile * target_sd)

        naive_pivot = (1 - ndist.cdf((observed_target - true_target[0]) / target_sd))
        naive_pivot = 2 * min(naive_pivot, 1 - naive_pivot)
        naive_pivots.append(naive_pivot)

        naive_pvalue = (1 - ndist.cdf(observed_target / target_sd))
        naive_pvalue = 2 * min(naive_pvalue, 1 - naive_pvalue)  # two-sided
        naive_pvalues.append(naive_pvalue)

        naive_covered.append((naive_interval[0] < true_target[0]) *
                             (naive_interval[1] > true_target[0]))
        naive_lengths.append(naive_interval[1] - naive_interval[0])
        lower.append(interval[0])
        upper.append(interval[1])

    if len(pvalues) > 0:
        return pd.DataFrame({'pivot': pivots,
                             'target': targets,
                             'pvalue': pvalues,
                             'coverage': covered,
                             'length': lengths,
                             'naive_pivot': naive_pivots,
                             'naive_coverage': naive_covered,
                             'naive_length': naive_lengths,
                             'upper': upper,
                             'lower': lower})
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=1000):

    # description of statistical problem

    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False)[:3]

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(XTX, XTXi, dispersion, sampler):

        noisy_S = sampler(scale=0.)
        soln = XTXi.dot(noisy_S)
        solnZ = soln / (np.sqrt(np.diag(XTXi)) * np.sqrt(dispersion))
        pval = ndist.cdf(solnZ)
        pval = 2 * np.minimum(pval, 1 - pval)
        return set(BHfilter(pval, q=0.2))

    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, dispersion)

    # run selection algorithm

    success_params = (1, 1)

    observed_set = repeat_selection(selection_algorithm, smooth_sampler,
                                    *success_params)

    # find the target, based on the observed outcome;
    # we just take the first target

    targets = []
    idx = sorted(observed_set)
    np.random.shuffle(idx)
    idx = idx[:1]

    if len(idx) > 0:
        print("variable: ", idx, "total selected: ", len(observed_set))
        true_target = truth[idx]

        results = infer_full_target(selection_algorithm,
                                    observed_set,
                                    idx,
                                    splitting_sampler,
                                    dispersion,
                                    hypothesis=true_target,
                                    fit_probability=logit_fit,
                                    fit_args={'df': 20},
                                    success_params=success_params,
                                    alpha=alpha,
                                    B=B,
                                    single=True)

        pvalues = [r[2] for r in results]
        covered = [(r[1][0] < t) * (r[1][1] > t) for r, t in zip(results, true_target)]
        pivots = [r[0] for r in results]

        target_sd = np.sqrt(np.diag(dispersion * XTXi)[idx])
        observed_target = XTXi[idx].dot(X.T.dot(y))
        quantile = ndist.ppf(1 - 0.5 * alpha)
        naive_interval = np.vstack([observed_target - quantile * target_sd,
                                    observed_target + quantile * target_sd])

        naive_pivots = (1 - ndist.cdf((observed_target - true_target) / target_sd))
        naive_pivots = 2 * np.minimum(naive_pivots, 1 - naive_pivots)

        naive_pvalues = (1 - ndist.cdf(observed_target / target_sd))
        naive_pvalues = 2 * np.minimum(naive_pvalues, 1 - naive_pvalues)

        naive_covered = ((naive_interval[0] < true_target) *
                         (naive_interval[1] > true_target))
        naive_lengths = naive_interval[1] - naive_interval[0]

        lower = [r[1][0] for r in results]
        upper = [r[1][1] for r in results]
        lengths = np.array(upper) - np.array(lower)

        return pd.DataFrame({'pivot': pivots,
                             'pvalue': pvalues,
                             'coverage': covered,
                             'length': lengths,
                             'naive_pivot': naive_pivots,
                             'naive_coverage': naive_covered,
                             'naive_length': naive_lengths,
                             'upper': upper,
                             'lower': lower,
                             'targets': true_target,
                             'batch_size': B * np.ones(len(idx), int)})  # np.int is removed in modern NumPy
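# A minimal driver sketch (an assumption, not part of the original scripts):
# it repeats whichever DataFrame-returning simulate() variant is in scope and
# concatenates the results.  The column names ('pivot', 'coverage', 'length')
# match the DataFrames constructed above.

def main(nsim=50, csvfile=None):

    collected = []
    for i in range(nsim):
        df = simulate()
        if df is not None:
            collected.append(df)
        print('completed %d of %d simulations' % (i + 1, nsim))

    if collected:
        results = pd.concat(collected)
        print(results[['pivot', 'coverage', 'length']].describe())
        if csvfile is not None:
            results.to_csv(csvfile, index=False)
        return results


if __name__ == "__main__":
    main()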