def test_fbumnLL(self):
    """bumOptim on a seeded uniform mixture recovers pi1 == 0.29 (2 dp)."""
    # NOTE(review): the name suggests fbumnLL but the body tests bumOptim;
    # it also collides with another test of the same name — confirm intent.
    np.random.seed(seed=100)
    # Ten draws from U(0, 1) (null-like) and ten from U(0, 0.2) (signal-like).
    null_draws = np.random.uniform(0, 1, 10)
    signal_draws = np.random.uniform(0, 0.2, 10)
    peaks = np.vstack((null_draws, signal_draws)).flatten()
    result = BUM.bumOptim(peaks, starts=1, seed=100)
    self.assertEqual(np.around(result['pi1'], decimals=2), 0.29)
def test_fpLL(self):
    """Summed fpLL log-likelihood at (0.5, 0.5) over seeded peaks is 9.57."""
    np.random.seed(seed=100)
    # Mixture of uniform draws: broad U(0, 1) plus concentrated U(0, 0.2).
    broad = np.random.uniform(0, 1, 10)
    concentrated = np.random.uniform(0, 0.2, 10)
    peaks = np.vstack((broad, concentrated)).flatten()
    total = np.sum(BUM.fpLL([0.5, 0.5], peaks))
    self.assertEqual(np.around(total, decimals=2), 9.57)
def test_fbumnLL(self):
    """fbumnLL at parameters (0.5, 0.5) on seeded peaks yields -4.42 (2 dp)."""
    np.random.seed(seed=100)
    peaks = np.vstack((np.random.uniform(0, 1, 10),
                       np.random.uniform(0, 0.2, 10))).flatten()
    neg_loglik = BUM.fbumnLL([0.5, 0.5], peaks)
    self.assertEqual(np.around(neg_loglik, decimals=2)[0], -4.42)
def neuropowermodel(request):
    """Django view: fit the pi1/mixture model for this session's peaks.

    Estimates pi1 from the session's peak p-values via the BUM model,
    fits the alternative mixture model to the peak heights, persists the
    estimates through a MixtureForm instance, and renders the model page.

    Parameters
    ----------
    request : HttpRequest
        Incoming request; used only to resolve the session id.

    Returns
    -------
    HttpResponse
        The rendered neuropowermodel template (with an error message
        instead of results when peak extraction previously failed).
    """
    # Get the template
    sid = get_session_id(request)
    template = "neuropower/neuropowermodel.html"
    context = {}
    # Load model data — [::-1][0] selects the most recent record for this
    # session (assumes insertion order; TODO confirm against the model's
    # default ordering).
    parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
    peakdata = PeakTableModel.objects.filter(SID=sid)[::-1][0]
    if not peakdata.err == "":
        # Peak extraction recorded an error earlier: surface it and bail out.
        context["text"] = peakdata.err
        return render(request, template, context)
    peaks = peakdata.data
    # Estimate pi1
    bum = BUM.bumOptim(peaks.pval.tolist(), starts=10)  # :)
    # Estimate mixture model (RFT method, excursion threshold from the
    # session's stored parameters)
    modelfit = neuropowermodels.modelfit(peaks.peak.tolist(),
                                         bum['pi1'],
                                         exc=float(parsdata.ExcZ),
                                         starts=10,
                                         method="RFT")
    # Save estimates to form
    mixtureform = MixtureForm()
    form = mixtureform.save(commit=False)
    form.SID = sid
    form.pi1 = bum['pi1']
    form.a = bum['a']
    form.mu = modelfit['mu']
    form.sigma = modelfit['sigma']
    form.save()
    # Get step status
    context["steps"] = get_neuropower_steps(template, sid)
    return render(request, template, context)
def run_power_analysis(input_img, n, fwhm=None, mask_img=None, dtype='t',
                       design='one-sample', exc=0.001, alpha=0.05,
                       method='RFT', n_iters=1000, seed=None):
    """Estimate peak-level power curves from a statistical map.

    Parameters
    ----------
    input_img : :obj:`nibabel.Nifti1Image`
        Input image.
    n : :obj:`int`
        Total sample size from analysis.
    fwhm : :obj:`list`, optional
        A list of FWHM values in mm of length 3. Defaults to [8, 8, 8].
    mask_img : :obj:`nibabel.Nifti1Image`, optional
        Mask image.
    dtype : {'t', 'z'}, optional
        Data type of input image.
    design : {'one-sample', 'two-sample'}, optional
        Design of analysis from input image.
    exc : :obj:`float`, optional
        Z-threshold (excursion threshold)
    alpha : :obj:`float`, optional
        Desired alpha.
    method : {'RFT', 'CS'}, optional
        Multiple comparisons correction method.
    n_iters : :obj:`int`, optional
        Number of iterations.
    seed : :obj:`int`, optional
        Random seed.

    Returns
    -------
    params : :obj:`dict`
        Parameters of fitted distributions.
    peak_df : :obj:`pandas.DataFrame`
        DataFrame of local maxima from statistical map, along with
        associated z-values and p-values.
    power_df : :obj:`pandas.DataFrame`
        DataFrame of power estimates using different multiple comparisons
        correction methods for different sample sizes.

    Raises
    ------
    ValueError
        If `design` or `dtype` is not one of the recognized options.
    """
    # Avoid the mutable-default-argument pitfall; [8, 8, 8] mm remains
    # the effective default.
    if fwhm is None:
        fwhm = [8, 8, 8]
    # NOTE(review): get_data() is deprecated in recent nibabel in favor of
    # get_fdata(); kept here to preserve the returned dtype — confirm the
    # pinned nibabel version before migrating.
    spm = input_img.get_data()
    affine = input_img.affine
    voxel_size = input_img.header.get_zooms()
    if mask_img is not None:
        mask = mask_img.get_data()
    else:
        # Default mask: every nonzero voxel of the statistical map.
        mask = (spm != 0).astype(int)
    n_voxels = np.sum(mask)

    # Degrees of freedom follow the design.
    if design == 'one-sample':
        df = n - 1
    elif design == 'two-sample':
        df = n - 2
    else:
        raise ValueError('Unrecognized design: {0}'.format(design))

    z_u = norm.ppf(1 - exc)  # threshold in z
    if dtype == 'z':
        spm_z = spm.copy()
    elif dtype == 't':
        # t -> z conversion through the lower tail keeps numerical
        # precision for large statistics.
        spm_z = -norm.ppf(tdist.cdf(-spm, df=df))
    else:
        raise ValueError('Unrecognized data type: {0}'.format(dtype))

    # Extract local maxima above the excursion threshold and attach
    # world-space (x, y, z) coordinates via the image affine.
    peak_df = cluster.PeakTable(spm_z, z_u, mask)
    ijk = peak_df[['i', 'j', 'k']].values
    xyz = pd.DataFrame(data=nib.affines.apply_affine(affine, ijk),
                       columns=['x', 'y', 'z'])
    peak_df = pd.concat([xyz, peak_df], axis=1)
    peak_df = peak_df.drop(['i', 'j', 'k'], axis=1)
    peak_df.index.name = 'peak index'
    z_values = peak_df['zval'].values
    p_values = peak_df['pval'].values

    # Fit models: pi1 from the p-value distribution, then the
    # alternative-peak mixture model.
    out1 = BUM.EstimatePi1(p_values, n_iters=n_iters)
    out2 = modelfit(z_values, pi1=out1['pi1'], exc=z_u, n_iters=n_iters,
                    seed=seed, method=method)
    params = {}
    params['z_u'] = z_u
    params['a'] = out1['a']
    params['pi1'] = out1['pi1']
    params['lambda'] = out1['lambda']
    params['mu'] = out2['mu']
    params['sigma'] = out2['sigma']
    # Per-subject effect size: mu scales with sqrt(sample size).
    params['mu_s'] = params['mu'] / np.sqrt(n)

    # Predict power for a range of sample sizes (n .. n + 599).
    thresholds = threshold(p_values, fwhm, voxel_size, n_voxels, alpha, z_u)
    powerpred_all = []
    test_ns = range(n, n + 600)
    for s in test_ns:
        projected_effect = params['mu_s'] * np.sqrt(s)
        powerpred_s = {}
        for k, v in thresholds.items():
            if v != 'nan':
                powerpred_s[k] = 1 - altCDF([v], projected_effect,
                                            params['sigma'],
                                            params['z_u'], method)[0]
        powerpred_s['sample size'] = s
        powerpred_all.append(powerpred_s)
    power_df = pd.DataFrame(powerpred_all)
    power_df = power_df.set_index('sample size', drop=True)
    # Drop sample sizes where every method's predicted power has
    # saturated at 1 — the curve is uninformative beyond that point.
    power_df = power_df.loc[(power_df[power_df.columns] < 1).all(axis=1)]
    return params, peak_df, power_df
# compute P-values if MODEL == "RFT": pvalues = np.exp(-EXC * (np.array(peaks.peak) - EXC)) pvalues = [max(10**(-6), t) for t in pvalues] elif MODEL == "CS": peaks.peak[peaks.peak > np.max(alltvals)] = np.round(np.max(alltvals), decimals=4) peaks.peak[peaks.peak < np.min(alltvals)] = np.round(np.min(alltvals), decimals=4) pvalues = np.array([allpvals[t == alltvals] for t in peaks.peak]).flatten() peaks['pval'] = pvalues # estimate model bum = BUM.EstimatePi1(peaks['pval'].tolist(), starts=10) est_exp_eff = "nan" est_sd = "nan" if bum['pi1'] == 0: est_eff = 'nan' else: if MODEL == "RFT": modelfit = neuropowermodels.modelfit(peaks.peak, bum['pi1'], exc=EXC, starts=20, method="RFT") est_eff = modelfit['mu'] est_sd = modelfit['sigma'] tau = neuropowermodels.TruncTau(est_eff, est_sd, EXC) est_exp_eff = est_eff + tau * est_sd
############################################################### # estimate and compute model and estimate power on pilot data # ############################################################### # compute P-values if MODEL == "RFT": pvalues = np.exp(-EXC*(np.array(peaks.peak)-EXC)) pvalues = [max(10**(-6),t) for t in pvalues] elif MODEL == "CS": pvalues = 1-np.asarray(neuropowermodels.nulCDF(peaks.peak,method="CS")) peaks['pval'] = pvalues # estimate model bum = BUM.EstimatePi1(peaks['pval'].tolist(),starts=10) if bum['pi1'] == 0: est_eff = 'nan' else: if MODEL == "RFT": modelfit = neuropowermodels.modelfit(peaks.peak,bum['pi1'],exc=EXC,starts=20,method="RFT") est_eff = modelfit['mu'] est_sd = modelfit['sigma'] tau = neuropowermodels.TruncTau(est_eff,est_sd,EXC) est_exp_eff = est_eff + tau*est_sd mu = modelfit['mu'] elif MODEL == "CS": modelfit = neuropowermodels.modelfit(peaks.peak,bum['pi1'],starts=5,method="CS") est_sd = 'nan' xn = np.arange(-10,30,0.01) alt = np.asarray(neuropowermodels.altPDF(xn,mu=modelfit['mu'],method="CS"))
def test_fpLL(self):
    """fpLL summed over seeded mixture peaks equals 9.57 at (0.5, 0.5)."""
    np.random.seed(seed=100)
    samples = (np.random.uniform(0, 1, 10), np.random.uniform(0, 0.2, 10))
    testpeaks = np.vstack(samples).flatten()
    loglik_sum = np.sum(BUM.fpLL([0.5, 0.5], testpeaks))
    self.assertEqual(np.around(loglik_sum, decimals=2), 9.57)
def test_fbumnLL(self):
    """bumOptim recovers pi1 == 0.29 (2 dp) on a seeded uniform mixture."""
    # NOTE(review): the name suggests fbumnLL but the body exercises
    # bumOptim, and it collides with another test name — confirm intent.
    np.random.seed(seed=100)
    samples = (np.random.uniform(0, 1, 10), np.random.uniform(0, 0.2, 10))
    testpeaks = np.vstack(samples).flatten()
    fit = BUM.bumOptim(testpeaks, starts=1, seed=100)
    self.assertEqual(np.around(fit['pi1'], decimals=2), 0.29)
def test_fbumnLL(self):
    """fbumnLL evaluates to -4.42 (2 dp) at (0.5, 0.5) on seeded peaks."""
    np.random.seed(seed=100)
    samples = (np.random.uniform(0, 1, 10), np.random.uniform(0, 0.2, 10))
    testpeaks = np.vstack(samples).flatten()
    value = BUM.fbumnLL([0.5, 0.5], testpeaks)
    self.assertEqual(np.around(value, decimals=2)[0], -4.42)