Example #1
    def __init__(self, _run):
        # Assumed imports from the surrounding module: time, cPickle,
        # functools.partial, multiprocessing.Pool, scipy.integrate.simps,
        # plus the project-local Compute_Model and data_handling.
        print 'Calculating models...'
        M = {}
        output = []
        fpath_D = './../OUTPUT_FILES/RUNS/' + _run.subdir + 'PICKLES/data.pkl'
        with open(fpath_D, 'rb') as fD:
            D = cPickle.load(fD)

        c_old = simps(D['pCO2'], D['time'])
        start_time = time.time()

        # Run the convolution once per tau, distributed over 5 worker processes.
        L_of_tau = partial(Compute_Model, D['pCO2'], D['time'], 0.1, c_old)
        pool = Pool(5)
        output += pool.map(L_of_tau, _run.tau)
        pool.close()
        pool.join()
        delta_time = time.time() - start_time

        # For each tau, compute a model prediction that also depends on A and B.
        # This avoids running repeated convolution calculations.
        for (_tau, _conv_pCO2) in output:
            for (_A, _B) in _run.parspace_AB:
                M[data_handling.pars2label(_A, _tau, _B)] = _A * _conv_pCO2 + _B
        M['parspace'] = _run.parspace
        data_handling.save_pickle(_run.subdir, 'models.pkl', M)

        print '    Run took ', format(delta_time, '.1f'), 's'
        print '    Approx ', format(delta_time / _run.N_cells * 1.e6, '.3f'), ' us/cell'
        print '    Done.\n'
    def get_most_likely_model(self):
        # Read the per-voxel best-fit parameters and select this voxel's row.
        fpath = os.path.join('./../OUTPUT_FILES/RUNS/' + self._run.subdir,
                             'most_likely_A_tau.csv')

        df = pd.read_csv(fpath, header=0, low_memory=False, dtype='str')
        cond = (df['voxel'] == str(self._idx_space)).values

        A = float(df['A'].values[cond][0])
        tau = float(df['tau'].values[cond][0])
        self.most_likely = self.M[data_handling.pars2label(A, tau)]
    def add_models(self):
        # Overplot every model in the (A, tau) parameter space as a faint
        # gray dashed curve, so a chosen model can be compared against all
        # alternatives. NOTE: 'models' is loaded but not used in this snippet.
        models = data_handling.get_master_pickle(self._run)

        for (A, tau) in self.M['parspace']:
            model = self.M[data_handling.pars2label(A, tau)]
            self.ax.plot(self.M['time'][1:],
                         model,
                         ls='--',
                         marker='None',
                         color='gray',
                         alpha=0.3)
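
A note on Example #1: the expensive convolution runs only once per tau.
functools.partial freezes the shared arguments of Compute_Model, Pool.map
distributes the tau values over worker processes, and each convolved curve is
then reused for every (A, B) pair through the linear relation
A * conv_pCO2 + B. Below is a minimal, self-contained sketch of that pattern;
convolve_decay and make_label are hypothetical stand-ins for the project's
Compute_Model and data_handling.pars2label.

from functools import partial
from multiprocessing import Pool

import numpy as np


def convolve_decay(signal, t, tau):
    # Hypothetical stand-in for Compute_Model: convolve the input signal
    # with a normalized exponential kernel of timescale tau and return the
    # pair (tau, convolved_signal) so results can be matched to their tau.
    kernel = np.exp(-t / tau)
    kernel /= kernel.sum()
    conv = np.convolve(signal, kernel, mode='full')[:len(signal)]
    return (tau, conv)


def make_label(A, tau, B):
    # Hypothetical stand-in for data_handling.pars2label.
    return 'A{:.3f}_tau{:.1f}_B{:.3f}'.format(A, tau, B)


if __name__ == '__main__':
    t = np.linspace(0., 100., 1001)
    signal = np.random.rand(len(t))
    taus = [5., 10., 20.]

    # Freeze every argument except tau, then map over tau in parallel.
    worker = partial(convolve_decay, signal, t)
    pool = Pool(2)
    output = pool.map(worker, taus)
    pool.close()
    pool.join()

    # The convolution ran once per tau; filling the (A, B) grid costs only
    # one multiply-add per parameter combination.
    M = {}
    for tau, conv in output:
        for A in (0.5, 1.):
            for B in (-0.1, 0., 0.1):
                M[make_label(A, tau, B)] = A * conv + B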
Example #4
    def __init__(self, _run):
        # Assumed imports from the surrounding module: time, cPickle,
        # numpy as np, plus the project-local data_handling, stats, cf
        # and vars2line1.
        print 'Calculating likelihoods...'
        L = {}

        A_qE = (_run.A_step / 2.)**2.

        fpath_S = './../OUTPUT_FILES/RUNS/' + _run.subdir + 'PICKLES/smooth.pkl'
        fpath_M = './../OUTPUT_FILES/RUNS/' + _run.subdir + 'PICKLES/models.pkl'
        fpath_out = './../OUTPUT_FILES/RUNS/' + _run.subdir + 'most_likely_A_tau.csv'
        N_trials = 0
        with open(fpath_S, 'rb') as fS, open(fpath_M, 'rb') as fM,\
          open(fpath_out, 'w') as out:
            S, M = cPickle.load(fS), cPickle.load(fM)
            out.write('voxel,A,A_unc_l,A_unc_u,tau,tau_unc_l,tau_unc_u')

            columns = zip(*_run.parspace_Atau)
            As, taus = np.array(columns[0]), np.array(columns[1])
            cond = data_handling.region2cond(_run, S['time'])
            N_voxels = S['signal_ns'].shape[0]
            ln_B_range = np.log(_run.B[-1] - _run.B[0])

            start_time = time.time()
            print '    N voxels = ' + str(N_voxels)
            for idx_space in range(N_voxels):
                # Debug alternative: loop over selected voxels only, e.g. [3523] or [15821].
                print idx_space
                N_trials += 1
                bold_voxel = S['signal_ns'][idx_space, :]
                signal_unc = S['signal_noise'][idx_space, :]
                pCO2_unc = S['pCO2_noise']

                ln_L = []
                for (A, tau) in _run.parspace_Atau:
                    label = data_handling.pars2label(A, tau, 0.)  # NOTE: not used below.

                    # TODO: refine this error propagation.
                    unc = np.sqrt(signal_unc**2. + (A * pCO2_unc)**2.)

                    # Log-likelihood of this voxel's signal for each baseline
                    # B at fixed (A, tau).
                    ln_L_of_B = np.array([
                        cf.compute_L(bold_voxel[cond],
                                     M[data_handling.pars2label(A, tau, B)][cond],
                                     unc[cond])
                        for B in _run.B
                    ])

                    # Marginalize over B under a flat prior of width
                    # B[-1] - B[0]; max_base (factored out by treat_array)
                    # keeps the exponentiation inside cf.marg numerically safe.
                    ln_L_of_B_clean, max_base = stats.treat_array(ln_L_of_B)
                    ln_L_margin = (cf.marg(ln_L_of_B_clean, _run.B) +
                                   max_base - ln_B_range)

                    ln_L.append(ln_L_margin)

                ln_L = np.array(ln_L)
                L['likelihood_list_' + str(idx_space)] = ln_L

                outvars = stats.get_contour_uncertainties(As, taus, ln_L, A_qE)
                line = vars2line1(idx_space, outvars)

                out.write('\n' + str(idx_space) + line)
                delta_time = time.time() - start_time

            data_handling.save_pickle(_run.subdir, 'likelihoods.pkl', L)

        print '    Run took ', format(delta_time, '.1f'), 's'
        print '    Approx ', format(delta_time / float(N_trials), '.3f'), ' s/voxel'
        print '    Done.\n'
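
A note on Example #4: for each (A, tau) the baseline B is a nuisance
parameter that gets marginalized out. The code evaluates ln L on a grid of B
values, factors out the maximum (max_base) so the likelihood can be safely
exponentiated, integrates over B via cf.marg, and subtracts
ln_B_range = ln(B[-1] - B[0]) to apply a flat prior. Below is a minimal
sketch of that log-space marginalization under those assumptions;
marginalize_ln_L is a hypothetical stand-in for the combined
stats.treat_array / cf.marg / ln_B_range steps.

import numpy as np


def marginalize_ln_L(ln_L_of_B, B_grid):
    # Factor out the maximum so np.exp cannot overflow, integrate the
    # rescaled likelihood over B with the trapezoidal rule, then restore
    # the offset and normalize by the width of the flat prior.
    max_base = np.max(ln_L_of_B)
    scaled = np.exp(ln_L_of_B - max_base)
    integral = np.trapz(scaled, B_grid)
    ln_B_range = np.log(B_grid[-1] - B_grid[0])
    return max_base + np.log(integral) - ln_B_range


if __name__ == '__main__':
    B_grid = np.linspace(-1., 1., 201)
    # A sharply peaked Gaussian log-likelihood in B; exponentiating the raw
    # values would overflow without the max-subtraction trick.
    ln_L_of_B = 1000. - 0.5 * ((B_grid - 0.2) / 0.05)**2
    print(marginalize_ln_L(ln_L_of_B, B_grid))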