Code Example #1
def run_exp1mod16(id):
    """ drift-rate: rwd/pen tradeoff, stable target variance model
        bias : rwd/pen tradeoff, stable target variance model"""
    #imports
    import hddm
    data = hddm.load_csv('~/Pub_Code_master/Data/exp1data.csv')
    #data grooming, remove fast outliers
    rtSig = data.rt.std()
    rtMu = data.rt.mean()
    cutoff = rtMu - rtSig
    data = data[data.rt > cutoff]
    data.reset_index(drop=True, inplace=True)
    #build model
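    # depends_on estimates a separate v and z for every rwd_pen x stableVar combination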
    exp1model16 = hddm.HDDM(data,
                            depends_on={
                                'v': ['rwd_pen', 'stableVar'],
                                'z': ['rwd_pen', 'stableVar']
                            },
                            bias=True,
                            include=['v', 'a', 't', 'z'],
                            p_outlier=0.05)
    exp1model16.find_starting_values()
    exp1model16.sample(3000,
                       burn=1000,
                       dbname='exp1mod16_%i.db' % id,
                       db='pickle')
    return exp1model16
Code Example #2
    def test_HDDM_split_std(self):
        data, _ = hddm.generate.gen_rand_data({
            "cond1": {
                "v": 0,
                "a": 2,
                "t": 0.3,
                "z": 0.5,
                "sv": 0.1,
                "st": 0.1,
                "sz": 0.1,
            },
            "cond2": {
                "v": 0,
                "a": 2,
                "t": 0.3,
                "z": 0.5,
                "sv": 0.1,
                "st": 0.1,
                "sz": 0.1,
            },
        })

        for param in ["a", "v", "z", "t"]:
            model = hddm.HDDM(
                data,
                include="all",
                depends_on={param: "condition"},
                is_group_model=True,
                std_depends=True,
            )
            idx = model.nodes_db.knode_name == param + "_std"
            self.assertEqual(len(model.nodes_db.node[idx]), 2)

        return model.mc
Code Example #3
def multiple_run():
    '''
    OUTPUT:
        Prints the Gelman-Rubin statistics and the minimum and maximum R-hat values.
        (Assumes `dataf` and `gelman_rubin` from hddm.analyze are defined at module level.)
    '''
    models = []
    for i in range(5):
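        # fit an independent chain on each pass; gelman_rubin() compares them below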
        m = hddm.HDDM(dataf,
                #include=['sv', 'st'], group_only_nodes=['sv', 'st'],
                #include=('st'), include=('sv', 'st', 'sz'),
                depends_on={'v': ['condition', 'angle'],
                            't': ['condition', 'angle'],
                            'a': ['condition', 'angle']},
                p_outlier=0.05)

        m.find_starting_values()
        m.sample(20000, burn=5000, thin=2)
        models.append(m)

    print('\n---\n')
    dd = gelman_rubin(models)
    print(dd)

    # report the range of R-hat values across all parameters
    rhos = list(dd.values())
    print(min(rhos), max(rhos))
Code Example #5
File: test_models.py, Project: Sahanduiuc/hddm
def test_posterior_plots_breakdown():
    params = hddm.generate.gen_rand_params()
    data, params_subj = hddm.generate.gen_rand_data(params=params, subjs=5)
    m = hddm.HDDM(data)
    m.sample(200, burn=10)
    m.plot_posterior_predictive()
    m.plot_posterior_quantiles()
    m.plot_posteriors()
Code Example #6
def outlierModel(data):
    m_outlier = hddm.HDDM(data, p_outlier=0.05, depends_on={'v': 'stim'})
    m_outlier.find_starting_values()
    m_outlier.sample(10000, burn=100)
    m_outlier.plot_posterior_predictive()
    plt.xlabel('RT')
    plt.ylabel("Probability Density")
    plt.savefig("No-Outlier-model-v2.pdf")
Code Example #7
def optimize_sx(mname, project='imaging'):

    m = defmod.define_model(mname, project=project)
    data = m.data
    if 'z' in m.depends_on.keys():
        bias = True
    else:
        bias = False

    grp_dict = {}
    subj_params = []
    aic_list = []
    bic_list = []
    dic_list = []
    ic_dict = {}

    for subj_idx, subj_data in data.groupby('subj_idx'):
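        # fit each subject's data separately so AIC/BIC can be computed per subject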

        m_subj = hddm.HDDM(subj_data,
                           depends_on=m.depends_on,
                           bias=bias,
                           include=m.include)

        sx_params = m_subj.optimize('ML')

        pdict = opt.get_pdict(sx_params)
        subj_params.append(sx_params)
        aic_list.append(aic(m_subj))
        bic_list.append(bic(m_subj))
        #dic_list.append(m_subj.dic)

        grp_dict[subj_idx] = pdict

    ic_dict = {'aic': aic_list, 'bic': bic_list}
    ic_df = pd.DataFrame(ic_dict)
    ic_df.to_csv(mname + "_IC_Rank.csv")
    #write grp_dict to .txt file for reloading later
    f = open('mle_params.txt', 'w')
    f.write('grp_dict=' + repr(grp_dict) + '\n')
    f.close()

    params = pd.DataFrame(subj_params)
    simdf = vis.predict(grp_dict,
                        data,
                        ntrials=100,
                        nsims=100,
                        save=True,
                        RTname="dbmz_RT.jpeg",
                        ACCname="dbmz_ACC.jpeg")
    #simdf=vis.predict(grp_dict, df, ntrials=160, nsims=100, save=True, RTname="SimRT_EvT.jpeg", ACCname="SimACC_EvT.jpeg")
    simdf.to_csv("simdf_opt.csv")
    params.to_csv("subj_params_opt.csv", index=False)
    sdt.plot_rho_sdt(data, simdf)
    empvsim = sdt.rho_sdt(data, simdf)

    return grp_dict, ic_df
Code Example #8
def test_posterior_plots_breakdown():
    params = hddm.generate.gen_rand_params()
    data, params_subj = hddm.generate.gen_rand_data(params=params, subjs=4)
    m = hddm.HDDM(data)
    m.sample(2000, burn=10)
    m.plot_posterior_predictive()
    m.plot_posterior_quantiles()
    m.plot_posteriors()
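    # simulate datasets from the posterior and compare their summary statistics to the observed data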
    ppc = hddm.utils.post_pred_gen(m, samples=10)
    hddm.utils.post_pred_stats(data, ppc)
Code Example #9
def GelmanRubinModel(data):
    models = []
    for i in range(5):
        m = hddm.HDDM(data, p_outlier=0.05)
        m.find_starting_values()
        m.sample(5000, burn=100)
        models.append(m)
        m.plot_posterior_predictive(figsize=(14, 10))
        plt.show()
    stats = hddm.analyze.gelman_rubin(models)
    print(stats)
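The snippet above prints the raw Gelman-Rubin dictionary. A minimal follow-up sketch for flagging parameters that have not converged, assuming `models` holds the fitted chains as above (the 1.1 cutoff is a common rule of thumb, not part of the original code):

stats = hddm.analyze.gelman_rubin(models)
# flag any parameter whose R-hat exceeds the conventional 1.1 threshold
not_converged = {name: rhat for name, rhat in stats.items() if rhat > 1.1}
print(not_converged if not_converged else 'all parameters converged (R-hat <= 1.1)')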
Code Example #10
File: final.py, Project: ctw/myhddm
def analyze_models(nsims=100, ntrials=100):
	
	mnames=['msm', 'dbm', 'dbmz', 'pbm']
	#mnames=['pbm']
	bias=True
	data=pd.read_csv("/Users/kyle/Desktop/beh_hddm/allsx_feat.csv")
	
	for m in mnames:
		
		if m=='dbm':
			bias=False
		
		model=defmod.define_model(m, project='behav')
		
		mpath="/Users/kyle/Desktop/beh_hddm/revised_models/"+m
		os.chdir(mpath)
		
		# each chain needs its own model object; aliasing one instance would just reload it three times
		m0 = model
		m1 = defmod.define_model(m, project='behav')
		m2 = defmod.define_model(m, project='behav')
		mlist = [m0.load_db(m+"_traces0.db", db='pickle'), m1.load_db(m+"_traces1.db", db='pickle'), m2.load_db(m+"_traces2.db", db='pickle')]
		allmodels=kabuki.utils.concat_models(mlist)
		allmodels.print_stats(m+"_stats_all.txt")
		
		vis.plot_neutral_traces(allmodels)
		for node in ['z', 'vf', 'vh']:
			vis.plot_posterior_nodes(allmodels, node)
			
		gparams={}; subj_params=[]
		
		msingle=defmod.define_single(m, project='behav')
		
		for subj_idx, subj_data in data.groupby('subj_idx'):
			m_subj=hddm.HDDM(subj_data, depends_on=msingle.depends_on, bias=bias, include=msingle.include)
			sx_params=m_subj.optimize('ML')
			pdict=opt.get_pdict(sx_params)
			subj_params.append(sx_params)
			gparams[subj_idx]=pdict
		
		#write gparams to .txt file for reloading later
		f=open(m+'mle_gparams.txt', 'w')
		f.write('gparams=' + repr(gparams) + '\n')
		f.close()
		
		simdf_list=[]
		for i in range(nsims):
			simdf, params_used=sims.sim_exp(pdict=gparams, ntrials=ntrials, pfast=0.0, pslow=0.0, nsims_per_sub=1)
			simdf['sim_n']=[i]*len(simdf.index)
			simdf_list.append(simdf)

		simdf=pd.concat(simdf_list)

		params = pd.DataFrame(subj_params)
		simdf.to_csv(m+"_simdf.csv")
		params.to_csv(m+"_sparams.csv", index=False)
Code Example #11
def run_testmod(id):
    #imports
    import hddm
    data = hddm.load_csv('~/Pub_Code_master/Data/exp1data.csv')
    #data grooming, remove fast outliers
    rtSig = data.rt.std()
    rtMu = data.rt.mean()
    cutoff = rtMu - rtSig
    data = data[data.rt > cutoff]
    data.reset_index(drop=True, inplace=True)
    #build model
    testmod = hddm.HDDM(data)
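    # very short chain: enough to verify the model builds and samples, not for inference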
    testmod.sample(200, burn=20, dbname='testmod_%i.db' % id, db='pickle')
    return testmod
Code Example #12
def simpleModel(data):
    print("Fitting model...")
    m = hddm.HDDM(data)
    m.find_starting_values()
    m.sample(7000, burn=100)

    print("Fitted parameters and model stats")
    #m.print_stats()
    stats = m.gen_stats()
    print(stats)

    print("Plotting posterior distributions and theoretical RT distributions")
    m.plot_posteriors(['a', 't', 'v', 'a_std'])
    plt.show()
    print("Lumped model DIC: ", m.dic)
Code Example #13
def run_exp1mod6(id):
    """ drift-rate: stable target variance model """
    import hddm
    data = hddm.load_csv('~/Pub_Code_master/Data/exp1data.csv')
    rtSig = data.rt.std()
    rtMu = data.rt.mean()
    cutoff = rtMu - rtSig
    data = data[data.rt > cutoff]
    data.reset_index(drop=True, inplace=True)
    exp1model6 = hddm.HDDM(data, depends_on={'v': 'stableVar'}, p_outlier=0.05)
    exp1model6.find_starting_values()
    exp1model6.sample(1500,
                      burn=500,
                      dbname='exp1mod6_%i.db' % id,
                      db='pickle')
    return exp1model6
Code Example #14
 def test_HDDM_distributions(self):
     params = hddm.generate.gen_rand_params()
     data, params_subj = hddm.generate.gen_rand_data(subjs=4,
                                                     params=params,
                                                     size=10)
     m = hddm.HDDM(data)
     m.sample(self.iter, burn=self.burn)
     self.assertIsInstance(m.nodes_db.loc['wfpt.0']['node'].parents['v'],
                           pm.Normal)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']['node'].parents['v'].parents['mu'],
         pm.Normal)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']['node'].parents['v'].parents['tau'],
         pm.Deterministic)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']
         ['node'].parents['v'].parents['tau'].parents['x'], pm.HalfNormal)
     self.assertIsInstance(m.nodes_db.loc['wfpt.0']['node'].parents['a'],
                           pm.Gamma)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']['node'].parents['a'].parents['alpha'],
         pm.Deterministic)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']
         ['node'].parents['a'].parents['alpha'].parents['x'], pm.Gamma)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']['node'].parents['a'].parents['beta'],
         pm.Deterministic)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']
         ['node'].parents['a'].parents['beta'].parents['y'], pm.HalfNormal)
     self.assertIsInstance(m.nodes_db.loc['wfpt.0']['node'].parents['t'],
                           pm.Gamma)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']['node'].parents['t'].parents['alpha'],
         pm.Deterministic)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']
         ['node'].parents['t'].parents['alpha'].parents['x'], pm.Gamma)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']['node'].parents['t'].parents['beta'],
         pm.Deterministic)
     self.assertIsInstance(
         m.nodes_db.loc['wfpt.0']
         ['node'].parents['t'].parents['beta'].parents['y'], pm.HalfNormal)
Code Example #15
def run_exp2mod6(id):
    """ drift-rate: rwd prob model """
    import hddm
    data = hddm.load_csv('~/Pub_Code_master/Data/exp2data.csv')
    #data grooming, remove fast outliers
    #rtSig = data.rt.std()
    #rtMu = data.rt.mean()
    #cutoff =  rtMu - rtSig
    #data = data[data.rt>cutoff]
    #data.reset_index(drop=True, inplace=True)
    exp2model6 = hddm.HDDM(data, depends_on={'v': 'rwd_prob'}, p_outlier=0.05)
    exp2model6.find_starting_values()
    exp2model6.sample(1500,
                      burn=500,
                      dbname='exp2mod6_%i.db' % id,
                      db='pickle')
    return exp2model6
Code Example #16
File: diag.py, Project: Sahanduiuc/hddm
def test_params_on_data(params,
                        data,
                        include=(),
                        depends_on=None,
                        conf_interval=95):
    thin = 1
    samples = 10000
    burn = 10000
    n_iter = burn + samples * thin
    stdout.flush()
    if depends_on is None:
        depends_on = {}
    if 'pi' in include or 'gamma' in include:
        m_hddm = hddm.HDDMContaminant(data, bias=True, depends_on=depends_on)
    else:
        m_hddm = hddm.HDDM(data,
                           bias=True,
                           include=include,
                           depends_on=depends_on)
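    # mcmc() builds the underlying PyMC sampler object so sampling can be controlled directly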
    model = m_hddm.mcmc()
    #[model.use_step_method(pm.Metropolis, x,proposal_sd=0.1) for x in model.stochastics]
    i_t = time()
    model.sample(n_iter, burn=burn, thin=thin)
    print "sampling took: %.2f seconds" % (time() - i_t)
    ok = True
    if check_model(model, params, assert_=False,
                   conf_interval=conf_interval) == False:
        print "model checking failed. running again"
        stdout.flush()
        model.sample(n_iter, burn=burn, thin=thin)
        if check_model(model,
                       params,
                       assert_=False,
                       conf_interval=conf_interval) == False:
            print "model checking failed again !!!!!!!!!!!!!!!!!!!!!!!"
            ok = False

    res = {}
    res['params'] = params
    res['data'] = data
    res['mc'] = model
    check_rejection(model, assert_=False)
    check_correl(model)
    stdout.flush()
    return ok, res
Code Example #17
File: Exp3models.py, Project: CoAxLab/Net_Worth
def run_exp3mod1(id):
    """ Pure drift-rate to task params model
    """
    import hddm
    data = hddm.load_csv(
        'C:/Users/Rory/Dropbox/Net_Worth/Pub_Code/Data/Experiment3/Model/Exp3ModelData.csv'
    )
    #data grooming, remove fast outliers
    #rtSig = data.rt.std()
    #rtMu = data.rt.mean()
    #cutoff =  rtMu - rtSig
    #data = data[data.rt>cutoff]
    #data.reset_index(drop=True, inplace=True)
    exp3model1 = hddm.HDDM(data,
                           depends_on={'v': ['rwd_prob', 'stableVar']},
                           p_outlier=0.05)
    exp3model1.find_starting_values()
    exp3model1.sample(1500, burn=250, dbname='exp3mod1%i.db' % id, db='pickle')
    return exp3model1
Code Example #18
def run_exp2mod2(id):
    """
    Bias: task params model
    """
    import hddm
    data = hddm.load_csv('~/Pub_Code_master/Data/exp2data.csv')
    #data grooming, remove fast outliers
    #rtSig = data.rt.std()
    #rtMu = data.rt.mean()
    #cutoff =  rtMu - rtSig
    #data = data[data.rt>cutoff]
    #data.reset_index(drop=True, inplace=True)
    exp2model2 = hddm.HDDM(data,
                           depends_on={'z': ['rwd_pen', 'rwd_prob']},
                           include='z',
                           p_outlier=0.05)
    exp2model2.find_starting_values()
    exp2model2.sample(1500, burn=500, dbname='exp2mod2%i.db' % id, db='pickle')
    return exp2model2
Code Example #19
File: test_models.py, Project: Sahanduiuc/hddm
 def test_HDDM_distributions(self):
     params = hddm.generate.gen_rand_params()
     data, params_subj = hddm.generate.gen_rand_data(subjs=4, params=params)
     m = hddm.HDDM(data)
     m.sample(self.iter, burn=self.burn)
     # .loc replaces the pandas .ix indexer, which has been removed from current pandas
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['v'], pm.Normal)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['v'].parents['mu'], pm.Normal)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['v'].parents['tau'], pm.Deterministic)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['v'].parents['tau'].parents['x'], pm.Uniform)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['a'], pm.Deterministic)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['a'].parents['x'], pm.Normal)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['a'].parents['x'].parents['mu'], pm.Normal)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['a'].parents['x'].parents['tau'], pm.Deterministic)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['a'].parents['x'].parents['tau'].parents['x'], pm.Uniform)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['t'], pm.Deterministic)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['t'].parents['x'], pm.Normal)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['t'].parents['x'].parents['mu'], pm.Normal)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['t'].parents['x'].parents['tau'], pm.Deterministic)
     assert isinstance(m.nodes_db.loc['wfpt.0']['node'].parents['t'].parents['x'].parents['tau'].parents['x'], pm.Uniform)
Code Example #20
def run_exp1mod2(id):
    """ bias:task params model """
    import hddm
    data = hddm.load_csv('~/Pub_Code_master/Data/exp1data.csv')
    rtSig = data.rt.std()
    rtMu = data.rt.mean()
    cutoff = rtMu - rtSig
    data = data[data.rt > cutoff]
    data.reset_index(drop=True, inplace=True)
    exp1model2 = hddm.HDDM(data,
                           depends_on={'z': ['rwd_pen', 'stableVar']},
                           include='z',
                           p_outlier=0.05)
    exp1model2.find_starting_values()
    exp1model2.sample(1500,
                      burn=500,
                      dbname='exp1mod2_%i.db' % id,
                      db='pickle')
    return exp1model2
Code Example #21
def gelman_rubin_models(n):

    import hddm

    import pandas as pd
    import pickle
    import os

    #load data
    data = hddm.load_csv(
        'https://raw.githubusercontent.com/xuankai91/MResProject/master/tofit_HDDM.csv'
    )
    print('data loaded')

    #setup
    samples = 5000
    savepath = './'

    #create save folders
    if not os.path.exists(savepath + 'gelman_rubin'):
        os.makedirs(savepath + 'gelman_rubin')

    #start modelling
    print('starting...')

    #instantiate model object
    model = hddm.HDDM(data,
                      depends_on={
                          'v': 'stim',
                          'a': 'stim',
                          't': 'stim'
                      },
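                      # sv, st, sz: inter-trial variability in drift rate, non-decision time, and starting point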
                      include=('sv', 'st', 'sz'),
                      p_outlier=.05)

    #start model fitting
    model.find_starting_values()
    model.sample(samples,
                 burn=int(samples / 10),
                 dbname='%s/gelman_rubin/m_grs_%d_traces.db' % (savepath, n),
                 db='pickle')
    model.save('%s/gelman_rubin/m_grs_%d' % (savepath, n))
Code Example #22
File: HDDMPrac.py, Project: rameshsrinivasanuci/hnlpy
def next_step(data):
#    print('working... \n')
    #m = hddm.HDDM(data)
    m = hddm.HDDM(data, depends_on={'v':'condition'})
    m.save('/data/pdmattention/TestModel')
    #m = hddm.HDDM(data, depends_on={'t','rt'})
    #m = hddm.HDDM(data, depends_on={'a','correct'})

    # Three core parameters: v (drift rate), a (boundary separation), and
    # t (non-decision time, which may not depend on difficulty).
    # This version lets drift rate depend on condition.
    # find a good starting point which helps with convergence
    print('finding starting values... \n')
    m.find_starting_values()  # find good starting values via optimization
    print('\n')
    print('starting values found... \n')
    # draw 5000 posterior samples and discard 500 as burn-in
    m.sample(5000, burn=500) # posterior samples
    #m.sample(1000, burn=20) # smaller sample to test
    print('generating stats... \n')
    stats = m.gen_stats()

    print('stats type: ', type(stats))

    # confirm the posterior summaries for a, t, and v at the group and subject level
    print(stats[stats.index.isin(['a', 'a_std', 'a_subj.0', 't', 't_std', 't_subj.0', 'v', 'v_std', 'v_subj.0'])])

    print('printing stats... \n')
    #m.print_stats()

    print('\n')
    
    m.plot_posteriors(['a', 't','v'])


    m.plot_posterior_predictive(figsize=(14, 10))
    print('Legend: Red = Indiv Subject; Blue = Prediction')
Code Example #23
def modelFitDrift(data):
    print("Fitting model...")
    m_stim = hddm.HDDM(data,
                       p_outlier=0.05,
                       depends_on={
                           'z': 'stim',
                           'v': 'stim',
                           'a': 'stim'
                       },
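                       # 'z' frees the starting point; 'sv', 'st', 'sz' add inter-trial variability parameters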
                       include=('z', 'sv', 'st', 'sz'))
    m_stim.find_starting_values()
    m_stim.sample(10000, burn=1000)
    #m_stim.print_stats()

    v_Pos, v_Neg = m_stim.nodes_db.node[['v(1)', 'v(2)']]
    hddm.analyze.plot_posterior_nodes([v_Pos, v_Neg])
    plt.xlabel("drift rate")
    plt.ylabel('Posterior Probability')
    plt.title("Posterior of drift-rate group means")
    plt.savefig("hddm_posteriors_v4.pdf")

    #a_Pos,a_Neg = m_stim.nodes_db.node[['a(1)','a(2)']]
    #hddm.analyze.plot_posterior_nodes([a_Pos,a_Neg])
    #plt.xlabel("boundary threshold")
    #plt.ylabel("Posterior of boundary threshold group means")
    #plt.savefig("hddm_posteriors_a2.pdf")

    z_Pos, z_Neg = m_stim.nodes_db.node[['z(1)', 'z(2)']]
    hddm.analyze.plot_posterior_nodes([z_Pos, z_Neg])
    plt.xlabel("Starting Point")
    plt.ylabel('Posterior Probability')
    plt.title("Posterior of starting-point group means")
    plt.savefig("hddm_posteriors_z.pdf")

    print("Print fitted parameters and model stats")
    m_stim.print_stats()

    # Significance testing on the posteriors
    print("P(vPos > vNeg) = ", (v_Pos.trace() > v_Neg.trace()).mean())
    print("P(zPos > tNeg) = ", (z_Pos.trace() > z_Neg.trace()).mean())
    # Deviance Information Criterion
    print("Stimulus model DIC: ", m_stim.dic)
Code Example #24
def run_exp1mod1(id):
    """ pure drift-rate:task params model """
    #imports
    import hddm
    data = hddm.load_csv('~/Pub_Code_master/Data/exp1data.csv')
    #data grooming, remove fast outliers
    rtSig = data.rt.std()
    rtMu = data.rt.mean()
    cutoff = rtMu - rtSig
    data = data[data.rt > cutoff]
    data.reset_index(drop=True, inplace=True)
    #build model
    exp1model1 = hddm.HDDM(data,
                           depends_on={'v': ['rwd_pen', 'stableVar']},
                           p_outlier=0.05)
    exp1model1.find_starting_values()
    exp1model1.sample(3000,
                      burn=1500,
                      dbname='exp1mod1_%i.db' % id,
                      db='pickle')
    return exp1model1
Code Example #25
def single_run(dbname = 'traces_1.db', model_save = 'hddm_model_vta_no.ml',
               generative_save = 'ppc_vta1.csv'):
    '''
    INPUT:
      dbname (str) - filename of the pickle database used to store the MCMC traces
      model_save (str) - filename under which the fitted model is saved
      generative_save (str) - CSV filename for the posterior predictive samples
    OUTPUT:
      saves the fitted model and writes the posterior predictive samples to CSV
    '''
    m = hddm.HDDM(dataf,
              #include=['sv', 'st'], group_only_nodes=['sv', 'st'],
              #include=('st'), include=('sv', 'st', 'sz'),
              depends_on={'v': ['condition', 'angle'],
                          't': ['condition', 'angle'],
                          'a': ['condition', 'angle']},
              p_outlier=0.05)
    m.find_starting_values()
    m.sample(20000, burn=5000, thin=2, dbname=dbname, db='pickle')
    m.save(model_save)
    ppc_data = hddm.utils.post_pred_gen(m, samples=10)
    ppc_data.to_csv(generative_save)
Code Example #26
def run_exp2mod10(id):
    """ drift-rate: rwd/pen tradeoff with free bias model """
    #imports
    import hddm
    data = hddm.load_csv('~/Pub_Code_master/Data/exp2data.csv')
    #data grooming, remove fast outliers
    #rtSig = data.rt.std()
    #rtMu = data.rt.mean()
    #cutoff =  rtMu - rtSig
    #data = data[data.rt>cutoff]
    #data.reset_index(drop=True, inplace=True)
    #build model
    exp2model10 = hddm.HDDM(data,
                            depends_on={'v': 'rwd_pen'},
                            include='z',
                            p_outlier=0.05)
    exp2model10.find_starting_values()
    exp2model10.sample(1500,
                       burn=500,
                       dbname='exp2mod10_%i.db' % id,
                       db='pickle')
    return exp2model10
Code Example #27
File: Exp3models.py, Project: CoAxLab/Net_Worth
def run_exp3mod8(id):
    """ bias: rwd prob model """
    import hddm
    data = hddm.load_csv(
        'C:/Users/Rory/Dropbox/Net_Worth/Pub_Code/Data/Experiment3/Model/Exp3ModelData.csv'
    )
    #data grooming, remove fast outliers
    #rtSig = data.rt.std()
    #rtMu = data.rt.mean()
    #cutoff =  rtMu - rtSig
    #data = data[data.rt>cutoff]
    #data.reset_index(drop=True, inplace=True)
    exp3model8 = hddm.HDDM(data,
                           depends_on={'z': 'rwd_prob'},
                           include='z',
                           p_outlier=0.05)
    exp3model8.find_starting_values()
    exp3model8.sample(1500,
                      burn=250,
                      dbname='exp3mod8_%i.db' % id,
                      db='pickle')
    return exp3model8
Code Example #28
File: Exp3models.py, Project: CoAxLab/Net_Worth
def run_exp3mod10(id):
    """ drift-rate: stable target variance with free bias model """
    #imports
    import hddm
    data = hddm.load_csv(
        'C://Users/Rory/Dropbox/Net_Worth/Pub_Code/Data/Experiment3/Model/Exp3ModelData.csv'
    )
    #data grooming, remove fast outliers
    #rtSig = data.rt.std()
    #rtMu = data.rt.mean()
    #cutoff =  rtMu - rtSig
    #data = data[data.rt>cutoff]
    #data.reset_index(drop=True, inplace=True)
    #build model
    exp3model10 = hddm.HDDM(data,
                            depends_on={'v': 'stableVar'},
                            include='z',
                            p_outlier=0.05)
    exp3model10.find_starting_values()
    exp3model10.sample(1500,
                       burn=250,
                       dbname='exp3mod10_%i.db' % id,
                       db='pickle')
    return exp3model10
Code Example #29
File: loop_hddm.py, Project: pjrice/python
import pandas as pd
import hddm
import pickle

# load data
data = hddm.load_csv(
    'Z://Work//UW//projects//RR_TMS//hddm//data//infins_hddm.csv')

# init models
models = []

# make models
for i in range(5):
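    # each pass fits an independent chain with its own trace database, so convergence can be checked across runs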
    m = hddm.HDDM(data, depends_on={'v': 'stim', 'a': 'stim'})
    m.find_starting_values()
    m.sample(
        10000,
        burn=5000,
        dbname=
        'Z://Work//UW//projects//RR_TMS//hddm//db//ii_va_stim_traces%i.db' % i,
        db='pickle')
    models.append(m)

# save models
for i in range(5):
    fname = 'Z://Work//UW//projects//RR_TMS//hddm//models//by_cond//ii_va_stim' + str(
        i)
    models[i].save(fname)
Code Example #30
File: runModel.py, Project: theodrosmhaile/ACTR_DDM
#folder = '/home/ausmanpa/Documents/gp/ACTR_DDM/simulations/'
folder = '/projects/actr/models/ACTR_DDM/simulations01_redux/'
files = os.listdir(folder)
# listdir also returns the DDM_results output folder, so remove it from the file list
folderIdx = files.index('DDM_results')
files.pop(folderIdx)

for f in files:

    hddmData = hddm.load_csv(folder + f)

    #create model object where v, a, and t all depend on the difficulty
    model = hddm.HDDM(hddmData,
                      depends_on={
                          'v': 'stim',
                          'a': 'stim',
                          't': 'stim'
                      })

    # find a good starting point to help with convergence (note: this step sometimes raises a warning/error)
    model.find_starting_values()

    #draw 2000 samples, discard 20 as burn-in
    model.sample(2000, burn=20, dbname='traces.db', db='pickle')

    tempF = f.split('.txt')[0]

    fname = folder + 'DDM_results/' + tempF

    model.save(fname)