def setup_model(self):
    """Set up the model."""

    class DriftCoherence(ddm.models.Drift):
        name = "Drift depends linearly on coherence"
        # Parameter that should be included in the DDM
        required_parameters = ["driftcoherence"]
        # Task parameter, i.e. coherence
        required_conditions = ["coherence"]

        # Define the get_drift function
        def get_drift(self, conditions, **kwargs):
            return self.driftcoherence * conditions['coherence']

    # Set up model with drift depending on coherence
    model = Model(name='Noise Model - Drift varies with coherence',
                  drift=DriftCoherence(driftcoherence=Fittable(minval=0, maxval=20)),
                  noise=NoiseConstant(noise=1),
                  bound=BoundConstant(B=Fittable(minval=.1, maxval=1.5)),
                  overlay=OverlayChain(overlays=[
                      OverlayNonDecision(nondectime=Fittable(minval=0, maxval=.4)),
                      OverlayPoissonMixture(pmixturecoef=.02, rate=1)
                  ]),
                  dx=.01, dt=.01, T_dur=2)
    return model
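# --- Usage sketch (assumption-laden, not from the source) ----------------------
# A minimal fit of the model returned by setup_model(), assuming the standard
# PyDDM imports and a pandas DataFrame with 'RT' (seconds), 'correct' (0/1) and
# 'coherence' columns; the synthetic data and the `analysis` object are hypothetical.
import numpy as np
import pandas as pd
from ddm import Sample
from ddm.functions import fit_adjust_model

rng = np.random.default_rng(0)
df = pd.DataFrame({'RT': rng.uniform(0.3, 1.5, 200),                        # seconds
                   'correct': rng.integers(0, 2, 200),                      # 0/1 accuracy
                   'coherence': rng.choice([0.0, 0.032, 0.064, 0.128], 200)})
# Columns other than the RT/correct columns are picked up as conditions (here: 'coherence').
sample = Sample.from_pandas_dataframe(df, rt_column_name="RT",
                                      correct_column_name="correct")
model = analysis.setup_model()   # `analysis` stands in for whatever object defines setup_model()
fit_adjust_model(sample=sample, model=model, verbose=False)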
def apply(self, solution):
    corr = solution.corr
    err = solution.err
    m = solution.model
    cond = solution.conditions
    undec = solution.undec
    evolution = solution.evolution
    lapse = getattr(self, 'lapse{}'.format(cond['reward']))
    assert isinstance(solution, Solution)

    # check what corr would look like with drift rate == 0
    # ----------------------------------------------------
    m2 = copy.copy(m)

    # set dependencies:
    dependencies = m.dependencies
    for i in range(len(dependencies)):
        if dependencies[i].depname == 'Overlay':
            # drop the 'evidence lapse' overlay (filter instead of popping while
            # iterating, which can skip elements or run past the end of the list)
            overlays = [o for o in dependencies[i].overlays
                        if o.name != 'evidence lapse']
            dependencies[i] = OverlayChain(overlays=overlays)
    m2.dependencies = dependencies

    # set parameters:
    param_names = m.get_model_parameter_names()
    param_values = m.get_model_parameters()
    for i in range(len(param_values)):
        if param_names[i][0] == 'v':
            param_values[i] = Fitted(0)
    m2.set_model_parameters(param_values)

    # solve:
    solution2 = m2.solve(cond)
    corr2 = solution2.corr
    # ----------------------------------------------------

    # update corr: mix the original solution with the zero-drift solution
    corr = (corr * (1 - lapse)) + (lapse * corr2)

    # Assume numpy ndarrays, not lists
    return Solution(corr, err, m, cond, undec, evolution)
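# --- Context sketch (inferred, not from the source) -----------------------------
# How an apply() like the one above would sit inside a custom PyDDM overlay.
# The class name matches OverlayEvidenceLapse used later in this section; the
# required_parameters/required_conditions shown here are inferred from the use
# of lapse0/lapse1 and cond['reward'] above.
from ddm.models import Overlay

class OverlayEvidenceLapse(Overlay):
    name = "evidence lapse"
    required_parameters = ["lapse0", "lapse1"]   # one lapse rate per reward condition (assumed)
    required_conditions = ["reward"]

    def apply(self, solution):
        ...  # body as defined above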
def make_model(sample, model_settings):

    # model components:
    z = make_z(sample=sample,
               z_depends_on=model_settings['depends_on']['z'])
    drift = make_drift(sample=sample,
                       drift_bias=model_settings['drift_bias'],
                       v_depends_on=model_settings['depends_on']['v'],
                       b_depends_on=model_settings['depends_on']['b'])
    a = make_a(sample=sample,
               urgency=model_settings['urgency'],
               a_depends_on=model_settings['depends_on']['a'],
               u_depends_on=model_settings['depends_on']['u'])
    t = make_t(sample=sample,
               t_depends_on=model_settings['depends_on']['t'])
    T_dur = model_settings['T_dur']

    # limits:
    ranges = {
        'z': (0.05, 0.95),      # starting point
        'v': (0, 5),            # drift rate
        'b': (-5, 5),           # drift bias
        'a': (0.1, 5),          # bound
        # 'u': (-T_dur*10, T_dur*10),  # hyperbolic collapse
        'u': (0.01, T_dur*10),  # hyperbolic collapse
        't': (0, 2),            # non-decision time
    }

    # put together:
    if model_settings['start_bias']:
        initial_condition = z(**{param: Fittable(minval=ranges[param[0]][0],
                                                 maxval=ranges[param[0]][1])
                                 for param in z.required_parameters})
    else:
        initial_condition = z(**{'z': 0.5})
    model = Model(name='stimulus coding model / collapsing bound',
                  IC=initial_condition,
                  drift=drift(**{param: Fittable(minval=ranges[param[0]][0],
                                                 maxval=ranges[param[0]][1])
                                 for param in drift.required_parameters}),
                  bound=a(**{param: Fittable(minval=ranges[param[0]][0],
                                             maxval=ranges[param[0]][1])
                             for param in a.required_parameters}),
                  overlay=OverlayChain(overlays=[
                      t(**{param: Fittable(minval=ranges[param[0]][0],
                                           maxval=ranges[param[0]][1])
                           for param in t.required_parameters}),
                      OverlayUniformMixture(umixturecoef=0),
                      # OverlayPoissonMixture(pmixturecoef=.01, rate=1),
                  ]),
                  noise=NoiseConstant(noise=1),
                  dx=.005, dt=.01, T_dur=T_dur)
    return model
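# --- Illustrative call (assumed values) -----------------------------------------
# A model_settings dictionary with the keys make_model() reads ('depends_on',
# 'drift_bias', 'urgency', 'start_bias', 'T_dur'); the concrete values and the
# 'stimulus' condition name are assumptions.
model_settings = {
    'depends_on': {'z': None,          # starting point shared across conditions
                   'v': ['stimulus'],  # drift rate varies with a task condition
                   'b': None,          # drift bias shared
                   'a': None,          # bound shared
                   'u': None,          # urgency / collapse shared
                   't': None},         # non-decision time shared
    'drift_bias': True,
    'urgency': True,
    'start_bias': False,
    'T_dur': 2,
}
model = make_model(sample=sample, model_settings=model_settings)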
def DDM_FIT(RT, ANSWER):
    # RT is rescaled to seconds; the fitting routine expects seconds
    df = pd.DataFrame({'RT': RT / 1000, 'correct': ANSWER})
    sample = Sample.from_pandas_dataframe(df, rt_column_name="RT",
                                          correct_column_name="correct")

    model = Model(name='Model',
                  drift=DriftConstant(drift=Fittable(minval=6, maxval=25)),
                  noise=NoiseConstant(noise=1.5),  # or Fittable(minval=0.5, maxval=2.5)
                  bound=BoundConstant(B=2.5),
                  overlay=OverlayChain(overlays=[
                      OverlayNonDecision(nondectime=Fittable(minval=0, maxval=.8)),
                      OverlayPoissonMixture(pmixturecoef=.02, rate=1)
                  ]),
                  dx=.001, dt=.01, T_dur=2)

    # Fitting this will also be fast because PyDDM can automatically
    # determine that DriftConstant allows an analytical solution.
    fit_model = fit_adjust_model(sample=sample, model=model,
                                 fitting_method="differential_evolution",
                                 lossfunction=LossRobustBIC, verbose=False)

    param = fit_model.get_model_parameters()
    Drift = np.asarray(param[0])   # fitted drift rate
    Delay = np.asarray(param[1])   # fitted non-decision time
    return Drift, Delay
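# --- Usage sketch (synthetic data, assumptions labeled) --------------------------
# DDM_FIT() expects RT in milliseconds (hence the division by 1000) and 0/1
# answers; the arrays below are synthetic and purely illustrative.
import numpy as np

rt_ms = np.random.uniform(300, 1500, size=100)    # response times in ms
answers = np.random.binomial(1, 0.85, size=100)   # 1 = correct, 0 = error
drift, nondectime = DDM_FIT(rt_ms, answers)       # param[0] -> drift, param[1] -> non-decision time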
from ddm.functions import fit_adjust_model, display_model
from ddm.models import NoiseConstant, BoundConstant, OverlayChain, OverlayNonDecision, OverlayPoissonMixture

model_rs = Model(name='Roitman data, drift varies with coherence',
                 drift=DriftCoherence(driftcoh=Fittable(minval=0, maxval=20)),
                 noise=NoiseConstant(noise=1),
                 bound=BoundConstant(B=Fittable(minval=.1, maxval=1.5)),
                 # Since we can only have one overlay, we use
                 # OverlayChain to string together multiple overlays.
                 # They are applied sequentially in order.
                 # OverlayNonDecision implements a non-decision time by
                 # shifting the resulting distribution of response times
                 # by `nondectime` seconds.
                 overlay=OverlayChain(overlays=[
                     OverlayNonDecision(nondectime=Fittable(minval=0, maxval=.4)),
                     OverlayPoissonMixture(pmixturecoef=.02, rate=1)
                 ]),
                 dx=.001, dt=.01, T_dur=2)

# Fitting this will also be fast because PyDDM can automatically
# determine that DriftCoherence allows an analytical solution.
fit_model_rs = fit_adjust_model(sample=roitman_sample, model=model_rs, verbose=False)
display_model(fit_model_rs)
fit_model_rs.parameters()

# Plot the model fit to the PDFs and save the file.
import ddm.plot
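# The plotting step announced by the comment above is left unfinished; a likely
# continuation using the standard ddm.plot diagnostics (the output filename is
# an assumption):
import matplotlib.pyplot as plt

ddm.plot.plot_fit_diagnostics(model=fit_model_rs, sample=roitman_sample)
plt.savefig("roitman-fit.png")  # illustrative filename
plt.show()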
def make_model_one_accumulator(sample, model_settings):

    # # components:
    # z = make_z_one_accumulator(sample=sample,
    #                            a_depends_on=model_settings['depends_on']['a'])
    # drift = make_drift_one_accumulator(sample=sample,
    #                                    drift_bias=model_settings['drift_bias'],
    #                                    leak=model_settings['leak'],
    #                                    v_depends_on=model_settings['depends_on']['v'],
    #                                    b_depends_on=model_settings['depends_on']['b'],
    #                                    k_depends_on=model_settings['depends_on']['k'],
    #                                    a_depends_on=model_settings['depends_on']['v'],)
    # bound = BoundConstant
    # t = NonDecisionTime
    # n = NoisePulse(noise=1)

    # # limits:
    # ranges = {
    #     # 'v':(0.99,1.01),   # drift rate
    #     # 'b':(-0.21,-0.19), # drift bias
    #     # 'k':(2.39,2.41),   # leak
    #     # 'z':(0.75,0.999),  # starting point --> translates into bound height 0-5
    #     'v':(0,5),           # drift rate
    #     'b':(-5,5),          # drift bias
    #     'k':(0,5),           # leak
    #     'a':(0.1,5),         # bound
    #     't':(0,.1),          # non-decision time
    #     }

    # # initialize params:
    # a_params = {param:Fittable(minval=ranges[param[0]][0], maxval=ranges[param[0]][1]) for param in z.required_parameters}
    # drift_params = {param:Fittable(minval=ranges[param[0]][0], maxval=ranges[param[0]][1]) for param in drift.required_parameters if not 'a' in param}
    # drift_params = {**drift_params, **a_params}

    # # put together:
    # model = Model(name='one accumulator model',
    #               IC=z(**a_params),
    #               drift=drift(**drift_params),
    #               bound=bound(B=10),
    #               overlay=OverlayChain(overlays=[t(**{param:Fittable(minval=ranges[param[0]][0], maxval=ranges[param[0]][1]) for param in t.required_parameters}),
    #                                              # OverlayUniformMixture(umixturecoef=0)
    #                                              # OverlayUniformMixture(umixturecoef=0.01)
    #                                              OverlayPoissonMixture(pmixturecoef=.02, rate=1)
    #                                              ]),
    #               noise=n,
    #               dx=.01, dt=.01, T_dur=T_dur)

    # parameters:
    ranges = {
        # 'v': (0.99, 1.01),    # drift rate
        # 'b': (-0.21, -0.19),  # drift bias
        # 'k': (2.39, 2.41),    # leak
        # 'z': (0.75, 0.999),   # starting point --> translates into bound height 0-5
        'v': (0, 25),             # drift rate
        'b': (-5, 5),             # drift bias
        'k': (0, 5),              # leak
        'a': (0.1, 5),            # bound
        't': (0, .1),             # non-decision time
        'lapse': (0.001, 0.999),  # lapse rate
        'mixture': (0.001, 0.999),  # mixture rate
    }

    a0_value = Fittable(minval=ranges['a'][0], maxval=ranges['a'][1], default=1)
    a1_value = Fittable(minval=ranges['a'][0], maxval=ranges['a'][1], default=1)
    v0_value = Fittable(minval=ranges['v'][0], maxval=ranges['v'][1], default=1)
    v1_value = Fittable(minval=ranges['v'][0], maxval=ranges['v'][1], default=1)

    if model_settings['drift_bias'] & (model_settings['depends_on']['b'] == ['reward']):
        b0_value = Fittable(minval=ranges['b'][0], maxval=ranges['b'][1], default=0)
        b1_value = Fittable(minval=ranges['b'][0], maxval=ranges['b'][1], default=0)
    elif model_settings['drift_bias'] & (model_settings['depends_on']['b'] is None):
        b0_value = Fittable(minval=ranges['b'][0], maxval=ranges['b'][1], default=0)
        b1_value = b0_value
    else:
        b0_value = 0
        b1_value = 0

    if model_settings['leak'] & (model_settings['depends_on']['k'] == ['reward']):
        k0_value = Fittable(minval=ranges['k'][0], maxval=ranges['k'][1], default=1)
        k1_value = Fittable(minval=ranges['k'][0], maxval=ranges['k'][1], default=1)
    elif model_settings['leak'] & (model_settings['depends_on']['k'] is None):
        k0_value = Fittable(minval=ranges['k'][0], maxval=ranges['k'][1], default=1)
        k1_value = k0_value
    else:
        k0_value = 0
        k1_value = 0

    if model_settings['lapse'] & (model_settings['depends_on']['lapse'] == ['reward']):
        lapse0_value = Fittable(minval=ranges['lapse'][0], maxval=ranges['lapse'][1], default=0.1)
        lapse1_value = Fittable(minval=ranges['lapse'][0], maxval=ranges['lapse'][1], default=0.1)
    elif model_settings['lapse'] & (model_settings['depends_on']['lapse'] is None):
        lapse0_value = Fittable(minval=ranges['lapse'][0], maxval=ranges['lapse'][1], default=0.1)
        lapse1_value = lapse0_value
    else:
        lapse0_value = 0
        lapse1_value = 0

    if model_settings['mixture'] & (model_settings['depends_on']['mixture'] == ['reward']):
        mixture0_value = Fittable(minval=ranges['mixture'][0], maxval=ranges['mixture'][1], default=0.1)
        mixture1_value = Fittable(minval=ranges['mixture'][0], maxval=ranges['mixture'][1], default=0.1)
    elif model_settings['mixture'] & (model_settings['depends_on']['mixture'] is None):
        mixture0_value = Fittable(minval=ranges['mixture'][0], maxval=ranges['mixture'][1], default=0.1)
        mixture1_value = mixture0_value
    else:
        mixture0_value = 0
        mixture1_value = 0

    # components:
    starting_point_components = {'a0': a0_value, 'a1': a1_value}
    drift_components = {
        'v0': v0_value, 'v1': v1_value,
        'k0': k0_value, 'k1': k1_value,
        'b0': b0_value, 'b1': b1_value,
        'a0': a0_value, 'a1': a1_value,
    }
    mixture_components = {'mixture0': mixture0_value, 'mixture1': mixture1_value}
    lapse_components = {'lapse0': lapse0_value, 'lapse1': lapse1_value}

    # build model:
    from ddm.models import DriftConstant, NoiseConstant, BoundConstant, OverlayChain, OverlayNonDecision, \
        OverlayPoissonMixture, OverlayUniformMixture, InitialCondition, ICPoint, ICPointSourceCenter, LossBIC
    bound = BoundConstant
    a = StartingPoint_reward
    drift = DriftPulse_reward
    t = NonDecisionTime
    n = NoisePulse(noise=1)
    model = Model(name='one accumulator model',
                  IC=a(**starting_point_components),
                  drift=drift(**drift_components),
                  bound=bound(B=10),
                  overlay=OverlayChain(overlays=[
                      t(t=0),
                      # t(**{param: Fittable(minval=ranges[param[0]][0], maxval=ranges[param[0]][1]) for param in t.required_parameters}),
                      OverlayExGaussMixture(**mixture_components),
                      OverlayEvidenceLapse(**lapse_components),
                  ]),
                  noise=n,
                  dx=model_settings['dx'], dt=model_settings['dt'], T_dur=model_settings['T_dur'])

    # # fit:
    # # model_fit = fit_adjust_model(sample=sample, model=model, lossfunction=LossLikelihood)
    # try:
    #     model_fit = fit_adjust_model(sample=sample, model=model, lossfunction=LossLikelihoodGonogo, fitting_method="differential_evolution")
    #     # print('model: {}'.format(model_fit.solve({'stimulus': 0}).prob_correct()))
    #     # print('data: {}'.format(df.loc[df['stimulus']==0, 'response'].mean()))
    #     # print()
    #     # print('model: {}'.format(model_fit.solve({'stimulus': 1}).prob_correct()))
    #     # print('data: {}'.format(df.loc[df['stimulus']==1, 'response'].mean()))
    #     # # plot:
    #     # ddm.plot.plot_fit_diagnostics(model=model_fit, sample=sample)
    #     # get params:
    #     params = pd.DataFrame(np.atleast_2d([p.real for p in model_fit.get_model_parameters()]),
    #                           columns=model_fit.get_model_parameter_names())
    #     params['bic'] = model_fit.fitresult.value()
    # except Exception as e:
    #     print(e)
    #     params = pd.DataFrame(np.atleast_2d([np.nan, np.nan, np.nan, np.nan]),
    #                           columns=model.get_model_parameter_names())

    return model
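# --- Illustrative call (assumed values) -----------------------------------------
# A model_settings dictionary with the keys make_model_one_accumulator() reads;
# the specific values and the 'reward' dependence are assumptions.
model_settings = {
    'drift_bias': True,
    'leak': True,
    'lapse': True,
    'mixture': True,
    'depends_on': {'b': ['reward'],      # separate drift bias per reward condition
                   'k': None,            # shared leak
                   'lapse': ['reward'],  # separate lapse rate per reward condition
                   'mixture': None},     # shared mixture proportion
    'dx': .01, 'dt': .01, 'T_dur': 2,
}
model = make_model_one_accumulator(sample=sample, model_settings=model_settings)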
def Fit_DDM_Group_JK_ib(domain='whole'):

    # create a df to record the output data
    column_names = ['domain', 'RS_level', 'sub_omit'] + model_rDDM.get_model_parameter_names() + \
                   ['loss_func_value', 'criterion', 'sample_size', 'on_bound']
    rDDM_fitting_jk = pd.DataFrame(columns=column_names)

    # decide whether to fit the rDDM on the whole dataset,
    # the advantageous trials, or the disadvantageous trials
    if domain == 'adv':
        data_for_fit = data[data['domain_group'] == 1]
    elif domain == 'dis':
        data_for_fit = data[data['domain_group'] == 2]
    else:
        data_for_fit = data

    # fit on groups of different RS levels
    for group in np.sort(data_for_fit['RS_level'].unique()):
        data_subgroup = data_for_fit[data_for_fit['RS_level'] == group]

        # omit subjects one at a time (jackknife)
        for sub_omit in np.sort(data_subgroup['subject'].unique()):

            # find the current fitting parameters in fit_ref
            where = np.where((fit_ref['domain'] == domain) &
                             (fit_ref['RS_level'] == group) &
                             (fit_ref['sub_omit'] == str(sub_omit)))
            where = int(where[0])

            # allow for extra range around the best-fitting results
            extra = 0.05
            extra_v_dis = 0.0005
            extra_v_els = 0.005
            if domain == 'dis':
                vg_max = fit_ref.iloc[where]['vg_best'] + extra_v_dis
                vg_min = fit_ref.iloc[where]['vg_best'] - extra_v_dis
                vl_max = fit_ref.iloc[where]['vl_best'] + extra_v_dis
                vl_min = fit_ref.iloc[where]['vl_best'] - extra_v_dis
            else:
                vg_max = fit_ref.iloc[where]['vg_best'] + extra_v_els
                vg_min = fit_ref.iloc[where]['vg_best'] - extra_v_els
                vl_max = fit_ref.iloc[where]['vl_best'] + extra_v_els
                vl_min = fit_ref.iloc[where]['vl_best'] - extra_v_els
            fixed_max = fit_ref.iloc[where]['fixed_best'] + extra
            fixed_min = fit_ref.iloc[where]['fixed_best'] - extra
            B_max = fit_ref.iloc[where]['B_best'] + extra
            B_min = fit_ref.iloc[where]['B_best'] - extra
            x0_max = fit_ref.iloc[where]['x0_best'] + extra
            x0_min = fit_ref.iloc[where]['x0_best'] - extra
            nondectime_max = fit_ref.iloc[where]['nondectime_best'] + extra
            nondectime_min = fit_ref.iloc[where]['nondectime_best'] - extra

            # create a DDM model based on the current fitting range
            model_rDDM_ib = Model(
                name='Risk_DDM_individual_range_fit',
                drift=Risk_Drift(vg=Fittable(minval=vg_min, maxval=vg_max),
                                 vl=Fittable(minval=vl_min, maxval=vl_max),
                                 fixed=Fittable(minval=fixed_min, maxval=fixed_max)),
                IC=Biased_Start(x0=Fittable(minval=x0_min, maxval=x0_max)),
                noise=NoiseConstant(noise=1),
                bound=BoundConstant(B=Fittable(minval=B_min, maxval=B_max)),
                # Uniform mixture model
                overlay=OverlayChain(overlays=[
                    OverlayNonDecision(nondectime=Fittable(minval=nondectime_min,
                                                           maxval=nondectime_max)),
                    OverlayUniformMixture(umixturecoef=.05)
                ]),
                dx=0.001, dt=0.001, T_dur=10)
                # dx=0.01, dt=0.01, T_dur=10)

            data_jk = data_subgroup[data_subgroup['subject'] != sub_omit]
            sample_size = data_jk.shape[0]

            # create a sample and start fitting
            data_jk_sample = Sample.from_pandas_dataframe(
                data_jk, rt_column_name="RT", correct_column_name="accept")
            fit_sample = fit_adjust_model(sample=data_jk_sample,
                                          model=model_rDDM_ib,
                                          fitting_method='differential_evolution')

            # sort out results; the trailing 0 is a placeholder for the counts
            # in the on_bound column
            result_list = [[domain, group, sub_omit] + fit_sample.get_model_parameters() +
                           [fit_sample.fitresult.value(), fit_sample.fitresult.loss, sample_size, 0]]
            result_df = pd.DataFrame(result_list, columns=column_names)

            # check whether the estimated results lie on the limits of the fitting ranges
            if result_df['vg'][0] == vg_min or result_df['vg'][0] == vg_max:
                result_df['on_bound'][0] += 1
            if result_df['vl'][0] == vl_min or result_df['vl'][0] == vl_max:
                result_df['on_bound'][0] += 1
            if result_df['fixed'][0] == fixed_min or result_df['fixed'][0] == fixed_max:
                result_df['on_bound'][0] += 1
            if result_df['B'][0] == B_min or result_df['B'][0] == B_max:
                result_df['on_bound'][0] += 1
            if result_df['x0'][0] == x0_min or result_df['x0'][0] == x0_max:
                result_df['on_bound'][0] += 1
            if result_df['nondectime'][0] == nondectime_min or result_df['nondectime'][0] == nondectime_max:
                result_df['on_bound'][0] += 1

            # append the results (pd.concat replaces DataFrame.append, which was
            # removed in pandas 2.0)
            rDDM_fitting_jk = pd.concat([rDDM_fitting_jk, result_df], ignore_index=True)

    return rDDM_fitting_jk
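# --- Hypothetical driver (not from the source) -----------------------------------
# Run the jackknife fit for each domain and stack the results; assumes the
# module-level `data`, `fit_ref`, and `model_rDDM` objects used above exist.
results = pd.concat([Fit_DDM_Group_JK_ib(domain=d) for d in ['whole', 'adv', 'dis']],
                    ignore_index=True)
results.to_csv('rDDM_jackknife_fits.csv', index=False)  # output path is illustrative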
class Risk_Drift(ddm.models.Drift):
    # NOTE: only the body of get_drift() was present here; the class header,
    # required_parameters and required_conditions are inferred from how
    # Risk_Drift and the 'gain'/'loss' conditions are used in model_rDDM below.
    name = "Drift depends on gains and losses"
    required_parameters = ["vg", "vl", "fixed"]
    required_conditions = ["gain", "loss"]

    def get_drift(self, conditions, **kwargs):
        valuation = self.vg * conditions['gain'] - self.vl * conditions['loss']
        return valuation + self.fixed


model_rDDM = Model(
    name='Risk_DDM',
    drift=Risk_Drift(vg=Fittable(minval=0.001, maxval=0.06),
                     vl=Fittable(minval=0.001, maxval=0.06),
                     fixed=Fittable(minval=-1.6, maxval=0.8)),
    IC=Biased_Start(x0=Fittable(minval=-0.55, maxval=0.55)),
    noise=NoiseConstant(noise=1),
    bound=BoundConstant(B=Fittable(minval=0.5, maxval=2)),
    # Uniform mixture model
    overlay=OverlayChain(overlays=[
        OverlayNonDecision(nondectime=Fittable(minval=0, maxval=1.1)),
        OverlayUniformMixture(umixturecoef=.05)
    ]),
    dx=0.001, dt=0.001, T_dur=10)
    # dx=0.01, dt=0.01, T_dur=10)

# Check if an analytical solution exists.
model_rDDM.has_analytical_solution()
model_rDDM.get_model_parameter_names()
model_rDDM.get_model_parameters()

# ### check numerical stability?
# model_rDDM.can_solve_cn(conditions={'gain': data['gain'], 'loss': data['loss']})
# model_rDDM.can_solve_explicit(conditions={'gain': data['gain'], 'loss': data['loss']})
# model_rDDM.can_solve_explicit(conditions={'gain': [10, 20, 30], 'loss': [10, 20, 30]})
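# --- Sanity-check sketch (illustrative condition values) --------------------------
# After fitting (e.g. fit_adjust_model(sample=..., model=model_rDDM)), the model
# can be solved for a single hypothetical gain/loss condition to inspect its
# predictions; the condition values below are illustrative.
sol = model_rDDM.solve(conditions={'gain': 20, 'loss': 10})
print(sol.prob_correct())        # predicted probability of reaching the upper ("accept") bound
print(sol.mean_decision_time())  # predicted mean decision time in seconds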