Example 1
def comparisons(arglist):
    """Evaluate the data likelihood at a single (fine_sigma, reward) grid point,
    once with the default observer and once with the simulation-based observer,
    and return both likelihood values together with the grid indices."""
    # model_params and subject_num are module-level globals in the original script
    fine_sigma, i = arglist[0]
    reward, j = arglist[1]
    print(i, j)
    model_params['fine_sigma'] = fine_sigma
    model_params['reward'] = reward
    # Map the fine-grained process onto coarse statistics and solve the
    # decision problem for the set size at index 1
    finegr = FineGrained(**model_params)
    coarse_stats = finegr.coarse_stats
    model_params['mu'] = coarse_stats[1, :, 0]
    model_params['sigma'] = coarse_stats[1, :, 1]
    model_params['N'] = model_params['N_values'][1]
    bellutil = BellmanUtil(**model_params)
    model_params['decisions'] = bellutil.decisions

    # Evaluate the same decision policy twice: once with explicit simulation
    # (simulate=True) and once without
    sim_params = deepcopy(model_params)
    sim_params['simulate'] = True
    fp_params = deepcopy(model_params)

    fpobs = ObserverSim(**fp_params)
    simobs = ObserverSim(**sim_params)

    # Accumulate the subject's data likelihood under each version of the model;
    # the simulated observer is scored through the legacy RT-matrix interface
    sim_likelihood = DataLikelihoods(subject_num)
    fp_likelihood = DataLikelihoods(subject_num)

    fp_likelihood.increment_likelihood(fpobs.fractions, **fp_params)
    sim_likelihood.increment_likelihood_legacy(simobs.dist_matrix,
                                               simobs.rts_matrix, **sim_params)

    fp_value = fp_likelihood.likelihood
    sim_value = sim_likelihood.likelihood
    return (fp_value, sim_value, i, j)
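A hypothetical driver for comparisons() is sketched below: it sweeps a small grid of (fine_sigma, reward) values and collects both likelihoods at each grid point. The grid ranges, the pool size, and the use of multiprocessing are assumptions for illustration; only the argument layout [(fine_sigma, i), (reward, j)] and the returned (fp_value, sim_value, i, j) tuple come from the example above, and model_params / subject_num are assumed to already exist at module level.

import numpy as np
from multiprocessing import Pool

fine_sigmas = np.linspace(0.5, 2.0, 5)   # assumed sweep range
rewards = np.linspace(0.5, 3.0, 5)       # assumed sweep range

# Each element matches the layout comparisons() expects: ((fine_sigma, i), (reward, j))
arglists = [((fs, i), (rw, j))
            for i, fs in enumerate(fine_sigmas)
            for j, rw in enumerate(rewards)]

with Pool(4) as pool:
    results = pool.map(comparisons, arglists)   # list of (fp_value, sim_value, i, j)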
Example 2
def subject_likelihood(likelihood_arglist):
    """
    First handle the different cases of what we need to fit and save parameters. They just set
    the appropriate values of fine_sigma, reward, and punishment depending on the model type
    and print what is being evaluated by the optimization algorithm."""
    log_parameters, model_params = likelihood_arglist

    curr_params = deepcopy(model_params)
    # Two sigmas, reward, punishment, and alpha are all fit in log space
    sigma = np.array((np.exp(log_parameters[0]), np.exp(log_parameters[1])))
    reward = np.exp(log_parameters[2])
    punishment = np.exp(log_parameters[3])
    alpha = np.exp(log_parameters[4])
    print('sigmas = {:.2f}, {:.2f}'.format(*sigma),
          '; reward = {:.2f}'.format(reward),
          '; punishment = {:.2f}'.format(punishment),
          '; alpha = {:.2f}'.format(alpha))

    curr_params['sigma'] = sigma
    curr_params['reward'] = reward
    curr_params['punishment'] = -punishment   # punishment enters the model as a negative value
    curr_params['alpha'] = alpha

    # Compute rho and the decision policy from the Bellman solution
    bellutil = BellmanUtil(**curr_params)
    curr_params['rho'] = bellutil.rho
    curr_params['decisions'] = bellutil.decisions

    # Generate the model's response fractions under that policy
    obs = ObserverSim(**curr_params)
    curr_params['fractions'] = obs.fractions

    # Score the subject's data under the model
    likelihood_data = DataLikelihoods(**curr_params)
    likelihood_data.increment_likelihood(**curr_params)
    print(likelihood_data.likelihood)
    return likelihood_data.likelihood
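Since subject_likelihood takes a single (log_parameters, model_params) tuple, a hedged usage sketch looks like the following. The starting values and the choice of Nelder-Mead are placeholders for illustration, not the optimizer used in the original; only the parameter ordering (two sigmas, reward, punishment, alpha, all in log space) and the tuple argument come from the function above, and model_params is assumed to be a fully populated parameter dict.

import numpy as np
from scipy.optimize import minimize

# Log-space starting guess: sigma_1, sigma_2, reward, punishment, alpha
x0 = np.log(np.array([1.0, 1.0, 1.0, 0.1, 1.0]))

# Single evaluation at the starting guess
nll = subject_likelihood((x0, model_params))

# Driving it with a generic optimizer (placeholder choice of method)
res = minimize(lambda x: subject_likelihood((x, model_params)), x0,
               method='Nelder-Mead')
print(res.x, res.fun)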
Example 3
def likelihood_inner_loop(curr_params):
    """Compute the decision policy and response fractions for a single
    parameter set (one set size) and return the augmented parameters."""
    bellutil = BellmanUtil(**curr_params)
    curr_params['decisions'] = bellutil.decisions

    obs = ObserverSim(**curr_params)
    curr_params['fractions'] = obs.fractions
    return curr_params


def run_model(model_params):
    """Map the fine-grained statistics onto each set size, then compute the
    decision policy and response fractions for every N."""
    finegr = FineGrained(**model_params)
    model_params['coarse_stats'] = finegr.coarse_stats

    N_values = model_params['N_values']
    dist_computed_params = []
    for i, N in enumerate(N_values):
        # Each set size gets its own copy of the parameters with the
        # corresponding coarse-grained mu and sigma
        curr_params = deepcopy(model_params)
        curr_params['N'] = N
        curr_params['mu'] = model_params['coarse_stats'][i, :, 0]
        curr_params['sigma'] = model_params['coarse_stats'][i, :, 1]
        bellutil = BellmanUtil(**curr_params)
        curr_params['decisions'] = bellutil.decisions
        obs = ObserverSim(**curr_params)
        curr_params['fractions'] = obs.fractions
        dist_computed_params.append(curr_params)

    return dist_computed_params
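run_model repeats, inline, the same per-set-size body that likelihood_inner_loop wraps, which suggests the helper exists so the per-N computations can be mapped independently. A minimal sketch of that pattern follows; run_model_parallel and the pool size are hypothetical names and choices, and FineGrained, likelihood_inner_loop, and the input dict are assumed to be set up exactly as above.

from copy import deepcopy
from multiprocessing import Pool

def run_model_parallel(model_params, n_workers=3):
    # Same setup as run_model: compute coarse statistics once up front
    finegr = FineGrained(**model_params)
    model_params['coarse_stats'] = finegr.coarse_stats

    # One parameter dict per set size, as in the loop of run_model
    per_N_params = []
    for i, N in enumerate(model_params['N_values']):
        curr_params = deepcopy(model_params)
        curr_params['N'] = N
        curr_params['mu'] = model_params['coarse_stats'][i, :, 0]
        curr_params['sigma'] = model_params['coarse_stats'][i, :, 1]
        per_N_params.append(curr_params)

    # Map likelihood_inner_loop over the per-N dicts in parallel
    with Pool(n_workers) as pool:
        return pool.map(likelihood_inner_loop, per_N_params)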
    model_params = {
        # ... (remaining model settings not shown)
        'reward_scheme': 'asym_reward',
    }

    finegr = FineGrained(**model_params)
    model_params['coarse_stats'] = finegr.coarse_stats

    N_values = model_params['N_values']
    dist_computed_params = []
    for i, N in enumerate(N_values):
        curr_params = deepcopy(model_params)
        curr_params['N'] = N
        curr_params['mu'] = model_params['coarse_stats'][i, :, 0]
        curr_params['sigma'] = model_params['coarse_stats'][i, :, 1]
        bellutil = BellmanUtil(**curr_params)
        curr_params['decisions'] = bellutil.decisions
        obs = ObserverSim(**curr_params)
        curr_params['fractions'] = obs.fractions
        dist_computed_params.append(curr_params)

    data_eval = DataLikelihoods(**model_params)
    for single_N_params in dist_computed_params:
        data_eval.increment_likelihood(**single_N_params)

    print(data_eval.likelihood)

    T = model_params['T']
    dt = model_params['dt']
    fig, axes = plt.subplots(3, 1)
    t_values = np.arange(0, T, dt) + (dt / 2)

    for i, N in enumerate(N_values):
        # Plotting body not shown; each set size presumably gets its own axis
        ...
Example 6
    def __init__(self,
                 subject_num,
                 T,
                 dt,
                 t_w,
                 t_max,
                 size,
                 lapse,
                 mu,
                 N_values,
                 g_values,
                 experiment,
                 N,
                 reward_scheme,
                 tested_params,
                 likelihoods_returned,
                 t_delay,
                 opt_regime=None,
                 **kwargs):
        """Rebuild the best-fitting model from an optimization run: select the
        tested parameters with the lowest returned likelihood, recompute (or
        reuse) the corresponding decision policy and response fractions, and
        check the recomputed likelihood against the stored optimum."""
        # NOTE: most of the keyword arguments (T, dt, t_w, t_max, t_delay,
        # lapse, mu, N_values, g_values, reward_scheme) are overridden by the
        # fixed defaults below; only size, N, subject_num, and experiment are
        # taken from the arguments.
        model_params = {
            'T': 10,
            'dt': 0.05,
            't_w': 0.5,
            't_delay': 0.2,
            't_max': 5.,
            'size': size,
            'lapse': 1e-6,
            'mu': np.array((0, 1)),
            'N': int(N),
            'N_values': (8, 12, 16),
            'g_values': np.linspace(1e-4, 1 - 1e-4, size),
            'subject_num': int(subject_num),
            'reward_scheme': 'asym_reward',
            'experiment': experiment
        }

        curr_params = deepcopy(model_params)
        # The best-fitting (log-space) parameters are those with the lowest likelihood
        self.opt_params = tested_params[np.argmin(likelihoods_returned)]
        opt_likelihood = np.amin(likelihoods_returned)
        curr_params['sigma'] = np.exp(self.opt_params[:2])
        curr_params['reward'] = np.exp(self.opt_params[2])
        curr_params['punishment'] = -np.exp(self.opt_params[3])
        curr_params['alpha'] = np.exp(self.opt_params[4])

        # If decisions, rho, and response fractions for the optimal parameters were not provided, compute them
        if not opt_regime:
            bellutil = BellmanUtil(**curr_params)
            rho = bellutil.rho
            decisions = bellutil.decisions

            obs = ObserverSim(decisions=decisions, **curr_params)
            fractions = obs.fractions
            opt_regime = {
                'rho': rho,
                'decisions': decisions,
                'fractions': fractions
            }

        curr_params['opt_regime'] = opt_regime
        curr_params['rho'] = opt_regime['rho']
        curr_params['decisions'] = opt_regime['decisions']
        curr_params['fractions'] = opt_regime['fractions']
        likelihood_data = DataLikelihoods(**curr_params)
        likelihood_data.increment_likelihood(**curr_params)
        if not np.abs(likelihood_data.likelihood - opt_likelihood) < 1e-3:
            warnings.warn(
                'Difference between computed likelihood and stored optimum is greater than 0.001'
            )
            print(likelihood_data.likelihood - opt_likelihood)

        self.model_params = curr_params
        self.opt_regime = opt_regime
        self.N_data = likelihood_data.sub_data.query('setsize == {}'.format(N))
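For context, a hedged sketch of how this constructor might be invoked from saved optimization output is given below. The class name OptResults, the .npz file name, the array keys, the grid size, and the experiment label are all hypothetical; the constructor arguments and their meanings are taken from the signature above, and the fixed values mirror the defaults hard-coded in model_params.

import numpy as np

saved = np.load('subject1_opt_trace.npz')          # hypothetical results file
results = OptResults(subject_num=1,
                     T=10, dt=0.05, t_w=0.5, t_max=5.,
                     size=600,                      # hypothetical grid size
                     lapse=1e-6,
                     mu=np.array((0, 1)),
                     N_values=(8, 12, 16),
                     g_values=np.linspace(1e-4, 1 - 1e-4, 600),
                     experiment='experiment_name',  # hypothetical label
                     N=12,
                     reward_scheme='asym_reward',
                     tested_params=saved['tested_params'],
                     likelihoods_returned=saved['likelihoods_returned'],
                     t_delay=0.2)
print(results.opt_params)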