Esempio n. 1
0
def general_WienerCont(err=1e-4,
                       n_st=2,
                       n_sz=2,
                       use_adaptive=1,
                       simps_err=1e-3):
    """Build a stochastic class for the Wiener diffusion process with a
    contaminant mixture component.

    The numerical-integration settings (err, n_st, n_sz, use_adaptive,
    simps_err) are frozen into the likelihood via default arguments.
    """
    def _like(value, cont_x, v, sv, a, z, sz, t, st, t_min, t_max,
              err=err, n_st=n_st, n_sz=n_sz,
              use_adaptive=use_adaptive, simps_err=simps_err):
        return wiener_like_contaminant(value, cont_x, v, sv, a, z, sz, t, st,
                                       t_min, t_max,
                                       err=err, n_st=n_st, n_sz=n_sz,
                                       use_adaptive=use_adaptive,
                                       simps_err=simps_err)

    # expose the wrapped likelihood's documentation on the logp function
    _like.__doc__ = wiener_like_contaminant.__doc__
    return stochastic_from_dist(name="Wiener Diffusion Contaminant Process",
                                logp=_like)
Esempio n. 2
0
def general_WienerCont(err=1e-4, n_st=2, n_sz=2, use_adaptive=1, simps_err=1e-3):
    """Build a stochastic class for the Wiener diffusion process with a
    contaminant mixture; integration settings are bound as defaults."""
    def _like(value, cont_x, v, sv, a, z, sz, t, st, t_min, t_max,
              err=err, n_st=n_st, n_sz=n_sz,
              use_adaptive=use_adaptive, simps_err=simps_err):
        return wiener_like_contaminant(
            value, cont_x, v, sv, a, z, sz, t, st, t_min, t_max,
            err=err, n_st=n_st, n_sz=n_sz,
            use_adaptive=use_adaptive, simps_err=simps_err)

    # carry over the wrapped function's docstring
    _like.__doc__ = wiener_like_contaminant.__doc__
    return stochastic_from_dist(name="Wiener Diffusion Contaminant Process",
                                logp=_like)
Esempio n. 3
0
def generate_wfpt_stochastic_class(wiener_params=None, sampling_method='cdf', cdf_range=(-5,5), sampling_dt=1e-4):
    """
    create a wfpt stochastic class by creating a pymc node and then adding quantile functions.
    Input:
        wiener_params <dict> - dictionary of wiener_params for wfpt likelihoods
        sampling_method <string> - an argument used by hddm.generate.gen_rts
        cdf_range <sequence> - an argument used by hddm.generate.gen_rts
        sampling_dt <float> - an argument used by hddm.generate.gen_rts
    Output:
        wfpt <class> - the wfpt stochastic
    """

    #set wiener_params
    if wiener_params is None:
        wiener_params = {'err': 1e-4, 'n_st':2, 'n_sz':2,
                         'use_adaptive':1,
                         'simps_err':1e-3,
                         'w_outlier': 0.1}
    wp = wiener_params

    #create likelihood function; x is expected to carry an 'rt' column
    def wfpt_like(x, v, sv, a, z, sz, t, st, p_outlier=0):
        return hddm.wfpt.wiener_like(x['rt'].values, v, sv, a, z, sz, t, st, p_outlier=p_outlier, **wp)


    #create random function: draw RTs from the generative model, with error
    #responses flipped to negative RTs
    def random(self):
        return hddm.utils.flip_errors(hddm.generate.gen_rts(method=sampling_method,
                                                            size=self.shape, dt=sampling_dt,
                                                            range_=cdf_range,
                                                            structured=True,
                                                            **self.parents.value))


    #create pdf function
    def pdf(self, x):
        out = hddm.wfpt.pdf_array(x, **self.parents)
        return out

    #create cdf function
    def cdf(self, x):
        return hddm.cdfdif.dmat_cdf_array(x, w_outlier=wp['w_outlier'], **self.parents)

    #create wfpt class
    wfpt = stochastic_from_dist('wfpt', wfpt_like)

    #add pdf and cdf_vec to the class
    wfpt.pdf = pdf
    # list(...) is required under Python 3, where dict views cannot be
    # concatenated with '+'; wp entries override parents on duplicate keys
    wfpt.cdf_vec = lambda self: hddm.wfpt.gen_cdf_using_pdf(time=cdf_range[1], **dict(list(self.parents.items()) + list(wp.items())))
    wfpt.cdf = cdf
    wfpt.random = random

    #add quantiles functions
    add_quantiles_functions_to_pymc_class(wfpt)

    return wfpt
Esempio n. 4
0
def generate_wfpt_reg_stochastic_class(wiener_params=None, sampling_method='cdf', cdf_range=(-5,5), sampling_dt=1e-4):
    """Create the 'wfpt_reg' stochastic class for regression models and
    attach a per-trial `random` sampler to it."""
    # fall back to the standard wiener parameter settings
    if wiener_params is None:
        wiener_params = {'err': 1e-4, 'n_st': 2, 'n_sz': 2,
                         'use_adaptive': 1,
                         'simps_err': 1e-3,
                         'w_outlier': 0.1}
    wp = wiener_params

    def wiener_multi_like(value, v, sv, a, z, sz, t, st, reg_outcomes, p_outlier=0):
        """Log-likelihood for the full DDM using the interpolation method"""
        params = dict(v=v, sv=sv, a=a, z=z, sz=sz, t=t, st=st)
        # regression outcomes come in as per-trial series; align them to the
        # rt index and strip down to raw arrays
        for name in reg_outcomes:
            params[name] = params[name].loc[value['rt'].index].values
        return hddm.wfpt.wiener_like_multi(
            value['rt'].values,
            params['v'], params['sv'], params['a'], params['z'],
            params['sz'], params['t'], params['st'], 1e-4,
            reg_outcomes,
            p_outlier=p_outlier)


    def random(self):
        """Draw synthetic RTs trial by trial, plugging in the per-trial
        regressor values before each draw."""
        trial_params = deepcopy(self.parents.value)
        del trial_params['reg_outcomes']
        sampled_rts = self.value.copy()
        for idx in self.value.index:
            # fill in this trial's regression-driven parameter values
            for p in self.parents['reg_outcomes']:
                trial_params[p] = np.asscalar(self.parents.value[p].loc[idx])
            # draw a single RT with the current parameter set
            draw = hddm.generate.gen_rts(method=sampling_method,
                                         size=1, dt=sampling_dt, **trial_params)
            sampled_rts.at[idx, 'rt'] = hddm.utils.flip_errors(draw).rt
        return sampled_rts

    stoch = stochastic_from_dist('wfpt_reg', wiener_multi_like)
    stoch.random = random
    return stoch
Esempio n. 5
0
def generate_wfpt_reg_stochastic_class(wiener_params=None, sampling_method='cdf', cdf_range=(-5,5), sampling_dt=1e-4):
    """Create the 'wfpt_reg' stochastic class for regression models and
    attach a per-trial `random` sampler to it."""
    #set wiener_params
    if wiener_params is None:
        wiener_params = {'err': 1e-4, 'n_st':2, 'n_sz':2,
                         'use_adaptive':1,
                         'simps_err':1e-3,
                         'w_outlier': 0.1}
    wp = wiener_params

    def wiener_multi_like(value, v, sv, a, z, sz, t, st, reg_outcomes, p_outlier=0):
        """Log-likelihood for the full DDM using the interpolation method"""
        params = {'v': v, 'sv': sv, 'a': a, 'z': z, 'sz': sz, 't': t, 'st': st}
        for reg_outcome in reg_outcomes:
            # .loc replaces DataFrame.ix, which was deprecated and then
            # removed from pandas
            params[reg_outcome] = params[reg_outcome].loc[value['rt'].index].values
        return hddm.wfpt.wiener_like_multi(value['rt'].values,
                                           params['v'], params['sv'], params['a'], params['z'],
                                           params['sz'], params['t'], params['st'], 1e-4,
                                           reg_outcomes,
                                           p_outlier=p_outlier)


    def random(self):
        param_dict = deepcopy(self.parents.value)
        del param_dict['reg_outcomes']
        sampled_rts = self.value.copy()

        for i in self.value.index:
            #get current params
            for p in self.parents['reg_outcomes']:
                # np.asscalar was removed in numpy >= 1.23; .item() is the
                # documented replacement
                param_dict[p] = np.asarray(self.parents.value[p].loc[i]).item()
            #sample
            samples = hddm.generate.gen_rts(method=sampling_method,
                                            size=1, dt=sampling_dt, **param_dict)

            # single .loc assignment replaces the chained-indexing write
            # (sampled_rts.ix[i]['rt'] = ...), which could silently modify a
            # temporary copy instead of the frame
            sampled_rts.loc[i, 'rt'] = hddm.utils.flip_errors(samples).rt

        return sampled_rts

    stoch = stochastic_from_dist('wfpt_reg', wiener_multi_like)
    stoch.random = random

    return stoch
Esempio n. 6
0
def make_mlp_likelihood_rlssm(
    model=None, model_config=None, model_config_rl=None, wiener_params=None, **kwargs
):
    """Defines the likelihoods for the MLP networks for RLSSMs.

    :Arguments:
        model: str <default='ddm'>
            String that determines which model you would like to fit your data to.
            Currently available models are: 'ddm', 'full_ddm', 'angle', 'weibull', 'ornstein', 'levy'
        model_config: dict <default=None>
            Config dictionary for the sequential sampling model, necessary for construction of likelihood. In the style of what you find under hddm.model_config.
        model_config_rl: dict <default=None>
            Config dictionary for the reinforcement learning model, necessary for construction of likelihood. In the style of what you find under hddm.model_config_rl.
        wiener_params: dict <default=None>
            Dictionary of wiener parameters forwarded to the likelihood builder.
        kwargs: dict
            Dictionary of additional keyword arguments.
            Importantly here, this carries the preloaded CNN.

    :Returns:
        Returns a pymc.object stochastic object as defined by PyMC2
    """

    def make_likelihood():
        # Build the likelihood's source code as a string for the requested
        # model/config combination ...
        likelihood_str = make_likelihood_str_mlp_rlssm(
            model=model,
            config=model_config,
            config_rl=model_config_rl,
            wiener_params=wiener_params,
        )
        # ... then exec() it so the generated code runs in this local scope.
        # The generated source is expected to define a function named
        # "custom_likelihood", which we retrieve from locals() below.
        # NOTE: exec is safe here only because the string is produced by
        # library-internal code, never by user input.
        exec(likelihood_str)
        my_fun = locals()["custom_likelihood"]
        return my_fun

    likelihood_ = make_likelihood()

    # partial(...) bakes the extra kwargs (e.g. the preloaded network) into
    # the likelihood before wrapping it as a stochastic class.
    wfpt_nn_rl = stochastic_from_dist(
        "WienernnRL_" + model, partial(likelihood_, **kwargs)
    )

    return wfpt_nn_rl
Esempio n. 7
0
        'w_outlier': 0.1
    }
    wp = wiener_params

    #with open("weights.pickle", "rb") as tmp_file:
    #    weights = pickle.load(tmp_file)
    #with open('biases.pickle', 'rb') as tmp_file:
    #    biases = pickle.load(tmp_file)
    #with open('activations.pickle', 'rb') as tmp_file:
    #    activations = pickle.load(tmp_file)

    #print('hei')
    nn_response = x['nn_response'].values.astype(int)
    return wiener_like_nn_collapsing_keras(np.absolute(x['rt'].values),
                                           nn_response,
                                           v,
                                           sv,
                                           a,
                                           alpha,
                                           beta,
                                           z,
                                           sz,
                                           t,
                                           st,
                                           p_outlier=p_outlier,
                                           **wp)


# Stochastic class wrapping the collapsing-bound Keras NN likelihood above.
Wienernn_collapsing_keras = stochastic_from_dist(
    'Wienernn_collapsing_keras', wienernn_like_collapsing_keras)
Esempio n. 8
0
def generate_wfpt_stochastic_class(wiener_params=None, sampling_method='cdf', cdf_range=(-5,5), sampling_dt=1e-4):
    """
    create a wfpt stochastic class by creating a pymc node and then adding quantile functions.
    Input:
        wiener_params <dict> - dictionary of wiener_params for wfpt likelihoods
        sampling_method <string> - an argument used by hddm.generate.gen_rts
        cdf_range <sequence> - an argument used by hddm.generate.gen_rts
        sampling_dt <float> - an argument used by hddm.generate.gen_rts
    Output:
        wfpt <class> - the wfpt stochastic
    """

    #set wiener_params
    if wiener_params is None:
        wiener_params = {'err': 1e-4, 'n_st':2, 'n_sz':2,
                         'use_adaptive':1,
                         'simps_err':1e-3,
                         'w_outlier': 0.1}
    wp = wiener_params

    #create likelihood function
    def wfpt_like(x, v, sv, a, z, sz, t, st, p_outlier=0):
        # RTs with |rt| >= 999 encode missing responses; take the fast path
        # when no such trials are present
        if x['rt'].abs().max() < 998:
            return hddm.wfpt.wiener_like(x['rt'].values, v, sv, a, z, sz, t, st,
                                         p_outlier=p_outlier, **wp)
        else:  # for missing RTs. Currently undocumented.
            noresponse = x['rt'].abs() >= 999
            ## get sum of log p for trials with RTs as usual ##
            # ~ (bitwise not) inverts the boolean mask; unary minus on a
            # boolean Series raises a TypeError in modern numpy/pandas
            LLH_resp = hddm.wfpt.wiener_like(x.loc[~noresponse, 'rt'].values,
                                             v, sv, a, z, sz, t, st, p_outlier=p_outlier, **wp)

            ## get sum of log p for no-response trials from p(upper_boundary|parameters) ##
            # this function assumes following format for the RTs:
            # - accuracy coding such that correct responses have a 1 and incorrect responses a 0
            # - usage of HDDMStimCoding for z
            # - missing RTs are coded as 999/-999
            # - note that hddm will flip RTs, such that error trials have negative RTs
            # so that the miss-trial in the go condition and comission error
            # in the no-go condition will have negative RTs

            # get number of no-response trials
            n_noresponse = sum(noresponse)

            # percentage correct according to probability to get to upper boundary
            if v == 0:
                # limiting case of the closed-form expression below
                p_correct = z
            else:
                p_correct = (np.exp(-2 * a * z * v) - 1) / (np.exp(-2 * a * v) - 1)

            # calculate percent no-response trials from % correct
            if sum(x.loc[noresponse, 'rt']) > 0:
                p_noresponse = p_correct # when no-response trials have a positive RT
                                         # we are looking at nogo Trials
            else:
                p_noresponse = 1 - p_correct # when no-response trials have a
                                             # negative RT we are looking at go Trials

            # likelihood for no-response trials
            LLH_noresp = np.log(p_noresponse)*n_noresponse

            return LLH_resp + LLH_noresp

    #create random function: draw RTs from the generative model, with error
    #responses flipped to negative RTs
    def random(self):
        return hddm.utils.flip_errors(hddm.generate.gen_rts(method=sampling_method,
                                                            size=self.shape, dt=sampling_dt,
                                                            range_=cdf_range,
                                                            structured=True,
                                                            **self.parents.value))


    #create pdf function
    def pdf(self, x):
        out = hddm.wfpt.pdf_array(x, **self.parents)
        return out

    #create cdf function
    def cdf(self, x):
        return hddm.cdfdif.dmat_cdf_array(x, w_outlier=wp['w_outlier'], **self.parents)

    #create wfpt class
    wfpt = stochastic_from_dist('wfpt', wfpt_like)

    #add pdf and cdf_vec to the class
    wfpt.pdf = pdf
    wfpt.cdf_vec = lambda self: hddm.wfpt.gen_cdf_using_pdf(time=cdf_range[1], **dict(list(self.parents.items()) + list(wp.items())))
    wfpt.cdf = cdf
    wfpt.random = random

    #add quantiles functions
    add_quantiles_functions_to_pymc_class(wfpt)

    return wfpt
Esempio n. 9
0
    # initialize problem
    states = len(data.state.unique())
    actions = len(data.action.unique())
    V = 1./actions * np.ones((states, actions))

    logp = 0
    for t, (s, a, reward) in data[['state', 'action', 'reward']].iterrows():
        # get proba and add it to the log likelihood
        proba = softmax_proba(V[s,:], a, invtemp)
        logp += np.log(proba)
        V[s, a] += lrate * (reward - V[s, a])

    return logp

# pymc stochastic for the Q-learning model: logp from RL_likelihood, random
# draws from RL_generate (both defined elsewhere in this module).
RL_like = stochastic_from_dist(name="QLearn likelihood",
                               logp=RL_likelihood,
                               random=RL_generate)

def check_params_valid(**params):
    """Return True when the Q-learning parameters lie in their valid ranges:
    learning rate strictly between 0 and 1, inverse temperature positive.
    Missing parameters default to lrate=.1 and invtemp=5 (both valid)."""
    lrate = params.get('lrate', .1)
    invtemp = params.get('invtemp', 5)
    return 0 < lrate < 1 and invtemp > 0

def gen_data(params=None, **kwargs):
    """Generate random data from the Q-learning model via kabuki, validating
    parameter sets with check_params_valid. Extra kwargs are passed through."""
    defaults = {'lrate': .1, 'invtemp': 10}
    params = defaults if params is None else params
    return kabuki.generate.gen_rand_data(RL_generate, params,
                                         check_valid_func=check_params_valid,
                                         **kwargs)
Esempio n. 10
0
        'n_st': 2,
        'n_sz': 2,
        'use_adaptive': 1,
        'simps_err': 1e-3,
        'w_outlier': 0.1
    }
    wp = wiener_params
    response = x['response'].values.astype(int)
    q = x['q_init'].iloc[0]
    feedback = x['feedback'].values
    split_by = x['split_by'].values
    return wiener_like_rlddm(x['rt'].values,
                             response,
                             feedback,
                             split_by,
                             q,
                             alpha,
                             pos_alpha,
                             v,
                             sv,
                             a,
                             z,
                             sz,
                             t,
                             st,
                             p_outlier=p_outlier,
                             **wp)


# pymc stochastic wrapping the RL-DDM likelihood wienerRL_like defined above.
WienerRL = stochastic_from_dist('wienerRL', wienerRL_like)
Esempio n. 11
0
def generate_wfpt_rl_reg_stochastic_class(wiener_params=None,
                                          sampling_method="cdf",
                                          cdf_range=(-5, 5),
                                          sampling_dt=1e-4):
    """Create the RL-DDM regression stochastic class ('wfpt_reg') and attach
    a per-trial `random` sampler to it.

    :Arguments:
        wiener_params: dict <default=None>
            dictionary of wiener parameters; defaults are filled in when None
        sampling_method: str <default='cdf'>
            argument passed to hddm.generate.gen_rts
        cdf_range: sequence <default=(-5, 5)>
            kept for interface symmetry with the sibling generators
        sampling_dt: float <default=1e-4>
            argument passed to hddm.generate.gen_rts
    """
    # set wiener_params
    if wiener_params is None:
        wiener_params = {
            "err": 1e-4,
            "n_st": 2,
            "n_sz": 2,
            "use_adaptive": 1,
            "simps_err": 1e-3,
            "w_outlier": 0.1,
        }
    wp = wiener_params

    def wienerRL_multi_like(value,
                            v,
                            sv,
                            a,
                            z,
                            sz,
                            t,
                            st,
                            alpha,
                            reg_outcomes,
                            p_outlier=0):
        """Log-likelihood for the full DDM using the interpolation method"""
        response = value["response"].values.astype(int)
        q = value["q_init"].iloc[0]
        feedback = value["feedback"].values.astype(float)
        split_by = value["split_by"].values.astype(int)
        params = {
            "v": v,
            "sv": sv,
            "a": a,
            "z": z,
            "sz": sz,
            "t": t,
            "st": st,
            "alpha": alpha,
        }
        # regression outcomes arrive as per-trial series; align to the rt
        # index and strip down to raw arrays
        for reg_outcome in reg_outcomes:
            params[reg_outcome] = params[reg_outcome].loc[
                value["rt"].index].values
        return hddm.wfpt.wiener_like_multi_rlddm(
            value["rt"].values,
            response,
            feedback,
            split_by,
            q,
            params["v"],
            params["sv"],
            params["a"],
            params["z"],
            params["sz"],
            params["t"],
            params["st"],
            params["alpha"],
            1e-4,
            reg_outcomes,
            p_outlier=p_outlier,
        )

    def random(self):
        """Draw synthetic RTs one trial at a time with per-trial regressors."""
        param_dict = deepcopy(self.parents.value)
        del param_dict["reg_outcomes"]
        sampled_rts = self.value.copy()

        for i in self.value.index:
            # get current params
            for p in self.parents["reg_outcomes"]:
                # np.asscalar was removed in numpy >= 1.23; .item() is the
                # documented replacement
                param_dict[p] = np.asarray(self.parents.value[p].loc[i]).item()
            # sample
            samples = hddm.generate.gen_rts(method=sampling_method,
                                            size=1,
                                            dt=sampling_dt,
                                            **param_dict)

            sampled_rts.loc[i, "rt"] = hddm.utils.flip_errors(samples).rt

        return sampled_rts

    stoch = stochastic_from_dist("wfpt_reg", wienerRL_multi_like)
    stoch.random = random

    return stoch
Esempio n. 12
0
def generate_wfpt_nn_reg_stochastic_class(wiener_params=None,
                                          sampling_method='cdf',
                                          cdf_range=(-5, 5),
                                          sampling_dt=1e-4):
    """Create the NN-DDM regression stochastic class ('wfpt_reg') and attach
    a per-trial `random` sampler to it."""

    #set wiener_params
    if wiener_params is None:
        wiener_params = {
            'err': 1e-4,
            'n_st': 2,
            'n_sz': 2,
            'use_adaptive': 1,
            'simps_err': 1e-3,
            'w_outlier': 0.1
        }
    wp = wiener_params

    def wienerNN_multi_like(value,
                            v,
                            sv,
                            a,
                            z,
                            sz,
                            t,
                            st,
                            alpha,
                            beta,
                            reg_outcomes,
                            p_outlier=0):
        """Log-likelihood for the full DDM using the interpolation method"""
        nn_response = value['nn_response'].values.astype(int)
        # NOTE(review): the network weights are unpickled from the working
        # directory on *every* likelihood evaluation, which is slow and
        # unsafe if the pickle files are not trusted — consider loading them
        # once outside the likelihood.
        with open("weights.pickle", "rb") as tmp_file:
            weights = pickle.load(tmp_file)
        with open('biases.pickle', 'rb') as tmp_file:
            biases = pickle.load(tmp_file)
        with open('activations.pickle', 'rb') as tmp_file:
            activations = pickle.load(tmp_file)
        params = {
            'v': v,
            'sv': sv,
            'a': a,
            'z': z,
            'sz': sz,
            't': t,
            'st': st,
            'alpha': alpha,
            'beta': beta
        }
        # align per-trial regression outcomes to the rt index
        for reg_outcome in reg_outcomes:
            params[reg_outcome] = params[reg_outcome].loc[
                value['rt'].index].values
        return hddm.wfpt.wiener_like_multi_nnddm(np.absolute(
            value['rt'].values),
                                                 nn_response,
                                                 activations,
                                                 weights,
                                                 biases,
                                                 params['v'],
                                                 params['sv'],
                                                 params['a'],
                                                 params['z'],
                                                 params['sz'],
                                                 params['t'],
                                                 params['st'],
                                                 params['alpha'],
                                                 params['beta'],
                                                 1e-4,
                                                 reg_outcomes,
                                                 p_outlier=p_outlier)

    def random(self):
        """Draw synthetic RTs one trial at a time with per-trial regressors."""
        param_dict = deepcopy(self.parents.value)
        del param_dict['reg_outcomes']
        sampled_rts = self.value.copy()

        for i in self.value.index:
            #get current params
            for p in self.parents['reg_outcomes']:
                # np.asscalar was removed in numpy >= 1.23; .item() is the
                # documented replacement
                param_dict[p] = np.asarray(self.parents.value[p].loc[i]).item()
            #sample
            samples = hddm.generate.gen_rts(method=sampling_method,
                                            size=1,
                                            dt=sampling_dt,
                                            **param_dict)

            sampled_rts.loc[i, 'rt'] = hddm.utils.flip_errors(samples).rt

        return sampled_rts

    stoch = stochastic_from_dist('wfpt_reg', wienerNN_multi_like)
    stoch.random = random

    return stoch
Esempio n. 13
0
                     **wfpt_parents)


def wienernn_like(x, v, sv, a, z, sz, t, st, p_outlier=0):
    """Neural-network Wiener log-likelihood for the data frame `x`
    (expects 'rt' and 'nn_response' columns) with standard integration
    settings."""
    wp = {
        'err': 1e-4,
        'n_st': 2,
        'n_sz': 2,
        'use_adaptive': 1,
        'simps_err': 1e-3,
        'w_outlier': 0.1
    }
    nn_response = x['nn_response'].values.astype(int)
    abs_rts = np.absolute(x['rt'].values)
    return wiener_like_nn(abs_rts, nn_response, v, sv, a, z, sz, t, st,
                          p_outlier=p_outlier, **wp)


# pymc stochastic wrapping the NN Wiener likelihood wienernn_like above.
Wienernn = stochastic_from_dist('wienernn', wienernn_like)
Esempio n. 14
0
def generate_wfpt_stochastic_class(wiener_params=None, sampling_method='cdf', cdf_range=(-5,5), sampling_dt=1e-4):
    """
    create a wfpt stochastic class by creating a pymc node and then adding quantile functions.
    Input:
        wiener_params <dict> - dictionary of wiener_params for wfpt likelihoods
        sampling_method <string> - an argument used by hddm.generate.gen_rts
        cdf_range <sequence> - an argument used by hddm.generate.gen_rts
        sampling_dt <float> - an argument used by hddm.generate.gen_rts
    Output:
        wfpt <class> - the wfpt stochastic
    """

    #set wiener_params
    if wiener_params is None:
        wiener_params = {'err': 1e-4, 'n_st':2, 'n_sz':2,
                         'use_adaptive':1,
                         'simps_err':1e-3,
                         'w_outlier': 0.1,
                         }
    wp = wiener_params

    #create likelihood function
    def wfpt_like(x, v, sv, a, z, sz, t, st, p_outlier=0):
        # RTs with |rt| >= 999 encode missing responses; take the fast path
        # when none are present
        if x['rt'].abs().max() < 998:
            return hddm.wfpt.wiener_like(x['rt'].values, v, sv, a, z, sz, t, st,
                                         p_outlier=p_outlier, **wp)
        else:  # for missing RTs. Currently undocumented.
            noresponse = x['rt'].abs() >= 999
            ## get sum of log p for trials with RTs as usual ##
            logp_resp = hddm.wfpt.wiener_like(x.loc[~noresponse, 'rt'].values,
                                             v, sv, a, z, sz, t, st, p_outlier=p_outlier, **wp)

            # get number of no-response trials
            n_noresponse = sum(noresponse)
            # no-response trials with positive rt are treated as
            # upper-boundary outcomes (hddm flips error RTs to be negative)
            k_upper = sum(x.loc[noresponse, 'rt'] > 0)

            # percentage correct according to probability to get to upper boundary;
            # the v == 0 branch is the limiting case of the expression below
            if v == 0:
                p_upper = z
            else:
                p_upper = (np.exp(-2 * a * z * v) - 1) / (np.exp(-2 * a * v) - 1)

            # upper-boundary outcomes among the no-response trials are
            # modeled as binomial with success probability p_upper
            logp_noresp = stats.binom.logpmf(k_upper, n_noresponse, p_upper)
            return logp_resp + logp_noresp

    #create random function: draw RTs from the generative model, with error
    #responses flipped to negative RTs
    def random(self):
        return hddm.utils.flip_errors(hddm.generate.gen_rts(method=sampling_method,
                                                            size=self.shape, dt=sampling_dt,
                                                            range_=cdf_range,
                                                            structured=True,
                                                            **self.parents.value))


    #create pdf function
    def pdf(self, x):
        out = hddm.wfpt.pdf_array(x, **self.parents)
        return out

    #create cdf function
    def cdf(self, x):
        return hddm.cdfdif.dmat_cdf_array(x, w_outlier=wp['w_outlier'], **self.parents)

    #create wfpt class
    wfpt = stochastic_from_dist('wfpt', wfpt_like)

    #add pdf and cdf_vec to the class
    wfpt.pdf = pdf
    wfpt.cdf_vec = lambda self: hddm.wfpt.gen_cdf_using_pdf(time=cdf_range[1], **dict(list(self.parents.items()) + list(wp.items())))
    wfpt.cdf = cdf
    wfpt.random = random

    #add quantiles functions
    add_quantiles_functions_to_pymc_class(wfpt)

    return wfpt
Esempio n. 15
0
from time import time
import unittest

from kabuki.utils import stochastic_from_dist


def multi_normal_like(values, vec_mu, tau):
    """logp for multi normal: sum of row-wise normal log-likelihoods,
    one mean per row of `values`, shared precision `tau`."""
    return sum(pm.normal_like(values[i, :], vec_mu[i], tau)
               for i in range(len(vec_mu)))


# pymc stochastic built from the multivariate-normal log-likelihood above.
MN = stochastic_from_dist(name="MultiNormal", logp=multi_normal_like)


class TestStepMethods(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        # Very wide bounds, presumably used as limits for uniform priors in
        # the step-method tests — confirm against the assert_* helpers below.
        super(TestStepMethods, self).__init__(*args, **kwargs)
        self.uniform_lb = 1e-10  # lower bound
        self.uniform_ub = 1e10  # upper bound

    def runTest(self):
        # Intentionally a no-op: this TestCase is exercised through its
        # helper methods rather than the default unittest entry point.
        return

    def assert_results(self,
                       node,
                       true_value,
                       true_mean,
Esempio n. 16
0
        'w_outlier': 0.1
    }
    wp = wiener_params

    #with open("weights.pickle", "rb") as tmp_file:
    #    weights = pickle.load(tmp_file)
    #with open('biases.pickle', 'rb') as tmp_file:
    #    biases = pickle.load(tmp_file)
    #with open('activations.pickle', 'rb') as tmp_file:
    #    activations = pickle.load(tmp_file)

    #print('hei')
    nn_response = x['nn_response'].values.astype(int)
    return wiener_like_nn_weibull(np.absolute(x['rt'].values),
                                  nn_response,
                                  v,
                                  sv,
                                  a,
                                  alpha,
                                  beta,
                                  z,
                                  sz,
                                  t,
                                  st,
                                  p_outlier=p_outlier,
                                  **wp)


# pymc stochastic wrapping the weibull-bound NN likelihood above.
Wienernn_weibull = stochastic_from_dist('Wienernn_weibull',
                                        wienernn_like_weibull)
Esempio n. 17
0
                    rt1 = np.inf

                if rt0 < rt1:
                    sampled_rts[i_sample] = rt0
                else:
                    sampled_rts[i_sample] = -rt1

    data = pd.DataFrame(sampled_rts, columns=['rt'])
    data['response'] = 1.
    data['response'][data['rt']<0] = 0.
    data['rt'] = np.abs(data['rt'])

    return data


# LBA stochastic class: logp from lba_like, draws from lba_random (both
# defined elsewhere in this module); the pdf helper is attached afterwards.
lba_class = stochastic_from_dist(name='lba_like', logp=lba_like, random=lba_random)
lba_class.pdf = pdf

def gen_single_params_set():
    """Return a dict of LBA parameters with random values for a single
    condition; used by gen_rand_params.
    """
    # draw in a fixed order so results are reproducible under a seeded RNG
    noise = 2.5 * rand()
    threshold = 0.5 + rand() * 1.5
    start_point = rand() * threshold
    drift = rand()
    nondecision = 0.2 + rand() * 0.3
    return {'s': noise,
            'b': threshold,
            'A': start_point,
            'v': drift,
            't': nondecision}
Esempio n. 18
0
def generate_wfpt_stochastic_class(wiener_params=None,
                                   sampling_method="cssm",
                                   cdf_range=(-5, 5),
                                   sampling_dt=1e-4):
    """
    Create a wfpt stochastic class by creating a pymc node and then adding
    pdf/cdf, sampling and quantile functions.

    :Arguments:
        wiener_params: dict <default=None>
            dictionary of wiener_params for wfpt likelihoods
        sampling_method: str <default='cssm'>
            an argument used by hddm.generate.gen_rts
        cdf_range: sequence <default=(-5,5)>
            an argument used by hddm.generate.gen_rts
        sampling_dt: float <default=1e-4>
            an argument used by hddm.generate.gen_rts

    :Output:
        wfpt: class
            the wfpt stochastic
    """

    # set wiener_params (standard tolerances; w_outlier is the outlier
    # density used by the cdf helper below)
    if wiener_params is None:
        wiener_params = {
            "err": 1e-4,
            "n_st": 2,
            "n_sz": 2,
            "use_adaptive": 1,
            "simps_err": 1e-3,
            "w_outlier": 0.1,
        }
    wp = wiener_params

    # create likelihood function
    def wfpt_like(x, v, sv, a, z, sz, t, st, p_outlier=0):
        """Log-likelihood of observed RTs; |rt| >= 999 encodes missing RTs."""
        if x["rt"].abs().max() < 998:
            return hddm.wfpt.wiener_like(x["rt"].values,
                                         v,
                                         sv,
                                         a,
                                         z,
                                         sz,
                                         t,
                                         st,
                                         p_outlier=p_outlier,
                                         **wp)
        else:  # for missing RTs. Currently undocumented.
            noresponse = x["rt"].abs() >= 999
            ## get sum of log p for trials with RTs as usual ##
            logp_resp = hddm.wfpt.wiener_like(x.loc[~noresponse, "rt"].values,
                                              v,
                                              sv,
                                              a,
                                              z,
                                              sz,
                                              t,
                                              st,
                                              p_outlier=p_outlier,
                                              **wp)

            # get number of no-response trials
            n_noresponse = sum(noresponse)
            k_upper = sum(x.loc[noresponse, "rt"] > 0)

            # percentage correct according to probability to get to upper boundary
            if v == 0:
                p_upper = z
            else:
                p_upper = (np.exp(-2 * a * z * v) - 1) / (np.exp(-2 * a * v) -
                                                          1)

            # no-response trials contribute a binomial term over how many
            # ended at the upper boundary
            logp_noresp = stats.binom.logpmf(k_upper, n_noresponse, p_upper)
            return logp_resp + logp_noresp

    # create random function
    def random(
        self,
        keep_negative_responses=True,
        add_model=False,
        add_outliers=False,
        add_model_parameters=False,
        keep_subj_idx=False,
    ):
        """Draw a synthetic dataset (sized like the observed node) from the
        node's current parent values, via gen_rts or the cssm simulator."""
        # print(self.value)
        # print(type(self.value))
        assert sampling_method in [
            "cdf",
            "drift",
            "cssm",
        ], "Sampling method is invalid!"

        if sampling_method == "cdf" or sampling_method == "drift":
            return hddm.utils.flip_errors(
                hddm.generate.gen_rts(method=sampling_method,
                                      size=self.shape,
                                      dt=sampling_dt,
                                      range_=cdf_range,
                                      structured=True,
                                      **self.parents.value))
        elif sampling_method == "cssm":
            # pack the parent values into a flat theta vector in the order
            # the simulator's model config expects
            keys_tmp = self.parents.value.keys()
            cnt = 0
            theta = np.zeros(len(list(keys_tmp)), dtype=np.float32)

            for param in model_config["full_ddm_vanilla"]["params"]:
                theta[cnt] = np.array(self.parents.value[param]).astype(
                    np.float32)
                cnt += 1

            sim_out = simulator(theta=theta,
                                model="full_ddm_vanilla",
                                n_samples=self.shape[0],
                                max_t=20)

            if add_outliers:
                if self.parents.value["p_outlier"] > 0.0:
                    sim_out = hddm_dataset_generators._add_outliers(
                        sim_out=sim_out,
                        p_outlier=self.parents.value["p_outlier"],
                        max_rt_outlier=1 / wiener_params["w_outlier"],
                    )

            sim_out_proc = hddm_preprocess(
                sim_out,
                keep_negative_responses=keep_negative_responses,
                keep_subj_idx=keep_subj_idx,
                add_model_parameters=add_model_parameters,
            )

            if add_model:
                # label as the simpler model when all inter-trial
                # variabilities are zero
                if ((self.parents.value["sz"] == 0)
                        and (self.parents.value["sv"] == 0)
                        and (self.parents.value["st"] == 0)):
                    sim_out_proc["model"] = "ddm_vanilla"
                else:
                    sim_out_proc["model"] = "full_ddm_vanilla"

            sim_out_proc = hddm.utils.flip_errors(
                sim_out_proc)  # ['rt'] * sim_out_proc['response']

            return sim_out_proc

    # create pdf function
    def pdf(self, x):
        """Evaluate the likelihood density at x given the current parents."""
        out = hddm.wfpt.pdf_array(x, **self.parents)
        return out

    # create cdf function
    def cdf(self, x):
        """Evaluate the outlier-corrected CDF at x given the current parents."""
        return hddm.cdfdif.dmat_cdf_array(x,
                                          w_outlier=wp["w_outlier"],
                                          **self.parents)

    # create wfpt class
    wfpt = stochastic_from_dist("wfpt", wfpt_like)

    # add pdf and cdf_vec to the class
    wfpt.pdf = pdf
    wfpt.cdf_vec = lambda self: hddm.wfpt.gen_cdf_using_pdf(
        time=cdf_range[1],
        **dict(list(self.parents.items()) + list(wp.items())))
    wfpt.cdf = cdf
    wfpt.random = random

    # add quantiles functions
    add_quantiles_functions_to_pymc_class(wfpt)

    return wfpt
Esempio n. 19
0
def generate_wfpt_reg_stochastic_class(wiener_params=None,
                                       sampling_method="cdf",
                                       cdf_range=(-5, 5),
                                       sampling_dt=1e-4):
    """Create a wfpt stochastic class for regression models, where some
    parameters are trial-wise regression outcomes.

    :Arguments:
        wiener_params: dict <default=None>
            dictionary of wiener_params for wfpt likelihoods
        sampling_method: str <default='cdf'>
            sampling backend used by the node's random() method
        cdf_range: sequence <default=(-5,5)>
            an argument used by hddm.generate.gen_rts
        sampling_dt: float <default=1e-4>
            an argument used by hddm.generate.gen_rts

    :Output:
        stoch: class
            the wfpt regression stochastic
    """

    # set wiener_params
    if wiener_params is None:
        wiener_params = {
            "err": 1e-4,
            "n_st": 2,
            "n_sz": 2,
            "use_adaptive": 1,
            "simps_err": 1e-3,
            "w_outlier": 0.1,
        }
    wp = wiener_params

    def wiener_multi_like(value,
                          v,
                          sv,
                          a,
                          z,
                          sz,
                          t,
                          st,
                          reg_outcomes,
                          p_outlier=0.05):
        """Log-likelihood for the full DDM using the interpolation method"""
        params = {"v": v, "sv": sv, "a": a, "z": z, "sz": sz, "t": t, "st": st}
        # align trial-wise regression outcomes with the observed rows
        for reg_outcome in reg_outcomes:
            params[reg_outcome] = params[reg_outcome].loc[
                value["rt"].index].values
        return hddm.wfpt.wiener_like_multi(
            value["rt"].values,
            params["v"],
            params["sv"],
            params["a"],
            params["z"],
            params["sz"],
            params["t"],
            params["st"],
            1e-4,
            reg_outcomes,
            w_outlier=wp["w_outlier"],
            p_outlier=p_outlier,
        )

    def random(
        self,
        keep_negative_responses=True,
        add_model_parameters=False,
        keep_subj_idx=False,
    ):
        """Draw a synthetic dataset from the node's current (trial-wise)
        parent values via per-trial gen_rts ('drift') or the cssm
        simulator ('cssm')."""

        assert sampling_method in ["drift",
                                   "cssm"], "Sampling method is invalid!"
        # AF add: exchange this with new simulator
        param_dict = deepcopy(self.parents.value)
        del param_dict["reg_outcomes"]
        sampled_rts = self.value.copy()

        if sampling_method == "drift":
            for i in self.value.index:
                # get current params
                for p in self.parents["reg_outcomes"]:
                    # np.asscalar was removed in NumPy 1.23; .item() on an
                    # array view is the supported equivalent
                    param_dict[p] = np.asarray(
                        self.parents.value[p].loc[i]).item()
                # sample
                samples = hddm.generate.gen_rts(method=sampling_method,
                                                size=1,
                                                dt=sampling_dt,
                                                **param_dict)

                sampled_rts.loc[i, "rt"] = hddm.utils.flip_errors(
                    samples).rt.iloc[0]

            return sampled_rts

        if sampling_method == "cssm":
            # pack per-trial parameters into a (n_trials, n_params) matrix
            param_data = np.zeros(
                (self.value.shape[0],
                 len(model_config["full_ddm_vanilla"]["params"])),
                dtype=np.float32,
            )
            cnt = 0
            for tmp_str in model_config["full_ddm_vanilla"]["params"]:
                if tmp_str in self.parents["reg_outcomes"]:
                    param_data[:, cnt] = param_dict[tmp_str].values
                    # changed from iloc[self.value.index]
                else:
                    param_data[:, cnt] = param_dict[tmp_str]
                cnt += 1

            sim_out = simulator(theta=param_data,
                                model="full_ddm_vanilla",
                                n_samples=1,
                                max_t=20)

            sim_out_proc = hddm_preprocess(
                sim_out,
                keep_negative_responses=keep_negative_responses,
                add_model_parameters=add_model_parameters,
                keep_subj_idx=keep_subj_idx,
            )

            sim_out_proc = hddm.utils.flip_errors(sim_out_proc)

            return sim_out_proc

    stoch = stochastic_from_dist("wfpt_reg", wiener_multi_like)
    stoch.random = random

    return stoch
Esempio n. 20
0
from nose import SkipTest
from pandas import DataFrame
from time import time
import unittest

from kabuki.utils import stochastic_from_dist

def multi_normal_like(values, vec_mu, tau):
    """logp for multi normal: sum of row-wise normal log-likelihoods,
    one mean per row of *values*, shared precision *tau*."""
    return sum(pm.normal_like(values[i, :], vec_mu[i], tau)
               for i in range(len(vec_mu)))

MN = stochastic_from_dist(name="MultiNormal", logp=multi_normal_like)



class TestStepMethods(unittest.TestCase):

    def __init__(self, *args, **kwargs):
        super(TestStepMethods, self).__init__(*args, **kwargs)
        self.uniform_lb = 1e-10
        self.uniform_ub = 1e10

    def runTest(self):
        return


    def assert_results(self, node, true_value, true_mean, true_std=None,
Esempio n. 21
0
def RL_like(x, v, alpha, pos_alpha, z=0.5, p_outlier=0):
    """Log-likelihood of trial-wise responses under the RL model.

    :Arguments:
        x: DataFrame with 'response', 'q_init', 'feedback' and
            'split_by' columns (one row per trial).
        v, alpha, pos_alpha, z: model parameters forwarded to
            wiener_like_rl.
        p_outlier: float <default=0>
            outlier mixture probability.
    """
    wiener_params = {
        "err": 1e-4,
        "n_st": 2,
        "n_sz": 2,
        "use_adaptive": 1,
        "simps_err": 1e-3,
        "w_outlier": 0.1,
    }
    # NOTE: dropped an unused `sum_logp = 0` accumulator left over from an
    # earlier implementation.
    wp = wiener_params
    response = x["response"].values.astype(int)
    q = x["q_init"].iloc[0]
    feedback = x["feedback"].values.astype(float)
    split_by = x["split_by"].values.astype(int)
    return wiener_like_rl(response,
                          feedback,
                          split_by,
                          q,
                          alpha,
                          pos_alpha,
                          v,
                          z,
                          p_outlier=p_outlier,
                          **wp)


RL = stochastic_from_dist("RL", RL_like)
Esempio n. 22
0
def generate_wfpt_stochastic_class(wiener_params=None, sampling_method='cdf', cdf_range=(-5,5), sampling_dt=1e-4):
    """
    Create a wfpt stochastic class by creating a pymc node and then adding
    pdf/cdf, sampling and quantile functions.

    Input:
        wiener_params <dict> - dictionary of wiener_params for wfpt likelihoods
        sampling_method <string> - an argument used by hddm.generate.gen_rts
        cdf_range <sequence> -  an argument used by hddm.generate.gen_rts
        sampling_dt <float> - an argument used by hddm.generate.gen_rts
    Output:
        wfpt <class> - the wfpt stochastic
    """

    #set wiener_params
    if wiener_params is None:
        wiener_params = {'err': 1e-4, 'n_st':2, 'n_sz':2,
                         'use_adaptive':1,
                         'simps_err':1e-3,
                         'w_outlier': 0.1}
    wp = wiener_params

    #create likelihood function
    def wfpt_like(x, v, sv, a, z, sz, t, st, p_outlier=0):
        """Log-likelihood of observed RTs; NaN rt marks a missing response."""
        if np.all(~np.isnan(x['rt'])):
            return hddm.wfpt.wiener_like(x['rt'].values, v, sv, a, z, sz, t, st,
                                         p_outlier=p_outlier, **wp)
        else:  # for missing RTs. Currently undocumented.
            noresponse = np.isnan(x['rt'])
            ## get sum of log p for trials with RTs as usual ##
            # FIX: use `~` to invert the boolean mask; the unary minus that
            # was here raises a TypeError on boolean Series in modern
            # pandas/NumPy (and matches the sibling implementation above).
            LLH_resp = hddm.wfpt.wiener_like(x.loc[~noresponse, 'rt'].values,
                                             v, sv, a, z, sz, t, st, p_outlier=p_outlier, **wp)

            ## get sum of log p for no-response trials from p(upper_boundary|parameters) ##
            # this function assumes following format for the RTs:
            # - accuracy coding such that correct responses have a 1 and incorrect responses a 0
            # - usage of HDDMStimCoding for z
            # - missing RTs are coded as 999/-999
            # - note that hddm will flip RTs, such that error trials have negative RTs
            # so that the miss-trial in the go condition and comission error
            # in the no-go condition will have negative RTs

            # get number of no-response trials
            n_noresponse = sum(noresponse)

            # percentage correct according to probability to get to upper boundary
            if v == 0:
                p_correct = z
            else:
                p_correct = (np.exp(-2 * a * z * v) - 1) / (np.exp(-2 * a * v) - 1)

            # calculate percent no-response trials from % correct
            if sum(x.loc[noresponse, 'rt']) > 0:
                p_noresponse = p_correct # when no-response trials have a positive RT
                                         # we are looking at nogo Trials
            else:
                p_noresponse = 1 - p_correct # when no-response trials have a
                                             # negative RT we are looking at go Trials

            # likelihood for no-response trials
            LLH_noresp = np.log(p_noresponse)*n_noresponse

            return LLH_resp + LLH_noresp

    #create random function
    def random(self):
        """Draw a synthetic RT dataset from the node's current parent values."""
        return hddm.utils.flip_errors(hddm.generate.gen_rts(method=sampling_method,
                                                            size=self.shape, dt=sampling_dt,
                                                            range_=cdf_range,
                                                            structured=True,
                                                            **self.parents.value))


    #create pdf function
    def pdf(self, x):
        """Evaluate the likelihood density at x given the current parents."""
        out = hddm.wfpt.pdf_array(x, **self.parents)
        return out

    #create cdf function
    def cdf(self, x):
        """Evaluate the outlier-corrected CDF at x given the current parents."""
        return hddm.cdfdif.dmat_cdf_array(x, w_outlier=wp['w_outlier'], **self.parents)

    #create wfpt class
    wfpt = stochastic_from_dist('wfpt', wfpt_like)

    #add pdf and cdf_vec to the class
    wfpt.pdf = pdf
    wfpt.cdf_vec = lambda self: hddm.wfpt.gen_cdf_using_pdf(time=cdf_range[1], **dict(list(self.parents.items()) + list(wp.items())))
    wfpt.cdf = cdf
    wfpt.random = random

    #add quantiles functions
    add_quantiles_functions_to_pymc_class(wfpt)

    return wfpt
    def _create_wfpt_parents_dict(self, knodes):
        """Extend the base-class parent dict with the collapsing-bound
        parameters: beta always, alpha only when ``self.k`` is set
        (otherwise the constant 3.00)."""
        parents = super(HDDMnn_collapsing, self)._create_wfpt_parents_dict(knodes)
        parents['beta'] = knodes['beta_bottom']
        parents['alpha'] = knodes['alpha_bottom'] if self.k else 3.00
        return parents

    def _create_wfpt_knode(self, knodes):
        """Build the observed wfpt knode over the network response/RT columns."""
        return Knode(self.wfpt_nn_collapsing_class, 'wfpt', observed=True,
                     col_name=['nn_response', 'rt'],
                     **self._create_wfpt_parents_dict(knodes))


def wienernn_like_collapsing(x, v, sv, a, alpha, beta, z, sz, t, st, p_outlier=0): #theta
    """Log-likelihood of RTs/choices under the collapsing-bound network model.

    :Arguments:
        x: DataFrame with 'rt' and 'nn_response' columns.
        v, sv, a, alpha, beta, z, sz, t, st: model parameters forwarded to
            wiener_like_nn_collapsing.
        p_outlier: float <default=0>
            outlier mixture probability.
    """
    wiener_params = {'err': 1e-4, 'n_st': 2, 'n_sz': 2,
                     'use_adaptive': 1,
                     'simps_err': 1e-3,
                     'w_outlier': 0.1}
    wp = wiener_params

    # PERF: load the network definition once and memoize it on the function
    # object -- this likelihood is called repeatedly during sampling and
    # re-unpickling three files per call dominated the runtime.
    # NOTE(review): assumes the pickle files do not change during a run -- confirm.
    if not hasattr(wienernn_like_collapsing, '_network_cache'):
        with open("weights.pickle", "rb") as tmp_file:
            weights = pickle.load(tmp_file)
        with open('biases.pickle', 'rb') as tmp_file:
            biases = pickle.load(tmp_file)
        with open('activations.pickle', 'rb') as tmp_file:
            activations = pickle.load(tmp_file)
        wienernn_like_collapsing._network_cache = (activations, weights, biases)
    activations, weights, biases = wienernn_like_collapsing._network_cache

    nn_response = x['nn_response'].values.astype(int)
    return wiener_like_nn_collapsing(np.absolute(x['rt'].values), nn_response,
                                     activations, weights, biases,
                                     v, sv, a, alpha, beta, z, sz, t, st,
                                     p_outlier=p_outlier, **wp)
Wienernn_collapsing = stochastic_from_dist('Wienernn_collapsing', wienernn_like_collapsing)
Esempio n. 24
0
        "n_st": 2,
        "n_sz": 2,
        "use_adaptive": 1,
        "simps_err": 1e-3,
        "w_outlier": 0.1,
    }
    wp = wiener_params
    response = x["response"].values.astype(int)
    q = x["q_init"].iloc[0]
    feedback = x["feedback"].values.astype(float)
    split_by = x["split_by"].values.astype(int)
    return wiener_like_rlddm(x["rt"].values,
                             response,
                             feedback,
                             split_by,
                             q,
                             alpha,
                             pos_alpha,
                             v,
                             sv,
                             a,
                             z,
                             sz,
                             t,
                             st,
                             p_outlier=p_outlier,
                             **wp)


WienerRL = stochastic_from_dist("wienerRL", wienerRL_like)
Esempio n. 25
0
def generate_wfpt_stochastic_class(wiener_params=None,
                                   sampling_method='cdf',
                                   cdf_range=(-5, 5),
                                   sampling_dt=1e-4):
    """
    Create a wfpt stochastic class by creating a pymc node and then adding
    pdf/cdf, sampling and quantile functions.

    Input:
        wiener_params <dict> - dictionary of wiener_params for wfpt likelihoods
        sampling_method <string> - an argument used by hddm.generate.gen_rts
        cdf_range <sequence> - an argument used by hddm.generate.gen_rts
        sampling_dt <float> - an argument used by hddm.generate.gen_rts
    Output:
        wfpt <class> - the wfpt stochastic
    """

    # fall back to the standard wiener parameter settings when none are given
    if wiener_params is None:
        wiener_params = dict(err=1e-4,
                             n_st=2,
                             n_sz=2,
                             use_adaptive=1,
                             simps_err=1e-3,
                             w_outlier=0.1)
    wp = wiener_params

    # likelihood function (signature is introspected downstream, keep it intact)
    def wfpt_like(x, v, sv, a, z, sz, t, st, p_outlier=0):
        """Full-DDM log-likelihood of the observed RTs."""
        return hddm.wfpt.wiener_like(x['rt'].values, v, sv, a, z, sz, t, st,
                                     p_outlier=p_outlier, **wp)

    # sampling function
    def _random(self):
        """Draw a synthetic RT dataset from the node's current parent values."""
        rts = hddm.generate.gen_rts(method=sampling_method,
                                    size=self.shape,
                                    dt=sampling_dt,
                                    range_=cdf_range,
                                    structured=True,
                                    **self.parents.value)
        return hddm.utils.flip_errors(rts)

    # density function
    def _pdf(self, x):
        """Evaluate the likelihood density at x given the current parents."""
        return hddm.wfpt.pdf_array(x, **self.parents)

    # distribution function
    def _cdf(self, x):
        """Evaluate the outlier-corrected CDF at x given the current parents."""
        return hddm.cdfdif.dmat_cdf_array(x,
                                          w_outlier=wp['w_outlier'],
                                          **self.parents)

    # build the stochastic class
    wfpt = stochastic_from_dist('wfpt', wfpt_like)

    # attach the helpers under the attribute names the rest of the code expects
    wfpt.pdf = _pdf
    wfpt.cdf_vec = lambda self: hddm.wfpt.gen_cdf_using_pdf(
        time=cdf_range[1],
        **dict(list(self.parents.items()) + list(wp.items())))
    wfpt.cdf = _cdf
    wfpt.random = _random

    # attach the quantile machinery
    add_quantiles_functions_to_pymc_class(wfpt)

    return wfpt
Esempio n. 26
0
import numpy as np
from scipy import stats

from kabuki.utils import stochastic_from_dist

np.seterr(divide='ignore')

import hddm

def wiener_like_contaminant(value, cont_x, v, sv, a, z, sz, t, st, t_min, t_max,
                            err, n_st, n_sz, use_adaptive, simps_err):
    """Log-likelihood for the simple DDM including contaminants."""
    # the C extension expects the contaminant indicator as int32
    contaminant_idx = cont_x.astype(np.int32)
    return hddm.wfpt.wiener_like_contaminant(
        value, contaminant_idx, v, sv, a, z, sz, t, st,
        t_min, t_max, err, n_st, n_sz, use_adaptive, simps_err)

# Stochastic class for the contaminant-DDM likelihood above.
WienerContaminant = stochastic_from_dist(name="Wiener Simple Diffusion Process",
                                         logp=wiener_like_contaminant)

def general_WienerCont(err=1e-4, n_st=2, n_sz=2, use_adaptive=1, simps_err=1e-3):
    """Create a Wiener-contaminant stochastic class with the given numerical
    tolerances baked in as likelihood defaults.

    Input:
        err, n_st, n_sz, use_adaptive, simps_err - numerical settings
            forwarded to wiener_like_contaminant.
    Output:
        the stochastic class returned by stochastic_from_dist.
    """

    # PEP 8 (E731): a named def instead of a lambda assigned to a name.
    # The positional signature and keyword defaults are kept identical --
    # NOTE(review): stochastic_from_dist appears to introspect this
    # signature to derive the node's parents, so do not change it.
    def _like(value, cont_x, v, sv, a, z, sz, t, st, t_min, t_max,
              err=err, n_st=n_st, n_sz=n_sz,
              use_adaptive=use_adaptive, simps_err=simps_err):
        return wiener_like_contaminant(value, cont_x, v, sv, a, z, sz, t, st,
                                       t_min, t_max, err=err, n_st=n_st,
                                       n_sz=n_sz, use_adaptive=use_adaptive,
                                       simps_err=simps_err)

    _like.__doc__ = wiener_like_contaminant.__doc__
    return stochastic_from_dist(name="Wiener Diffusion Contaminant Process",
                                logp=_like)

def generate_wfpt_stochastic_class(wiener_params=None, sampling_method='cdf', cdf_range=(-5,5), sampling_dt=1e-4):
    """
    create a wfpt stochastic class by creating a pymc nodes and then adding quantile functions.
    Input:
        wiener_params <dict> - dictonary of wiener_params for wfpt likelihoods
Esempio n. 27
0
        return knodes

    def _create_wfpt_parents_dict(self, knodes):
        """Assemble the RL node's parent mapping from the bottom knodes.

        pos_alpha falls back to the constant 100.00 unless ``self.dual`` is
        set; z defaults to 0.5 unless included in the model.
        """
        parents = OrderedDict()
        parents['v'] = knodes['v_bottom']
        parents['alpha'] = knodes['alpha_bottom']
        parents['pos_alpha'] = knodes['pos_alpha_bottom'] if self.dual else 100.00
        parents['z'] = knodes['z_bottom'] if 'z' in self.include else 0.5
        return parents

    def _create_wfpt_knode(self, knodes):
        """Build the observed RL knode over the trial-level data columns."""
        return Knode(self.rl_class, 'wfpt', observed=True,
                     col_name=['split_by', 'feedback', 'response', 'q_init'],
                     **self._create_wfpt_parents_dict(knodes))


def RL_like(x, v, alpha, pos_alpha, z=0.5, p_outlier=0):
    """Log-likelihood of trial-wise responses under the RL model.

    Input:
        x - DataFrame with 'response', 'q_init', 'feedback' and 'split_by'
            columns (one row per trial).
        v, alpha, pos_alpha, z - model parameters forwarded to wiener_like_rl.
        p_outlier - outlier mixture probability.
    """
    wiener_params = {'err': 1e-4, 'n_st': 2, 'n_sz': 2,
                     'use_adaptive': 1,
                     'simps_err': 1e-3,
                     'w_outlier': 0.1}
    # NOTE: dropped an unused ``sum_logp = 0`` accumulator left over from an
    # earlier implementation.
    wp = wiener_params
    response = x['response'].values.astype(int)
    q = x['q_init'].iloc[0]
    feedback = x['feedback'].values.astype(float)
    split_by = x['split_by'].values.astype(int)
    return wiener_like_rl(response, feedback, split_by, q, alpha, pos_alpha, v, z, p_outlier=p_outlier, **wp)
RL = stochastic_from_dist('RL', RL_like)
Esempio n. 28
0
import numpy as np
from scipy import stats

from kabuki.utils import stochastic_from_dist

np.seterr(divide='ignore')

import hddm

def wiener_like_contaminant(value, cont_x, v, sv, a, z, sz, t, st, t_min, t_max,
                            err, n_st, n_sz, use_adaptive, simps_err):
    """Log-likelihood for the simple DDM including contaminants."""
    # the C extension expects the contaminant indicator as int32
    contaminant_idx = cont_x.astype(np.int32)
    return hddm.wfpt.wiener_like_contaminant(
        value, contaminant_idx, v, sv, a, z, sz, t, st,
        t_min, t_max, err, n_st, n_sz, use_adaptive, simps_err)

# Stochastic class for the contaminant-DDM likelihood above.
WienerContaminant = stochastic_from_dist(name="Wiener Simple Diffusion Process",
                                         logp=wiener_like_contaminant)

def general_WienerCont(err=1e-4, n_st=2, n_sz=2, use_adaptive=1, simps_err=1e-3):
    """Create a Wiener-contaminant stochastic class with the given numerical
    tolerances baked in as likelihood defaults.

    Input:
        err, n_st, n_sz, use_adaptive, simps_err - numerical settings
            forwarded to wiener_like_contaminant.
    Output:
        the stochastic class returned by stochastic_from_dist.
    """

    # PEP 8 (E731): a named def instead of a lambda assigned to a name.
    # The positional signature and keyword defaults are kept identical --
    # NOTE(review): stochastic_from_dist appears to introspect this
    # signature to derive the node's parents, so do not change it.
    def _like(value, cont_x, v, sv, a, z, sz, t, st, t_min, t_max,
              err=err, n_st=n_st, n_sz=n_sz,
              use_adaptive=use_adaptive, simps_err=simps_err):
        return wiener_like_contaminant(value, cont_x, v, sv, a, z, sz, t, st,
                                       t_min, t_max, err=err, n_st=n_st,
                                       n_sz=n_sz, use_adaptive=use_adaptive,
                                       simps_err=simps_err)

    _like.__doc__ = wiener_like_contaminant.__doc__
    return stochastic_from_dist(name="Wiener Diffusion Contaminant Process",
                                logp=_like)

def generate_wfpt_stochastic_class(wiener_params=None, sampling_method='cdf', cdf_range=(-5,5), sampling_dt=1e-4):
    """
    create a wfpt stochastic class by creating a pymc nodes and then adding quantile functions.
    Input:
        wiener_params <dict> - dictonary of wiener_params for wfpt likelihoods
Esempio n. 29
0
def make_mlp_likelihood(model=None,
                        model_config=None,
                        wiener_params=None,
                        **kwargs):
    """Defines the likelihoods for the MLP networks.

    :Arguments:
        model: str <default='ddm'>
            String that determines which model you would like to fit your data to.
            Currently available models are: 'ddm', 'full_ddm', 'angle', 'weibull', 'ornstein', 'levy'
        model_config: dict <default=None>
            Model config supplied via the calling HDDM class. Necessary for construction of likelihood.
            Should have the structure of model_configs in the hddm.model_config.model_config dictionary.
        wiener_params: dict <default=None>
            Wiener parameter settings; 'w_outlier' is read by random() and pdf().
        kwargs: dict
            Dictionary of additional keyword arguments.
            Importantly here, this carries the preloaded CNN.

    :Returns:
        Returns a pymc.object stochastic object as defined by PyMC2
    """
    def random(
        self,
        keep_negative_responses=True,
        add_model=False,
        add_model_parameters=False,
        add_outliers=False,
        keep_subj_idx=False,
    ):
        """
        Generate random samples from a given model (the dataset matches the size of the respective observated dataset supplied as an attribute of self).
        """

        # This can be simplified so that we pass parameters directly to the simulator ...
        theta = np.array(model_config["params_default"], dtype=np.float32)
        keys_tmp = self.parents.value.keys()
        cnt = 0

        # overwrite defaults with the node's current parent values,
        # in the order given by the model config
        for param in model_config["params"]:
            if param in keys_tmp:
                theta[cnt] = np.array(self.parents.value[param]).astype(
                    np.float32)
            cnt += 1

        sim_out = simulator(theta=theta,
                            model=model,
                            n_samples=self.shape[0],
                            max_t=20)

        # Add outliers:
        # NOTE(review): this branch indexes wiener_params; assumes a dict was
        # supplied (the default is None) -- confirm callers always pass one.
        if add_outliers:
            if self.parents.value["p_outlier"] > 0.0:
                sim_out = hddm_dataset_generators._add_outliers(
                    sim_out=sim_out,
                    p_outlier=self.parents.value["p_outlier"],
                    max_rt_outlier=1 / wiener_params["w_outlier"],
                )

        sim_out_proc = hddm_preprocess(
            sim_out,
            keep_negative_responses=keep_negative_responses,
            keep_subj_idx=keep_subj_idx,
            add_model_parameters=add_model_parameters,
        )

        if add_model:
            sim_out_proc["model"] = model

        return sim_out_proc

    def pdf(self, x):
        """Evaluate the network likelihood density at x (rt, or rt+response)."""
        # Check if model supplied has only two choice options
        # If yes --> check if two-dimensional input (rt, response) or one-dimensional input (rt) --> processing depends on it
        # If not --> input x has to be two dimensional (rt, response) becasuse we can't deduce response from rt
        x = np.array(x, dtype=np.float32)

        # one-dimensional input: signed rt encodes the response
        if len(x.shape) == 1 or x.shape[1] == 1:
            rt = x
            response = rt / np.abs(rt)
            rt = np.abs(rt)
        elif x.shape[1] == 2:
            rt = x[:, 0]
            response = x[:, 1]
        # NOTE(review): inputs with more than two columns leave rt/response
        # unset and would raise below -- assumes callers never pass them.

        params = np.array([
            self.parents[param] for param in model_config["params"]
        ]).astype(np.float32)

        return hddm.wfpt.wiener_like_nn_mlp_pdf(
            rt,
            response,
            params,
            p_outlier=self.parents.value["p_outlier"],
            w_outlier=wiener_params["w_outlier"],
            network=kwargs["network"],
        )

    def cdf(self, x):
        # TODO: Implement the CDF method for neural networks
        return "Not yet implemented"

    def make_likelihood():
        """Build the likelihood function from generated source: the code
        string is exec'd and the resulting function retrieved from locals()."""
        likelihood_str = make_likelihood_str_mlp(config=model_config,
                                                 wiener_params=wiener_params)
        exec(likelihood_str)
        my_fun = locals()["custom_likelihood"]
        return my_fun

    likelihood_ = make_likelihood()

    # bind the preloaded network (and any other kwargs) into the likelihood
    wfpt_nn = stochastic_from_dist("Wienernn_" + model,
                                   partial(likelihood_, **kwargs))

    wfpt_nn.pdf = pdf
    wfpt_nn.cdf_vec = None  # AF TODO: Implement this for neural nets (not a big deal actually but not yet sure where this is ever used finally)
    wfpt_nn.cdf = cdf
    wfpt_nn.random = random
    return wfpt_nn
Esempio n. 30
0
def make_mlp_likelihood_reg(model=None,
                            model_config=None,
                            wiener_params=None,
                            **kwargs):
    """Defines the regressor likelihoods for the MLP networks.

    :Arguments:
        model: str <default='ddm'>
            String that determines which model you would like to fit your data to.
            Currently available models are: 'ddm', 'full_ddm', 'angle', 'weibull', 'ornstein', 'levy'
        model_config: dict <default=None>
            Model config supplied via the calling HDDM class. Necessary for construction of likelihood.
            Should have the structure of model_configs in the hddm.model_config.model_config dictionary.
        wiener_params: dict <default=None>
            Settings for the wiener likelihood (only 'w_outlier' is read here).
        kwargs: dict
            Dictionary of additional keyword arguments.
            Importantly here, this carries the preloaded CNN.

    :Returns:
        Returns a pymc.object stochastic object as defined by PyMC2
    """

    # Prepare indirect regressors:
    # invert the config mapping {indirect_regressor: {'links_to': [params]}}
    # into {param: set_of_indirect_regressors_affecting_it}.
    param_links = {}
    if "indirect_regressors" in model_config:
        for indirect_regressor, spec in model_config["indirect_regressors"].items():
            for linked_param in spec["links_to"]:
                param_links.setdefault(linked_param, set()).add(indirect_regressor)

    # Parameters without indirect regressors link to the empty set.
    # If there are no indirect regressors at all, every parameter does.
    for param in model_config["params"]:
        param_links.setdefault(param, set())

    def random(
        self,
        keep_negative_responses=True,
        add_model=False,
        add_model_parameters=False,
        add_outliers=False,
        keep_subj_idx=False,
    ):
        """
        Function to sample from a regressor based likelihood. Conditions on the covariates.
        """
        param_dict = deepcopy(self.parents.value)
        del param_dict["reg_outcomes"]

        # One row of simulator parameters per observed trial.
        param_data = np.zeros(
            (self.value.shape[0], len(model_config["params"])),
            dtype=np.float32)

        for cnt, tmp_str in enumerate(model_config["params"]):
            if tmp_str in self.parents["reg_outcomes"]:
                # Trial-wise regression outcome, plus contributions from any
                # indirect regressors linked to this parameter.
                param_data[:, cnt] = param_dict[tmp_str].values
                for linked_indirect_regressor in param_links[tmp_str]:
                    param_data[:, cnt] = param_data[:, cnt] + \
                        param_dict[linked_indirect_regressor].values
            else:
                # Scalar parameter shared across trials.
                param_data[:, cnt] = param_dict[tmp_str]

        sim_out = simulator(
            theta=param_data,
            model=model,
            n_samples=1,
            max_t=20,
        )

        # Optionally replace a fraction of trials with uniform outliers.
        if add_outliers:
            if self.parents.value["p_outlier"] > 0.0:
                sim_out = hddm_dataset_generators._add_outliers(
                    sim_out=sim_out,
                    p_outlier=self.parents.value["p_outlier"],
                    max_rt_outlier=1 / wiener_params["w_outlier"],
                )

        sim_out_proc = hddm_preprocess(
            sim_out,
            keep_negative_responses=keep_negative_responses,
            add_model_parameters=add_model_parameters,
            keep_subj_idx=keep_subj_idx,
        )

        if add_model:
            sim_out_proc["model"] = model

        return sim_out_proc

    def pdf(self, x):
        # TODO: Implement the PDF method for regression-based network likelihoods
        return "Not yet implemented"

    def cdf(self, x):
        # TODO: Implement the CDF method for neural networks
        return "Not yet implemented"

    def make_likelihood():
        # Generate the likelihood source for this model / parameter-link
        # configuration, execute it, and pull the resulting function out of
        # the local namespace (locals() consulted after exec on purpose).
        likelihood_str = make_reg_likelihood_str_mlp(
            config=model_config,
            wiener_params=wiener_params,
            param_links=param_links,
        )
        exec(likelihood_str)
        return locals()["custom_likelihood_reg"]

    # Wrap the generated likelihood as a PyMC2 stochastic and attach helpers.
    likelihood_ = make_likelihood()
    stoch = stochastic_from_dist("wfpt_reg", partial(likelihood_, **kwargs))
    stoch.pdf = pdf
    stoch.cdf = cdf
    stoch.random = random
    return stoch
# Esempio n. 31 ("Example no. 31" — scraped-source example separator; stray "0" was a vote-count artifact)

def wienernn_like_angle(x, v, sv, a, theta, z, sz, t, st, p_outlier=0):
    """Log-likelihood of the angle model via the neural-network likelihood.

    ``x`` is assumed to be a DataFrame-like object exposing 'rt' and
    'nn_response' columns (TODO confirm against callers); the remaining
    arguments are model parameters forwarded to ``wiener_like_nn_angle``.
    """
    # Fixed numerical settings for the wiener likelihood evaluation.
    wp = {
        'err': 1e-4,
        'n_st': 2,
        'n_sz': 2,
        'use_adaptive': 1,
        'simps_err': 1e-3,
        'w_outlier': 0.1,
    }

    rts = np.absolute(x['rt'].values)
    responses = x['nn_response'].values.astype(int)
    return wiener_like_nn_angle(rts,
                                responses,
                                v,
                                sv,
                                a,
                                theta,
                                z,
                                sz,
                                t,
                                st,
                                p_outlier=p_outlier,
                                **wp)


Wienernn_angle = stochastic_from_dist('Wienernn_angle', wienernn_like_angle)