Example #1
    thresh_func_gpu = k / l * (x / l)**(k - 1) * pycuda_exp(-(x / l)**k)
    thresh_func_gpu *= a
    return thresh_func_gpu
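For reference, the expression above is a scaled Weibull density, a * k/l * (x/l)**(k-1) * exp(-(x/l)**k), evaluated element-wise on a GPU array of time points and apparently used in main() below as a time-varying decision threshold. A minimal CPU sketch of the same curve (the time grid and the helper name gen_weibull_cpu are assumptions for illustration, not part of the module):

import numpy as np

def gen_weibull_cpu(k, l, a, max_time=5., dt=1e-4):
    # NumPy counterpart of gen_weibull_gpu: a Weibull density in time,
    # scaled by a, on the same time grid used in main() below.
    x = np.arange(0, max_time, dt, dtype=np.float32)
    return a * k / l * (x / l)**(k - 1) * np.exp(-(x / l)**k)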

class wfpt_gpu_gen(hddm.likelihoods.wfpt_gen):
    sampling_method = 'gpu'

    def _rvs(self, v, V, a, z, Z, t, T):
        param_dict = {'v':v, 'z':z, 't':t, 'a':a, 'Z':Z, 'V':V, 'T':T}
        if self.sampling_method == 'gpu':
            sampled_rts = sim_drift(v, V, a, z, Z, t, T, size=self._size, dt=self.dt, return_gpu=True)
        else:
            sampled_rts = hddm.generate.gen_rts(param_dict, method=self.sampling_method, samples=self._size, dt=self.dt)
        return sampled_rts[:self._size]

wfpt_gpu_like = scipy_stochastic(wfpt_gpu_gen, name='wfpt_gpu')


def main():
    thresh_func = gen_weibull_gpu(3, 1, 1)
    max_time = 5.
    dt = 1e-4

    thresh_const = np.ones(int(max_time / dt), dtype=np.float32)
    thresh_func_const = pycuda.gpuarray.to_gpu(thresh_const)
    print(thresh_func_const.get())
    plt.plot(np.arange(0, max_time, dt), thresh_func.get())
    plt.plot(np.arange(0, max_time, dt), -thresh_func.get())

    #thresh_func = np.array(a*np.exp(-rate*np.linspace(0, max_time, steps)), dtype=np.float32)
    size = 512
Example #2
    def _rvs(self, v, V, a, z, Z, t, T):
        param_dict = {'v':v, 'z':z, 't':t, 'a':a, 'Z':Z, 'V':V, 'T':T}
        sampled_rts = hddm.generate.gen_rts(param_dict, method=self.sampling_method, samples=self._size, dt=self.dt)
        return sampled_rts

    def random(self, v=1., V=0., a=2, z=.5, Z=.1, t=.3, T=.1, size=100):
        self._size = size
        return self._rvs(v, V, a, z, Z, t, T)

wfpt_like = scipy_stochastic(wfpt_gen, name='wfpt', longname="""Wiener first passage time likelihood function""", extradoc="""Wiener first passage time (WFPT) likelihood function of the Ratcliff Drift Diffusion Model (DDM). Models two-choice decision-making tasks as a drift process that accumulates evidence over time until it hits one of two boundaries and executes the corresponding response. Implemented using the Navarro & Fuss (2009) method.

Parameters:
***********
v: drift-rate
a: threshold
z: bias [0,1]
t: non-decision time

References:
***********
Navarro, D. J. & Fuss, I. G. (2009). Fast and accurate calculations for first-passage times in Wiener diffusion models. Journal of Mathematical Psychology, 53(4), 222-230.
""")

def wiener_like_gpu(value, v, V, a, z, t, out, err=1e-4):
    """Log-likelihood for the simple DDM including contaminants"""
    # Check if parameters are in allowed range
    if z < 0 or z > 1 or t < 0 or a <= 0 or V <= 0:
        return -np.inf

    wfpt_gpu.pdf_gpu(value, float(v), float(V), float(a), float(z), float(t), err, out)
    logp = gpuarray.sum(out).get()  # or gpuarray.sum(cumath.log(out)).get() to sum log-densities
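As the commented-out alternative suggests, a per-trial log-likelihood would take the log of the densities on the device before the reduction. A small self-contained sketch of that pattern with PyCUDA (the stand-in densities are made up for illustration):

import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import pycuda.cumath as cumath

out = gpuarray.to_gpu(np.random.uniform(0.1, 1.0, 512).astype(np.float32))  # stand-in densities
logp = gpuarray.sum(cumath.log(out)).get()  # sum of log-densities, computed on the GPU
print(logp)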
Example #3
                                    z,
                                    Z,
                                    t,
                                    T,
                                    size=self._size,
                                    dt=self.dt,
                                    return_gpu=True)
        else:
            sampled_rts = hddm.generate.gen_rts(param_dict,
                                                method=self.sampling_method,
                                                samples=self._size,
                                                dt=self.dt)
        return sampled_rts[:self._size]


wfpt_gpu_like = scipy_stochastic(wfpt_gpu_gen, name='wfpt_gpu')


def main():
    thresh_func = gen_weibull_gpu(3, 1, 1)
    max_time = 5.
    dt = 1e-4

    thresh_const = np.ones(int(max_time / dt), dtype=np.float32)
    thresh_func_const = pycuda.gpuarray.to_gpu(thresh_const)
    print(thresh_func_const.get())
    plt.plot(np.arange(0, max_time, dt), thresh_func.get())
    plt.plot(np.arange(0, max_time, dt), -thresh_func.get())

    #thresh_func = np.array(a*np.exp(-rate*np.linspace(0, max_time, steps)), dtype=np.float32)
    size = 512
Example #4
                        rt1 = (b - rand()*A) / i_v1 + t
                    else:
                        rt1 = np.inf

                    if rt0 < rt1:
                        sampled_rts[i_sample] = rt0
                    else:
                        sampled_rts[i_sample] = -rt1

        return sampled_rts

    def random(self, t, A, b, s, v, size):
        self._size = size
        return self._rvs(t, A, b, s, v)

lba_like = scipy_stochastic(lba_gen, name='lba', longname="", extradoc="")
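The loop ending above races two linear ballistic accumulators: each trial draws a start point from U(0, A) and a drift rate, runs to threshold b, and the faster accumulator determines the response, with RTs from the second accumulator negated. A vectorized sketch of that race (the drift means v and 1 - v, and treating non-positive drifts as never finishing, are assumptions for illustration; lba_gen's rejection logic is more involved):

import numpy as np

def simulate_lba(t, A, b, s, v, size=100):
    # Per-trial drift rates and uniform start points for two accumulators.
    drifts = np.random.normal(loc=[v, 1 - v], scale=s, size=(size, 2))
    starts = np.random.uniform(0, A, size=(size, 2))
    with np.errstate(divide='ignore'):
        finish = np.where(drifts > 0, (b - starts) / drifts, np.inf)
    rts = finish.min(axis=1) + t
    # Accumulator 0 wins -> positive RT, accumulator 1 wins -> negative RT.
    return np.where(finish[:, 0] <= finish[:, 1], rts, -rts)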


class HLBA(AccumulatorModel):
    def create_lba_knode(self, knodes):
        lba_parents = OrderedDict()
        lba_parents['t'] = knodes['t_bottom']
        lba_parents['A'] = knodes['A_bottom']
        lba_parents['b'] = knodes['b_bottom']
        lba_parents['s'] = knodes['s_bottom']
        lba_parents['v'] = knodes['v_bottom']

        return Knode(lba_like, 'lba', observed=True, col_name='rt', **lba_parents)

    def create_knodes(self):
        """Returns list of model parameters.