def di_lca(Id=3.5, Ii=3, dt=.005, si=2.5, tau=.05, ntrials=10, tmax=3.0, w=-.2, k=.93, rmax=70, b=35, g=15):
    """ tau: time constant (cf NMDA receptors)
    k: leak (0<k<1) | rec. excitation (1<k<~2)
    w: strength of cross-inhibition
    rmax: max rate of cells
    b: input needed for 1/2-max firing
    g: determines steepness of sigmoidal f-I curve
    """
    timepoints = np.arange(0, tmax, dt)
    rd = np.zeros(len(timepoints))
    ri = np.zeros(len(timepoints))
    rd[0] = .01
    ri[0] = 3
    Ed = si * np.sqrt(dt / tau) * rs(len(rd))
    Ei = si * np.sqrt(dt / tau) * rs(len(ri))
    NInput = lambda x, r: rmax / (1 + np.exp(-(x - b) / g)) - r
    # iterate over integer indices; iterating over the float timepoints
    # themselves would index the arrays incorrectly
    for i in range(1, len(timepoints)):
        rd[i] = rd[i-1] + dt/tau * NInput(Id + k*rd[i-1] - w*ri[i-1], rd[i-1]) + Ed[i]
        ri[i] = ri[i-1] + dt/tau * NInput(Ii + k*ri[i-1] - w*rd[i-1], ri[i-1]) + Ei[i]
    return rd, ri
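# Example usage (a minimal sketch, not part of the original module): run a single
# trial of di_lca and plot the direct (rd) and indirect (ri) rates. Assumes numpy
# is imported as np, matplotlib is available, and `rs` is bound to a random-sample
# generator (e.g., rs = np.random.random_sample), consistent with its use above.
def _demo_di_lca():
    import matplotlib.pyplot as plt
    rd, ri = di_lca(Id=3.5, Ii=3.)
    t = np.arange(0, 3.0, .005)
    plt.plot(t, rd, label='rd (direct)')
    plt.plot(t, ri, label='ri (indirect)')
    plt.xlabel('time (s)')
    plt.ylabel('firing rate (Hz)')
    plt.legend()
    plt.show()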
def LCA_Model(I1=10, I2=8, I0=2, k=5, B=5, si=1., Z=1, dt=.01, tau=.1, tmax=1.5):
    timepoints = np.arange(0, tmax, dt)
    ntime = timepoints.size
    y1 = np.zeros(ntime)
    y2 = np.zeros(ntime)
    E1 = si * np.sqrt(dt / tau) * rs(ntime)
    E2 = si * np.sqrt(dt / tau) * rs(ntime)
    onset = 100
    for i in range(onset, ntime):
        y1[i] = y1[i-1] + (I1 - k*y1[i-1] - B*y2[i-1]) * dt/tau + E1[i]
        y2[i] = y2[i-1] + (I2 - k*y2[i-1] - B*y1[i-1]) * dt/tau + E2[i]
        y_t = np.array([y1[i], y2[i]])
        if np.any(y_t >= Z):
            rt = i
            act = np.argmax(y_t)
            return y1[:i], y2[:i], rt, act
    return y1[:i], y2[:i], np.nan, np.nan
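# Example usage (a minimal sketch, not part of the original module): run one trial
# and report which accumulator crossed threshold Z first and when. The returned rt
# is a sample index, so multiply by dt to convert it to seconds. Assumes the same
# np/rs bindings as above.
def _demo_lca():
    y1, y2, rt, act = LCA_Model(I1=10, I2=8, Z=1, dt=.01)
    if np.isnan(rt):
        print('no accumulator reached threshold')
    else:
        print('unit {} won at t = {:.3f} s'.format(int(act), rt * .01))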
def simulate_dpm(p, pc_map={'vd': ['vd_early', 'vd_late', 'vd_uniform'], 'vi': ['vi_early', 'vi_late', 'vi_uniform']}, dt=.001, si=.1, tb=.65, ssd=None, sso=0, single_process=False, return_di=False):
    # list() is required for Python 3, where dict.values() is not subscriptable
    nlevels = len(list(pc_map.values())[0])
    dx = si * np.sqrt(dt)
    p = vectorize_params(p, pc_map=pc_map, nresp=nlevels)
    Tg = np.ceil((tb - p['tr']) / dt).astype(int)
    xtb = temporal_dynamics(p, np.cumsum([dt] * Tg.max()))
    if single_process:
        Pg = 0.5 * (1 + (p['vd'] - p['vi']) * dx / si)
        DVg = xtb[0] * np.cumsum(np.where((rs((nlevels, Tg.max())).T < Pg), dx, -dx).T, axis=1)
    else:
        Pd = 0.5 * (1 + p['vd'] * dx / si)
        Pi = 0.5 * (1 + p['vi'] * dx / si)
        direct = np.where((rs((nlevels, Tg.max())).T < Pd), dx, -dx).T
        indirect = np.where((rs((nlevels, Tg.max())).T < Pi), dx, -dx).T
        DVg = xtb[0] * np.cumsum(direct - indirect, axis=1)
    if ssd is not None:
        if 'sso' in list(p):
            sso = p['sso']
        Ps = 0.5 * (1 + p['ssv'] * dx / si)
        Ts = np.ceil((tb - (ssd + sso)) / dt).astype(int)
        ss_on = np.where(Ts < Tg, Tg - Ts, 0)
        ssBase = DVg[np.arange(nlevels), ss_on[:, None]][:, :, None]
        # add ss baseline to SS traces (nlevels, nSSD, ntrials_perssd, ntimepoints)
        DVs = ssBase + np.cumsum(np.where(rs((nlevels, 1, Ts.max())) < Ps, dx, -dx), axis=2)
        return [DVg, DVs]
    return DVg
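# Example usage (a minimal sketch, not part of the original module): build a
# parameter dict matching the default pc_map keys and simulate the go traces.
# The parameter values are hypothetical, and vectorize_params / temporal_dynamics
# are assumed to be defined elsewhere in this module. Passing an ssd array would
# additionally return the stop-signal traces DVs.
def _demo_simulate_dpm():
    p = {'tr': .2, 'xb': 1.5, 'ssv': -1.,
         'vd_early': 1.2, 'vd_late': 1.0, 'vd_uniform': 1.1,
         'vi_early': .6, 'vi_late': .5, 'vi_uniform': .55}
    DVg = simulate_dpm(p)  # go traces only, shape (nlevels, ntimepoints)
    print(DVg.shape)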
def __update_rand_vectors__(self):
    """ update rvector (random_floats) for Go and Stop traces """
    nl, ntot, nTime = self.nlevels, self.ntot, self.ntime
    self.xtime = csum([self.dt] * nTime)
    self.rvector = rs((nl, ntot, nTime))
    if self.include_ss:
        ssd, nssd, nss, nss_per, ssd_ix = self.ssd_info
        self.rvector_ss = rs((nl, nssd, nss_per, nTime))
def __update_rand_vectors__(self):
    """ update rvector (random_floats) for Go and Stop traces """
    nl, ntot, ntime = self.nlevels, self.ntot, self.ntime
    self.rvector = rs((nl, ntot, ntime))
    if self.include_ss:
        ssd, nssd, nss, nss_per, ssd_ix = self.ssd_info
        # reuse the first nss go-trial vectors for the stop traces so that the
        # stop process shares noise with the go process on stop trials
        self.rvector_ss = self.rvector[:, :nss, :].reshape(nl, nssd, nss_per, ntime)
def attractor_network(I1=6, I2=3, I0=2, k=.85, B=.28, si=.3, rmax=50, b=30, g=9, Z=20, dt=.001, tau=.05, tmax=1.5):
    timepoints = np.arange(0, tmax, dt)
    ntime = timepoints.size
    r1 = np.zeros(ntime)
    r2 = np.zeros(ntime)
    dv = np.zeros(ntime)
    NInput = lambda x, r: rmax / (1 + np.exp(-(x - b) / g)) - r
    E1 = si * np.sqrt(dt / tau) * rs(ntime)
    E2 = si * np.sqrt(dt / tau) * rs(ntime)
    onset = 100
    # pre-stimulus baseline: background input plus noise
    r1[:onset] = I0 + E1[:onset]
    r2[:onset] = I0 + E2[:onset]
    for i in range(onset, ntime):
        r1[i] = r1[i-1] + dt/tau * NInput(I1 + I0 + k*r1[i-1] - B*r2[i-1], r1[i-1]) + E1[i]
        r2[i] = r2[i-1] + dt/tau * NInput(I2 + I0 + k*r2[i-1] - B*r1[i-1], r2[i-1]) + E2[i]
        dv[i] = (r1[i] - r2[i]) / np.sqrt(2)
        if np.abs(dv[i]) >= Z:
            rt = i + 1
            return r1[:i+1], r2[:i+1], dv[:i+1], rt
    rt = i + 1
    return r1[:i], r2[:i], dv[:i], rt
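# Example usage (a minimal sketch, not part of the original module): run one trial
# of the attractor network and plot the decision variable against the +/-Z bounds.
# Assumes the same np/rs/matplotlib bindings as above.
def _demo_attractor():
    import matplotlib.pyplot as plt
    r1, r2, dv, rt = attractor_network(I1=6, I2=3, Z=20)
    plt.plot(dv, label='dv = (r1 - r2) / sqrt(2)')
    plt.axhline(20, ls='--', color='k')
    plt.axhline(-20, ls='--', color='k')
    plt.xlabel('time step')
    plt.legend()
    print('threshold crossed at step', rt)
    plt.show()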
def decision_network(Id=3.5, Ii=3.5, Io=3., wdi=.22, wid=.22, k=.85, si=2.3, dt=.001, tau=.05, tmax=1.5, rmax=70, b=35, g=15, ntrials=10, y=1, Z=20, IoMax=4.5):
    timepoints = np.arange(0, tmax, dt)
    ntp = len(timepoints)
    rd = np.zeros(ntp)
    ri = np.zeros(ntp)
    dv = np.zeros(ntp)
    NInput = lambda x, r: rmax / (1 + np.exp(-(x - b) / g)) - r
    dspace = lambda rd, ri: (rd - ri) / np.sqrt(2)
    Ed = si * np.sqrt(dt / tau) * rs(ntp)
    Ei = si * np.sqrt(dt / tau) * rs(ntp)
    onset = 200
    # pre-stimulus baseline: background input plus noise
    rd[:onset] = Io + Ed[:onset]
    ri[:onset] = Io + Ei[:onset]
    subZ = True
    IId, IIi = Id, Ii  # keep the original inputs (Id/Ii are reassigned below)
    for i in range(onset, ntp):
        rd[i] = rd[i-1] + dt/tau * NInput(Id + y*Io + k*rd[i-1] - wid*ri[i-1], rd[i-1]) + Ed[i]
        ri[i] = ri[i-1] + dt/tau * NInput(Ii + y*Io + k*ri[i-1] - wdi*rd[i-1], ri[i-1]) + Ei[i]
        if dv[i-1] < Z and subZ:
            # still below threshold: update the decision variable
            dv[i] = dspace(rd[i-1], ri[i-1])
        elif subZ:
            # threshold crossed: reverse the inputs, remove cross-inhibition,
            # and switch to a decaying f-I curve
            Id, Ii = -Id * Io, -Ii * Io
            wdi, wid = 0, 0
            NInput = lambda x, r: Io / (1 + np.exp(-(x - b) / g)) - r - IoMax
            subZ = False
        elif not subZ and rd[i] < (IoMax + 1):
            # rates have relaxed back toward baseline: pad with baseline activity
            rd0 = hs(rd[:200].tolist() * 3)
            ri0 = hs(ri[:200].tolist() * 3)
            rd, ri = hs([rd[:i], rd0]), hs([ri[:i], ri0])
            break
    return rd, ri, dv[:dv[dv < Z].argmax()]
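# Example usage (a minimal sketch, not part of the original module): simulate one
# decision and inspect the decision variable up to threshold. Assumes `hs` is
# np.hstack and `rs` is np.random.random_sample, consistent with their use above.
def _demo_decision_network():
    rd, ri, dv = decision_network(Id=3.5, Ii=3.0)
    print('dv length (steps below threshold):', dv.size)
    print('peak rates: rd={:.1f}, ri={:.1f}'.format(rd.max(), ri.max()))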
def di_decision(p):
    # dx, si, and nc are assumed to be defined at module level
    Pd, Pi, Tex = update_execution(p)
    direct = np.where((rs((nc, Tex.max())).T < Pd), dx, -dx).T
    indirect = np.where((rs((nc, Tex.max())).T < Pi), dx, -dx).T
    execution = np.cumsum(direct - indirect, axis=1)
    choice = np.nan
    while np.isnan(choice):
        choice, p = analyze_execution(execution, p)
    return int(choice)
def simulate_multirace(p, pcmap={'vd': ['vd_a', 'vd_b', 'vd_c', 'vd_d'], 'vi': ['vi_a', 'vi_b', 'vi_c', 'vi_d']}, dt=.001, si=.01, tb=.9, single_process=0, return_di=False):
    # list() is required for Python 3, where dict.values() is not subscriptable
    nresp = len(list(pcmap.values())[0])
    dx = si * np.sqrt(dt)
    p = vectorize_params(p, pcmap=pcmap, nresp=nresp)
    Tex = np.ceil((tb - p['tr']) / dt).astype(int)
    xtb = temporal_dynamics(p, np.cumsum([dt] * Tex.max()))
    if single_process:
        Pe = 0.5 * (1 + (p['vd'] - p['vi']) * dx / si)
        execution = xtb * np.cumsum(np.where((rs((nresp, Tex.max())).T < Pe), dx, -dx).T, axis=1)
    else:
        Pd = 0.5 * (1 + p['vd'] * dx / si)
        Pi = 0.5 * (1 + p['vi'] * dx / si)
        direct = xtb * np.where((rs((nresp, Tex.max())).T < Pd), dx, -dx).T
        indirect = np.where((rs((nresp, Tex.max())).T < Pi), dx, -dx).T
        execution = np.cumsum(direct - indirect, axis=1)
    if return_di:
        return np.cumsum(direct, axis=1), np.cumsum(indirect, axis=1), execution
    return execution
def simulate_learning(p, pc_map={'vd': ['vd_e', 'vd_u', 'vd_l'], 'vi': ['vi_e', 'vi_u', 'vi_l']}, nc=3, lr=np.array([.4, .3]), nssd=5, dt=.001, si=.1, ntot=1000, tb=.3):
    dx = np.sqrt(si * dt)
    p = vectorize_params(p, pc_map=pc_map, ncond=nc)
    Pd, Pi, Tex = update_execution(p)
    t = np.cumsum([dt] * Tex.max())
    xtb = temporal_dynamics(p, t)
    rts, vd, vi = [], [], []
    for i in range(ntot):
        Pd, Pi, Tex = update_execution(p)
        direct = np.where((rs((nc, Tex.max())).T < Pd), dx, -dx).T
        indirect = np.where((rs((nc, Tex.max())).T < Pi), dx, -dx).T
        # subtract the indirect from the direct process, consistent with
        # simulate_multirace above
        execution = np.cumsum(direct - indirect, axis=1)
        r = np.argmax((execution.T >= p['a']).T, axis=1) * dt
        rt = p['tr'] + (r * np.where(r == 0, np.nan, 1))
        # find conditions where a response was recorded and update the drift rates
        for ci in np.where(~np.isnan(rt))[0]:
            p['vd'][ci] = p['vd'][ci] + p['vd'][ci] * (lr[0] * (rt[ci] - .500))
            p['vi'][ci] = p['vi'][ci] - p['vi'][ci] * (lr[1] * (rt[ci] - .500))
        vd.append(deepcopy(p['vd']))
        vi.append(deepcopy(p['vi']))
        rts.append(rt)
    return np.asarray(rts), np.asarray(vd), np.asarray(vi)
def simulate_multirace(p, dt=.001, si=.1, tb=1.5, singleProcess=False):
    temporal_dynamics = lambda p, t: np.cosh(p['xb'] * t)
    nresp = p['vd'].size
    dx = si * np.sqrt(dt)
    nTime = np.ceil((tb - p['tr']) / dt).astype(int)
    xtb = temporal_dynamics(p, np.cumsum([dt] * nTime))
    if singleProcess:
        Pdelta = .5 * (1 + ((p['vd'] - p['vi']) * np.sqrt(dt)) / si)
        execution = xtb * np.cumsum(np.where((rs((nresp, nTime)).T < Pdelta), dx, -dx).T, axis=1)
    else:
        Pd = .5 * (1 + (p['vd'] * np.sqrt(dt)) / si)
        Pi = .5 * (1 + (p['vi'] * np.sqrt(dt)) / si)
        direct = xtb * np.where((rs((nresp, nTime)).T < Pd), dx, -dx).T
        indirect = np.where((rs((nresp, nTime)).T < Pi), dx, -dx).T
        execution = np.cumsum(direct - indirect, axis=1)
    act_ix, rt, rt_ix = analyze_multiresponse(execution, p, dt=dt)
    return act_ix, rt, rt_ix
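# Why P = .5 * (1 + v * sqrt(dt) / si) in the simulators above: each step is +dx
# with probability P and -dx otherwise, so E[step] = dx * (2P - 1) = v * dt and
# Var[step] ~ si**2 * dt, i.e., the random walk approximates a diffusion with
# drift v and diffusion coefficient si. A minimal numeric check of that identity
# (not part of the original module):
def _check_step_moments(v=1.5, si=.1, dt=.001, n=200000):
    dx = si * np.sqrt(dt)
    P = .5 * (1 + v * np.sqrt(dt) / si)
    steps = np.where(rs(n) < P, dx, -dx)
    print('mean step {:.6f} ~ v*dt = {:.6f}'.format(steps.mean(), v * dt))
    print('var step  {:.6f} ~ si**2*dt = {:.6f}'.format(steps.var(), si**2 * dt))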
def simulate_rldpm(self, p, analyze=True):
    """ Simulate the dependent process model (DPM) with learning """
    p = self.vectorize_params(p)
    Pg, xtb, Ps, ss_on = self.__update_go_process__(p)
    nl, ntot, dx = self.nlevels, self.ntot, self.dx
    ssd, nssd, nss, nss_per, ssd_ix = self.ssd_info
    for trial in range(ntot):
        # accumulate the go process for this trial; per-trial DVg is (nlevels, ntime)
        DVg = xtb[:, na] * csum(np.where(self.rvector[:, trial, :].T < Pg, dx, -dx).T, axis=1)
        # INITIALIZE DVs FROM DVg(t=SSD): take the go trace at stop-signal onset
        # as the baseline for the stop traces (nlevels, nSSD, ntrials_perssd, ntime)
        ssBase = DVg[np.arange(nl)[:, na], ss_on][:, :, na, na]
        DVs = ssBase + csum(np.where(self.rvector_ss.T < Ps, dx, -dx).T, axis=3)
    if analyze:
        return self.analyze_fx(DVg, DVs, p)
    return [DVg, DVs]
def rew_func(rprob):
    if rs() < rprob:
        return 1
    else:
        return 0
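# Example usage (a minimal sketch, not part of the original module): simulate a
# block of probabilistic-reward trials and check the empirical reward rate.
def _demo_rew_func(rprob=.7, ntrials=1000):
    rewards = [rew_func(rprob) for _ in range(ntrials)]
    print('empirical reward rate: {:.3f} (target {})'.format(np.mean(rewards), rprob))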
def random(self, shape):
    # raise instead of exit(): killing the interpreter from a library
    # method would be hostile to callers
    if not isinstance(shape, (int, tuple)):
        raise TypeError('Int or tuple expected for shape.')
    return rs(shape)
if __name__ == '__main__':
    # Build data moments and pickle them
    dat_moments(period=1, sampling_number=4, transform=2)  # refresh
    # Initialize the file with parameters
    lb, ub, x0, keys, translator = calibration_params()  # bounds are set in a separate file
    # First, try running the function at a few points
    print('Testing the workers...')
    from p_client import compute_for_values
    pts = [lb + rs(lb.shape) * (ub - lb) for _ in range(1)]
    pts = [('compute', translator(x)) for x in pts]
    outs = compute_for_values(pts, timeout=72000.0)
    print('Everything worked, output is {}'.format(outs))
    print('')
    print('')
    print('running tic tac...')
    print('')
    print('')
    # TikTak optimization
    param = tiktak(N=2000, N_st=50, skip_local=False, skip_global=False)
    print('f is {} and x is {}'.format(param[0], param[1]))
start = default_timer()
tic = default_timer()
time_abort = 7200.0  # time when the worker stops, in seconds
while True:
    toc = default_timer()
    if toc - start > time_abort:
        break
    if toc - tic > 5.0:
        gc.collect()  # run garbage collection
        print('I am ok, running for {:.1f} min, memory used is {}'.format((toc - start) / 60, get_mem()))
        tic = toc
    sleep(rs())  # sleep a random number of seconds
    li_txt = [f for f in listdir('Job') if f.endswith('.pkl') and f.startswith('in')]
    num_in = len(li_txt)
    if num_in == 0:
        continue
    getnum = lambda x: int(find_between(x, 'in', '.pkl'))
    try:
        rnum = ri(1 + (num_in // 2))
        fname = li_txt[rnum]
        num = getnum(fname)