def plot_weights(plot, W):
    w = np.ravel(W)
    w_exc = w[np.where(w > 0)]
    w_inh = w[np.where(w < 0)]

    plot.hist(w_exc, color=Figure.colors('blue'))
    plot.hist(w_inh, color=Figure.colors('red'))
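# A quick standalone check of the same excitatory/inhibitory split, without the
# pyrl Figure wrapper. This is only an illustrative sketch: the random W, the bin
# count, and the colors are stand-ins, not values taken from the project.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
W   = rng.normal(scale=0.5, size=(100, 100))   # hypothetical weight matrix

w = np.ravel(W)
fig, ax = plt.subplots()
ax.hist(w[w > 0], bins=50, color='C0', alpha=0.7, label='excitatory')
ax.hist(w[w < 0], bins=50, color='C3', alpha=0.7, label='inhibitory')
ax.set_xlabel('Weight')
ax.set_ylabel('Count')
ax.legend()
fig.savefig('weights_hist.png')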
def do(action, args, config):
    """ Manage tasks. """
    print("ACTION*: " + str(action))
    print("ARGS*: " + str(args))

    #=====================================================================================

    if 'trials' in action:
        try:
            trials_per_condition = int(args[0])
        except IndexError:
            trials_per_condition = 500

        model = config['model']
        pg = model.get_pg(config['savefile'], config['seed'], config['dt'])

        # Conditions
        spec = model.spec
        mods = spec.mods
        freqs = spec.freqs
        n_conditions = spec.n_conditions
        n_trials = n_conditions * trials_per_condition

        print("{} trials".format(n_trials))
        task = model.Task()
        trials = []
        for n in xrange(n_trials):
            k = tasktools.unravel_index(n, (len(mods), len(freqs)))
            context = {'mod': mods[k.pop(0)], 'freq': freqs[k.pop(0)]}
            trials.append(task.get_condition(pg.rng, pg.dt, context))
        runtools.run(action, trials, pg, config['trialspath'])

    #=====================================================================================

    elif action == 'psychometric':
        trialsfile = runtools.behaviorfile(config['trialspath'])

        fig = Figure()
        plot = fig.add()

        psychometric(trialsfile, plot)
        plot.vline(config['model'].spec.boundary)

        fig.save(path=config['figspath'], name='psychometric')
        fig.close()

    #=====================================================================================

    elif action == 'sort':
        if 'value' in args:
            network = 'v'
        else:
            network = 'p'

        trialsfile = runtools.activityfile(config['trialspath'])
        sort(trialsfile, (config['figspath'], 'sorted'), network=network)
def process_trial(plot, n):
    if perf.choices[n] is None:
        print("Trial {}: No decision.".format(n))
        return

    trial = trials[n]
    time = trial['time']
    u = U[:, n]
    z = Z[:, n]

    stimulus = np.asarray(trial['epochs']['stimulus'])
    evidenceL = np.sum(u[stimulus - 1, inputs['LEFT']])
    evidenceR = np.sum(u[stimulus - 1, inputs['RIGHT']])

    decision = np.asarray(trial['epochs']['decision'])
    t_choice = perf.t_choices[n]
    idx = decision[np.where(decision <= t_choice)]
    t0 = time[idx][0]

    pL = z[idx, inputs['LEFT']]
    pR = z[idx, inputs['RIGHT']]
    S = pL + pR

    if perf.choices[n] == 'R':
        ls = '-'
    else:
        ls = '--'
    plot.plot(time[idx] - t0, pL / S, ls, color=Figure.colors('red'), lw=0.5, zorder=5)

    if perf.choices[n] == 'R':
        ls = '-'
    else:
        ls = '--'
    plot.plot(time[idx] - t0, pR / S, ls, color=Figure.colors('blue'), lw=0.5, zorder=5)
def sort(trialsfile, plots, units=None, network='p', **kwargs):
    """ Sort trials. """
    # Load trials
    data = utils.load(trialsfile)
    if len(data) == 9:
        trials, U, Z, A, P, M, perf, r_p, r_v = data
    else:
        trials, U, Z, Z_b, A, P, M, perf, r_p, r_v = data

    # Which network?
    if network == 'p':
        r = r_p
    else:
        r = r_v

    # Data shape
    Ntime = r.shape[0]
    N = r.shape[-1]

    # Same for every trial
    time = trials[0]['time']

    # Aligned time
    time_a = np.concatenate((-time[1:][::-1], time))
    Ntime_a = len(time_a)

    #=====================================================================================
    # Sort trials
    #=====================================================================================

    # Sort
    trials_by_cond = {}
    for n, trial in enumerate(trials):
        if perf.choices[n] is None or not perf.corrects[n]:
            continue

        # Condition
        gt_lt = trial['gt_lt']
        fpair = trial['fpair']
        if gt_lt == '>':
            f1, f2 = fpair
        else:
            f2, f1 = fpair
        cond = (f1, f2)

        # Firing rates
        Mn = np.tile(M[:, n], (N, 1)).T
        Rn = r[:, n] * Mn

        # Align point
        t0 = trial['epochs']['f1'][0] - 1

        # Storage
        trials_by_cond.setdefault(cond, {'r': np.zeros((Ntime_a, N)),
                                         'n': np.zeros((Ntime_a, N))})

        # Before
        n_b = Rn[:t0].shape[0]
        trials_by_cond[cond]['r'][Ntime-1-n_b:Ntime-1] += Rn[:t0]
        trials_by_cond[cond]['n'][Ntime-1-n_b:Ntime-1] += Mn[:t0]

        # After
        n_a = Rn[t0:].shape[0]
        trials_by_cond[cond]['r'][Ntime-1:Ntime-1+n_a] += Rn[t0:]
        trials_by_cond[cond]['n'][Ntime-1:Ntime-1+n_a] += Mn[t0:]

    # Average
    for cond in trials_by_cond:
        trials_by_cond[cond] = utils.div(trials_by_cond[cond]['r'],
                                         trials_by_cond[cond]['n'])

    #=====================================================================================
    # Plot
    #=====================================================================================

    lw = kwargs.get('lw', 1.5)

    w, = np.where((time_a >= -500) & (time_a <= 4000))

    def plot_sorted(plot, unit):
        t = 1e-3*time_a[w]
        yall = [[1]]
        for (f1, f2), r in trials_by_cond.items():
            plot.plot(t, r[w, unit], color=smap.to_rgba(f1), lw=lw)
            yall.append(r[w, unit])

        return t, yall

    if units is not None:
        for plot, unit in zip(plots, units):
            plot_sorted(plot, unit)
    else:
        figspath, name = plots
        for unit in xrange(N):
            fig = Figure()
            plot = fig.add()

            #-----------------------------------------------------------------------------

            t, yall = plot_sorted(plot, unit)

            plot.xlim(t[0], t[-1])
            plot.lim('y', yall, lower=0)

            plot.highlight(0, 0.5)
            plot.highlight(3.5, 4)

            #-----------------------------------------------------------------------------

            fig.save(path=figspath, name=name+'_{}{:03d}'.format(network, unit))
            fig.close()
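# The before/after bookkeeping in sort() writes each trial's masked rates into an
# aligned buffer of length 2*Ntime - 1 whose center bin (index Ntime - 1) is the
# align point, then divides summed rates by per-bin trial counts. A self-contained
# NumPy sketch of that accumulation follows; the toy shapes and the guarded
# division standing in for utils.div are assumptions, not the project's helpers.
import numpy as np

Ntime, N = 5, 2                       # time steps per trial, number of units
Ntime_a = 2*Ntime - 1                 # aligned buffer; align point at index Ntime-1

rates = np.zeros((Ntime_a, N))        # summed (masked) rates per aligned bin
counts = np.zeros((Ntime_a, N))       # number of trials contributing to each bin

for t0 in [1, 3]:                     # two toy trials with different align points
    Rn = np.ones((Ntime, N))          # stand-in for r[:, n] * Mn
    Mn = np.ones((Ntime, N))          # stand-in for the trial mask Mn

    n_b = Rn[:t0].shape[0]            # bins before the align point
    rates[Ntime-1-n_b:Ntime-1] += Rn[:t0]
    counts[Ntime-1-n_b:Ntime-1] += Mn[:t0]

    n_a = Rn[t0:].shape[0]            # bins from the align point onward
    rates[Ntime-1:Ntime-1+n_a] += Rn[t0:]
    counts[Ntime-1:Ntime-1+n_a] += Mn[t0:]

# Trial-averaged, event-aligned rates; bins with no trials stay NaN.
mean_rates = np.divide(rates, counts, out=np.full_like(rates, np.nan),
                       where=counts > 0)
print(mean_rates[:, 0])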
analysisfile = os.path.join(analysispath, 'rdm.py')
analysis = imp.load_source('analysis', analysisfile)

# models/rdm_rt
modelfile = os.path.join(modelspath, 'rdm_rt.py')
model = imp.load_source('model', modelfile)

behavior = os.path.join(trialspath, 'rdm_rt', 'trials_behavior.pkl')
activity = os.path.join(trialspath, 'rdm_rt', 'trials_activity.pkl')

#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.29
h = r*w

fig = Figure(w=w, h=h, labelpadx=4.5, labelpady=4.5)

#=========================================================================================

w_task = 0.26
w_behavior = 0.19
w_activity = 0.19

h = 0.67
h_epochs = 0.2
h_input = 0.14

x0 = 0.145
DX = 0.095
dx = 0.075
def do(action, args, config):
    """ Manage tasks. """
    print("ACTION*: " + str(action))
    print("ARGS*: " + str(args))

    #=====================================================================================

    if 'trials' in action:
        try:
            trials_per_condition = int(args[0])
        except IndexError:
            trials_per_condition = 100

        model = config['model']
        pg = model.get_pg(config['savefile'], config['seed'], config['dt'])

        spec = model.spec
        juices = spec.juices
        offers = spec.offers
        n_conditions = spec.n_conditions
        n_trials = trials_per_condition * n_conditions

        print("{} trials".format(n_trials))
        task = model.Task()
        trials = []
        for n in xrange(n_trials):
            k = tasktools.unravel_index(n, (len(juices), len(offers)))
            context = {'juice': juices[k.pop(0)], 'offer': offers[k.pop(0)]}
            trials.append(task.get_condition(pg.rng, pg.dt, context))
        runtools.run(action, trials, pg, config['trialspath'])

    #=====================================================================================

    elif action == 'choice_pattern':
        trialsfile = runtools.behaviorfile(config['trialspath'])

        fig = Figure()
        plot = fig.add()

        spec = config['model'].spec

        choice_pattern(trialsfile, spec.offers, plot)

        plot.xlabel('Offer (\#B : \#A)')
        plot.ylabel('Percent choice B')
        plot.text_upper_left('1A = {}B'.format(spec.A_to_B), fontsize=10)

        fig.save(path=config['figspath'], name=action)
        fig.close()

    elif action == 'indifference_point':
        trialsfile = runtools.behaviorfile(config['trialspath'])

        fig = Figure()
        plot = fig.add()

        spec = config['model'].spec

        indifference_point(trialsfile, spec.offers, plot)

        plot.xlabel('$(n_B - n_A)/(n_B + n_A)$')
        plot.ylabel('Percent choice B')
        #plot.text_upper_left('1A = {}B'.format(spec.A_to_B), fontsize=10)

        fig.save(path=config['figspath'], name=action)
        fig.close()

    #=====================================================================================

    elif action == 'sort_epoch':
        behaviorfile = runtools.behaviorfile(config['trialspath'])
        activityfile = runtools.activityfile(config['trialspath'])

        epoch = args[0]

        if 'value' in args:
            network = 'v'
        else:
            network = 'p'

        separate_by_choice = ('separate-by-choice' in args)

        sort_epoch(behaviorfile, activityfile, epoch, config['model'].spec.offers,
                   os.path.join(config['figspath'], 'sorted'),
                   network=network, separate_by_choice=separate_by_choice)
def sort_epoch(behaviorfile, activityfile, epoch, offers, plots, units=None,
               network='p', separate_by_choice=False, **kwargs):
    """ Sort trials. """
    # Load trials
    data = utils.load(activityfile)
    trials, U, Z, Z_b, A, P, M, perf, r_p, r_v = data

    if network == 'p':
        print("POLICY NETWORK")
        r = r_p
    else:
        print("VALUE NETWORK")
        r = r_v

    # Number of units
    N = r.shape[-1]

    # Same for every trial
    time = trials[0]['time']
    Ntime = len(time)

    # Aligned time
    time_a = np.concatenate((-time[1:][::-1], time))
    Ntime_a = len(time_a)

    #=====================================================================================
    # Sort trials
    #=====================================================================================

    # Epochs
    events = ['offer', 'choice']

    # Sort
    events_by_cond = {e: {} for e in events}
    n_by_cond = {}
    n_nondecision = 0
    for n, trial in enumerate(trials):
        if perf.choices[n] is None:
            n_nondecision += 1
            continue

        # Condition
        offer = trial['offer']
        choice = perf.choices[n]
        if separate_by_choice:
            cond = (offer, choice)
        else:
            cond = offer

        n_by_cond.setdefault(cond, 0)
        n_by_cond[cond] += 1

        # Storage
        for e in events_by_cond:
            events_by_cond[e].setdefault(cond, {'r': np.zeros((Ntime_a, N)),
                                                'n': np.zeros((Ntime_a, N))})

        # Firing rates
        m_n = np.tile(M[:, n], (N, 1)).T
        r_n = r[:, n] * m_n

        for e in events_by_cond:
            # Align point
            if e == 'offer':
                t0 = trial['epochs']['offer-on'][0]
            elif e == 'choice':
                t0 = perf.t_choices[n]
            else:
                raise ValueError(e)

            # Before
            n_b = r_n[:t0].shape[0]
            events_by_cond[e][cond]['r'][Ntime-1-n_b:Ntime-1] += r_n[:t0]
            events_by_cond[e][cond]['n'][Ntime-1-n_b:Ntime-1] += m_n[:t0]

            # After
            n_a = r_n[t0:].shape[0]
            events_by_cond[e][cond]['r'][Ntime-1:Ntime-1+n_a] += r_n[t0:]
            events_by_cond[e][cond]['n'][Ntime-1:Ntime-1+n_a] += m_n[t0:]
    print("Non-decision trials: {}/{}".format(n_nondecision, len(trials)))

    # Average trials
    for e in events_by_cond:
        for cond in events_by_cond[e]:
            events_by_cond[e][cond] = utils.div(events_by_cond[e][cond]['r'],
                                                events_by_cond[e][cond]['n'])

    # Epochs
    epochs = ['preoffer', 'postoffer', 'latedelay', 'prechoice']

    # Average epochs
    epochs_by_cond = {e: {} for e in epochs}
    for e in epochs_by_cond:
        if e == 'preoffer':
            ev = 'offer'
            w, = np.where((-500 <= time_a) & (time_a < 0))
        elif e == 'postoffer':
            ev = 'offer'
            w, = np.where((0 <= time_a) & (time_a < 500))
        elif e == 'latedelay':
            ev = 'offer'
            w, = np.where((500 <= time_a) & (time_a < 1000))
        elif e == 'prechoice':
            ev = 'choice'
            w, = np.where((-500 <= time_a) & (time_a < 0))
        else:
            raise ValueError(e)

        for cond in events_by_cond[ev]:
            epochs_by_cond[e][cond] = np.mean(events_by_cond[ev][cond][w], axis=0)

    #=====================================================================================
    # Classify units
    #=====================================================================================

    idpt = indifference_point(behaviorfile, offers)
    unit_types = classify_units(trials, perf, r, idpt)
    #unit_types = {}

    numbers = {}
    for v in unit_types.values():
        numbers[v] = 0
    for k, v in unit_types.items():
        numbers[v] += 1

    n_tot = np.sum(numbers.values())
    for k, v in numbers.items():
        print("{}: {}/{} = {}%".format(k, v, n_tot, 100*v/n_tot))

    #=====================================================================================
    # Plot
    #=====================================================================================

    lw = kwargs.get('lw', 1.5)
    ms = kwargs.get('ms', 6)
    mew = kwargs.get('mew', 0.5)
    rotation = kwargs.get('rotation', 60)
    #min_trials = kwargs.get('min_trials', 100)

    def plot_activity(plot, unit):
        yall = [1]

        min_trials = 20

        # Pre-offer
        epoch_by_cond = epochs_by_cond['preoffer']
        color = '0.7'
        if separate_by_choice:
            for choice, marker in zip(['A', 'B'], ['d', 'o']):
                x = []
                y = []
                for i, offer in enumerate(offers):
                    cond = (offer, choice)
                    if cond in n_by_cond and n_by_cond[cond] >= min_trials:
                        y_i = epoch_by_cond[cond][unit]
                        plot.plot(i, y_i, marker, mfc=color, mec=color,
                                  ms=0.8*ms, mew=0.8*mew, zorder=10)
                        yall.append(y_i)
                        if i != 0 and i != len(offers)-1:
                            x.append(i)
                            y.append(y_i)
                plot.plot(x, y, '-', color=color, lw=0.8*lw, zorder=5)
        else:
            x = []
            y = []
            for i, offer in enumerate(offers):
                y_i = epoch_by_cond[offer][unit]
                plot.plot(i, y_i, 'o', mfc=color, mec=color,
                          ms=0.8*ms, mew=0.8*mew, zorder=10)
                yall.append(y_i)
                if i != 0 and i != len(offers)-1:
                    x.append(i)
                    y.append(y_i)
            plot.plot(x, y, '-', color=color, lw=0.8*lw, zorder=5)

        # Epoch
        epoch_by_cond = epochs_by_cond[epoch]
        if epoch == 'postoffer':
            color = Figure.colors('darkblue')
        elif epoch == 'latedelay':
            color = Figure.colors('darkblue')
        elif epoch == 'prechoice':
            color = Figure.colors('darkblue')
        else:
            raise ValueError(epoch)

        if separate_by_choice:
            for choice, marker, color in zip(['A', 'B'], ['d', 'o'],
                                             [Figure.colors('red'), Figure.colors('blue')]):
                x = []
                y = []
                for i, offer in enumerate(offers):
                    cond = (offer, choice)
                    if cond in n_by_cond and n_by_cond[cond] >= min_trials:
                        y_i = epoch_by_cond[cond][unit]
                        yall.append(y_i)
                        plot.plot(i, y_i, marker, mfc=color, mec=color,
                                  ms=ms, mew=mew, zorder=10)
                        if i != 0 and i != len(offers)-1:
                            x.append(i)
                            y.append(y_i)
                plot.plot(x, y, '-', color=color, lw=lw, zorder=5)
        else:
            x = []
            y = []
            for i, offer in enumerate(offers):
                y_i = epoch_by_cond[offer][unit]
                plot.plot(i, y_i, 'o', mfc=color, mec=color, ms=ms, mew=mew, zorder=10)
                yall.append(y_i)
                if i != 0 and i != len(offers)-1:
                    x.append(i)
                    y.append(y_i)
            plot.plot(x, y, '-', color=color, lw=lw, zorder=5)

        plot.xticks(range(len(offers)))
        plot.xticklabels(['{}B:{}A'.format(*offer) for offer in offers],
                         rotation=rotation)

        plot.xlim(0, len(offers)-1)
        plot.lim('y', yall, lower=0)

        return yall

    #-------------------------------------------------------------------------------------

    if units is not None:
        for plot, unit in zip(plots, units):
            plot_activity(plot, unit)
    else:
        name = plots
        for unit in xrange(N):
            fig = Figure()
            plot = fig.add()

            plot_activity(plot, unit)

            if separate_by_choice:
                suffix = '_sbc'
            else:
                suffix = ''

            if unit in unit_types:
                plot.text_upper_right(unit_types[unit], fontsize=9)

            fig.save(name+'_{}{}_{}{:03d}'.format(epoch, suffix, network, unit))
            fig.close()
# models/rdm_fixed
rdm_fixed_modelfile = os.path.join(modelspath, 'rdm_fixed.py')
rdm_fixed_model = imp.load_source('rdm_fixed_model', rdm_fixed_modelfile)

rdm_fixed_behavior = os.path.join(trialspath, 'rdm_fixed', 'trials_behavior.pkl')
rdm_fixed_activity = os.path.join(trialspath, 'rdm_fixed', 'trials_activity.pkl')

#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.29
h = r * w

fig = Figure(w=w, h=h, labelpadx=4.5, labelpady=4.5)

#=========================================================================================

w_task = 0.26
w_behavior = 0.205
w_activity = 0.205

h = 0.67
h_epochs = 0.2
h_input = 0.14

x0 = 0.145
DX = 0.085
dx = 0.075
multisensory_analysisfile = os.path.join(analysispath, 'multisensory.py')
multisensory_analysis = imp.load_source('multisensory_analysis',
                                        multisensory_analysisfile)

# models/multisensory
multisensory_modelfile = os.path.join(modelspath, 'multisensory.py')
multisensory_model = imp.load_source('multisensory_model', multisensory_modelfile)

multisensory_behavior = os.path.join(trialspath, 'multisensory', 'trials_behavior.pkl')
multisensory_activity = os.path.join(trialspath, 'multisensory', 'trials_activity.pkl')

#=========================================================================================

fig = Figure()
plot = fig.add()

sigmas = []
for s in [''] + ['_s' + str(i) for i in xrange(101, 106)]:
    behaviorfile = os.path.join(trialspath, 'multisensory' + s, 'trials_behavior.pkl')
    sigmas.append(multisensory_analysis.psychometric(behaviorfile, plot))

fig.save()

#=========================================================================================

print("")
for i, (sigma_v, sigma_a, sigma_va) in enumerate(sigmas):
    if i == 0:
rdm_analysisfile = os.path.join(analysispath, 'rdm.py')
rdm_analysis = imp.load_source('rdm_analysis', rdm_analysisfile)

# models/rdm_fixed
rdm_fixed_modelfile = os.path.join(modelspath, 'rdm_fixed.py')
rdm_fixed_model = imp.load_source('rdm_fixed_model', rdm_fixed_modelfile)

rdm_fixed_behavior = os.path.join(trialspath, 'rdm_fixed', 'trials_behavior.pkl')
rdm_fixed_activity = os.path.join(trialspath, 'rdm_fixed', 'trials_activity.pkl')

#=========================================================================================

w = utils.mm_to_inch(174)
r = 1

fig = Figure(w=w, r=r)

x0 = 0.1
y0 = 0.1
dy = 0.05

w = 0.82
h = 0.13

fig.add('trial-5', [x0, y0, w, h])
fig.add('trial-4', [x0, fig[-1].top + dy, w, h])
fig.add('trial-3', [x0, fig[-1].top + dy, w, h])
fig.add('trial-2', [x0, fig[-1].top + dy, w, h])
fig.add('trial-1', [x0, fig[-1].top + dy, w, h])
# model
modelfile = os.path.join(modelspath, 'postdecisionwager.py')
model = imp.load_source('model', modelfile)

trialsfile_b = os.path.join(trialspath, 'postdecisionwager', 'trials_behavior.pkl')
trialsfile_a = os.path.join(trialspath, 'postdecisionwager', 'trials_activity.pkl')

#=========================================================================================
# Figure
#=========================================================================================

w = utils.mm_to_inch(114)
r = 0.9
thickness = 0.4
axislabelsize = 6

fig = Figure(w=w, r=r, thickness=thickness, ticksize=3, ticklabelsize=5,
             axislabelsize=axislabelsize, labelpadx=3, labelpady=4.5)

x0 = 0.16
y0 = 0.67
DX = 0.1

w_task = 0.82
h_task = 0.3

dy0 = 0.09
dy = 0.12

w_behavior = 0.24
h_behavior = 0.2

y1 = y0 - dy0 - h_behavior
def sort(trialsfile, plots, units=None, network='p', **kwargs):
    """ Sort trials. """
    # Load trials
    data = utils.load(trialsfile)
    trials, U, Z, Z_b, A, P, M, perf, r_p, r_v = data

    # Which network?
    if network == 'p':
        r = r_p
    else:
        r = r_v

    # Number of units
    N = r.shape[-1]

    # Same for every trial
    time = trials[0]['time']
    Ntime = len(time)

    # Aligned time
    time_a = np.concatenate((-time[1:][::-1], time))
    Ntime_a = len(time_a)

    #=====================================================================================
    # Aligned to stimulus onset
    #=====================================================================================

    r_by_cond_stimulus = {}
    n_r_by_cond_stimulus = {}
    for n, trial in enumerate(trials):
        if not perf.decisions[n]:
            continue

        if trial['mod'] == 'va':
            continue
        assert trial['mod'] == 'v' or trial['mod'] == 'a'

        if not perf.corrects[n]:
            continue

        # Condition
        mod = trial['mod']
        choice = perf.choices[n]
        cond = (mod, choice)

        # Storage
        r_by_cond_stimulus.setdefault(cond, np.zeros((Ntime_a, N)))
        n_r_by_cond_stimulus.setdefault(cond, np.zeros((Ntime_a, N)))

        # Firing rates
        Mn = np.tile(M[:,n], (N,1)).T
        Rn = r[:,n]*Mn

        # Align point
        t0 = trial['epochs']['stimulus'][0] - 1

        # Before
        n_b = Rn[:t0].shape[0]
        r_by_cond_stimulus[cond][Ntime-1-n_b:Ntime-1] += Rn[:t0]
        n_r_by_cond_stimulus[cond][Ntime-1-n_b:Ntime-1] += Mn[:t0]

        # After
        n_a = Rn[t0:].shape[0]
        r_by_cond_stimulus[cond][Ntime-1:Ntime-1+n_a] += Rn[t0:]
        n_r_by_cond_stimulus[cond][Ntime-1:Ntime-1+n_a] += Mn[t0:]

    for cond in r_by_cond_stimulus:
        r_by_cond_stimulus[cond] = utils.div(r_by_cond_stimulus[cond],
                                             n_r_by_cond_stimulus[cond])

    #-------------------------------------------------------------------------------------
    # Plot
    #-------------------------------------------------------------------------------------

    lw = kwargs.get('lw', 1.5)
    dashes = kwargs.get('dashes', [3, 2])

    vline_props = {'lw': kwargs.get('lw_vline', 0.5)}
    if 'dashes_vline' in kwargs:
        vline_props['linestyle'] = '--'
        vline_props['dashes'] = dashes

    colors_by_mod = {
        'v': Figure.colors('blue'),
        'a': Figure.colors('green')
        }
    linestyle_by_choice = {
        'L': '-',
        'H': '--'
        }
    lineprops = dict(lw=lw)

    def plot_sorted(plot, unit, w, r_sorted):
        t = time_a[w]
        yall = [[1]]
        for cond in [('v', 'H'), ('v', 'L'), ('a', 'H'), ('a', 'L')]:
            mod, choice = cond

            if mod == 'v':
                label = 'Vis, '
            elif mod == 'a':
                label = 'Aud, '
            else:
                raise ValueError(mod)

            if choice == 'H':
                label += 'high'
            elif choice == 'L':
                label += 'low'
            else:
                raise ValueError(choice)

            linestyle = linestyle_by_choice[choice]
            if linestyle == '-':
                lineprops = dict(linestyle=linestyle, lw=lw)
            else:
                lineprops = dict(linestyle=linestyle, lw=lw, dashes=dashes)
            plot.plot(t, r_sorted[cond][w,unit], color=colors_by_mod[mod],
                      label=label, **lineprops)
            yall.append(r_sorted[cond][w,unit])

        return t, yall

    def on_stimulus(plot, unit):
        w, = np.where((time_a >= -300) & (time_a <= 1000))
        t, yall = plot_sorted(plot, unit, w, r_by_cond_stimulus)

        plot.xlim(t[0], t[-1])

        return yall

    if units is not None:
        for plot, unit in zip(plots, units):
            on_stimulus(plot, unit)
    else:
        figspath, name = plots
        for unit in xrange(N):
            fig = Figure()
            plot = fig.add()

            #-----------------------------------------------------------------------------

            yall = []
            yall += on_stimulus(plot, unit)

            plot.lim('y', yall, lower=0)
            plot.vline(0)

            plot.xlabel('Time (ms)')
            plot.ylabel('Firing rate (a.u.)')

            #-----------------------------------------------------------------------------

            fig.save(path=figspath, name=name+'_{}{:03d}'.format(network, unit))
            fig.close()
from __future__ import absolute_import, division

import os

import numpy as np

from pyrl import fittools, runtools, tasktools, utils
from pyrl.figtools import Figure

#/////////////////////////////////////////////////////////////////////////////////////////

colors = {
    'v': Figure.colors('blue'),
    'a': Figure.colors('green'),
    'va': Figure.colors('orange')
    }

#/////////////////////////////////////////////////////////////////////////////////////////

def psychometric(trialsfile, plot, **kwargs):
    # Load trials
    trials, A, R, M, perf = utils.load(trialsfile)

    decision_by_freq = {}
    high_by_freq = {}
    for n, trial in enumerate(trials):
        mod = trial['mod']
        freq = trial['freq']

        decision_by_freq.setdefault(mod, {})
        high_by_freq.setdefault(mod, {})
        decision_by_freq[mod].setdefault(freq, [])
def do(action, args, config):
    print("ACTION*: " + str(action))
    print("ARGS*: " + str(args))

    #=====================================================================================

    if 'trials' in action:
        try:
            trials_per_condition = int(args[0])
        except IndexError:
            trials_per_condition = 100

        model = config['model']
        pg = model.get_pg(config['savefile'], config['seed'], config['dt'])

        spec = model.spec
        wagers = spec.wagers
        left_rights = spec.left_rights
        cohs = spec.cohs
        n_conditions = spec.n_conditions
        n_trials = trials_per_condition * n_conditions

        print("{} trials".format(n_trials))
        task = model.Task()
        trials = []
        for n in xrange(n_trials):
            k = tasktools.unravel_index(n, (len(wagers), len(left_rights), len(cohs)))
            context = {
                'wager': wagers[k.pop(0)],
                'left_right': left_rights[k.pop(0)],
                'coh': cohs[k.pop(0)]
                }
            trials.append(task.get_condition(pg.rng, pg.dt, context))
        runtools.run(action, trials, pg, config['trialspath'])

    #=====================================================================================

    elif action == 'sure_stimulus_duration':
        trialsfile = runtools.behaviorfile(config['trialspath'])

        fig = Figure()
        plot = fig.add()

        sure_stimulus_duration(trialsfile, plot)

        plot.xlabel('Stimulus duration (ms)')
        plot.ylabel('Probability sure target')

        fig.save(path=config['figspath'], name=action)

    #=====================================================================================

    elif action == 'correct_stimulus_duration':
        trialsfile = runtools.behaviorfile(config['trialspath'])

        fig = Figure()
        plot = fig.add()

        correct_stimulus_duration(trialsfile, plot)

        plot.xlabel('Stimulus duration (ms)')
        plot.ylabel('Probability correct')

        fig.save(path=config['figspath'], name=action)

    #=====================================================================================

    elif action == 'value_stimulus_duration':
        trialsfile = runtools.activityfile(config['trialspath'])

        fig = Figure()
        plot = fig.add()

        value_stimulus_duration(trialsfile, plot)

        plot.xlabel('Stimulus duration (ms)')
        plot.ylabel('Expected reward')

        fig.save(path=config['figspath'], name=action)

    #=====================================================================================

    elif action == 'sort':
        if 'value' in args:
            network = 'v'
        else:
            network = 'p'

        trialsfile = runtools.activityfile(config['trialspath'])
        sort(trialsfile, os.path.join(config['figspath'], 'sorted'), network=network)
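# The 'trials' branch above walks the full condition grid by unraveling the trial
# index into one index per factor. A rough sketch of the same idea with
# numpy.unravel_index instead of tasktools.unravel_index; the wrap-around via
# % n_conditions and the toy wager/side/coherence values are assumptions made
# only for illustration.
import numpy as np

wagers = [True, False]
left_rights = ['L', 'R']
cohs = [0.0, 3.2, 6.4, 12.8]
shape = (len(wagers), len(left_rights), len(cohs))

n_conditions = int(np.prod(shape))
trials_per_condition = 2
n_trials = trials_per_condition * n_conditions

contexts = []
for n in range(n_trials):
    i, j, k = np.unravel_index(n % n_conditions, shape)
    contexts.append({'wager': wagers[i], 'left_right': left_rights[j], 'coh': cohs[k]})

# Every condition appears exactly trials_per_condition times.
assert len(contexts) == trials_per_condition * n_conditions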
multisensory_behavior = os.path.join(trialspath, 'multisensory', 'trials_behavior.pkl')
multisensory_activity = os.path.join(trialspath, 'multisensory', 'trials_activity.pkl')

# models/romo
romo_modelfile = os.path.join(modelspath, 'romo.py')
romo_model = imp.load_source('romo_model', romo_modelfile)

romo_behavior = os.path.join(trialspath, 'romo', 'trials_behavior.pkl')
romo_activity = os.path.join(trialspath, 'romo', 'trials_activity.pkl')

#=========================================================================================
# Figure setup
#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.98
fig = Figure(w=w, r=r, axislabelsize=9, labelpadx=4, labelpady=6,
             thickness=0.8, ticksize=3, ticklabelsize=7.5, ticklabelpad=2)

w_behavior = 0.19
h_behavior = 0.15

w_activity = 0.16
h_activity = h_behavior

xleft = 0.1
ybot = 0.06

DX = 0.09
dx = 0.06

dy = 0.08
DY = 0.08

fig.add('romo-behavior', [xleft, ybot, w_behavior, w_behavior/r])
timespath = os.path.join(paperpath, 'times')
figspath = os.path.join(paperpath, 'work', 'figs')

modelname = sys.argv[1]

#=========================================================================================
# Figure
#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.48

fig = Figure(w=w, r=r, axislabelsize=11, labelpadx=6, labelpady=6,
             thickness=0.9, ticksize=5, ticklabelsize=9, ticklabelpad=3)

x0 = 0.11
y0 = 0.18

w = 0.24
h = 0.71

DX = 0.07

fig.add('Wrec', [x0, y0, w, h])
fig.add('Wrec_lambda', [fig[-1].right + DX, y0, w, h])
trialsfile_b = os.path.join(trialspath, 'padoaschioppa2006', 'trials_behavior.pkl')
trialsfile_a = os.path.join(trialspath, 'padoaschioppa2006', 'trials_activity.pkl')

# Model 2
modelfile2 = os.path.join(modelspath, 'padoaschioppa2006_1A3B.py')
model2 = imp.load_source('model2', modelfile2)

trialsfile2_b = os.path.join(trialspath, 'padoaschioppa2006_1A3B', 'trials_behavior.pkl')
trialsfile2_a = os.path.join(trialspath, 'padoaschioppa2006_1A3B', 'trials_activity.pkl')

#=========================================================================================
# Figure
#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.52
fig = Figure(w=w, r=r, axislabelsize=8.5, ticklabelsize=6.5,
             labelpadx=4.5, labelpady=4.5)

x0 = 0.07
y0 = 0.17

w = 0.18
h = 0.3

DX = 0.09
dx = 0.04
DY = 0.16

fig.add('choice-lower', [x0, y0, w, h])
fig.add('choice-upper', [fig[-1].x, fig[-1].top+DY, w, h])
fig.add('activity-1', [fig['choice-upper'].right+DX, fig['choice-upper'].y, w, h])
import numpy as np

from pyrl.figtools import Figure

w = 174/25.4
r = 0.6
h = r*w

fig = Figure(w=w, h=h)

#=========================================================================================
# Supervised learning
#=========================================================================================

#=========================================================================================
# Reinforcement learning
#=========================================================================================

#=========================================================================================

fig.save()
def do(action, args, config):
    """ Manage tasks. """
    print("ACTION*: " + str(action))
    print("ARGS*: " + str(args))

    #=====================================================================================

    if action == 'plot_trial':
        try:
            trials_per_condition = int(args[0])
        except IndexError:
            trials_per_condition = 1000

        model = config['model']
        pg = model.get_pg(config['savefile'], config['seed'], config['dt'])

        spec = model.spec
        juices = spec.juices
        offers = spec.offers
        n_conditions = spec.n_conditions
        n_trials = trials_per_condition * n_conditions

        print("{} trials".format(n_trials))
        task = model.Task()

        fig = Figure(axislabelsize=10, ticklabelsize=9)
        plot = fig.add()

        plot_trial()
        performance(config['savefile'], plot)

        fig.save(path=config['figspath'], name='performance')
        fig.close()

    #=====================================================================================

    elif 'trials' in action:
        try:
            trials_per_condition = int(args[0])
        except IndexError:
            trials_per_condition = 100

        model = config['model']
        pg = model.get_pg(config['savefile'], config['seed'], config['dt'])

        spec = model.spec
        juices = spec.juices
        offers = spec.offers
        n_conditions = spec.n_conditions
        n_trials = trials_per_condition * n_conditions

        print("{} trials".format(n_trials))
        task = model.Task()
        trials = []
        for n in xrange(n_trials):
            k = tasktools.unravel_index(n, (len(juices), len(offers)))
            context = {'juice': juices[k.pop(0)], 'offer': offers[k.pop(0)]}
            trials.append(task.get_condition(pg.rng, pg.dt, context))
        runtools.run(action, trials, pg, config['trialspath'])

    #=====================================================================================

    elif action == 'choice_pattern':
        trialsfile = runtools.behaviorfile(config['trialspath'])
        # print trialsfile

        # fig = Figure()
        # plot = fig.add()

        savefile = config['figspath']
        spec = config['model'].spec
        #print spec.offers

        choice_pattern(trialsfile, spec.offers, savefile, action)

        #plot.xlabel('Offer (\#B : \#A)')
        #plot.ylabel('Percent choice B')
        #plot.text_upper_left('1A = {}B'.format(spec.A_to_B), fontsize=10)

    #=====================================================================================

    elif action == 'sort':
        if 'value' in args:
            network = 'v'
        else:
            network = 'p'

        trialsfile = runtools.activityfile(config['trialspath'])
        sort(trialsfile, (config['figspath'], 'sorted'), network=network)

    #=====================================================================================

    elif action == 'statespace':
        trialsfile = runtools.activityfile(config['trialspath'])
        statespace(trialsfile, (config['figspath'], 'statespace'))
def do(action, args, config):
    """ Manage tasks. """
    print("ACTION*: " + str(action))
    print("ARGS*: " + str(args))

    if 'trials' in action:
        try:
            trials_per_condition = int(args[0])
        except IndexError:
            trials_per_condition = 100

        model = config['model']
        pg = model.get_pg(config['savefile'], config['seed'], config['dt'])

        spec = model.spec
        gt_lts = spec.gt_lts
        fpairs = spec.fpairs
        n_conditions = spec.n_conditions
        n_trials = trials_per_condition * n_conditions

        print("{} trials".format(n_trials))
        task = model.Task()
        trials = []
        for n in xrange(n_trials):
            k = tasktools.unravel_index(n, (len(gt_lts), len(fpairs)))
            context = {
                'delay': 3000,
                'gt_lt': gt_lts[k.pop(0)],
                'fpair': fpairs[k.pop(0)]
                }
            trials.append(task.get_condition(pg.rng, pg.dt, context))
        runtools.run(action, trials, pg, config['trialspath'])

    #=====================================================================================

    elif action == 'performance':
        trialsfile = runtools.behaviorfile(config['trialspath'])

        fig = Figure()
        plot = fig.add()

        performance(trialsfile, plot)

        plot.xlabel('$f_1$ (Hz)')
        plot.ylabel('$f_2$ (Hz)')

        fig.save(os.path.join(config['figspath'], action))

    #=====================================================================================

    elif action == 'sort':
        if 'value' in args:
            network = 'v'
        else:
            network = 'p'

        trialsfile = runtools.activityfile(config['trialspath'])
        sort(trialsfile, (config['figspath'], 'sorted'), network=network)
def sort(trialsfile, plots, unit=None, network='p', **kwargs):
    # Load trials
    data = utils.load(trialsfile)
    if len(data) == 9:
        trials, U, Z, A, P, M, perf, r_p, r_v = data
    else:
        trials, U, Z, Z_b, A, P, M, perf, r_p, r_v = data

    if network == 'p':
        print("Sorting policy network activity.")
        r = r_p
    else:
        print("Sorting value network activity.")
        r = r_v

    # Number of units
    N = r.shape[-1]

    # Time
    time = trials[0]['time']
    Ntime = len(time)

    # Aligned time
    time_a = np.concatenate((-time[1:][::-1], time))
    Ntime_a = len(time_a)

    #=====================================================================================
    # Preferred targets
    #=====================================================================================

    preferred_targets = get_preferred_targets(trials, perf, r)

    #=====================================================================================
    # No-wager trials
    #=====================================================================================

    def get_no_wager(func_t0):
        trials_by_cond = {}
        for n, trial in enumerate(trials):
            if trial['wager']:
                continue

            if trial['coh'] == 0:
                continue

            if perf.choices[n] is None:
                continue

            cond = trial['left_right']

            m_n = np.tile(M[:,n], (N, 1)).T
            r_n = r[:,n]*m_n

            t0 = func_t0(trial['epochs'], perf.t_choices[n])

            # Storage
            trials_by_cond.setdefault(cond, {'r': np.zeros((Ntime_a, N)),
                                             'n': np.zeros((Ntime_a, N))})

            # Before
            n_b = r_n[:t0].shape[0]
            trials_by_cond[cond]['r'][Ntime-1-n_b:Ntime-1] += r_n[:t0]
            trials_by_cond[cond]['n'][Ntime-1-n_b:Ntime-1] += m_n[:t0]

            # After
            n_a = r_n[t0:].shape[0]
            trials_by_cond[cond]['r'][Ntime-1:Ntime-1+n_a] += r_n[t0:]
            trials_by_cond[cond]['n'][Ntime-1:Ntime-1+n_a] += m_n[t0:]

        # Average
        for cond in trials_by_cond:
            trials_by_cond[cond] = utils.div(trials_by_cond[cond]['r'],
                                             trials_by_cond[cond]['n'])

        return trials_by_cond

    noTs_stimulus = get_no_wager(lambda epochs, t_choice: epochs['stimulus'][0] - 1)
    noTs_choice = get_no_wager(lambda epochs, t_choice: t_choice)

    #=====================================================================================
    # Wager trials, aligned to stimulus onset
    #=====================================================================================

    def get_wager(func_t0):
        trials_by_cond = {}
        trials_by_cond_sure = {}
        for n, trial in enumerate(trials):
            if not trial['wager']:
                continue

            if perf.choices[n] is None:
                continue

            if trial['coh'] == 0:
                continue

            cond = trial['left_right']

            m_n = np.tile(M[:,n], (N, 1)).T
            r_n = r[:,n]*m_n

            t0 = func_t0(trial['epochs'], perf.t_choices[n])

            if perf.choices[n] == 'S':
                # Storage
                trials_by_cond_sure.setdefault(cond, {'r': np.zeros((Ntime_a, N)),
                                                      'n': np.zeros((Ntime_a, N))})

                # Before
                n_b = r_n[:t0].shape[0]
                trials_by_cond_sure[cond]['r'][Ntime-1-n_b:Ntime-1] += r_n[:t0]
                trials_by_cond_sure[cond]['n'][Ntime-1-n_b:Ntime-1] += m_n[:t0]

                # After
                n_a = r_n[t0:].shape[0]
                trials_by_cond_sure[cond]['r'][Ntime-1:Ntime-1+n_a] += r_n[t0:]
                trials_by_cond_sure[cond]['n'][Ntime-1:Ntime-1+n_a] += m_n[t0:]
            else:
                # Storage
                trials_by_cond.setdefault(cond, {'r': np.zeros((Ntime_a, N)),
                                                 'n': np.zeros((Ntime_a, N))})

                # Before
                n_b = r_n[:t0].shape[0]
                trials_by_cond[cond]['r'][Ntime-1-n_b:Ntime-1] += r_n[:t0]
                trials_by_cond[cond]['n'][Ntime-1-n_b:Ntime-1] += m_n[:t0]

                # After
                n_a = r_n[t0:].shape[0]
                trials_by_cond[cond]['r'][Ntime-1:Ntime-1+n_a] += r_n[t0:]
                trials_by_cond[cond]['n'][Ntime-1:Ntime-1+n_a] += m_n[t0:]

        # Average
        for cond in trials_by_cond:
            trials_by_cond[cond] = utils.div(trials_by_cond[cond]['r'],
                                             trials_by_cond[cond]['n'])

        # Average
        for cond in trials_by_cond_sure:
            trials_by_cond_sure[cond] = utils.div(trials_by_cond_sure[cond]['r'],
                                                  trials_by_cond_sure[cond]['n'])

        return trials_by_cond, trials_by_cond_sure

    Ts_stimulus, Ts_stimulus_sure = get_wager(lambda epochs, t_choice: epochs['stimulus'][0] - 1)
    Ts_sure, Ts_sure_sure = get_wager(lambda epochs, t_choice: epochs['sure'][0] - 1)
    Ts_choice, Ts_choice_sure = get_wager(lambda epochs, t_choice: t_choice)

    #=====================================================================================
    # Plot
    #=====================================================================================

    lw = kwargs.get('lw', 1.25)
    dashes = kwargs.get('dashes', [3, 1.5])

    in_opp_colors = {-1: '0.6', +1: 'k'}

    def plot_noTs(noTs, plot, unit, tmin, tmax):
        w, = np.where((tmin <= time_a) & (time_a <= tmax))
        t = time_a[w]
        yall = [[1]]
        for lr in noTs:
            color = in_opp_colors[lr*preferred_targets[unit]]
            y = noTs[lr][w,unit]
            plot.plot(t, y, color=color, lw=lw)
            yall.append(y)

        plot.xlim(tmin, tmax)
        plot.xticks([0, tmax])
        plot.lim('y', yall, lower=0)

        return yall

    def plot_Ts(Ts, Ts_sure, plot, unit, tmin, tmax):
        w, = np.where((tmin <= time_a) & (time_a <= tmax))
        t = time_a[w]
        yall = [[1]]
        for lr in Ts:
            color = in_opp_colors[lr*preferred_targets[unit]]
            y = Ts[lr][w,unit]
            plot.plot(t, y, color=color, lw=lw)
            yall.append(y)
        for lr in Ts_sure:
            color = in_opp_colors[lr*preferred_targets[unit]]
            y = Ts_sure[lr][w,unit]
            plot.plot(t, y, color=color, lw=lw, linestyle='--', dashes=dashes)
            yall.append(y)

        plot.xlim(tmin, tmax)
        plot.xticks([0, tmax])
        plot.lim('y', yall, lower=0)

        return yall

    if unit is not None:
        y = []

        tmin = kwargs.get('noTs-stimulus-tmin', -100)
        tmax = kwargs.get('noTs-stimulus-tmax', 700)
        y += plot_noTs(noTs_stimulus, plots['noTs-stimulus'], unit, tmin, tmax)

        tmin = kwargs.get('noTs-choice-tmin', -500)
        tmax = kwargs.get('noTs-choice-tmax', 0)
        y += plot_noTs(noTs_choice, plots['noTs-choice'], unit, tmin, tmax)

        tmin = kwargs.get('Ts-stimulus-tmin', -100)
        tmax = kwargs.get('Ts-stimulus-tmax', 700)
        y += plot_Ts(Ts_stimulus, Ts_stimulus_sure, plots['Ts-stimulus'], unit, tmin, tmax)

        tmin = kwargs.get('Ts-sure-tmin', -200)
        tmax = kwargs.get('Ts-sure-tmax', 700)
        y += plot_Ts(Ts_sure, Ts_sure_sure, plots['Ts-sure'], unit, tmin, tmax)

        tmin = kwargs.get('Ts-choice-tmin', -500)
        tmax = kwargs.get('Ts-choice-tmax', 0)
        y += plot_Ts(Ts_choice, Ts_choice_sure, plots['Ts-choice'], unit, tmin, tmax)

        return y
    else:
        name = plots
        for unit in xrange(N):
            w = utils.mm_to_inch(174)
            r = 0.35
            fig = Figure(w=w, r=r)

            x0 = 0.09
            y0 = 0.15

            w = 0.13
            h = 0.75

            dx = 0.05
            DX = 0.08

            fig.add('noTs-stimulus', [x0, y0, w, h])
            fig.add('noTs-choice', [fig[-1].right+dx, y0, w, h])
            fig.add('Ts-stimulus', [fig[-1].right+DX, y0, w, h])
            fig.add('Ts-sure', [fig[-1].right+dx, y0, w, h])
            fig.add('Ts-choice', [fig[-1].right+dx, y0, w, h])

            #-----------------------------------------------------------------------------

            y = []

            plot = fig['noTs-stimulus']
            y += plot_noTs(noTs_stimulus, plot, unit, -100, 700)
            plot.vline(0)

            plot = fig['noTs-choice']
            y += plot_noTs(noTs_choice, plot, unit, -500, 200)
            plot.vline(0)

            plot = fig['Ts-stimulus']
            y += plot_Ts(Ts_stimulus, Ts_stimulus_sure, plot, unit, -100, 700)
            plot.vline(0)

            plot = fig['Ts-sure']
            y += plot_Ts(Ts_sure, Ts_sure_sure, plot, unit, -200, 700)
            plot.vline(0)

            plot = fig['Ts-choice']
            y += plot_Ts(Ts_choice, Ts_choice_sure, plot, unit, -500, 200)
            plot.vline(0)

            for plot in fig.plots.values():
                plot.lim('y', y, lower=0)

            #-----------------------------------------------------------------------------

            fig.save(name+'_{}{:03d}'.format(network, unit))
fig.close()
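#=========================================================================================
# Usage sketch (illustrative only, not part of the original analysis): a minimal example
# of calling sort() in single-unit mode. It assumes `plots` is a dict of panels keyed by
# the alignment names used in the function above; the panel geometry, unit index, and
# trials-file path below are hypothetical placeholders.
#=========================================================================================

if __name__ == '__main__':
    from pyrl.figtools import Figure

    example_fig = Figure()
    example_plots = {
        'noTs-stimulus': example_fig.add([0.09, 0.15, 0.13, 0.75]),
        'noTs-choice':   example_fig.add([0.27, 0.15, 0.13, 0.75]),
        'Ts-stimulus':   example_fig.add([0.45, 0.15, 0.13, 0.75]),
        'Ts-sure':       example_fig.add([0.63, 0.15, 0.13, 0.75]),
        'Ts-choice':     example_fig.add([0.81, 0.15, 0.13, 0.75])
        }

    # 'trials_activity.pkl' and unit 0 stand in for a real activity file and unit index.
    sort('trials_activity.pkl', example_plots, unit=0, network='p')

    example_fig.save('sorted_unit000')
    example_fig.close()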
paperpath = os.path.join(parent, 'paper')
timespath = os.path.join(paperpath, 'times')
figspath  = os.path.join(paperpath, 'work', 'figs')
for path in [paperpath, timespath, figspath]:
    utils.mkdir_p(path)

modelname = sys.argv[1]

#=========================================================================================
# Figure
#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.48
fig = Figure(w=w, r=r, axislabelsize=11, labelpadx=6, labelpady=6,
             thickness=0.9, ticksize=5, ticklabelsize=9, ticklabelpad=3)

x0 = 0.11
y0 = 0.18
w  = 0.36
h  = 0.71
DX = 0.14

fig.add('reward',  [x0, y0, w, h])
fig.add('correct', [fig[-1].right+DX, y0, w, h])

T = 1000

#=========================================================================================
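#=========================================================================================
# Illustrative sketch (not in the original script): one way learning-curve data could be
# drawn onto the 'reward' and 'correct' panels defined above. The arrays are hypothetical
# placeholders for the real training history; np and Figure are assumed to be imported at
# the top of the file.
#=========================================================================================

trials_seen = np.arange(0, 101) * T                    # hypothetical trial counts
mean_reward = 1 - np.exp(-trials_seen / (20.0 * T))    # hypothetical learning curve

plot = fig['reward']
plot.plot(trials_seen / T, mean_reward, color=Figure.colors('blue'), lw=1.5)
plot.xlim(0, 100)
plot.lim('y', [mean_reward], lower=0)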
from __future__ import absolute_import, division

import os

import numpy as np

from pyrl import fittools, runtools, tasktools, utils
from pyrl.figtools import Figure

#/////////////////////////////////////////////////////////////////////////////////////////

colors = {
    'v':  Figure.colors('blue'),
    'a':  Figure.colors('green'),
    'va': Figure.colors('orange')
    }

#/////////////////////////////////////////////////////////////////////////////////////////

def psychometric(trialsfile, plot, **kwargs):
    # Load trials
    trials, A, R, M, perf = utils.load(trialsfile)

    decision_by_freq = {}
    high_by_freq     = {}
    for n, trial in enumerate(trials):
        mod  = trial['mod']
        freq = trial['freq']
        decision_by_freq.setdefault(mod, {})
        high_by_freq.setdefault(mod, {})
import numpy as np

from pyrl.figtools import Figure

w = 174/25.4
r = 0.6
h = r*w
fig = Figure(w=w, h=h)

#=========================================================================================
# Supervised learning
#=========================================================================================

#=========================================================================================
# Reinforcement learning
#=========================================================================================

#=========================================================================================

fig.save()
# analysis/rdm
rdm_analysisfile = os.path.join(analysispath, 'rdm.py')
rdm_analysis     = imp.load_source('rdm_analysis', rdm_analysisfile)

# models/rdm_fixed
rdm_fixed_modelfile = os.path.join(modelspath, 'rdm_fixed.py')
rdm_fixed_model     = imp.load_source('rdm_fixed_model', rdm_fixed_modelfile)

rdm_fixed_behavior = os.path.join(trialspath, 'rdm_fixed', 'trials_behavior.pkl')
rdm_fixed_activity = os.path.join(trialspath, 'rdm_fixed', 'trials_activity.pkl')

#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.5
fig = Figure(w=w, r=r)

x0 = 0.12
y0 = 0.15
w  = 0.37
h  = 0.8
dx = 1.3*w

fig.add('on-stimulus', [x0, y0, w, h])
fig.add('on-choice',   [x0+dx, y0, w, h])

#=========================================================================================

kwargs = {'on-stimulus-tmin': -200, 'on-stimulus-tmax': 600,
          'on-choice-tmin': -400, 'on-choice-tmax': 0,
          'colors': 'kiani', 'dashes': [3.5, 2]}
analysis = imp.load_source('padoa_schioppa2006_analysis', analysisfile)

# Model
modelfile = os.path.join(modelspath, 'padoa_schioppa2006.py')
model     = imp.load_source('model', modelfile)

trialsfile_b = os.path.join(trialspath, 'padoa_schioppa2006', 'trials_behavior.pkl')
trialsfile_e = os.path.join(trialspath, 'padoa_schioppa2006', 'trials_electrophysiology.pkl')

#=========================================================================================
# Figure
#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.3
h = r*w
fig = Figure(w=w, h=h, axislabelsize=8, ticklabelsize=6, labelpadx=4.5, labelpady=4)

x0 = 0.07
y0 = 0.27

DX = 0.08

w_choice = 0.2
h_choice = 0.63

w_sorted = 0.17
h_sorted = h_choice

dx = 0.055

plots = {
    'choice':    fig.add([x0, y0, w_choice, h_choice]),
    'sorted-cv': fig.add([x0+w_choice+DX, y0, w_sorted, h_sorted]),
here   = utils.get_here(__file__)
parent = utils.get_parent(here)

paperpath = os.path.join(parent, 'paper')
timespath = os.path.join(paperpath, 'times')
figspath  = os.path.join(paperpath, 'work', 'figs')

modelname = sys.argv[1]

#=========================================================================================
# Figure
#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.48
fig = Figure(w=w, r=r, axislabelsize=11, labelpadx=6, labelpady=6,
             thickness=0.9, ticksize=5, ticklabelsize=9, ticklabelpad=3)

x0 = 0.11
y0 = 0.18
w  = 0.24
h  = 0.71
DX = 0.07

fig.add('Wrec',        [x0, y0, w, h])
fig.add('Wrec_lambda', [fig[-1].right+DX, y0, w, h])
fig.add('Wrec_gamma',  [fig[-1].right+DX, y0, w, h])

#=========================================================================================
modelspath = os.path.join(parent, 'examples', 'models')

# analysis/multisensory
multisensory_analysisfile = os.path.join(analysispath, 'multisensory.py')
multisensory_analysis     = imp.load_source('multisensory_analysis', multisensory_analysisfile)

# models/multisensory
multisensory_modelfile = os.path.join(modelspath, 'multisensory.py')
multisensory_model     = imp.load_source('multisensory_model', multisensory_modelfile)

multisensory_behavior = os.path.join(trialspath, 'multisensory', 'trials_behavior.pkl')
multisensory_activity = os.path.join(trialspath, 'multisensory', 'trials_activity.pkl')

#=========================================================================================

fig  = Figure()
plot = fig.add()

sigmas = []
for s in [''] + ['_s'+str(i) for i in xrange(101, 106)]:
    behaviorfile = os.path.join(trialspath, 'multisensory'+s, 'trials_behavior.pkl')
    sigmas.append(multisensory_analysis.psychometric(behaviorfile, plot))

fig.save()

#=========================================================================================

print("")
for i, (sigma_v, sigma_a, sigma_va) in enumerate(sigmas):
    if i == 0:
        print(r"\textbf{{{:.3f}}} & \textbf{{{:.3f}}} & \textbf{{{:.3f}}} & \textbf{{{:.3f}}} & \textbf{{{:.3f}}} \\"
rdm_analysisfile = os.path.join(analysispath, 'rdm.py')
rdm_analysis     = imp.load_source('rdm_analysis', rdm_analysisfile)

# models/rdm_fixed
rdm_fixed_modelfile = os.path.join(modelspath, 'rdm_fixed.py')
rdm_fixed_model     = imp.load_source('rdm_fixed_model', rdm_fixed_modelfile)

rdm_fixed_behavior = os.path.join(trialspath, 'rdm_fixed', 'trials_behavior.pkl')
rdm_fixed_activity = os.path.join(trialspath, 'rdm_fixed', 'trials_activity.pkl')

#=========================================================================================

w = utils.mm_to_inch(174)
r = 0.29
h = r*w
fig = Figure(w=w, h=h, labelpadx=4.5, labelpady=4.5)

#=========================================================================================

w_task     = 0.26
w_behavior = 0.205
w_activity = 0.205

h        = 0.67
h_epochs = 0.2
h_input  = 0.14

x0 = 0.145
DX = 0.085
dx = 0.075