if list_item in keys:
    data[list_item] = list(data[list_item])

clusters = data['clusters']
stas = data['stas']
filter_length = data['filter_length']
stx_w = data['stx_w']
exp_name = data['exp_name']
stimname = data['stimname']
stimname += '_corrected'
frame_duration = data['frame_duration']
quals = data['quals']

clusterids = plf.clusters_to_ids(clusters)

# Determine frame size so that the total frame covers
# an area large enough, i.e. 2*700 um
t = np.arange(filter_length) * frame_duration * 1000
vscale = int(stas[0].shape[0] * stx_w * px_size / 1000)

for i in range(clusters.shape[0]):
    sta = stas[i]

    # Symmetric color limits around zero for the diverging colormap
    vmax = np.max(np.abs(sta))
    vmin = -vmax
    plt.figure(figsize=(6, 15))
    ax = plt.subplot(111)
    im = ax.imshow(sta, cmap='RdBu', vmin=vmin,
                   vmax=vmax)
def stripesurround(exp_name, stimnrs):
    exp_dir = iof.exp_dir_fixer(exp_name)

    if isinstance(stimnrs, int):
        stimnrs = [stimnrs]

    for stimnr in stimnrs:
        data = iof.load(exp_name, stimnr)

        _, metadata = asc.read_spikesheet(exp_dir)
        px_size = metadata['pixel_size(um)']

        clusters = data['clusters']
        stas = data['stas']
        max_inds = data['max_inds']
        filter_length = data['filter_length']
        stx_w = data['stx_w']
        exp_name = data['exp_name']
        stimname = data['stimname']
        frame_duration = data['frame_duration']
        quals = data['quals']

        clusterids = plf.clusters_to_ids(clusters)

        # Number of stixels corresponding to 700 um; the cut-out
        # region below spans 2*fsize.
        fsize = int(700 / (stx_w * px_size))
        t = np.arange(filter_length) * frame_duration * 1000
        vscale = fsize * stx_w * px_size

        #%%
        cs_inds = np.empty(clusters.shape[0])
        polarities = np.empty(clusters.shape[0])

        savepath = os.path.join(exp_dir, 'data_analysis', stimname)

        for i in range(clusters.shape[0]):
            sta = stas[i]
            max_i = max_inds[i]

            sta, max_i = msc.cutstripe(sta, max_i, fsize * 2)

            plt.figure(figsize=(12, 10))
            ax = plt.subplot(121)
            plf.stashow(sta, ax)

            # Isolate the time point from which the fit will
            # be obtained
            fitv = sta[:, max_i[1]]
            # Make a space vector
            s = np.arange(fitv.shape[0])

            if np.max(fitv) != np.max(np.abs(fitv)):
                onoroff = -1
            else:
                onoroff = 1
            polarities[i] = onoroff

            # Determine the peak values for center and surround
            # to give as initial parameters for curve fitting
            centerpeak = -onoroff * np.max(fitv * onoroff)
            surroundpeak = -onoroff * np.max(fitv * -onoroff)

            # Define initial guesses for the center and surround gaussians
            # First set of values are for center, second for surround.
            p_initial = [centerpeak, max_i[0], 2, surroundpeak, max_i[0], 4]
            bounds = ([0, -np.inf, -np.inf, 0, -np.inf, -np.inf], np.inf)

            try:
                popt, _ = curve_fit(centersurround_onedim, s, fitv,
                                    p0=p_initial, bounds=bounds)
            except ValueError as e:
                if str(e) == "`x0` is infeasible.":
                    print(e)
                    popt, _ = curve_fit(onedgauss, s, onoroff * fitv,
                                        p0=p_initial[:3])
                    popt = np.append(popt, [0, popt[1], popt[2]])
                else:
                    raise

            fit = centersurround_onedim(s, *popt)

            # Avoid dividing by zero when calculating center-surround index
            if popt[3] > 0:
                csi = popt[0] / popt[3]
            else:
                csi = 0
            cs_inds[i] = csi

            ax = plt.subplot(122)
            plf.spineless(ax)
            ax.set_yticks([])
            # We need to flip the vertical axis to match
            # with the STA next to it
            plt.plot(onoroff * fitv, -s, label='Data')
            plt.plot(onoroff * fit, -s, label='Fit')
            plt.axvline(0, linestyle='dashed', alpha=.5)
            plt.title(f'Center: a: {popt[0]:4.2f}, μ: {popt[1]:4.2f},'
                      f' σ: {popt[2]:4.2f}\n'
                      f'Surround: a: {popt[3]:4.2f}, μ: {popt[4]:4.2f},'
                      f' σ: {popt[5]:4.2f}'
                      f'\n CS index: {csi:4.2f}')
            plt.subplots_adjust(top=.82)
            plt.suptitle(f'{exp_name}\n{stimname}\n{clusterids[i]}')

            os.makedirs(os.path.join(savepath, 'stripesurrounds'),
                        exist_ok=True)
            plt.savefig(os.path.join(savepath, 'stripesurrounds',
                                     clusterids[i] + '.svg'))
            plt.close()

        data.update({'cs_inds': cs_inds, 'polarities': polarities})
        np.savez(os.path.join(savepath, f'{stimnr}_data.npz'), **data)
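# The curve fit above relies on centersurround_onedim and onedgauss, which are
# defined elsewhere in the codebase and not shown in this section. Below is a
# minimal sketch of what they are assumed to look like, inferred from the
# parameter order in p_initial (center amplitude, mean and sigma, followed by
# the same three values for the surround); the sign conventions and exact
# definitions of the real functions may differ.
def onedgauss_sketch(x, a, mu, sigma):
    # A single one-dimensional Gaussian.
    return a * np.exp(-(x - mu)**2 / (2 * sigma**2))


def centersurround_onedim_sketch(x, a_c, mu_c, sigma_c, a_s, mu_s, sigma_s):
    # Difference of two one-dimensional Gaussians: center minus surround.
    return (onedgauss_sketch(x, a_c, mu_c, sigma_c)
            - onedgauss_sketch(x, a_s, mu_s, sigma_s))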
import numpy as np
import matplotlib.pyplot as plt

import iofuncs as iof
import plotfuncs as plf
import analysis_scripts as asc
from scipy import stats
import texplot

data = np.load('/home/ycan/Documents/thesis/analysis_auxillary_files/'
               'thesis_csiplotting.npz')

cells = []
include = data['include']
colors = data['colors']
colorcategories = data['colorcategories']

maxts = np.empty([2, 1])

for i, exp_name in enumerate(['20180118', '20180124', '20180207']):
    clusterids = plf.clusters_to_ids(asc.read_spikesheet(exp_name)[0])
    cells.extend([(exp_name, cl_id) for cl_id in clusterids])

    if '20180124' in exp_name or '20180207' in exp_name:
        stripeflicker = [6, 12]
    elif '20180118' in exp_name:
        stripeflicker = [7, 14]

    a = np.empty([2, len(clusterids)])
    for j, stimnr in enumerate(stripeflicker):
        data = iof.load(exp_name, stimnr)
        a[j, :] = np.array(data['max_inds'])[:, 1]
    maxts = np.hstack((maxts, a))

maxts = maxts[:, 1:]  # Remove the placeholder column used for initialization
maxts = maxts * .0167 * 1000  # Convert to milliseconds (~16.7 ms per frame)
maxts_f = maxts[:, include]
def saccadegratingsanalyzer(exp_name, stim_nr):
    """
    Analyze and save responses to saccadegratings stimulus.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]
    stimname = iof.getstimname(exp_dir, stim_nr)
    clusters, metadata = asc.read_spikesheet(exp_dir)
    clusterids = plf.clusters_to_ids(clusters)

    refresh_rate = metadata['refresh_rate']

    parameters = asc.read_parameters(exp_name, stim_nr)
    if parameters['stimulus_type'] != 'saccadegrating':
        raise ValueError('Unexpected stimulus type: '
                         f'{parameters["stimulus_type"]}')
    fixfr = parameters.get('fixationframes', 80)
    sacfr = parameters.get('saccadeframes', 10)
    barwidth = parameters.get('barwidth', 40)
    averageshift = parameters.get('averageshift', 2)
    # The seed is hard-coded in the Stimulator
    seed = -10000

    ftimes = asc.readframetimes(exp_dir, stim_nr)
    ftimes.resize(int(ftimes.shape[0] / 2), 2)
    nfr = ftimes.size

    # Re-generate the stimulus
    # Amplitude of the shift and the transition type (saccade or grey)
    # are determined based on the output of ran1
    randnrs = np.array(randpy.ran1(seed, nfr)[0])

    # Separate the amplitude and transitions into two arrays
    stimpos = (4 * randnrs[::2]).astype(int)
    # Transition variable, determines whether the grating is moving during
    # the transition or only a grey screen is presented.
    trans = np.array(randnrs[1::2] > 0.5)

    # Record before and after positions in a single array and remove
    # the first element b/c there is no before value
    stimposx = np.append(0, stimpos)[:-1]
    stimtr = np.stack((stimposx, stimpos), axis=1)[1:]
    trans = trans[:-1]

    saccadetr = stimtr[trans, :]
    greytr = stimtr[~trans, :]

    # Create a time vector with defined temporal bin size
    tstep = 0.01  # Bin size is defined here, unit is seconds
    trialduration = (fixfr + sacfr) / refresh_rate
    nrsteps = int(trialduration / tstep) + 1
    t = np.linspace(0, trialduration, num=nrsteps)

    # Collect saccade beginning time for each trial
    trials = ftimes[1:, 0]
    sacftimes = trials[trans]
    greyftimes = trials[~trans]

    sacspikes = np.empty((clusters.shape[0], sacftimes.shape[0], t.shape[0]))
    greyspikes = np.empty((clusters.shape[0], greyftimes.shape[0],
                           t.shape[0]))

    # Collect all the psth in one array. The order is
    # transition type, cluster index, start pos, target pos, time
    psth = np.zeros((2, clusters.shape[0], 4, 4, t.size))

    for i, (chid, clid, _) in enumerate(clusters):
        spiketimes = asc.read_raster(exp_dir, stim_nr, chid, clid)
        for j, _ in enumerate(sacftimes):
            sacspikes[i, j, :] = asc.binspikes(spiketimes, sacftimes[j] + t)
        for k, _ in enumerate(greyftimes):
            greyspikes[i, k, :] = asc.binspikes(spiketimes, greyftimes[k] + t)

    # Sort trials according to the transition type
    # nton[i][j] contains the indexes of trials where saccade was i to j
    nton_sac = [[[] for _ in range(4)] for _ in range(4)]
    for i, trial in enumerate(saccadetr):
        nton_sac[trial[0]][trial[1]].append(i)
    nton_grey = [[[] for _ in range(4)] for _ in range(4)]
    for i, trial in enumerate(greytr):
        nton_grey[trial[0]][trial[1]].append(i)

    savedir = os.path.join(exp_dir, 'data_analysis', stimname)
    os.makedirs(savedir, exist_ok=True)

    for i in range(clusters.shape[0]):
        fig, axes = plt.subplots(4, 4, sharex=True, sharey=True,
                                 figsize=(8, 8))
        for j in range(4):
            for k in range(4):
                # Start from bottom left corner
                ax = axes[3 - j][k]
                # Average all transitions of one type
                psth_sac = sacspikes[i, nton_sac[j][k], :].mean(axis=0)
                psth_grey = greyspikes[i, nton_grey[j][k], :].mean(axis=0)
                # Convert to spikes per second
                psth_sac = psth_sac / tstep
                psth_grey = psth_grey / tstep
                psth[0, i, j, k, :] = psth_sac
                psth[1, i, j, k, :] = psth_grey

                ax.axvline(sacfr / refresh_rate * 1000, color='red',
                           linestyle='dashed', linewidth=.5)
                ax.plot(t * 1000, psth_sac, label='Saccadic trans.')
                ax.plot(t * 1000, psth_grey, label='Grey trans.')
                ax.set_yticks([])
                ax.set_xticks([])

                # Cosmetics
                plf.spineless(ax)
                if j == k:
                    ax.set_facecolor((1, 1, 0, 0.15))
                if j == 0:
                    ax.set_xlabel(f'{k}')
                if k == 3:
                    ax.legend(fontsize='xx-small', loc=0)
                if k == 0:
                    ax.set_ylabel(f'{j}')

        # Add an encompassing label for starting and target positions
        ax0 = fig.add_axes([0.08, 0.08, .86, .86])
        plf.spineless(ax0)
        ax0.patch.set_alpha(0)
        ax0.set_xticks([])
        ax0.set_yticks([])
        ax0.set_ylabel('Start position')
        ax0.set_xlabel('Target position')

        plt.suptitle(f'{exp_name}\n{stimname}\n{clusterids[i]}')
        plt.savefig(os.path.join(savedir, f'{clusterids[i]}.svg'))
        plt.close()

    # Save results
    keystosave = [
        'fixfr', 'sacfr', 't', 'averageshift', 'barwidth', 'seed', 'trans',
        'saccadetr', 'greytr', 'nton_sac', 'nton_grey', 'stimname',
        'sacspikes', 'greyspikes', 'psth', 'nfr', 'parameters'
    ]
    data_in_dict = {}
    for key in keystosave:
        data_in_dict[key] = locals()[key]

    np.savez(os.path.join(savedir, str(stim_nr) + '_data'), **data_in_dict)
    print(f'Analysis of {stimname} completed.')
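# A minimal usage sketch for the analyzer above; the experiment name and
# stimulus number are hypothetical placeholders. The function writes its
# results to <exp_dir>/data_analysis/<stimname>/<stim_nr>_data.npz, which can
# be read back with np.load (allow_pickle is needed for the saved parameters
# dictionary).
if __name__ == '__main__':
    saccadegratingsanalyzer('20180124', 17)
    example_dir = os.path.join(iof.exp_dir_fixer('20180124'), 'data_analysis',
                               iof.getstimname('20180124', 17))
    saved = np.load(os.path.join(example_dir, '17_data.npz'),
                    allow_pickle=True)
    # psth is indexed as [transition type, cluster, start pos, target pos, time]
    print(saved['psth'].shape)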
def allfff(exp_name, stim_nrs):
    """
    Plot all of the full field flicker STAs on top of each other
    to see the progression of the cell responses and their firing rates.
    """
    if isinstance(stim_nrs, int) or len(stim_nrs) <= 1:
        print('Multiple full field flicker stimuli expected, '
              'allfff analysis will be skipped.')
        return

    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]

    # Sanity check to ensure we are comparing the same stimuli and parameters
    prev_parameters = {}
    for i in stim_nrs:
        pars = asc.read_parameters(exp_name, i)
        currentfname = pars.pop('filename')
        if len(prev_parameters) == 0:
            prev_parameters = pars
        for k1, k2 in zip(pars.keys(), prev_parameters.keys()):
            if pars[k1] != prev_parameters[k2]:
                raise ValueError(
                    f'Parameters for {currentfname} do not match!\n'
                    f'{k1}:{pars[k1]}\n{k2}:{prev_parameters[k2]}')

    stimnames = []
    for j, stim in enumerate(stim_nrs):
        data = iof.load(exp_name, stim)
        stas = data['stas']
        clusters = data['clusters']
        filter_length = data['filter_length']
        frame_duration = data['frame_duration']
        if j == 0:
            all_stas = np.zeros((clusters.shape[0], filter_length,
                                 len(stim_nrs)))
            all_spikenrs = np.zeros((clusters.shape[0], len(stim_nrs)))
        all_stas[:, :, j] = stas
        all_spikenrs[:, j] = data['spikenrs']
        stimnames.append(iof.getstimname(exp_name, stim))

    t = np.linspace(0, frame_duration * filter_length, num=filter_length)

    #%%
    clusterids = plf.clusters_to_ids(clusters)
    for i in range(clusters.shape[0]):
        fig = plt.figure()
        ax1 = plt.subplot(111)
        ax1.plot(t, all_stas[i, :, :])
        ax1.set_xlabel('Time [ms]')
        ax1.legend(stimnames, fontsize='x-small')

        ax2 = fig.add_axes([.65, .15, .2, .2])
        for j in range(len(stim_nrs)):
            ax2.plot(j, all_spikenrs[i, j], 'o')
        ax2.set_ylabel('# spikes', fontsize='small')
        ax2.set_xticks([])
        ax2.patch.set_alpha(0)

        plf.spineless(ax1, 'tr')
        plf.spineless(ax2, 'tr')
        plt.suptitle(f'{exp_name}\n {clusterids[i]}')

        plotpath = os.path.join(exp_dir, 'data_analysis', 'all_fff')
        if not os.path.isdir(plotpath):
            os.makedirs(plotpath, exist_ok=True)
        plt.savefig(os.path.join(plotpath, clusterids[i]) + '.svg',
                    format='svg', dpi=300)
        plt.close()
    print('Plotted full field flicker STAs together from all stimuli.')
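# A minimal usage sketch for allfff; the stimulus numbers are hypothetical
# placeholders and are assumed to refer to full field flicker stimuli whose
# STAs have already been analyzed, so that iof.load can find them.
if __name__ == '__main__':
    allfff('20180124', [3, 8, 13])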