Code example #1
def frametimesfrommat(exp_name):
    """
    Extract frame times from .mat files. Needed for analyzing data
    from other people, where the binary files containing the frame time
    pulses are usually not available.

    The converted frametime files are corrected for monitor delay.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)

    _, metadata = asc.read_spikesheet(exp_dir)
    monitor_delay = metadata['monitor_delay(s)']

    for i in range(1, 100):
        try:
            name = iof.getstimname(exp_dir, i)
        except IndexError as e:
            if str(e).startswith('Stimulus'):
                continue
            else:
                raise

        matfile = os.path.join(exp_dir, 'frametimes',
                               name + '_frametimings.mat')
        # Check for zero padded name
        if not os.path.isfile(matfile):
            name = '0' + name
            matfile = os.path.join(exp_dir, 'frametimes',
                                   name + '_frametimings.mat')
        try:
            f = scipy.io.matlab.loadmat(matfile)
            ftimes = f['ftimes'][0, :]
            if 'ftimesoff' in f.keys():
                ftimes_off = f['ftimesoff'][0, :]
            else:
                ftimes_off = None
        except NotImplementedError:
            import h5py
            with h5py.File(matfile, mode='r') as f:
                ftimes = f['ftimes'][:]

                if 'ftimesoff' in f.keys():
                    ftimes_off = f['ftimesoff'][:]
                else:
                    ftimes_off = None

                if len(ftimes.shape) != 1:
                    ftimes = ftimes.flatten()
                    if ftimes_off is not None:
                        ftimes_off = ftimes_off.flatten()

        ftimes = ftimes + monitor_delay
        savedict = {'f_on': ftimes}
        if ftimes_off is not None:
            ftimes_off = ftimes_off + monitor_delay
            savedict.update({'f_off': ftimes_off})

        np.savez(os.path.join(exp_dir, 'frametimes', name + '_frametimes'),
                 **savedict)
        print(f'Converted and saved frametimes for {name}')
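A minimal usage sketch for the converter above (the experiment name is one used elsewhere in these examples); it shows where the result ends up and which keys the saved .npz file contains:

import os
import numpy as np
import iofuncs as iof

# Convert all .mat frametime files of this experiment (placeholder name)
frametimesfrommat('20180207')

# Each stimulus gets a <stimname>_frametimes.npz in the frametimes folder,
# holding the delay-corrected onsets under 'f_on' (and 'f_off' when present)
exp_dir = iof.exp_dir_fixer('20180207')
stimname = iof.getstimname(exp_dir, 1)
ft = np.load(os.path.join(exp_dir, 'frametimes', stimname + '_frametimes.npz'))
print(ft['f_on'][:5])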
Code example #2
def allinds(**kwargs):
    csi = np.empty([2, 0])
    colors = []
    bias = np.empty([2, 0])
    quals = np.empty([2, 0])
    cells = []
    for exp in ['20180118', '20180124', '20180207']:
        clusterids = plf.clusters_to_ids(asc.read_spikesheet(exp)[0])
        cells.extend([(exp, cl_id) for cl_id in clusterids])
        csi_r, colors_r, bias_r, quals_r = csindexchange(exp, **kwargs)
        csi = np.hstack((csi, csi_r))
        colors.extend(colors_r)
        bias = np.hstack((bias, bias_r))
        quals = np.hstack((quals, quals_r))
    return csi, colors, bias, quals, cells
Code example #3
File: stimulus.py Project: gollischlab/pymer
    def __init__(self, exp, stimnr, maxframes=None):
        self.exp = exp
        self.stimnr = stimnr
        self.maxframes = maxframes
        self.clusters, self.metadata = asc.read_spikesheet(self.exp)
        self.nclusters = self.clusters.shape[0]
        self.exp_dir = Path(iof.exp_dir_fixer(exp))
        self.exp_foldername = self.exp_dir.stem
        self.stimname = iof.getstimname(self.exp_dir, self.stimnr)
        self.clids = plf.clusters_to_ids(self.clusters)
        self.refresh_rate = self.metadata['refresh_rate']
        self.sampling_rate = self.metadata['sampling_freq']
        self.readpars()
        self.get_frametimings()
        self._getstimtype()

        self.stim_dir = self.exp_dir / 'data_analysis' / self.stimname
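Only __init__ is shown in this excerpt from stimulus.py; a hypothetical usage sketch, assuming the class is named Stimulus (the class statement itself is not shown):

# hypothetical class name; experiment and stimulus number are placeholders
st = Stimulus('20180207', 11, maxframes=None)
print(st.exp_foldername, st.stimname, st.nclusters, st.refresh_rate)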
Code example #4
 def __init__(self, exp, stimnr, maxframes=None):
     self.exp = exp
     self.stimnr = stimnr
     self.clusters, self.metadata = asc.read_spikesheet(self.exp)
     self.nclusters = self.clusters.shape[0]
     self.exp_dir = iof.exp_dir_fixer(exp)
     self.exp_foldername = os.path.split(self.exp_dir)[-1]
     self.stimname = iof.getstimname(exp, stimnr)
     #        self.get_frametimings()
     self._getstimtype()
     self.refresh_rate = self.metadata['refresh_rate']
     self.sampling_rate = self.metadata['sampling_freq']
     self.maxframes = maxframes
     if maxframes:
         self.maxframes_i = maxframes + 1
     else:
         self.maxframes_i = None
Code example #5
def clusters_metadata(folder):
    """
    Generate metadata dictionary to emulate asc.read_spikesheet behavior
    """
    def convert(val):
        """
        Cast from string to appropriate type
        """
        constructors = [int, float, str, bool]
        for c in constructors:
            try:
                return c(val)
            except ValueError:
                pass

    try:
        metadata = asc.read_spikesheet(asc.kilosorted_path(folder),
                                       onlymetadata=True)
        return metadata
    except FileNotFoundError:
        pass

    params = read_params(asc.kilosorted_path(folder))

    params_dict = {}

    for el in params:
        keyval = el.split('=')
        key, val = keyval[0].strip(), convert(keyval[1].strip())
        if key == 'sample_rate':
            # Naming is expected to be specifically sample_freq
            key = 'sample_freq'
            val = int(val)
        elif key == 'hp_filtered':
            val = val == 'True'
        params_dict.update({key: val})

    return params_dict
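The convert helper above tries each constructor in order; since str() accepts any input, boolean-looking values stay as strings, which is why hp_filtered is compared against the string 'True' afterwards. A quick sketch of that behaviour (the helper is redefined at module level here purely for illustration):

def convert(val):  # same logic as the nested helper above, lifted out for illustration
    for c in [int, float, str, bool]:
        try:
            return c(val)
        except ValueError:
            pass

print(convert('30000'))  # -> 30000, int() succeeds first
print(convert('0.5'))    # -> 0.5, int() fails, float() succeeds
print(convert('True'))   # -> 'True', kept as a string, hence the val == 'True' check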
Code example #6
def read_and_match_pars(exp, omb_stimnr, chk_stimnr):
    OMB_DEFAULT_TEXTURESIZE = np.array([800, 800])

    _, metadata = asc.read_spikesheet(exp)

    scr_x, scr_y = metadata['screen_width'], metadata['screen_height']

    #datao = iof.load(exp, omb_stimnr)
    #datac = iof.load(exp, checker_stimnr)

    parso = asc.read_parameters(exp, omb_stimnr)
    parsc = asc.read_parameters(exp, chk_stimnr)

    chk_stxw, chk_stxh = [parsc[key] for key in ['stixelwidth', 'stixelheight']]

    if (chk_stxw != chk_stxh or
       parsc['stimulus_type'] not in ['FrozenNoise', 'checkerflicker']):
        raise ValueError(f'{iof.getstimname(exp, chk_stimnr)} is not '
                         'checkerflicker!')

#    ckdim = scr_x//chk_stxw
#    omb_dim = np.array(OMB_DEFAULT_TEXTURESIZE)//parso['bgstixel']

    ombstx = parso['bgstixel']
    chkstx = chk_stxw

    # Calculate the difference between the dimensions for both stimuli in pixels
    px_diff = OMB_DEFAULT_TEXTURESIZE - np.array([scr_x, scr_y])

    # We divide by two because the texture is centered in the screen and
    # clipped.
    px_diff //= 2

    # Reverse so px_diff is in (y, x) order
    px_diff = px_diff[::-1]

    return ombstx, chkstx, px_diff
Code example #7
def savenpztomat(exp_name, savedir=None):
    """
    Convert frametime files in .npz to .mat for interoperability
    with MATLAB users.

    savedir:
        Directory to save the .mat files into. Defaults to the
        'frametimes' folder of the experiment.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)

    _, metadata = asc.read_spikesheet(exp_dir)
    monitor_delay = metadata['monitor_delay(s)']

    for i in range(1, 100):
        print(i)
        try:
            ft_on, ft_off = asc.readframetimes(exp_name, i, returnoffsets=True)
        except ValueError as e:
            if str(e).startswith('No frametimes'):
                break
            else:
                raise
        # Convert to milliseconds b/c that is the convention in MATLAB scripts
        ft_on = (ft_on - monitor_delay) * 1000
        ft_off = (ft_off - monitor_delay) * 1000

        stimname = iof.getstimname(exp_dir, i)

        if savedir is None:
            savedir = pjoin(exp_dir, 'frametimes')
        savename = pjoin(savedir, stimname)
        print(savename)
        scipy.io.savemat(savename + '_frametimings', {
            'ftimes': ft_on,
            'ftimes_offsets': ft_off
        },
                         appendmat=True)
Code example #8
spikecutoff = 1000
ratingcutoff = 4
staqualcutoff = 0
inner_b = 2
outer_b = 4

exp_name = '20180207'
stim_nr = 11

exp_dir = iof.exp_dir_fixer(exp_name)
stim_nr = str(stim_nr)

savefolder = 'surroundplots'

_, metadata = asc.read_spikesheet(exp_name)
px_size = metadata['pixel_size(um)']

data = iof.load(exp_name, stim_nr)

clusters = data['clusters']
stas = data['stas']
stx_h = data['stx_h']
exp_name = data['exp_name']
stimname = data['stimname']
max_inds = data['max_inds']
frame_duration = data['frame_duration']
filter_length = data['filter_length']
quals = data['quals'][-1, :]

spikenrs = np.array([a.sum() for a in data['all_spiketimes']])
Code example #9
def stripeflickeranalysis(exp_name, stim_nrs):
    exp_dir = iof.exp_dir_fixer(exp_name)

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]

    for stim_nr in stim_nrs:
        stimname = iof.getstimname(exp_name, stim_nr)

        clusters, metadata = asc.read_spikesheet(exp_dir)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        scr_width = metadata['screen_width']
        px_size = metadata['pixel_size(um)']

        stx_w = parameters['stixelwidth']
        stx_h = parameters['stixelheight']

        if (stx_h / stx_w) < 2:
            raise ValueError('Make sure the stimulus is stripeflicker.')

        sy = scr_width / stx_w
        if sy % 1 == 0:
            sy = int(sy)
        else:
            raise ValueError('sy is not an integer')

        nblinks = parameters['Nblinks']
        try:
            bw = parameters['blackwhite']
        except KeyError:
            bw = False

        try:
            seed = parameters['seed']
        except KeyError:
            seed = -10000

        if nblinks == 1:
            ft_on, ft_off = asc.readframetimes(exp_dir,
                                               stim_nr,
                                               returnoffsets=True)
            # Initialize empty array twice the size of one of them, assign
            # value from on or off to every other element.
            frametimings = np.empty(ft_on.shape[0] * 2, dtype=float)
            frametimings[::2] = ft_on
            frametimings[1::2] = ft_off
            # Set filter length so that temporal filter is ~600 ms.
            # The unit here is number of frames.
            filter_length = 40
        elif nblinks == 2:
            frametimings = asc.readframetimes(exp_dir, stim_nr)
            filter_length = 20
        else:
            raise ValueError('Unexpected value for nblinks.')

        # Omit everything that happens before the first 10 seconds
        cut_time = 10

        frame_duration = np.average(np.ediff1d(frametimings))
        total_frames = frametimings.shape[0]

        all_spiketimes = []
        # Store spike triggered averages in a list containing correct
        # shaped arrays
        stas = []

        for i in range(len(clusters[:, 0])):
            spiketimes = asc.read_raster(exp_dir, stim_nr, clusters[i, 0],
                                         clusters[i, 1])
            spikes = asc.binspikes(spiketimes, frametimings)
            all_spiketimes.append(spikes)
            stas.append(np.zeros((sy, filter_length)))

        if bw:
            randnrs, seed = randpy.ran1(seed, sy * total_frames)
            randnrs = [1 if i > .5 else -1 for i in randnrs]
        else:
            randnrs, seed = randpy.gasdev(seed, sy * total_frames)

        stimulus = np.reshape(randnrs, (sy, total_frames), order='F')
        del randnrs

        for k in range(filter_length, total_frames - filter_length + 1):
            stim_small = stimulus[:, k - filter_length + 1:k + 1][:, ::-1]
            for j in range(clusters.shape[0]):
                spikes = all_spiketimes[j]
                if spikes[k] != 0 and frametimings[k] > cut_time:
                    stas[j] += spikes[k] * stim_small

        max_inds = []
        spikenrs = np.array([spikearr.sum() for spikearr in all_spiketimes])

        quals = np.array([])

        for i in range(clusters.shape[0]):
            stas[i] = stas[i] / spikenrs[i]
            # Find the pixel with largest absolute value
            max_i = np.squeeze(
                np.where(np.abs(stas[i]) == np.max(np.abs(stas[i]))))
            # If there are multiple pixels with largest value,
            # take the first one.
            if max_i.shape != (2, ):
                try:
                    max_i = max_i[:, 0]
                # If max_i cannot be found just set it to zeros.
                except IndexError:
                    max_i = np.array([0, 0])

            max_inds.append(max_i)

            quals = np.append(quals, asc.staquality(stas[i]))

        savefname = str(stim_nr) + '_data'
        savepath = pjoin(exp_dir, 'data_analysis', stimname)

        exp_name = os.path.split(exp_dir)[-1]

        if not os.path.isdir(savepath):
            os.makedirs(savepath, exist_ok=True)
        savepath = os.path.join(savepath, savefname)

        keystosave = [
            'stas', 'max_inds', 'clusters', 'sy', 'frame_duration',
            'all_spiketimes', 'stimname', 'total_frames', 'stx_w', 'spikenrs',
            'bw', 'quals', 'nblinks', 'filter_length', 'exp_name'
        ]
        data_in_dict = {}
        for key in keystosave:
            data_in_dict[key] = locals()[key]

        np.savez(savepath, **data_in_dict)
        print(f'Analysis of {stimname} completed.')
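In the nblinks == 1 branch above, the onset and offset pulse trains are merged into a single timing vector by interleaving. A standalone sketch of that interleaving with toy arrays:

import numpy as np

ft_on = np.array([0.0, 0.2, 0.4])
ft_off = np.array([0.1, 0.3, 0.5])

frametimings = np.empty(ft_on.shape[0] * 2, dtype=float)
frametimings[::2] = ft_on    # even slots take the onsets
frametimings[1::2] = ft_off  # odd slots take the offsets
print(frametimings)          # [0.  0.1 0.2 0.3 0.4 0.5]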
Code example #10
File: cell_class.py Project: gollischlab/pymer
 def __init__(self, exp_path):
     self.exp_path = exp_path
     self.ods_extracted = asc.read_spikesheet(exp_path)
Code example #11

#%%
# If the script is being imported from elsewhere to use the functions, do not run the simulation
if __name__ == '__main__':

    # Using real filter and stimulus
    if True:
        import genlinmod as glm
        import iofuncs as iof
        import analysis_scripts as asc

        expstim = ('20180802', 1)

        data = iof.load(*expstim)
        _, metadata = asc.read_spikesheet(expstim[0])
        sta = data['stas'][3]

        filter_length = sta.shape[0]
        frame_rate = metadata['refresh_rate']
        time_res = 1 / frame_rate
        tstop = data['total_frames'] * time_res
        t = np.arange(0, tstop, time_res)
        k_in = sta

        stim = glm.loadstim(*expstim)

    else:
        filter_length = 40
        frame_rate = 60
        time_res = (1 / frame_rate)
Code example #12
def csindexchange(exp_name, onoffcutoff=.5, qualcutoff=9):
    """
    Plots the change in center surround indexes in different light
    levels. Also classifies based on ON-OFF index from the onoffsteps
    stimulus at the matching light level.
    """

    # For now there are only three experiments with the
    # different light levels and the indices of stimuli
    # are different. Automating this would be tricky and the ROI
    # is just not enough to justify it, so they are hard coded.
    if '20180124' in exp_name or '20180207' in exp_name:
        stripeflicker = [6, 12, 17]
        onoffs = [3, 8, 14]
    elif '20180118' in exp_name:
        stripeflicker = [7, 14, 19]
        onoffs = [3, 10, 16]

    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]
    clusternr = asc.read_spikesheet(exp_name)[0].shape[0]

    # Collect all CS indices, on-off indices and quality scores
    csinds = np.zeros((3, clusternr))
    quals = np.zeros((3, clusternr))

    onoffinds = np.zeros((3, clusternr))
    for i, stim in enumerate(onoffs):
        onoffinds[i, :] = iof.load(exp_name, stim)['onoffbias']

    for i, stim in enumerate(stripeflicker):
        data = iof.load(exp_name, stim)
        quals[i, :] = data['quals']
        csinds[i, :] = data['cs_inds']

    csinds_f = np.copy(csinds)
    quals_f = np.copy(quals)
    onoffbias_f = np.copy(onoffinds)

    # Filter them according to the quality cutoff value
    # and set excluded ones to NaN
    for j in range(quals.shape[1]):
        if not np.all(quals[:, j] > qualcutoff):
            quals_f[:, j] = np.nan
            csinds_f[:, j] = np.nan
            onoffbias_f[:, j] = np.nan

    # Define the color for each point depending on each cell's ON-OFF index
    # by appending the color name in an array.
    colors = []
    for j in range(onoffbias_f.shape[1]):
        if np.all(onoffbias_f[:, j] > onoffcutoff):
            # If it stays ON throughout
            colors.append('blue')
        elif np.all(onoffbias_f[:, j] < -onoffcutoff):
            # If it stays OFF throughout
            colors.append('red')
        elif (np.all(onoffcutoff > onoffbias_f[:, j])
              and np.all(onoffbias_f[:, j] > -onoffcutoff)):
            # If it's ON-OFF throughout
            colors.append('black')
        else:
            colors.append('white')

    scatterkwargs = {'c': colors, 'alpha': .6, 'linewidths': 0}

    colorcategories = ['blue', 'red', 'black']
    colorlabels = ['ON', 'OFF', 'ON-OFF']

    # Create an array for all the colors to use with plt.legend()
    patches = []
    for color, label in zip(colorcategories, colorlabels):
        patches.append(mpatches.Patch(color=color, label=label))

    x = [np.nanmin(csinds_f), np.nanmax(csinds_f)]

    plt.figure(figsize=(12, 6))
    ax1 = plt.subplot(121)
    plt.legend(handles=patches, fontsize='small')
    plt.scatter(csinds_f[0, :], csinds_f[1, :], **scatterkwargs)
    plt.plot(x, x, 'r--', alpha=.5)
    plt.xlabel('Low 1')
    plt.ylabel('High')

    ax1.set_aspect('equal')
    plf.spineless(ax1)

    ax2 = plt.subplot(122)
    plt.scatter(csinds_f[0, :], csinds_f[2, :], **scatterkwargs)
    plt.plot(x, x, 'r--', alpha=.5)
    plt.xlabel('Low 1')
    plt.ylabel('Low 2')
    ax2.set_aspect('equal')
    plf.spineless(ax2)

    plt.suptitle(f'Center-Surround Index Change\n{exp_name}')
    plt.text(.8,
             -0.1,
             f'qualcutoff:{qualcutoff} onoffcutoff:{onoffcutoff}',
             fontsize='small',
             transform=ax2.transAxes)
    plotsave = os.path.join(exp_dir, 'data_analysis', 'csinds')
    plt.savefig(plotsave + '.svg', format='svg', bbox_inches='tight')
    plt.savefig(plotsave + '.pdf', format='pdf', bbox_inches='tight')
    plt.show()
    plt.close()
Code example #13
def plot_checker_stas(exp_name, stim_nr, filename=None):
    """
    Plot and save all STAs from checkerflicker analysis. The plots
    will be saved in a new folder called STAs under the data analysis
    path of the stimulus.

    <exp_dir>/data_analysis/<stim_nr>_*/<stim_nr>_data.h5 file is
    used by default. If a different file is to be used, filename
    should be supplied.
    """

    from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar

    exp_dir = iof.exp_dir_fixer(exp_name)
    stim_nr = str(stim_nr)
    if filename:
        filename = str(filename)

    _, metadata = asc.read_spikesheet(exp_dir)
    px_size = metadata['pixel_size(um)']

    if not filename:
        savefolder = 'STAs'
        label = ''
    else:
        label = filename.strip('.npz')
        savefolder = 'STAs_' + label

    data = iof.load(exp_name, stim_nr, fname=filename)

    clusters = data['clusters']
    stas = data['stas']
    filter_length = data['filter_length']
    stx_h = data['stx_h']
    exp_name = data['exp_name']
    stimname = data['stimname']

    for j in range(clusters.shape[0]):
        a = stas[j]
        subplot_arr = plf.numsubplots(filter_length)
        sta_max = np.max(np.abs([np.max(a), np.min(a)]))
        sta_min = -sta_max
        plt.figure(dpi=250)
        for i in range(filter_length):
            ax = plt.subplot(subplot_arr[0], subplot_arr[1], i + 1)
            im = ax.imshow(a[:, :, i],
                           vmin=sta_min,
                           vmax=sta_max,
                           cmap=iof.config('colormap'))
            ax.set_aspect('equal')
            plt.axis('off')
            if i == 0:
                scalebar = AnchoredSizeBar(ax.transData,
                                           10,
                                           '{} µm'.format(10 * stx_h *
                                                          px_size),
                                           'lower left',
                                           pad=0,
                                           color='k',
                                           frameon=False,
                                           size_vertical=1)
                ax.add_artist(scalebar)
            if i == filter_length - 1:
                plf.colorbar(im, ticks=[sta_min, 0, sta_max], format='%.2f')
        plt.suptitle('{}\n{}\n'
                     '{:0>3}{:0>2} Rating: {}'.format(exp_name,
                                                      stimname + label,
                                                      clusters[j][0],
                                                      clusters[j][1],
                                                      clusters[j][2]))

        savepath = os.path.join(
            exp_dir, 'data_analysis', stimname, savefolder,
            '{:0>3}{:0>2}'.format(clusters[j][0], clusters[j][1]))

        os.makedirs(os.path.split(savepath)[0], exist_ok=True)

        plt.savefig(savepath + '.png', bbox_inches='tight')
        plt.close()
    print(f'Plotted checkerflicker STA for {stimname}')
Code example #14
File: gqm_with_OMBdata.py Project: gollischlab/pymer
data = iof.load(exp_name, stim_nr)
stimname = iof.getstimname(exp_name, stim_nr)
stimulus_xy = glm.loadstim(exp_name, stim_nr)

gqmlabel = 'GQM_x'
stimulus = stimulus_xy[0, :]
clusters = data['clusters']

parameters = asc.read_parameters(exp_name, stim_nr)
_, frametimes = asc.ft_nblinks(exp_name, stim_nr,
                               parameters.get('Nblinks', 2))
frametimes = frametimes[:-1]
bin_length = np.ediff1d(frametimes).mean()

filter_length = l = data['filter_length']
refresh_rate = asc.read_spikesheet(exp_name)[1]['refresh_rate']

t = np.arange(0, filter_length*bin_length, bin_length)*1000

exp_name = os.path.split(exp_dir)[-1]
savedir = os.path.join(exp_dir, 'data_analysis', stimname, gqmlabel)
os.makedirs(savedir, exist_ok=True)

kall = np.zeros((clusters.shape[0], l))
Qall = np.zeros((clusters.shape[0], l, l))
muall = np.zeros((clusters.shape[0]))

eigvals = np.zeros((clusters.shape[0], l))
eigvecs = np.zeros((clusters.shape[0], l, l))

Code example #15
def OMSpatchesanalyzer(exp_name, stim_nrs):
    """
    Analyze and plot the responses to object motion patches stimulus.
    """

    exp_dir = iof.exp_dir_fixer(exp_name)

    exp_name = os.path.split(exp_dir)[-1]

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]
    elif len(stim_nrs) == 0:
        return

    clusters, metadata = asc.read_spikesheet(exp_dir, cutoff=4)
    clusterids = plf.clusters_to_ids(clusters)
    all_omsi = np.empty((clusters.shape[0], len(stim_nrs)))
    stimnames = []
    for stim_index, stim_nr in enumerate(stim_nrs):
        stim_nr = str(stim_nr)

        stimname = iof.getstimname(exp_dir, stim_nr)
        stimnames.append(stimname)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        refresh_rate = metadata['refresh_rate']

        nblinks = parameters.get('Nblinks', 1)
        seed = parameters.get('seed', -10000)
        stim_duration = parameters.get('stimFrames', 1400)
        # The duration in the parameters refers to the total duration of both
        # epochs. We divide by two to get the length of a single epoch.
        stim_duration = int(stim_duration / 2)
        prefr_duration = parameters.get('preFrames', 100)

        frametimings = asc.readframetimes(exp_dir, stim_nr)

        # ntrials is the number of trials containing both local and global epochs
        ntrials = np.floor((frametimings.shape[0] / (stim_duration + 1))) / 2
        ntrials = ntrials.astype(int)
        frametimings_rs = frametimings[:ntrials * 2 * (stim_duration + 1)]
        frametimings_rs = frametimings_rs.reshape(
            (ntrials * 2, stim_duration + 1))

        ft_local = frametimings_rs[::2][:, :-1]
        ft_global = frametimings_rs[1::2][:, :-1]

        localspikes = np.empty((clusters.shape[0], ntrials, stim_duration))
        globalspikes = np.empty((clusters.shape[0], ntrials, stim_duration))

        for i, cluster in enumerate(clusters):
            spikes = asc.read_raster(exp_name, stim_nr, cluster[0], cluster[1])
            for j in range(ntrials):
                localspikes[i, j, :] = asc.binspikes(spikes, ft_local[j, :])
                globalspikes[i, j, :] = asc.binspikes(spikes, ft_global[j, :])

        response_local = localspikes.mean(axis=1)
        response_global = globalspikes.mean(axis=1)

        # Differential and coherent firing rates
        fr_d = response_local.mean(axis=1)
        fr_c = response_global.mean(axis=1)

        # Calculate object motion sensitivity index (OMSI) as defined in
        # Kühn et al., 2016. There, the first second of each trial is
        # discarded; here it does not seem to be very different from the
        # rest, so it is kept.
        omsi = (fr_d - fr_c) / (fr_d + fr_c)

        # Create a time array for plotting
        time = np.linspace(0,
                           stim_duration * 2 / refresh_rate,
                           num=stim_duration)

        savepath = os.path.join(exp_dir, 'data_analysis', stimname)
        if not os.path.isdir(savepath):
            os.makedirs(savepath, exist_ok=True)

        for i, cluster in enumerate(clusters):
            gs = gridspec.GridSpec(2, 1)
            ax1 = plt.subplot(gs[0])
            ax2 = plt.subplot(gs[1])

            rastermat = np.vstack(
                (localspikes[i, :, :], globalspikes[i, :, :]))
            ax1.matshow(rastermat, cmap='Greys')
            ax1.axhline(ntrials - 1, color='r', lw=.1)
            ax1.plot([0, 0], [ntrials, 0])
            ax1.plot([0, 0], [ntrials * 2, ntrials])
            ax1.set_xticks([])
            ax1.set_yticks([])
            plf.spineless(ax1)

            ax2.plot(time, response_local[i, :], label='Local')
            ax2.plot(time, response_global[i, :], label='Global')
            ax2.set_xlabel('Time [s]')
            ax2.set_ylabel('Average firing rate [au]')
            ax2.set_xlim([time.min(), time.max()])
            plf.spineless(ax2, 'tr')
            ax2.legend(fontsize='x-small')

            plt.suptitle(f'{exp_name}\n{stimname}\n'
                         f'{clusterids[i]} OMSI: {omsi[i]:4.2f}')
            plt.tight_layout()
            plt.savefig(os.path.join(savepath, clusterids[i] + '.svg'),
                        bbox_inches='tight')
            plt.close()
        keystosave = [
            'nblinks', 'refresh_rate', 'stim_duration', 'prefr_duration',
            'ntrials', 'response_local', 'response_global', 'fr_d', 'fr_c',
            'omsi', 'clusters'
        ]
        datadict = {}

        for key in keystosave:
            datadict[key] = locals()[key]

        npzfpath = os.path.join(savepath, str(stim_nr) + '_data')
        np.savez(npzfpath, **datadict)
        all_omsi[:, stim_index] = omsi
    print(f'Analysis of {stimname} completed.')
    # Draw the distribution of the OMSI for all OMSI stimuli
    # If there is only one OMS stimulus, draw it in the same folder
    # If there are multiple stimuli, save it in the data analysis folder
    if len(stim_nrs) == 1:
        pop_plot_savepath = os.path.join(savepath, 'omsi_population.svg')
    else:
        pop_plot_savepath = os.path.split(savepath)[0]
        pop_plot_savepath = os.path.join(pop_plot_savepath, 'all_omsi.svg')

    plt.figure(figsize=(5, 2 * len(stim_nrs)))
    ax2 = plt.subplot(111)
    for j, stim_nr in enumerate(stim_nrs):
        np.random.seed(j)
        ax2.scatter(all_omsi[:, j],
                    j + (np.random.random(omsi.shape) - .5) / 1.1)
    np.random.seed()
    ax2.set_yticks(np.arange(len(stim_nrs)))
    ax2.set_yticklabels(stimnames, fontsize='xx-small', rotation='45')
    ax2.set_xlabel('Object-motion sensitivity index')
    ax2.set_title(f'{exp_name}\nDistribution of OMSI')
    plf.spineless(ax2, 'tr')
    plt.savefig(pop_plot_savepath, bbox_inches='tight')
    plt.close()
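As a quick numeric check of the OMSI definition used above: a cell firing at 4 au during differential (local) motion and 1 au during coherent (global) motion gets an index of 0.6.

fr_d, fr_c = 4.0, 1.0
omsi = (fr_d - fr_c) / (fr_d + fr_c)
print(omsi)  # 0.6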
Code example #16
    """
    Expands a matrix by mirroring at all sides, to mimic OpenGL
    texture wrapping (GL_MIRRORED_REPEAT) behaviour.

    """
    for i in range(times):
        x = np.vstack((np.flipud(x), x, np.flipud(x)))
        x = np.hstack((np.fliplr(x), x, np.fliplr(x)))
    return x
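A standalone sketch of the mirrored expansion described in the docstring, using a hypothetical function name (the def line is cut off in the excerpt); each pass triples both dimensions:

import numpy as np

def mirror_expand(x, times=1):  # hypothetical name and signature
    for i in range(times):
        x = np.vstack((np.flipud(x), x, np.flipud(x)))
        x = np.hstack((np.fliplr(x), x, np.fliplr(x)))
    return x

a = np.arange(4).reshape(2, 2)
print(mirror_expand(a).shape)           # (6, 6) after one pass
print(mirror_expand(a, times=2).shape)  # (18, 18) after two passes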


#%%
exp, stim_nr = ('20180710', 8)

sortedstim = asc.stimulisorter(exp)
clusters, metadata = asc.read_spikesheet(exp)
pars = asc.read_parameters(exp, stim_nr)

for key, val in sortedstim.items():
    if stim_nr in val:
        stimtype = key

if stimtype != 'OMB':
    raise ValueError('The stimulus is not OMB.')

bgnoise = pars.get('bgnoise', 4)
if bgnoise != 1:
    bgstixel = pars.get('bgstixel', 5)
else:
    bgstixel = pars.get('bgstixel', 10)
Code example #17
File: fffanalyzer.py Project: gollischlab/pymer
def fffanalyzer(exp_name, stimnrs):
    """
    Analyzes and plots data from full field flicker
    stimulus.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]

    if isinstance(stimnrs, int):
        stimnrs = [stimnrs]

    for stimnr in stimnrs:
        stimnr = str(stimnr)

        stimname = iof.getstimname(exp_name, stimnr)

        clusters, metadata = asc.read_spikesheet(exp_dir)

        parameters = asc.read_parameters(exp_dir, stimnr)

        clusterids = plf.clusters_to_ids(clusters)

        refresh_rate = metadata['refresh_rate']

        if parameters['stixelheight'] < 600 or parameters['stixelwidth'] < 800:
            raise ValueError('Make sure the stimulus is full field flicker.')

        nblinks = parameters['Nblinks']

        bw = parameters.get('blackwhite', False)

        seed = parameters.get('seed', -10000)

        filter_length, frametimings = asc.ft_nblinks(exp_dir, stimnr)

        frame_duration = np.average(np.ediff1d(frametimings))
        total_frames = frametimings.shape[0]

        all_spiketimes = []
        # Store spike triggered averages in a list containing correct shaped
        # arrays
        stas = []
        # Make a list for covariances of the spike triggered ensemble
        covars = []
        for i in range(len(clusters[:, 0])):
            spiketimes = asc.read_raster(exp_dir, stimnr,
                                         clusters[i, 0], clusters[i, 1])
            spikes = asc.binspikes(spiketimes, frametimings)
            all_spiketimes.append(spikes)
            stas.append(np.zeros(filter_length))
            covars.append(np.zeros((filter_length, filter_length)))

        if bw:
            randnrs, seed = randpy.ranb(seed, total_frames)
            # Since ranb returns zeros and ones, we need to convert the zeros
            # into -1s.
            stimulus = np.array(randnrs) * 2 - 1
        else:
            randnrs, seed = randpy.gasdev(seed, total_frames)
            stimulus = np.array(randnrs)

        for k in range(filter_length, total_frames-filter_length+1):
            stim_small = stimulus[k-filter_length+1:k+1][::-1]
            for j in range(clusters.shape[0]):
                spikes = all_spiketimes[j]
                if spikes[k] != 0:
                    stas[j] += spikes[k]*stim_small
                    # This trick is needed to use .T for transposing
                    stim_small_n = stim_small[np.newaxis, :]
                    # Calculate the covariance as the weighted outer product
                    # of small stimulus(i.e. snippet) with itself
                    # This is non-centered STC (a la Cantrell et al., 2010)
                    covars[j] += spikes[k]*(np.dot(stim_small_n.T,
                                                   stim_small_n))
        spikenrs = np.array([spikearr.sum() for spikearr in all_spiketimes])

        plotpath = os.path.join(exp_dir, 'data_analysis',
                                stimname, 'filters')
        if not os.path.isdir(plotpath):
            os.makedirs(plotpath, exist_ok=True)

        t = np.arange(filter_length)*frame_duration*1000

        eigvals = [np.zeros((filter_length)) for i in range(clusters.shape[0])]
        eigvecs = [np.zeros((filter_length,
                             filter_length)) for i in range(clusters.shape[0])]

        for i in range(clusters.shape[0]):
            stas[i] = stas[i]/spikenrs[i]
            covars[i] = covars[i]/spikenrs[i]
            try:
                eigvals[i], eigvecs[i] = np.linalg.eigh(covars[i])
            except np.linalg.LinAlgError:
                eigvals[i] = np.full((filter_length), np.nan)
                eigvecs[i] = np.full((filter_length, filter_length), np.nan)
            fig = plt.figure(figsize=(9, 6))
            ax = plt.subplot(111)
            ax.plot(t, stas[i], label='STA')
            ax.plot(t, eigvecs[i][:, 0], label='STC component 1', alpha=.5)
            ax.plot(t, eigvecs[i][:, -1], label='STC component 2', alpha=.5)
            # Add eigenvalues as inset
            ax2 = fig.add_axes([.65, .15, .2, .2])
            # Highlight the first and second components which are plotted
            ax2.plot(0, eigvals[i][0], 'o',
                     markersize=7, markerfacecolor='C1', markeredgewidth=0)
            ax2.plot(filter_length-1, eigvals[i][-1], 'o',
                     markersize=7, markerfacecolor='C2', markeredgewidth=0)
            ax2.plot(eigvals[i], 'ko', alpha=.5, markersize=4,
                     markeredgewidth=0)
            ax2.set_axis_off()
            plf.spineless(ax)
            ax.set_xlabel('Time[ms]')
            ax.set_title(f'{exp_name}\n{stimname}\n{clusterids[i]} Rating:'
                         f' {clusters[i, 2]} {int(spikenrs[i])} spikes')
            plt.savefig(os.path.join(plotpath, clusterids[i])+'.svg',
                        format='svg', dpi=300)
            plt.close()

        savepath = os.path.join(os.path.split(plotpath)[0], stimnr+'_data')

        keystosave = ['stas', 'clusters', 'frame_duration', 'all_spiketimes',
                      'stimname', 'total_frames', 'spikenrs', 'bw', 'nblinks',
                      'filter_length', 'exp_name', 'covars', 'eigvals',
                      'eigvecs']
        data_in_dict = {}
        for key in keystosave:
            data_in_dict[key] = locals()[key]

        np.savez(savepath, **data_in_dict)
        print(f'Analysis of {stimname} completed.')
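The accumulation loop above builds the STA and a non-centered STC from spike-weighted outer products of stimulus snippets. A minimal equivalent sketch with toy data, collecting all snippets first and computing the same quantities with one matrix product and one einsum (sizes are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
filter_length, total_frames = 5, 200
stimulus = rng.standard_normal(total_frames)
spikes = rng.poisson(0.3, size=total_frames)

# Time-reversed stimulus snippet preceding each frame, as in the loop above
ks = range(filter_length, total_frames - filter_length + 1)
snippets = np.stack([stimulus[k - filter_length + 1:k + 1][::-1] for k in ks])
weights = spikes[filter_length:total_frames - filter_length + 1]

sta = weights @ snippets / weights.sum()
stc = np.einsum('k,ki,kj->ij', weights, snippets, snippets) / weights.sum()
print(sta.shape, stc.shape)  # (5,) (5, 5)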
Code example #18
File: OMBanalyzer.py Project: gollischlab/pymer
def OMBanalyzer(exp_name, stimnr, plotall=False, nr_bins=20):
    """
    Analyze responses to object moving background stimulus. STA and STC
    are calculated.

    Note that there are additional functions that make use of the
    OMB class. This function was written before the OMB class existed.
    """
    # TODO
    # Add iteration over multiple stimuli

    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]
    stimname = iof.getstimname(exp_dir, stimnr)

    parameters = asc.read_parameters(exp_name, stimnr)
    assert parameters['stimulus_type'] == 'objectsmovingbackground'
    stimframes = parameters.get('stimFrames', 108000)
    preframes = parameters.get('preFrames', 200)
    nblinks = parameters.get('Nblinks', 2)

    seed = parameters.get('seed', -10000)
    seed2 = parameters.get('objseed', -1000)

    stepsize = parameters.get('stepsize', 2)

    ntotal = int(stimframes / nblinks)

    clusters, metadata = asc.read_spikesheet(exp_name)

    refresh_rate = metadata['refresh_rate']
    filter_length, frametimings = asc.ft_nblinks(exp_name, stimnr, nblinks,
                                                 refresh_rate)
    frame_duration = np.ediff1d(frametimings).mean()
    frametimings = frametimings[:-1]

    if ntotal != frametimings.shape[0]:
        print(f'For {exp_name}\nstimulus {stimname} :\n'
              f'Number of frames specified in the parameters file ({ntotal}'
              f' frames) and frametimings ({frametimings.shape[0]}) do not'
              ' agree!'
              ' The stimulus was possibly interrupted during recording.'
              ' ntotal is changed to match actual frametimings.')
        ntotal = frametimings.shape[0]

    # Generate the numbers to be used for reconstructing the motion
    # ObjectsMovingBackground.cpp line 174, steps are generated in an
    # alternating fashion. We can generate all of the numbers at once
    # (total lengths is defined by stimFrames) and then assign
    # to x and y directions. Although there is more
    # stuff around line 538
    randnrs, seed = randpy.gasdev(seed, ntotal * 2)
    randnrs = np.array(randnrs) * stepsize

    xsteps = randnrs[::2]
    ysteps = randnrs[1::2]

    clusterids = plf.clusters_to_ids(clusters)

    all_spikes = np.empty((clusters.shape[0], ntotal))
    for i, (cluster, channel, _) in enumerate(clusters):
        spiketimes = asc.read_raster(exp_name, stimnr, cluster, channel)
        spikes = asc.binspikes(spiketimes, frametimings)
        all_spikes[i, :] = spikes

    # Collect STA for x and y movement in one array
    stas = np.zeros((clusters.shape[0], 2, filter_length))
    stc_x = np.zeros((clusters.shape[0], filter_length, filter_length))
    stc_y = np.zeros((clusters.shape[0], filter_length, filter_length))
    t = np.arange(filter_length) * 1000 / refresh_rate * nblinks
    for k in range(filter_length, ntotal - filter_length + 1):
        x_mini = xsteps[k - filter_length + 1:k + 1][::-1]
        y_mini = ysteps[k - filter_length + 1:k + 1][::-1]
        for i, (cluster, channel, _) in enumerate(clusters):
            if all_spikes[i, k] != 0:
                stas[i, 0, :] += all_spikes[i, k] * x_mini
                stas[i, 1, :] += all_spikes[i, k] * y_mini
                # Calculate non-centered STC (Cantrell et al., 2010)
                stc_x[i, :, :] += all_spikes[i, k] * calc_covar(x_mini)
                stc_y[i, :, :] += all_spikes[i, k] * calc_covar(y_mini)

    eigvals_x = np.zeros((clusters.shape[0], filter_length))
    eigvals_y = np.zeros((clusters.shape[0], filter_length))
    eigvecs_x = np.zeros((clusters.shape[0], filter_length, filter_length))
    eigvecs_y = np.zeros((clusters.shape[0], filter_length, filter_length))

    bins_x = np.zeros((clusters.shape[0], nr_bins))
    bins_y = np.zeros((clusters.shape[0], nr_bins))
    spikecount_x = np.zeros(bins_x.shape)
    spikecount_y = np.zeros(bins_x.shape)
    generators_x = np.zeros(all_spikes.shape)
    generators_y = np.zeros(all_spikes.shape)
    # Normalize STAs and STCs with respect to spike numbers
    for i in range(clusters.shape[0]):
        totalspikes = all_spikes.sum(axis=1)[i]
        stas[i, :, :] = stas[i, :, :] / totalspikes
        stc_x[i, :, :] = stc_x[i, :, :] / totalspikes
        stc_y[i, :, :] = stc_y[i, :, :] / totalspikes
        try:
            eigvals_x[i, :], eigvecs_x[i, :, :] = np.linalg.eigh(
                stc_x[i, :, :])
            eigvals_y[i, :], eigvecs_y[i, :, :] = np.linalg.eigh(
                stc_y[i, :, :])
        except np.linalg.LinAlgError:
            continue
        # Calculate the generator signals and nonlinearities
        generators_x[i, :] = np.convolve(eigvecs_x[i, :, -1],
                                         xsteps,
                                         mode='full')[:-filter_length + 1]
        generators_y[i, :] = np.convolve(eigvecs_y[i, :, -1],
                                         ysteps,
                                         mode='full')[:-filter_length + 1]
        spikecount_x[i, :], bins_x[i, :] = nlt.calc_nonlin(
            all_spikes[i, :], generators_x[i, :], nr_bins)
        spikecount_y[i, :], bins_y[i, :] = nlt.calc_nonlin(
            all_spikes[i, :], generators_y[i, :], nr_bins)
    savepath = os.path.join(exp_dir, 'data_analysis', stimname)
    if not os.path.isdir(savepath):
        os.makedirs(savepath, exist_ok=True)

    # Calculated based on last eigenvector
    magx = eigvecs_x[:, :, -1].sum(axis=1)
    magy = eigvecs_y[:, :, -1].sum(axis=1)
    r_ = np.sqrt(magx**2 + magy**2)
    theta_ = np.arctan2(magy, magx)
    # To draw the vectors starting from origin, insert zeros every other element
    r = np.zeros(r_.shape[0] * 2)
    theta = np.zeros(theta_.shape[0] * 2)
    r[1::2] = r_
    theta[1::2] = theta_
    plt.polar(theta, r)
    plt.gca().set_xticks(np.pi / 180 * np.array([0, 90, 180, 270]))
    plt.title(f'Population plot for motion STAs\n{exp_name}')
    plt.savefig(os.path.join(savepath, 'population.svg'))
    if plotall:
        plt.show()
    plt.close()

    for i in range(stas.shape[0]):
        stax = stas[i, 0, :]
        stay = stas[i, 1, :]
        ax1 = plt.subplot(211)
        ax1.plot(t, stax, label=r'STA$_{\rm X}$')
        ax1.plot(t, stay, label=r'STA$_{\rm Y}$')
        ax1.plot(t, eigvecs_x[i, :, -1], label='Eigenvector_X 0')
        ax1.plot(t, eigvecs_y[i, :, -1], label='Eigenvector_Y 0')
        plt.legend(fontsize='x-small')

        ax2 = plt.subplot(4, 4, 9)
        ax3 = plt.subplot(4, 4, 13)
        ax2.set_yticks([])
        ax2.set_xticklabels([])
        ax3.set_yticks([])
        ax2.set_title('Eigenvalues', size='small')
        ax2.plot(eigvals_x[i, :],
                 'o',
                 markerfacecolor='C0',
                 markersize=4,
                 markeredgewidth=0)
        ax3.plot(eigvals_y[i, :],
                 'o',
                 markerfacecolor='C1',
                 markersize=4,
                 markeredgewidth=0)
        ax4 = plt.subplot(2, 3, 5)
        ax4.plot(bins_x[i, :], spikecount_x[i, :] / frame_duration)
        ax4.plot(bins_y[i, :], spikecount_y[i, :] / frame_duration)
        ax4.set_ylabel('Firing rate [Hz]')
        ax4.set_title('Nonlinearities', size='small')
        plf.spineless([ax1, ax2, ax3, ax4], 'tr')
        ax5 = plt.subplot(2, 3, 6, projection='polar')
        ax5.plot(theta, r, color='k', alpha=.3)
        ax5.plot(theta[2 * i:2 * i + 2], r[2 * i:2 * i + 2], lw=3)
        ax5.set_xticklabels(['0', '', '', '', '180', '', '270', ''])
        ax5.set_title('Vector sum of X and Y STCs', size='small')
        plt.suptitle(f'{exp_name}\n{stimname}\n{clusterids[i]}')
        plt.subplots_adjust(hspace=.4)
        plt.savefig(os.path.join(savepath, clusterids[i] + '.svg'),
                    bbox_inches='tight')
        if plotall:
            plt.show()
        plt.close()

    keystosave = [
        'nblinks', 'all_spikes', 'clusters', 'frame_duration', 'eigvals_x',
        'eigvals_y', 'eigvecs_x', 'eigvecs_y', 'filter_length', 'magx', 'magy',
        'ntotal', 'r', 'theta', 'stas', 'stc_x', 'stc_y', 'bins_x', 'bins_y',
        'nr_bins', 'spikecount_x', 'spikecount_y', 'generators_x',
        'generators_y', 't'
    ]
    datadict = {}

    for key in keystosave:
        datadict[key] = locals()[key]

    npzfpath = os.path.join(savepath, str(stimnr) + '_data')
    np.savez(npzfpath, **datadict)
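calc_covar is a project helper that is not shown in this excerpt; judging from the explicit covariance computation in the full field flicker example above (spike-weighted outer product of the snippet with itself), it presumably amounts to the following sketch:

import numpy as np

def calc_covar_sketch(snippet):
    # Assumed behaviour: non-centered covariance contribution of one snippet
    snippet = np.asarray(snippet)[np.newaxis, :]
    return snippet.T @ snippet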
Code example #19
    if max_i[0] - fsize <= 0 or max_i[0] + fsize > sta.shape[0]:
        raise ValueError('Cutting outside the STA range.')
    sta_r = sta[max_i[0]-fsize:max_i[0]+fsize+1, :]
    max_i_r = np.append(fsize, max_i[-1])
    return sta_r, max_i_r


exp_name = '20171122'
stripes = [8, 9, 10, 11]

exp_dir = iof.exp_dir_fixer(exp_name)

data = iof.load(exp_name, stripes[0])


_, metadata = asc.read_spikesheet(exp_dir)
px_size = metadata['pixel_size(um)']
exp_name = data['exp_name']
stx_w = data['stx_w']


clusters = data['clusters']
clusterids = plf.clusters_to_ids(clusters)

fsize = int(700/(stx_w*px_size))
vscale = fsize * stx_w*px_size


for i in range(len(clusterids)):
    plt.figure(figsize=(8, 8))
Code example #20
def csindexchange(exp_name, onoffcutoff=.5, qualcutoff=qualcutoff):
    """
    Returns the center-surround indexes and ON-OFF classification in
    mesopic and photopic light levels.
    """
    # For now there are only three experiments with the
    # different light levels and the indices of stimuli
    # are different. Automating this would be tricky and the ROI
    # is just not enough to justify it, so they are hard coded.
    if '20180124' in exp_name or '20180207' in exp_name:
        stripeflicker = [6, 17]
        onoffs = [3, 14]
    elif '20180118' in exp_name:
        stripeflicker = [7, 19]
        onoffs = [3, 16]

    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]
    clusternr = asc.read_spikesheet(exp_name)[0].shape[0]

    # Collect all CS indices, on-off indices and quality scores
    csinds = np.zeros((2, clusternr))
    quals = np.zeros((2, clusternr))

    onoffinds = np.zeros((2, clusternr))
    for i, stim in enumerate(onoffs):
        onoffinds[i, :] = iof.load(exp_name, stim)['onoffbias']

    for i, stim in enumerate(stripeflicker):
        data = iof.load(exp_name, stim)
        quals[i, :] = data['quals']
        csinds[i, :] = data['cs_inds']

    csinds_f = np.copy(csinds)
    quals_f = np.copy(quals)
    onoffbias_f = np.copy(onoffinds)

    # Filter them according to the quality cutoff value
    # and set excluded ones to NaN

    for j in range(quals.shape[1]):
        if not np.all(quals[:, j] > qualcutoff):
            quals_f[:, j] = np.nan
            csinds_f[:, j] = np.nan
            onoffbias_f[:, j] = np.nan

    # Calculate the change of polarity for each cell
    # np.diff gives the high-low value
    biaschange = np.diff(onoffbias_f, axis=0)[0]

    # Define the color for each point depending on each cell's ON-OFF index
    # by appending the color name in an array.
    colors = []
    for j in range(onoffbias_f.shape[1]):
        if np.all(onoffbias_f[:, j] > onoffcutoff):
            # If it stays ON throughout
            colors.append(colorcategories[0])
        elif np.all(onoffbias_f[:, j] < -onoffcutoff):
            # If it stays OFF throughout
            colors.append(colorcategories[1])
        elif (np.all(onoffcutoff > onoffbias_f[:, j])
              and np.all(onoffbias_f[:, j] > -onoffcutoff)):
            # If it's ON-OFF throughout
            colors.append(colorcategories[2])
        elif biaschange[j] > 0:
            # Increasing polarity
            # If it's not consistent in any category and
            # polarity change is positive
            colors.append(colorcategories[3])
        elif biaschange[j] < 0:
            # Decreasing polarity
            colors.append(colorcategories[4])
        else:
            colors.append('yellow')

    return csinds_f, colors, onoffbias_f, quals_f
Code example #21
def checkerflickerplusanalyzer(exp_name,
                               stimulusnr,
                               clusterstoanalyze=None,
                               frametimingsfraction=None,
                               cutoff=4):
    """
    Analyzes checkerflicker-like data, typically stimuli interspersed
    between chunks of checkerflicker, e.g. checkerflickerplusmovie,
    frozennoise.

    Parameters:
    ----------
        exp_name:
            Experiment name.
        stimulusnr:
            Number of the stimulus to be analyzed.
        clusterstoanalyze:
            Number of clusters that should be analyzed. Default is None.

            First N cells will be analyzed if this parameter is given.
            In case of long recordings it might make sense to first
            look at a subset of cells before starting to analyze
            the whole dataset.

        frametimingsfraction:
            Fraction of the recording to analyze. Should be a number
            between 0 and 1. e.g. 0.3 will analyze the first 30% of
            the whole recording.
        cutoff:
           Worst rating that is wanted for the analysis. Default
           is 4. The source of this value is manual rating of each
           cluster.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)

    stimname = iof.getstimname(exp_dir, stimulusnr)

    exp_name = os.path.split(exp_dir)[-1]

    clusters, metadata = asc.read_spikesheet(exp_dir, cutoff=cutoff)

    # Check that the inputs are as expected.
    if clusterstoanalyze:
        if clusterstoanalyze > len(clusters[:, 0]):
            warnings.warn('clusterstoanalyze is larger '
                          'than number of clusters in dataset. '
                          'All cells will be included.')
            clusterstoanalyze = None
    if frametimingsfraction:
        if not 0 < frametimingsfraction < 1:
            raise ValueError('Invalid input for frametimingsfraction: {}. '
                             'It should be a number between 0 and 1'
                             ''.format(frametimingsfraction))

    scr_width = metadata['screen_width']
    scr_height = metadata['screen_height']

    refresh_rate = metadata['refresh_rate']

    parameters = asc.read_parameters(exp_dir, stimulusnr)

    stx_h = parameters['stixelheight']
    stx_w = parameters['stixelwidth']

    # Check whether any parameters are given for margins, calculate
    # screen dimensions.
    marginkeys = ['tmargin', 'bmargin', 'rmargin', 'lmargin']
    margins = []
    for key in marginkeys:
        margins.append(parameters.get(key, 0))

    # Subtract bottom and top from vertical dimension; left and right
    # from horizontal dimension
    scr_width = scr_width - sum(margins[2:])
    scr_height = scr_height - sum(margins[:2])

    nblinks = parameters['Nblinks']
    bw = parameters.get('blackwhite', False)

    # Gaussian stimuli are not supported yet, we need to ensure we
    # have a black and white stimulus
    if bw is not True:
        raise ValueError('Gaussian stimuli are not supported yet!')

    seed = parameters.get('seed', -1000)

    sx, sy = scr_height / stx_h, scr_width / stx_w

    # Make sure that the number of stimulus pixels are integers
    # Rounding down is also possible but might require
    # other considerations.
    if sx % 1 == 0 and sy % 1 == 0:
        sx, sy = int(sx), int(sy)
    else:
        raise ValueError('sx and sy must be integers')

    filter_length, frametimings = asc.ft_nblinks(exp_dir, stimulusnr)

    if parameters['stimulus_type'] in [
            'FrozenNoise', 'checkerflickerplusmovie'
    ]:
        runfr = parameters['RunningFrames']
        frofr = parameters['FrozenFrames']
        # To generate the frozen noise, a second seed is used.
        # The default value of this is -10000 as per StimulateOpenGL
        secondseed = parameters.get('secondseed', -10000)

        if parameters['stimulus_type'] == 'checkerflickerplusmovie':
            mblinks = parameters['Nblinksmovie']
            # Retrieve the number of frames (files) from parameters['path']
            ipath = PureWindowsPath(parameters['path']).as_posix()
            repldict = iof.config('stimuli_path_replace')
            for needle, repl in repldict.items():
                ipath = ipath.replace(needle, repl)
            ipath = os.path.normpath(ipath)  # Windows compatibility
            moviefr = len([
                name for name in os.listdir(ipath)
                if os.path.isfile(os.path.join(ipath, name))
                and name.lower().endswith('.raw')
            ])
            noiselen = (runfr + frofr) * nblinks
            movielen = moviefr * mblinks
            triallen = noiselen + movielen

            ft_on, ft_off = asc.readframetimes(exp_dir,
                                               stimulusnr,
                                               returnoffsets=True)
            frametimings = np.empty(ft_on.shape[0] * 2, dtype=float)
            frametimings[::2] = ft_on
            frametimings[1::2] = ft_off

            import math
            ntrials = math.floor(frametimings.size / triallen)
            trials = np.zeros((ntrials, runfr + frofr + moviefr))
            for t in range(ntrials):
                frange = frametimings[t * triallen:(t + 1) * triallen]
                trials[t, :runfr + frofr] = frange[:noiselen][::nblinks]
                trials[t, runfr + frofr:] = frange[noiselen:][::mblinks]
            frametimings = trials.ravel()

            filter_length = int(np.round(.666 * refresh_rate / nblinks))

            # Add frozen movie to frozen noise (for masking)
            frofr += moviefr

    savefname = str(stimulusnr) + '_data'

    if clusterstoanalyze:
        clusters = clusters[:clusterstoanalyze, :]
        print('Analyzing first %s cells' % clusterstoanalyze)
        savefname += '_' + str(clusterstoanalyze) + 'cells'
    if frametimingsfraction:
        frametimingsindex = int(len(frametimings) * frametimingsfraction)
        frametimings = frametimings[:frametimingsindex]
        print('Analyzing first {}% of'
              ' the recording'.format(frametimingsfraction * 100))
        savefname += '_' + str(frametimingsfraction).replace('.',
                                                             '') + 'fraction'
    frame_duration = np.average(np.ediff1d(frametimings))
    total_frames = frametimings.shape[0]

    all_spiketimes = []
    # Store spike triggered averages in a list containing correct shaped
    # arrays
    stas = []

    for i in range(len(clusters[:, 0])):
        spiketimes = asc.read_raster(exp_dir, stimulusnr, clusters[i, 0],
                                     clusters[i, 1])

        spikes = asc.binspikes(spiketimes, frametimings)
        all_spiketimes.append(spikes)
        stas.append(np.zeros((sx, sy, filter_length)))

    # Separate out the repeated parts
    all_spiketimes = np.array(all_spiketimes)
    mask = runfreezemask(total_frames, runfr, frofr, refresh_rate)
    repeated_spiketimes = all_spiketimes[:, ~mask]
    run_spiketimes = all_spiketimes[:, mask]

    # We need to cut down the total_frames by the same amount
    # as spiketimes
    total_run_frames = run_spiketimes.shape[1]
    # To be able to use the same code as checkerflicker analyzer,
    # convert to list again.
    run_spiketimes = list(run_spiketimes)

    # Empirically determined to be best for 32GB RAM
    desired_chunk_size = 21600000

    # Length of the chunks (specified in number of frames)
    chunklength = int(desired_chunk_size / (sx * sy))

    chunksize = chunklength * sx * sy
    nrofchunks = int(np.ceil(total_run_frames / chunklength))

    print(f'\nAnalyzing {stimname}.\nTotal chunks: {nrofchunks}')

    time = startime = datetime.datetime.now()
    timedeltas = []

    quals = np.zeros(len(stas))

    frame_counter = 0

    for i in range(nrofchunks):
        randnrs, seed = randpy.ranb(seed, chunksize)
        # Reshape and change 0's to -1's
        stimulus = np.reshape(randnrs,
                              (sx, sy, chunklength), order='F') * 2 - 1
        del randnrs

        # Range of indices we are interested in for the current chunk
        if (i + 1) * chunklength < total_run_frames:
            chunkind = slice(i * chunklength, (i + 1) * chunklength)
            chunkend = chunklength
        else:
            chunkind = slice(i * chunklength, None)
            chunkend = total_run_frames - i * chunklength

        for k in range(filter_length, chunkend - filter_length + 1):
            stim_small = stimulus[:, :,
                                  k - filter_length + 1:k + 1][:, :, ::-1]
            for j in range(clusters.shape[0]):
                spikes = run_spiketimes[j][chunkind]
                if spikes[k] != 0:
                    stas[j] += spikes[k] * stim_small
        qual = np.array([])
        for c in range(clusters.shape[0]):
            qual = np.append(qual, asc.staquality(stas[c]))
        quals = np.vstack((quals, qual))

        # Draw progress bar
        width = 50  # Number of characters
        prog = i / (nrofchunks - 1)
        bar_complete = int(prog * width)
        bar_noncomplete = width - bar_complete
        timedeltas.append(msc.timediff(time))  # Calculate running avg
        avgelapsed = np.mean(timedeltas)
        elapsed = np.sum(timedeltas)
        etc = startime + elapsed + avgelapsed * (nrofchunks - i)
        sys.stdout.flush()
        sys.stdout.write('\r{}{} |{:4.1f}% ETC: {}'.format(
            '█' * bar_complete, '-' * bar_noncomplete, prog * 100,
            etc.strftime("%a %X")))
        time = datetime.datetime.now()
    sys.stdout.write('\n')

    # Remove the first row, which is just the zero-initialized placeholder.
    quals = quals[1:, :]

    max_inds = []
    spikenrs = np.array([spikearr.sum() for spikearr in run_spiketimes])

    for i in range(clusters.shape[0]):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '.*true_divide.*')
            stas[i] = stas[i] / spikenrs[i]
        # Find the pixel with largest absolute value
        max_i = np.squeeze(
            np.where(np.abs(stas[i]) == np.max(np.abs(stas[i]))))
        # If there are multiple pixels with largest value,
        # take the first one.
        if max_i.shape != (3, ):
            try:
                max_i = max_i[:, 0]
            # If max_i cannot be found just set it to zeros.
            except IndexError:
                max_i = np.array([0, 0, 0])

        max_inds.append(max_i)

    print(f'Completed. Total elapsed time: {msc.timediff(startime)}\n' +
          f'Finished on {datetime.datetime.now().strftime("%A %X")}')

    savepath = os.path.join(exp_dir, 'data_analysis', stimname)
    if not os.path.isdir(savepath):
        os.makedirs(savepath, exist_ok=True)
    savepath = os.path.join(savepath, savefname)

    keystosave = [
        'clusters', 'frametimings', 'mask', 'repeated_spiketimes',
        'run_spiketimes', 'frame_duration', 'max_inds', 'nblinks', 'stas',
        'stx_h', 'stx_w', 'total_run_frames', 'sx', 'sy', 'filter_length',
        'stimname', 'exp_name', 'spikenrs', 'clusterstoanalyze',
        'frametimingsfraction', 'cutoff', 'quals', 'nrofchunks', 'chunklength'
    ]
    datadict = {}

    for key in keystosave:
        datadict[key] = locals()[key]

    np.savez(savepath, **datadict)

    t = (np.arange(nrofchunks) * chunklength * frame_duration) / refresh_rate
    qmax = np.max(quals, axis=0)
    qualsn = quals / qmax[np.newaxis, :]

    ax = plt.subplot(111)
    ax.plot(t, qualsn, alpha=0.3)
    plt.ylabel('Z-score of center pixel (normalized)')
    plt.xlabel('Minutes of stimulus analyzed')
    plt.ylim([0, 1])
    plf.spineless(ax, 'tr')
    plt.title(f'Recording duration optimization\n{exp_name}\n {savefname}')
    plt.savefig(savepath + '.svg', format='svg')
    plt.close()
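# A minimal, self-contained sketch of the spike-triggered averaging step used
# above, on toy data. Array shapes and values here are illustrative, not taken
# from real recordings. For every frame with a nonzero spike count, the
# preceding `filter_length` stimulus frames (time-reversed) are added to the
# STA weighted by the spike count; the sum is then normalized by the total
# number of spikes.
import numpy as np

rng = np.random.default_rng(0)
sx, sy, nframes, filter_length = 4, 5, 200, 20
stimulus = rng.choice([-1, 1], size=(sx, sy, nframes))   # toy binary stimulus
spikes = rng.poisson(0.5, size=nframes)                  # toy spike counts per frame

sta = np.zeros((sx, sy, filter_length))
for k in range(filter_length, nframes):
    if spikes[k] != 0:
        sta += spikes[k] * stimulus[:, :, k - filter_length + 1:k + 1][:, :, ::-1]
sta /= spikes[filter_length:].sum()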
Code example #22
0
"""
@author: ycan
"""
#%%
import matplotlib.pyplot as plt
import miscfuncs as msc
import analysis_scripts as asc

path = ('/home/ycan/Documents/data/Erol_20171122_252MEA_fr_re_fp/data'
        '_analysis/7_checkerflicker5x5bw2blinks/7_data.h5')

locals().update(msc.loadh5(path))
index = 25
fit_frame = stas[index][:, :, max_inds[index][2]]

exp_dir = '/home/ycan/Documents/data/Erol_20171122_252MEA_fr_re_fp'
_, parameters = asc.read_spikesheet(exp_dir)
px_size = parameters['pixel_size(um)']

ax = plt.subplot(111)
ax.imshow(fit_frame, cmap='RdBu')

from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar

scalebar = AnchoredSizeBar(ax.transData,
                           3,
                           '{} µm'.format(3 * stx_h * px_size),
                           'lower left',
                           pad=1,
                           color='k',
                           frameon=False,
                           size_vertical=.5)
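# The snippet above creates the scale bar but stops before attaching it to the
# axes. A self-contained, hedged sketch of the remaining step on a toy image
# (sizes and labels are illustrative; compare the explicit ax.add_artist call
# in the next example):
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar

fig, ax = plt.subplots()
ax.imshow(np.random.randn(20, 20), cmap='RdBu')
bar = AnchoredSizeBar(ax.transData, 3, '3 stixels', 'lower left',
                      pad=1, color='k', frameon=False, size_vertical=.5)
ax.add_artist(bar)   # attach the scale bar to the axes
plt.show()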
Code example #23
0
def plotcheckersurround(exp_name, stim_nr, filename=None, spikecutoff=1000,
                        ratingcutoff=4, staqualcutoff=0, inner_b=2,
                        outer_b=4):

    """
    Divides the receptive field into center and surround by fitting a
    2D Gaussian, and plots the temporal components of each region.

    spikecutoff:
        Minimum number of spikes to include.

    ratingcutoff:
        Minimum spike sorting rating to include.

    staqualcutoff:
        Minimum STA quality (as measured by z-score) to include.

    inner_b:
        Limit between the receptive field center and surround,
        in units of sigma.

    outer_b:
        Outer limit of the receptive field surround, in units of sigma.
    """

    exp_dir = iof.exp_dir_fixer(exp_name)
    stim_nr = str(stim_nr)
    if filename:
        filename = str(filename)

    if not filename:
        savefolder = 'surroundplots'
        label = ''
    else:
        label = filename.strip('.npz')
        savefolder = 'surroundplots_' + label

    _, metadata = asc.read_spikesheet(exp_name)
    px_size = metadata['pixel_size(um)']

    data = iof.load(exp_name, stim_nr, fname=filename)

    clusters = data['clusters']
    stas = data['stas']
    stx_h = data['stx_h']
    exp_name = data['exp_name']
    stimname = data['stimname']
    max_inds = data['max_inds']
    frame_duration = data['frame_duration']
    filter_length = data['filter_length']
    quals = data['quals'][-1, :]

    spikenrs = data['spikenrs']

    c1 = np.where(spikenrs > spikecutoff)[0]
    c2 = np.where(clusters[:, 2] <= ratingcutoff)[0]
    c3 = np.where(quals > staqualcutoff)[0]

    choose = [i for i in range(clusters.shape[0]) if ((i in c1) and
                                                      (i in c2) and
                                                      (i in c3))]
    clusters = clusters[choose]
    stas = list(np.array(stas)[choose])
    max_inds = list(np.array(max_inds)[choose])

    clusterids = plf.clusters_to_ids(clusters)

    t = np.arange(filter_length)*frame_duration*1000

    # Determine frame size so that the total frame covers
    # an area large enough i.e. 2*700um
    f_size = int(700/(stx_h*px_size))

    del data

    for i in range(clusters.shape[0]):

        sta_original = stas[i]
        max_i_original = max_inds[i]

        try:
            sta, max_i = mf.cut_around_center(sta_original,
                                              max_i_original, f_size)
        except ValueError:
            continue

        fit_frame = sta[:, :, max_i[2]]

        if np.max(fit_frame) != np.max(np.abs(fit_frame)):
            onoroff = -1
        else:
            onoroff = 1



        Y, X = np.meshgrid(np.arange(fit_frame.shape[1]),
                           np.arange(fit_frame.shape[0]))

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    '.*divide by zero*.', RuntimeWarning)
            pars = gfit.gaussfit(fit_frame*onoroff)
            f = gfit.twodgaussian(pars)
            Z = f(X, Y)

        # Correcting for Mahalanobis dist.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    '.*divide by zero*.', RuntimeWarning)
            Zm = np.log((Z-pars[0])/pars[1])
        Zm[np.isinf(Zm)] = np.nan
        Zm = np.sqrt(Zm*-2)

        ax = plt.subplot(1, 2, 1)

        plf.stashow(fit_frame, ax)
        ax.set_aspect('equal')

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=UserWarning)
            warnings.filterwarnings('ignore', '.*invalid value encountered*.')
            ax.contour(Y, X, Zm, [inner_b, outer_b],
                       cmap=plf.RFcolormap(('C0', 'C1')))

        barsize = 100/(stx_h*px_size)
        scalebar = AnchoredSizeBar(ax.transData,
                                   barsize, '100 µm',
                                   'lower left',
                                   pad=1,
                                   color='k',
                                   frameon=False,
                                   size_vertical=.2)
        ax.add_artist(scalebar)

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    '.*invalid value encountered in*.',
                                    RuntimeWarning)
            center_mask = np.logical_not(Zm < inner_b)
            center_mask_3d = np.broadcast_arrays(sta,
                                                 center_mask[..., None])[1]
            surround_mask = np.logical_not(np.logical_and(Zm > inner_b,
                                                          Zm < outer_b))
            surround_mask_3d = np.broadcast_arrays(sta,
                                                   surround_mask[..., None])[1]

        sta_center = np.ma.array(sta, mask=center_mask_3d)
        sta_surround = np.ma.array(sta, mask=surround_mask_3d)

        sta_center_temporal = np.mean(sta_center, axis=(0, 1))
        sta_surround_temporal = np.mean(sta_surround, axis=(0, 1))

        ax1 = plt.subplot(1, 2, 2)
        l1 = ax1.plot(t, sta_center_temporal,
                      label='Center\n(<{}σ)'.format(inner_b),
                      color='C0')
        sct_max = np.max(np.abs(sta_center_temporal))
        ax1.set_ylim(-sct_max, sct_max)
        ax2 = ax1.twinx()
        l2 = ax2.plot(t, sta_surround_temporal,
                      label='Surround\n({}σ<x<{}σ)'.format(inner_b, outer_b),
                      color='C1')
        sst_max = np.max(np.abs(sta_surround_temporal))
        ax2.set_ylim(-sst_max, sst_max)
        plf.spineless(ax1)
        plf.spineless(ax2)
        ax1.tick_params('y', colors='C0')
        ax2.tick_params('y', colors='C1')
        plt.xlabel('Time[ms]')
        plt.axhline(0, linestyle='dashed', linewidth=1)

        lines = l1+l2
        labels = [line.get_label() for line in lines]
        plt.legend(lines, labels, fontsize=7)
        plt.title('Temporal components')
        plt.suptitle(f'{exp_name}\n{stimname}\n{clusterids[i]}')

        plt.subplots_adjust(wspace=.5, top=.85)

        plotpath = os.path.join(exp_dir, 'data_analysis',
                                stimname, savefolder)
        if not os.path.isdir(plotpath):
            os.makedirs(plotpath, exist_ok=True)

        plt.savefig(os.path.join(plotpath, clusterids[i])+'.svg',
                    format='svg', dpi=300)
        plt.close()
    print(f'Plotted checkerflicker surround for {stimname}')
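# A minimal sketch of the center/surround masking idea used above, on toy
# data (shapes and thresholds are illustrative). A boolean mask derived from a
# sigma-distance map is broadcast along the time axis, and masked averages
# over the two spatial dimensions give the temporal components.
import numpy as np

sta = np.random.randn(10, 10, 20)            # toy STA: x, y, time
dist = np.random.rand(10, 10) * 5            # toy sigma-distance map (like Zm)
inner_b, outer_b = 2, 4

center_mask = np.logical_not(dist < inner_b)                        # True = excluded
surround_mask = np.logical_not((dist > inner_b) & (dist < outer_b))

center_mask_3d = np.broadcast_arrays(sta, center_mask[..., None])[1]
surround_mask_3d = np.broadcast_arrays(sta, surround_mask[..., None])[1]

center_temporal = np.mean(np.ma.array(sta, mask=center_mask_3d), axis=(0, 1))
surround_temporal = np.mean(np.ma.array(sta, mask=surround_mask_3d), axis=(0, 1))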
Code example #24
0
def checkerflickeranalyzer(exp_name, stimulusnr, clusterstoanalyze=None,
                           frametimingsfraction=None, cutoff=4):
    """
    Analyzes checkerflicker data. Saves the results in .npz and .h5
    formats.

    Parameters:
    ----------
        exp_name:
            Experiment name.
        stimulusnr:
            Number of the stimulus to be analyzed.
        clusterstoanalyze:
            Number of clusters to analyze. Default is None.

            First N cells will be analyzed if this parameter is given.
            In case of long recordings it might make sense to first
            look at a subset of cells before starting to analyze
            the whole dataset.

        frametimingsfraction:
            Fraction of the recording to analyze. Should be a number
            between 0 and 1. e.g. 0.3 will analyze the first 30% of
            the whole recording.
        cutoff:
           Worst spike-sorting rating to include in the analysis. Default
           is 4. This value comes from the manual rating of each
           cluster.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)
    try:
        stimfiles = glob.glob(os.path.join(exp_dir, '%s_*.mcd' % stimulusnr))
        stimfiles = np.sort(stimfiles)[0]
    except IndexError:
        raise IOError('File not found: %s_*.mcd' % os.path.join(exp_dir,
                                                                str(stimulusnr)))
    stimname = os.path.split(stimfiles)[-1]
    stimname = stimname.split('.mcd')[0]
    exp_name = os.path.split(exp_dir)[-1]

    clusters, metadata = asc.read_spikesheet(exp_dir, cutoff=cutoff)

    # Check that the inputs are as expected.
    if clusterstoanalyze:
        if clusterstoanalyze > len(clusters[:, 0]):
            warnings.warn('clusterstoanalyze is larger '
                          'than number of clusters in dataset. '
                          'All cells will be included.')
            clusterstoanalyze = None
    if frametimingsfraction:
        if not 0 < frametimingsfraction < 1:
            raise ValueError('Invalid input for frametimingsfraction: {}. '
                             'It should be a number between 0 and 1'
                             ''.format(frametimingsfraction))

    scr_width = metadata['screen_width']
    scr_height = metadata['screen_height']

    parameters = asc.read_parameters(exp_dir, stimulusnr)

    stx_h = parameters['stixelheight']
    stx_w = parameters['stixelwidth']

    # Check whether any parameters are given for margins, calculate
    # screen dimensions.
    marginkeys = ['tmargin', 'bmargin', 'rmargin', 'lmargin']
    margins = []
    for key in marginkeys:
        try:
            margins.append(parameters[key])
        except KeyError:
            margins.append(0)
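# The listing above is truncated after collecting the margins. A hedged sketch
# of how they are presumably used to derive the stimulus dimensions in
# stixels (all numbers below are illustrative; compare the checkerflicker
# branch of loadstim further down):
scr_width, scr_height = 800, 600        # screen size in pixels
stx_w, stx_h = 5, 5                     # stixel size in pixels
tmargin, bmargin, rmargin, lmargin = 0, 0, 0, 0

# Subtract bottom and top margins from the vertical dimension,
# left and right margins from the horizontal dimension.
scr_width = scr_width - (rmargin + lmargin)
scr_height = scr_height - (tmargin + bmargin)
sx, sy = scr_height / stx_h, scr_width / stx_w
if sx % 1 == 0 and sy % 1 == 0:
    sx, sy = int(sx), int(sy)           # stixel counts must be whole numbers
else:
    raise ValueError('sx and sy must be integers')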
Code example #25
0
def stripesurround_SVD(exp_name, stimnrs, nrcomponents=5):
    """
    nrcomponents:
        first N components of singular value decomposition (SVD)
        will be used to reduce noise.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)

    if isinstance(stimnrs, int):
        stimnrs = [stimnrs]

    for stimnr in stimnrs:
        data = iof.load(exp_name, stimnr)

        _, metadata = asc.read_spikesheet(exp_dir)
        px_size = metadata['pixel_size(um)']

        clusters = data['clusters']
        stas = data['stas']
        max_inds = data['max_inds']
        filter_length = data['filter_length']
        stx_w = data['stx_w']
        exp_name = data['exp_name']
        stimname = data['stimname']
        frame_duration = data['frame_duration']
        quals = data['quals']

        # Record which clusters are ignored during analysis
        try:
            included = data['included']
        except KeyError:
            included = [True] * clusters.shape[0]

        # Average STA values 100 ms around the brightest frame to
        # minimize noise
        cut_time = int(100 / (frame_duration * 1000) / 2)

        # Tolerance for distance between center and surround
        # distributions 60 μm
        dtol = int((60 / px_size) / 2)

        clusterids = plf.clusters_to_ids(clusters)

        fsize = int(700 / (stx_w * px_size))
        t = np.arange(filter_length) * frame_duration * 1000
        vscale = fsize * stx_w * px_size

        cs_inds = np.empty(clusters.shape[0])
        polarities = np.empty(clusters.shape[0])

        savepath = os.path.join(exp_dir, 'data_analysis', stimname)

        for i in range(clusters.shape[0]):
            sta = stas[i]
            max_i = max_inds[i]

            # From this point on, use the low-rank approximation
            # version
            sta_reduced = sumcomponent(nrcomponents, sta)

            try:
                sta_reduced, max_i = msc.cutstripe(sta_reduced, max_i,
                                                   fsize * 2)
            except ValueError as e:
                if str(e) == 'Cutting outside the STA range.':
                    included[i] = False
                    continue
                else:
                    print(f'Error while analyzing {stimname}\n' +
                          f'Index:{i}    Cluster:{clusterids[i]}')
                    raise

            # Isolate the time point from which the fit will
            # be obtained
            if max_i[1] < cut_time:
                max_i[1] = cut_time + 1
            fitv = np.mean(sta_reduced[:, max_i[1] - cut_time:max_i[1] +
                                       cut_time + 1],
                           axis=1)

            # Make a space vector
            s = np.arange(fitv.shape[0])

            if np.max(fitv) != np.max(np.abs(fitv)):
                onoroff = -1
            else:
                onoroff = 1
            polarities[i] = onoroff
            # Determine the peak values for center and surround
            # to give as initial parameters for curve fitting
            centerpeak = onoroff * np.max(fitv * onoroff)
            surroundpeak = onoroff * np.max(fitv * -onoroff)

            # Define initial guesses for the center and surround gaussians
            # First set of values are for center, second for surround.
            p_initial = [centerpeak, max_i[0], 2, surroundpeak, max_i[0], 8]
            if onoroff == 1:
                bounds = ([0, -np.inf, -np.inf, 0, max_i[0] - dtol, 4], [
                    np.inf, np.inf, np.inf, np.inf, max_i[0] + dtol, 20
                ])
            elif onoroff == -1:
                bounds = ([
                    -np.inf, -np.inf, -np.inf, -np.inf, max_i[0] - dtol, 4
                ], [0, np.inf, np.inf, 0, max_i[0] + dtol, 20])

            try:
                popt, _ = curve_fit(centersurround_onedim,
                                    s,
                                    fitv,
                                    p0=p_initial,
                                    bounds=bounds)
            except (ValueError, RuntimeError) as e:
                er = str(e)
                if (er == "`x0` is infeasible."
                        or er.startswith("Optimal parameters not found")):
                    popt, _ = curve_fit(onedgauss, s, fitv, p0=p_initial[:3])
                    popt = np.append(popt, [0, popt[1], popt[2]])
                elif er == "array must not contain infs or NaNs":
                    included[i] = False
                    continue
                else:
                    print(f'Error while analyzing {stimname}\n' +
                          f'Index:{i}    Cluster:{clusterids[i]}')
                    import pdb
                    pdb.set_trace()
                    raise

            fit = centersurround_onedim(s, *popt)
            popt[0] = popt[0] * onoroff
            popt[3] = popt[3] * onoroff

            csi = popt[3] / popt[0]
            cs_inds[i] = csi

            plt.figure(figsize=(10, 9))
            ax = plt.subplot(121)
            plf.stashow(sta_reduced, ax, extent=[0, t[-1], -vscale, vscale])
            ax.set_xlabel('Time [ms]')
            ax.set_ylabel('Distance [µm]')
            ax.set_title(f'Using first {nrcomponents} components of SVD',
                         fontsize='small')

            ax = plt.subplot(122)
            plf.spineless(ax)
            ax.set_yticks([])
            # We need to flip the vertical axis to match
            # with the STA next to it
            plt.plot(onoroff * fitv, -s, label='Data')
            plt.plot(onoroff * fit, -s, label='Fit')
            plt.axvline(0, linestyle='dashed', alpha=.5)
            plt.title(f'Center: a: {popt[0]:4.2f}, μ: {popt[1]:4.2f},' +
                      f' σ: {popt[2]:4.2f}\n' +
                      f'Surround: a: {popt[3]:4.2f}, μ: {popt[4]:4.2f},' +
                      f' σ: {popt[5]:4.2f}' + f'\n CS index: {csi:4.2f}')
            plt.subplots_adjust(top=.85)
            plt.suptitle(f'{exp_name}\n{stimname}\n{clusterids[i]} ' +
                         f'Q: {quals[i]:4.2f}')
            os.makedirs(os.path.join(savepath, 'stripesurrounds_SVD'),
                        exist_ok=True)
            plt.savefig(os.path.join(savepath, 'stripesurrounds_SVD',
                                     clusterids[i] + '.svg'),
                        bbox_inches='tight')
            plt.close()

        data.update({
            'cs_inds': cs_inds,
            'polarities': polarities,
            'included': included
        })
        np.savez(os.path.join(savepath, f'{stimnr}_data_SVD.npz'), **data)
        print(f'Surround plotted and saved for {stimname}.')
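# centersurround_onedim and onedgauss are not shown in this excerpt. Judging
# from the six-element p_initial (amplitude, mean, sigma for the center, then
# for the surround) and the CS index popt[3]/popt[0], a plausible form is a
# difference of two 1D Gaussians; this is a hedged sketch, not necessarily the
# original implementation.
import numpy as np

def onedgauss_sketch(x, a, mu, sigma):
    # Single 1D Gaussian with amplitude a, mean mu and width sigma.
    return a * np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))

def centersurround_onedim_sketch(x, a_c, mu_c, s_c, a_s, mu_s, s_s):
    # Narrow center Gaussian minus a broader surround Gaussian.
    return (onedgauss_sketch(x, a_c, mu_c, s_c)
            - onedgauss_sketch(x, a_s, mu_s, s_s))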
Code example #26
0
def plotcheckersvd(expname, stimnr, filename=None):
    """
    Plot the first two components of SVD analysis.
    """
    if filename:
        filename = str(filename)

    exp_dir = iof.exp_dir_fixer(expname)
    _, metadata = asc.read_spikesheet(exp_dir)
    px_size = metadata['pixel_size(um)']

    if not filename:
        savefolder = 'SVD'
        label = ''
    else:
        label = filename.strip('.npz')
        savefolder = 'SVD_' + label

    data = iof.load(expname, stimnr, filename)

    stas = data['stas']
    max_inds = data['max_inds']
    clusters = data['clusters']
    stx_h = data['stx_h']
    frame_duration = data['frame_duration']
    stimname = data['stimname']
    exp_name = data['exp_name']

    clusterids = plf.clusters_to_ids(clusters)

    # Determine frame size so that the total frame covers
    # an area large enough i.e. 2*700um
    f_size = int(700 / (stx_h * px_size))

    for i in range(clusters.shape[0]):
        sta = stas[i]
        max_i = max_inds[i]

        try:
            sta, max_i = msc.cut_around_center(sta, max_i, f_size=f_size)
        except ValueError:
            continue
        fit_frame = sta[:, :, max_i[2]]

        try:
            sp1, sp2, t1, t2, _, _ = msc.svd(sta)
        # If the STA is noisy (msc.cut_around_center produces an empty array),
        # SVD cannot be calculated; in that case we skip the cluster.
        except np.linalg.LinAlgError:
            continue

        plotthese = [fit_frame, sp1, sp2]

        plt.figure(dpi=200)
        plt.suptitle(f'{exp_name}\n{stimname}\n{clusterids[i]}')
        rows = 2
        cols = 3

        vmax = np.max(np.abs([sp1, sp2]))
        vmin = -vmax

        for j in range(len(plotthese)):
            ax = plt.subplot(rows, cols, j + 1)
            im = plt.imshow(plotthese[j],
                            vmin=vmin,
                            vmax=vmax,
                            cmap=iof.config('colormap'))
            ax.set_aspect('equal')
            plt.xticks([])
            plt.yticks([])
            for child in ax.get_children():
                if isinstance(child, matplotlib.spines.Spine):
                    child.set_color('C{}'.format(j % 3))
                    child.set_linewidth(2)
            if j == 0:
                plt.title('center px')
            elif j == 1:
                plt.title('SVD spatial 1')
            elif j == 2:
                plt.title('SVD spatial 2')
                plf.colorbar(im, ticks=[vmin, 0, vmax], format='%.2f')
                barsize = 100 / (stx_h * px_size)
                scalebar = AnchoredSizeBar(ax.transData,
                                           barsize,
                                           '100 µm',
                                           'lower left',
                                           pad=0,
                                           color='k',
                                           frameon=False,
                                           size_vertical=.3)
                ax.add_artist(scalebar)

        t = np.arange(sta.shape[-1]) * frame_duration * 1000
        plt.subplots_adjust(wspace=0.3, hspace=0)
        ax = plt.subplot(rows, 1, 2)
        plt.plot(t, sta[max_i[0], max_i[1], :], label='center px')
        plt.plot(t, t1, label='Temporal 1')
        plt.plot(t, t2, label='Temporal 2')
        plt.xlabel('Time[ms]')
        plf.spineless(ax, 'trlb')  # Turn off spines using custom function

        plotpath = os.path.join(exp_dir, 'data_analysis', stimname, savefolder)
        if not os.path.isdir(plotpath):
            os.makedirs(plotpath, exist_ok=True)
        plt.savefig(os.path.join(plotpath, clusterids[i] + '.svg'), dpi=300)
        plt.close()
    print(f'Plotted checkerflicker SVD for {stimname}')
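# msc.svd is not shown here. A minimal sketch of the presumed idea: unfold the
# 3D STA (x, y, time) into a (space x time) matrix, decompose it with numpy's
# SVD, and reshape the first two spatial components back to 2D for plotting.
# This is an assumption about the helper, not its actual implementation.
import numpy as np

sta = np.random.randn(15, 15, 20)            # toy STA
m = sta.reshape(-1, sta.shape[-1])           # (space, time) matrix
u, s, vt = np.linalg.svd(m, full_matrices=False)

sp1 = (u[:, 0] * s[0]).reshape(sta.shape[:2])
sp2 = (u[:, 1] * s[1]).reshape(sta.shape[:2])
t1, t2 = vt[0, :], vt[1, :]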
Code example #27
0
def randomizestripes(label, exp_name='20180124', stim_nrs=6):
    exp_dir = iof.exp_dir_fixer(exp_name)

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]

    for stim_nr in stim_nrs:
        stimname = iof.getstimname(exp_name, stim_nr)

        clusters, metadata = asc.read_spikesheet(exp_dir)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        scr_width = metadata['screen_width']
        px_size = metadata['pixel_size(um)']

        stx_w = parameters['stixelwidth']
        stx_h = parameters['stixelheight']

        if (stx_h/stx_w) < 2:
            raise ValueError('Make sure the stimulus is stripeflicker.')

        sy = scr_width/stx_w
#        sy = sy*4
        sy = int(sy)

        nblinks = parameters['Nblinks']
        try:
            bw = parameters['blackwhite']
        except KeyError:
            bw = False

        try:
            seed = parameters['seed']
            initialseed = parameters['seed']
        except KeyError:
            seed = -10000
            initialseed = -10000

        if nblinks == 1:
            ft_on, ft_off = asc.readframetimes(exp_dir, stim_nr,
                                               returnoffsets=True)
            # Initialize an empty array twice the size of either one and
            # interleave the on and off frame times.
            frametimings = np.empty(ft_on.shape[0]*2, dtype=float)
            frametimings[::2] = ft_on
            frametimings[1::2] = ft_off
            # Set filter length so that temporal filter is ~600 ms.
            # The unit here is number of frames.
            filter_length = 40
        elif nblinks == 2:
            frametimings = asc.readframetimes(exp_dir, stim_nr)
            filter_length = 20
        else:
            raise ValueError('Unexpected value for nblinks.')

        # Omit everything that happens before the first 10 seconds
        cut_time = 10

        frame_duration = np.average(np.ediff1d(frametimings))
        total_frames = int(frametimings.shape[0]/4)

        all_spiketimes = []
        # Store spike-triggered averages in a list of correctly shaped arrays.
        stas = []

        for i in range(len(clusters[:, 0])):
            spikes_orig = asc.read_raster(exp_dir, stim_nr,
                                         clusters[i, 0], clusters[i, 1])
            spikesneeded = spikes_orig.shape[0]*1000

            spiketimes = np.random.random_sample(spikesneeded)*spikes_orig.max()
            spiketimes = np.sort(spiketimes)
            spikes = asc.binspikes(spiketimes, frametimings)
            all_spiketimes.append(spikes)
            stas.append(np.zeros((sy, filter_length)))

        if bw:
            randnrs, seed = randpy.ran1(seed, sy*total_frames)
#            randnrs = mersennetw(sy*total_frames, seed1=seed)
            randnrs = [1 if i > .5 else -1 for i in randnrs]
        else:
            randnrs, seed = randpy.gasdev(seed, sy*total_frames)

        stimulus = np.reshape(randnrs, (sy, total_frames), order='F')
        del randnrs

        for k in range(filter_length, total_frames-filter_length+1):
            stim_small = stimulus[:, k-filter_length+1:k+1][:, ::-1]
            for j in range(clusters.shape[0]):
                spikes = all_spiketimes[j]
                if spikes[k] != 0 and frametimings[k]>cut_time:
                    stas[j] += spikes[k]*stim_small

        max_inds = []

        spikenrs = np.array([spikearr.sum() for spikearr in all_spiketimes])

        quals = np.array([])

        for i in range(clusters.shape[0]):
            stas[i] = stas[i]/spikenrs[i]
            # Find the pixel with largest absolute value
            max_i = np.squeeze(np.where(np.abs(stas[i])
                                        == np.max(np.abs(stas[i]))))
            # If there are multiple pixels with largest value,
            # take the first one.
            if max_i.shape != (2,):
                try:
                    max_i = max_i[:, 0]
                # If max_i cannot be found just set it to zeros.
                except IndexError:
                    max_i = np.array([0, 0])

            max_inds.append(max_i)

            quals = np.append(quals, asc.staquality(stas[i]))

#        savefname = str(stim_nr)+'_data'
#        savepath = pjoin(exp_dir, 'data_analysis', stimname)
#
#        exp_name = os.path.split(exp_dir)[-1]
#
#        if not os.path.isdir(savepath):
#            os.makedirs(savepath, exist_ok=True)
#        savepath = os.path.join(savepath, savefname)
#
#        keystosave = ['stas', 'max_inds', 'clusters', 'sy',
#                      'frame_duration', 'all_spiketimes', 'stimname',
#                      'total_frames', 'stx_w', 'spikenrs', 'bw',
#                      'quals', 'nblinks', 'filter_length', 'exp_name']
#        data_in_dict = {}
#        for key in keystosave:
#            data_in_dict[key] = locals()[key]
#
#        np.savez(savepath, **data_in_dict)
#        print(f'Analysis of {stimname} completed.')


        clusterids = plf.clusters_to_ids(clusters)

#        assert(initialseed.ty)
        correction = corrector(sy, total_frames, filter_length, initialseed)
        correction = np.outer(correction, np.ones(filter_length))

        t = np.arange(filter_length)*frame_duration*1000
        vscale = int(stas[0].shape[0] * stx_w*px_size/1000)
        for i in range(clusters.shape[0]):
            sta = stas[i]-correction

            vmax = 0.03
            vmin = -vmax
            plt.figure(figsize=(6, 15))
            ax = plt.subplot(111)
            im = ax.imshow(sta, cmap='RdBu', vmin=vmin, vmax=vmax,
                           extent=[0, t[-1], -vscale, vscale], aspect='auto')
            plt.xlabel('Time [ms]')
            plt.ylabel('Distance [mm]')

            plf.spineless(ax)
            plf.colorbar(im, ticks=[vmin, 0, vmax], format='%.2f', size='2%')
            plt.suptitle('{}\n{}\n'
                         '{} Rating: {}\n'
                         'nrofspikes {:5.0f}'.format(exp_name,
                                                       stimname,
                                                       clusterids[i],
                                                       clusters[i][2],
                                                       spikenrs[i]))
            plt.subplots_adjust(top=.90)
            savepath = os.path.join(exp_dir, 'data_analysis',
                                    stimname, 'STAs_randomized')
            svgpath = pjoin(savepath, label)
            if not os.path.isdir(svgpath):
                os.makedirs(svgpath, exist_ok=True)
            plt.savefig(os.path.join(svgpath, clusterids[i]+'.svg'),
                        bbox_inches='tight')
            plt.close()

    os.system(f"convert -delay 25 {svgpath}/*svg {savepath}/animated_{label}.gif")
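# asc.binspikes is not shown in this excerpt. A plausible minimal equivalent
# (an assumption, not the library function) for the shuffled-spike control
# above: draw surrogate spike times uniformly over the recording and count how
# many fall between consecutive frame onsets.
import numpy as np

def binspikes_sketch(spiketimes, frametimings):
    counts, _ = np.histogram(spiketimes, bins=frametimings)
    return counts                                  # spike count per frame interval

frametimings = np.arange(0, 10, 1 / 60)            # illustrative frame times (s)
spiketimes = np.sort(np.random.random_sample(500) * frametimings[-1])
spikes_per_frame = binspikes_sketch(spiketimes, frametimings)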
Code example #28
0
File: genlinmod.py Project: gollischlab/pymer
def loadstim(exp, stim_nr, maxframenr=10000):
    """
    Recreate the stimulus based on the seed for a given stimulus type.

    Each type of stimulus requires a different way of handling the
    random numbers from the PRNG.
    """
    sortedstim = asc.stimulisorter(exp)
    clusters, metadata = asc.read_spikesheet(exp)
    pars = asc.read_parameters(exp, stim_nr)

    for key, val in sortedstim.items():
        if stim_nr in val:
            stimtype = key
    if stimtype in ['fff', 'stripeflicker', 'checkerflicker', 'frozennoise']:
        seed = pars.get('seed', -10000)
        bw = pars.get('blackwhite', False)
        filter_length, frametimings = asc.ft_nblinks(exp, stim_nr)
        total_frames = frametimings.shape[0]

        if stimtype == 'fff':
            if bw:
                randnrs, seed = randpy.ranb(seed, total_frames)
                # Since ranb returns zeros and ones, we need to convert
                # the zeros into -1s.
                stimulus = np.array(randnrs) * 2 - 1
            else:
                randnrs, seed = randpy.gasdev(seed, total_frames)
                stimulus = np.array(randnrs)
        elif stimtype in ['checkerflicker', 'frozennoise']:
            scr_width = metadata['screen_width']
            scr_height = metadata['screen_height']
            stx_h = pars['stixelheight']
            stx_w = pars['stixelwidth']
            # Check whether any parameters are given for margins, calculate
            # screen dimensions.
            marginkeys = ['tmargin', 'bmargin', 'rmargin', 'lmargin']
            margins = []
            for key in marginkeys:
                margins.append(pars.get(key, 0))
            # Subtract bottom and top from vertical dimension; left and right
            # from horizontal dimension
            scr_width = scr_width - sum(margins[2:])
            scr_height = scr_height - sum(margins[:2])
            sx, sy = scr_height / stx_h, scr_width / stx_w
            # Make sure that the numbers of stimulus pixels are integers.
            # Rounding down is also possible but might require
            # other considerations.
            if sx % 1 == 0 and sy % 1 == 0:
                sx, sy = int(sx), int(sy)
            else:
                raise ValueError('sx and sy must be integers')

            # HINT: fixing stimulus length for now because of memory
            # capacity
            total_frames = maxframenr

            randnrs, seed = randpy.ranb(seed, sx * sy * total_frames)
            # Reshape and change 0's to -1's
            stimulus = np.reshape(randnrs,
                                  (sx, sy, total_frames), order='F') * 2 - 1
        return stimulus
    if stimtype == 'OMB':
        stimframes = pars.get('stimFrames', 108000)
        preframes = pars.get('preFrames', 200)
        nblinks = pars.get('Nblinks', 2)

        seed = pars.get('seed', -10000)
        seed2 = pars.get('objseed', -1000)

        stepsize = pars.get('stepsize', 2)

        ntotal = int(stimframes / nblinks)

        clusters, metadata = asc.read_spikesheet(exp)

        refresh_rate = metadata['refresh_rate']
        filter_length, frametimings = asc.ft_nblinks(exp, stim_nr, nblinks,
                                                     refresh_rate)
        frame_duration = np.ediff1d(frametimings).mean()
        frametimings = frametimings[:-1]
        if ntotal != frametimings.shape[0]:
            print(
                f'For {exp}\nstimulus {iof.getstimname(exp, stim_nr)} :\n'
                f'Number of frames specified in the parameters file ({ntotal}'
                f' frames) and frametimings ({frametimings.shape[0]}) do not'
                ' agree!'
                ' The stimulus was possibly interrupted during recording.'
                ' ntotal is changed to match actual frametimings.')
            ntotal = frametimings.shape[0]

        randnrs, seed = randpy.gasdev(seed, ntotal * 2)
        randnrs = np.array(randnrs) * stepsize

        xsteps = randnrs[::2]
        ysteps = randnrs[1::2]

        return np.vstack((xsteps, ysteps))
    return None
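# Hedged usage sketch for loadstim; the experiment name and stimulus number
# below are placeholders, not real data. For checkerflicker-type stimuli the
# return value is an (sx, sy, frames) array of -1/+1 values; for OMB it is a
# (2, ntotal) array of x/y step sizes.
stimulus = loadstim('20180124', 8)
print(stimulus.shape)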
Code example #29
0
def onoffstepsanalyzer(exp_name, stim_nrs):
    """
    Analyze on-off steps data, plot and save the results. Creates a
    directory data_analysis/<stimulus_name> and saves the figures as
    SVG (and as PDF in a subfolder).

    Parameters:
        exp_name:
            Experiment name.
        stim_nrs:
            Number(s) of the on-off steps stimuli to analyze.

    """

    exp_dir = iof.exp_dir_fixer(exp_name)

    exp_name = os.path.split(exp_dir)[-1]

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]

    for stim_nr in stim_nrs:
        stim_nr = str(stim_nr)

        stimname = iof.getstimname(exp_dir, stim_nr)

        clusters, metadata = asc.read_spikesheet(exp_dir, cutoff=4)

        clusterids = plf.clusters_to_ids(clusters)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        refresh_rate = metadata['refresh_rate']

        # Divide by the refresh rate to convert from number of
        # frames to seconds
        pars_stim_duration = parameters['Nframes'] / refresh_rate

        pars_preframe_duration = parameters.get('preframes', 0) / refresh_rate

        if pars_preframe_duration == 0:
            nopreframe = True
            nr_periods = 2
        else:
            nopreframe = False
            nr_periods = 4
        # The first trial will be discarded by dropping the first four frames.
        # The original frametimings are kept and re-initialized for each cell;
        # otherwise the array would shrink with every iteration.
        frametimings_original = asc.readframetimes(exp_dir, stim_nr)

        trial_durs = stim_prefr_durations_frametimes(frametimings_original,
                                                     nr_per=nr_periods)
        avg_trial_durs = trial_durs.mean(axis=0)

        if not nopreframe:
            stim_duration = avg_trial_durs[1::2].mean()
            preframe_duration = avg_trial_durs[::2].mean()
        else:
            stim_duration = avg_trial_durs.mean()
            preframe_duration = 0
            warnings.warn('On-off steps analysis with no preframes '
                          'is not tested, proceed with caution.')

        contrast = parameters['contrast']

        total_cycle = avg_trial_durs.sum()

        # Set the bins to be 10 ms
        tstep = 0.01
        bins = int(total_cycle / tstep) + 1
        t = np.linspace(0, total_cycle, num=bins)

        # Setup for onoff bias calculation
        onbegin = preframe_duration
        onend = onbegin + stim_duration
        offbegin = onend + preframe_duration
        offend = offbegin + stim_duration

        # Determine the indices for each period
        a = []
        for i in [onbegin, onend, offbegin, offend]:
            yo = np.asscalar(np.where(np.abs(t - i) < tstep / 1.5)[0][-1])
            a.append(yo)

        # To keep the stimulus offset from affecting the bias, use the
        # last 1 s of each preframe period
        prefs = []
        for i in [onbegin - 1, onbegin, offbegin - 1, offbegin]:
            yo = np.asscalar(np.where(np.abs(t - i) < tstep / 1.5)[0][-1])
            prefs.append(yo)

        onper = slice(a[0], a[1])
        offper = slice(a[2], a[3])

        pref1 = slice(prefs[0], prefs[1])
        pref2 = slice(prefs[2], prefs[3])

        onoffbias = np.empty(clusters.shape[0])
        baselines = np.empty(clusters.shape[0])

        savedir = os.path.join(exp_dir, 'data_analysis', stimname)
        os.makedirs(os.path.join(savedir, 'pdf'), exist_ok=True)

        # Collect all firing rates in a list
        all_frs = []

        for i in range(len(clusters[:, 0])):
            spikes = asc.read_raster(exp_dir, stim_nr, clusters[i, 0],
                                     clusters[i, 1])
            frametimings = frametimings_original
            # Discard all the spikes that happen after the last frame
            spikes = spikes[spikes < frametimings[-1]]
            # Discard the first trial
            spikes = spikes[spikes > frametimings[4]]
            frametimings = frametimings[4:]
            # Find which trial each spike belongs to, and subtract one
            # to be able to use as indices
            trial_indices = np.digitize(spikes, frametimings[::4]) - 1

            rasterplot = []
            # Iterate over all the trials, create an empty array for each
            for j in range(int(np.ceil(frametimings.max() / total_cycle))):
                rasterplot.append([])
            # plt.eventplot requires a list containing spikes in each
            # trial separately
            for k in range(len(spikes)):
                trial = trial_indices[k]
                rasterplot[trial].append(spikes[k] - frametimings[::4][trial])

            # Workaround for matplotlib issue #6412.
            # https://github.com/matplotlib/matplotlib/issues/6412
            # If a cell has no spikes for the first trial i.e. the first
            # element of the list is empty, an error is raised due to
            # a plt.eventplot bug.
            if len(rasterplot[0]) == 0:
                rasterplot[0] = [-1]

            plt.figure(figsize=(9, 9))
            ax1 = plt.subplot(211)
            plt.eventplot(rasterplot, linewidth=.5, color='r')
            # Set the axis so they align with the rectangles
            plt.axis([0, total_cycle, -1, len(rasterplot)])

            # Draw rectangles to represent different parts of the on off
            # steps stimulus
            plf.drawonoff(ax1,
                          preframe_duration,
                          stim_duration,
                          contrast=contrast)

            plt.ylabel('Trial')
            plt.gca().invert_yaxis()
            ax1.set_xticks([])
            plf.spineless(ax1)

            # Collect all trials in one array to calculate firing rates
            ras = np.array([])
            for ii in range(len(rasterplot)):
                ras = np.append(ras, rasterplot[ii])

            # Sort into time bins and count how many spikes happened in each
            fr = np.digitize(ras, t)
            fr = np.bincount(fr)
            # Normalize so that units are spikes/s
            fr = fr * (bins / total_cycle) / (len(rasterplot) - 1)
            # Equalize the length of the two arrays for plotting.
            # np.bincount(x) normally produces x.max()+1 bins
            if fr.shape[0] == bins + 1:
                fr = fr[:-1]
            # If there aren't any spikes in the last trial, the firing
            # rate array is too short and plt.plot raises an error.
            while fr.shape[0] < bins:
                fr = np.append(fr, 0)

            prefr = np.append(fr[pref1], fr[pref2])
            baseline = np.median(np.round(prefr))

            fr_corr = fr - baseline

            r_on = np.sum(fr_corr[onper])
            r_off = np.sum(fr_corr[offper])

            if r_on == 0 and r_off == 0:
                bias = np.nan
            else:
                bias = (r_on - r_off) / (np.abs(r_on) + np.abs(r_off))

            plt.suptitle(f'{exp_name}\n{stimname}'
                         f'\n{clusterids[i]} Rating: {clusters[i][2]}\n')

            if fr.max() < 20:
                bias = np.nan

            onoffbias[i] = bias
            baselines[i] = baseline

            all_frs.append(fr)

            ax2 = plt.subplot(212)
            plt.plot(t, fr)
            for eachslice in [onper, offper]:
                ax2.fill_between(t[eachslice],
                                 fr[eachslice],
                                 baseline,
                                 where=fr[eachslice] > baseline,
                                 facecolor='lightgray')

            plf.spineless(ax2)
            plt.axis([0, total_cycle, fr.min(), fr.max()])

            plt.title(f'Baseline: {baseline:2.0f} Hz Bias: {bias:0.2f}')
            plt.xlabel('Time[s]')
            plt.ylabel('Firing rate[spikes/s]')

            # Save as svg for looking through data, pdf for
            # inserting into presentations
            plt.savefig(
                savedir +
                '/{:0>3}{:0>2}.svg'.format(clusters[i, 0], clusters[i, 1]),
                format='svg',
                bbox_inches='tight')
            plt.savefig(os.path.join(
                savedir, 'pdf', '{:0>3}'
                '{:0>2}.pdf'.format(clusters[i, 0], clusters[i, 1])),
                        format='pdf',
                        bbox_inches='tight')
            plt.close()

        keystosave = [
            'clusters', 'total_cycle', 'bins', 'tstep', 'stimname',
            'stim_duration', 'preframe_duration', 'contrast', 'all_frs', 't',
            'exp_name', 'onoffbias', 'baselines'
        ]
        data_in_dict = {}
        for key in keystosave:
            data_in_dict[key] = locals()[key]

        np.savez(os.path.join(savedir, stim_nr + '_data'), **data_in_dict)
        print(f'Analysis of {stimname} completed.')
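# A tiny numeric illustration of the bias index computed above (values are
# made up): baseline-corrected spike counts in the ON and OFF periods are
# combined as (r_on - r_off) / (|r_on| + |r_off|), which is +1 for a pure ON
# response, -1 for a pure OFF response and 0 for equal responses.
r_on, r_off = 120.0, 30.0
bias = (r_on - r_off) / (abs(r_on) + abs(r_off))
print(f'bias = {bias:0.2f}')   # 0.60 -> ON-dominated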
Code example #30
0
def plotstripestas(exp_name, stim_nrs):
    """
    Plot and save all the STAs from multiple stripe flicker stimuli.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)

    _, metadata = asc.read_spikesheet(exp_dir)
    px_size = metadata['pixel_size(um)']

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]
    elif len(stim_nrs) == 0:
        return

    for stim_nr in stim_nrs:
        data = iof.load(exp_name, stim_nr)

        clusters = data['clusters']
        stas = data['stas']
        filter_length = data['filter_length']
        stx_w = data['stx_w']
        exp_name = data['exp_name']
        stimname = data['stimname']
        frame_duration = data['frame_duration']
        quals = data['quals']

        clusterids = plf.clusters_to_ids(clusters)

        # Time axis (in ms) and vertical extent (in mm) for plotting
        t = np.arange(filter_length) * frame_duration * 1000
        vscale = int(stas[0].shape[0] * stx_w * px_size / 1000)
        for i in range(clusters.shape[0]):
            sta = stas[i]

            vmax = np.max(np.abs(sta))
            vmin = -vmax
            plt.figure(figsize=(6, 15))
            ax = plt.subplot(111)
            im = ax.imshow(sta,
                           cmap='RdBu',
                           vmin=vmin,
                           vmax=vmax,
                           extent=[0, t[-1], -vscale, vscale],
                           aspect='auto')
            plt.xlabel('Time [ms]')
            plt.ylabel('Distance [mm]')

            plf.spineless(ax)
            plf.colorbar(im, ticks=[vmin, 0, vmax], format='%.2f', size='2%')
            plt.suptitle(f'{exp_name}\n{stimname}\n'
                         f'{clusterids[i]} Rating: {clusters[i][2]}\n'
                         f'STA quality: {quals[i]:4.2f}')
            plt.subplots_adjust(top=.90)
            savepath = os.path.join(exp_dir, 'data_analysis', stimname, 'STAs')
            if not os.path.isdir(savepath):
                os.makedirs(savepath, exist_ok=True)
            plt.savefig(os.path.join(savepath, clusterids[i] + '.svg'),
                        bbox_inches='tight')
            plt.close()
        print(f'Plotting of {stimname} completed.')