def corr4(i1, i2, window, bin_length=0.001):
    """
    Correlation between spike trains. Accepts spike rasters.

    Parameters
    --------
    i1, i2:
        Indices of spike rasters to compare
    window:
        The maximum time lag to consider, in seconds.
    bin_length:
        Length of each bin. Default is 1 ms.
    """
    i1 = clusters[i1][:2]
    i2 = clusters[i2][:2]
    x1 = asc.read_raster(exp, stim, *i1)
    x2 = asc.read_raster(exp, stim, *i2)
    corr_bins = np.arange(-window, window, bin_length)
    corr = pycorrelate.pcorrelate(x1, x2, corr_bins, normalize=True)
    return corr
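A minimal usage sketch for corr4; like the function body itself, it assumes exp, stim and clusters already exist at module level (e.g. loaded via asc.read_spikesheet) and that the helper modules are imported. The lag axis is reconstructed from the same bin edges the function uses internally.

import numpy as np
import matplotlib.pyplot as plt

window = 0.05  # consider lags up to +/- 50 ms
cc = corr4(0, 1, window)  # cross-correlate the first two clusters
# pcorrelate returns one value per bin, i.e. len(bins) - 1 values
lags = np.arange(-window, window, 0.001)[:-1]
plt.plot(lags, cc)
plt.xlabel('Lag [s]')
plt.show()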
def read_raster(self, i):
    ch, cl = self.clusters[i, :2]
    return asc.read_raster(self.exp, self.stimnr, ch, cl)
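For context, a minimal sketch of the kind of wrapper class this method could belong to; the attribute names self.exp, self.stimnr and self.clusters follow the method body, while the constructor itself is an assumption.

class Stimulus:
    """Hypothetical container for an experiment/stimulus pair."""

    def __init__(self, exp, stimnr):
        self.exp = exp
        self.stimnr = stimnr
        # The cluster table provides the (channel, cluster) pairs
        # that read_raster indexes into.
        self.clusters, self.metadata = asc.read_spikesheet(exp)

    def read_raster(self, i):
        ch, cl = self.clusters[i, :2]
        return asc.read_raster(self.exp, self.stimnr, ch, cl)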
def checkerflickerplusanalyzer(exp_name,
                               stimulusnr,
                               clusterstoanalyze=None,
                               frametimingsfraction=None,
                               cutoff=4):
    """
    Analyzes checkerflicker-like data, typically other stimuli
    interspersed between chunks of checkerflicker,
    e.g. checkerflickerplusmovie, frozennoise.

    Parameters
    ----------
        exp_name:
            Experiment name.
        stimulusnr:
            Number of the stimulus to be analyzed.
        clusterstoanalyze:
            Number of clusters to analyze. Default is None.

            First N cells will be analyzed if this parameter is given.
            In case of long recordings it might make sense to first
            look at a subset of cells before starting to analyze
            the whole dataset.

        frametimingsfraction:
            Fraction of the recording to analyze. Should be a number
            between 0 and 1. e.g. 0.3 will analyze the first 30% of
            the whole recording.
        cutoff:
           Worst cluster rating to include in the analysis. Default
           is 4. Ratings come from the manual rating of each cluster.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)

    stimname = iof.getstimname(exp_dir, stimulusnr)

    exp_name = os.path.split(exp_dir)[-1]

    clusters, metadata = asc.read_spikesheet(exp_dir, cutoff=cutoff)

    # Check that the inputs are as expected.
    if clusterstoanalyze:
        if clusterstoanalyze > len(clusters[:, 0]):
            warnings.warn('clusterstoanalyze is larger '
                          'than number of clusters in dataset. '
                          'All cells will be included.')
            clusterstoanalyze = None
    if frametimingsfraction:
        if not 0 < frametimingsfraction < 1:
            raise ValueError('Invalid input for frametimingsfraction: {}. '
                             'It should be a number between 0 and 1'
                             ''.format(frametimingsfraction))

    scr_width = metadata['screen_width']
    scr_height = metadata['screen_height']

    refresh_rate = metadata['refresh_rate']

    parameters = asc.read_parameters(exp_dir, stimulusnr)

    stx_h = parameters['stixelheight']
    stx_w = parameters['stixelwidth']

    # Check whether any parameters are given for margins and calculate
    # the usable screen dimensions.
    marginkeys = ['tmargin', 'bmargin', 'rmargin', 'lmargin']
    margins = []
    for key in marginkeys:
        margins.append(parameters.get(key, 0))

    # Subtract bottom and top from vertical dimension; left and right
    # from horizontal dimension
    scr_width = scr_width - sum(margins[2:])
    scr_height = scr_height - sum(margins[:2])

    nblinks = parameters['Nblinks']
    bw = parameters.get('blackwhite', False)

    # Gaussian stimuli are not supported yet; ensure the stimulus
    # is black and white.
    if bw is not True:
        raise ValueError('Gaussian stimuli are not supported yet!')

    seed = parameters.get('seed', -1000)

    sx, sy = scr_height / stx_h, scr_width / stx_w

    # Make sure that the stimulus dimensions in stixels are integers.
    # Rounding down is also possible but might require
    # other considerations.
    if sx % 1 == 0 and sy % 1 == 0:
        sx, sy = int(sx), int(sy)
    else:
        raise ValueError('sx and sy must be integers')

    filter_length, frametimings = asc.ft_nblinks(exp_dir, stimulusnr)

    if parameters['stimulus_type'] in [
            'FrozenNoise', 'checkerflickerplusmovie'
    ]:
        runfr = parameters['RunningFrames']
        frofr = parameters['FrozenFrames']
        # To generate the frozen noise, a second seed is used.
        # The default value of this is -10000 as per StimulateOpenGL
        secondseed = parameters.get('secondseed', -10000)

        if parameters['stimulus_type'] == 'checkerflickerplusmovie':
            mblinks = parameters['Nblinksmovie']
            # Retrieve the number of frames (files) from parameters['path']
            ipath = PureWindowsPath(parameters['path']).as_posix()
            repldict = iof.config('stimuli_path_replace')
            for needle, repl in repldict.items():
                ipath = ipath.replace(needle, repl)
            ipath = os.path.normpath(ipath)  # Windows compatibility
            moviefr = len([
                name for name in os.listdir(ipath)
                if os.path.isfile(os.path.join(ipath, name))
                and name.lower().endswith('.raw')
            ])
            noiselen = (runfr + frofr) * nblinks
            movielen = moviefr * mblinks
            triallen = noiselen + movielen

            ft_on, ft_off = asc.readframetimes(exp_dir,
                                               stimulusnr,
                                               returnoffsets=True)
            frametimings = np.empty(ft_on.shape[0] * 2, dtype=float)
            frametimings[::2] = ft_on
            frametimings[1::2] = ft_off

            ntrials = frametimings.size // triallen
            trials = np.zeros((ntrials, runfr + frofr + moviefr))
            for t in range(ntrials):
                frange = frametimings[t * triallen:(t + 1) * triallen]
                trials[t, :runfr + frofr] = frange[:noiselen][::nblinks]
                trials[t, runfr + frofr:] = frange[noiselen:][::mblinks]
            frametimings = trials.ravel()

            filter_length = int(np.round(.666 * refresh_rate / nblinks))

            # Add frozen movie to frozen noise (for masking)
            frofr += moviefr

    savefname = str(stimulusnr) + '_data'

    if clusterstoanalyze:
        clusters = clusters[:clusterstoanalyze, :]
        print('Analyzing first %s cells' % clusterstoanalyze)
        savefname += '_' + str(clusterstoanalyze) + 'cells'
    if frametimingsfraction:
        frametimingsindex = int(len(frametimings) * frametimingsfraction)
        frametimings = frametimings[:frametimingsindex]
        print('Analyzing first {}% of'
              ' the recording'.format(frametimingsfraction * 100))
        savefname += '_' + str(frametimingsfraction).replace('.',
                                                             '') + 'fraction'
    frame_duration = np.average(np.ediff1d(frametimings))
    total_frames = frametimings.shape[0]

    all_spiketimes = []
    # Store spike-triggered averages in a list containing correctly
    # shaped arrays
    stas = []

    for i in range(len(clusters[:, 0])):
        spiketimes = asc.read_raster(exp_dir, stimulusnr, clusters[i, 0],
                                     clusters[i, 1])

        spikes = asc.binspikes(spiketimes, frametimings)
        all_spiketimes.append(spikes)
        stas.append(np.zeros((sx, sy, filter_length)))

    # Separate out the repeated parts
    all_spiketimes = np.array(all_spiketimes)
    mask = runfreezemask(total_frames, runfr, frofr, refresh_rate)
    repeated_spiketimes = all_spiketimes[:, ~mask]
    run_spiketimes = all_spiketimes[:, mask]

    # We need to cut down the total_frames by the same amount
    # as spiketimes
    total_run_frames = run_spiketimes.shape[1]
    # To be able to use the same code as checkerflicker analyzer,
    # convert to list again.
    run_spiketimes = list(run_spiketimes)

    # Empirically determined to work best with 32 GB of RAM
    desired_chunk_size = 21600000

    # Length of the chunks (specified in number of frames)
    chunklength = int(desired_chunk_size / (sx * sy))

    chunksize = chunklength * sx * sy
    nrofchunks = int(np.ceil(total_run_frames / chunklength))

    print(f'\nAnalyzing {stimname}.\nTotal chunks: {nrofchunks}')

    time = startime = datetime.datetime.now()
    timedeltas = []

    quals = np.zeros(len(stas))

    frame_counter = 0

    for i in range(nrofchunks):
        randnrs, seed = randpy.ranb(seed, chunksize)
        # Reshape and change 0's to -1's
        stimulus = np.reshape(randnrs,
                              (sx, sy, chunklength), order='F') * 2 - 1
        del randnrs

        # Range of indices we are interested in for the current chunk
        if (i + 1) * chunklength < total_run_frames:
            chunkind = slice(i * chunklength, (i + 1) * chunklength)
            chunkend = chunklength
        else:
            chunkind = slice(i * chunklength, None)
            chunkend = total_run_frames - i * chunklength

        for k in range(filter_length, chunkend - filter_length + 1):
            stim_small = stimulus[:, :,
                                  k - filter_length + 1:k + 1][:, :, ::-1]
            for j in range(clusters.shape[0]):
                spikes = run_spiketimes[j][chunkind]
                if spikes[k] != 0:
                    stas[j] += spikes[k] * stim_small
        qual = np.array([])
        for c in range(clusters.shape[0]):
            qual = np.append(qual, asc.staquality(stas[c]))
        quals = np.vstack((quals, qual))

        # Draw progress bar
        width = 50  # Number of characters
        prog = i / (nrofchunks - 1)
        bar_complete = int(prog * width)
        bar_noncomplete = width - bar_complete
        timedeltas.append(msc.timediff(time))  # Calculate running avg
        avgelapsed = np.mean(timedeltas)
        elapsed = np.sum(timedeltas)
        etc = startime + elapsed + avgelapsed * (nrofchunks - i)
        sys.stdout.flush()
        sys.stdout.write('\r{}{} |{:4.1f}% ETC: {}'.format(
            '█' * bar_complete, '-' * bar_noncomplete, prog * 100,
            etc.strftime("%a %X")))
        time = datetime.datetime.now()
    sys.stdout.write('\n')

    # Remove the first row, which is the all-zeros placeholder
    # from initialization.
    quals = quals[1:, :]

    max_inds = []
    spikenrs = np.array([spikearr.sum() for spikearr in run_spiketimes])

    for i in range(clusters.shape[0]):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '.*true_divide.*')
            stas[i] = stas[i] / spikenrs[i]
        # Find the pixel with largest absolute value
        max_i = np.squeeze(
            np.where(np.abs(stas[i]) == np.max(np.abs(stas[i]))))
        # If there are multiple pixels with largest value,
        # take the first one.
        if max_i.shape != (3, ):
            try:
                max_i = max_i[:, 0]
            # If max_i cannot be found just set it to zeros.
            except IndexError:
                max_i = np.array([0, 0, 0])

        max_inds.append(max_i)

    print(f'Completed. Total elapsed time: {msc.timediff(startime)}\n' +
          f'Finished on {datetime.datetime.now().strftime("%A %X")}')

    savepath = os.path.join(exp_dir, 'data_analysis', stimname)
    if not os.path.isdir(savepath):
        os.makedirs(savepath, exist_ok=True)
    savepath = os.path.join(savepath, savefname)

    keystosave = [
        'clusters', 'frametimings', 'mask', 'repeated_spiketimes',
        'run_spiketimes', 'frame_duration', 'max_inds', 'nblinks', 'stas',
        'stx_h', 'stx_w', 'total_run_frames', 'sx', 'sy', 'filter_length',
        'stimname', 'exp_name', 'spikenrs', 'clusterstoanalyze',
        'frametimingsfraction', 'cutoff', 'quals', 'nrofchunks', 'chunklength'
    ]
    datadict = {}

    for key in keystosave:
        datadict[key] = locals()[key]

    np.savez(savepath, **datadict)

    t = (np.arange(nrofchunks) * chunklength * frame_duration) / refresh_rate
    qmax = np.max(quals, axis=0)
    qualsn = quals / qmax[np.newaxis, :]

    ax = plt.subplot(111)
    ax.plot(t, qualsn, alpha=0.3)
    plt.ylabel('Z-score of center pixel (normalized)')
    plt.xlabel('Minutes of stimulus analyzed')
    plt.ylim([0, 1])
    plf.spineless(ax, 'tr')
    plt.title(f'Recording duration optimization\n{exp_name}\n {savefname}')
    plt.savefig(savepath + '.svg', format='svg')
    plt.close()
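A usage sketch, with a hypothetical experiment name and stimulus number; iof.exp_dir_fixer is assumed to resolve the experiment folder as in the function body.

# Quick first pass: analyze only the first 20 cells and the first 30%
# of the recording before committing to the full dataset.
checkerflickerplusanalyzer('20180124', 5,
                           clusterstoanalyze=20,
                           frametimingsfraction=0.3)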
Example #4
stas = np.array(data['stas'])

predstas = np.zeros(stas.shape)
predmus = np.zeros(stas.shape[0])
start = dt.datetime.now()

allspikes = np.zeros((stas.shape[0], frametimes.shape[0]), dtype=np.int8)

for i, cluster in enumerate(clusters):

    #cluster = data['clusters'][i]
    sta = data['stas'][i]

    frame_dur = data['frame_duration']

    spikes = asc.read_raster(exp_name, stim_nr, *cluster)
    spikes = asc.binspikes(spikes, frametimes)
    allspikes[i, :] = spikes

    res = glm.minimize_loglhd(np.zeros(sta.shape), 0, stimulus, frame_dur,
                              spikes)
    #%%
    k_pred = res['x'][:-1]
    mu_pred = res['x'][-1]

    predstas[i, :] = k_pred
    predmus[i] = mu_pred

    #ax1 = plt.subplot(111)
    #ax1.plot(k_pred)
    #ax1.plot(sta)
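The commented-out lines above hint at comparing the fitted GLM filter with the STA; a minimal plotting sketch of that comparison, assuming stas and predstas are 2-D (cells x filter length) as in the assignments above.

import matplotlib.pyplot as plt

i = 0  # first cluster, as an example
ax1 = plt.subplot(111)
ax1.plot(predstas[i, :], label='GLM filter')
ax1.plot(stas[i, :], label='STA')
ax1.legend(fontsize='small')
plt.show()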
Example #5
def OMBanalyzer(exp_name, stimnr, plotall=False, nr_bins=20):
    """
    Analyze responses to object moving background stimulus. STA and STC
    are calculated.

    Note that there are additional functions that make use of the
    OMB class. This function was written before the OMB class existed
    """
    # TODO
    # Add iteration over multiple stimuli

    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]
    stimname = iof.getstimname(exp_dir, stimnr)

    parameters = asc.read_parameters(exp_name, stimnr)
    assert parameters['stimulus_type'] == 'objectsmovingbackground'
    stimframes = parameters.get('stimFrames', 108000)
    preframes = parameters.get('preFrames', 200)
    nblinks = parameters.get('Nblinks', 2)

    seed = parameters.get('seed', -10000)
    seed2 = parameters.get('objseed', -1000)

    stepsize = parameters.get('stepsize', 2)

    ntotal = int(stimframes / nblinks)

    clusters, metadata = asc.read_spikesheet(exp_name)

    refresh_rate = metadata['refresh_rate']
    filter_length, frametimings = asc.ft_nblinks(exp_name, stimnr, nblinks,
                                                 refresh_rate)
    frame_duration = np.ediff1d(frametimings).mean()
    frametimings = frametimings[:-1]

    if ntotal != frametimings.shape[0]:
        print(f'For {exp_name}\nstimulus {stimname} :\n'
              f'Number of frames specified in the parameters file ({ntotal}'
              f' frames) and frametimings ({frametimings.shape[0]}) do not'
              ' agree!'
              ' The stimulus was possibly interrupted during recording.'
              ' ntotal is changed to match actual frametimings.')
        ntotal = frametimings.shape[0]

    # Generate the numbers to be used for reconstructing the motion
    # ObjectsMovingBackground.cpp line 174, steps are generated in an
    # alternating fashion. We can generate all of the numbers at once
    # (the total length is defined by stimFrames) and then assign
    # to x and y directions. Although there is more
    # stuff around line 538
    randnrs, seed = randpy.gasdev(seed, ntotal * 2)
    randnrs = np.array(randnrs) * stepsize

    xsteps = randnrs[::2]
    ysteps = randnrs[1::2]

    clusterids = plf.clusters_to_ids(clusters)

    all_spikes = np.empty((clusters.shape[0], ntotal))
    for i, (cluster, channel, _) in enumerate(clusters):
        spiketimes = asc.read_raster(exp_name, stimnr, cluster, channel)
        spikes = asc.binspikes(spiketimes, frametimings)
        all_spikes[i, :] = spikes

    # Collect STA for x and y movement in one array
    stas = np.zeros((clusters.shape[0], 2, filter_length))
    stc_x = np.zeros((clusters.shape[0], filter_length, filter_length))
    stc_y = np.zeros((clusters.shape[0], filter_length, filter_length))
    t = np.arange(filter_length) * 1000 / refresh_rate * nblinks
    for k in range(filter_length, ntotal - filter_length + 1):
        x_mini = xsteps[k - filter_length + 1:k + 1][::-1]
        y_mini = ysteps[k - filter_length + 1:k + 1][::-1]
        for i, (cluster, channel, _) in enumerate(clusters):
            if all_spikes[i, k] != 0:
                stas[i, 0, :] += all_spikes[i, k] * x_mini
                stas[i, 1, :] += all_spikes[i, k] * y_mini
                # Calculate non-centered STC (Cantrell et al., 2010)
                stc_x[i, :, :] += all_spikes[i, k] * calc_covar(x_mini)
                stc_y[i, :, :] += all_spikes[i, k] * calc_covar(y_mini)

    eigvals_x = np.zeros((clusters.shape[0], filter_length))
    eigvals_y = np.zeros((clusters.shape[0], filter_length))
    eigvecs_x = np.zeros((clusters.shape[0], filter_length, filter_length))
    eigvecs_y = np.zeros((clusters.shape[0], filter_length, filter_length))

    bins_x = np.zeros((clusters.shape[0], nr_bins))
    bins_y = np.zeros((clusters.shape[0], nr_bins))
    spikecount_x = np.zeros(bins_x.shape)
    spikecount_y = np.zeros(bins_x.shape)
    generators_x = np.zeros(all_spikes.shape)
    generators_y = np.zeros(all_spikes.shape)
    # Normalize STAs and STCs with respect to spike numbers
    for i in range(clusters.shape[0]):
        totalspikes = all_spikes.sum(axis=1)[i]
        stas[i, :, :] = stas[i, :, :] / totalspikes
        stc_x[i, :, :] = stc_x[i, :, :] / totalspikes
        stc_y[i, :, :] = stc_y[i, :, :] / totalspikes
        try:
            eigvals_x[i, :], eigvecs_x[i, :, :] = np.linalg.eigh(
                stc_x[i, :, :])
            eigvals_y[i, :], eigvecs_y[i, :, :] = np.linalg.eigh(
                stc_y[i, :, :])
        except np.linalg.LinAlgError:
            continue
        # Calculate the generator signals and nonlinearities
        generators_x[i, :] = np.convolve(eigvecs_x[i, :, -1],
                                         xsteps,
                                         mode='full')[:-filter_length + 1]
        generators_y[i, :] = np.convolve(eigvecs_y[i, :, -1],
                                         ysteps,
                                         mode='full')[:-filter_length + 1]
        spikecount_x[i, :], bins_x[i, :] = nlt.calc_nonlin(
            all_spikes[i, :], generators_x[i, :], nr_bins)
        spikecount_y[i, :], bins_y[i, :] = nlt.calc_nonlin(
            all_spikes[i, :], generators_y[i, :], nr_bins)
    savepath = os.path.join(exp_dir, 'data_analysis', stimname)
    if not os.path.isdir(savepath):
        os.makedirs(savepath, exist_ok=True)

    # Calculated based on last eigenvector
    magx = eigvecs_x[:, :, -1].sum(axis=1)
    magy = eigvecs_y[:, :, -1].sum(axis=1)
    r_ = np.sqrt(magx**2 + magy**2)
    theta_ = np.arctan2(magy, magx)
    # To draw the vectors starting from origin, insert zeros every other element
    r = np.zeros(r_.shape[0] * 2)
    theta = np.zeros(theta_.shape[0] * 2)
    r[1::2] = r_
    theta[1::2] = theta_
    plt.polar(theta, r)
    plt.gca().set_xticks(np.pi / 180 * np.array([0, 90, 180, 270]))
    plt.title(f'Population plot for motion STAs\n{exp_name}')
    plt.savefig(os.path.join(savepath, 'population.svg'))
    if plotall:
        plt.show()
    plt.close()

    for i in range(stas.shape[0]):
        stax = stas[i, 0, :]
        stay = stas[i, 1, :]
        ax1 = plt.subplot(211)
        ax1.plot(t, stax, label=r'STA$_{\rm X}$')
        ax1.plot(t, stay, label=r'STA$_{\rm Y}$')
        ax1.plot(t, eigvecs_x[i, :, -1], label='Eigenvector_X 0')
        ax1.plot(t, eigvecs_y[i, :, -1], label='Eigenvector_Y 0')
        plt.legend(fontsize='x-small')

        ax2 = plt.subplot(4, 4, 9)
        ax3 = plt.subplot(4, 4, 13)
        ax2.set_yticks([])
        ax2.set_xticklabels([])
        ax3.set_yticks([])
        ax2.set_title('Eigenvalues', size='small')
        ax2.plot(eigvals_x[i, :],
                 'o',
                 markerfacecolor='C0',
                 markersize=4,
                 markeredgewidth=0)
        ax3.plot(eigvals_y[i, :],
                 'o',
                 markerfacecolor='C1',
                 markersize=4,
                 markeredgewidth=0)
        ax4 = plt.subplot(2, 3, 5)
        ax4.plot(bins_x[i, :], spikecount_x[i, :] / frame_duration)
        ax4.plot(bins_y[i, :], spikecount_y[i, :] / frame_duration)
        ax4.set_ylabel('Firing rate [Hz]')
        ax4.set_title('Nonlinearities', size='small')
        plf.spineless([ax1, ax2, ax3, ax4], 'tr')
        ax5 = plt.subplot(2, 3, 6, projection='polar')
        ax5.plot(theta, r, color='k', alpha=.3)
        ax5.plot(theta[2 * i:2 * i + 2], r[2 * i:2 * i + 2], lw=3)
        ax5.set_xticklabels(['0', '', '', '', '180', '', '270', ''])
        ax5.set_title('Vector sum of X and Y STCs', size='small')
        plt.suptitle(f'{exp_name}\n{stimname}\n{clusterids[i]}')
        plt.subplots_adjust(hspace=.4)
        plt.savefig(os.path.join(savepath, clusterids[i] + '.svg'),
                    bbox_inches='tight')
        if plotall:
            plt.show()
        plt.close()

    keystosave = [
        'nblinks', 'all_spikes', 'clusters', 'frame_duration', 'eigvals_x',
        'eigvals_y', 'eigvecs_x', 'eigvecs_y', 'filter_length', 'magx', 'magy',
        'ntotal', 'r', 'theta', 'stas', 'stc_x', 'stc_y', 'bins_x', 'bins_y',
        'nr_bins', 'spikecount_x', 'spikecount_y', 'generators_x',
        'generators_y', 't'
    ]
    datadict = {}

    for key in keystosave:
        datadict[key] = locals()[key]

    npzfpath = os.path.join(savepath, str(stimnr) + '_data')
    np.savez(npzfpath, **datadict)
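The saved arrays can be read back later with np.load; a sketch assuming the directory layout the function writes to, with a hypothetical experiment name and stimulus number.

import os
import numpy as np

exp_dir = iof.exp_dir_fixer('20180124')   # hypothetical experiment
stimname = iof.getstimname(exp_dir, 8)    # hypothetical stimulus nr 8
data = np.load(os.path.join(exp_dir, 'data_analysis', stimname,
                            '8_data.npz'))
stas = data['stas']              # shape: (ncells, 2, filter_length)
eigvecs_x = data['eigvecs_x']    # STC eigenvectors for x motion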
def stripeflickeranalysis(exp_name, stim_nrs):
    exp_dir = iof.exp_dir_fixer(exp_name)

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]

    for stim_nr in stim_nrs:
        stimname = iof.getstimname(exp_name, stim_nr)

        clusters, metadata = asc.read_spikesheet(exp_dir)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        scr_width = metadata['screen_width']
        px_size = metadata['pixel_size(um)']

        stx_w = parameters['stixelwidth']
        stx_h = parameters['stixelheight']

        if (stx_h / stx_w) < 2:
            raise ValueError('Make sure the stimulus is stripeflicker.')

        sy = scr_width / stx_w
        if sy % 1 == 0:
            sy = int(sy)
        else:
            raise ValueError('sy is not an integer')

        nblinks = parameters['Nblinks']
        try:
            bw = parameters['blackwhite']
        except KeyError:
            bw = False

        try:
            seed = parameters['seed']
        except KeyError:
            seed = -10000

        if nblinks == 1:
            ft_on, ft_off = asc.readframetimes(exp_dir,
                                               stim_nr,
                                               returnoffsets=True)
            # Initialize an empty array twice the size of one of them;
            # assign values from on and off times to alternating elements.
            frametimings = np.empty(ft_on.shape[0] * 2, dtype=float)
            frametimings[::2] = ft_on
            frametimings[1::2] = ft_off
            # Set filter length so that temporal filter is ~600 ms.
            # The unit here is number of frames.
            filter_length = 40
        elif nblinks == 2:
            frametimings = asc.readframetimes(exp_dir, stim_nr)
            filter_length = 20
        else:
            raise ValueError('Unexpected value for nblinks.')

        # Omit everything that happens before the first 10 seconds
        cut_time = 10

        frame_duration = np.average(np.ediff1d(frametimings))
        total_frames = frametimings.shape[0]

        all_spiketimes = []
        # Store spike-triggered averages in a list containing correctly
        # shaped arrays
        stas = []

        for i in range(len(clusters[:, 0])):
            spiketimes = asc.read_raster(exp_dir, stim_nr, clusters[i, 0],
                                         clusters[i, 1])
            spikes = asc.binspikes(spiketimes, frametimings)
            all_spiketimes.append(spikes)
            stas.append(np.zeros((sy, filter_length)))

        if bw:
            randnrs, seed = randpy.ran1(seed, sy * total_frames)
            randnrs = [1 if i > .5 else -1 for i in randnrs]
        else:
            randnrs, seed = randpy.gasdev(seed, sy * total_frames)

        stimulus = np.reshape(randnrs, (sy, total_frames), order='F')
        del randnrs

        for k in range(filter_length, total_frames - filter_length + 1):
            stim_small = stimulus[:, k - filter_length + 1:k + 1][:, ::-1]
            for j in range(clusters.shape[0]):
                spikes = all_spiketimes[j]
                if spikes[k] != 0 and frametimings[k] > cut_time:
                    stas[j] += spikes[k] * stim_small

        max_inds = []
        spikenrs = np.array([spikearr.sum() for spikearr in all_spiketimes])

        quals = np.array([])

        for i in range(clusters.shape[0]):
            stas[i] = stas[i] / spikenrs[i]
            # Find the pixel with largest absolute value
            max_i = np.squeeze(
                np.where(np.abs(stas[i]) == np.max(np.abs(stas[i]))))
            # If there are multiple pixels with largest value,
            # take the first one.
            if max_i.shape != (2, ):
                try:
                    max_i = max_i[:, 0]
                # If max_i cannot be found just set it to zeros.
                except IndexError:
                    max_i = np.array([0, 0])

            max_inds.append(max_i)

            quals = np.append(quals, asc.staquality(stas[i]))

        savefname = str(stim_nr) + '_data'
        savepath = pjoin(exp_dir, 'data_analysis', stimname)

        exp_name = os.path.split(exp_dir)[-1]

        if not os.path.isdir(savepath):
            os.makedirs(savepath, exist_ok=True)
        savepath = os.path.join(savepath, savefname)

        keystosave = [
            'stas', 'max_inds', 'clusters', 'sy', 'frame_duration',
            'all_spiketimes', 'stimname', 'total_frames', 'stx_w', 'spikenrs',
            'bw', 'quals', 'nblinks', 'filter_length', 'exp_name'
        ]
        data_in_dict = {}
        for key in keystosave:
            data_in_dict[key] = locals()[key]

        np.savez(savepath, **data_in_dict)
        print(f'Analysis of {stimname} completed.')
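For reference, a hedged sketch of how the per-spike STA accumulation above could be vectorized with a sliding-window view (NumPy 1.20+); it follows the same time-reversal convention as stim_small but ignores the cut_time mask and the exact edge range of the loop, so results at the borders may differ slightly.

import numpy as np

def sta_vectorized(stimulus, spikes, filter_length):
    """Spike-weighted sum of time-reversed stimulus windows.

    stimulus: (sy, nframes) array, spikes: (nframes,) binned counts.
    """
    # windows[:, m, :] is stimulus[:, m:m + filter_length]; the window
    # ending at frame k starts at m = k - filter_length + 1.
    windows = np.lib.stride_tricks.sliding_window_view(
        stimulus, filter_length, axis=1)
    weights = spikes[filter_length - 1:]  # spike count at each window end
    sta = np.tensordot(windows, weights, axes=([1], [0]))
    return sta[:, ::-1]  # reverse time, matching stim_small[:, ::-1]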
predstas = stas.copy()
predmus = np.zeros((stas.shape[0], stas.shape[-1]))

parameters = asc.read_parameters(exp_name, stim_nr)

_, frametimes = asc.ft_nblinks(exp_name, stim_nr, parameters.get('Nblinks', 2))
frametimes = frametimes[:-1]
frame_dur = np.ediff1d(frametimes).mean()

stashape = stas[:, 0, :].shape
#%%
start = dt.datetime.now()
for i, cluster in enumerate(clusters):
    for j, direction in enumerate(['x', 'y']):
        spikes = asc.read_raster(exp_name, stim_nr, cluster[0], cluster[1])
        spikes = asc.binspikes(spikes, frametimes)

        res = glm.minimize_loglhd(stas[i, j, :],
                                  0,
                                  stimulus[j, :],
                                  frame_dur,
                                  spikes,
                                  usegrad=True,
                                  method='BFGS')
        k_pred = res['x'][:-1]
        mu_pred = res['x'][-1]

        predstas[i, j, :] = k_pred
        predmus[i, j] = mu_pred
Example #8
def spontanalyzer(exp_name, stim_nrs):
    """
    Analyze spontaneous activity, plot and save it. Creates a directory
    data_analysis/<stimulus_name> and saves figures as SVG (and PDF in
    a subfolder).

    """

    exp_dir = iof.exp_dir_fixer(exp_name)

    exp_name = os.path.split(exp_dir)[-1]

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]
    elif len(stim_nrs) == 0:
        return

    for stim_nr in stim_nrs:
        stim_nr = str(stim_nr)

        stimname = iof.getstimname(exp_dir, stim_nr)

        clusters, _ = asc.read_spikesheet(exp_dir, cutoff=4)

        # Length (in seconds) of the chunks the activity is divided
        # into for plotting.
        step = 1

        allspikes = []

        for i in range(clusters.shape[0]):
            spikes = asc.read_raster(exp_dir, stim_nr, clusters[i, 0],
                                     clusters[i, 1])
            allspikes.append(spikes)

        # Use the time of the last spike to determine the total recording time.
        last_spike = np.max([np.max(allspikes[i])
                             for i in range(clusters.shape[0])
                             if len(allspikes[i]) > 0])
        totalrecordingtime = int(np.ceil(last_spike) + 1)
        times = np.arange(0, totalrecordingtime, step)

        for i in range(len(clusters[:, 0])):
            spikes = allspikes[i]
            # Find which trial each spike belongs to, and subtract one
            # to be able to use as indices
            trial_indices = np.digitize(spikes, times) - 1

            rasterplot = []
            # Iterate over all the trials, create an empty array for each
            for j in range(totalrecordingtime):
                rasterplot.append([])
            # plt.eventplot requires a list containing spikes in each
            # trial separately
            for k in range(len(spikes)):
                trial = trial_indices[k]
                rasterplot[trial].append(spikes[k] - times[trial])

            # Workaround for matplotlib issue #6412.
            # https://github.com/matplotlib/matplotlib/issues/6412
            # If a cell has no spikes for the first trial i.e. the first
            # element of the list is empty, an error is raised due to
            # a plt.eventplot bug.
            if len(rasterplot[0]) == 0:
                rasterplot[0] = [-1]

            plt.figure(figsize=(9, 6))
            ax1 = plt.subplot(111)
            plt.eventplot(rasterplot, linewidth=.5, color='k')
            # Set the axes so they align with the rectangles
            plt.axis([0, step, -1, len(rasterplot)])

            plt.suptitle('{}\n{}'.format(exp_name, stimname))
            plt.title('{:0>3}{:0>2} Rating: {}'.format(clusters[i][0],
                                                       clusters[i][1],
                                                       clusters[i][2]))
            plt.ylabel('Time index')
            plt.xlabel('Time [s]')
            plt.gca().invert_yaxis()
            ax1.set_xticks([0, .5, 1])
            plf.spineless(ax1)

            savedir = os.path.join(exp_dir, 'data_analysis', stimname)
            os.makedirs(os.path.join(savedir, 'pdf'), exist_ok=True)

            # Save as svg for looking through data, pdf for
            # inserting into presentations
            plt.savefig(
                savedir +
                '/{:0>3}{:0>2}.svg'.format(clusters[i, 0], clusters[i, 1]),
                format='svg',
                bbox_inches='tight')
            plt.savefig(os.path.join(
                savedir, 'pdf', '{:0>3}'
                '{:0>2}.pdf'.format(clusters[i, 0], clusters[i, 1])),
                        format='pdf',
                        bbox_inches='tight')
            plt.close()
        print(f'Analysis of {stimname} completed.')
Example #9
def OMSpatchesanalyzer(exp_name, stim_nrs):
    """
    Analyze and plot the responses to object motion patches stimulus.
    """

    exp_dir = iof.exp_dir_fixer(exp_name)

    exp_name = os.path.split(exp_dir)[-1]

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]
    elif len(stim_nrs) == 0:
        return

    clusters, metadata = asc.read_spikesheet(exp_dir, cutoff=4)
    clusterids = plf.clusters_to_ids(clusters)
    all_omsi = np.empty((clusters.shape[0], len(stim_nrs)))
    stimnames = []
    for stim_index, stim_nr in enumerate(stim_nrs):
        stim_nr = str(stim_nr)

        stimname = iof.getstimname(exp_dir, stim_nr)
        stimnames.append(stimname)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        refresh_rate = metadata['refresh_rate']

        nblinks = parameters.get('Nblinks', 1)
        seed = parameters.get('seed', -10000)
        stim_duration = parameters.get('stimFrames', 1400)
        # The duration in the parameters refers to the total duration of
        # both epochs. Divide by two to get the duration of a single epoch.
        stim_duration = int(stim_duration / 2)
        prefr_duration = parameters.get('preFrames', 100)

        frametimings = asc.readframetimes(exp_dir, stim_nr)

        # ntrials is the number of trials containing both local and
        # global epochs
        ntrials = np.floor((frametimings.shape[0] / (stim_duration + 1))) / 2
        ntrials = ntrials.astype(int)
        frametimings_rs = frametimings[:ntrials * 2 * (stim_duration + 1)]
        frametimings_rs = frametimings_rs.reshape(
            (ntrials * 2, stim_duration + 1))

        ft_local = frametimings_rs[::2][:, :-1]
        ft_global = frametimings_rs[1::2][:, :-1]

        localspikes = np.empty((clusters.shape[0], ntrials, stim_duration))
        globalspikes = np.empty((clusters.shape[0], ntrials, stim_duration))

        for i, cluster in enumerate(clusters):
            spikes = asc.read_raster(exp_name, stim_nr, cluster[0], cluster[1])
            for j in range(ntrials):
                localspikes[i, j, :] = asc.binspikes(spikes, ft_local[j, :])
                globalspikes[i, j, :] = asc.binspikes(spikes, ft_global[j, :])

        response_local = localspikes.mean(axis=1)
        response_global = globalspikes.mean(axis=1)

        # Differential and coherent firing rates
        fr_d = response_local.mean(axis=1)
        fr_c = response_global.mean(axis=1)

        # Calculate the object motion sensitivity index (OMSI) as defined
        # in Kühn et al., 2016. There, the first second of each trial is
        # discarded; here that does not seem to make much of a difference.
        omsi = (fr_d - fr_c) / (fr_d + fr_c)

        # Create a time array for plotting
        time = np.linspace(0,
                           stim_duration * 2 / refresh_rate,
                           num=stim_duration)

        savepath = os.path.join(exp_dir, 'data_analysis', stimname)
        if not os.path.isdir(savepath):
            os.makedirs(savepath, exist_ok=True)

        for i, cluster in enumerate(clusters):
            gs = gridspec.GridSpec(2, 1)
            ax1 = plt.subplot(gs[0])
            ax2 = plt.subplot(gs[1])

            rastermat = np.vstack(
                (localspikes[i, :, :], globalspikes[i, :, :]))
            ax1.matshow(rastermat, cmap='Greys')
            ax1.axhline(ntrials - 1, color='r', lw=.1)
            ax1.plot([0, 0], [ntrials, 0])
            ax1.plot([0, 0], [ntrials * 2, ntrials])
            ax1.set_xticks([])
            ax1.set_yticks([])
            plf.spineless(ax1)

            ax2.plot(time, response_local[i, :], label='Local')
            ax2.plot(time, response_global[i, :], label='Global')
            ax2.set_xlabel('Time [s]')
            ax2.set_ylabel('Average firing rate [au]')
            ax2.set_xlim([time.min(), time.max()])
            plf.spineless(ax2, 'tr')
            ax2.legend(fontsize='x-small')

            plt.suptitle(f'{exp_name}\n{stimname}\n'
                         f'{clusterids[i]} OMSI: {omsi[i]:4.2f}')
            plt.tight_layout()
            plt.savefig(os.path.join(savepath, clusterids[i] + '.svg'),
                        bbox_inches='tight')
            plt.close()
        keystosave = [
            'nblinks', 'refresh_rate', 'stim_duration', 'prefr_duration',
            'ntrials', 'response_local', 'response_global', 'fr_d', 'fr_c',
            'omsi', 'clusters'
        ]
        datadict = {}

        for key in keystosave:
            datadict[key] = locals()[key]

        npzfpath = os.path.join(savepath, str(stim_nr) + '_data')
        np.savez(npzfpath, **datadict)
        all_omsi[:, stim_index] = omsi
    print(f'Analysis of {stimname} completed.')
    # Draw the distribution of the OMSI for all OMSI stimuli
    # If there is only one OMS stimulus, draw it in the same folder
    # If there are multiple stimuli, save it in the data analysis folder
    if len(stim_nrs) == 1:
        pop_plot_savepath = os.path.join(savepath, 'omsi_population.svg')
    else:
        pop_plot_savepath = os.path.split(savepath)[0]
        pop_plot_savepath = os.path.join(pop_plot_savepath, 'all_omsi.svg')

    plt.figure(figsize=(5, 2 * len(stim_nrs)))
    ax2 = plt.subplot(111)
    for j, stim_nr in enumerate(stim_nrs):
        np.random.seed(j)
        ax2.scatter(all_omsi[:, j],
                    j + (np.random.random(omsi.shape) - .5) / 1.1)
    np.random.seed()
    ax2.set_yticks(np.arange(len(stim_nrs)))
    ax2.set_yticklabels(stimnames, fontsize='xx-small', rotation='45')
    ax2.set_xlabel('Object-motion sensitivity index')
    ax2.set_title(f'{exp_name}\nDistribution of OMSI')
    plf.spineless(ax2, 'tr')
    plt.savefig(pop_plot_savepath, bbox_inches='tight')
    plt.close()
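A tiny worked example of the OMSI formula used above, omsi = (fr_d - fr_c) / (fr_d + fr_c), with made-up firing rates.

import numpy as np

fr_d = np.array([4.0, 1.0])  # hypothetical differential (local) rates
fr_c = np.array([1.0, 1.0])  # hypothetical coherent (global) rates
omsi = (fr_d - fr_c) / (fr_d + fr_c)
print(omsi)  # [0.6 0. ]: 0.6 is object-motion sensitive, 0 is indifferent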
Example #10
def fffanalyzer(exp_name, stimnrs):
    """
    Analyzes and plots data from full field flicker
    stimulus.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]

    if isinstance(stimnrs, int):
        stimnrs = [stimnrs]

    for stimnr in stimnrs:
        stimnr = str(stimnr)

        stimname = iof.getstimname(exp_name, stimnr)

        clusters, metadata = asc.read_spikesheet(exp_dir)

        parameters = asc.read_parameters(exp_dir, stimnr)

        clusterids = plf.clusters_to_ids(clusters)

        refresh_rate = metadata['refresh_rate']

        if parameters['stixelheight'] < 600 or parameters['stixelwidth'] < 800:
            raise ValueError('Make sure the stimulus is full field flicker.')

        nblinks = parameters['Nblinks']

        bw = parameters.get('blackwhite', False)

        seed = parameters.get('seed', -10000)

        filter_length, frametimings = asc.ft_nblinks(exp_dir, stimnr)

        frame_duration = np.average(np.ediff1d(frametimings))
        total_frames = frametimings.shape[0]

        all_spiketimes = []
        # Store spike-triggered averages in a list containing correctly
        # shaped arrays
        stas = []
        # Make a list for covariances of the spike triggered ensemble
        covars = []
        for i in range(len(clusters[:, 0])):
            spiketimes = asc.read_raster(exp_dir, stimnr,
                                         clusters[i, 0], clusters[i, 1])
            spikes = asc.binspikes(spiketimes, frametimings)
            all_spiketimes.append(spikes)
            stas.append(np.zeros(filter_length))
            covars.append(np.zeros((filter_length, filter_length)))

        if bw:
            randnrs, seed = randpy.ranb(seed, total_frames)
            # Since ranb returns zeros and ones, we need to convert the zeros
            # into -1s.
            stimulus = np.array(randnrs) * 2 - 1
        else:
            randnrs, seed = randpy.gasdev(seed, total_frames)
            stimulus = np.array(randnrs)

        for k in range(filter_length, total_frames-filter_length+1):
            stim_small = stimulus[k-filter_length+1:k+1][::-1]
            for j in range(clusters.shape[0]):
                spikes = all_spiketimes[j]
                if spikes[k] != 0:
                    stas[j] += spikes[k]*stim_small
                    # Add a singleton axis so that .T transposes properly
                    stim_small_n = stim_small[np.newaxis, :]
                    # Calculate the covariance as the weighted outer product
                    # of the stimulus snippet with itself.
                    # This is non-centered STC (a la Cantrell et al., 2010)
                    covars[j] += spikes[k]*(np.dot(stim_small_n.T,
                                                   stim_small_n))
        spikenrs = np.array([spikearr.sum() for spikearr in all_spiketimes])

        plotpath = os.path.join(exp_dir, 'data_analysis',
                                stimname, 'filters')
        if not os.path.isdir(plotpath):
            os.makedirs(plotpath, exist_ok=True)

        t = np.arange(filter_length)*frame_duration*1000

        eigvals = [np.zeros((filter_length)) for i in range(clusters.shape[0])]
        eigvecs = [np.zeros((filter_length,
                             filter_length)) for i in range(clusters.shape[0])]

        for i in range(clusters.shape[0]):
            stas[i] = stas[i]/spikenrs[i]
            covars[i] = covars[i]/spikenrs[i]
            try:
                eigvals[i], eigvecs[i] = np.linalg.eigh(covars[i])
            except np.linalg.LinAlgError:
                eigvals[i] = np.full((filter_length), np.nan)
                eigvecs[i] = np.full((filter_length, filter_length), np.nan)
            fig = plt.figure(figsize=(9, 6))
            ax = plt.subplot(111)
            ax.plot(t, stas[i], label='STA')
            ax.plot(t, eigvecs[i][:, 0], label='STC component 1', alpha=.5)
            ax.plot(t, eigvecs[i][:, -1], label='STC component 2', alpha=.5)
            # Add eigenvalues as inset
            ax2 = fig.add_axes([.65, .15, .2, .2])
            # Highlight the first and second components which are plotted
            ax2.plot(0, eigvals[i][0], 'o',
                     markersize=7, markerfacecolor='C1', markeredgewidth=0)
            ax2.plot(filter_length-1, eigvals[i][-1], 'o',
                     markersize=7, markerfacecolor='C2', markeredgewidth=0)
            ax2.plot(eigvals[i], 'ko', alpha=.5, markersize=4,
                     markeredgewidth=0)
            ax2.set_axis_off()
            plf.spineless(ax)
            ax.set_xlabel('Time[ms]')
            ax.set_title(f'{exp_name}\n{stimname}\n{clusterids[i]} Rating:'
                         f' {clusters[i, 2]} {int(spikenrs[i])} spikes')
            plt.savefig(os.path.join(plotpath, clusterids[i])+'.svg',
                        format='svg', dpi=300)
            plt.close()

        savepath = os.path.join(os.path.split(plotpath)[0], stimnr+'_data')

        keystosave = ['stas', 'clusters', 'frame_duration', 'all_spiketimes',
                      'stimname', 'total_frames', 'spikenrs', 'bw', 'nblinks',
                      'filter_length', 'exp_name', 'covars', 'eigvals',
                      'eigvecs']
        data_in_dict = {}
        for key in keystosave:
            data_in_dict[key] = locals()[key]

        np.savez(savepath, **data_in_dict)
        print(f'Analysis of {stimname} completed.')
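The covariance accumulation in fffanalyzer is a spike-weighted sum of outer products; a small self-contained check that it equals the one-shot matrix form X^T diag(w) X, with X holding one snippet per row and w the spike counts. The data here are synthetic.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 20))    # 1000 snippets, filter_length of 20
w = rng.integers(0, 3, size=1000)  # spike counts per snippet

# Loop form, as in the analyzer above
covar_loop = np.zeros((20, 20))
for snippet, nspikes in zip(X, w):
    covar_loop += nspikes * np.outer(snippet, snippet)

# Matrix form: X^T diag(w) X, without materializing diag(w)
covar_mat = X.T @ (X * w[:, None])
assert np.allclose(covar_loop, covar_mat)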
Example #11
        frametimingsindex = int(len(frametimings)*frametimingsfraction)
        frametimings = frametimings[:frametimingsindex]
        print('Analyzing first {}% of'
              ' the recording'.format(frametimingsfraction*100))
        savefname += '_'+str(frametimingsfraction).replace('.', '')+'fraction'
    frame_duration = np.average(np.ediff1d(frametimings))
    total_frames = frametimings.shape[0]

    all_spiketimes = []
    # Store spike-triggered averages in a list containing correctly
    # shaped arrays
    stas = []
    # Store number of spikes during the calculation to use in the averaging
    spikenrs = np.zeros(clusters.shape[0]).astype('int')
    for i in range(len(clusters[:, 0])):
        spiketimes = asc.read_raster(exp_dir, stimulusnr,
                                     clusters[i, 0], clusters[i, 1])

        spikes = asc.binspikes(spiketimes, frametimings)
        all_spiketimes.append(spikes)
        stas.append(np.zeros((sx, sy, filter_length)))

    # Length of the chunks (specified in number of frames)
    chunklength = 5000
    chunksize = chunklength*sx*sy
    nrofchunks = int(np.ceil(total_frames/chunklength))
    time = startime = datetime.datetime.now()
    for i in range(nrofchunks):
        randnrs, seed = randpy.ran1(seed, chunksize)
        randnrs = [1 if i > .5 else -1 for i in randnrs]
        stimulus = np.reshape(randnrs, (sx, sy, chunklength), order='F')
        del randnrs
#%%
from scipy import signal


def plotpeaks(i, j):
    xcorr = xcorrs[i, j, :]
    peaks = signal.find_peaks(xcorr, prominence=200)[0]
    plt.plot(xcorr)
    plt.plot(peaks, xcorr[peaks], 'x')
    plt.axvline(xcorr.shape[0] / 2, color='grey', linestyle='dashed')
    plt.show()


plotpeaks(10, 1)
#%%
spikes = asc.read_raster(exp, stim, 1, 1)
spikes2 = asc.read_raster(exp, stim, 1, 3)

t = np.arange(0, spikes[-1], 0.001)
bsp = asc.binspikes(spikes, t)
bsp2 = asc.binspikes(spikes2, t)

#plt.plot(corr(bsp, window=40))

sprcorwindow = 0.05
corr_bin = np.arange(-sprcorwindow, sprcorwindow, .001)

pycorp = pycorrelate.pcorrelate(spikes, spikes2, corr_bin, normalize=True)
plt.plot(pycorp)
plt.show()
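The commented-out corr(bsp, ...) call above suggests a binned alternative to pycorrelate; a sketch of a cross-correlogram on the binned trains bsp and bsp2 using np.correlate (the corr helper itself is not shown here, so this is an assumption about its intent). The default window of 50 bins matches sprcorwindow with the 1 ms bins of the t vector above.

import numpy as np
import matplotlib.pyplot as plt

def binned_xcorr(x, y, window=50):
    """Cross-correlogram of two binned spike trains, +/- window bins."""
    full = np.correlate(x, y, mode='full')
    mid = len(full) // 2  # zero-lag index for equal-length inputs
    return full[mid - window:mid + window + 1]

plt.plot(np.arange(-50, 51), binned_xcorr(bsp, bsp2))
plt.xlabel('Lag [ms]')
plt.show()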
def saccadegratingsanalyzer(exp_name, stim_nr):
    """
    Analyze and save responses to saccadegratings stimulus.
    """

    exp_dir = iof.exp_dir_fixer(exp_name)
    exp_name = os.path.split(exp_dir)[-1]
    stimname = iof.getstimname(exp_dir, stim_nr)
    clusters, metadata = asc.read_spikesheet(exp_dir)
    clusterids = plf.clusters_to_ids(clusters)

    refresh_rate = metadata['refresh_rate']

    parameters = asc.read_parameters(exp_name, stim_nr)
    if parameters['stimulus_type'] != 'saccadegrating':
        raise ValueError('Unexpected stimulus type: '
                         f'{parameters["stimulus_type"]}')
    fixfr = parameters.get('fixationframes', 80)
    sacfr = parameters.get('saccadeframes', 10)
    barwidth = parameters.get('barwidth', 40)
    averageshift = parameters.get('averageshift', 2)
    # The seed is hard-coded in the Stimulator
    seed = -10000

    ftimes = asc.readframetimes(exp_dir, stim_nr)
    ftimes.resize(int(ftimes.shape[0] / 2), 2)
    nfr = ftimes.size
    # Re-generate the stimulus
    # Amplitude of the shift and the transition type (saccade or grey)
    # are determined based on the output of ran1
    randnrs = np.array(randpy.ran1(seed, nfr)[0])

    # Separate the amplitude and transitions into two arrays
    stimpos = (4 * randnrs[::2]).astype(int)

    # Transition variable; determines whether the grating moves during
    # the transition or only a grey screen is presented.
    trans = np.array(randnrs[1::2] > 0.5)

    # Record before and after positions in a single array and remove
    # the first element, because there is no before value
    stimposx = np.append(0, stimpos)[:-1]
    stimtr = np.stack((stimposx, stimpos), axis=1)[1:]
    trans = trans[:-1]

    saccadetr = stimtr[trans, :]
    greytr = stimtr[~trans, :]

    # Create a time vector with defined temporal bin size
    tstep = 0.01  # Bin size is defined here, unit is seconds
    trialduration = (fixfr + sacfr) / refresh_rate
    nrsteps = int(trialduration / tstep) + 1
    t = np.linspace(0, trialduration, num=nrsteps)

    # Collect saccade beginning time for each trial
    trials = ftimes[1:, 0]
    sacftimes = trials[trans]
    greyftimes = trials[~trans]

    sacspikes = np.empty((clusters.shape[0], sacftimes.shape[0], t.shape[0]))
    greyspikes = np.empty((clusters.shape[0], greyftimes.shape[0], t.shape[0]))
    # Collect all the PSTHs in one array. The order is
    # transition type, cluster index, start pos, target pos, time
    psth = np.zeros((2, clusters.shape[0], 4, 4, t.size))

    for i, (chid, clid, _) in enumerate(clusters):
        spiketimes = asc.read_raster(exp_dir, stim_nr, chid, clid)
        for j, _ in enumerate(sacftimes):
            sacspikes[i, j, :] = asc.binspikes(spiketimes, sacftimes[j] + t)
        for k, _ in enumerate(greyftimes):
            greyspikes[i, k, :] = asc.binspikes(spiketimes, greyftimes[k] + t)

    # Sort trials according to the transition type
    # nton[i][j] contains the indices of trials where the saccade was i to j
    nton_sac = [[[] for _ in range(4)] for _ in range(4)]
    for i, trial in enumerate(saccadetr):
        nton_sac[trial[0]][trial[1]].append(i)
    nton_grey = [[[] for _ in range(4)] for _ in range(4)]
    for i, trial in enumerate(greytr):
        nton_grey[trial[0]][trial[1]].append(i)

    savedir = os.path.join(exp_dir, 'data_analysis', stimname)
    os.makedirs(savedir, exist_ok=True)
    for i in range(clusters.shape[0]):
        fig, axes = plt.subplots(4,
                                 4,
                                 sharex=True,
                                 sharey=True,
                                 figsize=(8, 8))
        for j in range(4):
            for k in range(4):
                # Start from bottom left corner
                ax = axes[3 - j][k]
                # Average all transitions of one type
                psth_sac = sacspikes[i, nton_sac[j][k], :].mean(axis=0)
                psth_grey = greyspikes[i, nton_grey[j][k], :].mean(axis=0)
                # Convert to spikes per second
                psth_sac = psth_sac / tstep
                psth_grey = psth_grey / tstep
                psth[0, i, j, k, :] = psth_sac
                psth[1, i, j, k, :] = psth_grey
                ax.axvline(sacfr / refresh_rate * 1000,
                           color='red',
                           linestyle='dashed',
                           linewidth=.5)
                ax.plot(t * 1000, psth_sac, label='Saccadic trans.')
                ax.plot(t * 1000, psth_grey, label='Grey trans.')
                ax.set_yticks([])
                ax.set_xticks([])
                # Cosmetics
                plf.spineless(ax)
                if j == k:
                    ax.set_facecolor((1, 1, 0, 0.15))
                if j == 0:
                    ax.set_xlabel(f'{k}')
                    if k == 3:
                        ax.legend(fontsize='xx-small', loc=0)
                if k == 0:
                    ax.set_ylabel(f'{j}')

        # Add an encompassing label for starting and target positions
        ax0 = fig.add_axes([0.08, 0.08, .86, .86])
        plf.spineless(ax0)
        ax0.patch.set_alpha(0)
        ax0.set_xticks([])
        ax0.set_yticks([])
        ax0.set_ylabel('Start position')
        ax0.set_xlabel('Target position')
        plt.suptitle(f'{exp_name}\n{stimname}\n{clusterids[i]}')
        plt.savefig(os.path.join(savedir, f'{clusterids[i]}.svg'))
        plt.close()
    # Save results
    keystosave = [
        'fixfr', 'sacfr', 't', 'averageshift', 'barwidth', 'seed', 'trans',
        'saccadetr', 'greytr', 'nton_sac', 'nton_grey', 'stimname',
        'sacspikes', 'greyspikes', 'psth', 'nfr', 'parameters'
    ]
    data_in_dict = {}
    for key in keystosave:
        data_in_dict[key] = locals()[key]

    np.savez(os.path.join(savedir, str(stim_nr) + '_data'), **data_in_dict)
    print(f'Analysis of {stimname} completed.')
def stripeflickeranalysis(exp_name, stim_nrs):
    exp_dir = iof.exp_dir_fixer(exp_name)

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]
    elif len(stim_nrs) == 0:
        return

    for stim_nr in stim_nrs:
        stimname = iof.getstimname(exp_name, stim_nr)

        clusters, metadata = asc.read_spikesheet(exp_dir)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        scr_width = metadata['screen_width']
        px_size = metadata['pixel_size(um)']

        refresh_rate = metadata['refresh_rate']

        stx_w = parameters['stixelwidth']
        stx_h = parameters['stixelheight']

        if (stx_h / stx_w) < 2:
            raise ValueError('Make sure the stimulus is stripeflicker.')

        sy = scr_width / stx_w
        if sy % 1 == 0:
            sy = int(sy)
        else:
            raise ValueError('sy is not an integer')

        nblinks = parameters['Nblinks']

        bw = parameters.get('blackwhite', False)

        seed = parameters.get('seed', -10000)

        filter_length, frametimings = asc.ft_nblinks(exp_dir, stim_nr)

        # Omit everything that happens before the first 10 seconds
        cut_time = 10

        frame_duration = np.average(np.ediff1d(frametimings))
        total_frames = frametimings.shape[0]

        all_spiketimes = []
        # Store spike-triggered averages in a list containing correctly
        # shaped arrays
        stas = []

        for i in range(len(clusters[:, 0])):
            spiketimes = asc.read_raster(exp_dir, stim_nr, clusters[i, 0],
                                         clusters[i, 1])
            spikes = asc.binspikes(spiketimes, frametimings)
            all_spiketimes.append(spikes)
            stas.append(np.zeros((sy, filter_length)))

        # Add one more element to correct for random noise
        clusters = np.vstack((clusters, [0, 0, 0]))
        all_spiketimes.append(np.ones(frametimings.shape, dtype=int))
        stas.append(np.zeros((sy, filter_length)))

        if bw:
            randnrs, seed = randpy.ranb(seed, sy * total_frames)
        else:
            randnrs, seed = randpy.gasdev(seed, sy * total_frames)

        stimulus = np.reshape(randnrs, (sy, total_frames), order='F')

        if bw:
            # Since ranb returns zeros and ones, we need to convert the zeros
            # into -1s.
            stimulus = stimulus * 2 - 1

        del randnrs

        for k in range(filter_length, total_frames - filter_length + 1):
            stim_small = stimulus[:, k - filter_length + 1:k + 1][:, ::-1]
            for j in range(clusters.shape[0]):
                spikes = all_spiketimes[j]
                if spikes[k] != 0 and frametimings[k] > cut_time:
                    stas[j] += spikes[k] * stim_small

        max_inds = []
        spikenrs = np.array([spikearr.sum() for spikearr in all_spiketimes])

        quals = np.array([])

        # Remove the random noise correction element from clusters
        correction = stas.pop() / spikenrs[-1]
        clusters = clusters[:-1, :]
        all_spiketimes.pop()
        spikenrs = spikenrs[:-1]

        for i in range(clusters.shape[0]):
            stas[i] = stas[i] / spikenrs[i]
            stas[i] = stas[i] - correction
            # Find the pixel with largest absolute value
            max_i = np.squeeze(
                np.where(np.abs(stas[i]) == np.max(np.abs(stas[i]))))
            # If there are multiple pixels with largest value,
            # take the first one.
            if max_i.shape != (2, ):
                try:
                    max_i = max_i[:, 0]
                # If max_i cannot be found just set it to zeros.
                except IndexError:
                    max_i = np.array([0, 0])
            # If the spike number is zero, all elements are NaN;
            # imshow and savefig do not play nice with NaN, so set
            # them all to zero
            if np.all(np.isnan(stas[i])):
                stas[i] = np.zeros(stas[i].shape)
            max_inds.append(max_i)

            quals = np.append(quals, asc.staquality(stas[i]))

        savefname = str(stim_nr) + '_data'
        savepath = pjoin(exp_dir, 'data_analysis', stimname)

        exp_name = os.path.split(exp_dir)[-1]

        if not os.path.isdir(savepath):
            os.makedirs(savepath, exist_ok=True)
        savepath = os.path.join(savepath, savefname)

        keystosave = [
            'stas', 'max_inds', 'clusters', 'sy', 'correction',
            'frame_duration', 'all_spiketimes', 'stimname', 'total_frames',
            'stx_w', 'spikenrs', 'bw', 'quals', 'nblinks', 'filter_length',
            'exp_name'
        ]
        data_in_dict = {}
        for key in keystosave:
            data_in_dict[key] = locals()[key]

        np.savez(savepath, **data_in_dict)
        print(f'Analysis of {stimname} completed.')
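The always-firing dummy cell appended above makes its STA equal to the plain stimulus average, which estimates the bias that is then subtracted from every real STA. A tiny synthetic check of that logic, with made-up data:

import numpy as np

rng = np.random.default_rng(1)
# A black-and-white stimulus with an artificial +0.1 bias
stimulus = rng.choice([-1.0, 1.0], size=(4, 10000)) + 0.1
dummy_spikes = np.ones(10000)  # one "spike" on every frame

# Spike-weighted average with all-ones weights is just the mean,
# i.e. the bias the correction is meant to capture.
correction = (stimulus * dummy_spikes).sum(axis=1) / dummy_spikes.sum()
print(np.round(correction, 2))  # approximately [0.1 0.1 0.1 0.1]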
def randomizestripes(label, exp_name='20180124', stim_nrs=6):
    exp_dir = iof.exp_dir_fixer(exp_name)

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]

    for stim_nr in stim_nrs:
        stimname = iof.getstimname(exp_name, stim_nr)

        clusters, metadata = asc.read_spikesheet(exp_dir)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        scr_width = metadata['screen_width']
        px_size = metadata['pixel_size(um)']

        stx_w = parameters['stixelwidth']
        stx_h = parameters['stixelheight']

        if (stx_h/stx_w) < 2:
            raise ValueError('Make sure the stimulus is stripeflicker.')

        sy = scr_width/stx_w
#        sy = sy*4
        sy = int(sy)

        nblinks = parameters['Nblinks']
        try:
            bw = parameters['blackwhite']
        except KeyError:
            bw = False

        try:
            seed = parameters['seed']
            initialseed = parameters['seed']
        except KeyError:
            seed = -10000
            initialseed = -10000

        if nblinks == 1:
            ft_on, ft_off = asc.readframetimes(exp_dir, stim_nr,
                                               returnoffsets=True)
            # Initialize empty array twice the size of one of them, assign
            # value from on or off to every other element.
            frametimings = np.empty(ft_on.shape[0]*2, dtype=float)
            frametimings[::2] = ft_on
            frametimings[1::2] = ft_off
            # Set filter length so that temporal filter is ~600 ms.
            # The unit here is number of frames.
            filter_length = 40
        elif nblinks == 2:
            frametimings = asc.readframetimes(exp_dir, stim_nr)
            filter_length = 20
        else:
            raise ValueError('Unexpected value for nblinks.')

        # Omit spikes that occur during the first 10 seconds of the recording
        cut_time = 10

        frame_duration = np.average(np.ediff1d(frametimings))
        total_frames = int(frametimings.shape[0]/4)

        all_spiketimes = []
        # Store spike triggered averages in a list containing correct
        # shaped arrays
        stas = []

        for i in range(len(clusters[:, 0])):
            spikes_orig = asc.read_raster(exp_dir, stim_nr,
                                          clusters[i, 0], clusters[i, 1])
            # Replace the real spike train with uniformly distributed
            # random spike times over the same recording span, oversampled
            # 1000-fold so the null STA is smooth.
            spikesneeded = spikes_orig.shape[0]*1000
            spiketimes = np.random.random_sample(spikesneeded)*spikes_orig.max()
            spiketimes = np.sort(spiketimes)
            spikes = asc.binspikes(spiketimes, frametimings)
            all_spiketimes.append(spikes)
            stas.append(np.zeros((sy, filter_length)))

        if bw:
            randnrs, seed = randpy.ran1(seed, sy*total_frames)
            randnrs = [1 if i > .5 else -1 for i in randnrs]
        else:
            randnrs, seed = randpy.gasdev(seed, sy*total_frames)

        stimulus = np.reshape(randnrs, (sy, total_frames), order='F')
        del randnrs

        for k in range(filter_length, total_frames-filter_length+1):
            stim_small = stimulus[:, k-filter_length+1:k+1][:, ::-1]
            for j in range(clusters.shape[0]):
                spikes = all_spiketimes[j]
                if spikes[k] != 0 and frametimings[k] > cut_time:
                    stas[j] += spikes[k]*stim_small

        max_inds = []

        spikenrs = np.array([spikearr.sum() for spikearr in all_spiketimes])

        quals = np.array([])

        for i in range(clusters.shape[0]):
            stas[i] = stas[i]/spikenrs[i]
            # Clusters without spikes yield all-NaN STAs; zero them so the
            # plotting below does not fail (mirrors the handling above).
            if np.all(np.isnan(stas[i])):
                stas[i] = np.zeros(stas[i].shape)
            # Find the pixel with largest absolute value
            max_i = np.squeeze(np.where(np.abs(stas[i])
                                        == np.max(np.abs(stas[i]))))
            # If there are multiple pixels with largest value,
            # take the first one.
            if max_i.shape != (2,):
                try:
                    max_i = max_i[:, 0]
                # If max_i cannot be found just set it to zeros.
                except IndexError:
                    max_i = np.array([0, 0])

            max_inds.append(max_i)

            quals = np.append(quals, asc.staquality(stas[i]))

        clusterids = plf.clusters_to_ids(clusters)

        correction = corrector(sy, total_frames, filter_length, initialseed)
        correction = np.outer(correction, np.ones(filter_length))

        t = np.arange(filter_length)*frame_duration*1000
        vscale = int(stas[0].shape[0] * stx_w*px_size/1000)
        for i in range(clusters.shape[0]):
            sta = stas[i]-correction

            vmax = 0.03
            vmin = -vmax
            plt.figure(figsize=(6, 15))
            ax = plt.subplot(111)
            im = ax.imshow(sta, cmap='RdBu', vmin=vmin, vmax=vmax,
                           extent=[0, t[-1], -vscale, vscale], aspect='auto')
            plt.xlabel('Time [ms]')
            plt.ylabel('Distance [mm]')

            plf.spineless(ax)
            plf.colorbar(im, ticks=[vmin, 0, vmax], format='%.2f', size='2%')
            plt.suptitle(f'{exp_name}\n{stimname}\n'
                         f'{clusterids[i]} Rating: {clusters[i][2]}\n'
                         f'nrofspikes {spikenrs[i]:5.0f}')
            plt.subplots_adjust(top=.90)
            savepath = os.path.join(exp_dir, 'data_analysis',
                                    stimname, 'STAs_randomized')
            svgpath = pjoin(savepath, label)
            if not os.path.isdir(svgpath):
                os.makedirs(svgpath, exist_ok=True)
            plt.savefig(os.path.join(svgpath, clusterids[i]+'.svg'),
                        bbox_inches='tight')
            plt.close()

    os.system(f"convert -delay 25 {svgpath}/*svg {savepath}/animated_{label}.gif")
Example #16
import time

savedir = os.path.join(exp_dir, 'data_analysis', stimname, gqmlabel)
os.makedirs(savedir, exist_ok=True)

kall = np.zeros((clusters.shape[0], l))
Qall = np.zeros((clusters.shape[0], l, l))
muall = np.zeros((clusters.shape[0]))

eigvals = np.zeros((clusters.shape[0], l))
eigvecs = np.zeros((clusters.shape[0], l, l))


clids = plf.clusters_to_ids(clusters)

for i, cl in enumerate(clusters):
    sta = data['stas'][i][0]
    rawspikes = asc.read_raster(exp_name, stim_nr, *clusters[i][:2])

    spikes = asc.binspikes(rawspikes, frametimes)

    usegrad = True
    method = 'Newton-CG'

    start = time.time()
    res = gqm.minimize_loglikelihood(np.zeros(l), np.zeros((l, l)), 0,
                                     stimulus, bin_length, spikes,
                                     usegrad=usegrad,
                                     minimize_disp=True, method=method)
    elapsed = time.time()-start

    print(f'Time elapsed: {elapsed/60:6.1f} mins')
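    # The assignment of the fitted parameters is not shown in this snippet.
    # A hedged sketch of what typically follows, assuming res unpacks into
    # the linear filter k, the quadratic filter Q and the offset mu (the
    # actual return format of gqm.minimize_loglikelihood may differ):
    k, Q, mu = res
    kall[i], Qall[i], muall[i] = k, Q, mu
    # Q is symmetric, so np.linalg.eigh applies; eigenvalues come back
    # in ascending order.
    eigvals[i], eigvecs[i] = np.linalg.eigh(Q)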
Example #17
def onoffstepsanalyzer(exp_name, stim_nrs):
    """
    Analyze on-off steps data, plot and save it. Creates a directory
    data_analysis/<stimulus_name> and saves figures as svg (and pdf
    in a subfolder).

    Parameters:
        exp_name:
            Experiment name.
        stim_nrs:
            Stimulus number of the on-off steps stimulus. Can be a
            single number or a list of numbers.

    """

    exp_dir = iof.exp_dir_fixer(exp_name)

    exp_name = os.path.split(exp_dir)[-1]

    if isinstance(stim_nrs, int):
        stim_nrs = [stim_nrs]

    for stim_nr in stim_nrs:
        stim_nr = str(stim_nr)

        stimname = iof.getstimname(exp_dir, stim_nr)

        clusters, metadata = asc.read_spikesheet(exp_dir, cutoff=4)

        clusterids = plf.clusters_to_ids(clusters)

        parameters = asc.read_parameters(exp_dir, stim_nr)

        refresh_rate = metadata['refresh_rate']

        # Divide by the refresh rate to convert from number of
        # frames to seconds
        pars_stim_duration = parameters['Nframes'] / refresh_rate

        pars_preframe_duration = parameters.get('preframes', 0) / refresh_rate

        if pars_preframe_duration == 0:
            nopreframe = True
            nr_periods = 2
        else:
            nopreframe = False
            nr_periods = 4
        # The first trial will be discarded by dropping the first four frames
        # If we don't save the original and re-initialize for each cell,
        # frametimings will get smaller over time.
        frametimings_original = asc.readframetimes(exp_dir, stim_nr)

        trial_durs = stim_prefr_durations_frametimes(frametimings_original,
                                                     nr_per=nr_periods)
        avg_trial_durs = trial_durs.mean(axis=0)

        if not nopreframe:
            stim_duration = avg_trial_durs[1::2].mean()
            preframe_duration = avg_trial_durs[::2].mean()
        else:
            stim_duration = avg_trial_durs.mean()
            preframe_duration = 0
            warnings.warn('On-off steps analysis with no preframes '
                          'is not tested, proceed with caution.')

        contrast = parameters['contrast']

        total_cycle = avg_trial_durs.sum()

        # Set the bins to be 10 ms
        tstep = 0.01
        bins = int(total_cycle / tstep) + 1
        t = np.linspace(0, total_cycle, num=bins)

        # Setup for onoff bias calculation
        onbegin = preframe_duration
        onend = onbegin + stim_duration
        offbegin = onend + preframe_duration
        offend = offbegin + stim_duration

        # Determine the indices for each period. np.asscalar was removed
        # from recent NumPy versions; a plain int() conversion does the
        # same job here.
        a = []
        for i in [onbegin, onend, offbegin, offend]:
            yo = int(np.where(np.abs(t - i) < tstep / 1.5)[0][-1])
            a.append(yo)

        # To keep the stimulus offset from affecting the bias, use the
        # last 1 second of the preframe period
        prefs = []
        for i in [onbegin - 1, onbegin, offbegin - 1, offbegin]:
            yo = int(np.where(np.abs(t - i) < tstep / 1.5)[0][-1])
            prefs.append(yo)

        onper = slice(a[0], a[1])
        offper = slice(a[2], a[3])

        pref1 = slice(prefs[0], prefs[1])
        pref2 = slice(prefs[2], prefs[3])

        onoffbias = np.empty(clusters.shape[0])
        baselines = np.empty(clusters.shape[0])

        savedir = os.path.join(exp_dir, 'data_analysis', stimname)
        os.makedirs(os.path.join(savedir, 'pdf'), exist_ok=True)

        # Collect all firing rates in a list
        all_frs = []

        for i in range(len(clusters[:, 0])):
            spikes = asc.read_raster(exp_dir, stim_nr, clusters[i, 0],
                                     clusters[i, 1])
            frametimings = frametimings_original
            # Discard all the spikes that happen after the last frame
            spikes = spikes[spikes < frametimings[-1]]
            # Discard the first trial
            spikes = spikes[spikes > frametimings[4]]
            frametimings = frametimings[4:]
            # Find which trial each spike belongs to, and subtract one
            # to be able to use as indices
            trial_indices = np.digitize(spikes, frametimings[::4]) - 1

            rasterplot = []
            # Iterate over all the trials, create an empty array for each
            for j in range(int(np.ceil(frametimings.max() / total_cycle))):
                rasterplot.append([])
            # plt.eventplot requires a list containing spikes in each
            # trial separately
            for k in range(len(spikes)):
                trial = trial_indices[k]
                rasterplot[trial].append(spikes[k] - frametimings[::4][trial])

            # Workaround for matplotlib issue #6412.
            # https://github.com/matplotlib/matplotlib/issues/6412
            # If a cell has no spikes for the first trial i.e. the first
            # element of the list is empty, an error is raised due to
            # a plt.eventplot bug.
            if len(rasterplot[0]) == 0:
                rasterplot[0] = [-1]

            plt.figure(figsize=(9, 9))
            ax1 = plt.subplot(211)
            plt.eventplot(rasterplot, linewidth=.5, color='r')
            # Set the axis so they align with the rectangles
            plt.axis([0, total_cycle, -1, len(rasterplot)])

            # Draw rectangles to represent different parts of the on off
            # steps stimulus
            plf.drawonoff(ax1,
                          preframe_duration,
                          stim_duration,
                          contrast=contrast)

            plt.ylabel('Trial')
            plt.gca().invert_yaxis()
            ax1.set_xticks([])
            plf.spineless(ax1)

            # Collect all trials in one array to calculate firing rates
            ras = np.array([])
            for ii in range(len(rasterplot)):
                ras = np.append(ras, rasterplot[ii])

            # Sort into time bins and count how many spikes happened in each
            fr = np.digitize(ras, t)
            fr = np.bincount(fr)
            # Normalize so that units are spikes/s
            fr = fr * (bins / total_cycle) / (len(rasterplot) - 1)
            # Equalize the length of the two arrays for plotting.
            # np.bincount(x) normally produces x.max()+1 bins
            if fr.shape[0] == bins + 1:
                fr = fr[:-1]
            # If there aren't any spikes at the last trial, the firing
            # rates array is too short and plt.plot raises error.
            while fr.shape[0] < bins:
                fr = np.append(fr, 0)

            prefr = np.append(fr[pref1], fr[pref2])
            baseline = np.median(np.round(prefr))

            fr_corr = fr - baseline

            r_on = np.sum(fr_corr[onper])
            r_off = np.sum(fr_corr[offper])

            if r_on == 0 and r_off == 0:
                bias = np.nan
            else:
                bias = (r_on - r_off) / (np.abs(r_on) + np.abs(r_off))

            plt.suptitle(f'{exp_name}\n{stimname}'
                         f'\n{clusterids[i]} Rating: {clusters[i][2]}\n')

            # Treat weakly responding cells (peak rate below 20 spikes/s)
            # as having no meaningful bias.
            if fr.max() < 20:
                bias = np.nan

            onoffbias[i] = bias
            baselines[i] = baseline

            all_frs.append(fr)

            ax2 = plt.subplot(212)
            plt.plot(t, fr)
            for eachslice in [onper, offper]:
                ax2.fill_between(t[eachslice],
                                 fr[eachslice],
                                 baseline,
                                 where=fr[eachslice] > baseline,
                                 facecolor='lightgray')

            plf.spineless(ax2)
            plt.axis([0, total_cycle, fr.min(), fr.max()])

            plt.title(f'Baseline: {baseline:2.0f} Hz Bias: {bias:0.2f}')
            plt.xlabel('Time [s]')
            plt.ylabel('Firing rate [spikes/s]')

            # Save as svg for looking through data, pdf for
            # inserting into presentations
            figname = '{:0>3}{:0>2}'.format(clusters[i, 0], clusters[i, 1])
            plt.savefig(os.path.join(savedir, figname + '.svg'),
                        format='svg', bbox_inches='tight')
            plt.savefig(os.path.join(savedir, 'pdf', figname + '.pdf'),
                        format='pdf', bbox_inches='tight')
            plt.close()

        keystosave = [
            'clusters', 'total_cycle', 'bins', 'tstep', 'stimname',
            'stim_duration', 'preframe_duration', 'contrast', 'all_frs', 't',
            'exp_name', 'onoffbias', 'baselines'
        ]
        data_in_dict = {}
        for key in keystosave:
            data_in_dict[key] = locals()[key]

        np.savez(os.path.join(savedir, stim_nr + '_data'), **data_in_dict)
        print(f'Analysis of {stimname} completed.')
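# A worked toy example of the bias index used above: bias is
# (r_on - r_off) / (|r_on| + |r_off|), so it runs from -1 for a pure OFF
# response to +1 for a pure ON response. The rates here are made up.
r_on, r_off = 120.0, 40.0
bias = (r_on - r_off) / (np.abs(r_on) + np.abs(r_off))  # -> 0.5, ON-dominated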
Example #18
finally:
    # Tail of a try/finally from the original script: restore the working
    # directory after collecting the stimulus file names.
    os.chdir(wdir)
stim_fnames = sum(stim_fnames, [])  # Flatten the list of lists (hacky but concise)
stim_names = [s.split('.mcd')[0] for s in stim_fnames]
stim_nrs = [s.split('_')[0] for s in stim_names]

clusters, _ = asc.read_spikesheet(experiment_dir, cutoff=3)

total_spikes = np.empty([len(stim_fnames), len(clusters[:, 0])])
total_time = 0

for i in range(len(stim_fnames)):
    all_spikes = []
    interspike_int = []
    for j in range(len(clusters[:, 0])):
        spikes = asc.read_raster(experiment_dir, stim_nrs[i],
                                 clusters[j, 0], clusters[j, 1])
        all_spikes.append(spikes)
        total_spikes[i, j] = len(spikes)
        try:
            lastspike = spikes.max()
        except ValueError:
            lastspike = 0
        if lastspike > total_time:
            total_time = lastspike
        interspike_int.append(np.ediff1d(spikes))
    plt.eventplot(all_spikes)
    plt.ylabel('Cluster')
    plt.xlabel('Time [s]')
    plt.title('{} rasters'.format(stim_names[i]))
    plt.show()
#%%
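# interspike_int is collected above but never used in this snippet. A
# minimal sketch of an interspike-interval histogram for one cluster of
# the last plotted stimulus (index 0 is an arbitrary choice):
plt.hist(interspike_int[0], bins=np.linspace(0, 0.5, 101))
plt.xlabel('Interspike interval [s]')
plt.ylabel('Count')
plt.show()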