Example #1
def test_blur_image(basic_ratemap):
    filt = ['box', 'gaussian']
    rmap1D = basic_ratemap[0, :]
    rmap2D = basic_ratemap
    rmap3D = np.atleast_3d(rmap2D)
    rmaps = [rmap1D, rmap2D, rmap3D]
    b = utils.blurImage(rmap2D, 3, 4)
    for f in filt:
        for rmap in rmaps:
            b = utils.blurImage(rmap, 3, ftype=f)
            assert (isinstance(b, np.ndarray))
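For reference, a minimal direct call to blurImage outside of a test might look like the sketch below; the import path follows the later examples, and the array and kernel size are purely illustrative:

import numpy as np
from ephysiopy.common.utils import blurImage

# a toy 2D "ratemap" with a single bright bin in the middle
rmap = np.zeros((20, 20))
rmap[10, 10] = 1.0

# smooth with a 3-bin boxcar and with a 3-bin gaussian kernel
smoothed_box = blurImage(rmap, 3, ftype='box')
smoothed_gauss = blurImage(rmap, 3, ftype='gaussian')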
Example #2
def field_lims(A):
    """
    Returns a labelled matrix of the ratemap A.
    Uses anything >
    than the half peak rate to select as a field. Data is heavily smoothed
    Parameters
    ----------
    A: np.array
        The ratemap
    Returns
    -------
    label: np.array
        The labelled ratemap
    """
    nan_idx = np.isnan(A)
    A[nan_idx] = 0
    h = int(np.max(A.shape) / 2)
    sm_rmap = blurImage(A, h, ftype='gaussian')
    thresh = np.max(sm_rmap.ravel()) * 0.2  # select area > 20% of peak
    distance = ndimage.distance_transform_edt(sm_rmap > thresh)
    mask = skimage.feature.peak_local_max(
        distance, indices=False,
        exclude_border=False,
        labels=sm_rmap > thresh)
    label = ndimage.label(mask)[0]
    w = watershed(
        image=-distance, markers=label,
        mask=sm_rmap > thresh)
    label = ndimage.label(w)[0]
    return label
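A hypothetical call to field_lims on a synthetic two-field ratemap might look like the following sketch; the import path and the made-up ratemap are assumptions for illustration only:

import numpy as np
# the module path is an assumption; adjust to wherever field_lims is defined
from ephysiopy.common.fieldcalcs import field_lims

# two gaussian-like bumps plus a NaN bin, standing in for unvisited space
x, y = np.indices((50, 50))
rmap = (np.exp(-((x - 15)**2 + (y - 15)**2) / 50.0)
        + np.exp(-((x - 35)**2 + (y - 35)**2) / 50.0))
rmap[0, 0] = np.nan

labels = field_lims(rmap)
print(np.unique(labels))   # 0 is background; each detected field gets its own integer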
Example #3
    def makeSpeedVsHeadDirectionPlot(self, spk_times: np.array,
                                     ax: matplotlib.axes = None,
                                     **kwargs) -> matplotlib.axes:
        self.initialise()
        spk_times_in_pos_samples = self.getSpikePosIndices(spk_times)
        idx = np.array(spk_times_in_pos_samples, dtype=int)
        if np.ma.is_masked(self.speed):
            w = self.speed.mask
            w = np.array(~w, dtype=int)
        else:
            w = np.bincount(idx, minlength=self.speed.shape[0])
        dir_bins = np.arange(0, 360, 6)
        spd_bins = np.arange(0, 30, 1)
        h = np.histogram2d(
            self.dir, self.speed, [dir_bins, spd_bins], weights=w)
        from ephysiopy.common.utils import blurImage
        im = blurImage(h[0], 5, ftype='gaussian')
        im = np.ma.MaskedArray(im)
        # mask low rates...
        im = np.ma.masked_where(im <= 1, im)
        # ... and where less than 0.5% of data is accounted for
        x, y = np.meshgrid(dir_bins, spd_bins)
        vmax = np.max(np.ravel(im))
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        ax.pcolormesh(x, y, im.T, cmap=plt.cm.get_cmap("jet"),
                      edgecolors='face',
                      vmax=vmax, shading='auto')
        plt.xticks([90, 180, 270], fontweight='normal', size=6)
        plt.yticks([10, 20], fontweight='normal', size=6)
        return ax
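The heart of the method above is a spike-weighted 2D histogram of head direction against running speed, smoothed before plotting. The same idea in isolation, with made-up data and variable names (only blurImage is taken from the examples):

import numpy as np
from ephysiopy.common.utils import blurImage

rng = np.random.default_rng(0)
direction = rng.uniform(0, 360, 10000)   # degrees, one value per pos sample
speed = rng.gamma(2.0, 5.0, 10000)       # cm/s, one value per pos sample
spk_count = rng.poisson(0.1, 10000)      # spikes per pos sample (the weights)

dir_bins = np.arange(0, 360, 6)
spd_bins = np.arange(0, 30, 1)
h, _, _ = np.histogram2d(direction, speed, [dir_bins, spd_bins],
                         weights=spk_count)
im = blurImage(h, 5, ftype='gaussian')   # smooth before any masking/plotting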
Example #4
def test_field_props(basic_ratemap):
    fp = fieldcalcs.field_props(basic_ratemap)
    assert (isinstance(fp, dict))
    fieldcalcs.field_props(basic_ratemap,
                           clear_border=True,
                           neighbours=100,
                           calc_angs=True,
                           min_distance=5)
    # test something that should fail as it's poorly formed
    x, y = np.indices((80, 80))
    x1, y1, x2, y2 = 28, 28, 44, 52
    r1, r2 = 16, 20
    mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
    mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
    image = np.logical_or(mask_circle1, mask_circle2)
    from ephysiopy.common.utils import blurImage
    im = blurImage(image, 15)
    im[im < 0.1] = 0
    fieldcalcs.field_props(im)
Example #5
    def tWinSAC(
        self,
        xy,
        spkIdx,
        ppm=365,
        winSize=10,
        pos_sample_rate=50,
        nbins=71,
        boxcar=5,
        Pthresh=100,
        downsampfreq=50,
        plot=False,
    ):
        """
        Temporal windowed spatial autocorrelation.

        Parameters
        ----------
        xy : array_like
            The position data as a 2 x nSamples array
        spkIdx : array_like
            The indices in xy where the cell fired
        ppm : int, optional
            The camera pixels per metre. Default 365
        winSize : int, optional
            The window size for the temporal search
        pos_sample_rate : int, optional
            The rate at which position was sampled. Default 50
        nbins : int, optional
            The number of bins for creating the resulting ratemap. Default 71
        boxcar : int, optional
            The size of the smoothing kernel to smooth ratemaps. Default 5
        Pthresh : int, optional
            The cut-off for values in the ratemap; values < Pthresh become
            nans. Default 100
        downsampfreq : int, optional
            The frequency (Hz) to downsample the position data to. Default 50
        plot : bool, optional
            Whether to show a plot of the result. Default False

        Returns
        -------
        H : array_like
            The temporal windowed SAC

        """
        # [Stage 0] Get some numbers
        xy = xy / ppm * 100
        n_samps = xy.shape[1]
        n_spks = len(spkIdx)
        winSizeBins = np.min([winSize * pos_sample_rate, n_samps])
        # factor by which positions are downsampled
        downsample = np.ceil(pos_sample_rate / downsampfreq)
        Pthresh = Pthresh / downsample  # take account of downsampling

        # [Stage 1] Calculate number of spikes in the window for each spikeInd
        # (ignoring spike itself)
        # 1a. Loop preparation
        nSpikesInWin = np.zeros(n_spks, dtype=int)

        # 1b. Keep looping until we have dealt with all spikes
        for i, s in enumerate(spkIdx):
            t = np.searchsorted(spkIdx, (s, s + winSizeBins))
            nSpikesInWin[i] = len(spkIdx[t[0]:t[1]]) - 1  # ignore ith spike

        # [Stage 2] Prepare for main loop
        # 2a. Work out offset indices to be used when storing spike data
        off_spike = np.cumsum([nSpikesInWin])
        off_spike = np.pad(off_spike, (1, 0), "constant", constant_values=(0))

        # 2b. Work out number of downsampled pos bins in window and
        # offset indices for storing data
        nPosInWindow = np.minimum(winSizeBins, n_samps - spkIdx)
        nDownsampInWin = np.floor((nPosInWindow - 1) / downsample) + 1

        off_dwell = np.cumsum(nDownsampInWin.astype(int))
        off_dwell = np.pad(off_dwell, (1, 0), "constant", constant_values=(0))

        # 2c. Pre-allocate dwell and spike arrays, singles for speed
        dwell = np.zeros((2, off_dwell[-1]), dtype=np.single) * np.nan
        spike = np.zeros((2, off_spike[-1]), dtype=np.single) * np.nan

        filled_pvals = 0
        filled_svals = 0

        for i in range(n_spks):
            # calculate dwell displacements
            winInd_dwell = np.arange(
                spkIdx[i] + 1,
                np.minimum(spkIdx[i] + winSizeBins, n_samps),
                downsample,
                dtype=int,
            )
            WL = len(winInd_dwell)
            dwell[:, filled_pvals:filled_pvals + WL] = np.rot90(
                np.array(np.rot90(xy[:, winInd_dwell]) - xy[:, spkIdx[i]]))
            filled_pvals = filled_pvals + WL
            # calculate spike displacements
            winInd_spks = (
                i +
                np.nonzero(spkIdx[i + 1:n_spks] < spkIdx[i] + winSizeBins)[0])
            WL = len(winInd_spks)
            spike[:, filled_svals:filled_svals + WL] = np.rot90(
                np.array(
                    np.rot90(xy[:, spkIdx[winInd_spks]]) - xy[:, spkIdx[i]]))
            filled_svals = filled_svals + WL

        dwell = np.delete(dwell, np.isnan(dwell).nonzero()[1], axis=1)
        spike = np.delete(spike, np.isnan(spike).nonzero()[1], axis=1)

        dwell = np.hstack((dwell, -dwell))
        spike = np.hstack((spike, -spike))

        dwell_min = np.min(dwell, axis=1)
        dwell_max = np.max(dwell, axis=1)

        binsize = (dwell_max[1] - dwell_min[1]) / nbins

        dwell = np.round(
            (dwell - np.ones_like(dwell) * dwell_min[:, np.newaxis]) / binsize)
        spike = np.round(
            (spike - np.ones_like(spike) * dwell_min[:, np.newaxis]) / binsize)

        binsize = np.max(dwell, axis=1).astype(int)
        binedges = np.array(((-0.5, -0.5), binsize + 0.5)).T
        Hp = np.histogram2d(dwell[0, :],
                            dwell[1, :],
                            range=binedges,
                            bins=binsize)[0]
        Hs = np.histogram2d(spike[0, :],
                            spike[1, :],
                            range=binedges,
                            bins=binsize)[0]

        # reverse y,x order
        Hp = np.swapaxes(Hp, 1, 0)
        Hs = np.swapaxes(Hs, 1, 0)

        fHp = blurImage(Hp, boxcar)
        fHs = blurImage(Hs, boxcar)

        H = fHs / fHp
        H[Hp < Pthresh] = np.nan

        return H
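Stage 1 above counts, for every spike, how many later spikes fall inside the temporal window. That step can be read in isolation as follows; the spike indices and window size here are invented, but the logic mirrors the searchsorted loop above:

import numpy as np

pos_sample_rate = 50                       # Hz, as in the defaults above
win_size_bins = 10 * pos_sample_rate       # a 10 s window in pos samples

spk_idx = np.sort(np.random.default_rng(1).choice(30000, size=500,
                                                  replace=False))
n_spikes_in_win = np.zeros(len(spk_idx), dtype=int)
for i, s in enumerate(spk_idx):
    lo, hi = np.searchsorted(spk_idx, (s, s + win_size_bins))
    n_spikes_in_win[i] = (hi - lo) - 1     # exclude the ith spike itself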
Example #6
    def getMap(self, spkWeights, varType="xy", mapType="rate", smoothing=True):
        """
        Bins up the variable type varType and returns a tuple of
        (rmap, binnedPositionDir) or
        (rmap, binnedPositionX, binnedPositionY)

        Parameters
        ----------
        spkWeights : array_like
            Shape equal to the number of position samples captured and
            consists of position weights. For example, if there were 5
            positions recorded and a cell spiked once in position 2 and
            5 times in position 3 and nowhere else, then spkWeights looks
            like: [0 0 1 5 0]
        varType : str, optional, default 'xy'
            The variable to bin up. Legal values are: 'xy', 'dir', and 'speed'
        mapType : str, optional, default 'rate'
            If 'rate' then the binned up spikes are divided by varType.
            Otherwise return binned up position. Options are 'rate' or 'pos'
        smoothing : bool, optional, default True
            Whether to smooth the data or not

        Returns
        -------
        binned_data, binned_pos : tuple
            This is either a 2-tuple or a 3-tuple depending on whether binned
            pos (mapType 'pos') or binned spikes (mapType 'rate') is asked
            for respectively

        """
        sample = getattr(self, varType, "xy")
        assert sample is not None
        # might happen if head direction not supplied for example

        if "xy" in varType:
            self.binsize = self._calcBinSize(self.cmsPerBin)
        elif "dir" in varType:
            self.binsize = np.arange(0, 360 + self.cmsPerBin, self.cmsPerBin)
        elif "speed" in varType:
            self.binsize = np.arange(0, 50, 1)

        binned_pos = self._binData(sample, self.binsize, self.pos_weights)

        binned_pos_edges = binned_pos[1]
        binned_pos = binned_pos[0]
        nanIdx = binned_pos == 0

        if "pos" in mapType:  # return just binned up position
            if smoothing:
                if "dir" in varType:
                    binned_pos = self._circPadSmooth(binned_pos,
                                                     n=self.smooth_sz)
                else:
                    binned_pos = blurImage(binned_pos,
                                           self.smooth_sz,
                                           ftype=self.smoothingType)
            return binned_pos, binned_pos_edges

        binned_spk = self._binData(sample, self.binsize, spkWeights)[0]
        # _binData returns a tuple of (binned data, bin edges); we only keep
        # the binned data here
        if "after" in self.whenToSmooth:
            rmap = binned_spk / binned_pos
            if "dir" in varType:
                rmap = self._circPadSmooth(rmap, self.smooth_sz)
            else:
                rmap = blurImage(rmap,
                                 self.smooth_sz,
                                 ftype=self.smoothingType)
        else:  # default case
            if not smoothing:
                return binned_spk / binned_pos, binned_pos_edges
            if "dir" in varType:
                binned_pos = self._circPadSmooth(binned_pos, self.smooth_sz)
                binned_spk = self._circPadSmooth(binned_spk, self.smooth_sz)
                rmap = binned_spk / binned_pos
            else:
                binned_pos = blurImage(binned_pos,
                                       self.smooth_sz,
                                       ftype=self.smoothingType)
                if binned_spk.ndim == 2:
                    pass
                elif binned_spk.ndim == 1:
                    binned_spk_tmp = np.zeros(
                        [binned_spk.shape[0], binned_spk.shape[0], 1])
                    for i in range(binned_spk.shape[0]):
                        binned_spk_tmp[i, :, :] = binned_spk[i]
                    binned_spk = binned_spk_tmp
                binned_spk = blurImage(np.squeeze(binned_spk),
                                       self.smooth_sz,
                                       ftype=self.smoothingType)
                rmap = binned_spk / binned_pos
                if rmap.ndim <= 2:
                    rmap[nanIdx] = np.nan

        return rmap, binned_pos_edges
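The spkWeights argument described in the docstring is just a per-position-sample spike count. Given spike times in seconds and a position sample rate (assumed to be 50 Hz here, as elsewhere in these examples), it can be built with np.bincount:

import numpy as np

pos_sample_rate = 50          # Hz; assumed, matching the other examples
n_pos_samples = 5
spike_times = np.array([0.045, 0.061, 0.064, 0.068, 0.072, 0.076])  # seconds

spk_pos_idx = (spike_times * pos_sample_rate).astype(int)
spk_weights = np.bincount(spk_pos_idx, minlength=n_pos_samples)
print(spk_weights)            # [0 0 1 5 0], matching the docstring example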
Example #7
    def ifr_sp_corr(self,
                    x1,
                    speed,
                    minSpeed=2.0,
                    maxSpeed=40.0,
                    sigma=3,
                    shuffle=False,
                    nShuffles=100,
                    minTime=30,
                    plot=False):
        """
        x1 : np.array
            The indices of pos at which the cluster fired
        speed: np.array (1 x nSamples)
            instantaneous speed
        minSpeed: int
            speeds below this value are ignored - defaults to 2cm/s as with
            Kropff et al., 2015
        """
        speed = speed.ravel()
        posSampRate = 50
        nSamples = len(speed)
        # position is sampled at 50Hz and so is 'automatically' binned into
        # 20ms bins
        spk_hist = np.bincount(x1, minlength=nSamples)
        # smooth the spk_hist (which is a temporal histogram) with a 250ms
        # gaussian as with Kropff et al., 2015
        h = signal.windows.gaussian(13, sigma)
        h = h / float(np.sum(h))
        # filter for low speeds
        lowSpeedIdx = speed < minSpeed
        highSpeedIdx = speed > maxSpeed
        speed_filt = speed[~np.logical_or(lowSpeedIdx, highSpeedIdx)]
        spk_hist_filt = spk_hist[~np.logical_or(lowSpeedIdx, highSpeedIdx)]
        spk_sm = signal.filtfilt(h.ravel(), 1, spk_hist_filt)
        sm_spk_rate = spk_sm * posSampRate
        res = stats.pearsonr(sm_spk_rate, speed_filt)
        if plot:
            # do some fancy plotting stuff
            _, sp_bin_edges = np.histogram(speed_filt, bins=50)
            sp_dig = np.digitize(speed_filt, sp_bin_edges, right=True)
            spks_per_sp_bin = [
                spk_hist_filt[sp_dig == i] for i in range(len(sp_bin_edges))
            ]
            rate_per_sp_bin = []
            for x in spks_per_sp_bin:
                rate_per_sp_bin.append(np.mean(x) * posSampRate)
            rate_filter = signal.windows.gaussian(5, 1.0)
            rate_filter = rate_filter / np.sum(rate_filter)
            binned_spk_rate = signal.filtfilt(rate_filter, 1, rate_per_sp_bin)
            # instead of plotting a scatter plot of the firing rate at each
            # speed bin, plot a log normalised heatmap and overlay results

            spk_binning_edges = np.linspace(np.min(sm_spk_rate),
                                            np.max(sm_spk_rate),
                                            len(sp_bin_edges))
            speed_mesh, spk_mesh = np.meshgrid(sp_bin_edges, spk_binning_edges)
            binned_rate, _, _ = np.histogram2d(
                speed_filt,
                sm_spk_rate,
                bins=[sp_bin_edges, spk_binning_edges])
            # blur the binned rate a bit to make it look nicer
            from ephysiopy.common.utils import blurImage
            sm_binned_rate = blurImage(binned_rate, 5)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            from matplotlib.colors import LogNorm
            speed_mesh = speed_mesh[:-1, :-1]
            spk_mesh = spk_mesh[:-1, :-1]
            ax.pcolormesh(speed_mesh,
                          spk_mesh,
                          sm_binned_rate,
                          norm=LogNorm(),
                          alpha=0.5,
                          shading='nearest',
                          edgecolors='None')
            # overlay the smoothed binned rate against speed
            ax.plot(sp_bin_edges, binned_spk_rate, 'r')
            # do the linear regression and plot the fit too
            # TODO: linear regression is broken ie not regressing the correct
            # variables
            lr = stats.linregress(speed_filt, sm_spk_rate)
            end_point = lr.intercept + (
                (sp_bin_edges[-1] - sp_bin_edges[0]) * lr.slope)
            ax.plot([np.min(sp_bin_edges),
                     np.max(sp_bin_edges)], [lr.intercept, end_point], 'r--')
            ax.set_xlim(np.min(sp_bin_edges), np.max(sp_bin_edges[-2]))
            ax.set_ylim(0, np.nanmax(binned_spk_rate) * 1.1)
            ax.set_ylabel('Firing rate(Hz)')
            ax.set_xlabel('Running speed(cm/s)')
            ax.set_title(
                'Intercept: {0:.3f}   Slope: {1:.5f}\nPearson: {2:.5f}'.format(
                    lr.intercept, lr.slope, lr.rvalue))
        # do some shuffling of the data to see if the result is significant
        if shuffle:
            # shift spikes by at least 30 seconds after trial start and
            # 30 seconds before trial end
            timeSteps = np.random.randint(30 * posSampRate,
                                          nSamples - (30 * posSampRate),
                                          nShuffles)
            shuffled_results = []
            for t in timeSteps:
                spk_count = np.roll(spk_hist, t)
                # filter the same samples as speed_filt so the lengths match
                spk_count_filt = spk_count[~np.logical_or(lowSpeedIdx,
                                                          highSpeedIdx)]
                spk_count_sm = signal.filtfilt(h.ravel(), 1, spk_count_filt)
                shuffled_results.append(
                    stats.pearsonr(spk_count_sm, speed_filt)[0])
            if plot:
                fig = plt.figure()
                ax = fig.add_subplot(1, 1, 1)
                ax.hist(np.abs(shuffled_results), 20)
                ylims = ax.get_ylim()
                ax.vlines(res[0], ylims[0], ylims[1], 'r')
        # only return a figure if one was actually created
        if plot and isinstance(fig, plt.Figure):
            return fig
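For reference, the 250 ms smoothing used above follows from the 20 ms position bins at 50 Hz: a 13-sample Gaussian window spans roughly 260 ms. A minimal, self-contained sketch of building such a kernel and turning a binned spike count into a rate (the spike counts are random, and scipy.signal.windows.gaussian is used as the window function):

import numpy as np
from scipy import signal

pos_sample_rate = 50                       # Hz, so each bin is 20 ms
h = signal.windows.gaussian(13, std=3)     # ~260 ms window, sigma of 3 bins
h = h / np.sum(h)                          # normalise so counts are preserved

spk_hist = np.random.default_rng(2).poisson(0.2, 1000)  # toy 20 ms bin counts
spk_sm = signal.filtfilt(h, 1, spk_hist.astype(float))
rate_hz = spk_sm * pos_sample_rate         # counts per bin -> spikes per second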