def run_phase_stats_with_shuffle(events, spike_rel_times, phase_data_hilbert, phase_bin_start,
                                 phase_bin_stop, parallel=None, num_perms=100, shuffle_type=1):

    # first, get the stats on the non-permuted data
    stats_real, pvals_real, novel_phases, rep_phases = compute_phase_stats_with_shuffle(events, spike_rel_times,
                                                                                        phase_data_hilbert,
                                                                                        phase_bin_start,
                                                                                        phase_bin_stop,
                                                                                        do_permute=False)

    # then run the permutations
    f = compute_phase_stats_with_shuffle
    if not np.any(np.isnan(stats_real)):

        if isinstance(parallel, Parallel):
            shuff_res = parallel((delayed(f)(events, spike_rel_times, phase_data_hilbert, phase_bin_start,
                                             phase_bin_stop, True, shuffle_type) for _ in range(num_perms)))
        else:
            shuff_res = []
            for _ in range(num_perms):
                shuff_res.append(f(events, spike_rel_times, phase_data_hilbert, phase_bin_start,
                                   phase_bin_stop, do_permute=True, shuffle_type=shuffle_type))

        # pull out the shuffled phase distributions before reducing shuff_res to just the stats tuples
        mean_shuf_novel_phases = np.array([pycircstat.mean(x[2], axis=0) for x in shuff_res])
        mean_shuf_rep_phases = np.array([pycircstat.mean(x[3], axis=0) for x in shuff_res])
        shuff_res = [x[0] for x in shuff_res]

        # compare the true stats to the distributions of permuted stats
        stats_percentiles = np.mean(np.array(stats_real) > np.array(shuff_res), axis=0)

        return np.array(stats_real), stats_percentiles, np.array(pvals_real), novel_phases, rep_phases, \
               mean_shuf_novel_phases, mean_shuf_rep_phases

    else:
        return np.full((10, phase_data_hilbert.shape[2]), np.nan), np.full((10, phase_data_hilbert.shape[2]), np.nan), \
               np.full((5, phase_data_hilbert.shape[2]), np.nan), novel_phases, rep_phases, np.array([]), np.array([])
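The null-comparison step above, reduced to its essentials: the real statistic's rank within the shuffled distribution. A minimal sketch with made-up numbers; only numpy is assumed.

import numpy as np

stat_real = 2.5                                # hypothetical observed statistic
shuff_stats = np.random.randn(1000)            # stand-in for num_perms shuffled statistics
percentile = np.mean(stat_real > shuff_stats)  # fraction of the null the real value exceeds
p_value = 1 - percentile                       # one-sided permutation p-value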
def run_phase_stats_with_shuffle(events, spike_rel_times, phase_data_hilbert, phase_bin_start,
                                 phase_bin_stop, parallel=None, num_perms=100):

    # first, get the stats on the non-permuted data
    stats_real, pvals_real, novel_phases, rep_phases = compute_phase_stats_with_shuffle(events, spike_rel_times,
                                                                                        phase_data_hilbert,
                                                                                        phase_bin_start,
                                                                                        phase_bin_stop,
                                                                                        do_permute=False)

    # then run the permutations
    f = compute_phase_stats_with_shuffle
    if not np.any(np.isnan(stats_real)):

        if isinstance(parallel, Parallel):
            shuff_res = parallel((delayed(f)(events, spike_rel_times, phase_data_hilbert, phase_bin_start,
                                             phase_bin_stop, True) for _ in range(num_perms)))
        else:
            shuff_res = []
            for _ in range(num_perms):
                shuff_res.append(f(events, spike_rel_times, phase_data_hilbert, phase_bin_start,
                                   phase_bin_stop, do_permute=True))

        # pull out the shuffled phase distributions before reducing shuff_res to just the stats tuples
        mean_shuf_novel_phases = np.array([pycircstat.mean(x[2]) for x in shuff_res])
        mean_shuf_rep_phases = np.array([pycircstat.mean(x[3]) for x in shuff_res])
        shuff_res = [x[0] for x in shuff_res]

        # compare the true stats to the distributions of permuted stats
        stats_percentiles = np.mean(np.array(stats_real) > np.array(shuff_res), axis=0)

        return np.array(stats_real), stats_percentiles, np.array(pvals_real), novel_phases, rep_phases, \
               mean_shuf_novel_phases, mean_shuf_rep_phases

    else:
        return np.full((8, phase_data_hilbert.shape[2]), np.nan), np.full((8, phase_data_hilbert.shape[2]), np.nan), \
               np.full((4, phase_data_hilbert.shape[2]), np.nan), novel_phases, rep_phases, np.array([]), np.array([])
def test_center():
    data = np.random.rand(1000) * 2 * np.pi
    try:
        assert_allclose(pycircstat.mean(pycircstat.center(data)),
                        0, rtol=1e-3, atol=1e-3)
    except AssertionError:
        assert_allclose(pycircstat.mean(pycircstat.center(data)),
                        2 * np.pi, rtol=1e-3, atol=1e-3)
Example #5
def gaborbank_mean_orientation(d):
    """ Compute the mean orientation for each point in the image
    by summing energy over spatial frequencies and then computing the
    circular mean of orientations for each point, weighted by the filter
    response.

    Args:
        d: the dict output by gaborbank_convolve.

    Returns:
        image containing interpolated orientations. Possible values
        run 0 to pi radians, increasing counterclockwise (pi/2 is vertical).
    """
    res = d['res']
    theta = d['theta']

    e = res.real**2 + res.imag**2
    e = e.sum(axis=2)  # sum energy over scales.

    # reshape the angles into an image plane for each angle:
    t = np.tile(theta, e.shape[0]*e.shape[1])
    t = np.reshape(t, e.shape)
    # t has the same shape as e, with each orientation along axis 2.

    """compute circular mean, with energy as weights. Axial correction
    is to change direction 0-pi --> orientation 0-2*pi. Correct afterward
    by folding orientations > pi back around.
    """

    out = circ.mean(t, w=e, axis=2, axial_correction=2)
    out[out > np.pi] -= np.pi
    return out
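The axial correction described in the docstring, in a standalone sketch: with axial_correction=2, pycircstat.mean doubles the angles before averaging and halves the result, so 0 and pi are treated as the same orientation. The data here are made up.

import numpy as np
import pycircstat

orientations = np.array([0.1, np.pi - 0.1])  # nearly the same axis, far apart as directions
naive = pycircstat.mean(orientations)        # ~pi/2, a misleading directional mean
axial = pycircstat.mean(orientations, axial_correction=2) % np.pi  # ~0, the shared axis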
Example #6
    def _prepare(self):
        if len(PUnitPhases()) != len(self):
            df = pd.DataFrame(PUnitPhases().fetch())
            df['phase'] = [circ.mean(e) for e in df.phases]
            df['jitter'] = [circ.std(ph) for ph in df.phases]
            self.insert([e.to_dict() for _, e in df.loc[:, ['fish_id', 'cell_id', 'phase', 'jitter']].iterrows()],
                        skip_duplicates=True)
def test_mean():
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])

    # We cannot use `assert_equal`, due to numerical rounding errors.
    assert_allclose(pycircstat.mean(data), 1.35173983)
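A two-line illustration of why a circular mean is needed at all, for angles straddling the 0/2*pi wrap (only numpy and pycircstat assumed):

import numpy as np
import pycircstat

angles = np.array([0.1, 2 * np.pi - 0.1])
print(np.mean(angles))          # ~pi: the arithmetic mean ignores the wrap-around
print(pycircstat.mean(angles))  # ~0 (mod 2*pi): the circular mean does not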
Example #8
def head_direction_stats(head_angle_bins, rate):
    """
    Calculate the mean head direction and mean vector length, weighting
    each head-direction bin by its firing rate

    Parameters
    ----------
    head_angle_bins : quantities.Quantity array in degrees
        binned head directions
    rate : np.ndarray
        firing rate magnitude corresponding to angles

    Returns
    -------
    out : float, float
        mean angle, mean vector length
    """
    import math
    import pycircstat as pc
    if head_angle_bins.units == pq.degrees:
        head_angle_bins = [math.radians(deg)
                           for deg in head_angle_bins] * pq.radians
    nanIndices = np.where(np.isnan(rate))[0]
    nanIndices = np.unique(nanIndices)
    rate = np.delete(rate, nanIndices)
    head_angle_bins = np.delete(head_angle_bins, nanIndices)
    mean_ang = pc.mean(head_angle_bins, w=rate)
    mean_vec_len = pc.resultant_vector_length(head_angle_bins, w=rate)
    # ci_lim = pc.mean_ci_limits(head_angle_bins, w=rate)
    return math.degrees(mean_ang), mean_vec_len
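A minimal usage sketch for the function above, on synthetic tuning data (the quantities/pycircstat imports match the function's expectations; the numbers are made up):

import numpy as np
import quantities as pq

head_angle_bins = np.arange(0, 360, 6) * pq.degrees  # 6-degree bins
rate = np.exp(np.cos(np.deg2rad(head_angle_bins.magnitude) - np.pi / 2))  # tuning peaked at 90 deg
mean_ang, mean_vec_len = head_direction_stats(head_angle_bins, rate)
print(mean_ang, mean_vec_len)  # mean angle near 90 degrees, vector length in (0, 1]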
Example #10
def histogramFromData(angleData, numBins, **kwargs):
    ''' This function plots the histogram of angles with the number of bins
        equal to numBins, assuming the angles are between 0 and 180.
    '''
    # fall back to defaults only when the caller did not supply these kwargs
    kwargs.setdefault('title', 'Bobby')
    kwargs.setdefault('ylimit', 0.1)
    binsRange = [((2.0 * i + 1) / 2) * (180.0 / numBins)
                 for i in range(-1, numBins)]
    bins = np.array(binsRange)
    hist1, bins = np.histogram(angleData, bins=bins)
    center = (bins[:-1] + bins[1:]) / 2
    widths = np.diff(bins)
    histplt = hist1 / np.sum(hist1 * widths)
    plt.bar(center, histplt, align='center', width=widths, facecolor='c')
    plt.ylim(0, kwargs['ylimit'])
    plt.title(kwargs['title'])
    plt.show()

    print('Total number of pixels on the centerline is = {}'.format(
        len(angleData)))
    mean = (0.5) * pcirc.mean(2 * np.array(angleData) * np.pi / 180)
    var = pcirc.var(2 * np.array(angleData) * np.pi / 180)
    print('The circular mean and variance are ' + str(mean * 180 / np.pi) +
          ' and ' + str(var) + ' respectively.')


#histogramFromData(angleData,numBins,title,ylimit,**kwargs)
def circvVals(rad, w, d):
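    # weighted circular summary statistics of the angles in `rad` (weights w, angular bin spacing d)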
    vStr = pcirc.vector_strength(rad, w=w, d=d/2, ci=None)
    vDir = pcirc.mean(rad, w=w, d=d/2, ci=None)
    astd = pcirc.astd(rad, w=w, d=d/2, ci=None)
    skew = pcirc.skewness(rad, w=w, ci=None)
    kurt = pcirc.kurtosis(rad, w=w, ci=None)
    return vStr, vDir, astd, skew, kurt
    def compute_hilbert_for_cluster(self, this_cluster_name):

        # first, get the eeg for just channels in cluster
        cluster_rows = self.res['clusters'][this_cluster_name].notna()
        cluster_elec_labels = self.res['clusters'][cluster_rows]['label']
        cluster_eeg = self.subject_data[:, np.in1d(self.subject_data.channel, cluster_elec_labels)]

        # bandpass eeg at the mean frequency, making sure the lower frequency isn't too low
        cluster_mean_freq = self.res['clusters'][cluster_rows][this_cluster_name].mean()
        cluster_freq_range = [cluster_mean_freq - self.hilbert_half_range, cluster_mean_freq + self.hilbert_half_range]
        if cluster_freq_range[0] < SubjectTravelingWaveAnalysis.LOWER_MIN_FREQ:
            cluster_freq_range[0] = SubjectTravelingWaveAnalysis.LOWER_MIN_FREQ
        filtered_eeg = RAM_helpers.band_pass_eeg(cluster_eeg, cluster_freq_range)
        filtered_eeg = filtered_eeg.transpose('channel', 'event', 'time')

        # run the hilbert transform
        complex_hilbert_res = hilbert(filtered_eeg.data, N=filtered_eeg.shape[-1], axis=-1)

        # compute the phase of the filtered eeg
        phase_data = filtered_eeg.copy()
        phase_data.data = np.unwrap(np.angle(complex_hilbert_res))

        # compute the power
        power_data = filtered_eeg.copy()
        power_data.data = np.abs(complex_hilbert_res) ** 2

        # compute mean phase and phase difference between ref phase and each electrode phase
        ref_phase = pycircstat.mean(phase_data.data, axis=0)
        phase_data.data = pycircstat.cdiff(phase_data.data, ref_phase)
        return phase_data, power_data, cluster_mean_freq
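The core of the example above as a self-contained sketch, with random numbers standing in for band-passed eeg (the scipy and pycircstat calls are the same ones used there):

import numpy as np
import pycircstat
from scipy.signal import hilbert

eeg = np.random.randn(8, 50, 1000)              # channel x event x time, already band-passed here
analytic = hilbert(eeg, axis=-1)                # analytic signal along the time axis
phase = np.angle(analytic)                      # instantaneous phase
power = np.abs(analytic) ** 2                   # instantaneous power
ref_phase = pycircstat.mean(phase, axis=0)      # circular mean phase across channels
rel_phase = pycircstat.cdiff(phase, ref_phase)  # each channel's phase relative to the mean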
def test_mean_ci_1d():
    data = np.array([0.88333, 2.22854, 3.06369, 1.49836, 1.51748])
    muplus = 2.7003
    muminus = 0.89931
    mu = 1.7998

    mu_tmp, (muminus_tmp, muplus_tmp) = pycircstat.mean(data, ci=0.95)
    assert_allclose(muplus, muplus_tmp, rtol=1e-4)
    assert_allclose(muminus, muminus_tmp, rtol=1e-4)
    assert_allclose(mu, mu_tmp, rtol=1e-4)
Example #17
def drawHistograms(fileFactory, msMap, histRange):
    '''
  INPUT: histRange : (start, end) angle of histogram
         msMap:
  '''
    numBands = msMap[0][0][0][0]
    #@TODO 2,180 can be replaced with histRange[1] - histRange[0] ? no use case now
    binsRange = [((2.0 * i + 1) / 2) * (180.0 / numBands)
                 for i in range(-1, numBands)]
    bins = np.array(binsRange) + histRange[0]
    center = (bins[:-1] + bins[1:]) / 2
    widths = np.diff(bins)
    maxYLimit = _estimateYLimit(msMap, histRange, bins, widths)
    for (xSeg, name), (histCont, _) in zip(fileFactory, msMap):
        #@TODO maybe make the color map a parameter
        plt.imshow(xSeg, cmap=plt.get_cmap('gray'))
        plt.title('multiscale directional histogram for file {0}'.format(name))
        plt.xticks([])
        plt.yticks([])
        plt.show()
        for index, (nBands, mSize, newAngles) in enumerate(histCont):
            #  hist=msMap[index][0][0] # histogram information
            #  newAngles=msMap[index][0][1] # angle information between 0 and 180 degrees
            ## converting angles to angles between -90 and 90 degrees
            if histRange == (-90, 90):
                newAngles1 = [
                    -180 + angle for angle in newAngles if angle > 90
                ]
                newAngles2 = [angle for angle in newAngles if angle <= 90]
                newAngles = newAngles1 + newAngles2

            #mSize=msMap[index][0][2]

            hist1, bins = np.histogram(newAngles, bins=bins)
            histplt = hist1 / np.sum(hist1 * widths)

            plt.bar(center,
                    histplt,
                    align='center',
                    width=widths,
                    facecolor='c')
            plt.ylim(0, maxYLimit)
            plt.title('At Scale: ' + str(index) + '_msize_' + str(mSize) +
                      'Angles density')
            plt.show()

            print('Total number of pixels on the centerline is = {}'.format(
                len(newAngles)))
            mean = (0.5) * pcirc.mean(2 * np.array(newAngles) * np.pi / 180)
            var = pcirc.var(2 * np.array(newAngles) * np.pi / 180)
            print('The circular mean and variance are ' +
                  str(-180 + mean * 180 / np.pi) + ' and ' + str(var) +
                  ' respectively.')
Example #18
    def _prepare(self):
        if len(PUnitPhases()) != len(self):
            df = pd.DataFrame(PUnitPhases().fetch())
            df['phase'] = [circ.mean(e) for e in df.phases]

            def center(x):
                x['phase'] = circ.center(x.phase)
                return x

            df = df.groupby('fish_id').apply(center)
            df['jitter'] = [circ.std(ph) for ph in df.phases]
            self.insert([e.to_dict() for _, e in df.loc[:, ['fish_id', 'cell_id', 'phase', 'jitter']].iterrows()],
                        skip_duplicates=True)
Example #19
    def plot(self):
        # plot mean phase of spikes to show that they are fish dependent
        df = pd.DataFrame(self.fetch())
        df['eod'] = [1 / np.median(np.diff(e)) for e in df.eod_times]
        df['cmean'] = [circ.mean(e) for e in df.phases]
        df['jitter'] = [circ.std(ph) / 2 / np.pi / e for ph, e in zip(df.phases, df.eod)]

        model = ols('cmean ~ C(fish_id)', data=df).fit()
        table = sm.stats.anova_lm(model)
        print(table)

        sns.factorplot('fish_id', 'cmean', data=df, kind='bar')
        g = sns.pairplot(df.loc[:, ['cmean', 'jitter', 'fish_id']], hue='fish_id')
        plt.show()
Example #20
    def mean_var(self, restrictions):
        """
        Computes the mean and variance of the baseline psth
        :param restrictions: restrictions that identify one baseline trial
        :return: mean and variance
        """
        rel = self & restrictions
        spikes = (Baseline.SpikeTimes() & rel).fetch1('times')
        eod = rel.fetch1('eod')
        period = 1 / eod
        factor = 2 * np.pi / period
        t = (spikes % period)
        mu = circ.mean(t * factor) / factor
        sigma2 = circ.var(t * factor) / factor**2
        return mu, sigma2
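The wrap-to-phase trick above in isolation: spike times are folded into one oscillation cycle, scaled to radians, and the circular statistics are converted back to seconds. Synthetic spike times; circ is pycircstat, as in the example.

import numpy as np
import pycircstat as circ

eod = 800.0                           # oscillation frequency in Hz
period = 1 / eod
factor = 2 * np.pi / period           # seconds -> radians within one cycle
spikes = np.random.rand(1000) * 0.5   # spike times in seconds
phase = (spikes % period) * factor    # phase of each spike in [0, 2*pi)
mu = circ.mean(phase) / factor        # mean spike time within a cycle, in seconds
sigma2 = circ.var(phase) / factor**2  # circular variance, converted to seconds^2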
Example #21
    def _make_tuples(self, key):
        print('Processing', key['cell_id'])
        sampling_rate, eod = (Baseline() & key).fetch1['samplingrate', 'eod']
        dt = 1. / sampling_rate

        trials = Baseline.LocalEODPeaksTroughs() * Baseline.SpikeTimes() & key

        aggregated_spikes = np.hstack([s / 1000 - p[0] * dt for s, p in zip(*trials.fetch['times', 'peaks'])])

        aggregated_spikes %= 1 / eod

        aggregated_spikes *= eod * 2 * np.pi  # normalize to 2*pi
        key['base_var'], key['base_mean'], key['base_std'] = \
            circ.var(aggregated_spikes), circ.mean(aggregated_spikes), circ.std(aggregated_spikes)
        self.insert1(key)
Example #22
    def mean_var(self, restrictions):
        """
        Computes the mean and variance of the baseline psth
        :param restrictions: restrictions that identify one baseline trial
        :return: mean and variance
        """
        rel = self & restrictions
        spikes = (Baseline.SpikeTimes() & rel).fetch1['times']
        eod = rel.fetch1['eod']
        period = 1 / eod
        factor = 2 * np.pi / period
        t = (spikes % period)
        mu = circ.mean(t * factor) / factor
        sigma2 = circ.var(t * factor) / factor ** 2
        return mu, sigma2
Example #23
def test_mean_ci_2d():
    data = np.array([
        [0.58429, 0.88333],
        [1.14892, 2.22854],
        [2.87128, 3.06369],
        [1.07677, 1.49836],
        [2.96969, 1.51748],
    ])
    muplus = np.array([np.nan, 2.7003])
    muminus = np.array([np.nan, 0.89931])
    mu = np.array([1.6537, 1.7998])

    mu_tmp, (muminus_tmp, muplus_tmp) = pycircstat.mean(data, ci=0.95, axis=0)
    assert_allclose(muplus, muplus_tmp, rtol=1e-4)
    assert_allclose(muminus, muminus_tmp, rtol=1e-4)
    assert_allclose(mu, mu_tmp, rtol=1e-4)
Example #25
    def _make_tuples(self, key):
        print('Processing', key['cell_id'])
        sampling_rate, eod = (Baseline() & key).fetch1('samplingrate', 'eod')
        dt = 1. / sampling_rate

        trials = Baseline.LocalEODPeaksTroughs() * Baseline.SpikeTimes() & key

        aggregated_spikes = np.hstack([
            s / 1000 - p[0] * dt
            for s, p in zip(*trials.fetch('times', 'peaks'))
        ])

        aggregated_spikes %= 1 / eod

        aggregated_spikes *= eod * 2 * np.pi  # normalize to 2*pi
        key['base_var'], key['base_mean'], key['base_std'] = \
            circ.var(aggregated_spikes), circ.mean(aggregated_spikes), circ.std(aggregated_spikes)
        self.insert1(key)
Example #26
    def _make_tuples(self, key):
        print('Processing', key['cell_id'], 'run', key['run_id'], )
        if SecondOrderSignificantPeaks() & dict(key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0, refined=1):
            eod, vs = (SecondOrderSignificantPeaks() & dict(key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0,
                                                            refined=1)).fetch1['frequency', 'vector_strength']
        elif SecondOrderSignificantPeaks() & dict(key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0, refined=0):
            eod, vs = (SecondOrderSignificantPeaks() & dict(key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0,
                                                            refined=0)).fetch1['frequency', 'vector_strength']
        else:
            eod = (Runs() & key).fetch1['eod']

        aggregated_spikes = np.hstack(TrialAlign().load_trials(key))
        aggregated_spikes %= 1 / eod

        aggregated_spikes *= eod * 2 * np.pi  # normalize to 2*pi
        if len(aggregated_spikes) > 1:
            key['stim_var'], key['stim_mean'], key['stim_std'] = \
                circ.var(aggregated_spikes), circ.mean(aggregated_spikes), circ.std(aggregated_spikes)
            self.insert1(key)
def test_mean_ci_2d():
    data = np.array([
                    [0.58429, 0.88333],
                    [1.14892, 2.22854],
                    [2.87128, 3.06369],
                    [1.07677, 1.49836],
                    [2.96969, 1.51748],
                    ])
    muplus = np.array([np.nan, 2.7003])
    muminus = np.array([np.nan, 0.89931])
    mu = np.array([1.6537, 1.7998])

    try:
        mu_tmp, (muminus_tmp, muplus_tmp) = pycircstat.mean(data, ci=0.95, axis=0)
        assert_allclose(muplus, muplus_tmp, rtol=1e-4)
        assert_allclose(muminus, muminus_tmp, rtol=1e-4)
        assert_allclose(mu, mu_tmp, rtol=1e-4)
    except UserWarning:
        pass
    def bin_phase_by_region(self, phase_data, this_cluster_name):
        """
        Bin the channel x event by time phase data into rois x event x time. Means over all electrodes in a given
        region of interest.
        """

        cluster_rows = self.res['clusters'][this_cluster_name].notna()
        cluster_region_df = self.get_electrode_roi_by_hemi()[cluster_rows]

        mean_phase_data = {}
        for this_roi in self.rois:
            if this_roi[1] == 'both':
                cluster_elecs = cluster_region_df.region == this_roi[0]
            else:
                cluster_elecs = (cluster_region_df.region == this_roi[0]) & (cluster_region_df.hemi == this_roi[1])
            if cluster_elecs.any():
                mean_phase_data[this_roi[1] + '-' + this_roi[0]] = pycircstat.mean(phase_data[cluster_elecs.values],
                                                                                   axis=0)
        return mean_phase_data
Example #30
    def compute_hilbert_for_cluster(self, this_cluster_name):

        # first, get the eeg for just channels in cluster
        cluster_rows = self.res['clusters'][this_cluster_name].notna()
        cluster_elec_labels = self.res['clusters'][cluster_rows]['label']
        cluster_eeg = self.subject_data[:,
                                        np.in1d(self.subject_data.
                                                channel, cluster_elec_labels)]

        # bandpass eeg at the mean frequency, making sure the lower frequency isn't too low
        cluster_mean_freq = self.res['clusters'][cluster_rows][
            this_cluster_name].mean()
        cluster_freq_range = [
            cluster_mean_freq - self.hilbert_half_range,
            cluster_mean_freq + self.hilbert_half_range
        ]
        if cluster_freq_range[0] < SubjectTravelingWaveAnalysis.LOWER_MIN_FREQ:
            cluster_freq_range[0] = SubjectTravelingWaveAnalysis.LOWER_MIN_FREQ
        filtered_eeg = ecog_helpers.band_pass_eeg(cluster_eeg,
                                                  cluster_freq_range)
        filtered_eeg = filtered_eeg.transpose('channel', 'event', 'time')

        # run the hilbert transform
        complex_hilbert_res = hilbert(filtered_eeg.data,
                                      N=filtered_eeg.shape[-1],
                                      axis=-1)

        # compute the phase of the filtered eeg
        phase_data = filtered_eeg.copy()
        phase_data.data = np.unwrap(np.angle(complex_hilbert_res))

        # compute the power
        power_data = filtered_eeg.copy()
        power_data.data = np.abs(complex_hilbert_res)**2

        # compute mean phase and phase difference between ref phase and each electrode phase
        ref_phase = pycircstat.mean(phase_data.data, axis=0)
        phase_data.data = pycircstat.cdiff(phase_data.data, ref_phase)
        return phase_data, power_data, cluster_mean_freq
    def rose_plot(angles, n_bins=16, ax=None, is_diff=False):
        if is_diff:
            bins = np.linspace(-np.pi, np.pi, n_bins + 1)
        else:
            bins = np.linspace(0, 2 * np.pi, n_bins + 1)
        bins -= np.diff(bins)[0] / 2
        width = 2 * np.pi / n_bins

        if ax is None:
            fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
        else:
            fig = ax.get_figure()

        n, _ = np.histogram(angles, bins)
        mean_ang = pycircstat.mean(angles)
        bars = ax.bar(bins[:n_bins], n, width=width, bottom=0.0, align='edge')
        for bar in bars:
            bar.set_edgecolor('k')
            bar.set_lw(1)
            bar.set_alpha(.5)
        ax.plot([mean_ang, mean_ang], [0, np.max(n)], '-k', lw=3)
        return ax
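A hypothetical use of the helper above, on von Mises-distributed angles (only numpy and matplotlib assumed):

import numpy as np
import matplotlib.pyplot as plt

angles = np.random.vonmises(np.pi / 4, 2.0, size=500) % (2 * np.pi)
ax = rose_plot(angles, n_bins=24)
plt.show()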
Example #33
    def _make_tuples(self, key):
        print('Processing', key['cell_id'], 'run', key['run_id'])
        if SecondOrderSignificantPeaks() & dict(
                key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0,
                refined=1):
            eod, vs = (SecondOrderSignificantPeaks()
                       & dict(key,
                              eod_coeff=1,
                              stimulus_coeff=0,
                              baseline_coeff=0,
                              refined=1)).fetch1('frequency',
                                                 'vector_strength')
        elif SecondOrderSignificantPeaks() & dict(
                key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0,
                refined=0):
            eod, vs = (SecondOrderSignificantPeaks()
                       & dict(key,
                              eod_coeff=1,
                              stimulus_coeff=0,
                              baseline_coeff=0,
                              refined=0)).fetch1('frequency',
                                                 'vector_strength')
        else:
            eod = (Runs() & key).fetch1('eod')

        aggregated_spikes = TrialAlign().load_trials(key)
        if len(aggregated_spikes) == 0:
            warn('TrialAlign returned no spikes. Skipping')
            return
        else:
            aggregated_spikes = np.hstack(aggregated_spikes)
        aggregated_spikes %= 1 / eod

        aggregated_spikes *= eod * 2 * np.pi  # normalize to 2*pi
        if len(aggregated_spikes) > 1:
            key['stim_var'], key['stim_mean'], key['stim_std'] = \
                circ.var(aggregated_spikes), circ.mean(aggregated_spikes), circ.std(aggregated_spikes)
            self.insert1(key)
Example #34
def head_direction_score(head_angle_bins, rate):
    """
    Calculate the mean head direction and mean vector length, weighting
    each head-direction bin by its firing rate

    Parameters
    ----------
    head_angle_bins : array in radians
        binned head directions
    rate : array
        firing rate magnitude corresponding to angles

    Returns
    -------
    out : float, float
        mean angle, mean vector length
    """
    import math
    import pycircstat as pc
    nanIndices = np.where(np.isnan(rate))
    head_angle_bins = np.delete(head_angle_bins, nanIndices)
    rate = np.delete(rate, nanIndices)  # keep the weights aligned with the remaining bins
    mean_ang = pc.mean(head_angle_bins, w=rate)
    mean_vec_len = pc.resultant_vector_length(head_angle_bins, w=rate)
    # ci_lim = pc.mean_ci_limits(head_angle_bins, w=rate)
    return mean_ang, mean_vec_len
Example #35
def test_mean_axial():
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])
    assert_allclose(pycircstat.mean(data, axial_correction=3), 0.95902619)
Example #36
def test_mean_constant_data():
    data = np.ones(10)

    # We cannot use `assert_equal`, due to numerical rounding errors.
    assert_allclose(pycircstat.mean(data), 1.0)
    def analysis(self):
        """
        For each cluster in res['clusters']:

        1.

        """

        # make sure we have data
        if self.subject_data is None:
            print('%s: compute or load data first with .load_data()!' % self.subject)
            return

        # we must have 'clusters' in self.res
        if 'clusters' in self.res:
            self.res['traveling_waves'] = {}

            # get cluster names from dataframe columns
            cluster_names = list(filter(re.compile('cluster[0-9]+').match, self.res['clusters'].columns))

            # get circular-linear regression parameters
            theta_r, params = self.compute_grid_parameters()

            # compute cluster stats for each cluster
            with Parallel(n_jobs=12, verbose=5) as parallel:
                for this_cluster_name in cluster_names:
                    cluster_res = {}

                    # get the names of the channels in this cluster
                    cluster_elecs = self.res['clusters'][self.res['clusters'][this_cluster_name].notna()]['label']

                    # for the channels in this cluster, bandpass and then hilbert to get the phase info
                    phase_data, power_data, cluster_mean_freq = self.compute_hilbert_for_cluster(this_cluster_name)

                    # reduce to only the time interval of interest
                    time_inds = (phase_data.time >= self.cluster_stat_start_time) & (
                            phase_data.time <= self.cluster_stat_end_time)
                    phase_data = phase_data[:, :, time_inds]

                    # get electrode coordinates in 2d
                    norm_coords = self.compute_2d_elec_coords(this_cluster_name)

                    # run the cluster stats for time-averaged data
                    mean_rel_phase = pycircstat.mean(phase_data.data, axis=2)
                    mean_cluster_wave_ang, mean_cluster_wave_freq, mean_cluster_r2_adj = \
                        circ_lin_regress(mean_rel_phase.T, norm_coords, theta_r, params)
                    cluster_res['mean_cluster_wave_ang'] = mean_cluster_wave_ang
                    cluster_res['mean_cluster_wave_freq'] = mean_cluster_wave_freq
                    cluster_res['mean_cluster_r2_adj'] = mean_cluster_r2_adj

                    # and run it for each time point
                    num_times = phase_data.shape[-1]
                    data_as_list = zip(phase_data.T, [norm_coords] * num_times, [theta_r] * num_times, [params] * num_times)

                    res_as_list = parallel(delayed(circ_lin_regress)(x[0].data, x[1], x[2], x[3]) for x in data_as_list)
                    cluster_res['cluster_wave_ang'] = np.stack([x[0] for x in res_as_list], axis=0).astype('float32')
                    cluster_res['cluster_wave_freq'] = np.stack([x[1] for x in res_as_list], axis=0).astype('float32')
                    cluster_res['cluster_r2_adj'] = np.stack([x[2] for x in res_as_list], axis=0).astype('float32')
                    cluster_res['mean_freq'] = cluster_mean_freq
                    cluster_res['channels'] = cluster_elecs.values
                    cluster_res['time'] = phase_data.time.data
                    cluster_res['phase_data'] = pycircstat.mean(phase_data, axis=1).astype('float32')
                    cluster_res['phase_rvl'] = pycircstat.resultant_vector_length(phase_data, axis=1).astype('float32')

                    # finally, compute the subsequent memory effect
                    if hasattr(self, 'recall_filter_func') and callable(self.recall_filter_func):
                        recalled = self.recall_filter_func(self.subject_data)
                        cluster_res['recalled'] = recalled
                        delta_z, ts, ps = self.compute_sme_for_cluster(power_data)
                        cluster_res['sme_t'] = ts
                        cluster_res['sme_z'] = delta_z
                        cluster_res['ps'] = ps
                        cluster_res['phase_data_recalled'] = pycircstat.mean(phase_data[:, recalled], axis=1).astype(
                            'float32')
                        cluster_res['phase_data_not_recalled'] = pycircstat.mean(phase_data[:, ~recalled], axis=1).astype(
                            'float32')

                        # compute resultant vector length for recalled and not recalled. Then take the difference
                        # between recalled and not recalled
                        rec_rvl, not_rec_rvl = compute_rvl_by_memory(recalled, phase_data, False)
                        rvl_sme = rec_rvl - not_rec_rvl

                        # compute a null distribution of rvl_sme values
                        rvl_shuff_list = parallel(delayed(compute_rvl_by_memory)(recalled, phase_data, True) for _ in range(self.num_perms))
                        rvl_sme_shuff = np.stack([x[0] - x[1] for x in rvl_shuff_list])

                        # get the rank of the real sme values compared to the shuffled data
                        rvl_sme_shuff_perc = (rvl_sme > rvl_sme_shuff).mean(axis=0)
                        rvl_sme_shuff_perc[rvl_sme_shuff_perc == 0] += 1 / self.num_perms
                        rvl_sme_shuff_perc[rvl_sme_shuff_perc == 1] -= 1 / self.num_perms

                        # convert the ranks to a zscore
                        z = norm.ppf(rvl_sme_shuff_perc)

                        # store in res along with the number of significant electrodes in each direction
                        cluster_res['rvl_sme_z'] = z.astype('float32')
                        cluster_res['rvl_sme_sig_pos_n'] = np.sum(rvl_sme_shuff_perc > 0.975, axis=0)
                        cluster_res['rvl_sme_sig_neg_n'] = np.sum(rvl_sme_shuff_perc < 0.025, axis=0)

                    # finally finally, bin phase by roi
                    cluster_res['phase_by_roi'] = self.bin_phase_by_region(phase_data, this_cluster_name)
                    self.res['traveling_waves'][this_cluster_name] = cluster_res

        else:
            print('{}: self.res must have a clusters entry before running.'.format(self.subject))
            return
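The rank-to-z conversion near the end of analysis(), in isolation: the permutation percentile is clipped away from 0 and 1, then pushed through the inverse normal CDF. Toy numbers; scipy is assumed, as above.

import numpy as np
from scipy.stats import norm

num_perms = 1000
real = 0.12                                # hypothetical observed effect
shuff = np.random.randn(num_perms) * 0.05  # stand-in null distribution
perc = np.mean(real > shuff)               # rank of the real effect within the null
perc = np.clip(perc, 1 / num_perms, 1 - 1 / num_perms)  # avoid infinite z at 0 or 1
z = norm.ppf(perc)                         # z-score of the real effect against the null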
        #        print ac
        histInformation.append(collectrow)

    binNum, lBin, uBin, freq = zip(*histInformation)

    bins = (np.array(lBin) + np.array(uBin)) / 2

    hist1 = np.array(freq)

    widths = np.ones(len(bins)) * (bins[1] - bins[0])
    histplt = hist1 / np.sum(hist1 * widths)

    newAngles = [bins[i] * np.ones(freq[i]) for i in range(len(bins))]
    angles = np.array(list(itertools.chain(*newAngles)))

    mean = (0.5) * pcirc.mean(2 * np.array(angles) * np.pi / 180)
    var = pcirc.var(2 * np.array(angles) * np.pi / 180)

    meanIndegrees = mean * 180 / np.pi
    if meanIndegrees > 90:
        meanIndegrees = -180 + meanIndegrees

    print("The circular mean and variance are " + str(meanIndegrees) + " and " + str(var) + " respectively.")

    histName = (
        "Histogram_Sigma_" + str(Sigma) + "_mean" + str(round(meanIndegrees, 2)) + "_var_" + str(round(var, 2)) + ".png"
    )

    fig = plt.figure()
    plt.bar(bins, histplt, align="center", width=widths, facecolor="r")
    plt.ylim(0, ylimit)
def compute_phase_stats_with_shuffle(events, spike_rel_times, phase_data_hilbert, phase_bin_start,
                                     phase_bin_stop, do_permute=False):

    spike_rel_times_tmp = spike_rel_times.copy()
    if do_permute:

        # permute the novel
        novel_events = np.where(events.isFirst.values)[0]
        perm_novel_events = np.random.permutation(novel_events)
        spike_rel_times_tmp[novel_events] = spike_rel_times_tmp[perm_novel_events]

        # and repeated separately
        rep_events = np.where(~events.isFirst.values)[0]
        perm_rep_events = np.random.permutation(rep_events)
        spike_rel_times_tmp[rep_events] = spike_rel_times_tmp[perm_rep_events]

    # get the phases at which the spikes occurred and bin into novel and repeated items for each hilbert band
    spike_phases_hilbert = _compute_spike_phase_by_freq(spike_rel_times_tmp,
                                                        phase_bin_start,
                                                        phase_bin_stop,
                                                        phase_data_hilbert,
                                                        events)

    # bin into repeated and novel phases
    novel_phases, rep_phases = _bin_phases_into_cond(spike_phases_hilbert, events)

    if (len(novel_phases) > 0) and (len(rep_phases) > 0):

        # resultant vector length for each condition, and their difference
        rvl_novel = pycircstat.resultant_vector_length(novel_phases, axis=0)
        rvl_rep = pycircstat.resultant_vector_length(rep_phases, axis=0)
        rvl_diff = rvl_novel - rvl_rep

        # compute rayleigh test for each condition
        rayleigh_pval_novel, rayleigh_z_novel = pycircstat.rayleigh(novel_phases, axis=0)
        rayleigh_pval_rep, rayleigh_z_rep = pycircstat.rayleigh(rep_phases, axis=0)
        rayleigh_diff = rayleigh_z_novel - rayleigh_z_rep

        # watson williams test for equal means
        ww_pval, ww_tables = pycircstat.watson_williams(novel_phases, rep_phases, axis=0)
        ww_fstat = np.array([x.loc['Columns'].F for x in ww_tables])

        # kuiper test, to test for difference in dispersion (not mean, because I'm making them equal)
        kuiper_pval, stat_kuiper = pycircstat.kuiper(novel_phases - pycircstat.mean(novel_phases),
                                                     rep_phases - pycircstat.mean(rep_phases), axis=0)

        return (rvl_novel, rvl_rep, rvl_diff, ww_fstat, stat_kuiper, rayleigh_z_novel, rayleigh_z_rep, rayleigh_diff), \
               (rayleigh_pval_novel, rayleigh_pval_rep, ww_pval, kuiper_pval), novel_phases, rep_phases

    else:
        # no spikes in one of the conditions: return all-NaN stats and p-values
        nan_stats = tuple(np.full(phase_data_hilbert.shape[2], np.nan) for _ in range(8))
        nan_pvals = tuple(np.full(phase_data_hilbert.shape[2], np.nan) for _ in range(4))
        return nan_stats, nan_pvals, novel_phases, rep_phases
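The centering trick used before the kuiper call above, on its own: subtracting each sample's circular mean removes any mean difference, so the test is sensitive to dispersion only. Synthetic von Mises samples; the kuiper signature is the one used above.

import numpy as np
import pycircstat

a = np.random.vonmises(0.5, 4.0, 300)  # concentrated sample
b = np.random.vonmises(2.0, 1.0, 300)  # dispersed sample with a different mean
pval, stat = pycircstat.kuiper(a - pycircstat.mean(a),
                               b - pycircstat.mean(b))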
Example #43
                # points is even. That means its partner point shares the same value (degenerate)
                # E.g. alpha = [ 0., 0., ..., 0. ]
                case += 'degenerate_'
                min_ind = np.array([min_ind_closest])

    if min_ind.size == 1:
        # check that we really have the median
        if dm[min_ind[0]] == 0:
            med = alpha[min_ind[0]]
        else:
            # Example ???
            print('NAN ', case, alpha)
            return np.nan
    else:
        # compute the median and check it
        med = pyc.mean(alpha[min_ind])
        dd_med = delta_angle(alpha, med)
        dm_med = np.sum(dd_med >= 0) - np.sum(
            dd_med <= 0)  # signed unbalance between sides
        if dm_med != 0:
            # Example: [ 3.8163336, -0.34886142, 2.09754686, 1.19611513, -3.72993505, -1.75901783, 4.69444219, 4.05781821, -4.41354647, -0.17710378]
            #          In that case, the used pair is opposite to the mean and wide apart, so that data points on the mean side are crossed when moving from one boundary to the other...
            case += 'circmeanNotMedian_'
            print('NAN ', case, alpha)
            return np.nan

    # Remove +-pi degeneracy of median by taking solution closest to mean
    if np.abs(delta_angle(med, mean_alpha)) > np.abs(
            delta_angle(med + np.pi, mean_alpha)):
        med += np.pi
def compute_phase_stats_with_shuffle(events, spike_rel_times, phase_data_hilbert, phase_bin_start,
                                     phase_bin_stop, do_permute=False, shuffle_type=1):

    spike_rel_times_tmp = spike_rel_times.copy()
    e_tmp = events.copy()

    if do_permute:

        if shuffle_type == 1:

            # permute the novel
            novel_events = np.where(events.isFirst.values)[0]
            perm_novel_events = np.random.permutation(novel_events)
            spike_rel_times_tmp[novel_events] = spike_rel_times_tmp[perm_novel_events]

            # and repeated separately
            rep_events = np.where(~events.isFirst.values)[0]
            perm_rep_events = np.random.permutation(rep_events)
            spike_rel_times_tmp[rep_events] = spike_rel_times_tmp[perm_rep_events]

        else:

            e_tmp['isFirst'] = np.random.permutation(e_tmp.isFirst)

    # get the phases at which the spikes occurred and bin into novel and repeated items for each hilbert band
    spike_phases_hilbert = _compute_spike_phase_by_freq(spike_rel_times_tmp,
                                                        phase_bin_start,
                                                        phase_bin_stop,
                                                        phase_data_hilbert,
                                                        events)

    # bin into repeated and novel phases
    novel_phases, rep_phases = _bin_phases_into_cond(spike_phases_hilbert, e_tmp)

    if (len(novel_phases) > 0) and (len(rep_phases) > 0):

        # test phase locking for all spikes combined
        all_spikes_phases = np.vstack([novel_phases, rep_phases])
        rayleigh_pval_all, rayleigh_z_all = pycircstat.rayleigh(all_spikes_phases, axis=0)
        rvl_all = pycircstat.resultant_vector_length(all_spikes_phases, axis=0)

        # resultant vector length for each condition, and their difference
        rvl_novel = pycircstat.resultant_vector_length(novel_phases, axis=0)
        rvl_rep = pycircstat.resultant_vector_length(rep_phases, axis=0)
        rvl_diff = rvl_novel - rvl_rep

        # compute rayleigh test for each condition
        rayleigh_pval_novel, rayleigh_z_novel = pycircstat.rayleigh(novel_phases, axis=0)
        rayleigh_pval_rep, rayleigh_z_rep = pycircstat.rayleigh(rep_phases, axis=0)
        rayleigh_diff = rayleigh_z_novel - rayleigh_z_rep

        # watson williams test for equal means
        ww_pval, ww_tables = pycircstat.watson_williams(novel_phases, rep_phases, axis=0)
        ww_fstat = np.array([x.loc['Columns'].F for x in ww_tables])

        # kuiper test, to test for difference in dispersion (not mean, because I'm making them equal)
        kuiper_pval, stat_kuiper = pycircstat.kuiper(novel_phases - pycircstat.mean(novel_phases),
                                                     rep_phases - pycircstat.mean(rep_phases), axis=0)

        return (rvl_novel, rvl_rep, rvl_diff, ww_fstat, stat_kuiper, rayleigh_z_novel, rayleigh_z_rep, rayleigh_diff,
                rayleigh_z_all, rvl_all), \
               (rayleigh_pval_novel, rayleigh_pval_rep, ww_pval, kuiper_pval, rayleigh_pval_all), novel_phases, rep_phases

    else:
        # no spikes in one of the conditions: return all-NaN stats and p-values
        nan_stats = tuple(np.full(phase_data_hilbert.shape[2], np.nan) for _ in range(10))
        nan_pvals = tuple(np.full(phase_data_hilbert.shape[2], np.nan) for _ in range(5))
        return nan_stats, nan_pvals, novel_phases, rep_phases
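The two shuffle strategies above, sketched on toy data (the names mirror the function; the data are made up): shuffle_type == 1 permutes spike trains within each condition, anything else permutes the condition labels themselves.

import numpy as np
import pandas as pd

events = pd.DataFrame({'isFirst': np.repeat([True, False], 5)})
spike_rel_times = np.array([np.random.rand(np.random.randint(1, 5))
                            for _ in range(10)], dtype=object)  # ragged spike trains

# shuffle_type == 1: permute trials within the novel and repeated sets separately
novel = np.where(events.isFirst.values)[0]
spike_rel_times[novel] = spike_rel_times[np.random.permutation(novel)]

# otherwise: permute the labels, breaking the spike/condition pairing entirely
events['isFirst'] = np.random.permutation(events.isFirst)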