Example #1
def _circcorrcoef(alpha, beta, axis=None, weights_alpha=None,
                  weights_beta=None):
    """ Computes the circular correlation coefficient between two array of
    circular data.

    Parameters
    ----------
    alpha : numpy.ndarray or Quantity
        Array of circular (directional) data, which is assumed to be in
        radians whenever ``alpha`` is ``numpy.ndarray``.
    beta : numpy.ndarray or Quantity
        Array of circular (directional) data, which is assumed to be in
        radians whenever ``beta`` is ``numpy.ndarray``.
    axis : int, optional
        Axis along which circular correlation coefficients are computed.
        The default is to compute the circular correlation coefficient of
        the flattened array.
    weights_alpha : numpy.ndarray, optional
        In case of grouped data, the i-th element of ``weights_alpha``
        represents a weighting factor for each group such that
        ``sum(weights_alpha, axis)`` equals the number of observations.
        See [1]_, remark 1.4, page 22, for detailed explanation.
    weights_beta : numpy.ndarray, optional
        See description of ``weights_alpha``.

    Returns
    -------
    rho : numpy.ndarray or dimensionless Quantity
        Circular correlation coefficient.

    References
    ----------
    .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
       Series on Multivariate Analysis, Vol. 5, 2001.
    .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
       Circular Statistics (2001)'". 2015.
       <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    """
    if (np.size(alpha, axis) != np.size(beta, axis)):
        raise ValueError("alpha and beta must be arrays of the same size")

    mu_a = circmean(alpha, axis, weights_alpha)
    mu_b = circmean(beta, axis, weights_beta)

    if axis is None:
        sin_a = np.sin(alpha - mu_a)
        sin_b = np.sin(beta - mu_b)
    else:
        # Restore the reduced axis so the means broadcast against the data
        # for any choice of axis (not just axis=1).
        sin_a = np.sin(alpha - np.expand_dims(mu_a, axis))
        sin_b = np.sin(beta - np.expand_dims(mu_b, axis))
    rho = np.sum(sin_a * sin_b, axis) / np.sqrt(
        np.sum(sin_a * sin_a, axis) * np.sum(sin_b * sin_b, axis))

    return rho
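A minimal usage sketch of the function above (hypothetical data; assumes numpy and the astropy-style circmean from the snippet are in scope):

import numpy as np

rng = np.random.default_rng(42)
alpha = rng.uniform(-np.pi, np.pi, (4, 500))
# beta is a noisy copy of alpha, so the two should be strongly correlated
beta = (alpha + 0.2 * rng.standard_normal((4, 500))) % (2 * np.pi)
rho = _circcorrcoef(alpha, beta, axis=1)  # one coefficient per row
print(rho)  # values close to 1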
Example #2
    def from_fits_bintable(cls, bintable, tolerance=0.01):
        """
        Instantiate a beam from a bintable from a CASA-produced image HDU.

        Parameters
        ----------
        bintable : fits.BinTableHDU
            The table data containing the beam information
        tolerance : float
            The fractional tolerance on the beam size to include when averaging
            to a single beam
        """
        from astropy.stats import circmean

        bmaj = bintable.data['BMAJ']
        bmin = bintable.data['BMIN']
        bpa = bintable.data['BPA']
        if np.any(np.isnan(bmaj) | np.isnan(bmin) | np.isnan(bpa)):
            raise ValueError("NaN beam encountered.")
        for par in (bmin, bmaj):
            par_mean = par.mean()
            if (par.max() > par_mean * (1 + tolerance)
                    or par.min() < par_mean * (1 - tolerance)):
                raise ValueError("Beams are not within specified tolerance")

        meta = {key: bintable.data[key].mean() for key in bintable.data.names if
                key not in ('BMAJ','BPA', 'BMIN')}
        if meta:
            warnings.warn("Metadata was averaged for keywords "
                          "{0}".format(",".join(meta)))

        return cls(major=bmaj.mean()*u.arcsec, minor=bmin.mean()*u.arcsec,
                   pa=circmean(bpa*u.deg, weights=bmaj/bmin))
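Position angles wrap at 360 deg, which is why circmean (rather than a plain mean) is used for the PA above, and its weights argument is what favours the more elongated beams. A minimal sketch of both points, with hypothetical angles:

import numpy as np
import astropy.units as u
from astropy.stats import circmean

pa = np.array([350., 10.]) * u.deg
print(pa.mean())                                 # 180 deg, misleading
print(circmean(pa))                              # ~0 deg, respects the wrap
print(circmean(pa, weights=np.array([2., 1.])))  # weighted circular mean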
Example #5
    def average_beam(self, includemask=None, raise_for_nan=True):
        """
        Average the beam major, minor, and PA attributes.

        This is usually a dumb thing to do!
        """

        warnings.warn("Do not use the average beam for convolution! Use the"
                      " smallest common beam from `Beams.common_beam()`.")

        from astropy.stats import circmean

        if includemask is None:
            includemask = self.isfinite
        else:
            includemask = np.logical_and(includemask, self.isfinite)

        new_beam = Beam(major=self.major[includemask].mean(),
                        minor=self.minor[includemask].mean(),
                        pa=circmean(self.pa[includemask],
                                    weights=(self.major /
                                             self.minor)[includemask]))

        if raise_for_nan and np.any(np.isnan(new_beam)):
            raise ValueError("NaNs after averaging.  This is a bug.")

        return new_beam
Example #6
    def responses(sun_ele=np.pi / 3, uniform=True, noise=.5, bl=.5):
        sun_azi = np.linspace(-np.pi, np.pi, 36, endpoint=False)
        sun_ele = np.full_like(sun_azi, sun_ele)

        phi_maxs = [[]] * 8
        r_means = [[]] * 8
        r_stds = [[]] * 8
        p_values = [[]] * 8
        tb1s = np.empty((0, sun_azi.shape[0], 8), dtype=sun_azi.dtype)

        for n_tb1 in np.arange(8):
            tb1s = np.empty((0, sun_azi.shape[0], 8), dtype=sun_azi.dtype)

            for _ in np.linspace(0, 1, 100):
                d_deg, d_eff, t, phi, r_tb1 = evaluate(
                    uniform_polariser=uniform,
                    sun_azi=sun_azi,
                    sun_ele=sun_ele,
                    tilting=False,
                    noise=noise)
                tb1s = np.vstack([tb1s, np.transpose(r_tb1, axes=(1, 0, 2))])

            r_mean = np.median(tb1s[..., n_tb1], axis=0)
            z = r_mean.max() - r_mean.min()

            r_mean = (r_mean - r_mean.min()) / z - bl
            r_means[n_tb1] = r_mean
            r_stds[n_tb1] = tb1s[..., n_tb1].std(axis=0) / np.sqrt(z)

            p_values[n_tb1] = rayleightest(sun_azi, weights=r_mean + bl)
            phi_max = circmean(sun_azi, weights=np.power(r_mean + bl, 50))
            phi_maxs[n_tb1] = phi_max

        z = tb1s.max() - tb1s.min()
        tb1s = (tb1s - tb1s.min()) / z
        phis = np.transpose(np.array([[sun_azi] * 100] * 8), axes=(1, 2, 0))
        phi_means = circmean(phis, axis=1, weights=np.power(tb1s, 50)).T

        return np.array(phi_maxs)[::-1], phi_means[::-1], np.array(
            r_means)[::-1], np.array(r_stds)[::-1], np.array(p_values)[::-1]
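The weights=np.power(r_mean + bl, 50) step above uses the weighted circular mean as a soft argmax: raising the baseline-shifted response to a high power concentrates the weight near the preferred direction. A toy sketch with a hypothetical cosine tuning curve:

import numpy as np
from astropy.stats import circmean

azi = np.linspace(-np.pi, np.pi, 36, endpoint=False)
response = 0.5 + 0.5 * np.cos(azi - 1.0)      # tuning curve peaking at 1 rad
print(circmean(azi, weights=response ** 50))  # close to 1.0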
Example #7
def average_beams(beams, includemask=None):
    """
    Average the beam major, minor, and PA attributes.

    This is usually a dumb thing to do!
    """

    from radio_beam import Beam
    from astropy.stats import circmean

    major, minor, pa = beam_props(beams, includemask)
    new_beam = Beam(major=major.mean(), minor=minor.mean(),
                    pa=circmean(pa, weights=major/minor))

    return new_beam
Example #8
def average_beams(beams, includemask=None):
    """
    Average the beam major, minor, and PA attributes.

    This is usually a dumb thing to do!
    """

    from radio_beam import Beam
    from astropy.stats import circmean

    if includemask is None:
        includemask = itertools.cycle([True])

    major = u.Quantity([bm.major for bm,incl in zip(beams,includemask) if incl], u.deg)
    minor = u.Quantity([bm.minor for bm,incl in zip(beams,includemask) if incl], u.deg)
    pa = u.Quantity([bm.pa for bm,incl in zip(beams,includemask) if incl], u.deg)
    new_beam = Beam(major=major.mean(), minor=minor.mean(),
                    pa=circmean(pa, weights=major/minor))

    return new_beam
Example #9
def average_beams(beams, includemask=None, raise_for_nan=True):
    """
    Average the beam major, minor, and PA attributes.

    This is usually a dumb thing to do!
    """

    from radio_beam import Beam
    from astropy.stats import circmean

    major, minor, pa = beam_props(beams, includemask)

    if raise_for_nan and (np.any(np.isnan(major)) or np.any(np.isnan(minor)) or
                          np.any(np.isnan(pa))):
        raise ValueError("Some beam parameters are NaN.  Try masking out the bad beams.")

    new_beam = Beam(major=major.mean(), minor=minor.mean(),
                    pa=circmean(pa, weights=major/minor))

    if raise_for_nan and np.any(np.isnan(new_beam)):
        raise ValueError("NaNs after averaging.  This is a bug.")

    return new_beam
Example #11
def heinze_experiment(n_tb1=0,
                      eta=.0,
                      sun_ele=np.pi / 2,
                      absolute=False,
                      uniform=False):
    sun_azi = np.linspace(-np.pi, np.pi, 36, endpoint=False)
    sun_ele = np.full_like(sun_azi, sun_ele)
    tb1s = np.empty((0, sun_azi.shape[0], 8), dtype=sun_azi.dtype)

    for _ in np.linspace(0, 1, 100):
        d_deg, d_eff, t, phi, r_tb1 = evaluate(uniform_polariser=uniform,
                                               sun_azi=sun_azi,
                                               sun_ele=sun_ele,
                                               tilting=False,
                                               noise=eta)
        tb1s = np.vstack([tb1s, np.transpose(r_tb1, axes=(1, 0, 2))])

    if absolute:
        tb1s = np.absolute(tb1s)
    bl = .5
    r_mean = np.median(tb1s[..., n_tb1], axis=0)
    z = r_mean.max() - r_mean.min()

    r_mean = (r_mean - r_mean.min()) / z - bl
    r_std = tb1s[..., n_tb1].std(axis=0) / np.sqrt(z)

    p_value = rayleightest(sun_azi, weights=r_mean + bl)
    if uniform:
        # phi_mean_00 = circmean((sun_azi - np.pi / 2) % np.pi + np.pi / 2, weights=np.power(r_mean + bl, 8))
        # phi_var_00 = circvar((sun_azi - np.pi / 2) % np.pi + np.pi / 2, weights=np.power(r_mean + bl, 8))
        # phi_mean_90 = circmean(sun_azi % np.pi, weights=np.power(r_mean + bl, 8))
        # phi_var_90 = circvar(sun_azi % np.pi, weights=np.power(r_mean + bl, 8))
        # phi_mean = phi_mean_00 if phi_var_00 < phi_var_90 else phi_mean_90
        phi_mean = circmean(sun_azi, weights=np.power(r_mean + bl, 50))
    else:
        phi_mean = circmean(sun_azi, weights=np.power(r_mean + bl, 50))
    # phi_max[j].append(phi_mean)

    plt.figure(
        "heinze-%s%s" %
        ("abs-" if absolute else "uni-" if uniform else "", tb1_names[n_tb1]),
        figsize=(3, 3))
    ax = plt.subplot(111, polar=True)
    ax.set_theta_zero_location("N")
    ax.set_theta_direction(-1)

    y_min, y_max = -.3, 1.1
    plt.bar((sun_azi + np.pi) % (2 * np.pi) - np.pi,
            bl + r_mean,
            .1,
            yerr=r_std,
            facecolor='black')
    plt.plot(np.linspace(-np.pi, np.pi, 361), np.full(361, bl), 'k-')
    if uniform:
        x_mean = [phi_mean, phi_mean, phi_mean + np.pi, phi_mean + np.pi]
        plt.plot(x_mean, [y_max, y_min, y_min, y_max], 'r-.')
    else:
        plt.plot([phi_mean, phi_mean], [y_max, y_min], 'r-.')
    plt.yticks([])
    plt.xticks(np.linspace(0, 2 * np.pi, 8, endpoint=False), [
        r'%d$^\circ$' % x
        for x in ((np.linspace(0, 360, 8, endpoint=False) + 180) % 360 - 180)
    ])
    plt.ylim([y_min, y_max])
    # plt.savefig("heinze-%s%d.eps" % ("abs-" if absolute else "uni-" if uniform else "", n_tb1))
    plt.show()
Example #12
def set_geometry(self, footprint):

    deg2rad = math.pi / 180.0
    ra_min, ra_max, dec_min, dec_max, lambda_min, lambda_max = footprint  # in degrees
    dec_ave = (dec_min + dec_max) / 2.0

    # We can not average the RA values directly because of the wrapping of
    # hour angles, so a circular mean is used instead. We might want to
    # increase the number of ravalues later; just taking the min and max may
    # not be sufficient.
    ravalues = np.zeros(2)
    ravalues[0] = ra_min
    ravalues[1] = ra_max

    # astropy circmean assumes angles are in radians
    # we have angles in degrees
    ra_ave = circmean(ravalues * u.deg).value
    log.info('RA average %12.8f', ra_ave)

    self.Crval1 = ra_ave
    self.Crval2 = dec_ave
    xi_center, eta_center = coord.radec2std(self.Crval1, self.Crval2, ra_ave,
                                            dec_ave)

    xi_min, eta_min = coord.radec2std(self.Crval1, self.Crval2, ra_min,
                                      dec_min)
    xi_max, eta_max = coord.radec2std(self.Crval1, self.Crval2, ra_max,
                                      dec_max)

    #________________________________________________________________________________
    # find the CRPIX1 CRPIX2 - xi and eta centered at 0,0
    # to find location of center abs of min values is how many pixels

    n1a = int(math.ceil(math.fabs(xi_min) / self.Cdelt1))
    n2a = int(math.ceil(math.fabs(eta_min) / self.Cdelt2))

    n1b = int(math.ceil(math.fabs(xi_max) / self.Cdelt1))
    n2b = int(math.ceil(math.fabs(eta_max) / self.Cdelt2))

    xi_min = 0.0 - (n1a * self.Cdelt1) - self.Cdelt1 / 2.0
    xi_max = (n1b * self.Cdelt1) + self.Cdelt1 / 2.0

    eta_min = 0.0 - (n2a * self.Cdelt2) - self.Cdelt2 / 2.0
    eta_max = (n2b * self.Cdelt2) + self.Cdelt2 / 2.0

    self.Crpix1 = float(n1a) + 1.0
    self.Crpix2 = float(n2a) + 1.0

    self.naxis1 = n1a + n1b
    self.naxis2 = n2a + n2b

    self.a_min = xi_min
    self.a_max = xi_max
    self.b_min = eta_min
    self.b_max = eta_max

    # center of spaxels
    self.xcoord = np.zeros(self.naxis1)
    xstart = xi_min + self.Cdelt1 / 2.0
    for i in range(self.naxis1):
        self.xcoord[i] = xstart
        xstart = xstart + self.Cdelt1

    self.ycoord = np.zeros(self.naxis2)
    ystart = eta_min + self.Cdelt2 / 2.0

    for i in range(self.naxis2):
        self.ycoord[i] = ystart
        ystart = ystart + self.Cdelt2

#        yy,xx = np.mgrid[ystart:yend:self.Cdelt2,
#                         xstart:xend:self.Cdelt1]

    ygrid = np.zeros(self.naxis2 * self.naxis1)
    xgrid = np.zeros(self.naxis2 * self.naxis1)

    k = 0
    ystart = self.ycoord[0]
    for i in range(self.naxis2):
        xstart = self.xcoord[0]
        for j in range(self.naxis1):
            xgrid[k] = xstart
            ygrid[k] = ystart
            xstart = xstart + self.Cdelt1
            k = k + 1
        ystart = ystart + self.Cdelt2

#        print('y start end',ystart,yend)
#        print('x start end',xstart,xend)

#        print('yy shape',yy.shape,self.ycoord.shape)
#        print('xx shape',xx.shape,self.xcoord.shape)

#        self.Ycenters = np.ravel(yy)
#        self.Xcenters = np.ravel(xx)

    self.Xcenters = xgrid
    self.Ycenters = ygrid
    #_______________________________________________________________________
    #set up the lambda (z) coordinate of the cube

    self.lambda_min = lambda_min
    self.lambda_max = lambda_max
    range_lambda = self.lambda_max - self.lambda_min
    self.naxis3 = int(math.ceil(range_lambda / self.Cdelt3))

    # adjust max based on integer value of naxis3
    lambda_center = (self.lambda_max + self.lambda_min) / 2.0
    self.lambda_min = lambda_center - (self.naxis3 / 2.0) * self.Cdelt3
    self.lambda_max = self.lambda_min + (self.naxis3) * self.Cdelt3

    self.zcoord = np.zeros(self.naxis3)
    self.Crval3 = self.lambda_min
    self.Crpix3 = 1.0
    zstart = self.lambda_min + self.Cdelt3 / 2.0

    for i in range(self.naxis3):
        self.zcoord[i] = zstart
        zstart = zstart + self.Cdelt3
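As an aside, the nested loops that fill xgrid and ygrid above build a flattened grid of spaxel centers; a vectorized sketch of the same construction (stand-in coordinate arrays for illustration):

import numpy as np

xcoord = np.arange(5.0)  # stand-ins for self.xcoord / self.ycoord
ycoord = np.arange(4.0)
yy, xx = np.meshgrid(ycoord, xcoord, indexing='ij')
xgrid = xx.ravel()       # x varies fastest, matching the inner loop
ygrid = yy.ravel()       # y varies slowest, matching the outer loop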
Example #13
def fill_values(draw, dpro, rawkey, prokey, i, j, nof, ktype='tel',
                deg=False, lowlim=None, uplim=None, fill_value=np.ma.masked):

    if lowlim is None:
        lowlim = np.ma.min(draw[rawkey][j:j+nof])

    if uplim is None:
        uplim = np.ma.max(draw[rawkey][j:j+nof])

    ids = np.where((draw[rawkey][j:j+nof] >= lowlim)
                   & (draw[rawkey][j:j+nof] <= uplim))[0]

    if len(ids) == 0:
        if ktype == 'tel':
            dpro[prokey+"_mean"][i] = fill_value
            dpro[prokey+"_start"][i] = fill_value
            dpro[prokey+"_end"][i] = fill_value

        else:
            dpro[prokey+"_median"][i] = fill_value
            dpro[prokey+"_stddev"][i] = fill_value

        return

    else:
        vals = draw[rawkey][j:j+nof][ids]

    if not any(draw[rawkey].mask[j:j+nof]):

        if ktype == 'tel':
            if deg:
                dpro[prokey+"_mean"][i] = circmean(vals * u.deg).value

                if dpro[prokey+"_mean"][i] < 0:
                    dpro[prokey+"_mean"][i] += 360
                elif dpro[prokey+"_mean"][i] > 360:
                    dpro[prokey+"_mean"][i] -= 360
            else:
                dpro[prokey+"_mean"][i] = np.ma.mean(vals)

            dpro[prokey+"_start"][i] = draw[rawkey][j]
            dpro[prokey+"_end"][i] = draw[rawkey][j+nof-1]

        else:

            if deg:
                dpro[prokey+"_median"][i] = circmean(vals * u.deg).value

                if dpro[prokey+"_median"][i] < 0:
                    dpro[prokey+"_median"][i] += 360

                elif dpro[prokey+"_median"][i] > 360:
                    dpro[prokey+"_median"][i] -= 360

                dpro[prokey+"_stddev"][i] = np.sqrt(circvar(vals * u.deg).value)

            else:
                dpro[prokey+"_median"][i] = np.ma.median(vals)
                dpro[prokey+"_stddev"][i] = np.ma.std(vals)

    else:
        if ktype == 'tel':
            dpro[prokey+"_mean"][i] = fill_value
            dpro[prokey+"_start"][i] = fill_value
            dpro[prokey+"_end"][i] = fill_value

        else:
            dpro[prokey+"_median"][i] = fill_value
            dpro[prokey+"_stddev"][i] = fill_value
Example #14
def compute_sync(complex_signal: np.ndarray,
                 mode: str,
                 epochs_average: bool = True) -> np.ndarray:
    """
    Computes frequency- or time-frequency-domain connectivity measures from analytic signals.

    Arguments:

        complex_signal:
            shape = (2, n_epochs, n_channels, n_freq_bins, n_times).
            Analytic signals for computing connectivity between two participants.

        mode:
            Connectivity measure. Options in the notes.

        epochs_average:
          option to either return the average connectivity across epochs
          (collapse across time) or preserve epoch-by-epoch connectivity,
          boolean.
          If False, connectivity values are not averaged over epochs (the
          epoch dimension is kept).
          If True, connectivity values are averaged over epochs.


    Returns:
        con:
          Connectivity matrix. The shape is either
          (n_freq, n_epochs, 2*n_channels, 2*n_channels) if epochs_average
          is False, or (n_freq, 2*n_channels, 2*n_channels) if
          epochs_average is True.

          To extract inter-brain connectivity values, slice the last two dimensions of con with [0:n_channels, n_channels: 2*n_channels].

    Note:
        **supported connectivity measures**
          - 'envelope_corr': envelope correlation
          - 'pow_corr': power correlation
          - 'plv': phase locking value
          - 'ccorr': circular correlation coefficient
          - 'coh': coherence
          - 'imaginary_coh': imaginary coherence

    """

    n_epoch, n_ch, n_freq, n_samp = complex_signal.shape[1], complex_signal.shape[2], \
                                    complex_signal.shape[3], complex_signal.shape[4]

    # calculate all epochs at once; the only downside is the higher memory use
    complex_signal = complex_signal.transpose(
        (1, 3, 0, 2, 4)).reshape(n_epoch, n_freq, 2 * n_ch, n_samp)
    transpose_axes = (0, 1, 3, 2)
    if mode.lower() == 'plv':
        phase = complex_signal / np.abs(complex_signal)
        c = np.real(phase)
        s = np.imag(phase)
        dphi = _multiply_conjugate(c, s, transpose_axes=transpose_axes)
        con = abs(dphi) / n_samp

    elif mode.lower() == 'envelope_corr':
        env = np.abs(complex_signal)
        mu_env = np.mean(env, axis=3).reshape(n_epoch, n_freq, 2 * n_ch, 1)
        env = env - mu_env
        con = np.einsum('nilm,nimk->nilk', env, env.transpose(transpose_axes)) / \
              np.sqrt(np.einsum('nil,nik->nilk', np.sum(env ** 2, axis=3), np.sum(env ** 2, axis=3)))

    elif mode.lower() == 'pow_corr':
        env = np.abs(complex_signal)**2
        mu_env = np.mean(env, axis=3).reshape(n_epoch, n_freq, 2 * n_ch, 1)
        env = env - mu_env
        con = np.einsum('nilm,nimk->nilk', env, env.transpose(transpose_axes)) / \
              np.sqrt(np.einsum('nil,nik->nilk', np.sum(env ** 2, axis=3), np.sum(env ** 2, axis=3)))

    elif mode.lower() == 'coh':
        c = np.real(complex_signal)
        s = np.imag(complex_signal)
        amp = np.abs(complex_signal)**2
        dphi = _multiply_conjugate(c, s, transpose_axes=transpose_axes)
        con = np.abs(dphi) / np.sqrt(
            np.einsum('nil,nik->nilk', np.nansum(amp, axis=3),
                      np.nansum(amp, axis=3)))

    elif mode.lower() == 'imaginary_coh':
        c = np.real(complex_signal)
        s = np.imag(complex_signal)
        amp = np.abs(complex_signal)**2
        dphi = _multiply_conjugate(c, s, transpose_axes=transpose_axes)
        con = np.abs(np.imag(dphi)) / np.sqrt(
            np.einsum('nil,nik->nilk', np.nansum(amp, axis=3),
                      np.nansum(amp, axis=3)))

    elif mode.lower() == 'ccorr':
        angle = np.angle(complex_signal)
        mu_angle = circmean(angle, axis=3).reshape(n_epoch, n_freq, 2 * n_ch,
                                                   1)
        angle = np.sin(angle - mu_angle)

        formula = 'nilm,nimk->nilk'
        con = np.einsum(formula, angle, angle.transpose(transpose_axes)) / \
              np.sqrt(np.einsum('nil,nik->nilk', np.sum(angle ** 2, axis=3), np.sum(angle ** 2, axis=3)))

    else:
        raise ValueError('Metric type not supported.')

    con = con.swapaxes(0, 1)  # n_freq x n_epoch x 2*n_ch x 2*n_ch
    if epochs_average:
        con = np.nanmean(con, axis=1)

    return con
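Per channel pair, the 'ccorr' branch evaluates the same circular correlation coefficient as the _circcorrcoef of Example #1; a scalar sketch of that formula on two hypothetical phase series:

import numpy as np
from astropy.stats import circmean

rng = np.random.default_rng(0)
x = rng.uniform(-np.pi, np.pi, 1000)
y = np.angle(np.exp(1j * (x + 0.3 * rng.standard_normal(1000))))  # noisy copy

sin_x = np.sin(x - circmean(x))
sin_y = np.sin(y - circmean(y))
rho = np.sum(sin_x * sin_y) / np.sqrt(np.sum(sin_x ** 2) * np.sum(sin_y ** 2))
print(rho)  # close to 1 for strongly coupled phases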
if __name__ == '__main__':
    """
    This simple centering solves the warped histogram issue. Is this a smarter
    way of measuring group synchrony?
    """

    data = 2 * np.pi * np.random.rand(6, 1000)
    s = np.shape(data)
    print('Imported data with %.0f' % s[0], 'rows (variables) by %.0f' % s[1],
          'columns (observations).')

    data_cent = data * 0
    i = 0
    for d in data:
        d = np.mod(d - circmean(d), np.pi * 2)
        data_cent[i, ] = d
        i = i + 1

    qgroupt, rhogroupt = circ_mean(data_cent)
    plt.plot(rhogroupt, '-k')
    plt.ylabel(r'$ \rho _{group,i}$')
    plt.show()

    polar_histogram(data, s)
    rp0 = rayleightest(np.reshape(data, (s[0] * s[1], 1)))
    print('The Rayleigh test is p=%.4f.' % rp0)

    polar_histogram(data_cent, s)
    rp1 = rayleightest(np.reshape(data_cent, (s[0] * s[1], 1)))
    print('The Rayleigh test is p=%.4f' % rp1)
Example #16
def main(h5parm1,
         h5parm2,
         outh5parm,
         mode,
         solset1='sol000',
         solset2='sol000',
         reweight=False,
         cal_names=None,
         cal_fluxes=None):
    """
    Combines two h5parms

    Parameters
    ----------
    h5parm1 : str
        Filename of h5parm 1
    h5parm2 : str
        Filename of h5parm 2
    outh5parm : str
        Filename of the output h5parm
    mode : str
        Mode to use when combining:
        'p1a2' - phases from 1 and amplitudes from 2
        'p1a1a2' - phases and amplitudes from 1 and amplitudes from 2 (amplitudes 1 and 2
        are multiplied to create combined amplitudes)
        'p1p2a2' - phases from 1 and phases and amplitudes from 2 (phases 2 are averaged
        over XX and YY, then interpolated to time grid of 1 and summed)
    solset1 : str, optional
        Name of solset for h5parm1
    solset2 : str, optional
        Name of solset for h5parm2
    reweight : bool, optional
        If True, reweight the solutions by their detrended noise
    cal_names : str or list, optional
        List of calibrator names (for use in reweighting)
    cal_fluxes : str or list, optional
        List of calibrator flux densities (for use in reweighting)
    """
    reweight = misc.string2bool(reweight)
    cal_names = misc.string2list(cal_names)
    cal_fluxes = misc.string2list(cal_fluxes)

    # Make copies of the input h5parms (since they may be altered by steps below) and
    # open them
    with tempfile.TemporaryDirectory() as tmpdir:
        h5parm1_copy = shutil.copy(h5parm1, tmpdir)
        h5parm2_copy = shutil.copy(h5parm2, tmpdir)
        h1 = h5parm(h5parm1_copy, readonly=False)
        h2 = h5parm(h5parm2_copy, readonly=False)

    ss1 = h1.getSolset(solset=solset1)
    ss2 = h2.getSolset(solset=solset2)

    # Initialize the output h5parm
    if os.path.exists(outh5parm):
        os.remove(outh5parm)
    ho = h5parm(outh5parm, readonly=False)
    sso = ho.makeSolset(solsetName='sol000', addTables=False)

    if mode == 'p1a2':
        # Take phases from 1 and amplitudes from 2
        # Remove unneeded soltabs from 1 and 2, then copy
        if 'amplitude000' in ss1.getSoltabNames():
            st = ss1.getSoltab('amplitude000')
            st.delete()
        if 'phase000' in ss2.getSoltabNames():
            st = ss2.getSoltab('phase000')
            st.delete()
        ss1.obj._f_copy_children(sso.obj, recursive=True, overwrite=True)
        ss2.obj._f_copy_children(sso.obj, recursive=True, overwrite=True)

    elif mode == 'p1a1a2':
        # Take phases and amplitudes from 1 and amplitudes from 2 (amplitudes 1 and 2
        # are multiplied to create combined values)
        # First, copy phases and amplitudes from 1
        ss1.obj._f_copy_children(sso.obj, recursive=True, overwrite=True)

        # Then read amplitudes from 1 and 2, multiply them together, and store
        st1 = ss1.getSoltab('amplitude000')
        st2 = ss2.getSoltab('amplitude000')
        sto = sso.getSoltab('amplitude000')
        sto.setValues(st1.val * st2.val)

    elif mode == 'p1p2a2':
        # Take phases from 1 and phases and amplitudes from 2 (phases 2 are averaged
        # over XX and YY, then interpolated to time grid of 1 and summed)
        # First, copy phases from 1
        ss1.obj._f_copy_children(sso.obj, recursive=True, overwrite=True)

        # Read phases from 2, average XX and YY (using circmean), interpolate to match
        # those from 1, and sum. Note: the interpolation is done in phase space (instead
        # of real/imag space) since phase wraps are not expected to be present in the
        # slow phases
        st1 = ss1.getSoltab('phase000')
        st2 = ss2.getSoltab('phase000')
        axis_names = st1.getAxesNames()
        time_ind = axis_names.index('time')
        freq_ind = axis_names.index('freq')
        axis_names = st2.getAxesNames()
        pol_ind = axis_names.index('pol')
        val2 = circmean(st2.val, axis=pol_ind)  # average over XX and YY
        if len(st2.time) > 1:
            f = si.interp1d(st2.time,
                            val2,
                            axis=time_ind,
                            kind='nearest',
                            fill_value='extrapolate')
            v1 = f(st1.time)
        else:
            v1 = val2
        if len(st2.freq) > 1:
            f = si.interp1d(st2.freq,
                            v1,
                            axis=freq_ind,
                            kind='linear',
                            fill_value='extrapolate')
            vals = f(st1.freq) + st1.val
        else:
            vals = v1 + st1.val
        sto = sso.getSoltab('phase000')
        sto.setValues(vals)

        # Copy amplitudes from 2
        # Remove unneeded phase soltab from 2, then copy
        if 'phase000' in ss2.getSoltabNames():
            st = ss2.getSoltab('phase000')
            st.delete()
        ss2.obj._f_copy_children(sso.obj, recursive=True, overwrite=True)

    else:
        print('ERROR: mode not understood')
        sys.exit(1)

    # Close the files, copies are removed automatically
    h1.close()
    h2.close()
    ho.close()

    # Reweight
    if reweight:
        # Use the scatter on the solutions for weighting, with an additional scaling
        # by the calibrator flux densities in each direction
        ho = h5parm(outh5parm, readonly=False)
        sso = ho.getSolset(solset='sol000')

        # Reweight the phases. Reweighting doesn't work when there are too few samples,
        # so check there are at least 10
        soltab_ph = sso.getSoltab('phase000')
        if len(soltab_ph.time) > 10:
            # Set window size for std. dev. calculation. We try to get one of around
            # 30 minutes, as that is roughly the timescale on which the global properties
            # of the ionosphere are expected to change
            delta_times = soltab_ph.time[1:] - soltab_ph.time[:-1]
            timewidth = np.min(delta_times)
            nstddev = min(251, max(11, int(1800 / timewidth)))
            if nstddev % 2 == 0:
                # Ensure window is odd
                nstddev += 1
            losoto.operations.reweight.run(soltab_ph,
                                           mode='window',
                                           nmedian=3,
                                           nstddev=nstddev)

        # Reweight the amplitudes
        soltab_amp = sso.getSoltab('amplitude000')
        if len(soltab_amp.time) > 10:
            # Set window size for std. dev. calculation. We try to get one of around
            # 90 minutes, as that is roughly the timescale on which the global properties
            # of the beam errors are expected to change
            delta_times = soltab_amp.time[1:] - soltab_amp.time[:-1]
            timewidth = np.min(delta_times)
            nstddev = min(251, max(11, int(5400 / timewidth)))
            if nstddev % 2 == 0:
                # Ensure window is odd
                nstddev += 1
            losoto.operations.reweight.run(soltab_amp,
                                           mode='window',
                                           nmedian=5,
                                           nstddev=nstddev)
        ho.close()

        # Use the input calibrator flux densities to adjust the weighting done above
        # to ensure that the average weights are proportional to the square of the
        # calibrator flux densities
        ho = h5parm(outh5parm, readonly=False)
        sso = ho.getSolset(solset='sol000')
        soltab_ph = sso.getSoltab('phase000')
        soltab_amp = sso.getSoltab('amplitude000')
        dir_names = [d.strip('[]') for d in soltab_ph.dir[:]]
        cal_weights = []
        for dir_name in dir_names:
            cal_weights.append(cal_fluxes[cal_names.index(dir_name)])
        cal_weights = [float(c) for c in cal_weights]
        cal_weights = np.array(cal_weights)**2

        # Convert weights to float64 from float16 to avoid clipping in the
        # intermediate steps, and set flagged (weight = 0) solutions to NaN
        # so they are not included in the calculations
        weights_ph = np.array(soltab_ph.weight, dtype=np.float64)
        weights_amp = np.array(soltab_amp.weight, dtype=np.float64)
        weights_ph[weights_ph == 0.0] = np.nan
        weights_amp[weights_amp == 0.0] = np.nan

        # Reweight, keeping the median value of the weights the same (to avoid
        # changing the overall normalization, which should be the inverse square of the
        # uncertainty (scatter) in the solutions).
        global_median_ph = np.nanmedian(weights_ph)
        global_median_amp = np.nanmedian(weights_amp)
        for d in range(len(dir_names)):
            # Input data are [time, freq, ant, dir, pol] for slow amplitudes
            # and [time, freq, ant, dir] for fast phases (scalarphase)
            norm_factor = cal_weights[d] / np.nanmedian(weights_ph[:, :, :, d])
            weights_ph[:, :, :, d] *= norm_factor
            norm_factor = cal_weights[d] / np.nanmedian(weights_amp[:, :, :,
                                                                    d, :])
            weights_amp[:, :, :, d, :] *= norm_factor
        weights_ph *= global_median_ph / np.nanmedian(weights_ph)
        weights_amp *= global_median_amp / np.nanmedian(weights_amp)
        weights_ph[np.isnan(weights_ph)] = 0.0
        weights_amp[np.isnan(weights_amp)] = 0.0

        # Clip to fit in float16 (required by LoSoTo)
        float16max = 65504.0
        weights_ph[weights_ph > float16max] = float16max
        weights_amp[weights_amp > float16max] = float16max

        # Write new weights
        soltab_ph.setValues(weights_ph, weight=True)
        soltab_amp.setValues(weights_amp, weight=True)
        ho.close()
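A small sketch of the XX/YY averaging step in the 'p1p2a2' mode above: circmean reduces the chosen (here, polarization) axis of a phase array, which is how val2 is formed (hypothetical shapes for illustration):

import numpy as np
from astropy.stats import circmean

phases = np.random.uniform(-np.pi, np.pi, (10, 4, 2))  # time x ant x pol
avg = circmean(phases, axis=2)                         # -> time x ant
print(avg.shape)                                       # (10, 4)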
Example #17
        bars[i].set_alpha(0.2)
        print(i)

    plt.show()
    return


if __name__ == '__main__':
    """
    Same problem here.
    """
    
    data = 2 * np.pi * np.random.rand(100, 1000)
    data = data - np.pi
    s = np.shape(data)
    qt = circmean(data, axis=0)
    phikt = data - qt
    phikt = np.mod(phikt + np.pi * 2, 2 * np.pi) - np.pi
    phik = circmean(phikt, axis=1)
    phik_cent = np.zeros((s[0], s[1]), dtype=float)
    i = 0
    for p in np.transpose(phikt):
        phik_cent[:, i] = p - phik
        i = i + 1
    phik_cent = np.mod(phik_cent + np.pi * 2, 2 * np.pi) - np.pi

    qgroupt, rhogroupt = circ_mean(phik_cent)
    plt.plot(rhogroupt, '-k')
    plt.ylabel(r'$ \rho _{group,i}$')
    plt.show()
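For reference, wrapping phases into [-pi, pi), as done repeatedly above, is conventionally written with the half-period shift added before the modulo and removed after; a self-contained sketch:

import numpy as np

x = np.array([3.5, -3.5, 6.9])
wrapped = np.mod(x + np.pi, 2 * np.pi) - np.pi
print(wrapped)  # every value now lies in [-pi, pi)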
Example #18
def fit_King_prof(nchains,
                  nruns,
                  nburn,
                  x,
                  y,
                  cl_cent,
                  field_dens,
                  cl_rad,
                  n_memb_i,
                  rt_max_f,
                  N_integ=1000,
                  N_conv=1000,
                  tau_stable=0.05):
    """
    rt_max_f: factor that caps the maximum tidal radius, given the previously
    estimated cluster radius.
    """

    from emcee import ensemble
    from emcee import moves

    # HARDCODED ##########################
    # Move used by emcee
    mv = [
        (moves.DESnookerMove(), 0.1),
        (moves.DEMove(), 0.9 * 0.9),
        (moves.DEMove(gamma0=1.0), 0.9 * 0.1),
    ]
    # mv = moves.StretchMove()
    # mv = moves.KDEMove()

    # Steps to store Bayes params.
    KP_steps = int(nruns * .01)

    # Field density and estimated number of members (previously
    # obtained)
    fd = field_dens

    # Fix N_memb
    N_memb = n_memb_i
    # Estimate N_memb with each sampler step
    # N_memb = None

    # Select the number of parameters to fit:
    # ndim = 2 fits (rc, rt)
    # ndim = 4 fits (rc, rt, ecc, theta)
    ndim = 2
    # HARDCODED ##########################

    # The tidal radius can not be larger than 'rt_max' times the estimated
    # "optimal" cluster radius. Used as a prior.
    rt_max = rt_max_f * cl_rad
    # Tidal radius array. Used for integrating
    rt_rang = np.linspace(0., rt_max, int(rt_max_f * N_integ))

    # Initial positions for the sampler.
    if ndim == 2:
        # Dimensions: rc, rt
        rc_pos0 = np.random.uniform(.05 * rt_max, rt_max, nchains)
        rt_pos0 = np.random.uniform(rc_pos0, rt_max, nchains)
        pos0 = np.array([rc_pos0, rt_pos0]).T
    elif ndim == 4:
        # Dimensions: rc, rt, ecc, theta
        rc_pos0 = np.random.uniform(.05 * rt_max, rt_max, nchains)
        rt_pos0 = np.random.uniform(rc_pos0, rt_max, nchains)
        ecc = np.random.uniform(0., 1., nchains)
        theta = np.random.uniform(0., np.pi, nchains)
        pos0 = np.array([rc_pos0, rt_pos0, ecc, theta]).T

    # Identify stars inside the cut-out given by the 'rt_max' value. Only these
    # stars will be processed below.
    xy = np.array((x, y)).T
    xy_cent_dist = spatial.distance.cdist([cl_cent], xy)[0]
    msk = xy_cent_dist <= rt_max
    xy_in = xy[msk].T
    r_in = xy_cent_dist[msk]

    args = {
        'ndim': ndim,
        'rt_max': rt_max,
        'cl_cent': cl_cent,
        'fd': fd,
        'N_memb': N_memb,
        'rt_rang': rt_rang,
        'xy_in': xy_in,
        'r_in': r_in
    }

    # emcee sampler
    sampler = ensemble.EnsembleSampler(nchains,
                                       ndim,
                                       lnprob,
                                       kwargs=args,
                                       moves=mv)

    # Run the sampler, hiding some warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        tau_index, autocorr_vals, afs = 0, np.empty(nruns), np.empty(nruns)
        old_tau = np.inf
        for i, (pos, prob,
                stat) in enumerate(sampler.sample(pos0, iterations=nruns)):

            # Every X steps
            if i % KP_steps and i < (nruns - 1):
                continue

            afs[tau_index] = np.mean(sampler.acceptance_fraction)
            tau = np.mean(sampler.get_autocorr_time(tol=0))
            autocorr_vals[tau_index] = tau
            tau_index += 1

            # Check convergence
            converged = tau * (N_conv / nchains) < i * nburn
            converged &= np.abs(old_tau - tau) / tau < tau_stable
            if converged:
                print("")
                break
            old_tau = tau

            update_progress.updt(nruns, i + 1)

        KP_mean_afs = afs[:tau_index]
        KP_tau_autocorr = autocorr_vals[:tau_index]

        # Remove burn-in
        nburn = int(i * nburn)
        samples = sampler.get_chain(discard=nburn, flat=True)

        # Extract mean, median, mode, 16th, 84th percentiles for each parameter
        rc, rt = np.mean(samples[:, :2], 0)
        rc_16, rc_50, rc_84, rt_16, rt_50, rt_84 = np.percentile(
            samples[:, :2], (16, 50, 84), 0).T.flatten()

        if ndim == 2:
            ecc, theta = 0., 0.
            ecc_16, ecc_50, ecc_84, theta_16, theta_50, theta_84 =\
                [np.array([np.nan] * 3) for _ in range(6)]
            # Mode and KDE to plot
            # This simulates the 'fundam_params and 'varIdxs' arrays.
            fp, vi = [[-np.inf, np.inf], [-np.inf, np.inf]], [0, 1]
            KP_Bys_mode, KP_Bayes_kde = modeKDE(fp, vi, samples.T)
            KP_Bys_mode += [0., 0.]
            KP_Bayes_kde += [[], []]

        elif ndim == 4:
            ecc = np.mean(samples[:, 2], 0)
            theta = circmean(samples[:, 3] * u.rad).value
            # Beware: the median and percentiles for theta might not be
            # properly defined.
            ecc_16, ecc_50, ecc_84, theta_16, theta_50, theta_84 =\
                np.percentile(samples[:, 2:], (16, 50, 84), 0).T.flatten()
            # Estimate the mode
            fp, vi = [[-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf],
                      [-np.inf, np.inf]], [0, 1, 2, 3]
            KP_Bys_mode, KP_Bayes_kde = modeKDE(fp, vi, samples.T)

        # Store: 16th, median, 84th, mean, mode
        KP_Bys_rc = np.array([rc_16, rc_50, rc_84, rc, KP_Bys_mode[0]])
        KP_Bys_rt = np.array([rt_16, rt_50, rt_84, rt, KP_Bys_mode[1]])
        KP_Bys_ecc = np.array([ecc_16, ecc_50, ecc_84, ecc, KP_Bys_mode[2]])

        KP_Bys_theta = np.array(
            [theta_16, theta_50, theta_84, theta, KP_Bys_mode[3]])

        # Effective sample size
        KP_ESS = samples.shape[0] / np.mean(sampler.get_autocorr_time(tol=0))

        # For plotting, (nsteps, nchains, ndim)
        KP_samples = sampler.get_chain()

    # Central density, for plotting. Use mean values for all the parameters.
    KP_cd = lnlike((rc, rt, ecc, theta), ndim, rt_max, cl_cent, fd, N_memb,
                   xy_in, r_in, rt_rang, True)

    return KP_cd, KP_steps, KP_mean_afs, KP_tau_autocorr, KP_ESS, KP_samples,\
        KP_Bys_rc, KP_Bys_rt, KP_Bys_ecc, KP_Bys_theta, KP_Bayes_kde
Example #19
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 03:16:08 2019

@author: dobri
"""

import numpy as np
from astropy.stats import circmean
from astropy import units as u
import csv

data = []
with open('data.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        data.append(np.array(row, dtype='float'))
        print(data[line_count])
        # convert degrees to radians before taking the circular mean
        mean_rad = circmean(np.deg2rad(data[line_count])) * u.rad
        print(mean_rad)
        print(mean_rad.to(u.deg))
        line_count += 1
    
    print(f'Processed {line_count} lines.')
Example #20
def heinze_real(mode=1, n_tb1=0):

    columns = [
        ['fz1028'],  # L8/R1
        ['060126', '060131', '050105a', 'fz1049'],  # L7/R2
        ['050329', '050309a', '050124b', '041215', 'fz1020',
         'fz1038'],  # L6/R3
        ['060124', '060206a', '060206b', 'fz1016'],  # L5/R4
        ['050520', '040604b', 'fz1040'],  # L4/R5
        [],  # L3/R6
        ['050309b', '050222'],  # L2/R7
        ['041209', 'fz1051'],  # L1/R8
    ]

    phi_tb1 = 3 * np.pi / 2 - np.linspace(0, np.pi, 8)
    # phi_tb1 = np.linspace(0, np.pi, 8) + np.pi/2
    phi = np.linspace(np.deg2rad(5), np.deg2rad(355), 36)
    phi_max = []

    for j, filenames in enumerate(
            columns if n_tb1 is None else [columns[n_tb1]]):
        col = j if n_tb1 is None else n_tb1
        phi_max.append([])

        for i, filename in enumerate(filenames):
            if 'fz' in filename:
                continue
            tb1s = loadmat("../data/TB1_neurons/mean_rotation_%s.mat" %
                           filename)['mean_rotation'][:, ::2]
            tb1s = tb1s.reshape((-1, 2)).mean(axis=1).reshape((1, -1))

            z = tb1s.max() - tb1s.min()
            r_std = tb1s.std(axis=0) / np.sqrt(z)
            bl = .5
            r_mean = tb1s.flatten() / tb1s.max() - bl
            p_value = rayleightest(phi, weights=r_mean + bl)
            phi_mean_00 = circmean((phi - np.pi / 2) % np.pi + np.pi / 2,
                                   weights=np.power(r_mean + bl, 8))
            phi_var_00 = circvar((phi - np.pi / 2) % np.pi + np.pi / 2,
                                 weights=np.power(r_mean + bl, 8))
            phi_mean_90 = circmean(phi % np.pi,
                                   weights=np.power(r_mean + bl, 8))
            phi_var_90 = circvar(phi % np.pi, weights=np.power(r_mean + bl, 8))
            phi_mean = phi_mean_00 if phi_var_00 < phi_var_90 else phi_mean_90
            d_00 = np.absolute((phi_mean - phi_tb1[-1 - i] + np.pi) %
                               (2 * np.pi) - np.pi)
            d_pi = np.absolute((phi_mean - phi_tb1[-1 - i]) % (2 * np.pi) -
                               np.pi)
            if d_00 > d_pi:
                phi_mean += np.pi
            phi_max[j].append(phi_mean)
            print "Col %d - %s, mean: % 3.2f, p = %.4f" % (
                col, filename, np.rad2deg(phi_mean), p_value)

            if mode == 1:
                plt.figure("heinze-L%d-R%d-%s" % (8 - col, col + 1, filename),
                           figsize=(3, 3))
                ax = plt.subplot(111, polar=True)
                ax.set_theta_zero_location("N")
                ax.set_theta_direction(-1)

                y_min, y_max = -.3, 1.1
                plt.bar(phi, bl + r_mean, .1, yerr=r_std, facecolor='black')
                plt.plot(np.linspace(-np.pi, np.pi, 361), np.full(361, bl),
                         'k-')
                x_mean = [
                    phi_mean, phi_mean, phi_mean + np.pi, phi_mean + np.pi
                ]
                plt.plot(x_mean, [y_max, y_min, y_min, y_max], 'r-.')
                plt.yticks([])
                plt.xticks(np.linspace(0, 2 * np.pi, 8, endpoint=False), [
                    r'%d$^\circ$' % x for x in (
                        (np.linspace(0, 360, 8, endpoint=False) + 180) % 360 -
                        180)
                ])
                plt.ylim([y_min, y_max])
                # plt.savefig("heinze-%s%d.eps" % ("abs-" if absolute else "uni-" if uniform else "", n_tb1))
                plt.show()

    if mode == 2:
        plt.figure("heinze-real-fig-1F", figsize=(5, 5))
        tb1_names = []
        x, y = [], []
        for i, phi_max_i in enumerate(phi_max):
            col = i if n_tb1 is None else n_tb1
            for j, phi_max_j in enumerate(phi_max_i):
                d_00 = np.absolute((phi_max_j - phi_tb1[-1 - i] + np.pi) %
                                   (2 * np.pi) - np.pi)
                d_pi = np.absolute((phi_max_j - phi_tb1[-1 - i]) %
                                   (2 * np.pi) - np.pi)
                if d_00 > d_pi:
                    phi_max_i[j] += np.pi

            phi_mean_i = circmean(np.array(phi_max_i)) % (2 * np.pi)
            if not np.isnan(phi_mean_i):
                x.append([phi_mean_i, 1])
                y.append(col)
            tb1_names.append('L%d/R%d' % (8 - col, col + 1))
            plt.scatter([col] * len(phi_max_i),
                        np.rad2deg(phi_max_i) % 360,
                        s=20,
                        c='black')
            plt.scatter(col,
                        np.rad2deg(phi_mean_i) % 360,
                        s=50,
                        c='red',
                        marker='*')
        x = np.array(x)
        y = np.array(y)
        a, b = np.linalg.pinv(x).dot(y)
        plt.plot([-1, 8], np.rad2deg([(-1 - b) / a, (8 - b) / a]), 'r-.')
        plt.xticks([0, 1, 2, 3, 4, 5, 6, 7], [
            tb1_names[0], '', tb1_names[2], '', tb1_names[4], '', tb1_names[6],
            ''
        ])
        plt.yticks([0, 45, 90, 135, 180, 225, 270, 315, 360],
                   ['0', '', '90', '', '180', '', '270', '', '360'])
        plt.ylim([-20, 380])
        plt.xlim([-1, 8])
        plt.show()
    def compute_sync(self, complex_signal: np.ndarray,
                     mode: str) -> np.ndarray:
        """
        Helper function for computing connectivity values.
        The result is a connectivity matrix of all possible electrode pairs
        between the dyad, including inter- and intra-brain connectivities.
        :param complex_signal: complex signal of shape
            (n_freq, 2, n_channel_count, n_sample_size); data for one dyad.
        :param mode: connectivity mode; see notes for details.
        :return: connectivity matrix of shape
            (n_freq, 2*n_channel_count, 2*n_channel_count)
        """
        n_ch, n_freq, n_samp = complex_signal.shape[2], complex_signal.shape[0], \
                               complex_signal.shape[3]

        complex_signal = complex_signal.reshape(n_freq, 2 * n_ch, n_samp)
        transpose_axes = (0, 2, 1)
        if mode.lower() == 'plv':
            phase = complex_signal / np.abs(complex_signal)
            c = np.real(phase)
            s = np.imag(phase)
            dphi = self._multiply_conjugate(c,
                                            s,
                                            transpose_axes=transpose_axes)
            con = abs(dphi) / n_samp

        elif mode.lower() == 'envelope correlation':
            env = np.abs(complex_signal)
            mu_env = np.mean(env, axis=2).reshape(n_freq, 2 * n_ch, 1)
            env = env - mu_env
            con = np.einsum('ilm,imk->ilk', env, env.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(env ** 2, axis=2), np.sum(env ** 2, axis=2)))

        elif mode.lower() == 'power correlation':
            env = np.abs(complex_signal)**2
            mu_env = np.mean(env, axis=2).reshape(n_freq, 2 * n_ch, 1)
            env = env - mu_env
            con = np.einsum('ilm,imk->ilk', env, env.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(env ** 2, axis=2), np.sum(env ** 2, axis=2)))

        elif mode.lower() == 'coherence':
            c = np.real(complex_signal)
            s = np.imag(complex_signal)
            amp = np.abs(complex_signal)**2
            dphi = self._multiply_conjugate(c,
                                            s,
                                            transpose_axes=transpose_axes)
            con = np.abs(dphi) / np.sqrt(
                np.einsum('il,ik->ilk', np.nansum(amp, axis=2),
                          np.nansum(amp, axis=2)))
            # self.logger.warning('con '+str(con[2,18:,0:18]))
        elif mode.lower() == 'imaginary coherence':
            c = np.real(complex_signal)
            s = np.imag(complex_signal)
            amp = np.abs(complex_signal)**2
            dphi = self._multiply_conjugate(c,
                                            s,
                                            transpose_axes=transpose_axes)
            con = np.abs(np.imag(dphi)) / np.sqrt(
                np.einsum('il,ik->ilk', np.nansum(amp, axis=2),
                          np.nansum(amp, axis=2)))

        elif mode.lower() == 'ccorr':
            angle = np.angle(complex_signal)
            mu_angle = circmean(angle, axis=2).reshape(n_freq, 2 * n_ch, 1)
            angle = np.sin(angle - mu_angle)

            formula = 'ilm,imk->ilk'
            con = np.einsum(formula, angle, angle.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(angle ** 2, axis=2), np.sum(angle ** 2, axis=2)))

        else:
            raise ValueError('Metric type not supported.')

        return con
Example #22
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr  1 11:14:13 2019

@author: dobri
"""

import numpy as np
from astropy.stats import circmean

x = np.multiply(np.pi, [(0, 1 / 4, 2 / 4, 3 / 4, 4 / 4),
                        (1, 5 / 4, 6 / 4, 7 / 4, 8 / 4),
                        (5 / 4, 5 / 4, 5 / 4, 5 / 4, 5 / 4),
                        (0 / 5, 2 / 5, 4 / 5, 6 / 5, 8 / 5)])
s = np.shape(x)

phikprime = np.array(x * 0, dtype=complex)
phikprimebar = np.zeros((s[1], 1), dtype=complex)
phikbar = np.zeros((s[0], 1))
rhok = np.zeros((s[0], 1))

for j in range(0, len(x)):
    for k in range(0, len(x[j, :])):
        phikprime[j, k] = complex(np.cos(x[j, k]), np.sin(x[j, k]))

    phikprimebar[j] = np.sum(phikprime[j, :]) / s[1]
    phikbar[j] = np.angle(phikprimebar[j])
    rhok[j] = np.absolute(phikprimebar[j])
    print(phikbar[j], circmean(x[j, :]), rhok[j])
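The loop above checks that circmean agrees with the angle of the mean unit phasor; the same check in vectorized form (a sketch reusing x, np, and circmean from the snippet):

row = x[1, :]  # second row of the test angles above
print(np.angle(np.mean(np.exp(1j * row))))  # mean-phasor angle, ~ -pi/2
print(circmean(row))                        # agrees, ~ -pi/2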
Example #23
def tune_curv_estims(gab_oris,
                     roi_data,
                     ngabs_tot,
                     nrois="all",
                     ngabs="all",
                     comb_gabs=False,
                     hist_n=1000,
                     collapse=True,
                     parallel=False):
    """
    tune_curv_estims(gab_oris, roi_data, ngabs_tot)

    Returns estimates of von Mises distributions for ROIs based on the 
    input orientations and ROI activations.

    Required args:
        - gab_oris (2D array): gabor orientation values (deg), 
                               structured as gab x seq
        - roi_data (2D array): ROI fluorescence data, structured as ROI x seq
        - ngabs_tot (int)    : total number of gabors

    Optional args:
        - nrois (int)     : number of ROIs to include in analysis
                            default: "all"
        - ngabs (int)     : number of gabors to include in analysis (set to 1 
                            if comb_gabs, as all gabors are combined)
                            default: "all"
        - comb_gabs (bool): if True, all gabors have been combined for 
                            gab_oris and roi_data 
                            default: False
        - hist_n (int)    : value by which to multiply fluorescence data to 
                            obtain histogram values
                            default: 1000
        - collapse (bool) : if True, opposite orientations in the 0 to 360 
                            range are collapsed to the 0 to 180 range 
        - parallel (bool) : if True, some of the analysis is parallelized 
                            across CPU cores

    Returns:
        - gab_tc_oris (list)      : list of orientation values (deg) 
                                    corresponding to the gab_tc_data, 
                                    structured as 
                                       gabor (1 if comb_gabs) x oris 
        - gab_tc_data (list)      : list of mean integrated fluorescence data 
                                    per orientation, for each ROI, structured 
                                    as: 
                                       ROI x gabor (1 if comb_gabs) x oris
        - gab_vm_pars (3D array)  : array of Von Mises parameters for each ROI: 
                                       ROI x gabor (1 if comb_gabs) x par
        - gab_vm_mean (2D array)  : array of mean Von Mises means for each ROI, 
                                    not weighted by kappa value or weighted 
                                    (if not comb_gabs) (in rad): 
                                        ROI x kappa weighted (False, (True))
        - gab_hist_pars (3D array): parameters used to convert tc_data to 
                                    histogram values (sub, mult) used in Von 
                                    Mises parameter estimation, structured as:
                                       ROI x gabor (1 if comb_gabs) x 
                                             param (sub, mult)
    """

    gab_oris = collapse_dir(gab_oris)

    kapw_bool = [0, 1]
    if comb_gabs:
        hist_n *= ngabs_tot
        kapw_bool = [0]
        ngabs = 1

    if ngabs == "all":
        ngabs = ngabs_tot
    if nrois == "all":
        roi_data.shape[0]

    # optionally runs in parallel
    if parallel and ngabs > np.max([1, nrois]):
        n_jobs = gen_util.get_n_jobs(ngabs)
        with gen_util.ParallelLogging():
            returns = Parallel(n_jobs=n_jobs)(delayed(estim_vm_by_roi)(
                gab_oris[g], roi_data, hist_n, parallel=False)
                                              for g in range(ngabs))
    else:
        returns = []
        for g in range(ngabs):
            returns.append(
                estim_vm_by_roi(gab_oris[g],
                                roi_data,
                                hist_n,
                                parallel=parallel))
    returns = list(zip(*returns))

    gab_tc_oris = [list(ret) for ret in returns[0]]
    gab_tc_data = [list(ret) for ret in zip(*returns[1])]  # move ROIs to first
    gab_vm_pars = np.transpose(np.asarray([list(ret) for ret in returns[2]]),
                               [1, 0, 2])
    gab_hist_pars = np.transpose(np.asarray([list(ret) for ret in returns[3]]),
                                 [1, 0, 2])
    means = gab_vm_pars[:, :, 1]
    kaps = gab_vm_pars[:, :, 0]

    import scipy.stats as st

    gab_vm_mean = np.empty([nrois, len(kapw_bool)])
    # unweighted circular mean over the 0 to pi range
    gab_vm_mean[:, 0] = st.circmean(means, high=np.pi, low=0, axis=1)
    if not comb_gabs:
        import astropy.stats as astrost
        # astropy's circmean has no range arguments (it works on the full
        # -pi to pi circle), so double the 0 to pi data, take the weighted
        # circular mean, shift into (0, 2 pi], and halve back
        gab_vm_mean[:, 1] = (astrost.circmean(means * 2., axis=1,
                                              weights=kaps) + np.pi) / 2

    return gab_tc_oris, gab_tc_data, gab_vm_pars, gab_vm_mean, gab_hist_pars
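The kappa-weighted branch above relies on a general axial-data trick: astropy's circmean works on the full -pi to pi circle, so to average orientations with period pi you double the angles, take the (weighted) circular mean, shift into (0, 2*pi], and halve. A minimal standalone sketch with made-up data:

import numpy as np
from astropy.stats import circmean

rng = np.random.default_rng(0)
means = rng.uniform(0, np.pi, size=(5, 4))  # made-up ROI x gabor means (rad)
kaps = rng.uniform(0.5, 3.0, size=(5, 4))   # made-up kappa weights

# double onto the full circle, weighted circular mean in (-pi, pi],
# shift into (0, 2*pi], halve back into (0, pi]
axial_mean = (circmean(means * 2., axis=1, weights=kaps) + np.pi) / 2.
print(axial_mean.shape)  # (5,): one kappa-weighted axial mean per ROI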
Example #24
0
    def compute_sync(self, complex_signal: np.ndarray,
                     mode: str) -> np.ndarray:
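        """
        Computes a channel-by-channel connectivity matrix from complex
        analytic signals.

        ``complex_signal`` is assumed, from the indexing below, to be shaped
        (n_freq, 2, n_ch, n_samp) for the two participants; ``mode`` selects
        the metric: 'plv', 'envelope correlation', 'power correlation',
        'coherence', 'imaginary coherence' or 'ccorr'.
        """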
        n_ch, n_freq, n_samp = complex_signal.shape[2], complex_signal.shape[0], \
                               complex_signal.shape[3]

        complex_signal = complex_signal.reshape(n_freq, 2 * n_ch, n_samp)
        transpose_axes = (0, 2, 1)
        if mode.lower() == 'plv':
            phase = complex_signal / np.abs(complex_signal)
            c = np.real(phase)
            s = np.imag(phase)
            dphi = self._multiply_conjugate(c,
                                            s,
                                            transpose_axes=transpose_axes)
            con = abs(dphi) / n_samp

        elif mode.lower() == 'envelope correlation':
            env = np.abs(complex_signal)
            mu_env = np.mean(env, axis=2).reshape(n_freq, 2 * n_ch, 1)
            env = env - mu_env
            con = np.einsum('ilm,imk->ilk', env, env.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(env ** 2, axis=2), np.sum(env ** 2, axis=2)))

        elif mode.lower() == 'power correlation':
            env = np.abs(complex_signal)**2
            mu_env = np.mean(env, axis=2).reshape(n_freq, 2 * n_ch, 1)
            env = env - mu_env
            con = np.einsum('ilm,imk->ilk', env, env.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(env ** 2, axis=2), np.sum(env ** 2, axis=2)))

        elif mode.lower() == 'coherence':
            c = np.real(complex_signal)
            s = np.imag(complex_signal)
            amp = np.abs(complex_signal)**2
            dphi = self._multiply_conjugate(c,
                                            s,
                                            transpose_axes=transpose_axes)
            con = np.abs(dphi) / np.sqrt(
                np.einsum('il,ik->ilk', np.nansum(amp, axis=2),
                          np.nansum(amp, axis=2)))

        elif mode.lower() == 'imaginary coherence':
            c = np.real(complex_signal)
            s = np.imag(complex_signal)
            amp = np.abs(complex_signal)**2
            dphi = self._multiply_conjugate(c,
                                            s,
                                            transpose_axes=transpose_axes)
            con = np.abs(np.imag(dphi)) / np.sqrt(
                np.einsum('il,ik->ilk', np.nansum(amp, axis=2),
                          np.nansum(amp, axis=2)))

        elif mode.lower() == 'ccorr':
            angle = np.angle(complex_signal)
            mu_angle = circmean(angle, axis=2).reshape(n_freq, 2 * n_ch, 1)
            angle = np.sin(angle - mu_angle)

            formula = 'ilm,imk->ilk'
            con = np.einsum(formula, angle, angle.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(angle ** 2, axis=2), np.sum(angle ** 2, axis=2)))

        else:
            raise ValueError('Metric type not supported.')

        return con
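For reference, the 'ccorr' branch above is a matrix form of the circular correlation coefficient. The same computation for a single frequency can be sketched standalone (the array names here are assumptions, not part of the class above):

import numpy as np
from astropy.stats import circmean

def ccorr_matrix(angle):
    # angle: (n_ch, n_samp) instantaneous phases -> (n_ch, n_ch) matrix
    mu = circmean(angle, axis=1).reshape(-1, 1)
    s = np.sin(angle - mu)
    num = s @ s.T
    denom = np.sqrt(np.outer((s ** 2).sum(axis=1), (s ** 2).sum(axis=1)))
    return num / denom

rng = np.random.default_rng(1)
phases = rng.uniform(-np.pi, np.pi, size=(4, 500))
print(ccorr_matrix(phases))  # 4 x 4, with ones on the diagonal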
Example #25
0
    def dbscan_average(self, keepgood):
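        """Clusters the kept markings with DBSCAN, averages each cluster's
        position, size, fringe count and (circular) angle, and draws the
        averaged ellipses over the matching subject image.
        """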
        db = []
        gooddata = []

        for i in range(len(keepgood)):
            if keepgood[i][3] is not None:
                try:
                    gooddata.append([
                        float(keepgood[i][0]),
                        float(keepgood[i][1]),
                        int(keepgood[i][2]),
                        int(keepgood[i][3]),
                        float(keepgood[i][4]),
                        float(keepgood[i][5]),
                        float(keepgood[i][6])
                    ])
                except ValueError:
                    pass  # skip rows with unparseable values

            #print("gooddata = ",gooddata)
        bad_xy = []  #might need to change this
        X = np.array(gooddata)
        #print("X = ",X)
        #print("\n len(X) = ",len(X))
        #X = X[:,[0,1]]
        #try:
        #    db = DBSCAN(eps=18, min_samples=3).fit(X[:,[0,1]])
        #except IndexError:
        #    try:
        #        db = DBSCAN(eps=18, min_samples=2).fit(X[:,[0,1]])
        #    except IndexError:
        #        pass
        #    except AttributeError:
        #        pass
        if len(X) > 0:
            db = DBSCAN(eps=18, min_samples=3).fit(X[:, [0, 1]])

            core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
            core_samples_mask[db.core_sample_indices_] = True
            labels = db.labels_

            unique_labels = set(labels)

            x_mean = []
            y_mean = []
            fringe_count_mean = []
            rx_mean = []
            ry_mean = []
            angle_mean = []

            for k in unique_labels:
                if k == -1:
                    # noise (label -1) has no core samples, so averaging it
                    # would only produce NaNs; skip it
                    continue

                x = []
                y = []
                fringe_count = []
                rx = []
                ry = []
                angle = []

                class_member_mask = (labels == k)
                # definitely "good" points: core samples of this cluster
                xy = X[class_member_mask & core_samples_mask]
                for each in range(len(xy)):
                    x.append(xy[each][0])
                    y.append(xy[each][1])
                    fringe_count.append(xy[each][3])
                    rx.append(xy[each][4])
                    ry.append(xy[each][5])
                    angle.append(xy[each][6])
                x_mean.append(np.mean(x))
                y_mean.append(np.mean(y))
                fringe_count_mean.append(np.mean(fringe_count))
                rx_mean.append(np.mean(rx))
                ry_mean.append(np.mean(ry))
                angles = np.array(angle) * u.deg
                angle_mean.append(circmean(angles).value)
                # "bad" points: non-core members of this cluster (some
                # maybe-bad and maybe-good points are included here)
                xy = X[class_member_mask & ~core_samples_mask]
                bad_xy.append(xy)

            averages = [
                x_mean, y_mean, fringe_count_mean, rx_mean, ry_mean, angle_mean
            ]
            ell = []
            for i in range(len(averages[0])):
                ell.append(
                    Ellipse(xy=[x_mean[i], y_mean[i]],
                            width=2 * rx_mean[i],
                            height=2 * ry_mean[i],
                            angle=angle_mean[i]))

            with open("all-subject-ids.csv") as csvfile:
                reader = csv.reader(csvfile)
                next(reader)
                subjects = [r for r in reader]

                #print(subjects)
            for i in range(len(subjects)):
                if subjects[i][0] == keepgood[0][2]:
                    filetoopen = subjects[i][1]
                    img = plt.imread("./images/" + filetoopen)
                    fig, ax_new = plt.subplots(figsize=(9, 8),
                                               dpi=72,
                                               facecolor='w',
                                               edgecolor='k')
                    ax_new.imshow(img,
                                  origin='lower',
                                  extent=[0, 512, 0, 384],
                                  cmap='gray')
                    for e in ell:
                        ax_new.add_artist(e)
                        e.set_alpha(.3)
                    ax_new.set_xlim(0, 512)
                    ax_new.set_ylim(0, 384)
                    plt.savefig("./output/" + subjects[i][0] + ".png",
                                bbox_inches='tight')

        else:
            averages = [[], [], [], [], [], []]  # x, y, fringe count, rx, ry, angle

        return averages
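Worth isolating from the cluster loop above: astropy's circmean on a degree Quantity returns the circular mean as a degree Quantity, so .value yields the plain float stored in angle_mean. A minimal sketch:

import numpy as np
import astropy.units as u
from astropy.stats import circmean

angles = np.array([350., 5., 15.]) * u.deg
print(circmean(angles))        # ~3.3 deg; the arithmetic mean would be 123.3
print(circmean(angles).value)  # plain float, as appended to angle_mean above

Note that ellipse angles are axial (period 180 deg); if opposite orientations should be treated as equal, the angle-doubling trick from the earlier example applies.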
Example #26
0
def heinze_1f(eta=.5, uniform=False):
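    """Plots a Heinze-style figure 1F: the preferred sun azimuths of the
    eight TB1 cells, estimated as power-weighted circular means of their
    (noisy) responses, together with a linear fit.
    """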
    from astropy.stats import circmean

    phi_tb1 = 3 * np.pi / 2 - np.linspace(np.pi, 0, 8)  # TB1 preference angles

    sun_azi = np.linspace(-np.pi, np.pi, 36, endpoint=False)
    sun_ele = np.full_like(sun_azi, np.pi / 2)
    tb1_ids = np.empty((0, sun_azi.shape[0], 8), dtype=sun_azi.dtype)
    tb1s = np.empty((0, sun_azi.shape[0], 8), dtype=sun_azi.dtype)

    for _ in range(100):
        d_deg, d_eff, t, phi, r_tb1 = evaluate(uniform_polariser=uniform,
                                               sun_azi=sun_azi,
                                               sun_ele=sun_ele,
                                               tilting=False,
                                               noise=eta)
        tb1s = np.vstack([tb1s, np.transpose(r_tb1, axes=(1, 0, 2))])
        tb1_ids = np.vstack(
            [tb1_ids, np.array([sun_azi] * 8).T.reshape((1, 36, 8))])
    z = tb1s.max() - tb1s.min()
    tb1s = (tb1s - tb1s.min()) / z

    phis = np.transpose(np.array([[sun_azi] * 100] * 8), axes=(1, 2, 0))
    d_deg, d_eff, t, phi, r_tb1 = evaluate(uniform_polariser=uniform,
                                           sun_azi=sun_azi,
                                           sun_ele=sun_ele,
                                           tilting=False,
                                           noise=0.)

    tb1 = np.transpose(r_tb1, axes=(1, 0, 2)) * 1e+15
    if uniform:
        phi_mean = circmean(phis, axis=1, weights=np.power(tb1s, 50))

        for i in range(phi_mean.shape[1]):
            d_00 = np.absolute((phi_mean[:, i] - phi_tb1[-1 - i] + np.pi) %
                               (2 * np.pi) - np.pi)
            d_pi = np.absolute((phi_mean[:, i] - phi_tb1[-1 - i]) %
                               (2 * np.pi) - np.pi)
            phi_mean[d_00 > d_pi, i] += np.pi

        phi_max = circmean(phis[0][np.newaxis],
                           axis=1,
                           weights=np.power(tb1, 50)).flatten()
        for i, phi_max_i in enumerate(phi_max):
            d_00 = np.absolute((phi_max_i - phi_tb1[-1 - i] + np.pi) %
                               (2 * np.pi) - np.pi)
            d_pi = np.absolute((phi_max_i - phi_tb1[-1 - i]) % (2 * np.pi) -
                               np.pi)
            if d_00 > d_pi:
                phi_max[i] += np.pi
    else:
        phi_mean = circmean(phis, axis=1, weights=np.power(tb1s, 50))
        phi_max = circmean(phi_mean, axis=0)

    x, y = [], []
    for i, phi_max_i in enumerate(phi_max):
        x.append([(phi_max_i + np.pi / 18) % (2 * np.pi) - np.pi / 18, 1])
        y.append(i)

    x = np.array(x)
    y = np.array(y)
    a, b = np.linalg.pinv(x).dot(y)

    plt.figure("heinze-%sfig-1F" % ("uni-" if uniform else ""), figsize=(5, 5))
    plt.scatter([0, 1, 2, 3, 4, 5, 6, 7][::-1] * 100,
                np.rad2deg(phi_mean) % 360,
                s=20,
                c='black')
    plt.scatter([0, 1, 2, 3, 4, 5, 6, 7][::-1],
                np.rad2deg(phi_max) % 360,
                s=50,
                c='red',
                marker='*')
    plt.plot([-1, 8][::-1], np.rad2deg([(-1 - b) / a, (8 - b) / a]), 'r-.')
    plt.xticks([0, 1, 2, 3, 4, 5, 6, 7], [
        tb1_names[0], '', tb1_names[1], '', tb1_names[2], '', tb1_names[3], ''
    ])
    plt.yticks([0, 45, 90, 135, 180, 225, 270, 315, 360],
               ['0', '', '90', '', '180', '', '270', '', '360'])
    plt.ylim([-20, 380])
    plt.xlim([-1, 8])
    plt.show()
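The weighted circular means above, with responses raised to the 50th power, act as a soft circular argmax over each cell's tuning curve. A minimal sketch with a made-up tuning curve (evaluate() and the TB1 arrays above are not reproduced):

import numpy as np
from astropy.stats import circmean

sun_azi = np.linspace(-np.pi, np.pi, 36, endpoint=False)
response = np.exp(np.cos(sun_azi - 0.7))  # made-up curve peaking near 0.7 rad
response = (response - response.min()) / (response.max() - response.min())

# sharply powered weights concentrate the circular mean around the peak
pref = circmean(sun_azi, weights=np.power(response, 50))
print(pref)  # ~0.7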