Example #1
    def momenta(self,chanrange=None,p=1):
        """abs(intensity)-weighted moment
           Does somewhat better than signed intensity-weighted moment.

           Parameters
           ----------
           chanrange: range of channels over which to compute moment
                      [startchan, endchan]
           p:         the moment to compute (the power of the frequency in the sum)
 
           Returns
           -------
             The computed moment
        """
        # get the masked array
        s = self.spec()
        chupper = len(s)-1
        chanrange = self._sanitizechanrange(chanrange,chupper)
        sum_s = ma.sum(ma.abs(s[chanrange[0]:chanrange[1]+1]))
        sum_sf = 0
        mean = 0
        if p > 1:
           mean = self.moment(chanrange,p-1)
        for i in range(chanrange[0],chanrange[1]+1):
           sum_sf += ma.abs(s[i])*math.pow((self._freq[i]-mean),p)
        return sum_sf/sum_s
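# A minimal standalone sketch of the same computation on plain masked
# arrays; `freq` and `spec` stand in for the class's self._freq and
# self.spec(), and the function name is ours, not the author's.
import numpy as np
import numpy.ma as ma

def abs_weighted_moment(freq, spec, p=1, mean=0.0):
    # p-th abs(intensity)-weighted moment of freq about `mean`
    w = ma.abs(spec)
    return ma.sum(w * (freq - mean) ** p) / ma.sum(w)

freq = np.linspace(100.0, 101.0, 11)
spec = ma.masked_invalid([0, 1, 3, 9, 12, 9, 3, 1, 0, np.nan, 0])
centroid = abs_weighted_moment(freq, spec)                    # p=1 centroid
width2 = abs_weighted_moment(freq, spec, p=2, mean=centroid)  # 2nd moment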
Example #2
def combine_nights(combined_catalog, filterlist, refcat):
    header = [
        'BEGIN CATALOG HEADER', 'nfields 13', '    ra    1 0 d degrees %10.6f',
        '    dec   2 0 d degrees %10.6f', '    id    3 0 c INDEF %3d'
    ]
    for filt in filterlist:
        header.append('    {}    {:2d} 0 r INDEF %6.3f'.format(
            filt,
            len(header) - 1))
        header.append('    {}err {:2d} 0 r INDEF %6.3f'.format(
            filt,
            len(header) - 1))
    header += ['END CATALOG HEADER', '']
    catalog = Table([refcat['ra'], refcat['dec'], refcat['id']],
                    meta={'comments': header},
                    masked=True)
    for filt in filterlist:
        mags = combined_catalog['mag'][combined_catalog['filter'] == filt]
        median = np.median(mags, axis=0)
        absdev_mag = mags - median
        mad = np.median(np.abs(absdev_mag), axis=0) * np.sqrt(pi / 2)
        mags.mask |= np.abs(absdev_mag) > 5 * mad
        catalog[filt] = np.median(mags, axis=0)
        catalog[filt + 'err'] = np.median(np.abs(mags - catalog[filt]),
                                          axis=0) * np.sqrt(pi / 2)
    return catalog
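# Aside on the sqrt(pi/2) factor above: for a Gaussian, E|X - mu| =
# sigma * sqrt(2/pi), so sqrt(pi/2) rescales a *mean* absolute deviation
# to sigma; applied to a *median* absolute deviation (as here), the usual
# consistency factor would be ~1.4826. A quick numeric check:
import numpy as np
from math import pi

x = np.random.default_rng(0).normal(size=100_000)   # sigma = 1
mad = np.median(np.abs(x - np.median(x)))
print(mad * np.sqrt(pi / 2))   # ~0.85, the scaling used above
print(mad * 1.4826)            # ~1.00, the MAD-to-sigma factor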
Example #3
def find_lines(peaks, fwhm, y=None, verbose=False):
    if y is None:
        y = np.arange(len(peaks))
    # Make all rows the same length (pad to the longest one)
    peaks = np.array(list(zip_longest(*peaks)), dtype='float')
    # if verbose:
    #     plt.plot(peaks.T, y, 'o')
    #     plt.show()
    msk = np.isnan(peaks)
    peaks = ma.array(peaks, mask=msk)
    col = ['C' + str(j) for j in range(9)]
    #     print(len(peaks))
    #     print()
    for i in range(len(peaks)):
        chunk = peaks[i:]
        line = chunk[0]
    #     msk = np.logical_not(np.isnan(line))
    #     k = ma.polyfit(y, line, 2)
    #     print(k)
        est = np.ones(len(y)) * ma.median(line)
    #     est = np.polyval(k, y)
        err = est - line
        move_right = ma.filled((err > 5 * ma.median(ma.abs(err))), False)
        move_left = ma.filled((err < -5 * ma.median(ma.abs(err))), False)
        not_move = np.logical_not(move_right + move_left)
        # plt.plot(y[not_move], chunk[0][not_move], '.' + col[i % 9])
        # plt.plot(y, est, col[i % 9], ls='--')
        # plt.plot(y[move_right], chunk[0][move_right], 'x' + col[i % 9])
        # plt.plot(y[move_left], chunk[0][move_left], '+' + col[i % 9])
        # plt.show()

    #         print(i)
    #         print(ma.mean(ma.abs(err)))
    #         print(ma.median(line))
    #         print()
        if np.sum(move_right) > 0:  # Those below the median (to the left)
            nonearray = ma.array([[None] * np.sum(move_right.astype('int'))], mask=[[True] * np.sum(move_right.astype('int'))])
            chunk[:, move_right] = ma.append(chunk[:, move_right][1:, :], nonearray, axis=0)
        if np.sum(move_left) > 0:
            nonearray = ma.array([[None] * np.sum(move_left.astype('int'))], mask=[[True] * np.sum(move_left.astype('int'))])
            chunk[:, move_left] = ma.append(nonearray, chunk[:, move_left][:-1, :], axis=0)
    #     plt.plot(chunk[0], col[i%9])
        peaks[i:] = chunk
    plt.show()
    peaks = peaks.T
    msk = np.isnan(peaks)
    peaks = ma.array(peaks, mask=msk)
    good_lines = (np.sum(np.logical_not(msk), axis=0) > len(y) / 4.)
    peaks = peaks[:, good_lines]
    return peaks
Example #5
    def fitBearingDrift(self, trajectory, windowSize=None, plotFit=False):
        lags = np.linspace(0, np.round(50. * trajectory.frameRate), 200)
        tau = lags / trajectory.frameRate
        if windowSize is None:
            psi = unwrapma(trajectory.getMaskedPosture(trajectory.psi))
            D = drift(psi, lags, trajectory.excluded)
            sel = ~ma.getmaskarray(psi)
            #p = np.polyfit(tau, D, 1)
            p = np.polyfit(trajectory.t[sel], psi[sel], 1)
            self.k_psi = p[0]
        else:

            def result(traj):
                psi = traj.getMaskedPosture(traj.psi)
                if float(len(psi.compressed())) / float(len(psi)) > 0.2:
                    psi = unwrapma(psi)
                    D = drift(psi, lags, traj.excluded)
                    sel = ~ma.getmaskarray(psi)
                    p = np.polyfit(traj.t[sel], psi[sel], 1)
                    return D, p[0]
                else:
                    return ma.zeros((len(lags), )) * ma.masked, ma.masked

            results = [
                result(traj) for traj in trajectory.asWindows(windowSize)
            ]
            D = ma.array([resulti[0] for resulti in results])
            k = ma.array([resulti[1] for resulti in results])
            self.k_psi = ma.abs(k).mean()
            D = ma.abs(D).T.mean(axis=1)

        if plotFit:
            plt.plot(tau, D, 'k.')
            plt.plot(tau, self.k_psi * tau, 'r-')
            plt.xlabel(r'$\tau$ (s)')
            plt.ylabel(r'$\langle |\psi(\tau) - \psi(0)| \rangle$ (rad)')
            textstr = r'$\langle|k_\psi|\rangle=%.2f$ rad/s' % (self.k_psi)
            props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
            # place a text box in lower left in axes coords
            ax = plt.gca()
            ax.text(0.95,
                    0.05,
                    textstr,
                    transform=ax.transAxes,
                    fontsize=14,
                    horizontalalignment='right',
                    verticalalignment='bottom',
                    bbox=props)
            plt.show()
Example #6
def openImage(nameFile, intersection):
    # this allows GDAL to throw Python Exceptions
    gdal.UseExceptions()
    band_num = 1
    #Tvis-animated.gif
    try:
        src_ds = gdal.Open(nameFile)
    except RuntimeError as e:
        print('Unable to open INPUT.tif')
        print(e)
        sys.exit(1)
    try:
        srcband = src_ds.GetRasterBand(band_num)
    except RuntimeError as e:
        # for example, try GetRasterBand(10)
        print('Band ( %i ) not found' % band_num)
        print(e)
        sys.exit(1)
    gt1 = src_ds.GetGeoTransform()
    Project = src_ds.GetProjection()
    print(nameFile)
    print(gt1)
    xOrigin = gt1[0]
    yOrigin = gt1[3]
    pixelWidth = float(gt1[1])
    pixelHeight = float(gt1[5])
    xmin = intersection[0]
    ymax = intersection[1]
    xoff = int((xmin - xOrigin) / pixelWidth)
    yoff = int((yOrigin - ymax) / pixelWidth)  # note: uses pixelWidth, i.e. assumes square pixels
    xcount = int(
        (np.abs(intersection[0]) - np.abs(intersection[2])) / pixelWidth)
    ycount = int(
        (np.abs(intersection[1]) - np.abs(intersection[3])) / pixelHeight)
    #print nameFile
    if (xoff == 0 and yoff == 0):
        lc = srcband.ReadAsArray()
    else:
        #print nameFile
        #print xoff
        #print yoff
        #print xcount
        #print ycount
        #if "MODIS" in nameFile:
        #lc = srcband.ReadAsArray(xoff, yoff, int(xcount), int(ycount))
        #else:
        lc = srcband.ReadAsArray(xoff, yoff, int(xcount), int(ycount))
    return src_ds, lc, gt1, Project
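# GDAL geotransform layout used above (north-up images):
#   gt1 = (originX, pixelWidth, rowRotation, originY, colRotation, pixelHeight)
# with pixelHeight typically negative. A hypothetical call, only to show the
# argument shapes; the file name and coordinates are made up:
#   src_ds, lc, gt1, proj = openImage('landcover.tif',
#                                     [xmin, ymax, xmax, ymin])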
Example #7
    def _calc_correlation(self, values_1, values_2, conf_level=0.95):
        """ Calculates Pearson's correlation coeffcient.
        Arguments:
            values_1 -- first data
            values_2 -- second data
            conf_level -- confidence level
        Returns:
            (corr_coeff, significance) -- correlation coefficient and significance arrays
        """
        n_samples = values_1.shape[0]  # Sample length
        # Calculate Pearson's correlation coefficient
        values_cov = ma.sum((values_1 - ma.mean(values_1, axis=0)) *
                            (values_2 - ma.mean(values_2, axis=0)),
                            axis=0)
        corr_coef = values_cov / (ma.std(values_1, axis=0) *
                                  ma.std(values_2, axis=0)) / n_samples

        # Calculate significance using t-distribution with n-2 degrees of freedom.
        deg_fr = n_samples - 2  # Degrees of freedom.
        t_distr = ma.abs(
            corr_coef *
            ma.sqrt(deg_fr / (1. - corr_coef**2)))  # Student's t-statistic.
        prob = 0.5 + conf_level / 2  # Probability for two tails.
        cr_value = student_t.ppf(prob, deg_fr)  # Student's Critical value.
        significance = ma.greater(t_distr, cr_value)

        return corr_coef, significance
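# A standalone re-run of the same math on synthetic data, assuming
# `student_t` is scipy.stats.t (not stated in the snippet).
import numpy as np
import numpy.ma as ma
from scipy.stats import t as student_t

rng = np.random.default_rng(0)
x = ma.array(rng.normal(size=(200, 3)))
y = 0.6 * x + ma.array(rng.normal(size=(200, 3)))
n = x.shape[0]
cov = ma.sum((x - ma.mean(x, axis=0)) * (y - ma.mean(y, axis=0)), axis=0)
r = cov / (ma.std(x, axis=0) * ma.std(y, axis=0)) / n
t_stat = ma.abs(r * ma.sqrt((n - 2) / (1. - r**2)))
print(r, t_stat > student_t.ppf(0.975, n - 2))  # significant at 95%?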
Example #8
def ma_mad(x, axis=None):
    """Median absolute deviation"""
    median_x = ma.median(x, axis=axis)
    if axis is not None:
        median_x = ma.expand_dims(median_x, axis=axis)

    return ma.median(ma.abs(x - median_x), axis=axis)
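# Usage: masked entries are simply ignored by ma.median.
import numpy as np
import numpy.ma as ma

x = ma.masked_invalid([1.0, 2.0, np.nan, 3.0, 100.0])
print(ma_mad(x))                          # MAD over the unmasked values
print(ma_mad(x.reshape(1, -1), axis=1))   # per-row MAD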
Example #9
    def ray_intersect(self, orig, direction) -> ma.array:
        normal = np.cross(self.a, self.b)

        ndotray = ma.array(v3_dots(normal, direction))
        ndotray.mask = ma.abs(ndotray) < 1e-3
        d = v3_dots(normal, orig)
        t = (v3_dots(normal, self.corner) + d) / ndotray
        t.mask |= t < 0

        pt = orig + t.reshape(-1, 1) * direction

        # TODO: try an alternative to find points in a parallelogram (or plane)
        # if that's easier. Maybe project onto a plane then use a parallelogram
        # as a basis in 2d?
        # https://stackoverflow.com/questions/59128744/raytracing-ray-vs-parallelogram
        # edge 0
        t.mask |= v3_dots(normal, np.cross(self.a, pt - self.corner)) < 0
        # edge 1
        t.mask |= v3_dots(normal, np.cross(pt - self.corner, self.b)) < 0
        # edge 2
        t.mask |= v3_dots(normal, np.cross(pt - self.corner - self.b,
                                           self.a)) < 0
        # edge 3
        t.mask |= v3_dots(normal, np.cross(self.b,
                                           pt - self.corner - self.a)) < 0

        return t
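# The method relies on a v3_dots helper that isn't shown. A plausible
# stand-in (our assumption, not the author's code) is a dot product over
# the last axis, broadcasting over rays:
import numpy.ma as ma

def v3_dots(a, b):
    return ma.sum(ma.asarray(a) * ma.asarray(b), axis=-1)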
Example #10
def psi_trend(traj, maxMissing):
    D = ma.array([tsstats.drift(tsstats.unwrapma(trajw.getMaskedPosture(trajw.psi)), lags, exclude=trajw.excluded)
                  for trajw in traj.asWindows(100.)
                  if fractionMissing(trajw)>maxMissing])
    D[np.isnan(D)] = ma.masked
    D = ma.abs(D).mean(axis=0)
    if len(D) == 0:
        D = ma.zeros((len(lags),))
        D[:] = ma.masked
    return D
Example #11
def tdpep(t,fm,PG0):
    """
    Transit-duration - Period - Epoch

    Parameters 
    ---------- 
    t   : Time series; only t[0] is used (epoch zero-point).
    fm  : Flux with bad data points masked out.  It is assumed that
          elements of f are evenly spaced in time.
    PG0 : Initial period grid.

    Returns
    -------

    epoch2d : Grid (twd,P) of best epoch 
    df2d    : Grid (twd,P) of transit depth 
    count2d : number of filled data for particular (twd,P)
    noise   : Grid (twd) typical scatter 
    PG      : The Period grid
    twd     : Grid of trial transit widths.

    """
    assert fm.fill_value == 0
    # Determine the grid of periods that corresponds to integer
    # multiples of cadence values
    PcadG,PG = P2Pcad(PG0)
       
    # Initialize tdur grid.  
    twdMi = a2tdur( P2a( PG[0 ] ) ) /keptoy.lc
    twdMa = a2tdur( P2a( PG[-1] ) ) /keptoy.lc
    twdG = np.round(np.linspace(twdMi,twdMa,4)).astype(int)

    rec2d = []
    noise = []
    for twd in twdG:
        dM = mtd(t,fm.filled(),twd)
        dM.mask = fm.mask | ~isfilled(t,fm,twd)
        rec2d.append( pep(t[0],dM,PcadG) )

        # Noise per transit 
        mad = ma.abs(dM)
        mad = ma.median(mad)
        noise.append(mad)

    rec2d = np.vstack(rec2d)

    make2d = lambda x : np.tile( np.vstack(x), (1,rec2d.shape[1] ))
    rec2d = mlab.rec_append_fields(rec2d,'noise',make2d(noise))
    rec2d = mlab.rec_append_fields(rec2d,'twd',  make2d(twdG))

    PG = np.tile( PG, (rec2d.shape[0],1 ))
    rec2d = mlab.rec_append_fields(rec2d,'PG',PG)

    s2n   = rec2d['fom']/rec2d['noise']*rec2d['count']
    rec2d = mlab.rec_append_fields(rec2d,'s2n',  s2n )
    return rec2d
Example #12
def combine_nights(combined_catalog, filterlist, refcat):
    header = ['BEGIN CATALOG HEADER',
              'nfields 13',
              '    ra    1 0 d degrees %10.6f',
              '    dec   2 0 d degrees %10.6f',
              '    id    3 0 c INDEF %3d']
    for filt in filterlist:
        header.append('    {}    {:2d} 0 r INDEF %6.3f'.format(filt, len(header) - 1))
        header.append('    {}err {:2d} 0 r INDEF %6.3f'.format(filt, len(header) - 1))
    header += ['END CATALOG HEADER', '']
    catalog = Table([refcat['ra'], refcat['dec'], refcat['id']], meta={'comments': header}, masked=True)
    for filt in filterlist:
        mags = combined_catalog['mag'][combined_catalog['filter'] == filt]
        median = np.median(mags, axis=0)
        absdev_mag = mags - median
        mad = np.median(np.abs(absdev_mag), axis=0) * np.sqrt(pi / 2)
        mags.mask |= np.abs(absdev_mag) > 5 * mad
        catalog[filt] = np.median(mags, axis=0)
        catalog[filt+'err'] = np.median(np.abs(mags - catalog[filt]), axis=0) * np.sqrt(pi / 2)
    return catalog
Example #13
    def fitBearingDrift(self, trajectory, windowSize=None, plotFit=False):
        lags = np.linspace(0, np.round(50.*trajectory.frameRate), 200)
        tau = lags / trajectory.frameRate
        if windowSize is None:
            psi = unwrapma(trajectory.getMaskedPosture(trajectory.psi))
            D = drift(psi, lags, trajectory.excluded)
            sel = ~ma.getmaskarray(psi)
            #p = np.polyfit(tau, D, 1)
            p = np.polyfit(trajectory.t[sel], psi[sel], 1)
            self.k_psi = p[0]
        else:
            def result(traj):
                psi = traj.getMaskedPosture(traj.psi)
                if float(len(psi.compressed()))/float(len(psi)) > 0.2:
                    psi = unwrapma(psi)
                    D = drift(psi, lags, traj.excluded)
                    sel = ~ma.getmaskarray(psi)
                    p = np.polyfit(traj.t[sel], psi[sel], 1)
                    return D, p[0]
                else:
                    return ma.zeros((len(lags),))*ma.masked, ma.masked

            results = [result(traj) for traj in trajectory.asWindows(windowSize)]
            D = ma.array([resulti[0] for resulti in results])
            k = ma.array([resulti[1] for resulti in results])
            self.k_psi = ma.abs(k).mean()
            D = ma.abs(D).T.mean(axis=1)
        
        
        if plotFit:
            plt.plot(tau, D, 'k.')
            plt.plot(tau, self.k_psi*tau, 'r-')
            plt.xlabel(r'$\tau$ (s)')
            plt.ylabel(r'$\langle |\psi(\tau) - \psi(0)| \rangle$ (rad)')
            textstr = r'$\langle|k_\psi|\rangle=%.2f$ rad/s'%(self.k_psi)
            props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
            # place a text box in lower left in axes coords
            ax = plt.gca()
            ax.text(0.95, 0.05, textstr, transform=ax.transAxes, fontsize=14,
                    horizontalalignment='right', verticalalignment='bottom', bbox=props)
            plt.show()
Example #14
def s2n_known(d, t, fm):
    """
    Compute the FFA S/N assuming we knew where the signal was before hand

    Parameters
    ----------

    d  : Simulation Parameters
    t  : time
    fm : lightcurve (masked)

    """
    # Compute the twd from the twdG with the closest to the injected tdur
    tdur = keptoy.tdurMA(d)
    itwd_close = np.argmin(np.abs(np.array(config.twdG) - tdur / config.lc))
    twd_close = config.twdG[itwd_close]
    dM = tfind.mtd(t, fm, twd_close)

    # Fold the LC on the closest period.  We compute all the FFA
    # periods around the closest P.  This is a little complicated, but
    # it allows us to use the same functions as we do in the complete
    # period scan.

    Pcad0 = int(np.floor(d['P'] / config.lc))
    t0cad, Pcad, meanF, countF = tfind.fold(dM, Pcad0)

    iPcad_close = np.argmin(np.abs(Pcad - d['P'] / config.lc))
    Pcad_close = Pcad[iPcad_close]
    P_close = Pcad_close * config.lc

    # Find the epoch that is closest to the injected phase.
    t0 = t[0] + t0cad * config.lc  # Epochs in days.
    phase = np.mod(t0, P_close) / P_close  # Phases [0,1)

    # The following 3 lines compute the difference between the FFA
    # phases and the injected phase.  Phases can be measured going
    # clockwise or counterclockwise, so we must choose the minimum
    # value.  No phases are more distant than 0.5.
    dP = np.abs(phase - d['phase'])
    dP = np.vstack([dP, 1 - dP])
    dP = dP.min(axis=0)

    iphase_close = np.argmin(dP)
    phase_close = phase[iphase_close]

    # s2n for the closest twd, P, phase
    noise = ma.median(ma.abs(dM))
    s2nF = meanF / noise * np.sqrt(countF)
    s2nP = s2nF[iPcad_close]  # length Pcad0 array with s2n for all the
    # P at P_closest
    s2n_close = s2nP[iphase_close]

    return phase, s2nP, twd_close, P_close, phase_close, s2n_close
Example #15
def my_polyfit(x, y, deg, degatt=0):
    k = ma.polyfit(x, y, degatt)
    res = my_poly(k, x)
    resid = ma.abs(res - y)
    medresid = ma.median(resid, axis=0)
    if y.ndim != 1:
        mask = ma.logical_not(ma.sum((resid > 3 * medresid), axis=1).astype('bool'))
    else:
        mask = (resid < 3 * medresid)
    y = y[mask]
    x = x[mask]
    k = ma.polyfit(x, y, deg)
    return k, mask
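# Usage sketch. `my_poly` isn't shown in the snippet; a stand-in that
# evaluates the polynomial (our assumption) is np.polyval.
import numpy as np
import numpy.ma as ma

def my_poly(k, x):
    return np.polyval(k, x)

x = ma.arange(50.0)
y = ma.array(1.0 + np.random.default_rng(0).normal(scale=0.1, size=50))
y[[5, 20]] += 50.0            # outliers, rejected by the degatt=0 pass
k, mask = my_polyfit(x, y, deg=1)
print(k, mask.sum())          # slope ~0, intercept ~1; 48 points kept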
Example #16
 def transform_non_affine(self, a):
     # NOTE: Critical to truncate valid range inside transform *and*
     # in limit_range_for_scale or get weird duplicate tick labels. This
     # is not necessary for positive-only scales because it is harder to
     # run up right against the scale boundaries.
     with np.errstate(divide='ignore', invalid='ignore'):
         m = ma.masked_where((a <= -90) | (a >= 90), a)
         if m.mask.any():
             m = np.deg2rad(m)
             return ma.log(ma.abs(ma.tan(m) + 1 / ma.cos(m)))
         else:
             a = np.deg2rad(a)
             return np.log(np.abs(np.tan(a) + 1 / np.cos(a)))
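# For reference: the stretched quantity is the inverse Gudermannian of
# the angle (the Mercator y-coordinate), which equals artanh(sin(a)).
import numpy as np

theta = np.deg2rad(45.0)
lhs = np.log(np.abs(np.tan(theta) + 1 / np.cos(theta)))
print(np.isclose(lhs, np.arctanh(np.sin(theta))))   # True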
Example #17
def psi_trend(traj, maxMissing):
    D = ma.array([
        tsstats.drift(tsstats.unwrapma(trajw.getMaskedPosture(trajw.psi)),
                      lags,
                      exclude=trajw.excluded) for trajw in traj.asWindows(100.)
        if fractionMissing(trajw) > maxMissing
    ])
    D[np.isnan(D)] = ma.masked
    D = ma.abs(D).mean(axis=0)
    if len(D) == 0:
        D = ma.zeros((len(lags), ))
        D[:] = ma.masked
    return D
Example #18
def tdpep(t, fm, par):
    """
    Transit-duration - Period - Epoch

    Parameters 
    ---------- 
    fm   : Flux with bad data points masked out.  It is assumed that
           elements of f are evenly spaced in time.
    par  : dict with the keys:
           - Pcad1 : First period (cadences)
           - Pcad2 : Last period (cadences)
           - twdG  : Grid of transit durations (cadences)

    Returns
    -------

    rtd : 2-D record array with the following fields at every trial
          (twd,Pcad):
          - noise
          - s2n
          - twd
          - fields in rep
    """
    PcadG = np.arange(par['Pcad1'], par['Pcad2'])
    twdG = par['twdG']
    assert fm.fill_value == 0
    # Determine the grid of periods that corresponds to integer
    # multiples of cadence values
    try:
        ntwd = len(twdG)
    except TypeError:
        ntwd = 1

    rtd = []
    for i in range(ntwd):  # Loop over twd
        twd = int(twdG[i])
        dM = mtd(fm, twd)

        func = lambda Pcad: ep(dM, Pcad)
        rep = list(map(func, PcadG))
        rep = np.hstack(rep)
        r = np.empty(rep.size, dtype=tddtype)
        for k in epdtype.names:
            r[k] = rep[k]
        r['noise'] = ma.median(ma.abs(dM))
        r['twd'] = twd
        r['t0'] = r['t0cad'] * config.lc + t[0]
        rtd.append(r)
    rtd = np.vstack(rtd)
    rtd['s2n'] = rtd['mean'] / rtd['noise'] * np.sqrt(rtd['count'])
    return rtd
Example #19
 def calc_directionality(dat, dist_range):
     nbin = len(dat)
     min_d,max_d = dist_range
     if min_d < 0 or max_d < min_d:
         raise ValueError('calc_directionality() requires 0 <= min_d <= max_d')
     directionality = ma.zeros(nbin)
     for i in range(nbin):
         if i < max_d or i >= nbin-max_d:
             directionality[i] = 0.0
         else:
             up = dat[i,(i-max_d):(i-min_d)].sum()
             down = dat[i,(i+min_d):(i+max_d)].sum()
             avg = (up+down)/2.0
             directionality[i] = (up-down)/ma.abs(up-down)*((up-avg)**2/avg + (down-avg)**2/avg)
     return directionality
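# Toy check, treating the snippet as a free function: a contact map with
# two diagonal blocks should flip the sign of the index at the boundary.
import numpy as np
import numpy.ma as ma

nbin = 20
rng = np.random.default_rng(1)
dat = ma.array(np.full((nbin, nbin), 0.1) + rng.uniform(0, 0.01, (nbin, nbin)))
dat[:10, :10] += 1.0
dat[10:, 10:] += 1.0
di = calc_directionality(dat, dist_range=(1, 4))
print(np.round(di, 1))   # positive just left of bin 10, negative just right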
Example #20
 def _find_fog_profiles(
     self,
     n_gates_for_signal_sum: int = 20,
     signal_sum_threshold: float = 1e-3,
     variance_threshold: float = 1e-15,
 ) -> np.ndarray:
     """Finds saturated (usually fog) profiles from beta_raw."""
     signal_sum = ma.sum(ma.abs(
         self.data["beta_raw"][:, :n_gates_for_signal_sum]),
                         axis=1)
     variance = _calc_var_from_top_gates(self.data["beta_raw"])
     is_fog = (signal_sum > signal_sum_threshold) | (variance <
                                                     variance_threshold)
     logging.info(f"Cleaned {sum(is_fog)} profiles with fog filter")
     return is_fog
Example #21
 def iterative_correction(self, max_iter=100, tolerance=1e-5):
     totalBias = ma.ones(self.nbin, float)
     mat = self.dat.copy()
     for r in range(max_iter):
         print('.', end='', file=sys.stderr)
         binSum = mat.sum(axis=1)
         mask = binSum==0
         bias = binSum/binSum[~mask].mean()
         bias[mask] = 1
         bias -= 1
         bias *= 0.8
         bias += 1
         totalBias *= bias
         biasMat = bias.reshape(1,len(bias)) * bias.reshape(len(bias),1)
         mat = mat / biasMat
         if ma.abs(bias-1).max() < tolerance:
             break
     self.dat = mat
     corr = totalBias[~mask].mean()
     self.bias = totalBias/corr
     self.corrected |= 0x1
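# The same damped balancing loop as a standalone sketch on a plain
# symmetric array (an illustration of the technique, not the class API).
import numpy as np

def ice_balance(mat, max_iter=100, tol=1e-5):
    mat = np.array(mat, dtype=float)
    total_bias = np.ones(mat.shape[0])
    for _ in range(max_iter):
        s = mat.sum(axis=1)
        empty = s == 0
        bias = s / s[~empty].mean()
        bias[empty] = 1
        bias = 1 + 0.8 * (bias - 1)       # damped update, as above
        total_bias *= bias
        mat /= np.outer(bias, bias)
        if np.abs(bias - 1).max() < tol:
            break
    return mat, total_bias

m = np.random.default_rng(0).uniform(size=(5, 5))
bal, bias = ice_balance(m + m.T)
print(bal.sum(axis=1))                    # ~equal row sums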
Example #22
    def identifyReversals(self, transitionWindow=2.):
        dpsi = self.getMaskedPosture(self.dpsi)
        rev = ma.abs(dpsi) > np.pi / 2.

        inRev = False
        state = np.zeros(self.t.shape, int)
        state[~rev] = 1
        state[rev] = 2
        state[ma.getmaskarray(rev)] = 0
        self.state = state

        revBoundaries = []
        currentRev = ma.zeros((2, ))
        # Parenthesize comparisons: `&` binds tighter than `==`.
        for j in range(1, self.t.shape[0] - 2):
            if not inRev:
                if (((state[j] == 2) & (state[j + 1] == 2)) |
                    ((state[j] == 2) & (state[j + 1] == 0) & (state[j + 2] == 2))):
                    if state[j - 1] == 0:
                        currentRev[0] = ma.masked
                    else:
                        currentRev[0] = j
                    inRev = True
            else:
                if (((state[j] == 1) & (state[j + 1] == 1)) |
                    ((state[j] == 1) & (state[j + 1] == 0) & (state[j + 2] == 1))):
                    currentRev[1] = j
                    inRev = False
                    revBoundaries.append(currentRev.copy())
                elif (state[j] == 0) & (state[j + 1] == 0):
                    currentRev[1] = ma.masked
                    inRev = False
                    revBoundaries.append(currentRev.copy())
        self.revBoundaries = ma.array(revBoundaries, dtype='int')

        revEdges = self.revBoundaries[:].compressed()
        for boundary in revEdges:
            self.nearRev[
                (self.t > boundary / self.frameRate - transitionWindow / 2.)
                & (self.t < boundary / self.frameRate +
                   transitionWindow / 2.)] = True
Example #23
def plot_glider(cube, cmap=plt.cm.viridis, figsize=(9, 3.75), track_inset=False):

    data = apply_range(cube)
    x = apply_range(cube.coord(axis="X"))
    y = apply_range(cube.coord(axis="Y"))
    z = apply_range(cube.coord(axis="Z"))
    t = cube.coord(axis="T")
    t = t.units.num2date(t.points.squeeze())

    fig, ax = plt.subplots(figsize=figsize)
    dist = distance(x, y)
    z = ma.abs(z)
    dist, _ = np.broadcast_arrays(dist[..., np.newaxis], z.filled(fill_value=np.NaN))
    dist, z = map(ma.masked_invalid, (dist, z))
    cs = ax.pcolor(dist, z, data, cmap=cmap, snap=True)
    kw = dict(orientation="horizontal", extend="both", shrink=0.65)
    cbar = fig.colorbar(cs, **kw)

    if track_inset:
        axin = inset_axes(
            ax,
            width=2,
            height=2,
            loc=4,
            bbox_to_anchor=(1.15, 0.35),
            bbox_transform=ax.figure.transFigure,
        )
        axin.plot(x, y, "k.")
        start, end = (x[0], y[0]), (x[-1], y[-1])
        kw = dict(marker="o", linestyle="none")
        axin.plot(*start, color="g", **kw)
        axin.plot(*end, color="r", **kw)
        axin.axis("off")

    ax.invert_yaxis()
    ax.invert_xaxis()
    ax.set_xlabel("Distance (km)")
    ax.set_ylabel("Depth (m)")
    return fig, ax, cbar
Example #24
    def identifyReversals(self, transitionWindow=2.):
        dpsi = self.getMaskedPosture(self.dpsi)
        rev = ma.abs(dpsi)>np.pi/2.

        inRev = False
        state = np.zeros(self.t.shape, int)
        state[~rev] = 1
        state[rev] = 2
        state[ma.getmaskarray(rev)] = 0
        self.state = state

        revBoundaries = []
        currentRev = ma.zeros((2,))
        # Parenthesize comparisons: `&` binds tighter than `==`.
        for j in range(1,self.t.shape[0]-2):
            if not inRev:
                if (((state[j]==2) & (state[j+1]==2)) |
                    ((state[j]==2) & (state[j+1]==0) & (state[j+2]==2))):
                    if state[j-1] == 0:
                        currentRev[0] = ma.masked
                    else:
                        currentRev[0] = j
                    inRev = True
            else:
                if (((state[j]==1) & (state[j+1]==1)) |
                    ((state[j]==1) & (state[j+1]==0) & (state[j+2]==1))):
                    currentRev[1] = j
                    inRev = False
                    revBoundaries.append(currentRev.copy())
                elif (state[j]==0) & (state[j+1]==0):
                    currentRev[1] = ma.masked
                    inRev = False
                    revBoundaries.append(currentRev.copy())
        self.revBoundaries = ma.array(revBoundaries, dtype='int')

        revEdges = self.revBoundaries[:].compressed()
        for boundary in revEdges:
            self.nearRev[(self.t>boundary/self.frameRate-transitionWindow/2.) &
                         (self.t<boundary/self.frameRate+transitionWindow/2.)] = True
Example #25
def noiseyReg(t, dM, thresh=2):
    """
    Noisey Region
    
    If certain regions are much noisier than others, we should remove
    them.  A typical value of the noise is computed using a median
    absolute deviation (MAD) on individual regions.  If certain regions are
    noiser by thresh, we mask them out.

    Parameters
    ----------

    dM     : Single event statistic (masked array)
    thresh : If region has MAD > thresh * typical MAD throw it out.
    """

    tm = ma.masked_array(t, mask=dM.mask)
    label = detrend.sepseg(tm)
    sL = ma.notmasked_contiguous(label)

    madseg = np.array([ma.median(ma.abs(dM[s])) for s in sL])
    madLC = np.median(madseg)  # Typical MAD of the light curve.

    isNoisey = np.zeros(tm.size).astype(bool)
    sNL = []  # List to keep track of the noisey segments
    for s, mads in zip(sL, madseg):
        if mads > thresh * madLC:
            isNoisey[s] = True
            sNL.append(s)

    if len(sNL) != 0:
        print("Removed following time ranges because data is noisy")
        print("----------------------------------------------------")

    for s in sNL:
        print("%.2f %.2f " % (t[s.start], t[s.stop - 1]))

    return isNoisey
Example #26
    def peak(self,chanrange=None):
        """Return the peak intensity in the given channel range
           If a mask exists, this function operates on the masked spectrum.

           Parameters
           ----------
           chanrange: range of channels over which to compute the peak
                      [startchan, endchan]

           Returns
           ----------
           Maximum of the absolute value of the spectrum in the channel range
                     max(abs(spectrum[startchan:endchan]))
        """
        s = self.spec()
        chupper = len(s)-1
        chanrange = self._sanitizechanrange(chanrange,chupper)

        # Handle one-channel ranges.
        if (chanrange[0] == chanrange[1]):
           return s[chanrange[0]]

        return ma.max(ma.abs(s[chanrange[0]:chanrange[1]]))
Example #27
            if np.all(group['dc1'][f0]):
                dc0 = np.sum(np.power(group['dc1'][f0], -2))**-0.5
                c0 = np.sum(
                    group['c1'][f0] * np.power(group['dc1'][f0], -2)) * dc0**2
            else:
                dc0 = 0.
                c0 = np.mean(group['c1'][f0])
            if np.all(group['dc2'][f1]):
                dc1 = np.sum(np.power(group['dc2'][f1], -2))**-0.5
                c1 = np.sum(
                    group['c2'][f1] * np.power(group['dc2'][f1], -2)) * dc1**2
            else:
                dc1 = 0.
                c1 = np.mean(group['c2'][f1])
            color = np.divide(m0 - m1 + z0 - z1, 1 - c0 + c1)
            dcolor = np.abs(color) * np.sqrt(
                np.divide(dm0**2 + dm1**2 + dz0**2 + dz1**2,
                          (m0 - m1 + z0 - z1)**2) +
                np.divide(dc0**2 + dc1**2, (1 - c0 + c1)**2))
            for row in group:
                colors.append(color)
                dcolors.append(dcolor)
        targets[filters] = np.array(colors)
        targets['d' + filters] = np.array(dcolors)

    # calibrate all the instrumental magnitudes
    zcol = [
        color_to_use[row['filter']][0]
        if color_to_use[row['filter']] else row['filter'] * 2
        for row in targets
    ]
Example #29
 def manhattan(a, b):
     """
     Manhattan distance between a and b
     """
     return ma.abs(a.real - b.real) + ma.abs(a.imag - b.imag)
Example #30
 def euclid(a,b):
     """
     Euclidean distance between a and b
     """
     return ma.abs(a - b)
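# Both helpers treat 2-D points as complex numbers (x + iy), so masked
# entries propagate automatically.
import numpy.ma as ma

a = ma.array([1 + 2j, 3 + 4j], mask=[False, True])
b = ma.array([0 + 0j, 1 + 1j])
print(manhattan(a, b))   # [3.0 --]
print(euclid(a, b))      # [2.2360... --]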
Example #31
    def __call__(self, value, clip=None):

        # read in parameters
        method = self.stretch
        exponent = self.exponent
        midpoint = self.midpoint

        # ORIGINAL MATPLOTLIB CODE

        if clip is None:
            clip = self.clip

        if np.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)

        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            result = (val - vmin) * (1.0 / (vmax - vmin))

            # CUSTOM APLPY CODE

            # Keep track of negative values
            negative = result < 0.

            if self.stretch == 'Linear':

                pass

            elif self.stretch == 'Log':

                # result = np.log(result * (self.midpoint - 1.) + 1.) \
                #        / np.log(self.midpoint)
                result = ma.log10(result * (self.midpoint - 1.) + 1.) \
                         / ma.log10(self.midpoint)

            elif self.stretch == 'Sqrt':

                result = ma.sqrt(ma.abs(result))

            elif self.stretch == 'Arcsinh':

                result = ma.arcsinh(result / self.midpoint) \
                         / ma.arcsinh(1. / self.midpoint)

            elif self.stretch == 'Arccosh':

                result = ma.arccosh(result / self.midpoint) \
                         / ma.arccosh(1. / self.midpoint)

            elif self.stretch == 'Power':

                result = ma.power(result, exponent)

            elif self.stretch == 'Exp':

                result = np.exp(result)

            else:

                raise Exception("Unknown stretch in APLpyNormalize: %s" %
                                self.stretch)

            # Now set previously negative values to -inf, as these are
            # different from true NaN values in the FITS image
            result[negative] = -np.inf

        if vtype == 'scalar':
            result = result[0]

        return result
Example #32
def measure(mode, x, y, x0, x1, thresh=0):
    """ return the a measure of y in the window x0 to x1
    """
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]

        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]

    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090':  #measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]

    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        return (0, 0)  # note: returns immediately; the slope code below is unreachable
        slope = np.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = np.array(self.dat[i][j, thisaxis, tb])
            ppars = np.polyfit(
                x[tb], ym[tb],
                1)  # do a linear fit - smooths the slope measures
            slope = np.append(slope, ppars[0])  # keep track of max slope
        r1 = np.amax(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
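# Typical usage on a sampled waveform, using the modes defined above.
import numpy as np

x = np.linspace(0.0, 1.0, 101)
y = np.sin(2 * np.pi * x)
peak, at_x = measure('max', x, y, 0.0, 0.5)   # 1.0 at x = 0.25
area, _ = measure('area', x, y, 0.0, 0.5)     # sum of samples / window width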
Example #33
            z0, dz0 = average_in_flux(group['z1'][f0], group['dz1'][f0])
            z1, dz1 = average_in_flux(group['z2'][f1], group['dz2'][f1])
            if np.all(group['dc1'][f0]):
                dc0 = np.sum(np.power(group['dc1'][f0], -2))**-0.5
                c0 = np.sum(group['c1'][f0] * np.power(group['dc1'][f0], -2)) * dc0**2
            else:
                dc0 = 0.
                c0 = np.mean(group['c1'][f0])
            if np.all(group['dc2'][f1]):
                dc1 = np.sum(np.power(group['dc2'][f1], -2))**-0.5
                c1 = np.sum(group['c2'][f1] * np.power(group['dc2'][f1], -2)) * dc1**2
            else:
                dc1 = 0.
                c1 = np.mean(group['c2'][f1])
            color = np.divide(m0 - m1 + z0 - z1, 1 - c0 + c1)
            dcolor = np.abs(color) * np.sqrt(
                        np.divide(dm0**2 + dm1**2 + dz0**2 + dz1**2, (m0 - m1 + z0 - z1)**2)
                        + np.divide(dc0**2 + dc1**2, (1 - c0 + c1)**2)
                                            )
            for row in group:
                colors.append(color)
                dcolors.append(dcolor)
        targets[filters] = np.array(colors)
        targets['d'+filters] = np.array(dcolors)

    # calibrate all the instrumental magnitudes
    zcol = [color_to_use[row['filter']][0] if color_to_use[row['filter']] else row['filter']*2 for row in targets]
    zeropoint = np.choose(zcol == targets['zcol1'], [targets['z2'], targets['z1']])
    dzeropoint = np.choose(zcol == targets['zcol1'], [targets['dz2'], targets['dz1']])
    colorterm = np.choose(zcol == targets['zcol1'], [targets['c2'], targets['c1']])
    dcolorterm = np.choose(zcol == targets['zcol1'], [targets['dc2'], targets['dc1']])
Example #34
def pgram_max(t,fm,par):
    """
    Periodogram: check max values

    Computes s2n for a range of P, t0, and twd. However, for every
    putative transit, we re-evaluate the transit depth having cut the
    deepest and second-deepest transits. We require that the mean depth
    after cutting those two values not be too much smaller. Good at
    removing locations with 2 outliers.

    Parameters 
    ----------
    t : t[0] provides starting time
    fm : masked array with fluxes
    par : dict with following keys
          - Pcad1 (lower period limit)
          - Pcad2 (upper period limit)
          - twdG (grid of trial durations to compute)

    Returns
    -------
    pgram : Record array with following fields

    """
    ncad = fm.size
    PcadG = np.arange(par['Pcad1'],par['Pcad2'])
    get_frac_Pcad = lambda P : np.arange(P,P+1,1.0*P / ncad)
    PcadG = np.hstack(list(map(get_frac_Pcad, PcadG)))
    twdG = par['twdG']
    
    icad = np.arange(ncad)

    dtype_pgram = [
        ('Pcad',float),
        ('twd',float),
        ('s2n',float),
        ('c',float),
        ('mean',float),
        ('t0',float),
        ('noise',float),
        ]

    pgram = np.zeros( (len(twdG),len(PcadG)),dtype=dtype_pgram)

    # dtype of the record array returned from tdpep()
    dtype_temp = [
        ('c',int),
        ('mean',float),
        ('s2n',float),
        ('col',int),
        ('t0',float)
    ] 

    # Loop over different transit durations
    for itwd,twd in enumerate(twdG):
        res = foreman_mackey_1d(fm,twd)    
        dM = ma.masked_array(
            res['depth_1d'],
            ~res['good_trans'].astype(bool),
            fill_value=0
            )

        # Compute noise (robust, based on MAD) on twd timescales
        noise = ma.median( ma.abs(dM) ) * 1.5
        pgram[itwd,:]['noise'] = noise
        pgram[itwd,:]['twd'] = twd
        for iPcad,Pcad in enumerate(PcadG):
            # Compute row and columns for folded data
            row,col = wrap_icad(icad,Pcad)
            
            ncol = np.max(col) + 1
            nrow = np.max(row) + 1
            icol = np.arange(ncol)

            # Shove data and mask into appropriate positions
            data = np.zeros((nrow,ncol))
            mask = np.ones((nrow,ncol)).astype(bool)
            data[row,col] = dM.data
            mask[row,col] = dM.mask

            # Sum along columns (clipping top 0, 1, 2 values)
            datasum,datacnt = cumsum_top(data,mask,2)
            # datasum[-1] is the summed columns having not clipped any
            # values. Update results array. For t0, add half the
            # transit width because the column index corresponds to
            # ingress.
            s = datasum[-1,:]
            c = datacnt[-1,:]
            r = np.zeros(ncol,dtype=dtype_temp)
            r['s2n'] = -1 
            r['mean'] = s/c
            r['s2n'] = s / np.sqrt(c) / noise            
            r['c'][:] = c
            r['col'] = icol
            r['t0'] = ( r['col'] + twd / 2.0) * config.lc + t[0] 

            # Compute mean transit depth after removing the deepest
            # transit, datacnt[-2], and the second deepest transit,
            # datacnt[-3]. The mean transit depth must be > 0.5 times its
            # former value. Also, require 3 transits.
            mean_clip1 = datasum[-2] / datacnt[-2]
            mean_clip2 = datasum[-3] / datacnt[-3]
            b = (
                (mean_clip1 > 0.5 * r['mean'] ) & 
                (mean_clip2 > 0.5 * r['mean']) & 
                (r['c'] >= 3)
            )

            if ~np.any(b):
                continue 

            rcut = r[b]
            rmax = rcut[np.argmax(rcut['s2n'])]
            names = ['mean','s2n','c','t0']
            for n in names:
                pgram[itwd,iPcad][n] = rmax[n]
            pgram[itwd,iPcad]['Pcad'] = Pcad
            
    # For each trial period, return the twd with the maximum s2n
    pgram = pgram[np.argmax(pgram['s2n'],axis=0),np.arange(pgram.shape[1])]
    return pgram
Example #35
def measure(mode, x, y, x0, x1, thresh=0):
    """ return the a measure of y in the window x0 to x1
    """
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == "mean":
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == "max" or mode == "maximum":
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == "min" or mode == "minimum":
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == "minormax":
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]

        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]

    if mode == "median":
        r1 = ma.median(ym)
        r2 = 0
    if mode == "p2p":  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == "std":  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == "var":  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == "cumsum":  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == "anom":  # anomalies = difference from averge
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == "sum":
        r1 = ma.sum(ym)
        r2 = 0
    if mode == "area" or mode == "charge":
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == "latency":  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == "1090":  # measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]

    if mode == "count":
        r1 = ma.count(ym)
        r2 = 0
    if mode == "maxslope":
        return (0, 0)
        slope = np.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = np.array(self.dat[i][j, thisaxis, tb])
            ppars = np.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = np.append(slope, ppars[0])  # keep track of max slope
        r1 = np.amax(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
Example #36
def measure(mode, x, y, x0, x1, thresh=0, slopewin=1.0):
    """ return the a measure of y in the window x0 to x1
    """
    xm = ma.masked_outside(x, x0, x1)# .compressed()
    ym = ma.array(y, mask = ma.getmask(xm))# .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]

        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]

    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p': # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std': # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var': # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum': # cumulative sum
        r1 = ma.cumsum(ym) # Note: returns an array
        r2 = 0
    if mode == 'anom': # anomalies = difference from average
        r1 = ma.anom(ym) # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym)/(ma.max(xm)-ma.min(xm))
        r2 = 0
    if mode == 'latency': # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090': #measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1*r1
        y90 = 0.9*r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]

    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        slope = []
        win = ma.flatnotmasked_contiguous(ym)
        dt = x[1]-x[0]
        st = int(slopewin/dt) # use slopewin duration window for fit.
        print('st: ', st)
        for k, w in enumerate(win): # move through the slope measurement window
            tb = range(k-st, k+st) # get tb array
            ppars = np.polyfit(x[tb], ym[tb], 1) # do a linear fit - smooths the slope measures
            slope.append(ppars[0]) # keep track of max slope
        r1 = np.max(slope)
        r2 = np.argmax(slope)
    return(r1, r2)
Example #37
    if v in var2:
        arr1 = var1[v][:]
        arr2 = var2[v][:]
        mask1 = ma.getmask(arr1)
        mask2 = ma.getmask(arr2)
        diffm1 = ma.where((mask1 == True) & (mask2 == False))
        diffm2 = ma.where((mask2 == 1) & (mask1 == 0))

        if len(diffm1[0]) > 0:
            print(
                "WARNING: SOME " + v +
                " PIXELS ARE MASKED IN THE FIRST FILE BUT NOT IN THE SECOND ONE"
            )
        if len(diffm2[0]) > 0:
            print(
                "WARNING: SOME " + v +
                " PIXELS ARE MASKED IN THE SECOND FILE BUT NOT IN THE FIRST ONE"
            )
        arr1 = ma.array(arr1, mask=mask2)
        print(arr1.shape)
        arr2 = ma.array(arr2, mask=mask1)
        diff = ma.abs((arr1[:, 2] - arr2[:, 2])) * 100 / ma.abs(arr2[:, 2])
        diffm = ma.where(diff > .5)
        if len(diffm[0]) > 0:
            print(arr1[diffm])
            print(arr2[diffm])
            for t in time[diffm]:
                print(t)
            print("WARNING NOT ALL VALUES ARE EQUAL FOR " + v + " VARIABLES")
Example #38
 def manhattan(a,b):
     """
     Manhattan distance between a and b
     """
     return ma.abs(a.real - b.real) + ma.abs(a.imag - b.imag)
Example #39
    def __fit__(self, rating, row=True):
        if isinstance(rating, ma.MaskedArray):
            self._rating = rating
        else:
            self._rating = ma.masked_equal(rating, 0)

        self._mean = ma.mean(self._rating, axis=1, keepdims=True)
        self._mean_center_rating = self._rating - self._mean
        self._rating_filled = self._rating.filled(0)

        if row:
            self._sim = person(mean_center_rating=self._mean_center_rating)
        else:
            self._rating = self._rating.T
            self._mean_center_rating = self._mean_center_rating.T
            self._sim = person(mean_center_rating=self._mean_center_rating)

        self._sim[np.diag_indices(self._sim.shape[0])] = -999

        self._skip_columns = np.where(self._rating.count(axis=0) == 0)[0]

        # params
        self._neighborhood = np.argsort(self._sim, axis=1)[:,
                                                           -self.config.topk:]
        self._neighborhood_idx = ([
            int(i / self._neighborhood.shape[1])
            for i in range(self._neighborhood.size)
        ], self._neighborhood.flatten())

        if row:
            self._m, self._n = rating.shape
        else:
            self._n, self._m = rating.shape

        if "_weight" not in self.__dict__:
            self._weight = np.random.randn(self._m, self.config.topk)
        if "_m_bias" not in self.__dict__:
            self._m_bias = np.random.randn(self._m)
        if "_n_bias" not in self.__dict__:
            self._n_bias = np.random.randn(self._n)

        assert self._weight.shape[1] == self.config.topk
        assert self._m_bias.shape[0] == self._m
        assert self._n_bias.shape[0] == self._n

        start = time.perf_counter()
        step = self._epoch * self._n

        for epoch in range(self._epoch, self.config.epochs):
            self._epoch = epoch

            for j in range(self._n):

                if j in self._skip_columns:
                    continue

                # forward
                step += 1
                _hat_rating, mid_data = self.__forward__(j)
                _loss = self.__loss__(self._rating[:, j], _hat_rating)

                logger.debug(
                    "[{:4d} step in {:4d} epoch\ttime:{:.2f}s] {}'s loss:{:.2f}"
                    .format(step, self._epoch,
                            time.perf_counter() - start, j, _loss))

                # backward
                _g_m_bias, _g_ngb_m_bias, _g_ngb_n_bias, _g_weight = self.__backward__(
                    _hat_rating, self._rating[:, j], mid_data[0], mid_data[1])

                if not ma.any(_g_m_bias):
                    continue

                for i, g in zip(self._neighborhood.flat, _g_ngb_m_bias.flat):
                    if g is not ma.masked:
                        _g_m_bias[i] += g

                # check gradient
                if self.config.check_gradient:
                    logger.debug("check gradient")
                    self.__check_gradient__(j, _g_weight, _g_m_bias,
                                            _g_ngb_n_bias)

                logger.debug(
                    "[gradient] max(m_bias): {}\tmax(n_bias): {}\tmax(weight):{}"
                    .format(ma.max(ma.abs(_g_m_bias)), ma.abs(_g_ngb_n_bias),
                            ma.max(ma.abs(_g_weight))))

                # update gradient
                self._m_bias -= self.config.lr / self._m * _g_m_bias + self.config.wdecay * self._m_bias
                self._n_bias[j] -= (self.config.lr / self._m * _g_ngb_n_bias +
                                    self.config.wdecay * self._n_bias[j])
                self._weight -= self.config.lr / self._m * _g_weight + self.config.wdecay * self._weight

            logger.debug(
                "[{:4d} epoch\ttime:{:.2f}s] epoch loss:{:.2f}".format(
                    epoch,
                    time.perf_counter() - start,
                    self.__loss__(self._rating, self.__predict__())))
            if epoch % self.config.save_per_epochs == 0:
                self.save()

        if self._epoch % self.config.save_per_epochs != 0:
            self.save()
Example #40
#select variable
Salt = RomsNC.variables['salt'][:]
Prime2 = (Salt - ma.mean(Salt))**2
varname = Prime2

#horizontal gradients
ds_dx = gr.x_grad_rho(RomsFile, RomsGrd, varname)
ds_dy = gr.y_grad_rho(RomsFile, RomsGrd, varname)

#grid corrections
xCor = gr.x_grad_GridCor_Rho(RomsFile, RomsGrd, varname)
yCor = gr.y_grad_GridCor_Rho(RomsFile, RomsGrd, varname)

#ratio
x_rat = ma.array(ma.abs(xCor)/ma.abs(ds_dx))
y_rat = ma.array(ma.abs(yCor)/ma.abs(ds_dy))

#depth median ratio
DM_ratx = ma.median(x_rat[0,:,:,:], axis = 0)
DM_raty = ma.median(y_rat[0,:,:,:], axis = 0)

#mask land values
Land = ma.getmask(varname)
Landdt = rt.AddDepthTime(RomsFile, Land)


Xratio = ma.array(x_rat, mask = Land).flatten()
Yratio = ma.array(y_rat, mask = Land).flatten()

#remove mask
Example #41
 def euclid(a, b):
     """
     Euclidean distance between a and b
     """
     return ma.abs(a - b)
Example #42
def mean_sigma_predictor(mean, sigma, z, sim, **not_used_kwargs):
    return mean + sigma * ma.sum(sim * z, axis=1) / ma.sum(ma.abs(sim), axis=1)
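# Toy call: one target with three neighbours; the keyword names follow
# the function's own parameters.
import numpy.ma as ma

sim = ma.array([[0.8, -0.2, 0.5]])   # similarities to 3 neighbours
z = ma.array([[1.0, 0.0, -1.0]])     # neighbours' standardized ratings
print(mean_sigma_predictor(mean=3.5, sigma=1.0, z=z, sim=sim))  # [3.7]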
Example #43
def correct_dualPRF_cmean(radar,
                          field='velocity',
                          Nprf=3,
                          corr_method='median',
                          Nmin=2):

    v_ma = radar.fields[field]['data']
    vcorr_ma = v_ma.copy()
    out_mask = np.zeros(v_ma.shape)

    # Dual-PRF parameters
    Vny = radar.instrument_parameters['nyquist_velocity']['data'][0]
    prf_flag = radar.instrument_parameters['prf_flag']['data']

    # Array with the primary Nyquist velocity corresponding to each bin
    Nprf_arr = primary_vel(v_ma.shape, Nprf, prf_flag=prf_flag)
    Vny_arr = Vny / Nprf_arr

    for nsweep, sweep_slice in enumerate(radar.iter_slice()):

        v0 = v_ma[sweep_slice]  # velocity field
        vp = Vny_arr[sweep_slice]  # primary velocities
        nprfp = Nprf_arr[sweep_slice]  # dual-PRF factors

        ref, out_mask[sweep_slice] = outlier_detector_cmean(v0,
                                                            Vny,
                                                            nprfp,
                                                            Nmin=Nmin)

        # Convert non-outliers to zero for correction procedure
        v0_out = v0 * out_mask[sweep_slice]
        vp_out = vp * out_mask[sweep_slice]
        ref_out = ref * out_mask[sweep_slice]
        vp_out_L = vp_out.copy()  # Only low PRF outliers
        vp_out_L[nprfp == Nprf] = 0

        dev = ma.abs(v0_out - ref_out)
        nuw = np.zeros(v0.shape)  # Number of unwraps (initialisation)

        for ni in range(-Nprf, (Nprf + 1)):

            # New velocity values for identified outliers
            if abs(ni) == Nprf:
                vcorr_out = v0_out + 2 * ni * vp_out_L
            else:
                vcorr_out = v0_out + 2 * ni * vp_out

            # New deviation for new velocity values
            dev_tmp = ma.abs(vcorr_out - ref_out)
            # Compare with previous
            delta = dev - dev_tmp
            # Update unwrap number
            nuw[delta > 0] = ni
            # Update corrected velocity and deviation
            vcorr_out_tmp = v0_out + 2 * nuw * vp_out
            dev = ma.abs(vcorr_out_tmp - ref_out)

        # Corrected velocity field
        vcorr_ma[sweep_slice] = v0 + 2 * nuw * vp

    return vcorr_ma, out_mask
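The inner loop above is, per outlier gate, a search for the number of Nyquist unwraps that brings the velocity closest to the local reference. An illustrative scalar version of that search (invented names, not Py-ART code):

import numpy as np

def best_unwrap(v, ref, vny_primary, nprf=3):
    # Candidate velocities v + 2*n*Vny for n in [-Nprf, Nprf]; keep the n
    # whose candidate deviates least from the reference velocity.
    n = np.arange(-nprf, nprf + 1)
    return n[np.argmin(np.abs(v + 2 * n * vny_primary - ref))]

print(best_unwrap(v=20.0, ref=-15.0, vny_primary=16.0))  # -1, i.e. 20 - 32 = -12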
Exemple #44
0
    def run(self):
        """ Main method of the class. Reads data arrays, process them and returns results. """

        self.logger.info('Started!')

        # Get inputs
        input_uids = self._data_helper.input_uids()
        assert input_uids, 'Error! No input arguments!'

        # Get parameters
        parameters = None
        if len(input_uids) == MAX_N_INPUT_ARGUMENTS:  # If parameters are given.
            parameters = self._data_helper.get(input_uids[INPUT_PARAMETERS_INDEX])
        feature = self._get_parameter('Feature', parameters, DEFAULT_VALUES)
        exceedance = self._get_parameter('Exceedance', parameters,
                                         DEFAULT_VALUES)
        condition = self._get_parameter('Condition', parameters,
                                        DEFAULT_VALUES)
        calc_mode = self._get_parameter('Mode', parameters, DEFAULT_VALUES)

        self.logger.info('Calculation feature: %s', feature)
        self.logger.info('Exceedance: %s', exceedance)
        if condition is not None:
            self.logger.info('Condition: %s', condition)
        self.logger.info('Calculation mode: %s', calc_mode)

        # Get outputs
        output_uids = self._data_helper.output_uids()
        assert output_uids, '(CalcExceedance::run) No output arguments!'

        # Get time segments and levels
        study_time_segments = self._data_helper.get_segments(
            input_uids[STUDY_UID])
        study_vertical_levels = self._data_helper.get_levels(
            input_uids[STUDY_UID])

        # Normals time segments should be set for year 1 (as set in a pdftails file)
        normals_time_segments = deepcopy(study_time_segments)
        percentile = self._data_helper.get_levels(input_uids[NORMALS_UID])[0]  # There should be only one level - percentile.
        for segment in normals_time_segments:
            segment['@beginning'] = '0001' + segment['@beginning'][4:]
            segment['@ending'] = '0001' + segment['@ending'][4:]

        # Read normals data
        normals_data = self._data_helper.get(input_uids[NORMALS_UID],
                                             segments=normals_time_segments)
        study_data = self._data_helper.get(input_uids[STUDY_UID],
                                           segments=study_time_segments)

        if exceedance == 'low':
            comparison_func = operator.lt
        elif exceedance == 'high':
            comparison_func = operator.gt
        else:
            self.logger.error('Error! Unknown exceedance value: \'%s\'',
                              exceedance)
            raise ValueError

        data_func = ma.max  # For calc_mode == 'data' we calculate max over all segments.

        input_description = self._data_helper.get_data_info(output_uids[0])['description']
        output_description = {'@title': input_description['@title']}
        if feature != 'total':
            output_description['@title'] = feature.capitalize() + ' of ' + output_description['@title']

        for level in study_vertical_levels:
            all_segments_data = []
            for segment in study_time_segments:
                normals_values = normals_data['data'][percentile][segment['@name']]['@values']
                study_values = study_data['data'][level][segment['@name']]['@values']
                study_time_grid = study_data['data'][level][segment['@name']]['@time_grid']

                # Remove Feb 29 from the study array (we do not take this day into consideration).
                study_values = self._remove_feb29(study_values,
                                                  study_time_grid)

                # Apply conditions if there are any.
                if condition is not None:
                    self._apply_condition(study_values, condition)

                # Compare values according to the chosen exceedance.
                comparison_mask = comparison_func(study_values, normals_values)

                # Perform calculation for the current time segment.
                if feature == 'frequency':  # We can just average 'True's to get a fraction and multiply by 100%.
                    one_segment_data = ma.mean(comparison_mask, axis=0) * 100
                    output_description['@units'] = '%'

                if feature == 'intensity':
                    diff = ma.abs(study_values - normals_values)  # Calculate difference
                    diff.mask = ma.mask_or(diff.mask, ~comparison_mask, shrink=False)  # and mask out unnecessary values.
                    one_segment_data = ma.mean(diff, axis=0)
                    output_description['@units'] = 'days'

                if feature == 'duration':
                    one_segment_data = self._calc_duration(comparison_mask)
                    output_description['@units'] = 'days'

                if feature == 'total':
                    study_values.mask = ma.mask_or(study_values.mask,
                                                   ~comparison_mask,
                                                   shrink=False)
                    one_segment_data = ma.sum(study_values, axis=0)
                    output_description['@units'] = 'days'

                # For segment-wise averaging, send the current time segment's results
                # to the output; otherwise store them for later aggregation.
                if calc_mode == 'segment':
                    self._data_helper.put(
                        output_uids[0],
                        values=one_segment_data,
                        level=level,
                        segment=segment,
                        longitudes=study_data['@longitude_grid'],
                        latitudes=study_data['@latitude_grid'],
                        fill_value=study_data['@fill_value'],
                        meta=study_data['meta'],
                        description=output_description)
                elif calc_mode == 'data':
                    all_segments_data.append(one_segment_data)
                else:
                    self.logger.error(
                        'Error! Unknown calculation mode: \'%s\'', calc_mode)
                    raise ValueError

            # For data-wise analysis, aggregate the per-segment results.
            if calc_mode == 'data':
                data_out = data_func(ma.stack(all_segments_data), axis=0)

                # Make a global segment covering all input time segments
                full_range_segment = deepcopy(study_time_segments[0])  # Take the beginning of the first segment...
                full_range_segment['@ending'] = study_time_segments[-1]['@ending']  # ...and the end of the last one.
                full_range_segment['@name'] = 'GlobalSeg'  # Give it a new name.

                output_description['@title'] = 'Maximum ' + output_description['@title']

                self._data_helper.put(output_uids[0],
                                      values=data_out,
                                      level=level,
                                      segment=full_range_segment,
                                      longitudes=study_data['@longitude_grid'],
                                      latitudes=study_data['@latitude_grid'],
                                      fill_value=study_data['@fill_value'],
                                      meta=study_data['meta'],
                                      description=output_description)

        self.logger.info('Finished!')
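For the 'frequency' branch above, the core computation is just the per-gridpoint fraction of exceedance days. A minimal sketch with made-up data standing in for what _data_helper would return:

import operator
import numpy.ma as ma

study = ma.array([[10., 25.], [30., 5.], [40., 15.]])  # (days, grid points)
normals = ma.array([20., 20.])                         # percentile per grid point
comparison_mask = operator.gt(study, normals)          # exceedance == 'high'
print(ma.mean(comparison_mask, axis=0) * 100)          # [66.67 33.33] per cent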