Code example #1
File: test_qc_tukey53H.py Project: castelao/CoTeDe
def test():
    dummy_data = {
        'PRES': ma.masked_array([1.0, 100, 200, 300, 500, 5000]),
        'TEMP': ma.masked_array([27.44, 14.55, 11.96, 11.02, 7.65, 2.12]),
        'PSAL': ma.masked_array([35.71, 35.50, 35.13, 35.02, 34.72, 35.03])
        }
    features = {
            'tukey53H': ma.masked_array([0, 0, 0.3525000000000009,
                0.35249999999999915, 0, 0],
                mask=[True, True, False, False, True, True]),
            'tukey53H_norm': ma.masked_array([0, 0, 0.07388721803621254,
                0.07388721803621218, 0, 0],
                mask = [True,  True, False, False, True, True])
            }
    flags = {'tukey53H_norm': np.array([0, 0, 1, 1, 0, 0], dtype='i1')}

    cfg = {
            'l': 5,
            'threshold': 6,
            'flag_good': 1,
            'flag_bad': 4
            }

    y = Tukey53H(dummy_data, 'TEMP', cfg)
    y.test()

    assert type(y.features) is dict
    for f in y.features:
        assert ma.allclose(y.features[f], features[f])
    for f in y.flags:
        assert ma.allclose(y.flags[f], flags[f])
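The assertions above rely on ma.allclose, which by default treats positions masked in either operand as equal (masked_equal=True), so only the unmasked feature values are actually compared. A minimal standalone illustration of that behaviour:

import numpy.ma as ma

a = ma.masked_array([1.0, 99.0, 3.0], mask=[False, True, False])
b = ma.masked_array([1.0, -1.0, 3.0], mask=[False, True, False])
assert ma.allclose(a, b)  # the masked second elements differ but are not compared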
Code example #2
def recip_ex(detector_size, pixel_size, calibrated_center, dist_sample,
             ub_mat, wavelength, motors, i_stack, H_range, K_range, L_range):
    # convert to Q space
    q_values = diffraction.process_to_q(motors, detector_size, pixel_size,
                                        calibrated_center, dist_sample,
                                        wavelength, ub_mat)

    # minimum and maximum values of the voxel
    q_min = np.array([H_range[0], K_range[0], L_range[0]])
    q_max = np.array([H_range[1], K_range[1], L_range[1]])

    # no. of bins
    dqn = np.array([40, 40, 1])

    # process the grid values
    (grid_data, grid_occu, std_err,
     grid_out, bounds) = diffraction.grid3d(q_values, i_stack, dqn[0], dqn[1],
                                            dqn[2])

    grid = np.mgrid[0:dqn[0], 0:dqn[1], 0:dqn[2]]
    r = (q_max - q_min) / dqn

    X = grid[0] * r[0] + q_min[0]
    Y = grid[1] * r[1] + q_min[1]
    Z = grid[2] * r[2] + q_min[2]

    # creating a mask
    _mask = grid_occu <= 10
    grid_mask_data = ma.masked_array(grid_data, _mask)
    grid_mask_std_err = ma.masked_array(std_err, _mask)
    grid_mask_occu = ma.masked_array(grid_occu, _mask)

    return X, Y, Z, grid_mask_data
Code example #3
File: plotter.py Project: GuiltyDolphin/RAYLEIGH
def _generate_with_coordinates(frame, outliers=None):
    """Generate a numpy array to be used for coordinate plotting.

    Parameters
    ----------
    frame : list-like of (x, y, z)
        The (x, y, z) values to be used in the array.
    outliers : number, optional (default None)
        The value to be used when calculating outliers.
        If the value is None then outliers will not be calculated.

    Returns
    -------
    arr : (ndarray)
        The generated numpy array
    """
    arr = np.vstack(frame)
    xs, ys, zs = arr.transpose()
    zeros = np.zeros((256, 256))
    zeros[(xs, ys)] = zs
    zmask = ma.masked_array(zeros, mask=zeros == 0)

    # Use Chauvenet's criterion to find the outliers
    if outliers is not None:
        d_max = np.abs(zmask - zmask.mean()) / zmask.std()
        return ma.masked_array(zmask, mask=d_max >= outliers)
    return zmask
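The array construction and the Chauvenet-style cut above are easy to exercise in isolation; a self-contained sketch with made-up hits (the 256x256 grid size is taken from the function, everything else is toy data):

import numpy as np
import numpy.ma as ma

# Hypothetical (x, y, z) hits on the 256x256 grid
xs = np.array([10, 10, 200])
ys = np.array([12, 13, 40])
zs = np.array([5.0, 6.0, 120.0])

grid = np.zeros((256, 256))
grid[xs, ys] = zs
zmask = ma.masked_array(grid, mask=(grid == 0))  # hide empty pixels

# Mask points more than `outliers` standard deviations from the mean
outliers = 2.0
d_max = np.abs(zmask - zmask.mean()) / zmask.std()
cleaned = ma.masked_array(zmask, mask=(d_max >= outliers))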
Code example #4
 def __init__(self, matrix, regions='', overlap = False):
     # Extract bin names
     if matrix.endswith('.gz'):
         with gzip.open(matrix) as inFile:
             self.binNames = inFile.readline().strip().split('\t')
     else:
         with open(matrix) as inFile:
             self.binNames = inFile.readline().strip().split('\t')
     # Create bin dataframe
     binDF = pd.DataFrame()
     binDF['chr'], binDF['start'], binDF['end'] = zip(
         *[re.split(':|-', x) for x in self.binNames])
     binDF[['start', 'end']] = binDF[['start', 'end']].astype(int)
     binDF['chr'] = binDF['chr'].astype(str)
     binDF['centre'] = np.mean([binDF['start'], binDF['end']], axis=0)
     self.binDF = binDF
     # Open probability matrix and check it is square
     probMatrix = np.loadtxt(matrix, skiprows = 1)
     if probMatrix.shape[0] != probMatrix.shape[1]:
         raise IOError('Matrix must be square')
     # Create mask matrix and remove low values
     chrArray = np.array(self.binDF['chr'])
     maskMatrix = chrArray != chrArray[:,None]
     lowValues = probMatrix.sum(axis=0) < 0.5
     maskMatrix[lowValues,:] = True
     maskMatrix[:,lowValues] = True
     # Add group data to dataframe
     groups = self.binDF['chr'].copy()
     groups[lowValues] = np.nan
     self.binDF['group'] = groups
     # Create masked probability and distance matrices
     self.probMatrix = ma.masked_array(probMatrix, mask = maskMatrix)
     centreArray = np.array(self.binDF['centre'])
     distMatrix = np.abs(centreArray - centreArray[:,None])
     self.distMatrix = ma.masked_array(distMatrix, mask = maskMatrix)
Code example #5
 def vectorField(self,plt,X,Y,U,V,title):
     if self.plotFields:
         # Create mask corresponding to 0 fluid velocity values inside obstructions
         M = (U.quiver == 0)
         
         # Mask the obstructions in the fluid velocity vector field
         U.quiver = ma.masked_array(U.quiver,mask=M)
         V.quiver = ma.masked_array(V.quiver,mask=M)
         
         # Build and scale the plot
         fig = plt.figure()        
         ax = fig.add_subplot(111)
         ax.set_ylim(Y.minValue, Y.maxValue)
         ax.set_xlim(X.minValue, X.maxValue)
         pos1 = ax.get_position()  
         pos2 = [pos1.x0+((pos1.width - (pos1.width*self.scaleX))*.5), pos1.y0+((pos1.height - (pos1.height*self.scaleY))*.5),  pos1.width*self.scaleX, pos1.height*self.scaleY] 
         ax.set_position(pos2)
         title = title + ' Vector Field'        
         plt.title(title)
         plt.xlabel("X [m]")
         plt.ylabel("Y [m]")
         plt.grid()
         
         plt.quiver(X.quiver, Y.quiver, U.quiver, V.quiver)
Code example #6
File: test_query.py Project: jzf2101/mixturemodel
def test_posterior_predictive_statistic():
    N, D = 10, 4  # D needs to be even
    defn = model_definition(N, [bb] * D)
    Y = toy_dataset(defn)
    prng = rng()
    view = numpy_dataview(Y)
    latents = [model.initialize(defn, view, prng) for _ in xrange(10)]
    q = ma.masked_array(
        np.array([(False,) * D], dtype=[('', bool)] * D),
        mask=[(False,) * (D / 2) + (True,) * (D / 2)])

    statistic = query.posterior_predictive_statistic(q, latents, prng)
    assert_equals(statistic.shape, (1,))
    assert_equals(len(statistic.dtype), D)

    statistic = query.posterior_predictive_statistic(
        q, latents, prng, merge='mode')
    assert_equals(statistic.shape, (1,))
    assert_equals(len(statistic.dtype), D)

    statistic = query.posterior_predictive_statistic(
        q, latents, prng, merge=['mode', 'mode', 'avg', 'avg'])
    assert_equals(statistic.shape, (1,))
    assert_equals(len(statistic.dtype), D)

    q = ma.masked_array(
        np.array([(False,) * D] * 3, dtype=[('', bool)] * D),
        mask=[(False,) * (D / 2) + (True,) * (D / 2)] * 3)
    statistic = query.posterior_predictive_statistic(q, latents, prng)
    assert_equals(statistic.shape, (3,))
    assert_equals(len(statistic.dtype), D)
Code example #7
File: tval.py Project: timothydmorton/fpp-old
def alias(t,fm,p):
    """
    Evaluate the Bayes Ratio between signal with P and 0.5*P

    Parameters
    ----------

    t  : Time series
    fm : Flux series
    p  : Parameter dictionary.
    
    """
    pA = copy.deepcopy(p)
    pA['P'] = 0.5 * pA['P']
    resL = LDTwrap(t,fm,pA)
    res  = np.hstack(resL)

    # Masked array corresponding to P = 2 P
    tfold  = getT(res['tdt'],pA['P'],pA['epoch'],pA['tdur'])

    tT     = ma.masked_array(res['tdt'],copy=True,mask=tfold.mask)
    fT     = ma.masked_array(res['fdt'],copy=True,mask=tfold.mask)
    
    X2 = lambda par : ma.sum( (fT - keptoy.P05(pd2a(par),tT))**2 )
    return X2(p),X2(pA)
Code example #8
File: phaseunwrap.py Project: Venki-Kavuri/bopy
def trgvsref(troot, rdir, rroot, mdir, mroot, wls, srcs, phasetag='median'):

    if phasetag == 'mean':
        print 'Phasetag:', phasetag
        phasetag = np.mean
    else:
        print 'Phasetag:', phasetag
        phasetag = np.median
        
    for s in srcs:
        mname = os.path.join(mdir, '{0}_s{1}.npy'.format(mroot, s))
        m = np.load(mname)
        for w in wls:
            tname = '{0}_wl{1}_s{2}_phi.npy'.format(troot, w, s)
            t = np.load(tname)
            tm = ma.masked_array(t, mask=m)
            rname = os.path.join(rdir, '{0}_wl{1}_s{2}_phi.npy'.format(rroot, w, s))
            r = np.load(rname)
            rm = ma.masked_array(r, mask=m)
            d = phasetag(tm-rm)
            if d > np.pi:
                print 'w:', w, ' s:', s, '-2PI'
                t = t - 2.0*np.pi
                os.rename(tname, '{0}_wrapped'.format(tname))
                np.save(tname, t)
            elif d < -np.pi: 
                print 'w:', w, ' s:', s, '+2PI'
                t = t + 2.0*np.pi
                os.rename(tname, '{0}_wrapped'.format(tname))
                np.save(tname, t)
Code example #9
File: sasmodel.py Project: HMP1/Sasmodels
def plot_result2D(data, theory, view='linear'):
    import matplotlib.pyplot as plt
    from numpy.ma import masked_array, masked
    #print "not a number",sum(np.isnan(data.data))
    #data.data[data.data<0.05] = 0.5
    mdata = masked_array(data.data, data.mask)
    mdata[np.isnan(mdata)] = masked
    if view == 'log':
        mdata[mdata <= 0] = masked
        mdata = np.log10(mdata)
        mtheory = masked_array(np.log10(theory), mdata.mask)
    else:
        mtheory = masked_array(theory, mdata.mask)
    mresid = masked_array((theory-data.data)/data.err_data, data.mask)
    vmin = min(mdata.min(), mtheory.min())
    vmax = max(mdata.max(), mtheory.max())
    print np.exp(np.mean(mtheory)), np.std(mtheory),np.max(mtheory),np.min(mtheory)

    plt.subplot(1, 3, 1)
    plot_data(data, mdata, vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.subplot(1, 3, 2)
    plot_data(data, mtheory, vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.subplot(1, 3, 3)
    print abs(mresid).max()
    plot_data(data, mresid)
    plt.colorbar()
Code example #10
    def getCountyVar(self, var):
        nyears, ncounties, nper = len(self.year), len(self.county), len(self.per)

        nyears1, nyears2 = len(self.cp1.year), len(self.cp2.year)

        v1 = self.cp1.getCountyVar(var)
        v2 = self.cp2.getCountyVar(var)

        # harmonize along county
        varr = masked_array(zeros((nyears1 + nyears2, ncounties, nper)), mask = ones((nyears1 + nyears2, ncounties, nper)))
        for i in range(ncounties):
            c = self.county[i]
            if c in self.cp1.county:
                idx = where(self.cp1.county == c)[0][0]
                varr[: nyears1, i] = v1[:, idx]
            if c in self.cp2.county:
                idx = where(self.cp2.county == c)[0][0]
                varr[nyears1 :, i] = v2[:, idx]

        if self.crop == 'wheat.winter':
            newvarr = masked_array(zeros((nyears, ncounties, nper)), mask = ones((nyears, ncounties, nper)))
            newvarr[: nyears1 + nyears2] = varr
            varr = newvarr

        return varr
Code example #11
File: count_time.py Project: UManPychron/pychron
    def _detect_outliers(self, xs, ys, outs, degree=2):
        xs = array(xs)
        ys = array(ys)

        mxs = masked_array(xs, mask=outs)
        #        print 's', sum(mxs), outs
        mys = masked_array(ys, mask=outs)
        o = OLS(mxs, mys, fitdegree=degree)
        coeffs = o.get_coefficients()

        n = len(xs) - sum(outs)
        #        coeff_errs = o.get_coefficient_standard_errors()

        #        ymean = ys.mean()
        yeval = polyval(coeffs, xs)

        # calculate detection_tol. use error of fit
        devs = abs(ys - yeval)
        ssr = sum(devs ** 2)
        detection_tol = 2.5 * (ssr / (n - degree)) ** 0.5

        for i, xi, yi, di, mi in zip(xrange(len(xs)), xs, ys, devs, outs):
            if di > detection_tol:
                outs[i] = 1
            omit = 'OK' if di <= detection_tol and not mi else 'User omitted'
            # print xi, yi, di, detection_tol, omit, mi
        return outs
Code example #12
    def setUp(self):
        # Sets up some useful arrays for use with the land/sea mask
        # decompression.
        self.land = np.array([[0, 1, 0, 0],
                              [1, 0, 0, 0],
                              [0, 0, 0, 1]], dtype=np.float64)
        sea = ~self.land.astype(bool)
        self.land_masked_data = np.array([1, 3, 4.5])
        self.sea_masked_data = np.array([1, 3, 4.5, -4, 5, 0, 1, 2, 3])

        # Compute the decompressed land mask data.
        self.decomp_land_data = ma.masked_array([[0, 1, 0, 0],
                                                 [3, 0, 0, 0],
                                                 [0, 0, 0, 4.5]],
                                                mask=sea,
                                                dtype=np.float64)
        # Compute the decompressed sea mask data.
        self.decomp_sea_data = ma.masked_array([[1, -10, 3, 4.5],
                                                [-10, -4, 5, 0],
                                                [1, 2, 3, -10]],
                                               mask=self.land,
                                               dtype=np.float64)

        self.land_mask = mock.Mock(data=self.land,
                                   lbrow=self.land.shape[0],
                                   lbnpt=self.land.shape[1])
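The fixture's decompressed land field can be reproduced directly: scatter the compressed values onto the land points (boolean indexing assigns in row-major order) and mask the sea. A sketch with the same toy numbers, independent of the Iris test harness:

import numpy as np
import numpy.ma as ma

land = np.array([[0, 1, 0, 0],
                 [1, 0, 0, 0],
                 [0, 0, 0, 1]], dtype=np.float64)
land_masked_data = np.array([1, 3, 4.5])

full = np.zeros(land.shape)
full[land.astype(bool)] = land_masked_data   # fill land points in row-major order
decomp_land = ma.masked_array(full, mask=~land.astype(bool))
# decomp_land matches self.decomp_land_data above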
Code example #13
File: InfSlabMS.py Project: Venki-Kavuri/bopy
    def set_gen3data(self, Apaths, Ppaths, Xdet, Zdet, omega, mask=None):

        self.omega = omega
        self.wbyv = omega/self.v
        self.amshape = mask.shape
        self.mask = mask

        for Apath, Ppath in zip(Apaths, Ppaths):
            # read the data
            Amp = bp.io.read_frame(Apath)
            Pha = bp.io.read_frame(Ppath)

            # mask all data arrays
            Amp = ma.compressed(ma.masked_array(Amp, self.mask))
            Pha = ma.compressed(ma.masked_array(Pha, self.mask))

            try:
                Amps = np.vstack((Amps, Amp))
                Phas = np.vstack((Phas, Pha))
            except NameError:  # first iteration: Amps/Phas do not exist yet
                Amps = Amp
                Phas = Pha
        self.Amps = Amps
        self.Phas = Phas

        # masked detector positions
        self.dIdx = np.arange(len(Xdet))
        self.dIdx = ma.compressed(ma.masked_array(self.dIdx.reshape(self.amshape), self.mask))
        self.Xdet = ma.compressed(ma.masked_array(Xdet.reshape(self.amshape), self.mask))
        self.Zdet = ma.compressed(ma.masked_array(Zdet.reshape(self.amshape), self.mask))
Code example #14
File: zfun.py Project: aaarendt/ice2oceans_api
def make_full(flt):
    """
    Adds top and bottom layers to array fld. This is intended for 3D ROMS data
    fields that are on the vertical rho grid, and where we want (typically for
    plotting purposes) to extend this in a smart way to the sea floor and the
    sea surface.
    
    Input:
        flt is a tuple with either 1 ndarray (fld_mid),
        or 3 ndarrays (fld_bot, fld_mid, fld_top)
        
    Output:
        fld is the "full" field
    """
    
    if len(flt) == 3:
        fld = np.concatenate(flt, axis=0)

    elif len(flt) == 1:
        fld_mid = flt[0]
        N, M, L = fld_mid.shape
        fld_bot = fld_mid[0].copy()
        fld_bot = ma.masked_array(fld_bot.reshape(1, M, L).copy(), fld_mid[0]._mask)
        fld_top = fld_mid[-1].copy()
        fld_top = ma.masked_array(fld_top.reshape(1, M, L).copy(), fld_mid[-1]._mask)
        fld = np.concatenate((fld_bot, fld_mid, fld_top), axis=0)

    else:
        raise ValueError('flt must be a tuple of 1 or 3 ndarrays')

    return fld
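A quick usage sketch under the docstring's assumptions (a masked 3-D field on the vertical rho grid; the shapes and values here are hypothetical):

import numpy as np
import numpy.ma as ma

fld_mid = ma.masked_array(np.random.rand(3, 4, 5))
fld_mid[0, 0, 0] = ma.masked          # e.g. a sea-floor point
full = make_full((fld_mid,))          # duplicates the bottom and top layers
# full.shape == (5, 4, 5)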
Code example #15
File: InfSlab1.py Project: Venki-Kavuri/bopy
    def set_gen3data(self, Apath, Ppath, Xdet, Zdet, 
            omega, mask_cutoff, toprowsmask=0, polygonmaskfile=None):

        self.omega = omega
        self.wbyv = omega/self.v

        # read the data
        self.Amp = bp.io.read_frame(Apath)
        self.Pha = bp.io.read_frame(Ppath)
        self.amshape = self.Amp.shape

        # set the mask
        self.mask = np.ones_like(self.Amp, dtype='bool')
        self.mask[self.Amp > mask_cutoff*np.max(self.Amp)] = False
        # mask top rows
        if toprowsmask > 0:
            toprowsmask = toprowsmask -1 
            self.mask[0:toprowsmask,:] = True

        # mask all data arrays
        self.Amp = ma.compressed(ma.masked_array(self.Amp, self.mask))
        self.Pha = ma.compressed(ma.masked_array(self.Pha, self.mask))

        # masked detector positions
        self.dIdx = np.arange(len(Xdet))
        self.dIdx = ma.compressed(ma.masked_array(self.dIdx.reshape(self.amshape), self.mask))
        self.Xdet = ma.compressed(ma.masked_array(Xdet.reshape(self.amshape), self.mask))
        self.Zdet = ma.compressed(ma.masked_array(Zdet.reshape(self.amshape), self.mask))
Code example #16
File: som.py Project: darribas/darribas-python-tools
def getZmv(a,mv):
    """
    Helper for stdDat

    Arguments:
    * a: array of strings with the input data
    * mv: string for missing values (e.g. 'x')
    Returns:
    * z: standardized masked array
    """
    mascara=N.zeros(a.shape)
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            if a[i,j]==mv:
                mascara[i,j]=1
                a[i,j]=0
    am=ma.masked_array(a,mask=mascara)
    am=N.array(am,dtype=float)
    z=N.copy(am)
    z=(z-z.mean(axis=0))/z.std(axis=0)
    z=ma.masked_array(z,dtype=str)
    for i in range(mascara.shape[0]):
        for j in range(mascara.shape[1]):
            if mascara[i,j]==1:
                z[i,j]='x'
    return z
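For comparison, the same standardization can be done while keeping the mask live through the arithmetic, so missing entries are excluded from the column means and deviations. A sketch with toy data, not a drop-in replacement for getZmv:

import numpy as np
import numpy.ma as ma

a = np.array([['1.0', 'x',   '3.0'],
              ['2.0', '5.0', 'x'],
              ['3.0', '7.0', '9.0']])
am = ma.masked_where(a == 'x', a)                                # mask the missing markers
am = ma.masked_array(am.filled('0').astype(float), mask=am.mask)
z = (am - am.mean(axis=0)) / am.std(axis=0)                      # column z-scores, mask preserved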
Code example #17
File: detrend.py Project: timothydmorton/fpp-old
def segfitm(t,fm,bv):
    """
    Segment fit masked
    
    Parameters
    ----------
    t   : time 
    fm  : flux for a particular segment
    bv  : vstack of basis vectors
    
    """
    ncbv  = bv.shape[0]
    
    tm = ma.masked_array(t,copy=True,mask=fm.mask)
    mask  = fm.mask 

    bv = ma.masked_array(bv)
    bv.mask = np.tile(mask, (ncbv,1) )

    # Eliminate masked elements
    tseg = tm.compressed() 
    fseg = fm.compressed()
    bvseg = bv.compressed() 
    bvseg = bvseg.reshape(ncbv,bvseg.size/ncbv)

    fdtseg,ffitseg,p1seg = segfit(tseg,fseg,bvseg)

    return fdtseg,ffitseg
Code example #18
File: pi.py Project: pborky/handy-code
def pi(n = 1000, internal = False, draw = False):
    '''Stochastic estimation of the number "pi" based on the Monte-Carlo method.'''
    n = int(n)
    radius = 1.0
    try:
        r = uniform(low = -radius, high = radius, size= (n,2))
        inside = power(radius,2) >= power(r,2).sum(1)
        nk = inside.sum() # number of hits inside the circle
        if draw:
            x = masked_array(r[:,0:1], mask=inside)
            y = masked_array(r[:,1:2], mask=inside)
            hold(True)
            plot(x.compressed(), y.compressed(),'.b')
            x = masked_array(r[:,0:1], mask=~inside)
            y = masked_array(r[:,1:2], mask=~inside)
            plot(x.compressed(), y.compressed(),'.r')
    except MemoryError: # divide and conquer
        print '** reducing sample count to %d' % int(n/100)
        (n, nk) = array([ pi(n = n / 1E2, internal = True ) for i in range(int(1E2)) ]).sum(0)
    
    if internal:
        return (n, nk)
    
    if draw: show()
    
    p = 4.0 * nk / n
    print 'nk = %d, n = %d, pi = 4 nk / n = %f' % (nk, n, p)
    return p
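The same estimate in a short self-contained form (modern NumPy RNG; plotting and the MemoryError fallback omitted); the masked arrays separate hits from misses exactly as in the draw branch above:

import numpy as np
import numpy.ma as ma

rng = np.random.default_rng(0)
r = rng.uniform(-1.0, 1.0, size=(100000, 2))
inside = (r ** 2).sum(axis=1) <= 1.0

x_out = ma.masked_array(r[:, 0], mask=inside)    # points outside the circle remain
x_in = ma.masked_array(r[:, 0], mask=~inside)    # points inside the circle remain

pi_est = 4.0 * inside.sum() / len(r)
print('pi estimate: %f' % pi_est)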
Code example #19
File: Reference.py Project: RDCEP/1896
    def getVar(self, years):
        # interpolate to common times and counties
        frac = self.census.getFrac(years)
        frac = self.interpolateCounty(frac, self.census.counties, self.counties)

        # interpolate to common times
        yldsum = self.interpolateTime(self.yldsum, self.ysum.years, years)
        yldirr = self.interpolateTime(self.yldirr, self.yirr.years, years, fillgaps = False) # no gap filling!
        hvtsum = self.interpolateTime(self.hvtsum, self.hsum.years, years)
        hvtirr = self.interpolateTime(self.hvtirr, self.hirr.years, years, fillgaps = False)

        # extrapolate irrigated yield, sum area, and irrigated area fraction
        yldirr, delta = self.extrapolateYield(yldirr, yldsum, years)
        hvtsum        = self.extrapolateTime(hvtsum, years)
        frac          = self.extrapolateFrac(frac, hvtsum, hvtirr, years)

        ny, nc, ni = len(years), len(self.counties), len(self.irr)
        yld = masked_array(zeros((ny, nc, ni)), mask = ones((ny, nc, ni)))
        hvt = masked_array(zeros((ny, nc, ni)), mask = ones((ny, nc, ni)))
        for i in range(nc):
            for j in range(ny):
                hvt[j, i] = self.computeArea(hvtsum[j, i],  hvtirr[j, i], frac[j, i])
                yld[j, i] = self.computeYield(yldsum[j, i], yldirr[j, i], hvt[j, i], delta[j, i])

            # extrapolate area in time
            for j in range(ni):
                area = hvt[:, i, j]
                if isMaskedArray(area) and area.mask.any() and not area.mask.all():
                    hvt[:, i, j] = interp(years, years[~area.mask], area[~area.mask])

        # convert
        yld *= self.yldconv
        hvt *= self.hvtconv

        return yld, hvt
Code example #20
def cloud_statistics(file_name):
    """
    Return core duration, minimum core base, maximum core height, mean core 
    mass, formation time, dissipation time, maximum depth, depth evolution and 
    corresponding times for tracked clouds.
        
    Parameters
    ----------
    file_name : netCDF file name
        id_profile file for a tracked core with dimensions double t(t), 
        double z(z).
      
    Returns
    -------
    tuple : cloud_id, lifetime, base, top, mass, l_min, l_max, depths,
        max_depth, times
    """
    
    # Read netCDF dataset
    data = Dataset(file_name)
    
    # Core ID
    cloud_id = int(file_name[-11:-3])
    
    # Core duration (seconds)
    times = data.variables['t'][...]
    lifetime = len(times)*mc.dt
    
    # Formation time, dissipation time (seconds)
    l_min = times.min()*mc.dt
    l_max = times.max()*mc.dt

    # Minimum core base, maximum core height, maximum depth, depth evolution 
    # (metres)
    area = ma.masked_invalid(data.variables['AREA'][...])
    z = data.variables['z'][...]
    z = z*np.ones(np.shape(area))
    z = ma.masked_array(z, ma.getmask(area)) 
    bases = z.min(axis=1)
    tops = z.max(axis=1)
    depths = tops - bases + mc.dz
    max_depth = depths.max()
    base = bases.min()
    top = tops.max()

    # Mean core mass (kilograms)
    qn = ma.masked_invalid(data.variables['QN'][...])
    rho = ma.masked_invalid(data.variables['RHO'][...])
    mass = np.mean(np.sum(area*rho*mc.dz, axis=1))

    # Remove missing values
    times = ma.masked_array(times, ma.getmask(depths))
    depths = depths[~depths.mask]
    times = times[~times.mask]
    
    data.close()
    
    return cloud_id, lifetime, base, top, mass, l_min, l_max, depths, \
        max_depth, times
Code example #21
File: test_calc_tools.py Project: dopplershift/MetPy
def test_delete_masked_points():
    """Test deleting masked points."""
    a = ma.masked_array(np.arange(5), mask=[False, True, False, False, False])
    b = ma.masked_array(np.arange(5), mask=[False, False, False, True, False])
    expected = np.array([0, 2, 4])
    a, b = _delete_masked_points(a, b)
    assert_array_equal(a, expected)
    assert_array_equal(b, expected)
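MetPy's _delete_masked_points is internal; the behaviour the test checks can be sketched as a union of the two masks followed by compression (an illustration, not the library's implementation):

import numpy as np
import numpy.ma as ma

a = ma.masked_array(np.arange(5), mask=[False, True, False, False, False])
b = ma.masked_array(np.arange(5), mask=[False, False, False, True, False])

joint = ma.getmaskarray(a) | ma.getmaskarray(b)   # invalid in either array
a_clean = a.data[~joint]                          # array([0, 2, 4])
b_clean = b.data[~joint]                          # array([0, 2, 4])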
Code example #22
File: mrecords.py Project: 1950/sawbuck
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
                 varnames=None, vartypes=None):
    """Creates a mrecarray from data stored in the file `filename`.

    Parameters
    ----------
    filename : {file name/handle}
        Handle of an opened file.
    delimitor : {None, string}, optional
        Alphanumeric character used to separate columns in the file.
        If None, any (group of) white space string(s) will be used.
    commentchar : {'#', string}, optional
        Alphanumeric character used to mark the start of a comment.
    missingchar : {'', string}, optional
        String indicating missing data, and used to create the masks.
    varnames : {None, sequence}, optional
        Sequence of the variable names. If None, a list will be created from
        the first non-empty line of the file.
    vartypes : {None, sequence}, optional
        Sequence of the variables dtypes. If None, it will be estimated from
        the first non-commented line.


    Ultra simple: the varnames are in the header, one line"""
    # Try to open the file ......................
    f = openfile(fname)
    # Get the first non-empty line as the varnames
    while True:
        line = f.readline()
        firstline = line[:line.find(commentchar)].strip()
        _varnames = firstline.split(delimitor)
        if len(_varnames) > 1:
            break
    if varnames is None:
        varnames = _varnames
    # Get the data ..............................
    _variables = masked_array([line.strip().split(delimitor) for line in f
                                  if line[0] != commentchar and len(line) > 1])
    (_, nfields) = _variables.shape
    # Try to guess the dtype ....................
    if vartypes is None:
        vartypes = _guessvartypes(_variables[0])
    else:
        vartypes = [np.dtype(v) for v in vartypes]
        if len(vartypes) != nfields:
            msg = "Attempting to %i dtypes for %i fields!"
            msg += " Reverting to default."
            warnings.warn(msg % (len(vartypes), nfields))
            vartypes = _guessvartypes(_variables[0])
    # Construct the descriptor ..................
    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
    mfillv = [ma.default_fill_value(f) for f in vartypes]
    # Get the data and the mask .................
    # We just need a list of masked_arrays. It's easier to create it like that:
    _mask = (_variables.T == missingchar)
    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
    return fromarrays(_datalist, dtype=mdescr)
Code example #23
File: __init__.py Project: SciTools/iris
 def test_masked_constant_not_in_place(self):
     # Cube in_place arithmetic operation.
     dtype = np.int64
     dat = ma.masked_array(0, 1, dtype)
     cube = Cube(dat)
     res = self.cube_func(cube, 5, in_place=False)
     self.assertMaskedArrayEqual(ma.masked_array(0, 1), res.data)
     self.assertEqual(dtype, res.dtype)
     self.assertIsNot(res, cube)
Code example #24
def difference(param):
 diff = []
 param_ldr = ma.masked_array(param, mask=LU_ldr)
 param_hdr = ma.masked_array(param, mask=LU_hdr)
 param_com = ma.masked_array(param, mask=LU_com)

 for i in range(len(param)):
    diff.append(x[i] - y[i])
 return diff
Code example #25
File: curvemap.py Project: wukm/cakepy
def principal_directions(img, sigma, H=None):
    """
    will ignore calculation of principal directions of masked areas

    despite the name, this function actually returns the theta corresponding to
    leading and trailing principal directions, i.e. angle w / x axis
    """

    if H is None:
        H = hessian_matrix(img, sigma)

    Hxx, Hxy, Hyy = H
    

    # check if input image is masked
    try:
        mask = img.mask
    except AttributeError:
        masked = False
    else:
        masked = True

    dims = img.shape

    # where to store
    trailing_thetas = np.zeros_like(img)
    leading_thetas = np.zeros_like(img)


    # maybe implement a small angle correction
    for i, (xx, xy, yy) in enumerate(np.nditer([Hxx, Hxy, Hyy])):
        
        # grab the (x,y) coordinate of the hxx, hxy, hyy you're using
        subs = np.unravel_index(i, dims)
        
        # ignore masked areas (if masked array)
        if masked and img.mask[subs]:
            continue

        h = np.array([[xx, xy], [xy, yy]]) # per-pixel hessian
        l, v = eig(h) # eigenvectors as columns
        
        # reorder eigenvectors by (increasing) magnitude of eigenvalues
        v = v[:,np.argsort(np.abs(l))]
        
        # angle between each eigenvector and positive x-axis
        # arccos of first element (dot product with (1,0) and eigvec is already
        # normalized)
        trailing_thetas[subs] = np.arccos(v[0,0]) # first component of each
        leading_thetas[subs] = np.arccos(v[0,1]) # first component of each
    
    if masked:
        leading_thetas = ma.masked_array(leading_thetas, mask)
        trailing_thetas = ma.masked_array(trailing_thetas, mask)


    return trailing_thetas, leading_thetas
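The per-pixel core of the loop — ordering the eigenvectors of the symmetric 2x2 Hessian by |eigenvalue| and taking each one's angle with the +x axis — can be checked in isolation (toy numbers):

import numpy as np
from numpy.linalg import eig

h = np.array([[2.0, 0.5],
              [0.5, -1.0]])        # symmetric per-pixel Hessian
l, v = eig(h)                      # eigenvectors as columns
v = v[:, np.argsort(np.abs(l))]    # reorder by increasing |eigenvalue|
trailing, leading = np.arccos(v[0, 0]), np.arccos(v[0, 1])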
Code example #26
def get_error_over_prec(len_var,var_names,signs,dirs,error):
    for idx in np.arange(0,len_var):
        data = get_data(dirs[idx])
        mask = ma.getmaskarray(data)
        if var_names[idx] == 'PREC':
            Prec = signs[idx]*ma.masked_array(data,mask)
        error+=data*signs[idx]
    error_m = ma.masked_array(error,mask)
    error_over_Prec = error_m/Prec
    return (error_m[0],error_over_Prec[0])
Code example #27
File: mpyloess.py Project: ruananswer/pyloess
 def __init__(self, x, y):
     x = masked_array(x, copy=False, subok=True, dtype=float_, order="F").ravel()
     y = masked_array(y, copy=False, subok=True, dtype=float_, order="F").ravel()
     if x.size != y.size:
         msg = "Incompatible size between observations (%s) and response (%s)!"
         raise ValueError(msg % (x.size, y.size))
     idx = x.argsort()
     self._x = x[idx]
     self._y = y[idx]
     self._mask = mask_or(self._x._mask, self._y._mask, copy=False)
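The mask bookkeeping here comes down to ma.mask_or, which unions two masks (treating nomask as all-False); a small illustration:

import numpy.ma as ma

x = ma.masked_array([1.0, 2.0, 3.0], mask=[True, False, False])
y = ma.masked_array([4.0, 5.0, 6.0], mask=[False, False, True])
joint = ma.mask_or(ma.getmaskarray(x), ma.getmaskarray(y))
# joint == array([ True, False,  True])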
Code example #28
File: raytomo.py Project: NoiseCIEI/NoisePy
 def _numpy2ma(self, inarray, reason_n=None):
     """Convert input numpy array to masked array
     """
     if reason_n is None:
         outarray=ma.masked_array(inarray, mask=np.zeros(self.reason_n.shape) )
         outarray.mask[self.reason_n!=0]=1
     else:
         outarray=ma.masked_array(inarray, mask=np.zeros(reason_n.shape) )
         outarray.mask[reason_n!=0]=1
     return outarray
Code example #29
def get_error_extreme(len_var,var_names,signs,dirs,error):
    for idx in np.arange(0,len_var):
        data = get_data(dirs[idx])
        mask = ma.getmaskarray(data)
        if var_names[idx] == 'PREC':
            Prec = signs[idx]*ma.masked_array(data,mask)
        error+=data*signs[idx]
    error_m = ma.masked_array(error,mask)
    error_over_Prec = error_m/Prec
    return np.unravel_index(error_m[0].argmax(),error_m[0].shape)
Code example #30
File: test_product.py Project: cedadev/ceda-di
    def test_GIVEN_masked_entries_for_lats_and_lons_THEN_summary_does_not_contain_elements(self):
        latitudes = ma.masked_array([3, 5.1, 1.1, 6.5], [False, True, False, False])
        longitudes = ma.masked_array([0, 120,  -120, 100], [False, False, True, False])
        gen = GeoJSONGenerator(latitudes=latitudes, longitudes=longitudes)

        geojson = gen.get_elasticsearch_geojson()

        for result, expected in zip(geojson["geometries"]["display"]["coordinates"], [(0, 3.0), (100, 6.5)]):
            assert_that(result[0], close_to(expected[0], 0.01), "lon")
            assert_that(result[1], close_to(expected[1], 0.01), "lat")
Code example #31
File: mpyloess.py Project: tasmi/pyloess
 def __init__(self, n):
     self._fval = masked_array(empty((n,), dtype=float_, order='F'))
     self._rw = masked_array(empty((n,), dtype=float_, order='F'))
     self._fres = masked_array(empty((n,), dtype=float_, order='F'))
Code example #32
File: mpyloess.py Project: tasmi/pyloess
 def __init__(self, y):
     self.y = masked_array(y, subok=True, copy=False).ravel()
     self._mask = self.y._mask
     if self._mask.any():
         raise ValueError("Masked arrays should be filled first!")
     self.y_eff = self.y.compressed()
Code example #33
File: mpyloess.py Project: tasmi/pyloess
 def __init__(self, n):
     self._seasonal = masked_array(empty((n,), float_))
     self._trend = masked_array(empty((n,), float_))
     self._weights = masked_array(empty((n,), float_))
     self._residuals = masked_array(empty((n,), float_))
Code example #34
def main():
    """Run the main routine for the script."""

    # Define the limits to plot in the various stellar parameters.
    temp_lims = (5400, 6300) * u.K
    mtl_lims = (-0.75, 0.45)
    logg_lims = (4.1, 4.6)

    tqdm.write('Unpickling transitions list...')
    with open(vcl.final_selection_file, 'r+b') as f:
        transitions_list = pickle.load(f)
    vprint(f'Found {len(transitions_list)} transitions.')

    # Define the model to use.
    if args.linear:
        model_func = fit.linear_model
    elif args.quadratic:
        model_func = fit.quadratic_model
    elif args.cross_term:
        model_func = fit.cross_term_model
    elif args.quadratic_magnitude:
        model_func = fit.quadratic_mag_model

    # model_func = fit.quadratic_model
    model_name = '_'.join(model_func.__name__.split('_')[:-1])
    tqdm.write(f'Using {model_name} model.')

    db_file = vcl.databases_dir / f'stellar_db_{model_name}_params.hdf5'
    # Load data from HDF5 database file.
    tqdm.write('Reading data from stellar database file...')
    star_transition_offsets = u.unyt_array.from_hdf5(
        db_file, dataset_name='star_transition_offsets')
    star_transition_offsets_EotWM = u.unyt_array.from_hdf5(
        db_file, dataset_name='star_transition_offsets_EotWM')
    star_transition_offsets_EotM = u.unyt_array.from_hdf5(
        db_file, dataset_name='star_transition_offsets_EotM')
    star_temperatures = u.unyt_array.from_hdf5(
        db_file, dataset_name='star_temperatures')

    with h5py.File(db_file, mode='r') as f:

        star_metallicities = hickle.load(f, path='/star_metallicities')
        star_magnitudes = hickle.load(f, path='/star_magnitudes')
        star_gravities = hickle.load(f, path='/star_gravities')
        column_dict = hickle.load(f, path='/transition_column_index')
        star_names = hickle.load(f, path='/star_row_index')

    # Handle various fitting and plotting setup:
    eras = {'pre': 0, 'post': 1}
    param_dict = {'temp': 0, 'mtl': 1, 'logg': 2}
    plot_types = ('temp', 'mtl', 'logg')

    params_list = []
    # Figure out how many parameters the model function takes, so we know how
    # many to dynamically give it later.
    num_params = len(signature(model_func).parameters)
    for i in range(num_params - 1):
        params_list.append(0.)

    # Set up the figure with subplots.
    comp_fig, axes_dict = create_comparison_figure(ylims=None)

    if args.label:
        labels = (args.label, )

    else:
        # labels = ['4219.893V1_16', '4490.998Fe1_25', '4492.660Fe2_25',
        #           '4500.398Fe1_25', '4589.484Cr2_28', '4653.460Cr1_29',]
        # '4738.098Fe1_32', '4767.190Mn1_33', '4811.877Zn1_34',
        # '4940.192Fe1_37', '5138.510Ni1_42', '5178.000Ni1_43',
        # '5200.158Fe1_43', '5571.164Fe1_50', '5577.637Fe1_50',
        # '6067.161Fe1_59', '6123.910Ca1_60', '6144.183Si1_60',
        # '6155.928Na1_61', '6162.452Na1_61', '6178.520Ni1_61',
        # '6192.900Ni1_61']
        labels = []
        for transition in tqdm(transitions_list):
            for order_num in transition.ordersToFitIn:
                label = '_'.join([transition.label, str(order_num)])
                labels.append(label)

        tqdm.write(f'Analyzing {len(labels)} transitions.')

    if not args.nbins:
        bin_dict = {}
        # Set bins manually.
        # bin_dict[name] = np.linspace(5457, 6257, 5)
        bin_dict['temp'] = [
            5377, 5477, 5577, 5677, 5777, 5877, 5977, 6077, 6177, 6277
        ]
        # bin_dict[name] = np.linspace(-0.75, 0.45, 5)
        bin_dict['mtl'] = [-0.75, -0.6, -0.45, -0.3, -0.15, 0, 0.15, 0.3, 0.45]
        # bin_dict[name] = np.linspace(4.1, 4.6, 5)
        bin_dict['logg'] = [4.04, 4.14, 4.24, 4.34, 4.44, 4.54, 4.64]

        for time in eras.keys():
            for plot_type, lims in zip(plot_types,
                                       (temp_lims, mtl_lims, logg_lims)):
                ax = axes_dict[f'{plot_type}_{time}']
                for limit in bin_dict[plot_type]:
                    ax.axvline(x=limit, color='Green', alpha=0.6, zorder=1)

    # Create an array to store all the individual sigma_sys values in, in order
    # to get the means and STDs for each bin.
    row_len = len(labels)
    temp_col_len = len(bin_dict['temp']) - 1
    metal_col_len = len(bin_dict['mtl']) - 1
    logg_col_len = len(bin_dict['logg']) - 1

    # First axis is for pre- and post- fiber change values: 0 = pre, 1 = post
    temp_array = np.full([2, row_len, temp_col_len], np.nan)
    metal_array = np.full([2, row_len, metal_col_len], np.nan)
    logg_array = np.full([2, row_len, logg_col_len], np.nan)
    full_arrays_dict = {
        key: value
        for key, value in zip(plot_types, (temp_array, metal_array,
                                           logg_array))
    }

    for label_num, label in tqdm(enumerate(labels), total=len(labels)):

        vprint(f'Analyzing {label}...')
        # The column number to use for this transition:
        try:
            col = column_dict[label]
        except KeyError:
            print(f'Incorrect key given: {label}')
            sys.exit(1)

        for time in tqdm(eras.keys()):

            vprint(20 * '=')
            vprint(f'Working on {time}-change era.')
            mean = np.nanmean(star_transition_offsets[eras[time], :, col])

            # First, create a masked version to catch any missing entries:
            m_offsets = ma.masked_invalid(
                star_transition_offsets[eras[time], :, col])
            m_offsets = m_offsets.reshape([len(m_offsets), 1])
            # Then create a new array from the non-masked data:
            offsets = u.unyt_array(m_offsets[~m_offsets.mask], units=u.m / u.s)
            vprint(f'Median of offsets is {np.nanmedian(offsets)}')

            m_eotwms = ma.masked_invalid(
                star_transition_offsets_EotWM[eras[time], :, col])
            m_eotwms = m_eotwms.reshape([len(m_eotwms), 1])
            eotwms = u.unyt_array(m_eotwms[~m_eotwms.mask], units=u.m / u.s)

            m_eotms = ma.masked_invalid(
                star_transition_offsets_EotM[eras[time], :, col])
            m_eotms = m_eotms.reshape([len(m_eotms), 1])
            # Use the same mask as for the offsets.
            eotms = u.unyt_array(m_eotms[~m_offsets.mask], units=u.m / u.s)
            # Create an error array which uses the greater of the error on
            # the mean or the error on the weighted mean.
            err_array = np.maximum(eotwms, eotms)

            vprint(f'Mean is {np.mean(offsets)}')
            weighted_mean = np.average(offsets, weights=err_array**-2)
            vprint(f'Weighted mean is {weighted_mean}')

            # Mask the various stellar parameter arrays with the same mask
            # so that everything stays in sync.
            temperatures = ma.masked_array(star_temperatures)
            temps = temperatures[~m_offsets.mask]
            metallicities = ma.masked_array(star_metallicities)
            metals = metallicities[~m_offsets.mask]
            magnitudes = ma.masked_array(star_magnitudes)
            mags = magnitudes[~m_offsets.mask]
            gravities = ma.masked_array(star_gravities)
            loggs = gravities[~m_offsets.mask]

            stars = ma.masked_array([key for key in star_names.keys()
                                     ]).reshape(len(star_names.keys()), 1)
            names = stars[~m_offsets.mask]

            # Stack the stellar parameters into vertical slices
            # for passing to model functions.
            x_data = np.stack((temps, metals, loggs), axis=0)

            # Create the parameter list for this run of fitting.
            params_list[0] = float(mean)

            beta0 = tuple(params_list)
            vprint(beta0)

            # Iterate over binned segments of the data to find what additional
            # systematic error is needed to get a chi^2 of ~1.
            arrays_dict = {
                name: array
                for name, array in zip(plot_types, (temps, metals, loggs))
            }

            popt, pcov = curve_fit(model_func,
                                   x_data,
                                   offsets.value,
                                   sigma=err_array.value,
                                   p0=beta0,
                                   absolute_sigma=True,
                                   method='lm',
                                   maxfev=10000)

            model_values = model_func(x_data, *popt)
            residuals = offsets.value - model_values

            if args.nbins:
                nbins = int(args.nbins)
                # Use quantiles to get bins with the same number of elements
                # in them.
                vprint(f'Generating {args.nbins} bins.')
                bins = np.quantile(arrays_dict[name],
                                   np.linspace(0, 1, nbins + 1),
                                   interpolation='nearest')
                bin_dict[name] = bins

            min_bin_size = 7
            sigma_sys_dict = {}
            num_params = 1
            for name in tqdm(plot_types):
                sigma_sys_list = []
                sigma_list = []
                bin_mid_list = []
                bin_num = -1
                for bin_lims in pairwise(bin_dict[name]):
                    bin_num += 1
                    lower, upper = bin_lims
                    bin_mid_list.append((lower + upper) / 2)
                    mask_array = ma.masked_outside(arrays_dict[name],
                                                   *bin_lims)
                    num_points = mask_array.count()
                    vprint(f'{num_points} values in bin ({lower},{upper})')
                    if num_points < min_bin_size:
                        vprint('Skipping this bin!')
                        sigma_list.append(np.nan)
                        sigma_sys_list.append(np.nan)
                        continue
                    temps_copy = temps[~mask_array.mask]
                    metals_copy = metals[~mask_array.mask]
                    mags_copy = mags[~mask_array.mask]
                    residuals_copy = residuals[~mask_array.mask]
                    errs_copy = err_array[~mask_array.mask].value
                    x_data_copy = np.stack(
                        (temps_copy, metals_copy, mags_copy), axis=0)

                    chi_squared_nu = fit.calc_chi_squared_nu(
                        residuals_copy, errs_copy, num_params)
                    sigma_sys_delta = 0.01
                    sigma_sys = -sigma_sys_delta
                    chi_squared_nu = np.inf
                    variances = np.square(errs_copy)
                    while chi_squared_nu > 1.0:
                        sigma_sys += sigma_sys_delta
                        variance_sys = np.square(sigma_sys)
                        variances_iter = variances + variance_sys
                        # err_iter = np.sqrt(np.square(errs_copy) +
                        #                    np.square(sigma_sys))
                        weights = 1 / variances_iter
                        wmean, sum_weights = np.average(residuals_copy,
                                                        weights=weights,
                                                        returned=True)

                        chi_squared_nu = fit.calc_chi_squared_nu(
                            residuals_copy - wmean, np.sqrt(variances_iter),
                            num_params)

                    sigma_sys_list.append(sigma_sys)
                    sigma = np.std(residuals_copy)
                    sigma_list.append(sigma)
                    # tqdm.write(f'sigma_sys is {sigma_sys:.3f}')
                    # tqdm.write(f'chi^2_nu is {chi_squared_nu}')
                    if sigma_sys / sigma > 1.2:
                        print('---')
                        print(bin_lims)
                        print(mask_array)
                        print(metals)
                        print(residuals)
                        print(num_params)
                        print(residuals_copy)
                        print(errs_copy)
                        print(sigma)
                        print(sigma_sys)
                        sys.exit()

                    # Store the result in the appropriate full array.
                    full_arrays_dict[name][eras[time], label_num,
                                           bin_num] = sigma_sys

                sigma_sys_dict[f'{name}_sigma_sys'] = sigma_sys_list
                sigma_sys_dict[f'{name}_sigma'] = sigma_list
                sigma_sys_dict[f'{name}_bin_mids'] = bin_mid_list

            # sigma = np.nanstd(residuals)

            for plot_type, lims in zip(plot_types,
                                       (temp_lims, mtl_lims, logg_lims)):
                ax = axes_dict[f'{plot_type}_{time}']
                ax.plot(sigma_sys_dict[f'{plot_type}_bin_mids'],
                        sigma_sys_dict[f'{plot_type}_sigma_sys'],
                        color='Black',
                        alpha=0.15,
                        zorder=2)
                # label=r'$\sigma_\mathrm{sys}$')
                # ax.plot(sigma_sys_dict[f'{plot_type}_bin_mids'],
                #         sigma_sys_dict[f'{plot_type}_sigma'],
                #         color='Blue', alpha=0.3,
                #         label=r'$\sigma$')
                if args.label:
                    ax.legend()

                # ax.annotate(r'$\sigma_\mathrm{sys}$:'
                #             f' {sys_err:.2f}',
                #             (0.01, 0.99),
                #             xycoords='axes fraction',
                #             verticalalignment='top')
                # ax.annotate(fr'$\chi^2_\nu$: {chi_squared_nu.value:.4f}'
                #             '\n'
                #             fr'$\sigma$: {sigma:.2f}',
                #             (0.99, 0.99),
                #             xycoords='axes fraction',
                #             horizontalalignment='right',
                #             verticalalignment='top')
                # data = np.array(ma.masked_invalid(residuals).compressed())

    for time in eras.keys():
        for name in plot_types:
            ax = axes_dict[f'{name}_{time}']
            means = []
            stds = []
            arr = full_arrays_dict[name]
            for i in range(0, np.size(arr, 2)):
                means.append(np.nanmean(arr[eras[time], :, i]))
                stds.append(np.nanstd(arr[eras[time], :, i]))
            ax.errorbar(sigma_sys_dict[f'{name}_bin_mids'],
                        means,
                        yerr=stds,
                        color='Red',
                        alpha=1,
                        marker='o',
                        markersize=4,
                        capsize=4,
                        elinewidth=2,
                        zorder=3,
                        label='Mean and stddev')
            ax.legend()

    plot_path = Path('/Users/dberke/Pictures/'
                     f'sigma_sys_stellar_parameter_dependance')
    if args.label:
        file_name = plot_path / f'{args.label}.png'
    elif args.nbins:
        file_name = plot_path / f'Combined_{model_name}_quantiles.png'
    else:
        file_name = plot_path / f'Combined_{model_name}_fixed_bins.png'
    # plt.show()
    comp_fig.savefig(str(file_name))
Code example #35
def main():
    import argparse
    parser = argparse.ArgumentParser(
        description='This script performs the T-test for '
                    'CICE simulations that should be bit-for-bit, but are not.')
    parser.add_argument('base_dir', \
                help='Path to the baseline history (iceh_inst*) files.  REQUIRED')
    parser.add_argument('test_dir', \
                help='Path to the test history (iceh_inst*) files.  REQUIRED')
    parser.add_argument('-v', '--verbose', dest='verbose', help='Print debug output?', \
                        action='store_true')
    parser.add_argument('-pt',
                        '--plot_type',
                        dest='plot_type',
                        help='Specify type of plot to create',
                        choices=['scatter', 'contour', 'pcolor'])

    parser.set_defaults(verbose=False)
    parser.set_defaults(plot_type='pcolor')

    # If no arguments are provided, print the help message
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # Set up the logger
    global logger
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    # Log to log file as well as stdout
    fh = logging.FileHandler(r'qc_log.txt', 'w')
    logger = logging.getLogger(__name__)
    logger.addHandler(fh)

    logger.info('Running QC test on the following directories:')
    logger.info('  {}'.format(args.base_dir))
    logger.info('  {}'.format(args.test_dir))

    dir_a, dir_b, files_base, files_test = gen_filenames(
        args.base_dir, args.test_dir)

    nfiles = len(files_base)

    nlon, nlat, t_lat, t_lon = get_geom(dir_a, files_base[0])

    data_base, data_test, data_diff = read_data(dir_a, dir_b, files_base,
                                                files_test, nlon, nlat)

    if np.ma.all(data_diff.mask):
        logger.info("Data is bit-for-bit.  No need to run QC test")
        sys.exit(0)

    # Run the two-stage test
    PASSED, H1_array = two_stage_test(data_base, nfiles, data_diff,
                                      files_base[0], dir_a)

    # Delete arrays that are no longer necessary
    del data_diff

    # If test failed, attempt to create a plot of the failure locations
    if not PASSED:
        plot_two_stage_failures(H1_array, t_lat, t_lon)

        # Create plots of mean ice thickness
        baseDir = os.path.abspath(args.base_dir).rstrip('history/').rstrip(\
                                                        'history').split('/')[-1]
        testDir = os.path.abspath(args.test_dir).rstrip('history/').rstrip( \
                                                        'history').split('/')[-1]
        plot_data(np.mean(data_base, axis=0), t_lat, t_lon, 'm', baseDir,
                  args.plot_type)
        plot_data(np.mean(data_test, axis=0), t_lat, t_lon, 'm', testDir,
                  args.plot_type)
        plot_data(np.mean(data_base-data_test,axis=0), t_lat, t_lon, 'm', '{}\n- {}'.\
                  format(baseDir,testDir), args.plot_type)

        logger.error('Quality Control Test FAILED')
        sys.exit(-1)

    # Create a northern hemisphere and southern hemisphere mask
    mask_tlat = t_lat < 0
    mask_nh = np.zeros_like(data_base)
    mask_sh = np.zeros_like(data_base)
    for (a, b), val in np.ndenumerate(mask_tlat):
        mask_nh[:, a, b] = val
        mask_sh[:, a, b] = not val

    # Run skill test on northern hemisphere
    data_nh_a = ma.masked_array(data_base, mask=mask_nh)
    data_nh_b = ma.masked_array(data_test, mask=mask_nh)
    if np.ma.all(data_nh_a.mask) and np.ma.all(data_nh_b.mask):
        logger.info("Northern Hemisphere data is bit-for-bit")
        PASSED_NH = True
    else:
        PASSED_NH = skill_test(dir_a, files_base[0], data_nh_a, data_nh_b,
                               nfiles, 'Northern')

    # Run skill test on southern hemisphere
    data_sh_a = ma.masked_array(data_base, mask=mask_sh)
    data_sh_b = ma.masked_array(data_test, mask=mask_sh)
    if np.ma.all(data_sh_a.mask) and np.ma.all(data_sh_b.mask):
        logger.info("Southern Hemisphere data is bit-for-bit")
        PASSED_SH = True
    else:
        PASSED_SH = skill_test(dir_a, files_base[0], data_sh_a, data_sh_b,
                               nfiles, 'Southern')

    PASSED_SKILL = PASSED_NH and PASSED_SH

    # Plot the ice thickness data for the base and test cases
    baseDir = os.path.abspath(args.base_dir).rstrip('history/').rstrip( \
                                                    'history').split('/')[-1]
    testDir = os.path.abspath(args.test_dir).rstrip('history/').rstrip( \
                                                    'history').split('/')[-1]
    plot_data(np.mean(data_base, axis=0), t_lat, t_lon, 'm', baseDir,
              args.plot_type)
    plot_data(np.mean(data_test, axis=0), t_lat, t_lon, 'm', testDir,
              args.plot_type)
    plot_data(np.mean(data_base-data_test,axis=0), t_lat, t_lon, 'm', '{}\n- {}'.\
              format(baseDir,testDir), args.plot_type)

    logger.info('')
    if not PASSED_SKILL:
        logger.error('Quality Control Test FAILED')
        sys.exit(1)  # exit with an error return code
    else:
        logger.info('Quality Control Test PASSED')
        sys.exit(0)  # exit with successful return code
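The element-by-element loop that builds mask_nh and mask_sh broadcasts a 2-D latitude test across the time axis; the same masks can be produced without the loop (a sketch assuming data has shape (nfiles, nlat, nlon) and t_lat has shape (nlat, nlon), with toy values):

import numpy as np

t_lat = np.linspace(-80.0, 80.0, 5)[:, None] * np.ones((5, 4))  # toy latitudes
data = np.zeros((3, 5, 4))                                      # toy (t, lat, lon) field

mask_tlat = t_lat < 0                                # True in the southern hemisphere
mask_nh = np.broadcast_to(mask_tlat, data.shape)     # masks SH points -> NH data
mask_sh = np.broadcast_to(~mask_tlat, data.shape)    # masks NH points -> SH data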
Code example #36
def wise_rgz_gurkan_lowsn():

    plt.ion()

    # WISE All-sky sample
    #
    filenames = [
        '%s/%s.fits' % (rgz_dir, x)
        for x in ('wise_allsky_2M', 'gurkan/gurkan_all', 'rgz_75_wise_16jan')
    ]
    labels = ('WISE all-sky sources', 'Gurkan+14 radio galaxies',
              'RGZ 75% radio galaxies')

    print ''
    for fname, label in zip(filenames, labels):
        with fits.open(fname) as f:
            d = f[1].data

        # Restrict the RGZ-WISE matches to 75% consensus
        if label == 'RGZ 75% radio galaxies':
            rgz75 = d['ratio'] >= 0.75
            snr_w1 = d['snr1'] >= wise_snr
            snr_w2 = d['snr2'] >= wise_snr
            snr_w3 = d['snr3'] >= wise_snr
            d = d[np.logical_not(rgz75 & snr_w1 & snr_w2 & snr_w3)]

        w1 = d['w1mpro']
        w2 = d['w2mpro']
        w3 = d['w3mpro']
        w4 = d['w4mpro']

        x = w2 - w3
        y = w1 - w2

        # AGN wedge is INCORRECTLY cited in Gurkan+14; check original Mateos+12 for numbers
        #
        wedge_lims = (y > -3.172 * x + 7.624) & (y > (0.315 * x - 0.222)) & (
            y < (0.315 * x + 0.796))
        #
        # Very rough loci from Wright et al. (2010)
        stars_lims = (x > 0) & (x < 1) & (y > 0.1) & (y < 0.4)
        el_lims = (x > 0.5) & (x < 1.3) & (y > 0.) & (y < 0.2)
        sp_lims = (x > 1.5) & (x < 3.0) & (y > 0.1) & (y < 0.4)

        agn_frac = wedge_lims.sum() / float(len(d))
        stars_frac = stars_lims.sum() / float(len(d))
        el_frac = el_lims.sum() / float(len(d))
        sp_frac = sp_lims.sum() / float(len(d))

        print 'Fraction of %25s in AGN wedge: %4.1f percent' % (label,
                                                                agn_frac * 100)
        print 'Fraction of %25s in stars locus: %4.1f percent' % (
            label, stars_frac * 100)
        print 'Fraction of %25s in elliptical locus: %4.1f percent' % (
            label, el_frac * 100)
        print 'Fraction of %25s in spiral locus: %4.1f percent' % (
            label, sp_frac * 100)
        print ''

    print ''

    # Bin data and look at differences?
    #

    with fits.open(filenames[0]) as f:
        d = f[1].data
        maglim_w1 = d['snr1'] > wise_snr
        maglim_w2 = d['snr2'] > wise_snr
        maglim_w3 = d['snr3'] < wise_snr
        wise = d[maglim_w1 & maglim_w2 & maglim_w3]
    with fits.open(filenames[2]) as f:
        d = f[1].data
        rgz75 = d['ratio'] >= 0.75
        snr_w1 = d['snr1'] >= wise_snr
        snr_w2 = d['snr2'] >= wise_snr
        snr_w3 = d['snr3'] <= wise_snr
        rgz = d[rgz75 & snr_w1 & snr_w2 & snr_w3]

    xmin, xmax = -1, 6
    ymin, ymax = -0.5, 3

    bins_w2w3 = np.linspace(xmin, xmax, 40)
    bins_w1w2 = np.linspace(ymin, ymax, 40)
    hw, xedges, yedges = np.histogram2d(wise['w2mpro'] - wise['w3mpro'],
                                        wise['w1mpro'] - wise['w2mpro'],
                                        bins=(bins_w2w3, bins_w1w2))
    hr, xedges, yedges = np.histogram2d(rgz['w2mpro'] - rgz['w3mpro'],
                                        rgz['w1mpro'] - rgz['w2mpro'],
                                        bins=(bins_w2w3, bins_w1w2))

    fig = plt.figure(1, (9, 8))
    fig.clf()

    hw_norm = hw / float(np.max(hw))
    hr_norm = hr / float(np.max(hr))

    hw_norm_masked = ma.masked_array(hw, mask=(hw < 10))
    hr_norm_masked = ma.masked_array(hr_norm, mask=(hr <= 10))

    extent = [bins_w2w3[0], bins_w2w3[-1], bins_w1w2[0], bins_w1w2[-1]]

    ax1 = fig.add_subplot(111, position=(0.10, 0.10, 0.75, 0.85))

    # WISE all-sky
    cmap = cm.YlOrRd
    cmap.set_bad('w')
    Z = hw_masked
    im1 = ax1.imshow(Z.T,
                     cmap=cmap,
                     alpha=1.0,
                     extent=extent,
                     interpolation='nearest',
                     origin='lower')
    '''
    fi = gaussian_filter(hw.T,0.5)
    levels=np.linspace(10,20000,10)
    CS = ax1.contour(bins_w2w3[1:],bins_w1w2[1:],fi,levels,colors='r',linewidths=1)
    '''

    # RGZ 75% catalog

    fi = gaussian_filter(hr.T, 0.5)
    levels = np.linspace(3, hr.max(), 10)
    CS = ax1.contour(bins_w2w3[1:],
                     bins_w1w2[1:],
                     fi,
                     levels,
                     colors='b',
                     linewidths=1.5)
    CS.collections[0].set_label('RGZ 75%')

    # Gurkan

    with fits.open(filenames[1]) as f:
        gurkan = f[1].data

    ax1.scatter(gurkan['w2mpro'] - gurkan['w3mpro'],
                gurkan['w1mpro'] - gurkan['w2mpro'],
                color='g',
                s=10,
                label='PRGs (Gurkan+14)')

    xb, yb = 2.250, 0.487
    xt, yt = 1.958, 1.413

    xab = np.linspace(xb, 6, 100)
    xat = np.linspace(xt, 6, 100)
    xal = np.linspace(xb, xt, 100)

    yab = 0.315 * xab - 0.222
    yat = 0.315 * xat + 0.796
    yal = -3.172 * xal + 7.624

    ax1.plot(xab, yab, color='k', linestyle='--', label='AGN "wedge"')
    ax1.plot(xat, yat, color='k', linestyle='--')
    ax1.plot(xal, yal, color='k', linestyle='--')

    ax1.set_xlabel(r'$(W2-W3)$', fontsize=20)
    ax1.set_ylabel(r'$(W1-W2)$', fontsize=20)
    ax1.set_xlim(xmin, xmax)
    ax1.set_ylim(ymin, ymax)
    ax1.set_aspect('auto')

    cb_position = fig.add_axes([0.88, 0.1, 0.02, 0.85])
    cb = plt.colorbar(im1, cax=cb_position, orientation='vertical')
    cb.set_label('WISE all-sky sources', fontsize=16)

    h, l = ax1.get_legend_handles_labels()
    ax1.legend(h, l, loc='upper left', scatterpoints=2)

    plt.show()

    fig.savefig('%s/figures/wise_colorcolor_lowsn.eps' % paper_dir)

    return None
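
The masking step above is the standard trick for blanking low-count bins before imshow. A minimal standalone sketch of the same idiom (random data; the 10-count threshold mirrors the code above):

import numpy as np
from numpy import ma
import matplotlib.pyplot as plt

counts, xe, ye = np.histogram2d(np.random.randn(5000), np.random.randn(5000), bins=40)
masked = ma.masked_array(counts, mask=(counts < 10))   # hide sparsely populated bins
cmap = plt.get_cmap('YlOrRd').copy()
cmap.set_bad('w')                                      # masked bins render as white
plt.imshow(masked.T, cmap=cmap, origin='lower', interpolation='nearest')
plt.show()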
Code example #37
File: iomethods.py  Project: toowzh/grab-tag-graph
def read_data(varName, latName, lonName, userVariables, filelist=None):
    '''
        Purpose::
            Read gridded data into (t, lat, lon) arrays for processing

        Inputs::
            varName: a string representing the variable name to use from the file
            latName: a string representing the latitude from the file's metadata
            lonName: a string representing the longitude from the file's metadata
            userVariables: a UserVariables object holding the run configuration
                (input directory, lat/lon bounds, filelist, T_BB_MAX, ...)
            filelist (optional): a list of strings representing the filenames between the start and end dates provided

        Returns::
            A 3D masked array (t, lat, lon) with only the values that meet the minimum temperature
            criterion for each frame, plus the matching time list and the LAT/LON grids

        Assumptions::
            (1) All the files requested to extract data are from the same instrument/model, and thus have the same
            metadata properties (varName, latName, lonName) as entered
            (2) Assumes rectilinear grids for input datasets i.e. lat, lon will be 1D arrays
    '''

    global LAT
    global LON

    timeName = 'time'

    filelistInstructions = userVariables.DIRS['CEoriDirName'] + '/*'

    if filelist is not None:
        userVariables.filelist = filelist
    elif userVariables.filelist is None:
        userVariables.filelist = glob.glob(filelistInstructions)

    userVariables.filelist.sort()

    inputData = []
    timelist = []
    time2store = None
    tempMaskedValueNp = []

    nfiles = len(userVariables.filelist)

    # Crash nicely if there are no netCDF files
    if nfiles == 0:
        print('Error: no files in this directory! Exiting elegantly')
        sys.exit()
    else:
        # Open the first file in the list to read in lats, lons and generate the grid for comparison
        tmp = Dataset(userVariables.filelist[0], 'r', format='NETCDF4')

        alllatsraw = tmp.variables[latName][:]
        alllonsraw = tmp.variables[lonName][:]
        alllonsraw[alllonsraw > 180] = alllonsraw[
            alllonsraw > 180] - 360.  # convert to -180,180 if necessary

        # Get the lat/lon info data (different resolution)
        latminNETCDF = utils.find_nearest(alllatsraw,
                                          float(userVariables.LATMIN))
        latmaxNETCDF = utils.find_nearest(alllatsraw,
                                          float(userVariables.LATMAX))
        lonminNETCDF = utils.find_nearest(alllonsraw,
                                          float(userVariables.LONMIN))
        lonmaxNETCDF = utils.find_nearest(alllonsraw,
                                          float(userVariables.LONMAX))
        latminIndex = (np.where(alllatsraw == latminNETCDF))[0][0]
        latmaxIndex = (np.where(alllatsraw == latmaxNETCDF))[0][0]
        lonminIndex = (np.where(alllonsraw == lonminNETCDF))[0][0]
        lonmaxIndex = (np.where(alllonsraw == lonmaxNETCDF))[0][0]

        # Subsetting the data
        latsraw = alllatsraw[latminIndex:latmaxIndex]
        lonsraw = alllonsraw[lonminIndex:lonmaxIndex]

        LON, LAT = np.meshgrid(lonsraw, latsraw)

        latsraw = []
        lonsraw = []
        tmp.close()

    for files in userVariables.filelist:
        try:
            thisFile = Dataset(files, 'r', format='NETCDF4')
            # Clip the dataset according to user lat, lon coordinates
            # Mask the data and fill with zeros for later
            tempRaw = thisFile.variables[
                varName][:, latminIndex:latmaxIndex,
                         lonminIndex:lonmaxIndex].astype('int16')
            tempMask = ma.masked_array(tempRaw,
                                       mask=(tempRaw > userVariables.T_BB_MAX),
                                       fill_value=0)
            # Get the actual values that the mask returned

            # timeIndex, latIndex, lonIndex = index

            tempMaskedValue = tempMask
            tempMaskedValue[tempMask.mask] = 0

            xtimes = thisFile.variables[timeName]

            # Convert this time to a python datastring
            time2store, _ = get_model_times(xtimes, timeName)

            # Extend instead of append because get_model_times returns a list already and we don't
            # want a list of list
            timelist.extend(time2store)
            inputData.extend(tempMaskedValue)
            thisFile.close()

        except Exception:
            print('bad file! ', files)

    inputData = ma.array(inputData)

    return inputData, timelist, LAT, LON, userVariables
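
A standalone sketch of the mask-and-fill idiom used in read_data (the threshold and array values below are illustrative, not taken from the project):

import numpy as np
from numpy import ma

T_BB_MAX = 243   # hypothetical brightness-temperature ceiling in K
frame = np.array([[250, 240], [230, 260]], dtype='int16')
tempMask = ma.masked_array(frame, mask=(frame > T_BB_MAX), fill_value=0)
tempMask[tempMask.mask] = 0   # zero out the too-warm pixels, exactly as above
print(tempMask)               # only values <= T_BB_MAX survive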
Code example #38
File: pp_bottom.py  Project: kimiamote/LakeTranRS
    tp = pd.read_csv('../simulations/id/{:03d}/totp.csv.bz2'.format(id),
                     header=None).values
    o2a = pd.read_csv('../simulations/id/{:03d}/O2abs.csv.bz2'.format(id),
                      header=None).values
    # o2r = pd.read_csv('../simulations/id/{:03d}/O2rel.csv.bz2'.format(id), header=None).as_matrix()
    a[x1 - 1, x2 - 1, x3 - 1, x4 - 1, :, :, 0] = t
    a[x1 - 1, x2 - 1, x3 - 1, x4 - 1, :, :, 1] = chl
    a[x1 - 1, x2 - 1, x3 - 1, x4 - 1, :, :, 2] = tp
    a[x1 - 1, x2 - 1, x3 - 1, x4 - 1, :, :, 3] = o2a
    # a[x1-1, x2-1, x3-1, x4-1, :, :, 4] = o2r

for i, x1, x2, x3, x4, id in d.itertuples():
    if not os.path.exists('../simulations/id/{:03d}/t.csv.bz2'.format(id)):
        m[x1 - 1, x2 - 1, x3 - 1, x4 - 1, :, :, :] = True

a = ma.masked_array(a, mask=m)

## mean surface chl over days 180-270 of the last year
r1 = a[:, :, :, :, (365 * 3 + 180):(365 * 3 + 270), 0, 1].mean(axis=4)
r1max = r1.max()
r1min = r1.min()

## number of days of anoxia (O2 < 0.01) at depth layer 80 in the last year
r2 = (a[:, :, :, :, (365 * 3):, 80, 3] < 0.01).sum(axis=4)
r2max = r2.max()
r2min = r2.min()

cmg = plt.get_cmap('Greens')
normg = matplotlib.colors.Normalize(r1min, r1max, True)

cmr = plt.get_cmap('Reds')
Code example #39
File: spectrogram.py  Project: Cadair/cube
    def plot(self,
             figure=None,
             overlays=[],
             colorbar=True,
             vmin=None,
             vmax=None,
             linear=True,
             showz=True,
             yres=DEFAULT_YRES,
             max_dist=None,
             **matplotlib_args):
        """
        Plot spectrogram onto figure.

        Parameters
        ----------
        figure : matplotlib.figure.Figure
            Figure to plot the spectrogram on. If None, new Figure is created.
        overlays : list
            List of overlays (functions that receive figure and axes and return
            new ones) to be applied after drawing.
        colorbar : bool
            Flag that determines whether or not to draw a colorbar. If existing
            figure is passed, it is attempted to overdraw old colorbar.
        vmin : float
            Clip intensities lower than vmin before drawing.
        vmax : float
            Clip intensities higher than vmax before drawing.
        linear :  bool
            If set to True, "stretch" image to make frequency axis linear.
        showz : bool
            If set to True, the value of the pixel that is hovered with the
            mouse is shown in the bottom right corner.
        yres : int or None
            To be used in combination with linear=True. If None, sample the
            image with half the minimum frequency delta. Else, sample the
            image to be at most yres pixels in vertical dimension. Defaults
            to 1080 because that's a common screen size.
        max_dist : float or None
            If not None, mask elements that are further than max_dist away
            from actual data points (ie, frequencies that actually have data
            from the receiver and are not just nearest-neighbour interpolated).
        """
        # [] as default argument is okay here because it is only read.
        # pylint: disable=W0102,R0914
        if linear:
            delt = yres
            if delt is not None:
                delt = max(
                    (self.freq_axis[0] - self.freq_axis[-1]) / (yres - 1),
                    _min_delt(self.freq_axis) / 2.)
                delt = float(delt)

            data = _LinearView(self.clip_values(vmin, vmax), delt)
            freqs = np.arange(self.freq_axis[0], self.freq_axis[-1],
                              -data.delt)
        else:
            data = np.array(self.clip_values(vmin, vmax))
            freqs = self.freq_axis

        if figure is None:
            figure = plt.gcf()

        if figure.axes:
            axes = figure.axes[0]
        else:
            axes = figure.add_subplot(111)

        params = {
            'origin': 'lower',
            'aspect': 'auto',
        }
        params.update(matplotlib_args)
        if linear and max_dist is not None:
            toplot = ma.masked_array(data, mask=data.make_mask(max_dist))
        else:
            toplot = data
        im = axes.imshow(toplot, **params)

        xa = axes.get_xaxis()
        ya = axes.get_yaxis()

        xa.set_major_formatter(FuncFormatter(self.time_formatter))

        if linear:
            # Start with a number that is divisible by 5.
            init = (self.freq_axis[0] % 5) / data.delt
            nticks = 15.
            # Calculate MHz difference between major ticks.
            dist = (self.freq_axis[0] - self.freq_axis[-1]) / nticks
            # Round to next multiple of 10, at least ten.
            dist = max(round(dist, -1), 10)
            # One pixel in image space is data.delt MHz, thus we can convert
            # our distance between the major ticks into image space by dividing
            # it by data.delt.

            ya.set_major_locator(IndexLocator(dist / data.delt, init))
            ya.set_minor_locator(IndexLocator(dist / data.delt / 10, init))

            def freq_fmt(x, pos):
                # This is necessary because matplotlib somehow tries to get
                # the mid-point of the row, which we do not need here.
                x = x + 0.5
                return self.format_freq(self.freq_axis[0] - x * data.delt)
        else:
            freq_fmt = _list_formatter(freqs, self.format_freq)
            ya.set_major_locator(MaxNLocator(integer=True, steps=[1, 5, 10]))

        ya.set_major_formatter(FuncFormatter(freq_fmt))

        axes.set_xlabel(self.t_label)
        axes.set_ylabel(self.f_label)
        # figure.suptitle(self.content)

        figure.suptitle(' '.join([
            get_day(self.start).strftime("%d %b %Y"),
            'Radio flux density',
            '(' + ', '.join(self.instruments) + ')',
        ]))

        for tl in xa.get_ticklabels():
            tl.set_fontsize(10)
            tl.set_rotation(30)
        figure.add_axes(axes)
        figure.subplots_adjust(bottom=0.2)
        figure.subplots_adjust(left=0.2)

        if showz:
            axes.format_coord = self._mk_format_coord(
                data,
                figure.gca().format_coord)

        if colorbar:
            if len(figure.axes) > 1:
                Colorbar(figure.axes[1], im).set_label("Intensity")
            else:
                figure.colorbar(im).set_label("Intensity")

        for overlay in overlays:
            figure, axes = overlay(figure, axes)

        for ax in figure.axes:
            ax.autoscale()
        if isinstance(figure, SpectroFigure):
            figure._init(self, freqs)
        return axes
Code example #40
    def test_with_masked_constant(self):
        masked_data = ma.masked_array([8], mask=True)
        masked_constant = masked_data[0]
        result = as_lazy_data(masked_constant)
        self.assertIsInstance(result, da.core.Array)
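
For context, scalar indexing of a fully masked array yields numpy's shared masked constant, which is what the test feeds to as_lazy_data; a quick sketch:

import numpy.ma as ma

masked_data = ma.masked_array([8], mask=True)
print(masked_data[0] is ma.masked)   # True: the 0-d MaskedConstant singleton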
Code example #41
# fig = matplotlib.pyplot.gcf()
# cax = matplotlib.pyplot.gca()
# fig.set_size_inches(20, 20)
# fig.subplots_adjust(right=0.93)
# cbar_ax = fig.add_axes([0.95, 0.15, 0.01, 0.7])
# _ = fig.colorbar(ax2, cax=cbar_ax)

# paf vectors for right elbow and right wrist

import numpy as np
import matplotlib.pyplot as plt
from numpy import ma

# paf_avg is assumed to be the averaged part-affinity-field output of the
# pose-estimation network; channels 16/17 hold the x/y components of this limb
U = paf_avg[:, :, 16] * -1
V = paf_avg[:, :, 17]
X, Y = np.meshgrid(np.arange(U.shape[1]), np.arange(U.shape[0]))
M = np.zeros(U.shape, dtype='bool')
M[U**2 + V**2 < 0.5 * 0.5] = True   # hide vectors with magnitude below 0.5
U = ma.masked_array(U, mask=M)
V = ma.masked_array(V, mask=M)

# 1
plt.figure()
# plt.imshow(oriImg[:,:,[2,1,0]], alpha = .5)
s = 5
Q = plt.quiver(X[::s, ::s],
               Y[::s, ::s],
               U[::s, ::s],
               V[::s, ::s],
               scale=50,
               headaxislength=4,
               alpha=.5,
               width=0.001,
               color='r')
Code example #42
File: avgCRU.py  Project: zhenkunl/WRF-Tools
  add_var(outdata, 'rain', dims, values=climdata['rain'].filled(fill_value), atts=atts, fill_value=fill_value)
  # 2m mean Temperature
  atts = dict(long_name='Temperature at 2m', units='K')
  add_var(outdata, 'T2', dims, values=climdata['T2'].filled(fill_value), atts=atts, fill_value=fill_value)  
  # 2m maximum Temperature
  atts = dict(long_name='Maximum 2m Temperature', units='K')
  add_var(outdata, 'Tmax', dims, values=climdata['Tmax'].filled(fill_value), atts=atts, fill_value=fill_value)  
  # 2m minimum Temperature
  atts = dict(long_name='Minimum 2m Temperature', units='K')
  add_var(outdata, 'Tmin', dims, values=climdata['Tmin'].filled(fill_value), atts=atts, fill_value=fill_value)  
  # 2m water vapor
  atts = dict(long_name='Water Vapor Pressure at 2m', units='hPa')
  add_var(outdata, 'Q2', dims, values=climdata['Q2'].filled(fill_value), atts=atts, fill_value=fill_value)  
  # land mask
  atts = dict(long_name='Land Mask', units='')
  tmp = ma.masked_array(ma.ones((datashape[1],datashape[2])), mask=dataMask)
  add_var(outdata, 'landmask', ('lat','lon'), values=tmp.filled(0)) # create climatology variables  
#   for (key,value) in indata.iteritems():
#     copy_vars(outdata, value, [key], namemap=varlist, copy_data=False, fill_value=) # , incl_=True
#     outdata.variables[key][:,:,:] = climdata[key] 
        
  
#  ## dataset feedback and diagnostics
#  # print dataset meta data
#  print outdata
#  # print dimensions meta data
#  for dimobj in outdata.dimensions.values():
#    print dimobj
#  # print variable meta data
#  for varobj in outdata.variables.values():
#    print varobj
Code example #43
def famed(woa_oxyg_dh,
          woa_oxyg_dh_m,
          temp_cl,
          temp_cl_m,
          depth,
          woa_rhom=None):

    import numpy as np
    from numpy import ma

    # Main FAME computation ...
    # =========================

    #	INPUTS FOR THE FAME CALCULATIONS
    #		woa_oxyg_dh   == d18sw       in seawater <time mean> shape is [102, 180, 360]    i.e. depth, lat, lon
    #		woa_oxyg_dh_m == d18sw       in seawater monthly     shape is [12, 57, 180, 360] i.e. time, depth, lat, lon
    #		temp_cl       == temperature in seawater <time mean> shape is [1, 102, 180, 360] i.e. time (degenerate), depth, lat, lon
    #		temp_cl_m     == temperature in seawater monthly     shape is [12, 57, 180, 360] i.e. time, depth, lat, lon
    #		depth         == depth of the levels in meters assumed positive here

    # FAME is coded in Kelvins ...
    temp_kl = temp_cl + 273.15
    temp_kl_m = temp_cl_m + 273.15

    # Computation of equilibrium calcite from WOA fields ...
    delt_dh_init = delta_c_eq(temp_cl[...], woa_oxyg_dh)
    delt_dh_init_m = delta_c_eq(temp_cl_m, woa_oxyg_dh_m)

    # Added the computation of the equation of Marchitto everywhere, no weighting.

    # Probably useless, only written for compatibility
    #~ d18Oca_mar   =  ma.mean(delta_c_mar(temp_cl  ,woa_oxyg_dh),axis=0)

    #  Actual calculation here
    d18Oca_mar_m = ma.mean(delta_c_mar(temp_cl_m, woa_oxyg_dh_m), axis=0)

    if woa_rhom is not None:
        # [DENSITY] -- addition of density from WOA
        rho_m = woa_rhom
    #endif

    # i.e. auld method, d18Oca averaged over 50 meters ... == "Lukas Jonkers" methodology
    depth_50m = find_closest(depth, 50.0)
    depth_00m = find_closest(depth, 0.0)

    indx_less_50m = depth <= 50.0

    #~ if depth_50m == depth_00m : depth_50m += 1

    # NOTA: all *_lj variables have a dimension without time and depth
    #       e.g. [180,360], lat, lon

    d18Osw_lj = ma.mean(woa_oxyg_dh[indx_less_50m, ...], axis=0)
    tempcl_lj = ma.mean(temp_cl[indx_less_50m, ...], axis=0)
    d18Oca_lj = delta_c_eq(tempcl_lj, d18Osw_lj)

    d18Osw_ol = woa_oxyg_dh[depth_00m, ...]
    tempcl_ol = temp_cl[depth_00m, ...]
    d18Oca_ol = delta_c_eq(tempcl_ol, d18Osw_ol)

    if woa_rhom is not None:
        # [DENSITY] -- addition of density from WOA
        rhom_ol = ma.mean(woa_rhom[:, depth_00m, ...], axis=0)
    #endif

    import forams_prod_l09 as fpl

    # Maximum shape of result: nb_forams, lat, lon
    max_shape_final = (len(fpl.l09_cnsts_dic), ) + d18Osw_ol.shape

    # Create a placeholder for the foram result
    delt_forams = ma.zeros(max_shape_final, np.float32)

    if woa_rhom is not None:
        # [DENSITY] -- addition of density from WOA
        rhom_forams = ma.zeros(max_shape_final, np.float32)
    #endif

    for foram_specie in fpl.l09_cnsts_dic:

        # Rate of growth from the Lombard et al., 2009 methodology
        foram_growth = fpl.growth_rate_l09_array(foram_specie, temp_kl[...])
        foram_growth_m = fpl.growth_rate_l09_array(foram_specie, temp_kl_m)

        # Get the depth of the STD living foram in FAME
        f_dept = fpl.get_living_depth(foram_specie)

        # Find this depth as an index in the array water column
        indx_dfm = find_closest(depth, abs(float(f_dept[0])))
        #~ if indx_dfm == depth_00m : indx_dfm += 1

        indx_less_dfm = depth <= np.abs(float(f_dept[0]))

        # Shrink the FAME arrays to the foram living depth
        foram_growth = foram_growth[indx_less_dfm,
                                    ...]  # shape is depth, lat, lon
        foram_growth_m = foram_growth_m[:, indx_less_dfm,
                                        ...]  # shape is time, depth, lat, lon

        # Do the same for the equilibrium calcite from WOA
        delt_dh = delt_dh_init[indx_less_dfm, ...]  # idem
        delt_dh_m = delt_dh_init_m[:, indx_less_dfm, ...]  # idem

        # [DENSITY] -- addition of density from WOA
        if woa_rhom is not None:
            rho_m_specie = rho_m[:, indx_less_dfm, ...]  # idem
        #endif

        # Get the location where there is SOME growth, based on a certain epsilon
        epsilon_growth = 0.1 * fpl.l09_maxgrowth_dic[foram_specie][
            0]  # or 0.032

        # Mask out the regions where the foram_growth is less than the epsilon
        masked_f_growth = ma.masked_less_equal(foram_growth, epsilon_growth)

        #~ nb_points_growth = (masked_f_growth * 0.0 + 1.0).filled(0.0)
        #~ if monthly is True:
        #~ nb_points_growth_m = (masked_f_growth_m * 0.0 + 1.0).filled(0.0)

        #~ nb_points_growth = ma.where(foram_growth > epsilon_growth,1,0) # 0.000001
        #~ if monthly is True:
        #~ nb_points_growth_m = ma.where(foram_growth_m > epsilon_growth,1,0) # 0.000001

        # Now sum the growth over the depth ...
        f_growth = ma.sum(masked_f_growth, axis=0)
        #~ n_growth = ma.where(ma.sum(nb_points_growth,axis=0)>0,1,0) # axis 0 = depth

        masked_f_growth_m = ma.masked_less_equal(foram_growth_m,
                                                 epsilon_growth)
        f_growth_m = ma.sum(ma.sum(masked_f_growth_m, axis=1), axis=0)
        location_max_foramprod = ma.argmax(masked_f_growth_m, axis=1)
        location_max_foramprod = ma.masked_array(
            location_max_foramprod,
            mask=masked_f_growth_m[:, :, ...].mask.all(axis=1))
        # location_max_foramprod = ma.masked_array(location_max_foramprod,mask=masked_f_growth_m[:,0,...].mask)

        # Computing the weighted sum for d18Ocalcite using growth over depth
        delt_fp = ma.sum(delt_dh * masked_f_growth, axis=0)
        delt_fp_m = ma.sum(ma.sum(delt_dh_m * masked_f_growth_m, axis=1),
                           axis=0)

        # [DENSITY] -- addition of density from WOA
        if woa_rhom is not None:
            rho_fp_m = ma.sum(ma.sum(rho_m_specie * masked_f_growth_m, axis=1),
                              axis=0)
        #endif

        # Mask out the points where no growth occur at all, in order to avoid NaNs ...
        delt_fp = delt_fp / ma.masked_less_equal(f_growth, 0.0)
        delt_fp_m = delt_fp_m / ma.masked_less_equal(f_growth_m, 0.0)
        if woa_rhom is not None:
            # [DENSITY] -- addition of density from WOA
            rho_fp_m = rho_fp_m / ma.masked_less_equal(f_growth_m, 0.0)
        #endif

        # Result of FAME
        Z_om_fm = delt_fp
        Z_om_fm_m = ma.masked_array(delt_fp_m,
                                    mask=ma.max(location_max_foramprod[:, ...],
                                                axis=0).mask)

        # [DENSITY] -- addition of density from WOA
        if woa_rhom is not None:
            Z_om_rho_m = ma.masked_array(rho_fp_m,
                                         mask=ma.max(
                                             location_max_foramprod[:, ...],
                                             axis=0).mask)
        #endif

        if foram_specie == "pachy_s":
            Z_om_fm = Z_om_fm + 0.1  # in per mil
            Z_om_fm_m = Z_om_fm_m + 0.1  # in per mil

        index_for = list(fpl.l09_cnsts_dic.keys()).index(foram_specie)
        delt_forams[index_for, ...] = Z_om_fm_m

        # [DENSITY] -- addition of density from WOA
        if woa_rhom is not None:
            rhom_forams[index_for, ...] = Z_om_rho_m
        #endif

    #endfor on foram_specie

    # For comparison with Lukas Jonkers: old method on first 50 meters
    Z_om_lj = d18Oca_lj

    # For comparison with previous figures: old method on first 00 meters
    Z_om_ol = d18Oca_ol

    # [DENSITY] -- addition of density from WOA
    if woa_rhom is not None:
        print("Fame is used with density ...")
        return delt_forams, Z_om_ol, d18Oca_mar_m, rhom_forams, rhom_ol
    else:
        print("Fame is used without density ...")
        return delt_forams, Z_om_ol, d18Oca_mar_m
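
The growth-weighted sums in famed implement, per species, a vertical average of equilibrium calcite weighted by growth rate; in the notation of the comments (a sketch of the relation, not a formula quoted from the source):

\[
\delta^{18}O_c^{\mathrm{fp}} = \frac{\sum_{z \le z_{\mathrm{liv}}} \delta_c^{\mathrm{eq}}\!\left(T_z,\, \delta^{18}O_{sw,z}\right) g_z}{\sum_{z \le z_{\mathrm{liv}}} g_z}
\]

where g_z is the Lombard et al. (2009) growth rate at depth z and z_liv the species' living depth; columns with total growth <= 0 stay masked, which is what the masked_less_equal divisions guard against.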
Code example #44
def main():
	args = {
		'doc': 'chain.pkl',
		'allWalkerGraph': False,
		'firstWalkerHist': False,
		'triangle': False,
		'version': 2,
		'filter' : False,
		'firstTrial' : 0
	} 

	#args['doc'] = sys.argv[1]
	options = []
	i = 1
	while i < len(sys.argv):
		if sys.argv[i][0] == '-':
			if sys.argv[i] == '-awg':
				args['allWalkerGraph'] = True
			elif sys.argv[i] == '-fwh':
				args['firstWalkerHist'] = True
			elif sys.argv[i] == '-tri':
				args['triangle'] = True
			elif sys.argv[i] == '-fil':
				args['filter'] = True
		else:
			if sys.argv[i-1][0] == '-':
				if sys.argv[i-1] == '-v':
					args['version'] = int(sys.argv[i])
				if sys.argv[i-1] == '-tr':
					args['firstTrial'] = int(sys.argv[i])
			elif sys.argv[i-1] == 'chainInterpreter.py':
				args['doc'] = sys.argv[i]
		i = i + 1

	print(args)

	with open(args['doc'], 'rb') as doc:
		chain = pickle.load(doc)

	if args['version'] == 2:
		chain = np.exp(chain)

	nwalkers, nruns, ndim = chain.shape
	# print chain.shape
	chain = chain[:,args['firstTrial']:nruns,:]
	# print chain.shape

	if args['filter']:
		chain = ma.masked_array(chain, mask=False)
		for i in range(ndim):
			for j in range(nwalkers):
				stDev = np.std(chain[j,:,i])
				if stDev > .001:
					chain[j,:,i] = ma.masked  # mask traces whose spread exceeds the threshold

	if args['allWalkerGraph']:
		for i in range(ndim):
			plt.figure(i)
			data = chain[:,:,i].transpose()
			plt.plot(range(args['firstTrial'], nruns), data)
			average = np.average(data)
			plt.plot([0, nruns], [testParams[i], testParams[i]], 'k')
			plt.plot([0, nruns], [average, average], 'b')
			plt.xlabel('runs')
			plt.ylabel('value')
			plt.title(testParamNames[i] + ' = ' + str(testParams[i]) + ' | avg = %.2E' % average)
		plt.show()

	if args['firstWalkerHist']:
		plt.figure()
		plt.plot(range(nruns), chain[0,:,:])
		plt.show()

	if args['triangle']:
		dynRange = [(0, 1)] * 5
		samples = chain[:, :, :].reshape((-1, ndim))
		fig = corner.corner(samples, labels=testParamNames, truths=testParams)
		fig.savefig("triangle.png")
Code example #45
def zoom_mollview(sig, cmin=None, cmax=None, nest=True):
    from numpy.ma import masked_array
    from matplotlib.patches import Rectangle

    if cmin is None:
        cmin = np.min(sig)
    if cmax is None:
        cmax = np.max(sig)

    projected = hp.mollview(sig, return_projected_map=True, nest=nest)
    plt.clf()
    nmesh = 400
    loleft = -35
    loright = -30

    grid = hp.cartview(sig, latra=[-2.5,2.5], lonra=[loleft,loright], fig=1, xsize=nmesh, return_projected_map=True, nest=nest)
    plt.clf()

    nside = hp.npix2nside(len(sig))

    theta, phi = hp.pix2ang(nside, np.arange(hp.nside2npix(nside)))


    # Get position for the zoom window
    theta_min = 87.5/180*np.pi
    theta_max = 92.5/180*np.pi
    delta_theta = 0.55/180*np.pi
    phi_min = (180 - loleft)/180.0*np.pi
    phi_max = (180 - loright)/180.0*np.pi
    delta_phi = 0.55/180*np.pi

    angles = np.array([theta, phi]).T

    m0 = np.argmin(np.sum((angles - np.array([theta_max, phi_max]))**2, axis=1))
    m1 = np.argmin(np.sum((angles - np.array([theta_max, phi_min]))**2, axis=1))
    m2 = np.argmin(np.sum((angles - np.array([theta_min, phi_max]))**2, axis=1))
    m3 = np.argmin(np.sum((angles - np.array([theta_min, phi_min]))**2, axis=1))

    proj = hp.projector.MollweideProj(xsize=800)

    m0 = proj.xy2ij(proj.vec2xy(hp.pix2vec(ipix=m0, nside=nside)))
    m1 = proj.xy2ij(proj.vec2xy(hp.pix2vec(ipix=m1, nside=nside)))
    m2 = proj.xy2ij(proj.vec2xy(hp.pix2vec(ipix=m2, nside=nside)))
    m3 = proj.xy2ij(proj.vec2xy(hp.pix2vec(ipix=m3, nside=nside)))

    width = m0[1] - m1[1]
    height = m2[0] - m1[0]

    test_pro = np.full(shape=(400, 1400), fill_value=-np.inf)
    test_pro_1 = np.full(shape=(400, 1400), fill_value=-np.inf)
    test_pro[:,:800] = projected
    test_pro_1[:,1000:1400] = grid.data
    tt_0 = masked_array(test_pro, test_pro<-1000)
    tt_1 = masked_array(test_pro_1, test_pro_1<-1000)

    fig = plt.figure(frameon=False, figsize=(12,8))
    ax = fig.add_axes([0, 0, 1, 1])
    ax.axis('off')
    ax = fig.gca()
    plt.plot(np.linspace(m1[1]+width, 1000), np.linspace(m1[0], 0), 'k-')
    plt.plot(np.linspace(m1[1]+width, 1000), np.linspace(m2[0], 400), 'k-')
    plt.vlines(x=[1000, 1399], ymin=0, ymax=400)
    plt.hlines(y=[0,399], xmin=1000, xmax=1400)

    c = Rectangle((m1[1], m1[0]), width, height, color='k', fill=False, linewidth=3, zorder=100)
    ax.add_artist(c)
    cm = plt.cm.RdBu_r
#     cm = plt.cm.Blues
#     cm = plt.cm.coolwarm # Not working, I do not know why it is not working
#     cm.set_bad("white")
    im1 = ax.imshow(tt_0, cmap=cm, vmin=cmin, vmax=cmax)
    cbaxes1 = fig.add_axes([0.08,0.2,0.4,0.04])
    cbar1 = plt.colorbar(im1, orientation="horizontal", cax=cbaxes1)
    im2 = ax.imshow(tt_1, cmap=cm, vmin=cmin, vmax=cmax)
    cbaxes2 = fig.add_axes([1.02,0.285,0.025,0.43])
    cbar2 = plt.colorbar(im2, orientation="vertical", cax=cbaxes2)
    plt.xticks([])
    plt.yticks([])
    return fig
Code example #46
    def localizeCallback(self, data):
        mapToLaser = self.tfListener.lookupTransform('/map', '/laser',
                                                     rospy.Time(0))

        toLaserTransMatrix = np.asarray(mapToLaser[0][:2])

        carOrientQTMap = (data.pose.orientation.x, data.pose.orientation.y,
                          data.pose.orientation.z, data.pose.orientation.w)

        carYawMap = euler_from_quaternion(carOrientQTMap)[2]  #yaw

        toLaserRotMatrix = np.array([[np.cos(carYawMap), -np.sin(carYawMap)],
                                     [np.sin(carYawMap),
                                      np.cos(carYawMap)]])

        toLaser = lambda coords: (coords - toLaserTransMatrix).dot(
            toLaserRotMatrix)

        #finding goal waypoint and other metadata
        carPositionMap = np.array([data.pose.position.x, data.pose.position.y])

        carPositionLaser = toLaser(
            carPositionMap)  #this should be the zero vector

        #apply positional extrapolation based on localization delay
        extrap = self.extrapolatePosition()
        self.LOOKAHEAD_DISTANCE = self.decideLookahead()
        print(self.lookSpeedMultiplier())
        print(self.lookAngleMultiplier())

        pathPointsLaser = toLaser(np.asarray(self.path_points))

        distances = np.linalg.norm(pathPointsLaser - carPositionLaser, axis=1)
        mask = np.ones(len(distances), dtype=int)
        viable = np.where(
            np.logical_and((distances >= self.LOOKAHEAD_DISTANCE),
                           (pathPointsLaser[:, 0] >
                            (self.FOV_MULT * pathPointsLaser[:, 1]))))
        mask[viable] = 0

        viableMask = ma.masked_array(distances, mask=mask)
        waypointIndex = viableMask.argmin()
        waypoint = pathPointsLaser[waypointIndex]

        goalX = waypoint[0]
        goalY = waypoint[1]

        #calculate goal angle and velocity
        self.findCurveRadius = lambda distance, offset: distance**2 / (
            2 * np.abs(offset))
        self.turnRadius = self.findCurveRadius(distances[waypointIndex], goalY)
        self.curvature = 1 / self.turnRadius

        self.steeringAngle = np.arcsin(
            self.WHEELBASE / self.turnRadius) * np.sign(goalY)
        self.steeringAngle = np.clip(self.steeringAngle, -self.MAX_TURN_ANGLE,
                                     self.MAX_TURN_ANGLE)

        print("===========================")
        print("X: " + str(goalX))
        print("Y: " + str(goalY))
        print("R: " + str(self.turnRadius))
        print("angle: " + str(self.steeringAngle))
        print("ex: " + str(extrap[0]))
        print("ey: " + str(extrap[1]))
        print("LOOK: " + str(self.decideLookahead()))
        print("SPD: " + str(self.decideVelocity()))
        print("SPD-EST: " + str(self.currentSpeed))
        print("===========================")

        msg = drive_param()
        msg.velocity = self.decideVelocity()
        msg.angle = self.steeringAngle
        self.drivePub.publish(msg)
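
The waypoint search above is the masked-argmin idiom: mask out non-viable candidates, then minimize over what remains. A standalone sketch (values invented):

import numpy as np
from numpy import ma

distances = np.array([3.0, 1.0, 2.5, 4.0])
viable = np.array([True, False, True, True])   # e.g. lookahead and field-of-view tests
nearest = ma.masked_array(distances, mask=~viable).argmin()
print(nearest)   # 2 -- the closest candidate that passed the tests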
Code example #47
def test_non_masked_elements(mask, expected_idx, expected_element):
    """Test with a valid element."""
    a = ma.masked_array(np.arange(5), mask=mask)
    idx, element = _next_non_masked_element(a, 1)
    assert idx == expected_idx
    assert element == expected_element
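
For reference, a generic version of the helper under test can be built on ma.clump_unmasked (a sketch; metpy's actual _next_non_masked_element is a private function with its own implementation):

import numpy as np
import numpy.ma as ma

def next_non_masked_element(a, idx):
    # Return (index, value) of the first non-masked element at or after idx.
    clumps = ma.clump_unmasked(a[idx:])
    if not clumps:
        return None, None
    start = idx + clumps[0].start
    return start, a[start]

a = ma.masked_array(np.arange(5), mask=[False, True, True, False, False])
print(next_non_masked_element(a, 1))   # (3, 3)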
Code example #48
    def plot_map(self,
                 dataset,
                 attribute_data,
                 min_value=None,
                 max_value=None,
                 file=None,
                 my_title="",
                 filter=None,
                 background=None):
        """    Plots a 2D image of attribute given by 'name'. matplotlib required.
               The dataset must have a method 'get_2d_attribute' defined that returns
               a 2D array that is to be plotted. If min_value/max_value are given, all values
               that are smaller/larger than these values are set to min_value/max_value.
               Argument background is a value to be used for background. If it is not given,
               it is considered as a 1/100 under the minimum value of the array.
               Filter is a 2D array. Points where filter is > 0 are masked out (put into background).
        """
        import matplotlib
        matplotlib.use('Qt4Agg')

        from matplotlib.pylab import jet, imshow, colorbar, show, axis, savefig, close, figure, title
        from matplotlib.colors import Normalize
        from matplotlib.pylab import rot90

        if filter is not None:
            attribute_data = attribute_data[filter]
        coord_2d_data = dataset.get_2d_attribute(attribute_data=attribute_data)
        data_mask = coord_2d_data.mask
        #        if filter is not None:
        #            if isinstance(filter, ndarray):
        #                if not ma.allclose(filter.shape, coord_2d_data.shape):
        #                    raise StandardError, "Argument filter must have the same shape as the 2d attribute."
        #                filter_data = filter
        #            else:
        #                raise TypeError, "The filter type is invalid. A character string or a 2D numpy array allowed."
        #            filter_data = where(ma.filled(filter_data,1) > 0, 1,0)
        #            data_mask = ma.mask_or(data_mask, filter_data)
        nonmaskedmin = coord_2d_data.min() - .2 * (
            coord_2d_data.max() - coord_2d_data.min())
        if max_value is None:
            max_value = coord_2d_data.max()
        if min_value is None:
            min_value = nonmaskedmin

        coord_2d_data = ma.filled(coord_2d_data, min_value)
        if background is None:
            value_range = max_value - min_value
            background = min_value - value_range / 100
        coord_2d_data = ma.filled(
            ma.masked_array(coord_2d_data, mask=data_mask), background)

        # Our data uses NW as 0,0, while matplotlib uses SW for 0,0.
        # Rotate the data so the map is oriented correctly.
        coord_2d_data = rot90(coord_2d_data, 1)

        jet()
        figure()
        norm = Normalize(min_value, max_value)
        im = imshow(
            coord_2d_data,
            origin='lower',
            aspect='equal',
            interpolation=None,
            norm=norm,
        )

        tickfmt = '%4d'
        if isinstance(min_value, float) or isinstance(max_value, float):
            tickfmt = '%1.4f'
        colorbar(format=tickfmt)

        title(my_title)
        axis('off')
        if file:
            savefig(file)
            close()
        else:
            show()
Code example #49
def fromtextfile(fname,
                 delimitor=None,
                 commentchar='#',
                 missingchar='',
                 varnames=None,
                 vartypes=None):
    """
    Creates a mrecarray from data stored in the file `filename`.

    Parameters
    ----------
    fname : {file name/handle}
        Handle of an opened file.
    delimitor : {None, string}, optional
        Alphanumeric character used to separate columns in the file.
        If None, any (group of) white spacestring(s) will be used.
    commentchar : {'#', string}, optional
        Alphanumeric character used to mark the start of a comment.
    missingchar : {'', string}, optional
        String indicating missing data, and used to create the masks.
    varnames : {None, sequence}, optional
        Sequence of the variable names. If None, a list will be created from
        the first non empty line of the file.
    vartypes : {None, sequence}, optional
        Sequence of the variables dtypes. If None, it will be estimated from
        the first non-commented line.


    Ultra simple: the varnames are in the header, one line"""
    # Try to open the file.
    ftext = openfile(fname)

    # Get the first non-empty line as the varnames
    while True:
        line = ftext.readline()
        firstline = line[:line.find(commentchar)].strip()
        _varnames = firstline.split(delimitor)
        if len(_varnames) > 1:
            break
    if varnames is None:
        varnames = _varnames

    # Get the data.
    _variables = masked_array([
        line.strip().split(delimitor) for line in ftext
        if line[0] != commentchar and len(line) > 1
    ])
    (_, nfields) = _variables.shape
    ftext.close()

    # Try to guess the dtype.
    if vartypes is None:
        vartypes = _guessvartypes(_variables[0])
    else:
        vartypes = [np.dtype(v) for v in vartypes]
        if len(vartypes) != nfields:
            msg = "Attempting to %i dtypes for %i fields!"
            msg += " Reverting to default."
            warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
            vartypes = _guessvartypes(_variables[0])

    # Construct the descriptor.
    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
    mfillv = [ma.default_fill_value(f) for f in vartypes]

    # Get the data and the mask.
    # We just need a list of masked_arrays. It's easier to create it like that:
    _mask = (_variables.T == missingchar)
    _datalist = [
        masked_array(a, mask=m, dtype=t, fill_value=f)
        for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)
    ]

    return fromarrays(_datalist, dtype=mdescr)
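
A hedged usage sketch (the function above ships as numpy.ma.mrecords.fromtextfile; the sample data and temp-file handling are invented for illustration):

import os
import tempfile
from numpy.ma import mrecords

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write("name,age\nalice,30\n,40\n")   # the empty name field is the missing value
rec = mrecords.fromtextfile(f.name, ',')
print(rec.name)    # the second name comes back masked
os.remove(f.name)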
Code example #50
def compare_density():

    # WISE All-sky sample
    #
    filenames = [
        '%s/%s.fits' % (rgz_dir, x)
        for x in ('wise_allsky_2M', 'gurkan/gurkan_all', 'rgz_75_wise')
    ]
    labels = ('WISE all-sky sources', 'Gurkan+14 radio galaxies',
              'RGZ 75% radio galaxies')

    print('')
    for fname, label in zip(filenames, labels):
        with fits.open(fname) as f:
            d = f[1].data

        if label == 'RGZ 75% radio galaxies':
            d = d[d['ratio'] >= 0.75]

        # SNR cut
        snr_w1 = d['snr1'] >= wise_snr
        snr_w2 = d['snr2'] >= wise_snr
        snr_w3 = d['snr3'] >= wise_snr
        d = d[snr_w1 & snr_w2 & snr_w3]
        w1 = d['w1mpro']
        w2 = d['w2mpro']
        w3 = d['w3mpro']
        w4 = d['w4mpro']

        x = w2 - w3
        y = w1 - w2

        # AGN wedge is INCORRECTLY cited in Gurkan+14; check original Mateos+12 for numbers
        #
        wedge_lims = (y > -3.172 * x + 7.624) & (y > (0.315 * x - 0.222)) & (
            y < (0.315 * x + 0.796))
        #
        # Very rough loci from Wright et al. (2010)
        stars_lims = (x > 0) & (x < 1) & (y > 0.1) & (y < 0.4)
        el_lims = (x > 0.5) & (x < 1.3) & (y > 0.) & (y < 0.2)
        sp_lims = (x > 1.5) & (x < 3.0) & (y > 0.1) & (y < 0.4)

        agn_frac = wedge_lims.sum() / float(len(d))
        stars_frac = stars_lims.sum() / float(len(d))
        el_frac = el_lims.sum() / float(len(d))
        sp_frac = sp_lims.sum() / float(len(d))

        print('Fraction of %25s in AGN wedge: %4.1f percent' % (label, agn_frac * 100))
        print('Fraction of %25s in stars locus: %4.1f percent' % (label, stars_frac * 100))
        print('Fraction of %25s in elliptical locus: %4.1f percent' % (label, el_frac * 100))
        print('Fraction of %25s in spiral locus: %4.1f percent' % (label, sp_frac * 100))
        print('')

    print('')
    '''
    # Make empty arrays for TOPCAT
    #
    xb,yb = 2.250,0.487
    xt,yt = 1.958,1.413
    
    xab = np.linspace(xb,6,100)
    xat = np.linspace(xt,6,100)
    xal = np.linspace(xb,xt,100)
    
    yab = 0.315*xab - 0.222
    yat = 0.315*xat + 0.796
    yal =-3.172*xal + 7.624
    
    xall = np.append(xab,np.append(xat,xal))
    yall = np.append(yab,np.append(yat,yal))
    
    with open('%s/csv/agn_wedge.csv' % rgz_dir,'w') as f:
        for x,y in zip(xall,yall):
            print >> f,x,y
    '''

    # Bin data and look at differences?
    #

    with fits.open(filenames[0]) as f:
        wise = f[1].data
    with fits.open(filenames[2]) as f:
        rgz = f[1].data

    bins_w2w3 = np.linspace(-1, 7, 25)
    bins_w1w2 = np.linspace(-0.5, 3, 25)
    hw, xedges, yedges = np.histogram2d(wise['w2mpro'] - wise['w3mpro'],
                                        wise['w1mpro'] - wise['w2mpro'],
                                        bins=(bins_w2w3, bins_w1w2))
    hr, xedges, yedges = np.histogram2d(rgz['w2mpro'] - rgz['w3mpro'],
                                        rgz['w1mpro'] - rgz['w2mpro'],
                                        bins=(bins_w2w3, bins_w1w2))

    from matplotlib import pyplot as plt
    from matplotlib import cm
    fig = plt.figure(1, (10, 5))
    fig.clf()

    hw_norm = hw / float(np.max(hw))
    hr_norm = hr / float(np.max(hr))

    from numpy import ma

    hw_norm_masked = ma.masked_array(hw_norm, mask=(hw <= 10))
    hr_norm_masked = ma.masked_array(hr_norm, mask=(hr <= 10))

    extent = [bins_w2w3[0], bins_w2w3[-1], bins_w1w2[0], bins_w1w2[-1]]

    ax1 = fig.add_subplot(121)
    cmap = cm.jet
    cmap.set_bad('w')
    im1 = ax1.imshow(hw_norm_masked.T,
                     cmap=cmap,
                     alpha=1.0,
                     extent=extent,
                     vmin=0.,
                     vmax=1.,
                     interpolation='nearest',
                     origin='lower')
    ax1.set_title('WISE All-Sky')
    ax1.set_xlabel('(W2-W3)')
    ax1.set_ylabel('(W1-W2)')
    ax1.set_aspect('auto')

    ax2 = fig.add_subplot(122)
    cmap = cm.jet
    cmap.set_bad('w')
    im2 = ax2.imshow(hr_norm_masked.T,
                     cmap=cmap,
                     alpha=1.0,
                     extent=extent,
                     vmin=0.,
                     vmax=1.,
                     interpolation='nearest',
                     origin='lower')
    ax2.set_title('RGZ 75%')
    ax2.set_xlabel('(W2-W3)')
    ax2.set_aspect('auto')

    position = fig.add_axes([0.92, 0.1, 0.02, 0.80])
    cb = plt.colorbar(im2, cax=position, orientation='vertical')
    cb.set_label('Normalized ratio', fontsize=16)
    '''
    ax3 = fig.add_subplot(133)
    cmap = cm.jet
    im3 = ax3.imshow((np.log10(hr_norm/hw_norm)).T, alpha=1.0, extent=extent,interpolation='nearest', origin='lower')
    ax3.set_title('RGZ/WISE ratio')
    ax3.set_aspect('auto')
    
    position=fig.add_axes([0.92,0.1,0.02,0.80])
    cb = plt.colorbar(im3,cax=position,orientation='vertical')
    cb.set_label('log(ratio)',fontsize=16)
    '''

    #plt.show()

    fig.savefig('%s/wise_rgz_fractions.png' % rgz_dir)

    return None
Code example #51
def loadData(image, Oi, Om, Pm, size):
    if len(Pm.shape) > 2:
        Pm = Pm[..., 0]
    Pm = Pm.astype(np.uint8)  #parts mask
    Oi = Oi.astype(
        np.uint8
    )  #object instances #more explanation here: http://groups.csail.mit.edu/vision/datasets/ADE20K/
    Pm = cv2.resize(Pm, size)
    Oi = cv2.resize(Oi, size)
    Om = cv2.resize(Om, size)
    persons = (Om == 1831).astype(
        np.uint8)  #find all person instances. person label is 1831
    H, W = image.shape[0], image.shape[1]

    Oi = ma.masked_array(
        Oi, mask=np.logical_not(persons))  # eliminate the non-person instances
    Oi = np.ma.filled(Oi, 0)
    gt_boxes = []  #will have shape: [N,x1,y1,x2,y2,cls]
    masks_instances = []  #shape: [N,H,W,7]

    for x in np.unique(Oi):  #for each person
        if x == 0:
            continue
        per = (Oi == x).astype(np.uint8).copy()  #get the mask for a person
        per_before_erosion = per.copy()
        per = cv2.erode(
            per, np.ones((3, 3), np.uint8), iterations=2
        )  # the image is badly annotated so I eliminate some artifacts
        # OpenCV 3.x API; OpenCV 4.x returns only (contours, hierarchy)
        _, contours, hierarchy = cv2.findContours(per, 1, 2)  ######### from here
        if len(contours) == 0:
            continue
        x1 = 100000
        y1 = 100000
        x2 = -10000
        y2 = -10000
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            xw, yh = x + w, y + h
            if x < x1:
                x1 = x
            if y < y1:
                y1 = y
            if xw > x2:
                x2 = xw
            if yh > y2:
                y2 = yh
        if x2 - x1 < 30 or y2 - y1 < 30:  ######### to here i find the bbox of the person as the mask for the person might contain multiple blobs
            continue

        masks_for_person = np.zeros((H, W, 7),
                                    dtype=np.uint8)  # whole body + 6 parts
        masks_for_person[...,
                         0] = per_before_erosion  # the first mask is the body

        if True:  #######  ONLY BODY

            Partsmask = ma.masked_array(Pm, mask=np.logical_not(per))
            Partsmask = np.ma.filled(Partsmask, 0)
            Parts = Partsmask > 0
            torso = cv2.erode(
                per - Parts, np.ones((5, 5), np.uint8), iterations=2
            )  # the torso is not annotated. so i remove from the whole body, the parts=> torso
            _, contours, hierarchy = cv2.findContours(torso.copy(), 1, 2)
            torso2 = np.zeros(shape=torso.shape, dtype=np.uint8)
            maxArea, mx, my, mw, mh = 0, 0, 0, 0, 0
            for contour in contours:
                x, y, w, h = cv2.boundingRect(contour)
                if (w * h > maxArea):
                    maxArea = w * h
                    mx, my, mw, mh = x, y, w, h
            torso2[my:my + mh, mx:mx + mw] = torso[
                my:my + mh, mx:mx +
                mw]  ## again the torso is badly annotated that results in multiple blobs. so I select the biggest blob
            for p in np.unique(
                    Partsmask
            ):  # the parts mask contain labels that are not parts. so i skip over those labels
                if p not in body_parts_dict.keys():
                    continue

                part = (Partsmask == p).astype(np.uint8)  #select one body part
                masks_for_person[..., body_parts_dict[p]] = np.logical_or(
                    masks_for_person[..., body_parts_dict[p]], part
                )  #this is where I combine for example right upper leg and right lower leg
            masks_for_person[...,
                             2] = np.logical_or(masks_for_person[..., 2],
                                                torso2)  # here is the torso
            if float(np.sum(masks_for_person)) / float(
                    H * W * 7
            ) < 0.0001:  # sometimes the object instance mask exists but there is no body parts annotation. so I skip over it
                continue
        gt_boxes.append([x1, y1, x2, y2, 1])
        masks_instances.append(masks_for_person)
    if len(gt_boxes) == 0:
        return False, None, None, None, H, W
    masks_instances = np.array(masks_instances, dtype=np.uint8)
    gt_boxes = np.array(gt_boxes, dtype=np.float32)
    mask = masks_instances[
        0, :, :, 1]  # this mask is used for visualization in tensorboard
    return True, gt_boxes, masks_instances, mask, H, W
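
The person-selection step above (mask everything outside the chosen label, then fill with zeros) as a standalone sketch:

import numpy as np
from numpy import ma

labels = np.array([[3, 7], [7, 0]], dtype=np.uint8)   # toy instance map
person = labels == 7
kept = ma.filled(ma.masked_array(labels, mask=np.logical_not(person)), 0)
print(kept)   # [[0 7], [7 0]] -- only the selected instance survives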
Code example #52
def skill_test(path_a, fname, data_a, data_b, num_files, hemisphere):
    '''Calculate Taylor Skill Score'''
    # First calculate the weight attributed to each grid point (based on Area)
    nfid = nc.Dataset("{}/{}".format(path_a, fname), 'r')
    tarea = nfid.variables['tarea'][:]
    nfid.close()
    tarea = ma.masked_array(tarea, mask=data_a[0, :, :].mask)
    area_weight = tarea / np.sum(tarea)

    weighted_mean_a = 0
    weighted_mean_b = 0
    for i in np.arange(num_files):
        weighted_mean_a = weighted_mean_a + np.sum(
            area_weight * data_a[i, :, :])
        weighted_mean_b = weighted_mean_b + np.sum(
            area_weight * data_b[i, :, :])

    weighted_mean_a = weighted_mean_a / num_files
    weighted_mean_b = weighted_mean_b / num_files

    nonzero_weights = np.count_nonzero(area_weight)
    area_var_a = 0
    area_var_b = 0
    for t in np.arange(num_files):
        area_var_a = area_var_a + np.sum(
            area_weight * np.square(data_a[t, :, :] - weighted_mean_a))
        area_var_b = area_var_b + np.sum(
            area_weight * np.square(data_b[t, :, :] - weighted_mean_b))

    area_var_a = nonzero_weights / (num_files * nonzero_weights -
                                    1.) * area_var_a
    area_var_b = nonzero_weights / (num_files * nonzero_weights -
                                    1.) * area_var_b
    std_a = np.sqrt(area_var_a)
    std_b = np.sqrt(area_var_b)

    combined_cov = 0
    for i in np.arange(num_files):
        combined_cov = combined_cov + np.sum(area_weight*(data_a[i, :, :]-weighted_mean_a)*\
                                            (data_b[i, :, :]-weighted_mean_b))

    combined_cov = nonzero_weights / (num_files * nonzero_weights -
                                      1.) * combined_cov

    weighted_r = combined_cov / (std_a * std_b)

    s = np.square((1+weighted_r)*(std_a*std_b)/\
                 (area_var_a + area_var_b))

    logger.debug('%s Hemisphere skill score = %f', hemisphere, s)

    s_crit = 0.99
    if s < 0 or s > 1:
        logger.error('Skill score out of range for %s Hemisphere', hemisphere)
        return False
    elif s > s_crit:
        logger.info('Quadratic Skill Test Passed for %s Hemisphere',
                    hemisphere)
        return True
    else:
        logger.info('Quadratic Skill Test Failed for %s Hemisphere',
                    hemisphere)
        return False
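
In formula form, the statistic computed above is (a sketch assembled from the code, with r the area-weighted correlation and sigma_a, sigma_b the area-weighted standard deviations):

\[
S = \left[\frac{(1+r)\,\sigma_a \sigma_b}{\sigma_a^2 + \sigma_b^2}\right]^2
\]

S reaches 1 only when r = 1 and sigma_a = sigma_b, in the spirit of the Taylor (2001) skill-score family.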
Code example #53
def feedForwardWtaReadout(layers,
                          wtaStrength=1.,
                          offset=0.,
                          noiseMagnitude=1.,
                          inhStrength=None,
                          noWtaMask=False,
                          fixedPatternNoiseSigma=0.0):
    """
        Create a the connection matrix as a masked matrix of a feedforward network with a winner-take-all network in the last layer.
        The network uses He initialization (He et al IEEE Comp Vision 2015).

        Keywords:
            --- layers: list of number of neurons in the consecutive layers
            --- wtaStrength: wieght of the excitatory connections
            --- offset: offset in the feedforward connections
            --- noiseMagnitued: magnitude of the uniform noise in the feedforward connections
            --- inhStrength: the strength of the inhibitory connections if specified
    """

    # get the number of neurons
    N = np.sum(layers)
    W = np.zeros((N, N))  # Placeholder
    WMask = np.ones((N, N))

    low = 0
    mid = layers[0]
    upper = mid + layers[1]
    for i in range(len(layers) - 1):
        WMask[mid:upper, low:mid] = 0
        # Initialize the weights
        numbBefore = mid - low
        numbAfter = upper - mid
        norm = np.sqrt(2. / float(numbBefore))
        W[mid:upper, low:mid] = np.random.randn(numbAfter, numbBefore) * norm
        if not i == len(layers) - 2:
            low = mid
            mid = upper
            upper += layers[i + 2]

    # create WTA matrix
    if inhStrength is not None:
        inhW = inhStrength
    elif layers[-1] != 1:
        inhW = wtaStrength / (layers[-1] - 1.)
    else:
        inhW = 0.  # single readout neuron: no lateral inhibition

    Nlast = layers[-1]
    wta = -1. * np.ones((Nlast, Nlast)) * inhW
    np.fill_diagonal(wta, wtaStrength)
    relNoise = 1. + np.random.normal(
        0.0, fixedPatternNoiseSigma, size=(Nlast, Nlast))
    relNoise = np.maximum(relNoise, 0.0)
    wta = wta * relNoise
    if not noWtaMask:
        WMask[-Nlast:, -Nlast:] = 0
    W[-Nlast:, -Nlast:] = wta

    # Build the masked matrix only after W and WMask are final; the mask is
    # copied at construction time, so constructing it earlier would leave the
    # WTA block masked out.
    WX = ma.masked_array(W, mask=WMask.astype(bool))
    WX.data[WX.mask] = 0

    return WX
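
For reference, the He initialization cited in the docstring draws each feedforward weight as

\[
w_{ij} \sim \mathcal{N}\!\left(0,\ \frac{2}{n_{\mathrm{in}}}\right)
\]

with n_in the fan-in of the layer; this is what np.random.randn(numbAfter, numbBefore) * np.sqrt(2. / numbBefore) implements above.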
Code example #54
File: numpy_masked.py  Project: handyhan/isc-work
import numpy as np
import numpy.ma as MA

mask_arr = MA.masked_array(range(10), fill_value=-999)

print(mask_arr, mask_arr.fill_value)


mask_arr[2] = MA.masked

print(mask_arr)

print(mask_arr.mask)


narr = MA.masked_where(mask_arr < 6, mask_arr)

print(narr, narr.fill_value)

x = MA.filled(narr)

print(x)

print(x.dtype)
Code example #55
    def fit(self,
            conver=DEFAULT_CONVERGENCE,
            minit=DEFAULT_MINIT,
            maxit=DEFAULT_MAXIT,
            fflag=DEFAULT_FFLAG,
            maxgerr=DEFAULT_MAXGERR,
            going_inwards=False):
        """
        Fit an elliptical isophote.

        Parameters
        ----------
        conver : float, optional
            The main convergence criterion. Iterations stop when the
            largest harmonic amplitude becomes smaller (in absolute
            value) than ``conver`` times the harmonic fit rms.  The
            default is 0.05.
        minit : int, optional
            The minimum number of iterations to perform. A minimum of 10
            (the default) iterations guarantees that, on average, 2
            iterations will be available for fitting each independent
            parameter (the four harmonic amplitudes and the intensity
            level). For the first isophote, the minimum number of
            iterations is 2 * ``minit`` to ensure that, even departing
            from not-so-good initial values, the algorithm has a better
            chance to converge to a sensible solution.
        maxit : int, optional
            The maximum number of iterations to perform.  The default is
            50.
        fflag : float, optional
            The acceptable fraction of flagged data points in the
            sample.  If the actual fraction of valid data points is
            smaller than this, the iterations will stop and the current
            `~photutils.isophote.Isophote` will be returned.  Flagged
            data points are points that either lie outside the image
            frame, are masked, or were rejected by sigma-clipping.  The
            default is 0.7.
        maxgerr : float, optional
            The maximum acceptable relative error in the local radial
            intensity gradient. This is the main control for preventing
            ellipses to grow to regions of too low signal-to-noise
            ratio.  It specifies the maximum acceptable relative error
            in the local radial intensity gradient.  `Busko (1996; ASPC
            101, 139)
            <https://ui.adsabs.harvard.edu/abs/1996ASPC..101..139B/abstract>`_
            showed that the fitting precision relates to that relative
            error.  The usual behavior of the gradient relative error is
            to increase with semimajor axis, being larger in outer,
            fainter regions of a galaxy image.  In the current
            implementation, the ``maxgerr`` criterion is triggered only
            when two consecutive isophotes exceed the value specified by
            the parameter. This prevents premature stopping caused by
            contamination such as stars and HII regions.

            A number of actions may happen when the gradient error
            exceeds ``maxgerr`` (or becomes non-significant and is set
            to `None`).  If the maximum semimajor axis specified by
            ``maxsma`` is set to `None`, semimajor axis growth is
            stopped and the algorithm proceeds inwards to the galaxy
            center. If ``maxsma`` is set to some finite value, and this
            value is larger than the current semimajor axis length, the
            algorithm enters non-iterative mode and proceeds outwards
            until reaching ``maxsma``.  The default is 0.5.
        going_inwards : bool, optional
            Parameter to define the sense of SMA growth. When fitting
            just one isophote, this parameter is used only by the code
            that defines the details of how elliptical arc segments
            ("sectors") are extracted from the image, when using area
            extraction modes (see the ``integrmode`` parameter in the
            `~photutils.isophote.EllipseSample` class).  The default is
            `False`.

        Returns
        -------
        result : `~photutils.isophote.Isophote` instance
            The fitted isophote, which also contains fit status
            information.

        Examples
        --------
        >>> from photutils.isophote import EllipseSample, EllipseFitter
        >>> sample = EllipseSample(data, sma=10.)
        >>> fitter = EllipseFitter(sample)
        >>> isophote = fitter.fit()
        """

        sample = self._sample

        # this flag signals that limiting gradient error (`maxgerr`)
        # wasn't exceeded yet.
        lexceed = False

        # here we keep track of the sample that caused the minimum harmonic
        # amplitude (in absolute value). This will eventually be used to
        # build the resulting Isophote in cases where iterations run to
        # the maximum allowed (maxit), or the maximum number of flagged
        # data points (fflag) is reached.
        minimum_amplitude_value = np.inf
        minimum_amplitude_sample = None

        # these must be passed throughout the execution chain.
        fixed_parameters = self._sample.geometry.fix

        for i in range(maxit):
            # Force the sample to compute its gradient and associated values.
            sample.update(fixed_parameters)

            # The extract() method returns sampled values as a 2-d numpy array
            # with the following structure:
            # values[0] = 1-d array with angles
            # values[1] = 1-d array with radii
            # values[2] = 1-d array with intensity
            values = sample.extract()

            # We have to check for a zero-length condition here, and bail out
            # in case it is detected. The scipy fitter won't raise an exception
            # for zero-length input arrays, but just prints an "INFO" message.
            # This may result in an infinite loop.
            if len(values[2]) < 1:
                log.warning("Sample too small to warrant a fit. "
                            f"SMA is {sample.geometry.sma}")
                sample.geometry.fix = fixed_parameters
                return Isophote(sample, i + 1, False, 3)

            # Fit harmonic coefficients. Failure in fitting is
            # a fatal error; terminate immediately with sample
            # marked as invalid.
            try:
                coeffs = fit_first_and_second_harmonics(values[0], values[2])
                coeffs = coeffs[0]
            except Exception as e:
                log.warning(e)
                sample.geometry.fix = fixed_parameters
                return Isophote(sample, i + 1, False, 3)

            # Mask out coefficients that control fixed ellipse parameters.
            free_coeffs = ma.masked_array(coeffs[1:], mask=fixed_parameters)

            # Largest non-masked harmonic in absolute value drives the
            # correction.
            largest_harmonic_index = np.argmax(np.abs(free_coeffs))
            largest_harmonic = free_coeffs[largest_harmonic_index]

            # see if the amplitude decreased; if yes, keep the
            # corresponding sample for eventual later use.
            if abs(largest_harmonic) < minimum_amplitude_value:
                minimum_amplitude_value = abs(largest_harmonic)
                minimum_amplitude_sample = sample

            # check if converged
            model = first_and_second_harmonic_function(values[0], coeffs)
            residual = values[2] - model

            if ((conver * sample.sector_area * np.std(residual)) >
                    np.abs(largest_harmonic)):
                # Got a valid solution. But before returning, ensure
                # that a minimum of iterations has run.
                if i >= minit - 1:
                    sample.update(fixed_parameters)
                    return Isophote(sample, i + 1, True, 0)

            # it may not have converged yet, but the sample contains too
            # many invalid data points: return.
            if sample.actual_points < (sample.total_points * fflag):
                # when too many data points were flagged, return the
                # best fit sample instead of the current one.
                minimum_amplitude_sample.update(fixed_parameters)
                return Isophote(minimum_amplitude_sample, i + 1, True, 1)

            # pick appropriate corrector code.
            corrector = _CORRECTORS[largest_harmonic_index]

            # generate *NEW* EllipseSample instance with corrected
            # parameter.  Note that this instance is still devoid of other
            # information besides its geometry.  It needs to be explicitly
            # updated for computations to proceed.  We have to build a new
            # EllipseSample instance every time because of the lazy
            # extraction process used by EllipseSample code. To minimize
            # the number of calls to the area integrators, we pay a
            # (hopefully smaller) price here, by having multiple calls to
            # the EllipseSample constructor.
            sample = corrector.correct(sample, largest_harmonic)
            sample.update(fixed_parameters)

            # see if any abnormal (or unusual) conditions warrant
            # the change to non-iterative mode, or go-inwards mode.
            proceed, lexceed = self._check_conditions(sample, maxgerr,
                                                      going_inwards, lexceed)

            if not proceed:
                sample.update(fixed_parameters)
                return Isophote(sample, i + 1, True, -1)

        # Got to the maximum number of iterations. Return with
        # code 2, and handle it as a valid isophote. Use the
        # best fit sample instead of the current one.
        minimum_amplitude_sample.update(fixed_parameters)
        return Isophote(minimum_amplitude_sample, maxit, True, 2)
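
The core of the loop above is the convergence test: iteration stops once the largest free harmonic amplitude falls below ``conver`` times the residual rms, scaled by the sector area. A minimal pure-NumPy sketch of that test; the hand-rolled least-squares fit and the ``conver`` / ``sector_area`` values are illustrative stand-ins, not the photutils internals:

import numpy as np

def harmonic_fit(phi, intensity):
    # least-squares fit of y0 + A1*sin(phi) + B1*cos(phi)
    #                         + A2*sin(2*phi) + B2*cos(2*phi)
    design = np.column_stack([np.ones_like(phi),
                              np.sin(phi), np.cos(phi),
                              np.sin(2 * phi), np.cos(2 * phi)])
    coeffs, *_ = np.linalg.lstsq(design, intensity, rcond=None)
    return coeffs, design @ coeffs

phi = np.linspace(0.0, 2.0 * np.pi, 100, endpoint=False)
intensity = 10.0 + 0.01 * np.sin(phi) + np.random.normal(0.0, 0.5, phi.size)

coeffs, model = harmonic_fit(phi, intensity)
residual = intensity - model
# largest harmonic amplitude (intensity level coeffs[0] excluded)
largest_harmonic = coeffs[1:][np.argmax(np.abs(coeffs[1:]))]

conver, sector_area = 0.05, 10.0  # illustrative values only
print(abs(largest_harmonic) < conver * sector_area * np.std(residual))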
コード例 #56
0
    def __init__(self,
                 path,
                 deadPixeltol=2,
                 aspect="auto",
                 Linescan=True,
                 autoclose=False,
                 save=False):
        self.shift_is_held = False
        self.path = path
        self.deadPixeltol = deadPixeltol
        self.aspect = aspect
        self.Linescan = Linescan
        self.leftpressed = False
        if autoclose:
            plt.ioff()
        else:
            plt.ion()

        dirname, filename = os.path.split(self.path)
        filename, ext = os.path.splitext(filename)

        hyppath = self.path
        specpath = os.path.join(dirname, filename + '_X_axis.asc')
        filepath = os.path.join(dirname,
                                filename + '_SEM image after carto.tif')
        start = time.time()
        data = pd.read_csv(hyppath, delimiter='\t', header=None).to_numpy()
        print(f"File read in {time.time() - start:.2f} s")
        # np.loadtxt(hyppath) also works here, but is considerably slower
        xlen = int(data[0, 0])  # number of points along x
        ylen = int(data[1, 0])  # number of points along y
        wavelenght = np.loadtxt(specpath)
        self.wavelenght = wavelenght[:2048]  # spectrometer wavelength bins
        xcoord = data[0, 1:]
        ycoord = data[1, 1:]
        # xlen * ylen spatial points by 2048 wavelengths; CLdata[:, n] is
        # the spectrum of point number n
        CLdata = data[2:, 1:]
        self.hypSpectrum = np.reshape(np.transpose(CLdata),
                                      (ylen, xlen, len(self.wavelenght)))

        #correct dead / wrong pixels
        self.hypSpectrum, self.hotpixels = correct_dead_pixel(
            self.hypSpectrum, tol=self.deadPixeltol)

        self.wavelenght = ma.masked_array(self.wavelenght, mask=False)
        hypmask = np.resize(self.wavelenght.mask, self.hypSpectrum.shape)
        self.hypSpectrum = ma.masked_array(self.hypSpectrum, mask=hypmask)
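        # np.resize tiles the 1-d spectral mask across the whole
        # (y, x, wavelength) cube, so masking a wavelength band later
        # masks it for every spatial pixel at once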

        xscale_CL, yscale_CL, acceleration, image = scaleSEMimage(filepath)
        if self.Linescan:
            self.fig, (self.ax, self.bx, self.cx) = plt.subplots(
                3, 1, sharex=True, gridspec_kw={'height_ratios': [1, 1, 3]})
        else:
            self.fig, (self.ax, self.bx, self.cx) = plt.subplots(3,
                                                                 1,
                                                                 sharex=True,
                                                                 sharey=True)
        self.fig.patch.set_alpha(0)  #Transparency style
        self.fig.subplots_adjust(top=0.9,
                                 bottom=0.12,
                                 left=0.15,
                                 right=0.82,
                                 hspace=0.1,
                                 wspace=0.05)
        newX = np.linspace(xscale_CL[int(xcoord.min())],
                           xscale_CL[int(xcoord.max())], len(xscale_CL))
        newY = np.linspace(yscale_CL[int(ycoord.min())],
                           yscale_CL[int(ycoord.max())], len(yscale_CL))
        self.X = np.linspace(np.min(newX), np.max(newX),
                             self.hypSpectrum.shape[1])
        self.Y = np.linspace(np.min(newY), np.max(newY),
                             self.hypSpectrum.shape[0])

        nImage = np.array(
            image.crop(
                (xcoord.min(), ycoord.min(), xcoord.max(), ycoord.max())))
        self.ax.imshow(
            nImage,
            cmap='gray',
            vmin=0,
            vmax=65535,
            interpolation="None",
            extent=[np.min(newX),
                    np.max(newX),
                    np.max(newY),
                    np.min(newY)])

        self.hypimage = np.nansum(self.hypSpectrum, axis=2)
        jet = cm.get_cmap("jet")
        jet.set_bad(color='k')
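        # set_bad makes masked (bad) pixels render black in the maps below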
        self.hyperspectralmap = cm.ScalarMappable(cmap=jet)
        self.hyperspectralmap.set_clim(vmin=np.nanmin(self.hypimage),
                                       vmax=np.nanmax(self.hypimage))
        self.lumimage = self.bx.imshow(
            self.hypimage,
            cmap=self.hyperspectralmap.cmap,
            norm=self.hyperspectralmap.norm,
            interpolation="None",
            extent=[np.min(newX),
                    np.max(newX),
                    np.max(newY),
                    np.min(newY)])
        if self.Linescan:
            self.linescan = np.nansum(self.hypSpectrum, axis=0)
            self.linescanmap = cm.ScalarMappable(cmap=jet)
            self.linescanmap.set_clim(vmin=np.nanmin(self.linescan),
                                      vmax=np.nanmax(self.linescan))
            # NOTE: with shading='flat', pcolormesh expects cell *edges*
            # (N+1 values per axis) while imshow expects pixel *centers*;
            # hence the X.size + 1 edges built below. The energy axis is
            # passed as centers here, which old matplotlib silently
            # truncates and matplotlib >= 3.5 rejects under flat shading
            # (a standalone sketch follows this code example).
            x_edges = np.linspace(newX.min(), newX.max(), self.X.size + 1)
            self.im = self.cx.pcolormesh(x_edges,
                                         eV_To_nm / self.wavelenght,
                                         self.linescan.T,
                                         cmap=self.linescanmap.cmap,
                                         shading='flat',
                                         rasterized=True)

            def format_coord(x, y):
                xarr = self.X
                yarr = eV_To_nm / self.wavelenght
                if ((x > xarr.min()) and (x <= xarr.max()) and (y > yarr.min())
                        and (y <= yarr.max())):
                    col = np.argmin(abs(xarr - x))  #np.searchsorted(xarr, x)-1
                    row = np.argmin(abs(yarr - y))  #np.searchsorted(yarr, y)-1
                    z = self.linescan.T[row, col]
                    return f'x={x:1.4f}, y={y:1.4f}, lambda={eV_To_nm/y:1.2f}, z={z:1.2e}   [{row},{col}]'
                else:
                    return f'x={x:1.4f}, y={y:1.4f}, lambda={eV_To_nm/y:1.2f}'

            self.cx.format_coord = format_coord
            self.cx.set_ylabel("Energy (eV)")
            self.cx.set_xlabel("distance (µm)")
            self.cx.set_aspect(self.aspect)
        else:
            self.im = self.cx.imshow(self.wavelenght[np.argmax(
                self.hypSpectrum, axis=2)],
                                     cmap='viridis',
                                     extent=[
                                         np.min(newX),
                                         np.max(newX),
                                         np.max(newY),
                                         np.min(newY)
                                     ])
            self.cx.set_aspect(aspect)
        self.bx.set_aspect(self.aspect)
        self.ax.set_aspect(self.aspect)
        self.ax.get_shared_y_axes().join(self.ax, self.bx)
        self.ax.set_ylabel("distance (µm)")

        pos = self.cx.get_position().bounds
        cbar_ax = self.fig.add_axes([0.85, pos[1], 0.05, pos[-1] * 0.9])
        self.fig.colorbar(self.linescanmap, cax=cbar_ax)
        cbar_ax.ticklabel_format(axis='both', style='sci', scilimits=(0, 0))

        pos = self.bx.get_position().bounds
        cbar_ax = self.fig.add_axes([0.85, pos[1], 0.05, pos[-1] * 0.9])
        self.fig.colorbar(self.hyperspectralmap, cax=cbar_ax)
        cbar_ax.ticklabel_format(axis='both', style='sci', scilimits=(0, 0))
        if save:
            self.fig.savefig(os.path.join(dirname, filename + ".png"), dpi=300)

        if autoclose:
            plt.close(self.fig)
            return
        else:
            self.spec_fig, self.spec_ax = plt.subplots()
            self.spec_data, = self.spec_ax.plot(
                eV_To_nm / self.wavelenght, np.zeros(self.wavelenght.shape[0]))
            self.spec_ax.set_ylim((0, self.hypSpectrum.max()))
            self.spec_ax.set_xlabel('Energy (eV)')
            self.spec_ax.set_ylabel('Intensity (a.u)')
            self.spec_fig.subplots_adjust(top=0.885,
                                          bottom=0.125,
                                          left=0.13,
                                          right=0.94)
            self.spec_ax.ticklabel_format(axis='y',
                                          style='sci',
                                          scilimits=(0, 0))
            self.spec_minbar = self.spec_ax.axvline(eV_To_nm /
                                                    self.wavelenght.max())
            self.spec_maxbar = self.spec_ax.axvline(eV_To_nm /
                                                    self.wavelenght.min())

            def onmotion(event):
                if self.leftpressed:
                    x = event.xdata
                    y = event.ydata
                    if ((event.inaxes is not None) and (x > self.X.min())
                            and (x <= self.X.max()) and (y > self.Y.min())
                            and (y <= self.Y.max())):
                        indx = np.argmin(abs(x - self.X))
                        indy = np.argmin(abs(y - self.Y))
                        self.spec_data.set_ydata(self.hypSpectrum.data[indy,
                                                                       indx])
                        self.spec_fig.canvas.draw_idle()

            def onclick(event):
                if event.button == 1:
                    self.leftpressed = True
                elif event.button == 3:
                    self.wavelenght = ma.masked_array(self.wavelenght.data,
                                                      mask=False)
                    hypmask = np.resize(self.wavelenght.mask,
                                        self.hypSpectrum.shape)
                    self.hypSpectrum = ma.masked_array(self.hypSpectrum.data,
                                                       mask=hypmask)
                    self.hypimage = np.nansum(self.hypSpectrum, axis=2)
                    self.lumimage.set_array(self.hypimage)
                    self.hyperspectralmap.set_clim(
                        vmin=np.nanmin(self.hypimage),
                        vmax=np.nanmax(self.hypimage))
                    self.cx.set_ylim(eV_To_nm / self.wavelenght.max(),
                                     eV_To_nm / self.wavelenght.min())
                    # TODO: rewrite this; it duplicates the update logic
                    # in onselect below
                    self.linescan = np.nansum(self.hypSpectrum, axis=0)
                    self.spec_maxbar.set_xdata(
                        np.repeat(eV_To_nm / self.wavelenght.min(), 2))
                    self.spec_minbar.set_xdata(
                        np.repeat(eV_To_nm / self.wavelenght.max(), 2))
                    self.im.set_array(
                        (self.linescan.T[:, :]).ravel())  #flat shading
                    #self.im.set_array((self.linescan.T).ravel())#gouraud shading
                    self.linescanmap.set_clim(vmin=np.nanmin(self.linescan),
                                              vmax=np.nanmax(self.linescan))
                    self.im.set_norm(self.linescanmap.norm)
                    #self.im.set_cmap(self.linescanmap.cmap)
                    self.fig.canvas.draw_idle()
                    self.spec_fig.canvas.draw_idle()
                    self.fig.canvas.blit(self.fig.bbox)

            def onrelease(event):
                if event.button == 1:
                    self.leftpressed = False
                    #self.cursor.active = False
            def onselect(ymin, ymax):
                indmin = np.argmin(abs(eV_To_nm / self.wavelenght - ymin))
                indmax = np.argmin(abs(eV_To_nm / self.wavelenght - ymax))
                # indmin (resp. indmax) is the index whose wavelength is
                # closest to the clicked position
                if abs(indmax - indmin) < 1: return

                self.wavelenght = ma.masked_outside(self.wavelenght.data,
                                                    eV_To_nm / ymax,
                                                    eV_To_nm / ymin)
                hypmask = np.resize(self.wavelenght.mask,
                                    self.hypSpectrum.shape)
                self.hypSpectrum = ma.masked_array(self.hypSpectrum.data,
                                                   mask=hypmask)
                self.hypimage = np.nansum(self.hypSpectrum, axis=2)
                self.lumimage.set_array(self.hypimage)
                self.hyperspectralmap.set_clim(vmin=np.nanmin(self.hypimage),
                                               vmax=np.nanmax(self.hypimage))
                self.cx.set_ylim(eV_To_nm / self.wavelenght.max(),
                                 eV_To_nm / self.wavelenght.min())
                self.linescan = np.nansum(self.hypSpectrum, axis=0)
                self.spec_maxbar.set_xdata(
                    np.repeat(eV_To_nm / self.wavelenght.min(), 2))
                self.spec_minbar.set_xdata(
                    np.repeat(eV_To_nm / self.wavelenght.max(), 2))
                self.im.set_array(
                    (self.linescan.T[:, :]).ravel())  #flat shading
                #self.im.set_array((self.linescan.T).ravel())#gouraud shading
                self.linescanmap.set_clim(vmin=np.nanmin(self.linescan),
                                          vmax=np.nanmax(self.linescan))
                self.im.set_norm(self.linescanmap.norm)
                #self.im.set_cmap(self.linescanmap.cmap)
                self.fig.canvas.draw_idle()
                self.spec_fig.canvas.draw_idle()
                self.fig.canvas.blit(self.fig.bbox)

            self.span = None
            if Linescan:
                self.span = SpanSelector(self.cx,
                                         onselect,
                                         'vertical',
                                         useblit=True,
                                         rectprops=dict(alpha=0.5,
                                                        facecolor='red'),
                                         button=1)
            self.fig.canvas.mpl_connect('button_press_event', onclick)
            self.fig.canvas.mpl_connect('motion_notify_event', onmotion)
            self.fig.canvas.mpl_connect('button_release_event', onrelease)
            plt.show(block=True)
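
The edge-versus-center distinction noted in the linescan code matters for pcolormesh's flat shading: N cells per axis need N + 1 edge coordinates. A minimal standalone sketch with made-up data:

import numpy as np
import matplotlib.pyplot as plt

vals = np.random.rand(4, 6)              # 4 rows (y) x 6 columns (x)
x_centers = np.linspace(0.0, 5.0, 6)     # imshow-style pixel centers
dx = x_centers[1] - x_centers[0]
x_edges = np.linspace(x_centers[0] - dx / 2.0,
                      x_centers[-1] + dx / 2.0, 7)  # 6 cells -> 7 edges
y_edges = np.arange(5.0)                 # 4 cells -> 5 edges

fig, ax = plt.subplots()
ax.pcolormesh(x_edges, y_edges, vals, shading='flat')
plt.show()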
コード例 #57
0
r2_df = pd.DataFrame(data=stat_col_arr.mean(axis=1),
                     columns=subsp_axis,
                     index=layers)
r2var_df = pd.DataFrame(data=stat_col_arr.std(axis=1),
                        columns=subsp_axis,
                        index=layers)
kappa_df = pd.DataFrame(data=param_col_arr[:, :, :, 3].mean(axis=1),
                        columns=subsp_axis,
                        index=layers)
print("Rsquare data frame")
print(r2_df)
print("kappa data frame")
print(kappa_df)
#%% Use masked arrays to exclude unsuccessful fits from the statistics.
import numpy.ma as ma
makappa = ma.masked_array(data=param_col_arr[:, :, :, 3],
                          mask=(stat_col_arr < 0.5) | np.isnan(stat_col_arr))
mar2 = ma.masked_array(data=stat_col_arr,
                       mask=np.isinf(stat_col_arr) | np.isnan(stat_col_arr))
import pandas as pd
r2_df = pd.DataFrame(data=mar2.mean(axis=1), columns=subsp_axis, index=layers)
r2var_df = pd.DataFrame(data=mar2.std(axis=1),
                        columns=subsp_axis,
                        index=layers)
kappa_df = pd.DataFrame(data=makappa.mean(axis=1),
                        columns=subsp_axis,
                        index=layers)

print("Rsquare data frame")
print(r2_df)
print("kappa data frame")
print(kappa_df)
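
A masked array's reductions simply skip masked entries, which is why the masked versions of the data frames above stay finite even when some fits failed. A quick sketch with a made-up 2 x 3 stand-in for stat_col_arr:

import numpy as np
import numpy.ma as ma

r2 = np.array([[0.9, np.nan, 0.8],
               [0.2, 0.95, 0.7]])
mar2 = ma.masked_array(r2, mask=np.isnan(r2) | np.isinf(r2))
print(r2.mean(axis=1))    # first entry is nan: NaN poisons the plain mean
print(mar2.mean(axis=1))  # [0.85 0.616...]: masked entries are dropped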
コード例 #58
0
import numpy.ma as ma

mrr = ma.masked_array(range(10), fill_value=-999)
print(mrr)

# mask a single element by assigning the ma.masked constant
mrr[2] = ma.masked
print(mrr)
print(mrr.mask)

# mask every element greater than 6
narr = ma.masked_where(mrr > 6, mrr)
print(narr)

# replace masked entries with the fill value, returning a plain ndarray
x = ma.filled(narr)
print(x)
print(type(x))
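
A related property the example above relies on: arithmetic on a masked array propagates the mask, so masked slots never contaminate later reductions. For instance:

import numpy.ma as ma

arr = ma.masked_array([1.0, 2.0, 3.0, 4.0], mask=[0, 1, 0, 0])
print(arr * 10)          # [10.0 -- 30.0 40.0]: the masked slot stays masked
print((arr * 10).sum())  # 80.0: the masked element is ignored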
コード例 #59
0
import numpy.ma as ma

# element at index 3 is masked from the start; fill_value is what
# filled() substitutes for masked entries
marr = ma.masked_array(range(0, 9),
                       fill_value=-999,
                       mask=[0, 0, 0, 1, 0, 0, 0, 0, 0])
print(marr)
コード例 #60
0
ファイル: rescaler.py プロジェクト: yz842614503/ggcmi
# Imports inferred from usage in this excerpt
from os.path import isfile
from numpy import where, zeros, ones, isnan
from numpy.ma import masked_array, masked_where
from netCDF4 import Dataset

# Time indexes
btidx0 = where(time == tmin)[0][0]
btidx1 = where(time == tmax)[0][0] + 1
ftidx0 = where(ftime == tmin)[0][0]
ftidx1 = where(ftime == tmax)[0][0] + 1

time = time[btidx0:btidx1]
detrend = masked_where(isnan(detrend), detrend)

# Get dimensions
(nlats, nlons) = aggmap.shape
nt = len(time)
nirr = 3

varr = masked_array(zeros((nt, nlats, nlons, nirr)),
                    mask=ones((nt, nlats, nlons, nirr)))
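# varr starts fully masked; assigning real data into a slice (as with
# varr[:, :, :, 0] below) clears the mask there, so cells that are never
# written remain masked in downstream computations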

with Dataset(irfile) as f:
    var = f.variables[vname]
    if 'units' in var.ncattrs():
        units = var.units
    else:
        units = ''
    if 'long_name' in var.ncattrs():
        lname = var.long_name
    else:
        lname = ''
    varr[:, :, :, 0] = var[ftidx0:ftidx1]

if isfile(rffile):
    with Dataset(rffile) as f: