Example #1
    def concatenate(self,value,axis=0):
        """ Concatentate UncertContainer value to self.
            Assumes that if dimensions of self and value do not match, to 
            add a np.newaxis along axis of value
        """

        if isinstance(value,UncertContainer):
            if value.vals.ndim == self.vals.ndim:
                vals = value.vals
                dmin = value.dmin
                dmax = value.dmax
                wt = value.wt
                uncert = value.uncert
                mask = value.mask
            elif (value.vals.ndim + 1) == self.vals.ndim:
                vals =  ma.expand_dims(value.vals,axis)
                dmin =  ma.expand_dims(value.dmin,axis)
                dmax =  ma.expand_dims(value.dmax,axis)
                wt =  ma.expand_dims(value.wt,axis)
                uncert =  ma.expand_dims(value.uncert,axis)
                mask =  np.expand_dims(value.mask,axis)
            else:
                raise ValueError('Could not properly match dimensionality')
                
            self.vals = ma.concatenate((self.vals,vals),axis=axis)
            self.dmin = ma.concatenate((self.dmin,dmin),axis=axis)
            self.dmax = ma.concatenate((self.dmax,dmax),axis=axis)
            self.wt = ma.concatenate((self.wt,wt),axis=axis)
            self.uncert = ma.concatenate((self.uncert,uncert),axis=axis)
            
            self.mask = np.concatenate((self.mask,mask),axis=axis)
        else:
            raise ValueError('Can only concatenate with an UncertContainer object')
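The dimension-matching pattern above can be reproduced with bare masked arrays. A minimal sketch (data and names hypothetical):

import numpy as np
import numpy.ma as ma

# A 2-D stack and a 1-D row to be appended along axis 0.
stack = ma.masked_array(np.zeros((2, 3)))
row = ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])

# Promote the row so its ndim matches the stack, then concatenate;
# ma.concatenate carries the mask through.
combined = ma.concatenate((stack, ma.expand_dims(row, 0)), axis=0)
print(combined.shape)    # (3, 3)
print(combined.mask[2])  # [False  True False]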
Example #2
 def test_testUfuncs1(self):
     # Test various functions such as sin, cos.
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     assert_(eq(np.cos(x), cos(xm)))
     assert_(eq(np.cosh(x), cosh(xm)))
     assert_(eq(np.sin(x), sin(xm)))
     assert_(eq(np.sinh(x), sinh(xm)))
     assert_(eq(np.tan(x), tan(xm)))
     assert_(eq(np.tanh(x), tanh(xm)))
     with np.errstate(divide='ignore', invalid='ignore'):
         assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
         assert_(eq(np.log(abs(x)), log(xm)))
         assert_(eq(np.log10(abs(x)), log10(xm)))
     assert_(eq(np.exp(x), exp(xm)))
     assert_(eq(np.arcsin(z), arcsin(zm)))
     assert_(eq(np.arccos(z), arccos(zm)))
     assert_(eq(np.arctan(z), arctan(zm)))
     assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
     assert_(eq(np.absolute(x), absolute(xm)))
     assert_(eq(np.equal(x, y), equal(xm, ym)))
     assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
     assert_(eq(np.less(x, y), less(xm, ym)))
     assert_(eq(np.greater(x, y), greater(xm, ym)))
     assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
     assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
     assert_(eq(np.conjugate(x), conjugate(xm)))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
     assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
     assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
Example #3
 def computeStatistics(self):
     dates = []
     values = []
     for ts in self.itervalues():
         dates.append(ts.dates)
         values.append(ts.values)
     dates = concatenate(dates).astype(int)
     values = ma.concatenate(values)
     minDate = dates.min()
     nDates = dates.max() - minDate + 1 
     hist = histogram(dates*(-values.mask), bins=arange(nDates)+minDate)
     values = values.compressed()
     if values.size == 0:
         raise Exception("empty dataset")
     lowend = values.min()*0.99
     highend = values.max()*1.01
     #cumfreqs, lowlim, binsize, extrapoints = cumfreq(values, 40, (lowend, highend))
     #normcumfreqs = cumfreqs/values.size
     #ind = ((normcumfreqs > 0.02) & (normcumfreqs < 0.98)).nonzero()[0]
     #if ind.size == 0:
     #    raise Exception("empty dataset")
     #min = ind[0]*binsize + lowlim
     #max = ind[-1]*binsize + lowlim
     #return min, max, hist
     return values.min(), values.max(), hist
Example #4
    def add_solver(self, fname):

        # Reg exp: any line that starts (ignoring leading white-space)
        # with a comment character. Also the column separator.
        comment = re.compile(r'[\s]*[%#]')
        column  = re.compile(self.sep)

        # Grab the column from the file
        metrics = []
        file = open(fname, 'r')
        for line in file.readlines():
            if not comment.match(line):
                line = line.strip()
                cols = column.split( line )
                data = atof(cols[opts.datacol - 1])
                metrics.append(data)
        file.close()

        if self.metric is not None:
            self.metric = ma.concatenate((self.metric, [metrics]))
        else: 
            self.metric = ma.array([metrics])

        # Current num of probs grabbed
        nprobs = len(metrics)
        if not self.nprobs: self.nprobs = nprobs
        elif self.nprobs != nprobs:
            commandline_error("All files must have same num of problems.")
Example #5
def rect2sphere(vector, degree=True):
    """\
    Convert vector (x,y,z) from rect to sphere coordinates. If degree is
    ``True``, the returned angles are in degrees.

    Examples
    --------
    >>> convert.rect2sphere([1,1,1], degree=False)
    array([ 0.78539816,  0.61547971,  1.73205081])

    >>> convert.rect2sphere(numpy.array([[1,2],[1,0],[1,3]]), degree=True) 
    array([[ 45.        ,   0.        ],
           [ 35.26438968,  56.30993247],
           [  1.73205081,   3.60555128]])

    """
    x, y, z = vector

    r = np.sqrt(x**2 + y**2 + z**2)
    lon = np.arctan2(y,x)
    lat = np.arcsin(z/r)

    if degree:
        lon = np.rad2deg(lon)
        lat = np.rad2deg(lat)

    return ma.concatenate([ lon[np.newaxis], lat[np.newaxis], r[np.newaxis] ])
Example #6
def _map_common(draw_method_name, arg_func, mode, cube, data, *args, **kwargs):
    """
    Draw the given cube on a map using its points or bounds.

    "Mode" parameter will switch functionality between POINT or BOUND plotting.

    """
    # get the 2d x and 2d y from the CS
    if mode == iris.coords.POINT_MODE:
        x, y = cartography.get_xy_grids(cube)
    else:
        try:
            x, y = cartography.get_xy_contiguous_bounded_grids(cube)
        # Exception translation.
        except iris.exceptions.CoordinateMultiDimError:
            raise ValueError("Could not get XY grid from bounds. "
                             "X or Y coordinate not 1D.")
        except ValueError:
            raise ValueError("Could not get XY grid from bounds. "
                             "X or Y coordinate doesn't have 2 bounds "
                             "per point.")

    # take a copy of the data so that we can make modifications to it
    data = data.copy()

    # If we are global, then append the first column of the data array to the
    # last (and add 360 degrees). NOTE: if it is found that this block of code
    # is useful anywhere other than this plotting routine, it may be better
    # placed in the CS.
    x_coord = cube.coord(axis="X")
    if getattr(x_coord, 'circular', False):
        _, direction = iris.util.monotonic(x_coord.points,
                                           return_direction=True)
        y = np.append(y, y[:, 0:1], axis=1)
        x = np.append(x, x[:, 0:1] + 360 * direction, axis=1)
        data = ma.concatenate([data, data[:, 0:1]], axis=1)

    # Replace non-cartopy subplot/axes with a cartopy alternative.
    cs = cube.coord_system('CoordSystem')
    if cs:
        cartopy_proj = cs.as_cartopy_projection()
    else:
        cartopy_proj = cartopy.crs.PlateCarree()
    ax = _get_cartopy_axes(cartopy_proj)

    draw_method = getattr(ax, draw_method_name)

    # Set the "from transform" keyword.
    # NB. While cartopy doesn't support spherical contours, just use the
    # projection as the source CRS.
    assert 'transform' not in kwargs, 'Transform keyword is not allowed.'
    kwargs['transform'] = cartopy_proj

    if arg_func is not None:
        new_args, kwargs = arg_func(x, y, data, *args, **kwargs)
    else:
        new_args = (x, y, data) + args

    # Draw the contour lines/filled contours.
    return draw_method(*new_args, **kwargs)
Example #7
File: plot.py Project: ckmo/iris
def _map_common(draw_method_name, arg_func, mode, cube, plot_defn,
                *args, **kwargs):
    """
    Draw the given cube on a map using its points or bounds.

    "Mode" parameter will switch functionality between POINT or BOUND plotting.


    """
    # Generate 2d x and 2d y grids.
    y_coord, x_coord = plot_defn.coords
    if mode == iris.coords.POINT_MODE:
        if x_coord.ndim == y_coord.ndim == 1:
            x, y = np.meshgrid(x_coord.points, y_coord.points)
        elif x_coord.ndim == y_coord.ndim == 2:
            x = x_coord.points
            y = y_coord.points
        else:
            raise ValueError("Expected 1D or 2D XY coords")
    else:
        try:
            x, y = np.meshgrid(x_coord.contiguous_bounds(),
                               y_coord.contiguous_bounds())
        # Exception translation.
        except iris.exceptions.CoordinateMultiDimError:
            raise ValueError("Could not get XY grid from bounds. "
                             "X or Y coordinate not 1D.")
        except ValueError:
            raise ValueError("Could not get XY grid from bounds. "
                             "X or Y coordinate doesn't have 2 bounds "
                             "per point.")

    # Obtain the data array.
    data = cube.data
    if plot_defn.transpose:
        data = data.T

    # If we are global, then append the first column of the data array to the
    # last (and add 360 degrees). NOTE: if it is found that this block of code
    # is useful anywhere other than this plotting routine, it may be better
    # placed in the CS.
    if getattr(x_coord, 'circular', False):
        _, direction = iris.util.monotonic(x_coord.points,
                                           return_direction=True)
        y = np.append(y, y[:, 0:1], axis=1)
        x = np.append(x, x[:, 0:1] + 360 * direction, axis=1)
        data = ma.concatenate([data, data[:, 0:1]], axis=1)

    # Replace non-cartopy subplot/axes with a cartopy alternative and set the
    # transform keyword.
    kwargs = _ensure_cartopy_axes_and_determine_kwargs(x_coord, y_coord,
                                                       kwargs)

    if arg_func is not None:
        new_args, kwargs = arg_func(x, y, data, *args, **kwargs)
    else:
        new_args = (x, y, data) + args

    # Draw the contour lines/filled contours.
    return getattr(plt, draw_method_name)(*new_args, **kwargs)
Example #8
def fix_pop_grid(tlon,tlat,data):
    """
    Pad coordinates and data on CCSM/POP gx3v5 grid so it can be
    plotted with matplotlib/basemap
    tlon,tlat,data must be 2D arrays

    Inputs:
        tlon, tlat, data

    Outputs:
        lon, lat, data

    """
    # make lon monotonic and pad coordinate and data arrays along lon axis
    tlon = N.where(N.greater_equal(tlon,tlon[:,0].min()),tlon-360,tlon)
    lon  = N.concatenate((tlon,tlon+360,tlon+2*360),1)
    lat  = N.concatenate((tlat,tlat,tlat),1)
    if hasattr(data,'mask'):
        data = MA.concatenate((data,data,data),-1)
    else:
        data = N.concatenate((data,data,data),-1)

    lon = lon - 360
    # clip grid - this allows to clip map anywhere between -360 and 360
    ind1 = N.searchsorted(lon[0,:],-360)
    ind2 = N.searchsorted(lon[0,:],360)
    lon  = lon[:,ind1-1:ind2+1]
    lat  = lat[:,ind1-1:ind2+1]
    data = data[...,ind1-1:ind2+1]

    return lon, lat, data
Example #9
 def extend_interp(datafield):
     # add masked values at southernmost end
     southernlimitmask = ma.masked_all(len(self.olon))
     olat_ext = np.append(-82.1, self.olat)
     dfield_ext = ma.concatenate([ma.column_stack(southernlimitmask), datafield], 0)
     # f = interp2d(self.olon, olat_ext, dfield_ext)
     # return f(self.pismlon, self.pismlat)
     return interp(dfield_ext, self.olon, olat_ext, self.pismlon, self.pismlat)
Example #10
    def recache(self, always=False):
        if always or self._invalidx:
            xconv = self.convert_xunits(self._xorig)
            if ma.isMaskedArray(self._xorig):
                x = ma.asarray(xconv, np.float_)
            else:
                x = np.asarray(xconv, np.float_)
            x = x.ravel()
        else:
            x = self._x
        if always or self._invalidy:
            yconv = self.convert_yunits(self._yorig)
            if ma.isMaskedArray(self._yorig):
                y = ma.asarray(yconv, np.float_)
            else:
                y = np.asarray(yconv, np.float_)
            y = y.ravel()
        else:
            y = self._y

        if len(x) == 1 and len(y) > 1:
            x = x * np.ones(y.shape, np.float_)
        if len(y) == 1 and len(x) > 1:
            y = y * np.ones(x.shape, np.float_)

        if len(x) != len(y):
            raise RuntimeError("xdata and ydata must be the same length")

        x = x.reshape((len(x), 1))
        y = y.reshape((len(y), 1))

        if ma.isMaskedArray(x) or ma.isMaskedArray(y):
            self._xy = ma.concatenate((x, y), 1)
        else:
            self._xy = np.concatenate((x, y), 1)
        self._x = self._xy[:, 0]  # just a view
        self._y = self._xy[:, 1]  # just a view

        self._subslice = False
        if (
            self.axes
            and len(x) > 100
            and self._is_sorted(x)
            and self.axes.name == "rectilinear"
            and self.axes.get_xscale() == "linear"
            and self._markevery is None
        ):
            self._subslice = True
        if hasattr(self, "_path"):
            interpolation_steps = self._path._interpolation_steps
        else:
            interpolation_steps = 1
        self._path = Path(self._xy, None, interpolation_steps)
        self._transformed_path = None
        self._invalidx = False
        self._invalidy = False
Example #11
def test_flags2bin(n=100):
    flag = ma.concatenate([np.random.randint(0,5,n),
        ma.masked_all(2, dtype='int8')])

    binflags = flags2bin(flag)

    assert type(binflags) == ma.MaskedArray
    assert binflags.dtype == 'bool'
    assert binflags.shape == (n+2,)
    assert binflags.mask[flag.mask].all(), \
            "All masked flags records should be also masked at binflags"
Example #12
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        n = [0, 0, 1, 0, 0]
        m = make_mask(n)
        m2 = make_mask(m)
        assert_(m is m2)
        m3 = make_mask(m, copy=1)
        assert_(m is not m3)

        x1 = np.arange(5)
        y1 = array(x1, mask=m)
        assert_(y1._data is not x1)
        assert_(allequal(x1, y1._data))
        assert_(y1._mask is m)

        y1a = array(y1, copy=0)
        # For copy=False, one might expect that the array would just
        # passed on, i.e., that it would be "is" instead of "==".
        # See gh-4043 for discussion.
        assert_(y1a._mask.__array_interface__ ==
                y1._mask.__array_interface__)

        y2 = array(x1, mask=m3, copy=0)
        assert_(y2._mask is m3)
        assert_(y2[2] is masked)
        y2[2] = 9
        assert_(y2[2] is not masked)
        assert_(y2._mask is m3)
        assert_(allequal(y2.mask, 0))

        y2a = array(x1, mask=m, copy=1)
        assert_(y2a._mask is not m)
        assert_(y2a[2] is masked)
        y2a[2] = 9
        assert_(y2a[2] is not masked)
        assert_(y2a._mask is not m)
        assert_(allequal(y2a.mask, 0))

        y3 = array(x1 * 1.0, mask=m)
        assert_(filled(y3).dtype is (x1 * 1.0).dtype)

        x4 = arange(4)
        x4[2] = masked
        y4 = resize(x4, (8,))
        assert_(eq(concatenate([x4, x4]), y4))
        assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
        assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
        y6 = repeat(x4, 2, axis=0)
        assert_(eq(y5, y6))
Example #13
def densitystep(S, T, P):
    """
    """
    assert S.shape == T.shape
    assert S.shape == P.shape
    try:
        import gsw
        rho0 = gsw.pot_rho_t_exact(S, T, P, 0)
        assert S.ndim == 1, "Not able to densitystep an array ndim > 1"
        ds = ma.concatenate([ma.masked_all(1),
                np.sign(np.diff(P))*np.diff(rho0)])
        return ma.fix_invalid(ds)

    except ImportError:
        print("Package gsw is required and is not available.")
Example #14
def addcyclic(data):
	"""
	Adds cyclic points to an array in rightmost dimension.
	data = input 2D array.
	"""
	if data.ndim != 2:
		print('ERROR: Input array is not two-dimensional')
		return

	if MA.isMA(data):
		newdata = MA.concatenate((data,data[:,0,N.newaxis]),axis=-1)
	else:
		newdata = N.concatenate((data,data[:,0,N.newaxis]),axis=-1)

	return newdata
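A quick usage sketch, assuming the module's N and MA aliases refer to numpy and numpy.ma (data hypothetical):

import numpy as np
import numpy.ma as ma

field = ma.masked_array(np.arange(6.0).reshape(2, 3),
                        mask=[[0, 0, 1], [0, 0, 0]])
wrapped = addcyclic(field)
print(wrapped.shape)    # (2, 4): first column repeated on the right
print(wrapped.mask[0])  # [False False  True False]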
Example #15
 def append(self, na):
     """
 Append tableDict na to self. 
 """
     # for key in na.cols:
     # if key == 'keys': continue
     # if key in self.cols:
     # newDataVec = self.__getitem__(key)
     # for ele in na.data[key]:
     ##newDataVec.append(ele)
     # np.append(newDataVec, ele)
     # self.__setitem__(key, newDataVec)
     for col in self.cols:
         self.data[col] = ma.concatenate((self.data[col][:], na.data[col][:]))
     self.nRows = self.nRows + na.nRows
Example #16
def fix_pop_grid_test(tlon,tlat,data):
    """
    Pad coordinates and data on CCSM/POP gx3v5 grid so it can be
    plotted with matplotlib/basemap
    tlon,tlat,data must be 2D arrays
    """
    # make lon monotonic and pad coordinate and data arrays along lon axis
    tlon = N.where(N.greater_equal(tlon,min(tlon[:,0])),tlon-360,tlon)
    lon  = N.concatenate((tlon,tlon+360),1)
    lat  = N.concatenate((tlat,tlat),1)
    if hasattr(data,'mask'):
        data = MA.concatenate((data,data),1)
    else:
        data = N.concatenate((data,data),1)

    return lon, lat, data
Example #17
def test_i2b_flags(n=100):
    flag = ma.concatenate([np.random.randint(0,5,n),
        ma.masked_all(2, dtype='int8')])

    binflags = i2b_flags(flag)

    assert type(binflags) == ma.MaskedArray
    assert binflags.dtype == 'bool'
    assert binflags.shape == (n+2,)
    assert binflags.mask[flag.mask].all(), \
            "All masked flags records should be also masked at binflags"

    # FIXME: Improve this. Include cases with dict as input.
    #   Check the output in case of dict input and different combinations
    #     of True/False, masked or not.
    assert (i2b_flags([1, 2, 3, 4]) ==
            ma.array([True, True, False, False])).all()
Example #18
 def _make_verts(self, U, V):
     uv = ma.asarray(U+V*1j)
     a = ma.absolute(uv)
     if self.scale is None:
         sn = max(10, math.sqrt(self.N))
         scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
         self.scale = scale
     length = a/(self.scale*self.width)
     X, Y = self._h_arrows(length)
     # There seems to be a ma bug such that indexing
     # a masked array with one element converts it to
     # an ndarray.
     theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
     xy = (X+Y*1j) * np.exp(1j*theta)*self.width
     xy = xy[:,:,np.newaxis]
     XY = ma.concatenate((xy.real, xy.imag), axis=2)
     return XY
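The final concatenate builds an (..., 2) array of x/y pairs from the real and imaginary parts. The same move in isolation, on synthetic points:

import numpy as np
import numpy.ma as ma

# Four unit-circle points, shaped so the pairs land on a new last axis.
xy = np.exp(1j * np.linspace(0, np.pi, 4))[:, np.newaxis, np.newaxis]
XY = ma.concatenate((xy.real, xy.imag), axis=2)
print(XY.shape)  # (4, 1, 2): the last axis holds the (x, y) pairs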
Example #19
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        n = [0, 0, 1, 0, 0]
        m = make_mask(n)
        m2 = make_mask(m)
        assert_(m is m2)
        m3 = make_mask(m, copy=1)
        assert_(m is not m3)

        x1 = np.arange(5)
        y1 = array(x1, mask=m)
        assert_(y1._data is not x1)
        assert_(allequal(x1, y1._data))
        assert_(y1.mask is m)

        y1a = array(y1, copy=0)
        assert_(y1a.mask is y1.mask)

        y2 = array(x1, mask=m3, copy=0)
        assert_(y2.mask is m3)
        assert_(y2[2] is masked)
        y2[2] = 9
        assert_(y2[2] is not masked)
        assert_(y2.mask is m3)
        assert_(allequal(y2.mask, 0))

        y2a = array(x1, mask=m, copy=1)
        assert_(y2a.mask is not m)
        assert_(y2a[2] is masked)
        y2a[2] = 9
        assert_(y2a[2] is not masked)
        assert_(y2a.mask is not m)
        assert_(allequal(y2a.mask, 0))

        y3 = array(x1 * 1.0, mask=m)
        assert_(filled(y3).dtype is (x1 * 1.0).dtype)

        x4 = arange(4)
        x4[2] = masked
        y4 = resize(x4, (8,))
        assert_(eq(concatenate([x4, x4]), y4))
        assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
        assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
        y6 = repeat(x4, 2, axis=0)
        assert_(eq(y5, y6))
Example #20
    def _build_data(self):
        """
        Generate the data payload for the new concatenated cube.

        Returns:
            The concatenated :class:`iris.cube.Cube` data payload.

        """
        skeletons = self._skeletons
        data = [skeleton.data for skeleton in skeletons]

        if self._data_is_masked:
            data = ma.concatenate(tuple(data), axis=self.axis)
        else:
            data = np.concatenate(tuple(data), axis=self.axis)

        return data
Example #21
    def _build_data(self):
        """
        Generate the data payload for the new concatenated cube.

        Returns:
            The concatenated :class:`iris.cube.Cube` data payload.

        """
        skeletons = self._skeletons
        data = [skeleton.data for skeleton in skeletons]

        if self._cube_signature.mdi is not None:
            # Preserve masked entries.
            data = ma.concatenate(tuple(data), axis=self.axis)
        else:
            data = np.concatenate(tuple(data), axis=self.axis)

        return data
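Both variants make the same decision: route through ma.concatenate only when masking matters, because np.concatenate silently drops masks. A standalone sketch of that dispatch (data hypothetical):

import numpy as np
import numpy.ma as ma

chunks = [ma.masked_invalid(np.array([1.0, np.nan])),
          ma.masked_invalid(np.array([3.0, 4.0]))]

# Pick the concatenate that preserves the mask when one is present.
use_ma = any(ma.isMaskedArray(c) for c in chunks)
joined = (ma if use_ma else np).concatenate(chunks, axis=0)
print(joined)  # [1.0 -- 3.0 4.0]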
Example #22
 def test_testAddSumProd(self):
     # Test add, sum, product.
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     assert_(eq(np.add.reduce(x), add.reduce(x)))
     assert_(eq(np.add.accumulate(x), add.accumulate(x)))
     assert_(eq(4, sum(array(4), axis=0)))
     assert_(eq(4, sum(array(4), axis=0)))
     assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
     assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
     assert_(eq(np.sum(x, 0), sum(x, 0)))
     assert_(eq(np.product(x, axis=0), product(x, axis=0)))
     assert_(eq(np.product(x, 0), product(x, 0)))
     assert_(eq(np.product(filled(xm, 1), axis=0), product(xm, axis=0)))
     if len(s) > 1:
         assert_(eq(np.concatenate((x, y), 1), concatenate((xm, ym), 1)))
         assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
         assert_(eq(np.sum(x, 1), sum(x, 1)))
         assert_(eq(np.product(x, 1), product(x, 1)))
Example #23
    def read(self, indexes=None, **kwargs):
        """
        Read reprojected & resampled input data.

        Parameters
        ----------
        indexes : integer or list
            band number or list of band numbers

        Returns
        -------
        data : array
        """
        band_indexes = self._get_band_indexes(indexes)
        arr = self.process.get_raw_output(self.tile)
        if len(band_indexes) == 1:
            return arr[band_indexes[0] - 1]
        return ma.concatenate(
            [ma.expand_dims(arr[i - 1], 0) for i in band_indexes])
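The list comprehension above turns per-band 2-D slices into one (bands, rows, cols) cube. Sketched with synthetic data:

import numpy as np
import numpy.ma as ma

bands = [ma.masked_less(np.random.rand(4, 4), 0.1) for _ in range(3)]
cube = ma.concatenate([ma.expand_dims(b, 0) for b in bands])
print(cube.shape)  # (3, 4, 4)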
Example #24
 def test_testAddSumProd(self):
     # Test add, sum, product.
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
     self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
     self.assertTrue(eq(4, sum(array(4), axis=0)))
     self.assertTrue(eq(4, sum(array(4), axis=0)))
     self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
     self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
     self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
     self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
     self.assertTrue(eq(np.product(x, 0), product(x, 0)))
     self.assertTrue(eq(np.product(filled(xm, 1), axis=0), product(xm, axis=0)))
     if len(s) > 1:
         self.assertTrue(eq(np.concatenate((x, y), 1), concatenate((xm, ym), 1)))
         self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
         self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
         self.assertTrue(eq(np.product(x, 1), product(x, 1)))
Example #25
    def read_v0_0(cls, header: PoseHeader, reader: BufferReader):
        fps, _frames = reader.unpack(ConstStructs.double_ushort)

        _dims = max([len(c.format) for c in header.components]) - 1
        _points = sum([len(c.points) for c in header.components])

        frames_d = []
        frames_c = []
        for _ in range(_frames):
            _people = reader.unpack(ConstStructs.ushort)
            people_d = []
            people_c = []
            for pid in range(_people):
                reader.advance(ConstStructs.short)  # Skip Person ID
                person_d = []
                person_c = []
                for component in header.components:
                    points = np.array(
                        reader.unpack_numpy(
                            ConstStructs.float,
                            (len(component.points), len(component.format))))
                    dimensions, confidence = np.split(points, [-1], axis=1)
                    boolean_confidence = np.where(confidence > 0, 0,
                                                  1)  # To create the mask
                    mask = np.column_stack(
                        tuple([boolean_confidence] *
                              (len(component.format) - 1)))

                    person_d.append(ma.masked_array(dimensions, mask=mask))
                    person_c.append(np.squeeze(confidence, axis=-1))

                if pid == 0:
                    people_d.append(ma.concatenate(person_d))
                    people_c.append(np.concatenate(person_c))

            # In case no person, should all be zeros
            if len(people_d) == 0:
                people_d.append(np.zeros((_points, _dims)))
                people_c.append(np.zeros(_points))

            frames_d.append(ma.stack(people_d))
            frames_c.append(np.stack(people_c))

        return cls(fps, ma.stack(frames_d), ma.stack(frames_c))
Example #26
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        with suppress_warnings() as sup:
            sup.filter(
                np.ma.core.MaskedArrayFutureWarning,
                "setting an item on a masked array which has a "
                "shared mask will not copy")

            n = [0, 0, 1, 0, 0]
            m = make_mask(n)
            m2 = make_mask(m)
            self.assertTrue(m is m2)
            m3 = make_mask(m, copy=1)
            self.assertTrue(m is not m3)

            x1 = np.arange(5)
            y1 = array(x1, mask=m)
            self.assertTrue(y1._data is not x1)
            self.assertTrue(allequal(x1, y1._data))
            self.assertTrue(y1.mask is m)

            y1a = array(y1, copy=0)
            self.assertTrue(y1a.mask is y1.mask)

            y2 = array(x1, mask=m, copy=0)
            self.assertTrue(y2.mask is m)
            self.assertTrue(y2[2] is masked)
            y2[2] = 9
            self.assertTrue(y2[2] is not masked)
            self.assertTrue(y2.mask is not m)
            self.assertTrue(allequal(y2.mask, 0))

            y3 = array(x1 * 1.0, mask=m)
            self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)

            x4 = arange(4)
            x4[2] = masked
            y4 = resize(x4, (8,))
            self.assertTrue(eq(concatenate([x4, x4]), y4))
            self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
            y5 = repeat(x4, (2, 2, 2, 2), axis=0)
            self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
            y6 = repeat(x4, 2, axis=0)
            self.assertTrue(eq(y5, y6))
Example #27
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        with suppress_warnings() as sup:
            sup.filter(
                np.ma.core.MaskedArrayFutureWarning,
                "setting an item on a masked array which has a "
                "shared mask will not copy")

            n = [0, 0, 1, 0, 0]
            m = make_mask(n)
            m2 = make_mask(m)
            self.assertTrue(m is m2)
            m3 = make_mask(m, copy=1)
            self.assertTrue(m is not m3)

            x1 = np.arange(5)
            y1 = array(x1, mask=m)
            self.assertTrue(y1._data is not x1)
            self.assertTrue(allequal(x1, y1._data))
            self.assertTrue(y1.mask is m)

            y1a = array(y1, copy=0)
            self.assertTrue(y1a.mask is y1.mask)

            y2 = array(x1, mask=m, copy=0)
            self.assertTrue(y2.mask is m)
            self.assertTrue(y2[2] is masked)
            y2[2] = 9
            self.assertTrue(y2[2] is not masked)
            self.assertTrue(y2.mask is not m)
            self.assertTrue(allequal(y2.mask, 0))

            y3 = array(x1 * 1.0, mask=m)
            self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)

            x4 = arange(4)
            x4[2] = masked
            y4 = resize(x4, (8,))
            self.assertTrue(eq(concatenate([x4, x4]), y4))
            self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
            y5 = repeat(x4, (2, 2, 2, 2), axis=0)
            self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
            y6 = repeat(x4, 2, axis=0)
            self.assertTrue(eq(y5, y6))
Example #28
def _geo_globe(x, y, z, xmin=-180, modulo=False):
    """
    Ensure global coverage by fixing gaps over poles and across
    longitude seams. Increases the size of the arrays.
    """
    # Cover gaps over poles by appending polar data
    with np.errstate(all='ignore'):
        p1 = np.mean(z[0, :])  # do not ignore NaN if present
        p2 = np.mean(z[-1, :])
    ps = (-90, 90) if (y[0] < y[-1]) else (90, -90)
    z1 = np.repeat(p1, z.shape[1])
    z2 = np.repeat(p2, z.shape[1])
    y = ma.concatenate((ps[:1], y, ps[1:]))
    z = ma.concatenate((z1[None, :], z, z2[None, :]), axis=0)
    # Cover gaps over cartopy longitude seam
    # Ensure coordinates span 360 after modulus
    if modulo:
        if x[0] % 360 != (x[-1] + 360) % 360:
            x = ma.concatenate((x, (x[0] + 360,)))
            z = ma.concatenate((z, z[:, :1]), axis=1)
    # Cover gaps over basemap longitude seam
    # Ensure coordinates span exactly 360
    else:
        # Interpolate coordinate centers to seam. Size possibly augmented by 2
        if x.size == z.shape[1]:
            if x[0] + 360 != x[-1]:
                xi = np.array([x[-1], x[0] + 360])  # input coordinates
                xq = xmin + 360  # query coordinate
                zq = ma.concatenate((z[:, -1:], z[:, :1]), axis=1)
                zq = (zq[:, :1] * (xi[1] - xq) + zq[:, 1:] * (xq - xi[0])) / (xi[1] - xi[0])  # noqa: E501
                x = ma.concatenate(((xmin,), x, (xmin + 360,)))
                z = ma.concatenate((zq, z, zq), axis=1)
        # Extend coordinate edges to seam. Size possibly augmented by 1.
        elif x.size - 1 == z.shape[1]:
            if x[0] != xmin:
                x = ma.append(xmin, x)
                x[-1] = xmin + 360
                z = ma.concatenate((z[:, -1:], z), axis=1)
        else:
            raise ValueError('Unexpected shapes of coordinates or data arrays.')
    return x, y, z
Example #29
 def _make_verts(self, U, V):
     uv = ma.asarray(U + V * 1j)
     a = ma.absolute(uv)
     if self.scale is None:
         sn = max(10, math.sqrt(self.N))
         scale = 1.8 * a.mean() * sn / self.span  # crude auto-scaling
         self.scale = scale
     length = a / (self.scale * self.width)
     X, Y = self._h_arrows(length)
     if self.angles == 'xy':
         theta = self._angles(U, V).filled(0)[:, np.newaxis]
     elif self.angles == 'uv':
         theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
     else:
         theta = ma.asarray(self.angles * np.pi / 180.0).filled(0)
     xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
     xy = xy[:, :, np.newaxis]
     XY = ma.concatenate((xy.real, xy.imag), axis=2)
     return XY
Example #30
def y_grad_v(RomsFile, RomsGrd, varname):
    """
    Compute y-gradient on v points
    """
    if type(varname) == str:
        #load roms file
        RomsNC = nc4(RomsFile, 'r')
        #load variable
        _var = RomsNC.variables[varname][:]

    else:
        _var = varname  #[v points]

    #get mask
    Mask = ma.getmask(_var)

    #compute difference [rho points]
    dvar_rho = ma.diff(_var, n=1, axis=2)

    #pad
    mask_pad = ma.notmasked_edges(dvar_rho, axis=2)
    dvar_ = dvar_rho
    dvar_[:, :, mask_pad[0][2][0] - 1, :] = dvar_rho[:, :,
                                                     mask_pad[0][2][0], :]
    dvar_[:, :, mask_pad[1][2][0] + 1, :] = dvar_rho[:, :,
                                                     mask_pad[1][2][0], :]

    dvar_pad = ma.concatenate((dvar_[:,:,0:1, :],\
                               dvar_, \
                               dvar_[:,:,-2:-1, :]), axis = 2)

    #shift to V points
    dvar_v = GridShift.Rho_to_Vpt(dvar_pad)

    #dy
    y_dist = rt.rho_dist_grd(RomsGrd)[1]
    dy = rt.AddDepthTime(RomsFile, y_dist)
    dy_v = ma.array(GridShift.Rho_to_Vpt(dy), mask=Mask)

    #compute gradient
    dvar_dy = dvar_v / dy_v

    return dvar_dy
Example #31
def sphere2rect(vector, degree=True):
    """\
    Convert vector (lon, lat, r) from sphere to rect coordinates. If degree
    is True, the angles in vector must be given in degrees.

    Examples
    --------
    >>> convert.sphere2rect([120, 30, 1], degree=True)
    array([-0.4330127,  0.75     ,  0.5      ])
    """
    lon, lat, r = vector

    if degree:
        lon = np.deg2rad(lon)
        lat = np.deg2rad(lat)

    return ma.concatenate([(r * np.cos(lat) * np.cos(lon))[np.newaxis],
                           (r * np.cos(lat) * np.sin(lon))[np.newaxis],
                           (r * np.sin(lat))[np.newaxis]])
Example #32
def add_cyclic_point(data, axis=-1):
    """
    Add a cyclic point to an array.

    Args:

    * data:
        An n-dimensional array of data to add a cyclic point to.

    Kwargs:

    * axis:
        Specifies the axis of the data array to add the cyclic point to.
        Defaults to the right-most axis.

    Returns:

    * cyclic_data:
        The data array with a cyclic point added.

    Examples:

    Adding a cyclic point to a data array, where the cyclic dimension is
    the right-most dimension

    >>> import numpy as np
    >>> data = np.ones([5, 6]) * np.arange(6)
    >>> cyclic_data = add_cyclic_point(data)
    >>> print(cyclic_data)
    [[ 0.  1.  2.  3.  4.  5.  0.]
     [ 0.  1.  2.  3.  4.  5.  0.]
     [ 0.  1.  2.  3.  4.  5.  0.]
     [ 0.  1.  2.  3.  4.  5.  0.]
     [ 0.  1.  2.  3.  4.  5.  0.]]

    """
    slicer = [slice(None)] * data.ndim
    try:
        slicer[axis] = slice(0, 1)
    except IndexError:
        raise ValueError('specified axis does not exist in the input')
    new_data = ma.concatenate((data, data[tuple(slicer)]), axis=axis)
    return new_data
Example #33
 def data(self):
     """
     This is a getter for the data property. It caches the raw data if it has not already been read.
     Throws a MemoryError when reading for the first time if the data is too large.
     """
     import numpy.ma as ma
     if self._data is None:
         try:
             # If we were given a list of data managers then we need to concatenate them now...
             self._data = self.retrieve_raw_data(self._data_manager[0])
             if len(self._data_manager) > 1:
                 for manager in self._data_manager[1:]:
                     self._data = ma.concatenate((self._data, self.retrieve_raw_data(manager)), axis=0)
             self._post_process()
         except MemoryError:
             raise MemoryError(
                 "Failed to read the ungridded data as there was not enough memory available.\n"
                 "Consider freeing up variables or indexing the cube before getting its data.")
     return self._data
Example #34
 def _make_verts(self, U, V):
     uv = ma.asarray(U+V*1j)
     a = ma.absolute(uv)
     if self.scale is None:
         sn = max(10, math.sqrt(self.N))
         scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
         self.scale = scale
     length = a/(self.scale*self.width)
     X, Y = self._h_arrows(length)
     if self.angles == 'xy':
         theta = self._angles(U, V).filled(0)[:,np.newaxis]
     elif self.angles == 'uv':
         theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
     else:
         theta = ma.asarray(self.angles*np.pi/180.0).filled(0)
     xy = (X+Y*1j) * np.exp(1j*theta)*self.width
     xy = xy[:,:,np.newaxis]
     XY = ma.concatenate((xy.real, xy.imag), axis=2)
     return XY
Example #35
def Vpt_to_Rho(Vpt_variable):
    """
    Convert variable on v point to rho point
    """
    if np.ma.is_masked(Vpt_variable):
        import numpy.ma as ma
    else:
        import numpy as ma

    _dy_pad = ma.concatenate((Vpt_variable[0:1,:], Vpt_variable,\
                              Vpt_variable[-2:-1,:]), axis = 0)


    d_y = 0.5*(_dy_pad[0:_dy_pad.shape[0]-1, :] + \
               _dy_pad[1:_dy_pad.shape[0], :])

    return d_y
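A shape check on the pad-then-average step, assuming a small masked v-point field (data hypothetical):

import numpy as np
import numpy.ma as ma

v = ma.masked_array(np.arange(8.0).reshape(4, 2))
v[0, 0] = ma.masked  # one masked element selects the ma branch

rho = Vpt_to_Rho(v)  # pads one row at each end, then midpoint-averages
print(rho.shape)     # (5, 2)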
Example #36
def Upt_to_Rho(Upt_variable):
    """
    Converts variables on u points to rho points
    """
    if np.ma.is_masked(Upt_variable):
        import numpy.ma as ma
    else:
        import numpy as ma


    _dx_pad = ma.concatenate((Upt_variable[:,:,:,0:1], Upt_variable,\
                              Upt_variable[:, :,:,-2:-1]), axis = 3)

    d_x = 0.5*(_dx_pad[:,:,:, 0:_dx_pad.shape[3]-1] + \
                _dx_pad[:,:,:, 1:_dx_pad.shape[3]])

    return d_x
Example #37
def regrid(data_array):
    nc_grid = netcdf_file('/home/nicholas/thesis/data/netcdf_files/ORCA2.0_grid.nc','r')
    lon = nc_grid.variables['lon'][0:50,:]
    lat = nc_grid.variables['lat'][0:50,:]
    area = nc_grid.variables['area'][0:50,:]
    mask = nc_grid.variables['mask'][0,0:50,:]
    nc_grid.close()
    lon_min = lon.copy()
    i,j = np.where(lon_min >= 180.)
    lon_min[i,j] = lon_min[i,j] - 360. 
    iw = np.where(lon_min[0,:] >= lon_min[0][0])[0]
    ie = np.where(lon_min[0,:] < lon_min[0][0])[0]
    lon = np.concatenate((np.take(lon_min,ie,axis=1)[:,:-1],np.take(lon_min,iw,axis=1)),axis=1)[:,:-1]
    lat = np.concatenate((np.take(lat,ie,axis=1)[:,:-1],np.take(lat,iw,axis=1)),axis=1)[:,:-1]
    lon_list = np.round(lon[0])
    lat_list = np.round(lat[:, 0])
    timesteps = np.shape(data_array)[0]
    bm_array = [ma.concatenate((ma.take(data_array[i, :, :],ie,axis=1),ma.take(data_array[i, :, :],iw,axis=1)),axis=1)[:,:-1] for i in range(timesteps)]
    bm_array = ma.array(bm_array)
    return bm_array, lon, lat
Example #38
def test_split_data_groups(n=100):
    flag = ma.concatenate([np.random.randint(0,5,n),
        ma.masked_all(2, dtype='int8')])

    indices = split_data_groups(flag)

    assert type(indices) is dict
    assert sorted(indices.keys()) == ['err', 'fit', 'test']
    for k in indices:
        assert indices[k].size == flag.size
        assert indices[k].dtype == 'bool'
        # Necessarily each group have trues and falses
        assert indices[k].any(), "%s are all True" % k
        assert (~indices[k]).any(), "%s are all False" % k
        # Indices return only valid data. Ignore masked.
        assert ~flag[indices[k]].mask.any()

    # Fit group is all True, but err & test must have both
    assert sorted(np.unique(flag[indices['fit']])) == [1,2]
    for k in ['err', 'test']:
        assert sorted(np.unique(flag[indices[k]])) == [1,2,3,4]
Example #39
  def insertRow(self, rowIndex, msk = True):
    '''
      At the moment, insert an empty row before index rowIndex.
      rowIndex starts counting at zero (as usual).
      The new row is masked by default.
    '''

    for col in self.cols:
      dt = self.data[col].dtype
      newEle = ma.empty((1,), dtype = dt)
      #print(self.data[col])
      #print(self.data[col][:rowIndex].shape)
      #print(newEle.shape)
      #print(self.data[col][rowIndex:].shape)
      self.data[col] = ma.concatenate((self.data[col][:rowIndex], newEle, self.data[col][rowIndex:]))
      #if rowIndex <= self.nRows:
        #self.data[col] = ma.concatenate((self.data[col][:rowIndex], newEle, self.data[col][rowIndex:]))
      #else:
        #self.data[col] = ma.concatenate((self.data[col], newEle))
      if msk: self.data[col][rowIndex] = ma.masked
    self.nRows += 1
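The core of the insertion is a three-way ma.concatenate splice. The same idea on a bare column (data hypothetical):

import numpy.ma as ma

col = ma.arange(4)
new = ma.empty((1,), dtype=col.dtype)  # uninitialized placeholder row
spliced = ma.concatenate((col[:2], new, col[2:]))
spliced[2] = ma.masked
print(spliced)  # [0 1 -- 2 3]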
Example #40
def sphere2rect(vector, degree=True):
    """\
    Convert vector (lon, lat, r) from sphere to rect coordinates. If degree
    is True, the angles in vector must be given in degrees.

    Examples
    --------
    >>> convert.sphere2rect([120, 30, 1], degree=True)
    array([-0.4330127,  0.75     ,  0.5      ])
    """
    lon, lat, r = vector

    if degree:
        lon = np.deg2rad(lon)
        lat = np.deg2rad(lat)

    return ma.concatenate([
        (r * np.cos(lat) * np.cos(lon))[np.newaxis],
        (r * np.cos(lat) * np.sin(lon))[np.newaxis],
        (r * np.sin(lat))[np.newaxis]
    ])
Example #41
def test_split_data_groups(n=100):
    flag = ma.concatenate([np.random.random(n) > .5,
        ma.masked_all(2, dtype='bool')])

    indices = split_data_groups(flag)

    assert type(indices) is dict
    assert sorted(indices.keys()) == ['err', 'fit', 'test']
    for k in indices:
        assert indices[k].size == flag.size
        assert indices[k].dtype == 'bool'
        # Necessarily each group have trues and falses
        assert indices[k].any(), "%s are all True" % k
        assert (~indices[k]).any(), "%s are all False" % k
        # Indices return only valid data. Ignore masked.
        assert ~flag[indices[k]].mask.any()

    # Fit group is all True, but err & test must have both
    assert sorted(np.unique(flag[indices['fit']])) == [True]
    for k in ['err', 'test']:
        assert sorted(np.unique(flag[indices[k]])) == [False, True]
Example #42
def get_gates_from_tar(nexrad_archive):
    time_list = []
    alt_list = []
    lat_list = []
    lon_list = []
    ref_list = []
    with tarfile.open(nexrad_archive) as tar:
        # Loop over each element and check whether it is an actual radar archive file (there is also metadata in the tar)
        for item in [
                name for name in tar.getnames() if name[-9:] == '_V06.ar2v'
        ]:
            try:
                radar = pyart.io.read_nexrad_archive(
                    tar.extractfile(tar.getmember(item)),
                    include_fields=['reflectivity'],
                    delay_field_loading=True)
            except IOError:
                pass
            else:
                alt_list.append(radar.gate_altitude['data'])
                lat_list.append(radar.gate_latitude['data'])
                lon_list.append(radar.gate_longitude['data'])
                ref_list.append(radar.fields['reflectivity']['data'])

                start_time = parse_date(item[4:19], fuzzy=True)
                time_list.append([
                    start_time + timedelta(seconds=t)
                    for t in radar.time['data']
                ])

                del radar

    times = np.concatenate(time_list, 0)
    alts = np.concatenate(alt_list, 0)
    lats = np.concatenate(lat_list, 0)
    lons = np.concatenate(lon_list, 0)
    refs = ma.concatenate(ref_list, 0)

    return times, alts, lats, lons, refs
Example #43
def densitystep(S, T, P):
    """Estimates the potential density step of successive mesurements

       Expects the data to be recorded along the time, i.e. first measurement
         was recorded first. This makes difference since the first measurement
         has no reference to define the delta change.
       This is relevant for the type of instrument. For instance: XBTs are
         always measured surface to bottom, CTDs are expected the same, but
         Spray underwater gliders measure bottom to surface.
    """
    assert T.shape == P.shape
    assert T.shape == S.shape
    assert T.ndim == 1, "Not ready to densitystep an array ndim > 1"

    try:
        import gsw
    except ImportError:
        print("Package gsw is required and is not available.")
        raise

    rho0 = gsw.pot_rho_t_exact(S, T, P, 0)
    ds = ma.concatenate([ma.masked_all(1), np.sign(np.diff(P)) * np.diff(rho0)])
    return ma.fix_invalid(ds)
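A usage sketch, assuming the gsw package is installed and a short surface-to-bottom profile (values hypothetical):

import numpy as np

S = np.array([35.0, 35.1, 35.2])   # practical salinity
T = np.array([15.0, 10.0, 5.0])    # in-situ temperature
P = np.array([0.0, 100.0, 200.0])  # pressure, increasing downward

ds = densitystep(S, T, P)
print(ds)  # first element masked (no reference level), then the density steps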
Example #44
    def insertRow(self, rowIndex, msk=True):
        """
      At the moment, insert empty row before index rowIndex
      rowindex starts counting at zeros (as usual)
      New row is masked by default
    """

        for col in self.cols:
            dt = self.data[col].dtype
            newEle = ma.empty((1,), dtype=dt)
            # print(self.data[col])
            # print(self.data[col][:rowIndex].shape)
            # print(newEle.shape)
            # print(self.data[col][rowIndex:].shape)
            self.data[col] = ma.concatenate((self.data[col][:rowIndex], newEle, self.data[col][rowIndex:]))
            # if rowIndex <= self.nRows:
            # self.data[col] = ma.concatenate((self.data[col][:rowIndex], newEle, self.data[col][rowIndex:]))
            # else:
            # self.data[col] = ma.concatenate((self.data[col], newEle))
            if msk:
                self.data[col][rowIndex] = ma.masked
        self.nRows += 1
Example #45
def regrid_array(data=data_cflux):
	'''
	#Could be put with plotting tools???
	# Regrid array to be used with Basemap
	# Only works if the same latitudes and longitudes are selected from the netcdf file and grid
	# Uses the ORCA netcdf file
	### transform the longitude of ORCA onto something that basemap can read
	### The ORCA grid starts at 80 and goes to 440
	### What we want: starts at 80 and goes to 180 and then switches to -180 and goes to 80
	### this method 
	'''
	from Scientific.IO.NetCDF import NetCDFFile
	#nc_grid_file = choose_netcdf_file()
	#~ indir = raw_input('Where is the ORCA netcdf file located? \n')
	nc_grid = NetCDFFile(NC_PATH+ 'ORCA2.0_grid.nc','r')
	lon = nc_grid.variables['lon'][0:40,:]
	lat = nc_grid.variables['lat'][0:40,:]
	area = nc_grid.variables['area'][0:40,:]
	mask = nc_grid.variables['mask'][0,0:40,:]
	nc_grid.close()
	
	lon_min = lon.copy()
	i,j = np.where(lon_min >= 180.) # elements of lon_min that are over 180
	lon_min[i,j] = lon_min[i,j] - 360. # takes those elements and subtracts 360 from them

	### ==============================================================================================================
	### get rid of the funny extra lon and do the same for the lat array ! 
	iw = np.where(lon_min[0,:] >= lon_min[0][0])[0] # are the elements that are greater or equal to the first element ie. 78.000038
	ie = np.where(lon_min[0,:] < lon_min[0][0])[0] # are the elements less than 78.000038

	### puts the lon in order from -180 to 180 and removes the extra 80 at the end
	lon = np.concatenate((np.take(lon_min,ie,axis=1),np.take(lon_min,iw,axis=1)),axis=1)[:,:-1]
	lat = np.concatenate((np.take(lat,ie,axis=1),np.take(lat,iw,axis=1)),axis=1)[:,:-1]

	# The data that is to be plotted needs to be regridded
	bm_array = [ma.concatenate((ma.take(data[i, :, :],ie,axis=1),ma.take(data[i, :, :],iw,axis=1)),axis=1)[:,:-1] for i in range(3650)]
	bm_array = ma.array(bm_array)
	return bm_array
Example #46
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        n = [0, 0, 1, 0, 0]
        m = make_mask(n)
        m2 = make_mask(m)
        self.assertTrue(m is m2)
        m3 = make_mask(m, copy=1)
        self.assertTrue(m is not m3)

        x1 = np.arange(5)
        y1 = array(x1, mask=m)
        self.assertTrue(y1._data is not x1)
        self.assertTrue(allequal(x1, y1._data))
        self.assertTrue(y1.mask is m)

        y1a = array(y1, copy=0)
        self.assertTrue(y1a.mask is y1.mask)

        y2 = array(x1, mask=m, copy=0)
        self.assertTrue(y2.mask is m)
        self.assertTrue(y2[2] is masked)
        y2[2] = 9
        self.assertTrue(y2[2] is not masked)
        self.assertTrue(y2.mask is not m)
        self.assertTrue(allequal(y2.mask, 0))

        y3 = array(x1 * 1.0, mask=m)
        self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)

        x4 = arange(4)
        x4[2] = masked
        y4 = resize(x4, (8, ))
        self.assertTrue(eq(concatenate([x4, x4]), y4))
        self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
        self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
        y6 = repeat(x4, 2, axis=0)
        self.assertTrue(eq(y5, y6))
Example #47
    def recache(self):
        #if self.axes is None: print 'recache no axes'
        #else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units
        if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig):
            x = ma.asarray(self.convert_xunits(self._xorig), float)
            y = ma.asarray(self.convert_yunits(self._yorig), float)
            x = ma.ravel(x)
            y = ma.ravel(y)
        else:
            x = np.asarray(self.convert_xunits(self._xorig), float)
            y = np.asarray(self.convert_yunits(self._yorig), float)
            x = np.ravel(x)
            y = np.ravel(y)

        if len(x) == 1 and len(y) > 1:
            x = x * np.ones(y.shape, float)
        if len(y) == 1 and len(x) > 1:
            y = y * np.ones(x.shape, float)

        if len(x) != len(y):
            raise RuntimeError('xdata and ydata must be the same length')

        x = x.reshape((len(x), 1))
        y = y.reshape((len(y), 1))

        if ma.isMaskedArray(x) or ma.isMaskedArray(y):
            self._xy = ma.concatenate((x, y), 1)
        else:
            self._xy = np.concatenate((x, y), 1)
        self._x = self._xy[:, 0]  # just a view
        self._y = self._xy[:, 1]  # just a view

        # Masked arrays are now handled by the Path class itself
        self._path = Path(self._xy)
        self._transformed_path = TransformedPath(self._path,
                                                 self.get_transform())

        self._invalid = False
Example #48
def concatenate(arrays, axis=0):
    """
    Concatenate a list of numpy arrays into one larger array along the axis specified (the default axis is zero). If any
    of the arrays are masked arrays then the returned array will be a masked array with the correct mask, otherwise a
    numpy array is returned.

    :param arrays: A list of numpy arrays (masked or not)
    :param axis: The axis along which to concatenate (the default is 0)
    :return: The concatenated array
    """
    from numpy.ma import MaskedArray
    if any(isinstance(array, MaskedArray) for array in arrays):
        from numpy.ma import concatenate
    else:
        from numpy import concatenate

    res = arrays[0]

    if len(arrays) > 1:
        for array in arrays[1:]:
            res = concatenate((res, array), axis)

    return res
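A short usage sketch of the wrapper's dispatch (data hypothetical):

import numpy as np
import numpy.ma as ma

plain = np.array([1.0, 2.0])
masked = ma.masked_array([3.0, 4.0], mask=[False, True])

# A single masked input is enough to switch the result to a masked array.
print(type(concatenate([plain, masked])))  # numpy.ma.core.MaskedArray
print(type(concatenate([plain, plain])))   # numpy.ndarray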
Example #49
    def __init__(self, contour, contour_height, data, interval):
        # Height of the contour
        self.h_0 = contour_height
        # Minimal step required to consider an amplitude
        self.interval_min = interval * 2
        # Indices of all pixels in contour
        self.contour = contour
        # View into the original grid (local view), or a copy if the contour crosses the bound
        (x_start, x_stop), (y_start, y_stop) = contour.bbox_slice
        on_bounds = x_start > x_stop
        if on_bounds:
            self.grid_extract = ma.concatenate(
                (data[x_start:, y_start:y_stop], data[:x_stop,
                                                      y_start:y_stop]))
            if self.grid_extract.mask.size == 1:
                self.grid_extract = ma.array(
                    self.grid_extract,
                    mask=ones(self.grid_extract.shape, dtype="bool") *
                    self.grid_extract.mask,
                )
        else:
            self.grid_extract = data[x_start:x_stop, y_start:y_stop]
        # => maybe replace pixels outside the contour with nan?
        self.pixel_mask = zeros(self.grid_extract.shape, dtype="bool")
        i_x = contour.pixels_index[0] - x_start
        if on_bounds:
            i_x %= data.shape[0]

        self.pixel_mask[i_x, contour.pixels_index[1] - y_start] = True
        self.nb_pixel = i_x.shape[0]

        # Only pixel in contour
        self.sla = data[contour.pixels_index]
        # Amplitude which will be provided
        self.amplitude = 0
        # Maximum local extrema accepted
        self.mle = 1
Example #50
def _map_common(draw_method_name, arg_func, mode, cube, plot_defn, *args,
                **kwargs):
    """
    Draw the given cube on a map using its points or bounds.

    "Mode" parameter will switch functionality between POINT or BOUND plotting.


    """
    # Generate 2d x and 2d y grids.
    y_coord, x_coord = plot_defn.coords
    if mode == iris.coords.POINT_MODE:
        if x_coord.ndim == y_coord.ndim == 1:
            x, y = np.meshgrid(x_coord.points, y_coord.points)
        elif x_coord.ndim == y_coord.ndim == 2:
            x = x_coord.points
            y = y_coord.points
        else:
            raise ValueError("Expected 1D or 2D XY coords")
    else:
        try:
            x, y = np.meshgrid(x_coord.contiguous_bounds(),
                               y_coord.contiguous_bounds())
        # Exception translation.
        except iris.exceptions.CoordinateMultiDimError:
            raise ValueError("Could not get XY grid from bounds. "
                             "X or Y coordinate not 1D.")
        except ValueError:
            raise ValueError("Could not get XY grid from bounds. "
                             "X or Y coordinate doesn't have 2 bounds "
                             "per point.")

    # Obtain the data array.
    data = cube.data
    if plot_defn.transpose:
        data = data.T

    # If we are global, then append the first column of the data array to the
    # last (and add 360 degrees). NOTE: if it is found that this block of code
    # is useful anywhere other than this plotting routine, it may be better
    # placed in the CS.
    if getattr(x_coord, 'circular', False):
        _, direction = iris.util.monotonic(x_coord.points,
                                           return_direction=True)
        y = np.append(y, y[:, 0:1], axis=1)
        x = np.append(x, x[:, 0:1] + 360 * direction, axis=1)
        data = ma.concatenate([data, data[:, 0:1]], axis=1)

    # Replace non-cartopy subplot/axes with a cartopy alternative and set the
    # transform keyword.
    kwargs = _ensure_cartopy_axes_and_determine_kwargs(x_coord, y_coord,
                                                       kwargs)

    if arg_func is not None:
        new_args, kwargs = arg_func(x, y, data, *args, **kwargs)
    else:
        new_args = (x, y, data) + args

    # Draw the contour lines/filled contours.
    axes = kwargs.pop('axes', None)
    plotfn = getattr(axes if axes else plt, draw_method_name)
    return plotfn(*new_args, **kwargs)
Example #51
def add_cyclic_point(data, coord=None, axis=-1):
    """
    Add a cyclic point to an array and optionally a corresponding
    coordinate.

    Parameters
    ----------
    data
        An n-dimensional array of data to add a cyclic point to.
    coord : optional
        A 1-dimensional array which specifies the coordinate values for
        the dimension the cyclic point is to be added to. The coordinate
        values must be regularly spaced. Defaults to None.
    axis : optional
        Specifies the axis of the data array to add the cyclic point to.
        Defaults to the right-most axis.

    Returns
    -------
    cyclic_data
        The data array with a cyclic point added.
    cyclic_coord
        The coordinate with a cyclic point, only returned if the coord
        keyword was supplied.

    Examples
    --------
    Adding a cyclic point to a data array, where the cyclic dimension is
    the right-most dimension.

    >>> import numpy as np
    >>> data = np.ones([5, 6]) * np.arange(6)
    >>> cyclic_data = add_cyclic_point(data)
    >>> print(cyclic_data)  # doctest: +NORMALIZE_WHITESPACE
    [[0. 1. 2. 3. 4. 5. 0.]
     [0. 1. 2. 3. 4. 5. 0.]
     [0. 1. 2. 3. 4. 5. 0.]
     [0. 1. 2. 3. 4. 5. 0.]
     [0. 1. 2. 3. 4. 5. 0.]]

    Adding a cyclic point to a data array and an associated coordinate

    >>> lons = np.arange(0, 360, 60)
    >>> cyclic_data, cyclic_lons = add_cyclic_point(data, coord=lons)
    >>> print(cyclic_data)  # doctest: +NORMALIZE_WHITESPACE
    [[0. 1. 2. 3. 4. 5. 0.]
     [0. 1. 2. 3. 4. 5. 0.]
     [0. 1. 2. 3. 4. 5. 0.]
     [0. 1. 2. 3. 4. 5. 0.]
     [0. 1. 2. 3. 4. 5. 0.]]
    >>> print(cyclic_lons)
    [  0  60 120 180 240 300 360]

    """
    if coord is not None:
        if coord.ndim != 1:
            raise ValueError('The coordinate must be 1-dimensional.')
        if len(coord) != data.shape[axis]:
            raise ValueError(f'The length of the coordinate does not match '
                             f'the size of the corresponding dimension of '
                             f'the data array: len(coord) = {len(coord)}, '
                             f'data.shape[{axis}] = {data.shape[axis]}.')
        delta_coord = np.diff(coord)
        if not np.allclose(delta_coord, delta_coord[0]):
            raise ValueError('The coordinate must be equally spaced.')
        new_coord = ma.concatenate((coord, coord[-1:] + delta_coord[0]))
    slicer = [slice(None)] * data.ndim
    try:
        slicer[axis] = slice(0, 1)
    except IndexError:
        raise ValueError('The specified axis does not correspond to an '
                         'array dimension.')
    new_data = ma.concatenate((data, data[tuple(slicer)]), axis=axis)
    if coord is None:
        return_value = new_data
    else:
        return_value = new_data, new_coord
    return return_value
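
Because the cyclic column is appended with ma.concatenate, a masked input keeps its mask in the copied column. A quick check, assuming add_cyclic_point is defined as above:

import numpy as np
import numpy.ma as ma

data = np.ones([3, 4]) * np.arange(4)
masked = ma.masked_less(data, 2)        # mask the 0 and 1 columns
wrapped = add_cyclic_point(masked)
print(wrapped.shape)                    # (3, 5)
print(ma.is_masked(wrapped[:, -1]))     # True: the copied column is still masked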
Example #52
def join_by(key,
            r1,
            r2,
            jointype='inner',
            r1postfix='1',
            r2postfix='2',
            defaults=None,
            usemask=True,
            asrecarray=False):
    """
    Join arrays `r1` and `r2` on key `key`.

    The key should be either a string or a sequence of strings corresponding
    to the fields used to join the array.
    An exception is raised if the `key` field cannot be found in the two input
    arrays.
    Neither `r1` nor `r2` should have any duplicates along `key`: the presence
    of duplicates will make the output quite unreliable. Note that duplicates
    are not looked for by the algorithm.

    Parameters
    ----------
    key : {string, sequence}
        A string or a sequence of strings corresponding to the fields used
        for comparison.
    r1, r2 : arrays
        Structured arrays.
    jointype : {'inner', 'outer', 'leftouter'}, optional
        If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of r1
        not in r2 and the elements of r2 not in r1.
        If 'leftouter', returns the common elements and the elements of r1 not
        in r2.
    r1postfix : string, optional
        String appended to the names of the fields of r1 that are present in r2
        but not part of the key.
    r2postfix : string, optional
        String appended to the names of the fields of r2 that are present in r1
        but not part of the key.
    defaults : {dictionary}, optional
        Dictionary mapping field names to the corresponding default values.
    usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
        or a ndarray.
    asrecarray : {False, True}, optional
        Whether to return a recarray (or MaskedRecords if `usemask==True`) or
        just a flexible-type ndarray.

    Notes
    -----
    * The output is sorted along the key.
    * A temporary array is formed by dropping the fields not in the key for the
      two arrays and concatenating the result. This array is then sorted, and
      the common entries selected. The output is constructed by filling the fields
      with the selected entries. Matching is not preserved if there are some
      duplicates...

    """
    # Check jointype
    if jointype not in ('inner', 'outer', 'leftouter'):
        raise ValueError("The 'jointype' argument should be in 'inner', "
                         "'outer' or 'leftouter' (got '%s' instead)" % jointype)
    # If we have a single key, put it in a tuple
    if isinstance(key, str):
        key = (key, )

    # Check the keys
    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field %s' % name)
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field %s' % name)

    # Make sure we work with ravelled arrays
    r1 = r1.ravel()
    r2 = r2.ravel()
    (nb1, nb2) = (len(r1), len(r2))
    (r1names, r2names) = (r1.dtype.names, r2.dtype.names)

    # Check the names for collision
    if (set.intersection(set(r1names), set(r2names)).difference(key)
            and not (r1postfix or r2postfix)):
        msg = "r1 and r2 contain common names, r1postfix and r2postfix "
        msg += "can't be empty"
        raise ValueError(msg)

    # Make temporary arrays of just the keys
    r1k = drop_fields(r1, [n for n in r1names if n not in key])
    r2k = drop_fields(r2, [n for n in r2names if n not in key])

    # Concatenate the two arrays for comparison
    aux = ma.concatenate((r1k, r2k))
    idx_sort = aux.argsort(order=key)
    aux = aux[idx_sort]
    #
    # Get the common keys
    flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
    flag_in[:-1] = flag_in[1:] + flag_in[:-1]
    idx_in = idx_sort[flag_in]
    idx_1 = idx_in[(idx_in < nb1)]
    idx_2 = idx_in[(idx_in >= nb1)] - nb1
    (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
    if jointype == 'inner':
        (r1spc, r2spc) = (0, 0)
    elif jointype == 'outer':
        idx_out = idx_sort[~flag_in]
        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
        idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
        (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
    elif jointype == 'leftouter':
        idx_out = idx_sort[~flag_in]
        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
        (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
    # Select the entries from each input
    (s1, s2) = (r1[idx_1], r2[idx_2])
    #
    # Build the new description of the output array .......
    # Start with the key fields
    ndtype = [list(_) for _ in r1k.dtype.descr]
    # Add the other fields
    ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
    # Find the new list of names (it may be different from r1names)
    names = list(_[0] for _ in ndtype)
    for desc in r2.dtype.descr:
        desc = list(desc)
        name = desc[0]
        # Have we seen the current name already ?
        if name in names:
            nameidx = ndtype.index(desc)
            current = ndtype[nameidx]
            # The current field is part of the key: take the largest dtype
            if name in key:
                current[-1] = max(desc[1], current[-1])
            # The current field is not part of the key: add the suffixes
            else:
                current[0] += r1postfix
                desc[0] += r2postfix
                ndtype.insert(nameidx + 1, desc)
        #... we haven't: just add the description to the current list
        else:
            names.append(desc[0])
            ndtype.append(desc)
    # Revert the elements to tuples
    ndtype = [tuple(_) for _ in ndtype]
    # Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
    cmn = max(r1cmn, r2cmn)
    # Construct an empty array
    output = ma.masked_all((cmn + r1spc + r2spc, ), dtype=ndtype)
    names = output.dtype.names
    for f in r1names:
        selected = s1[f]
        if f not in names or (f in r2names and not r2postfix and f not in key):
            f += r1postfix
        current = output[f]
        current[:r1cmn] = selected[:r1cmn]
        if jointype in ('outer', 'leftouter'):
            current[cmn:cmn + r1spc] = selected[r1cmn:]
    for f in r2names:
        selected = s2[f]
        if f not in names or (f in r1names and not r1postfix and f not in key):
            f += r2postfix
        current = output[f]
        current[:r2cmn] = selected[:r2cmn]
        if (jointype == 'outer') and r2spc:
            current[-r2spc:] = selected[r2cmn:]
    # Sort and finalize the output
    output.sort(order=key)
    kwargs = dict(usemask=usemask, asrecarray=asrecarray)
    return _fix_output(_fix_defaults(output, defaults), **kwargs)
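
For reference, a small usage sketch with toy structured arrays; join_by is also available as numpy.lib.recfunctions.join_by in released NumPy:

import numpy as np
from numpy.lib import recfunctions as rfn

r1 = np.array([(1, 10.), (2, 20.), (3, 30.)], dtype=[('key', int), ('a', float)])
r2 = np.array([(2, 0.2), (3, 0.3), (4, 0.4)], dtype=[('key', int), ('b', float)])

inner = rfn.join_by('key', r1, r2, jointype='inner')  # keys 2 and 3 only
outer = rfn.join_by('key', r1, r2, jointype='outer')  # keys 1-4, gaps masked
print(inner['key'])   # [2 3]
print(outer['b'])     # [-- 0.2 0.3 0.4]: key 1 has no entry in r2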
Example #53
def stack_arrays(arrays,
                 defaults=None,
                 usemask=True,
                 asrecarray=False,
                 autoconvert=False):
    """
    Superposes arrays field by field

    Parameters
    ----------
    arrays : array or sequence
        Sequence of input arrays.
    defaults : dictionary, optional
        Dictionary mapping field names to the corresponding default values.
    usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
        or a ndarray.
    asrecarray : {False, True}, optional
        Whether to return a recarray (or MaskedRecords if `usemask==True`) or
        just a flexible-type ndarray.
    autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> x = np.array([1, 2,])
    >>> rfn.stack_arrays(x) is x
    True
    >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
    >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
    ...   dtype=[('A', '|S3'), ('B', float), ('C', float)])
    >>> test = rfn.stack_arrays((z,zz))
    >>> test
    masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
     ('c', 30.0, 300.0)],
                 mask = [(False, False, True) (False, False, True) (False, False, False)
     (False, False, False) (False, False, False)],
           fill_value = ('N/A', 1e+20, 1e+20),
                dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])

    """
    if isinstance(arrays, ndarray):
        return arrays
    elif len(arrays) == 1:
        return arrays[0]
    seqarrays = [np.asanyarray(a).ravel() for a in arrays]
    nrecords = [len(a) for a in seqarrays]
    ndtype = [a.dtype for a in seqarrays]
    fldnames = [d.names for d in ndtype]
    #
    dtype_l = ndtype[0]
    newdescr = dtype_l.descr
    names = [_[0] for _ in newdescr]
    for dtype_n in ndtype[1:]:
        for descr in dtype_n.descr:
            name = descr[0] or ''
            if name not in names:
                newdescr.append(descr)
                names.append(name)
            else:
                nameidx = names.index(name)
                current_descr = newdescr[nameidx]
                if autoconvert:
                    if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
                        current_descr = list(current_descr)
                        current_descr[-1] = descr[1]
                        newdescr[nameidx] = tuple(current_descr)
                elif descr[1] != current_descr[-1]:
                    raise TypeError("Incompatible type '%s' <> '%s'" %
                                    (dict(newdescr)[name], descr[1]))
    # Only one field: use concatenate
    if len(newdescr) == 1:
        output = ma.concatenate(seqarrays)
    else:
        #
        output = ma.masked_all((np.sum(nrecords), ), newdescr)
        offset = np.cumsum(np.r_[0, nrecords])
        seen = []
        for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
            names = a.dtype.names
            if names is None:
                output['f%i' % len(seen)][i:j] = a
            else:
                for name in n:
                    output[name][i:j] = a[name]
                    if name not in seen:
                        seen.append(name)
    #
    return _fix_output(_fix_defaults(output, defaults),
                       usemask=usemask,
                       asrecarray=asrecarray)
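
The autoconvert switch is the one behaviour the docstring example does not exercise; a short sketch with toy dtypes:

import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1,)], dtype=[('x', '<i4')])
b = np.array([(2.5,)], dtype=[('x', '<f8')])

# Without autoconvert the mismatched 'x' dtypes raise TypeError;
# with it, 'x' is widened to the larger dtype (float64 here).
merged = rfn.stack_arrays((a, b), autoconvert=True)
print(merged['x'])    # [1.0 2.5]
print(merged.dtype)   # [('x', '<f8')]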
Example #54
def load_GPM_IMERG_files_with_spatial_filter(file_path=None,
                                             filename_pattern=None,
                                             filelist=None,
                                             variable_name='precipitationCal',
                                             user_mask_file=None,
                                             mask_variable_name='mask',
                                             user_mask_values=[10],
                                             longitude_name='lon',
                                             latitude_name='lat'):
    ''' Load multiple GPM Level 3 IMERG files containing calibrated \
        precipitation and generate a two-dimensional array \
        for the masked grid points.

    :param file_path: Directory to the HDF files to load.
    :type file_path: :mod:`string`

    :param filename_pattern: Path to the HDF files to load.
    :type filename_pattern: :mod:`string`

    :param filelist: A list of filenames
    :type filelist: :mod:`string`

    :param variable_name: The variable name to load from the HDF file.
    :type variable_name: :mod:`string`

    :param user_mask_file: The user's own gridded mask file (a netCDF file name).
    :type user_mask_file: :mod:`string`

    :param mask_variable_name: The mask variable in user_mask_file.
    :type mask_variable_name: :mod:`string`

    :param longitude_name: The longitude variable in user_mask_file.
    :type longitude_name: :mod:`string`

    :param latitude_name: The latitude variable in user_mask_file.
    :type latitude_name: :mod:`string`

    :param user_mask_values: Grid points where mask_variable equals one of
        these values will be extracted.
    :type user_mask_values: list of numbers

    :returns: A two-dimensional masked array with the requested variable's data \
        for the masked grid points.
    :rtype: :class:`numpy.ma.MaskedArray`

    :raises ValueError:
    '''

    if not filelist:
        GPM_files = []
        for pattern in filename_pattern:
            GPM_files.extend(glob(file_path + pattern))
    else:
        with open(filelist) as f:
            GPM_files = [line.rstrip('\n') for line in f]

    GPM_files.sort()

    file_object_first = h5py.File(GPM_files[0], 'r')
    lats = file_object_first['Grid']['lat'][:]
    lons = file_object_first['Grid']['lon'][:]

    lons, lats = numpy.meshgrid(lons, lats)

    if user_mask_file is None:
        raise ValueError('user_mask_file is required: without it no grid '
                         'points are selected for the spatial filter.')

    nfile = len(GPM_files)
    for ifile, file in enumerate(GPM_files):
        if ifile == 0 and user_mask_file:
            file_object = netCDF4.Dataset(user_mask_file)
            mask_variable = file_object.variables[mask_variable_name][:]
            mask_longitude = file_object.variables[longitude_name][:]
            mask_latitude = file_object.variables[latitude_name][:]
            spatial_mask = utils.regrid_spatial_mask(lons, lats,
                                                     mask_longitude,
                                                     mask_latitude,
                                                     mask_variable,
                                                     user_mask_values)
            y_index, x_index = numpy.where(spatial_mask == 0)
        print('Reading file ' + str(ifile + 1) + '/' + str(nfile), file)
        file_object = h5py.File(file, 'r')
        values0 = ma.transpose(
            ma.masked_less(file_object['Grid'][variable_name][:], 0.))
        values_masked = values0[y_index, x_index]
        values_masked = ma.expand_dims(values_masked, axis=0)
        if ifile == 0:
            values = values_masked
        else:
            values = ma.concatenate((values, values_masked))
        file_object.close()
    return values
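
A design note on the loop above: calling ma.concatenate on every iteration re-copies the accumulated array, which is quadratic in the number of files. Collecting the per-file slices in a list and concatenating once is equivalent; a sketch of the pattern with stand-in data (not the OCW API):

import numpy as np
import numpy.ma as ma

# Stand-in for the per-file read: each "file" yields one masked 1-D slice.
per_file_slices = [ma.masked_less(np.random.rand(5), 0.1) for _ in range(3)]

# Collect first, concatenate once at the end: a single copy instead of one
# copy of the growing array per iteration.
slices = [ma.expand_dims(v, axis=0) for v in per_file_slices]
values = ma.concatenate(slices, axis=0)
print(values.shape)   # (3, 5)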
Example #55
def load_GPM_IMERG_files(file_path=None,
                         filename_pattern=None,
                         filelist=None,
                         variable_name='precipitationCal',
                         name='GPM_IMERG'):
    ''' Load multiple GPM Level 3 IMERG files containing calibrated \
        precipitation and generate an OCW Dataset object.

    :param file_path: Directory to the HDF files to load.
    :type file_path: :mod:`string`

    :param filename_pattern: Path to the HDF files to load.
    :type filename_pattern: :mod:`string`

    :param filelist: A list of filenames
    :type filelist: :mod:`string`

    :param variable_name: The variable name to load from the HDF file.
    :type variable_name: :mod:`string`

    :param name: (Optional) A name for the loaded dataset.
    :type name: :mod:`string`

    :returns: An OCW Dataset object with the requested variable's data from \
        the HDF file.
    :rtype: :class:`dataset.Dataset`

    :raises ValueError:
    '''

    if not filelist:
        GPM_files = []
        for pattern in filename_pattern:
            GPM_files.extend(glob(file_path + pattern))
    else:
        with open(filelist) as f:
            GPM_files = [line.rstrip('\n') for line in f]

    GPM_files.sort()

    file_object_first = h5py.File(GPM_files[0], 'r')
    lats = file_object_first['Grid']['lat'][:]
    lons = file_object_first['Grid']['lon'][:]

    lons, lats = numpy.meshgrid(lons, lats)

    variable_unit = "mm/hr"

    times = []
    nfile = len(GPM_files)
    for ifile, file in enumerate(GPM_files):
        print('Reading file ' + str(ifile + 1) + '/' + str(nfile), file)
        file_object = h5py.File(file, 'r')
        time_struct_parsed = strptime(file[-39:-23], "%Y%m%d-S%H%M%S")
        times.append(datetime(*time_struct_parsed[:6]))
        values0 = ma.transpose(
            ma.masked_less(file_object['Grid'][variable_name][:], 0.))
        values0 = ma.expand_dims(values0, axis=0)
        if ifile == 0:
            values = values0
        else:
            values = ma.concatenate((values, values0))
        file_object.close()
    times = numpy.array(times)
    return Dataset(lats,
                   lons,
                   times,
                   values,
                   variable_name,
                   units=variable_unit,
                   name=name)
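
The file[-39:-23] slice relies on the fixed layout of IMERG half-hourly filenames. For a representative name such as the assumed example below, it isolates the 16-character start timestamp that strptime then parses:

from datetime import datetime
from time import strptime

# Assumed, representative IMERG half-hourly filename.
fname = '3B-HHR.MS.MRG.3IMERG.20140101-S000000-E002959.0000.V06B.HDF5'
stamp = fname[-39:-23]                      # '20140101-S000000'
parsed = strptime(stamp, "%Y%m%d-S%H%M%S")
print(datetime(*parsed[:6]))                # 2014-01-01 00:00:00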
Example #56
def load_WRF_2d_files_RAIN(file_path=None,
                           filename_pattern=None,
                           filelist=None,
                           name=''):
    ''' Load multiple WRF (or nuWRF) original output files containing 2D \
        fields such as precipitation and surface variables into a Dataset. \
    The dataset can be spatially subset.

    :param file_path: Directory to the NetCDF file to load.
    :type file_path: :mod:`string`

    :param filename_pattern: Path to the NetCDF file to load.
    :type filename_pattern: :mod:`string`

    :param name: (Optional) A name for the loaded dataset.
    :type name: :mod:`string`

    :returns: An OCW Dataset object with the requested variable's data from \
        the NetCDF file.
    :rtype: :class:`dataset.Dataset`

    :raises ValueError:
    '''

    if not filelist:
        WRF_files = []
        for pattern in filename_pattern:
            WRF_files.extend(glob(file_path + pattern))
        WRF_files.sort()
    else:
        with open(filelist) as f:
            WRF_files = [line.rstrip('\n') for line in f]

    file_object_first = netCDF4.Dataset(WRF_files[0])
    lats = file_object_first.variables['XLAT'][0, :]
    lons = file_object_first.variables['XLONG'][0, :]

    times = []
    nfile = len(WRF_files)
    for ifile, file in enumerate(WRF_files):
        print('Reading file ' + str(ifile + 1) + '/' + str(nfile), file)
        file_object = netCDF4.Dataset(file)
        time_struct_parsed = strptime(file[-19:], "%Y-%m-%d_%H:%M:%S")
        for ihour in range(24):
            times.append(
                datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
        temp_value = file_object.variables['RAINC'][:] + file_object.variables[
            'RAINNC'][:]
        if isinstance(temp_value, numpy.ndarray):
            temp_value = ma.array(temp_value,
                                  mask=numpy.zeros(temp_value.shape))
        if ifile == 0:
            values0 = temp_value
        else:
            values0 = ma.concatenate((values0, temp_value))
        file_object.close()
    times = numpy.array(times)
    years = numpy.array([d.year for d in times])
    ncycle = numpy.unique(years).size
    print('ncycle=', ncycle)
    nt, ny, nx = values0.shape
    values = numpy.zeros([nt - ncycle * 24, ny, nx])
    values = ma.array(values, mask=numpy.zeros(values.shape))
    times2 = []
    nt2 = nt // ncycle  # integer division: hourly steps per annual cycle
    # remove the first day in each year
    nt3 = nt2 - 24
    t_index = 0
    for icycle in numpy.arange(ncycle):
        for it in numpy.arange(nt3) + 24:
            values[t_index, :] = values0[icycle * nt2 + it, :] - \
                values0[icycle * nt2 + it - 1, :]
            times2.append(times[icycle * nt2 + it])
            t_index = t_index + 1
    variable_name = 'PREC'
    variable_unit = 'mm/hr'
    times2 = numpy.array(times2)
    return Dataset(lats,
                   lons,
                   times2,
                   values,
                   variable_name,
                   units=variable_unit,
                   name=name)
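
RAINC and RAINNC are accumulated totals in WRF output, which is why the loop above recovers hourly rates by subtracting consecutive time steps. The same de-accumulation on a toy series, in vectorised form (a sketch, not the OCW routine):

import numpy as np

# Toy accumulated precipitation at six hourly steps (monotonically increasing).
accumulated = np.array([0.0, 1.5, 1.5, 4.0, 4.5, 7.0])

# Hourly rate = difference between consecutive accumulated totals.
hourly = np.diff(accumulated)
print(hourly)   # [1.5 0.  2.5 0.5 2.5]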
Example #57
def load_WRF_2d_files(file_path=None,
                      filename_pattern=None,
                      filelist=None,
                      variable_name='T2',
                      name=''):
    ''' Load multiple WRF (or nuWRF) original output files containing 2D \
        fields such as precipitation and surface variables into a Dataset. \
        The dataset can be spatially subset.

    :param file_path: Directory to the NetCDF file to load.
    :type file_path: :mod:`string`

    :param filename_pattern: Path to the NetCDF file to load.
    :type filename_pattern: :mod:`string`

    :param filelist: A list of filenames
    :type filelist: :mod:`string`

    :param variable_name: The variable name to load from the NetCDF file.
    :type variable_name: :mod:`string`

    :param name: (Optional) A name for the loaded dataset.
    :type name: :mod:`string`

    :returns: An OCW Dataset object with the requested variable's data from \
        the NetCDF file.
    :rtype: :class:`dataset.Dataset`

    :raises ValueError:
    '''

    if not filelist:
        WRF_files = []
        for pattern in filename_pattern:
            WRF_files.extend(glob(file_path + pattern))
    else:
        with open(filelist) as f:
            WRF_files = [line.rstrip('\n') for line in f]

    WRF_files.sort()

    file_object_first = netCDF4.Dataset(WRF_files[0])
    lats = file_object_first.variables['XLAT'][0, :]
    lons = file_object_first.variables['XLONG'][0, :]

    times = []
    nfile = len(WRF_files)
    for ifile, file in enumerate(WRF_files):
        print('Reading file ' + str(ifile + 1) + '/' + str(nfile), file)
        file_object = netCDF4.Dataset(file)
        time_struct_parsed = strptime(file[-19:], "%Y-%m-%d_%H:%M:%S")
        for ihour in numpy.arange(24):
            times.append(
                datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
        values0 = file_object.variables[variable_name][:]
        if isinstance(values0, numpy.ndarray):
            values0 = ma.array(values0, mask=numpy.zeros(values0.shape))
        if ifile == 0:
            values = values0
            variable_unit = file_object.variables[variable_name].units
        else:
            values = ma.concatenate((values, values0))
        file_object.close()
    times = numpy.array(times)
    return Dataset(lats,
                   lons,
                   times,
                   values,
                   variable_name,
                   units=variable_unit,
                   name=name)
Example #58
import numpy.ma as ma

a = ma.arange(3)
a[1] = ma.masked                 # mask the middle element
b = ma.arange(2, 5)
print(a)                         # [0 -- 2]
print(b)                         # [2 3 4]
print(ma.concatenate([a, b]))    # [0 -- 2 2 3 4]
Example #59
        '*.pkl')))  #  get a list of the paths to all the VolcNet files
if len(VolcNet_files) == 0:
    raise Exception(
        'No VolcNet files have been found. Perhaps the path is wrong, or '
        'perhaps you only want to use synthetic data, in which case this '
        'section can be removed. Exiting...'
    )

X_1s = []
Y_class_1s = []
Y_loc_1s = []
for VolcNet_file in VolcNet_files:
    X_1, Y_class_1, Y_loc_1 = open_VolcNet_file(
        VolcNet_file, synthetic_ifgs_settings['defo_sources'])
    X_1s.append(X_1)
    Y_class_1s.append(Y_class_1)
    Y_loc_1s.append(Y_loc_1)
X = ma.concatenate(X_1s, axis=0)
Y_class = np.concatenate(Y_class_1s, axis=0)
Y_loc = np.concatenate(Y_loc_1s, axis=0)
del X_1s, Y_class_1s, Y_loc_1s, X_1, Y_class_1, Y_loc_1
plot_data_class_loc_caller(
    X[:30, ],
    Y_class[:30, ],
    Y_loc[:30, ],
    source_names=['dyke', 'sill', 'no def'],
    window_title='02 Sample of Real data'
)  # plot the data in it (note that this can be across multiple windows)
print('Done.  ')

n_augmented_files = int(
    (X.shape[0] * real_ifg_settings['augmentation_factor']) /
    ifg_settings['n_per_file']
Example #60
 def test_masked_data(self):
     new_data = ma.masked_less(self.data2d, 3)
     c_data = add_cyclic_point(new_data)
     r_data = ma.concatenate((self.data2d, self.data2d[:, :1]), axis=1)
     assert_array_equal(c_data, r_data)