def _make_src_affine(src_data_array):
    src_bounds = _get_bounds(src_data_array)
    src_left, src_bottom, src_right, src_top = src_bounds
    src_resolution_x, src_resolution_y = _get_resolution(src_data_array,
                                                         as_tuple=True)
    return Affine.translation(src_left, src_top) * Affine.scale(
        src_resolution_x, src_resolution_y)
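The _get_bounds and _get_resolution helpers are not shown here; as a minimal sketch, the same affine can be built directly once the bounds and resolution are known (the values below are purely illustrative):

from affine import Affine

# hypothetical values standing in for _get_bounds / _get_resolution output
src_left, src_top = 100000.0, 5200000.0            # west and north edges
src_resolution_x, src_resolution_y = 30.0, -30.0   # negative y resolution for a north-up grid

src_affine = Affine.translation(src_left, src_top) * Affine.scale(
    src_resolution_x, src_resolution_y)
print(src_affine)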
Example #2
def array2coords(array, array_transform, nodata):
    '''
    Convert a 2D raster array to a table of pixel-centre coordinates and values.

    Parameters
    ----------
    array : numpy.ndarray
        2D NumPy array to be converted
    array_transform : affine.Affine
        Affine transform of the array. Can be taken from a rasterio profile.
    nodata : float
        Value to be excluded from the output dataset

    Returns
    -------
    pandas.DataFrame
        One row per valid pixel, with columns 'col', 'row', 'z', 'x' and 'y'.
    '''

    T1 = array_transform * Affine.translation(
        0.5, 0.5)  # reference the pixel centre
    rc2xy = lambda r, c: (c, r) * T1
    row, col = np.where(array != nodata)
    z = np.extract(array != nodata, array)

    df_coords = pd.DataFrame({'col': col, 'row': row, 'z': z})
    df_coords['x'] = df_coords.apply(lambda row: rc2xy(row.row, row.col)[0],
                                     axis=1)
    df_coords['y'] = df_coords.apply(lambda row: rc2xy(row.row, row.col)[1],
                                     axis=1)

    return df_coords
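A minimal usage sketch (the array and transform are made up; numpy, pandas and affine must be importable, as the function requires):

import numpy as np
import pandas as pd
from affine import Affine

arr = np.array([[1.0, -9999.0],
                [3.0, 4.0]])
# hypothetical 10 m grid with its upper-left corner at (500000, 4200000)
t = Affine.translation(500000, 4200000) * Affine.scale(10, -10)
df = array2coords(arr, t, nodata=-9999.0)
print(df)  # columns: col, row, z, x, y for the three valid pixels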
Example #3
def pixel_row_col_to_xy(in_row, in_col, in_transform, in_crs):
    '''

    Return x, y of given row, col of a pixel from a specific transform.

    Parameters
    ----------
    in_row: int
        the row number of a pixel
    in_col: int
        the column number of a pixel
    in_transform: rasterio transform
        the transform of a original raster of the pixel
    in_crs: crs or str
        the CRS of the original raster (currently unused by this function)

    Returns
    -------
    (x, y): tuple
        a tuple of the x and y coordinates of the pixel centre
    '''
 
    t = in_transform * Affine.translation(0.5, 0.5) # reference the pixel centre
    rc2xy = lambda r, c: (c, r) * t 
    x,y = rc2xy(in_row, in_col)
    
    return (x, y)
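A minimal usage sketch, assuming affine.Affine is available as Affine, as the function requires (the transform and CRS below are illustrative):

from affine import Affine

t = Affine.translation(-180.0, 90.0) * Affine.scale(0.25, -0.25)  # hypothetical 0.25-degree global grid
x, y = pixel_row_col_to_xy(0, 0, t, 'EPSG:4326')
print(x, y)  # centre of the upper-left pixel: (-179.875, 89.875)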
Example #4
def areagrid(outraster, c = 0.083333001, r = 6371007.2, minx = -180, miny = -90, w = 360, h = 180):
    """
    Generates a global grid in geographic coordinates
    with the area of each gridcell as the value of the cell
    
    Parameters:
        outraster = path to output raster file
        c = cellsize in decimal degrees
        r = radius of earth in desired units (e.g. 6371007.2m for sq. meters)
        minx, miny = the south west coordinate of the raster in degrees
        w, h = the width and height of the raster in degrees
        
    Returns:
        None
    """
    c = float(c)
    r = float(r)

    # make a vector of ones [1,1,1, ... 1] of length equal to the number of cells from west to east.
    X = np.ones(round(w/c))
    # make a vector counting from 0 to the number of cells from south to north. e.g. [0,1,2,...,179] for 1 deg cells.
    Y = np.arange(round(h/c))
    
    # multiply all the numbers in the Y vector by the cell size, 
    # so it extends from 0 to <180 (if the cell size is different than 1 deg)
    # then add the southernmost coordinate (-90deg). This makes a vector of -90 to +90 degrees North
    degN = Y*c + miny
    # convert degrees vector to radians
    radN = degN*np.pi/180.0
    # convert the cell size to radians
    radc = c * np.pi/180.0
    
    # calculate the area of the cell
    # there's some implicit geometry that's been done here already,
    # but basically it averages the width of the top of a cell and the bottom of a cell
    # and multiplies it by the height of the cell (which is constant no matter how far north or south you are)
    # then by the square of the radius
    # since the angles are in radians it works out area correctly
    # you end up with a vector of cell area from south to north
    A = (np.sin(radN+radc/2)-np.sin(radN-radc/2)) * radc * r**2
    
    # the outer product of any vector and a vector of ones just duplicates the first vector into columns in a matrix
    # basically we just copy the latitude vector across from east to west
    M = np.outer(A,X)
    
    cols = round(w/c)
    rows = round(h/c)
    
    # save the matrix as a raster
    with rasterio.open(outraster, 'w',
                       'GTiff',
                       width=cols,
                       height=rows,
                       dtype=M.dtype,
                       crs={'init': 'EPSG:4326'},
                       # anchor the transform at the upper-left corner (minx, miny + h)
                       transform=Affine.translation(minx, miny + rows * c) * Affine.scale(c, -c),
                       count=1) as dst:
        dst.write_band(1, M)
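A minimal usage sketch with the default global 0.083333-degree grid; the output path is hypothetical, and the imports the function relies on (numpy as np, rasterio, Affine) are assumed to be in place:

# writes a one-band GeoTIFF whose pixel values are grid cell areas in square metres
areagrid('cell_area_global.tif')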
Example #5
    def _get_affine_transform(self, bounding_box):
        lng1, lng2 = bounding_box.west, bounding_box.east
        lat1, lat2 = bounding_box.south, bounding_box.north
        x_scale = self.google_static_maps.image_w / (lng2 - lng1)
        y_scale = -self.google_static_maps.image_h / (lat2 - lat1)

        affine_translate = Affine.translation(-lng1, -lat2)
        affine_scale = Affine.scale(x_scale, y_scale)
        # affine_mirror = Affine(1, 0, 0, 0, -1, image_h)
        affine_transform = affine_scale * affine_translate
        return affine_transform
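A standalone sketch of the same math, with made-up bounding box and image dimensions; the resulting affine maps (longitude, latitude) to pixel coordinates with the origin at the image's upper-left (north-west) corner:

from affine import Affine

image_w, image_h = 640, 480                          # hypothetical Static Maps image size
lng1, lng2, lat1, lat2 = -74.1, -73.9, 40.6, 40.8    # west, east, south, north

x_scale = image_w / (lng2 - lng1)
y_scale = -image_h / (lat2 - lat1)
transform = Affine.scale(x_scale, y_scale) * Affine.translation(-lng1, -lat2)

print(transform * (lng1, lat2))  # north-west corner -> (0.0, 0.0)
print(transform * (lng2, lat1))  # south-east corner -> (640.0, 480.0)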
Example #6
    def defineSquare(self, x, y, alpha, length):
        transform0 = rasterio.open(self.imgdir + self.imgWithCoords(x, y),
                                   'r').transform
        x_ras, y_ras = np.round((~transform0) * (x, y))
        transform1 = transform0 * A.translation(x_ras,
                                                y_ras) * A.rotation(alpha)
        corners = [
            transform1 * (0, 0), transform1 * (length, 0),
            transform1 * (length, length), transform1 * (0, length)
        ]
        return corners
    def coordinates(fn=None,
                    meta=None,
                    numpy_array=None,
                    input_crs=None,
                    to_latlong=False):
        '''
        Take a raster file as input and return the centroid coords for each
        of the grid cells as a pair of numpy 2d arrays (longitude, latitude).
        '''
        import rasterio
        import numpy as np
        from affine import Affine
        from pyproj import Proj, transform

        if fn:
            # Read raster
            with rasterio.open(fn) as r:
                T0 = r.transform  # upper-left pixel corner affine transform ('affine' in older rasterio)
                p1 = Proj(r.crs)
                A = r.read(1)  # pixel values

        elif (meta is not None) and (numpy_array is not None):
            A = numpy_array
            T0 = meta['affine']
            p1 = Proj(input_crs) if input_crs is not None else None
        else:
            raise ValueError('check inputs')

        # All rows and columns
        cols, rows = np.meshgrid(np.arange(A.shape[1]), np.arange(A.shape[0]))
        # Get affine transform for pixel centres
        T1 = T0 * Affine.translation(0.5, 0.5)
        # Function to convert pixel row/column index (from 0) to easting/northing at centre
        rc2en = lambda r, c: (c, r) * T1
        # All eastings and northings (there is probably a faster way to do this)
        eastings, northings = np.vectorize(rc2en, otypes=[float,
                                                          float])(rows, cols)

        if not to_latlong:
            return eastings, northings
        elif input_crs is not None:
            # Project all longitudes, latitudes
            longs, lats = transform(p1, p1.to_latlong(), eastings, northings)
            return longs, lats
        else:
            raise ValueError('cannot reproject to latlong without an input_crs')
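A minimal usage sketch for the in-memory branch, assuming coordinates is reachable as a plain function; the grid transform, array and CRS are illustrative:

import numpy as np
from affine import Affine

meta = {'affine': Affine.translation(500000, 4200000) * Affine.scale(30, -30)}  # hypothetical 30 m grid
arr = np.zeros((4, 5))
eastings, northings = coordinates(meta=meta, numpy_array=arr, input_crs='EPSG:32633')
print(eastings.shape, northings.shape)  # both (4, 5)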
def GFMS_extract_by_mask(vrt_file, mask_json):
    """extract GFMS data for a given watershed"""

    #print(vrt_file)
    #print(mask_json['features'][0]['geometry'])
    with rasterio.open(vrt_file) as src:
        no_data = src.nodata
        px, py = src.res
        try:
            out_image, out_transform = mask(
                src, [mask_json['features'][0]['geometry']], crop=True)
        except ValueError:
            # 'Input shapes do not overlap raster.'
            # return empty dataframe
            return pd.DataFrame()

    # extract data
    # extract the values of the masked array
    #print(out_image)
    data = out_image[0]
    # extract the row, columns of the valid values
    row, col = np.where(data != no_data)
    point_value = np.extract(data != no_data, data)
    if len(point_value) == 0:
        # return empty dataframe
        return pd.DataFrame()

    T1 = out_transform * Affine.translation(0.5,
                                            0.5)  # reference the pixel centre
    rc2xy = lambda r, c: (c, r) * T1
    pixel_area_km2 = lambda lon, lat: 111.111 * 111.111 * math.cos(
        lat * 0.01745) * px * py
    d = geopandas.GeoDataFrame({
        'col': col,
        'row': row,
        'intensity': point_value
    })
    # coordinate transformation
    d['lon'] = d.apply(lambda row: rc2xy(row.row, row.col)[0], axis=1)
    d['lat'] = d.apply(lambda row: rc2xy(row.row, row.col)[1], axis=1)
    d['area'] = d.apply(lambda row: pixel_area_km2(row.lon, row.lat), axis=1)

    # geometry
    d['geometry'] = d.apply(lambda row: Point(row['lon'], row['lat']), axis=1)
    return d
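A minimal usage sketch; the VRT path and the GeoJSON feature collection are hypothetical, and the snippet's own imports (rasterio, mask, numpy as np, pandas as pd, geopandas, math, Affine, Point) are assumed to be in place:

watershed_geojson = {
    'features': [{'geometry': {'type': 'Polygon',
                               'coordinates': [[[73.0, 34.0], [74.0, 34.0],
                                                [74.0, 35.0], [73.0, 35.0],
                                                [73.0, 34.0]]]}}]
}
flood_points = GFMS_extract_by_mask('Flood_byStor_2021060100.vrt', watershed_geojson)
if not flood_points.empty:
    print(flood_points[['lon', 'lat', 'intensity', 'area']].head())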
Example #9
    def newImage(self, x, y, alpha, length):
        square = self.defineSquare(x, y, alpha, length)
        imgs = self.imagesFromSquare(square)
        if len(imgs) < 1:
            return 0
        datasets = []
        road_datasets = []
        for filename in imgs:
            datasets.append(rasterio.open(self.imgdir + filename, 'r'))
            road_datasets.append(
                rasterio.open(self.roaddir + self.roadName(filename), 'r'))

        if len(datasets) > 1:
            mergedImages, src_transform = rasterio.merge.merge(datasets)
            mergedRoads = rasterio.merge.merge(road_datasets)[0]
        else:
            mergedImages = datasets[0].read()
            mergedRoads = road_datasets[0].read()
            src_transform = datasets[0].transform

        #define destination transform from the upper left corner of the square
        translationVector = ~src_transform * square[0]
        dst_transform = src_transform * A.translation(
            *translationVector) * A.rotation(alpha)

        dest = np.zeros((mergedImages.shape[0], length, length),
                        mergedImages.dtype)
        road_dest = np.zeros((length, length), mergedRoads.dtype)

        reproject(mergedImages,
                  dest,
                  src_transform=src_transform,
                  src_crs=datasets[0].crs,
                  dst_transform=dst_transform,
                  dst_crs=datasets[0].crs,
                  resampling=Resampling.nearest)

        reproject(mergedRoads,
                  road_dest,
                  src_transform=src_transform,
                  src_crs=datasets[0].crs,
                  dst_transform=dst_transform,
                  dst_crs=datasets[0].crs,
                  resampling=Resampling.nearest)

        return [dest, road_dest, dst_transform]
Example #10
def example_reproject():
    import idfpy

    from matplotlib import pyplot as plt
    from rasterio import Affine
    from rasterio.crs import CRS
    from rasterio.warp import reproject, Resampling
    import numpy as np

    with idfpy.open('bxk1-d-ck.idf') as src:
        a = src.read(masked=True)
        nr, nc = src.header['nrow'], src.header['ncol']
        dx, dy = src.header['dx'], src.header['dy']
        src_transform = Affine.from_gdal(*src.geotransform)

    # define new grid transform (same extent, 10 times resolution)
    dst_transform = Affine.translation(src_transform.c, src_transform.f)
    dst_transform *= Affine.scale(dx / 10., -dy / 10.)

    # define coordinate system (here RD New)
    src_crs = CRS.from_epsg(28992)

    # initialize new data array
    b = np.empty((10 * nr, 10 * nc))

    # reproject using Rasterio
    reproject(
        source=a,
        destination=b,
        src_transform=src_transform,
        dst_transform=dst_transform,
        src_crs=src_crs,
        dst_crs=src_crs,
        resampling=Resampling.bilinear,
    )

    # result as masked array
    b = np.ma.masked_equal(b, a.fill_value)

    # plot images
    fig, axes = plt.subplots(nrows=2, ncols=1)
    axes[0].imshow(a.filled(np.nan))
    axes[0].set_title('bxk1 original')
    axes[1].imshow(b.filled(np.nan))
    axes[1].set_title('bxk1 resampled')
    plt.show()
Example #12
def update(pr_10m, size_10m: Tuple, model_output: np.ndarray, xmi: int, ymi: int):
    """
    This method creates the proper georeferencing for the output image.
    :param data: The raster file for 10m resolution.

    """
    # Here based on the params.json file, the output image dimension will be calculated.
    out_dims = model_output.shape[2]

    new_transform = pr_10m["transform"] * A.translation(xmi, ymi)
    pr_10m.update(dtype=rasterio.float32)
    pr_10m.update(driver="GTiff")
    pr_10m.update(width=size_10m[1])
    pr_10m.update(height=size_10m[0])
    pr_10m.update(count=out_dims)
    pr_10m.update(transform=new_transform)
    return pr_10m
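A minimal usage sketch, with a made-up rasterio profile dict and model output; rasterio and the A alias for Affine are assumed to be imported as the function requires:

import numpy as np
import rasterio
from rasterio import Affine as A

profile = {'transform': A.translation(600000, 5090220) * A.scale(10, -10),
           'crs': 'EPSG:32633', 'driver': 'GTiff', 'count': 1,
           'width': 10980, 'height': 10980, 'dtype': rasterio.uint16}
prediction = np.zeros((256, 256, 6), dtype=np.float32)  # hypothetical model output

out_profile = update(profile, (256, 256), prediction, xmi=128, ymi=128)
print(out_profile['transform'], out_profile['count'])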
Example #14
def main():
    
    for i in range(2):
        dat = nc.Dataset(innc[i])
        print(dat)
        
        #sum monthly values for 2010
        dat2010 = dat.variables[varname[i]][600:611,:,:].sum(0)
        cols = 720
        rows = 360
        d = 1/2.0
    
        with rasterio.open(outras[i],'w',
                       'GTiff',
                       width=cols,
                       height=rows,
                       dtype=dat2010.dtype,
                       crs={'init': 'EPSG:4326'},
                       transform=A.translation(-cols*d/2, rows*d/2) * A.scale(d, -d),
                       count=1) as dst:
            dst.write_band(1,dat2010)
    def update(data, size_10m: Tuple, model_output: np.ndarray, xmi: int,
               ymi: int):
        """
        This method creates the proper georeferencing for the output image.

        Args:

            data: The raster file for 10m resolution.
        """
        # Here based on the params.json file, the output image dimension will be calculated.
        out_dims = model_output.shape[2]

        with rasterio.open(data) as d_s:
            p_r = d_s.profile
        new_transform = p_r["transform"] * A.translation(xmi, ymi)
        p_r.update(dtype=rasterio.uint16)
        p_r.update(driver="GTiff")
        p_r.update(width=size_10m[1])
        p_r.update(height=size_10m[0])
        p_r.update(count=out_dims)
        p_r.update(transform=new_transform)
        return p_r
Example #16
def content_within_shape(content: np.ndarray, trans: Affine,
                         shape: sgp.LinearRing):
    """

    :param content: data being displayed on the screen
    :param trans: affine transform between content array indices and screen coordinates
    :param shape: LinearRing in screen coordinates (e.g. mercator meters)
    :return: (index_mask, values): row/column index arrays into ``content`` for the pixels inside the shape, and the corresponding content values
    """
    # Get the bounds so we can limit how big our rasterize boolean array actually is
    inv_trans = ~trans
    # convert bounding box to content coordinates
    # (0, 0) image index is upper-left origin of data (needs more work if otherwise)
    nx, ny, mx, my = shape.bounds  # minx,miny,maxx,maxy
    nx, my = inv_trans * (nx, my)
    mx, ny = inv_trans * (mx, ny)
    nx, my = int(nx), int(my)
    mx, ny = int(np.ceil(mx)), int(np.ceil(ny))

    # subset the content (ny is the higher *index*, my is the lower *index*)
    w = (mx - nx) + 1
    h = (ny - my) + 1

    # Make our linear ring a properly oriented shapely polygon
    shape = sgp.Polygon(shape)
    shape = sgp.orient(shape)
    # create a transform that is shifted to where the polygon is
    offset_trans = trans * Affine.translation(nx, my)

    # Get boolean mask for where the polygon is and get an index mask of those positions
    index_mask = np.nonzero(
        rasterize([shape],
                  out_shape=(h, w),
                  transform=offset_trans,
                  default_value=1).astype(np.bool_))
    # translate the mask indexes back to the original data array coordinates (original index mask is read-only)
    index_mask = (index_mask[0] + my, index_mask[1] + nx)
    return index_mask, content[index_mask]
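A minimal usage sketch, assuming rasterize comes from rasterio.features and sgp is shapely.geometry.polygon, as the function expects; the transform and ring are made up (a north-up grid, one unit per pixel, top edge at y = 10):

import numpy as np
from affine import Affine
import shapely.geometry.polygon as sgp

content = np.arange(100, dtype=np.float32).reshape(10, 10)
trans = Affine.translation(0, 10) * Affine.scale(1, -1)   # row 0 sits at the top (y = 10)
ring = sgp.LinearRing([(2, 2), (7, 2), (7, 7), (2, 7)])   # square in "screen" coordinates

index_mask, values = content_within_shape(content, trans, ring)
print(values.shape)  # values of the content pixels that fall inside the square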
Example #17
def export_array(modelgrid, filename, a, nodata=-9999,
                 fieldname='value',
                 **kwargs):
    """
    Write a numpy array to Arc Ascii grid or shapefile with the model
    reference.

    Parameters
    ----------
    filename : str
        Path of output file. Export format is determined by
        file extension.
        '.asc'  Arc Ascii grid
        '.tif'  GeoTIFF (requires rasterio package)
        '.shp'  Shapefile
    a : 2D numpy.ndarray
        Array to export
    nodata : scalar
        Value to assign to np.nan entries (default -9999)
    fieldname : str
        Attribute field name for array values (shapefile export only).
        (default 'values')
    kwargs:
        keyword arguments to np.savetxt (ascii)
        rasterio.open (GeoTIFF)
        or flopy.export.shapefile_utils.write_grid_shapefile2

    Notes
    -----
    Rotated grids will either be unrotated prior to export,
    using scipy.ndimage.rotate (Arc Ascii format), or have the rotation
    included in their transform property (GeoTiff format). In either case
    the pixels will be displayed in the (unrotated) projected geographic
    coordinate system, so the pixels will no longer align exactly with the
    model grid (as displayed from a shapefile, for example). A key difference
    between Arc Ascii and GeoTiff (besides disk usage) is that the
    unrotated Arc Ascii will have a different grid size, whereas the GeoTiff
    will have the same number of rows and columns as the original.

    """

    if filename.lower().endswith(".asc"):
        if len(np.unique(modelgrid.delr)) != len(np.unique(modelgrid.delc)) != 1 \
                or modelgrid.delr[0] != modelgrid.delc[0]:
            raise ValueError('Arc ascii arrays require a uniform grid.')

        xoffset, yoffset = modelgrid.xoffset, modelgrid.yoffset
        cellsize = modelgrid.delr[0] # * self.length_multiplier
        fmt = kwargs.get('fmt', '%.18e')
        a = a.copy()
        a[np.isnan(a)] = nodata
        if modelgrid.angrot != 0:
            try:
                from scipy.ndimage import rotate
                a = rotate(a, modelgrid.angrot, cval=nodata)
                height_rot, width_rot = a.shape
                xmin, ymin, xmax, ymax = modelgrid.extent
                dx = (xmax - xmin) / width_rot
                dy = (ymax - ymin) / height_rot
                cellsize = np.max((dx, dy))
                xoffset, yoffset = xmin, ymin
            except ImportError:
                print('scipy package required to export rotated grid.')

        filename = '.'.join(
            filename.split('.')[:-1]) + '.asc'  # enforce .asc ending
        nrow, ncol = a.shape
        a[np.isnan(a)] = nodata
        txt = 'ncols  {:d}\n'.format(ncol)
        txt += 'nrows  {:d}\n'.format(nrow)
        txt += 'xllcorner  {:f}\n'.format(xoffset)
        txt += 'yllcorner  {:f}\n'.format(yoffset)
        txt += 'cellsize  {}\n'.format(cellsize)
        # ensure that nodata fmt consistent w values
        txt += 'NODATA_value  {}\n'.format(fmt) % (nodata)
        with open(filename, 'w') as output:
            output.write(txt)
        with open(filename, 'ab') as output:
            np.savetxt(output, a, **kwargs)
        print('wrote {}'.format(filename))

    elif filename.lower().endswith(".tif"):
        if len(np.unique(modelgrid.delr)) != len(np.unique(modelgrid.delc)) != 1 \
                or modelgrid.delr[0] != modelgrid.delc[0]:
            raise ValueError('GeoTIFF export requires a uniform grid.')
        try:
            import rasterio
            from rasterio import Affine
        except ImportError:
            print('GeoTIFF export requires the rasterio package.')
            return
        dxdy = modelgrid.delc[0] # * self.length_multiplier
        trans = Affine.translation(modelgrid.xoffset, modelgrid.yoffset) * \
                Affine.rotation(modelgrid.angrot) * \
                Affine.scale(dxdy, -dxdy)

        # third dimension is the number of bands
        a = a.copy()
        if len(a.shape) == 2:
            a = np.reshape(a, (1, a.shape[0], a.shape[1]))
        if a.dtype.name == 'int64':
            a = a.astype('int32')
            dtype = rasterio.int32
        elif a.dtype.name == 'int32':
            dtype = rasterio.int32
        elif a.dtype.name == 'float64':
            dtype = rasterio.float64
        elif a.dtype.name == 'float32':
            dtype = rasterio.float32
        else:
            msg = 'ERROR: invalid dtype "{}"'.format(a.dtype.name)
            raise TypeError(msg)

        meta = {'count': a.shape[0],
                'width': a.shape[2],
                'height': a.shape[1],
                'nodata': nodata,
                'dtype': dtype,
                'driver': 'GTiff',
                'crs': modelgrid.proj4,
                'transform': trans
                }
        meta.update(kwargs)
        with rasterio.open(filename, 'w', **meta) as dst:
            dst.write(a)
        print('wrote {}'.format(filename))

    elif filename.lower().endswith(".shp"):
        from ..export.shapefile_utils import write_grid_shapefile2
        epsg = kwargs.get('epsg', None)
        prj = kwargs.get('prj', None)
        if epsg is None and prj is None:
            epsg = modelgrid.epsg
        write_grid_shapefile2(filename, modelgrid, array_dict={fieldname: a},
                              nan_val=nodata,
                              epsg=epsg, prj=prj)
Example #18
import os

import numpy
import rasterio
from rasterio import Affine as A
# note: RESAMPLING and rasterio.drivers() are the pre-1.0 rasterio API
# (Resampling and rasterio.Env() in rasterio >= 1.0)
from rasterio.warp import reproject, RESAMPLING

tempdir = '/tmp'
tiffname = os.path.join(tempdir, 'example.tif')

with rasterio.drivers():

    # Consider a 512 x 512 raster centered on 0 degrees E and 0 degrees N
    # with each pixel covering 15".
    rows, cols = src_shape = (512, 512)
    dpp = 1.0/240 # decimal degrees per pixel
    # The following is equivalent to 
    # A(dpp, 0, -cols*dpp/2, 0, -dpp, rows*dpp/2).
    src_transform = A.translation(-cols*dpp/2, rows*dpp/2) * A.scale(dpp, -dpp)
    src_crs = {'init': 'EPSG:4326'}
    source = numpy.ones(src_shape, numpy.uint8)*255

    # Prepare to reproject this rasters to a 1024 x 1024 dataset in
    # Web Mercator (EPSG:3857) with origin at -8928592, 2999585.
    dst_shape = (1024, 1024)
    dst_transform = A.from_gdal(-237481.5, 425.0, 0.0, 237536.4, 0.0, -425.0)
    dst_transform = dst_transform.to_gdal()
    dst_crs = {'init': 'EPSG:3857'}
    destination = numpy.zeros(dst_shape, numpy.uint8)

    reproject(
        source, 
        destination, 
        src_transform=src_transform,
Example #20
def reproject_grids(src_array, dst_array, metadata_src, metadata_dst):
    """
    Reproject precipitation fields to the domain of another precipitation field.

    Parameters
    ----------
    src_array: array-like
        Three-dimensional array of shape (t, x, y) containing a time series of
        precipitation fields. These precipitation fields will be reprojected.
    dst_array: array-like
        Array containing a precipitation field or a time series of precipitation
        fields. The src_array will be reprojected to the domain of
        dst_array.
    metadata_src: dict
        Metadata dictionary containing the projection, x- and ypixelsize, x1 and
        y2 attributes of the src_array as described in the documentation of
        :py:mod:`pysteps.io.importers`.
    metadata_dst: dict
        Metadata dictionary containing the projection, x- and ypixelsize, x1 and
        y2 attributes of the dst_array.

    Returns
    -------
    r_rprj: array-like
        Three-dimensional array of shape (t, x, y) containing the precipitation
        fields of src_array, but reprojected to the domain of dst_array.
    metadata: dict
        Metadata dictionary containing the projection, x- and ypixelsize, x1 and
        y2 attributes of the reprojected src_array.
    """

    if not RASTERIO_IMPORTED:
        raise MissingOptionalDependency(
            "rasterio package is required for the reprojection module, but it is "
            "not installed"
        )

    # Extract the grid info from src_array
    src_crs = metadata_src["projection"]
    x1_src = metadata_src["x1"]
    y2_src = metadata_src["y2"]
    xpixelsize_src = metadata_src["xpixelsize"]
    ypixelsize_src = metadata_src["ypixelsize"]
    src_transform = A.translation(float(x1_src), float(y2_src)) * A.scale(
        float(xpixelsize_src), float(-ypixelsize_src)
    )

    # Extract the grid info from dst_array
    dst_crs = metadata_dst["projection"]
    x1_dst = metadata_dst["x1"]
    y2_dst = metadata_dst["y2"]
    xpixelsize_dst = metadata_dst["xpixelsize"]
    ypixelsize_dst = metadata_dst["ypixelsize"]
    dst_transform = A.translation(float(x1_dst), float(y2_dst)) * A.scale(
        float(xpixelsize_dst), float(-ypixelsize_dst)
    )

    # Initialise the reprojected array
    r_rprj = np.zeros((src_array.shape[0], dst_array.shape[-2], dst_array.shape[-1]))

    # For every timestep, reproject the precipitation field of src_array to
    # the domain of dst_array
    if metadata_src["yorigin"] != metadata_dst["yorigin"]:
        src_array = src_array[:, ::-1, :]

    for i in range(src_array.shape[0]):
        reproject(
            src_array[i, :, :],
            r_rprj[i, :, :],
            src_transform=src_transform,
            src_crs=src_crs,
            dst_transform=dst_transform,
            dst_crs=dst_crs,
            resampling=Resampling.nearest,
            dst_nodata=np.nan,
        )

    # Update the metadata
    metadata = metadata_src.copy()

    for key in [
        "projection",
        "yorigin",
        "xpixelsize",
        "ypixelsize",
        "x1",
        "x2",
        "y1",
        "y2",
        "cartesian_unit",
    ]:
        metadata[key] = metadata_dst[key]

    return r_rprj, metadata
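A minimal sketch of how the source grid transform is assembled from the pysteps-style metadata (values are made up); a full call additionally needs the projection strings, a destination grid and the precipitation arrays themselves:

from rasterio import Affine as A

metadata_src = {"x1": 0.0, "y2": 1200000.0, "xpixelsize": 1000.0, "ypixelsize": 1000.0}
src_transform = A.translation(float(metadata_src["x1"]), float(metadata_src["y2"])) * A.scale(
    float(metadata_src["xpixelsize"]), float(-metadata_src["ypixelsize"])
)
print(src_transform)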
Example #21
def main():
    # load and average netcdfs
    arr = None
    for f in NETCDFS:
        ds = nc.Dataset(f,'r')
        if arr is None:
            print(ds.variables.keys())
            arr = np.asarray(ds.variables['lwe_thickness']) / len(NETCDFS)
        else:
            arr += np.asarray(ds.variables['lwe_thickness']) / len(NETCDFS)

    # multiply by scale factor
    ds = nc.Dataset(SCALER,'r')
    print(ds.variables.keys())
    scaler = np.asarray(ds.variables['SCALE_FACTOR'])
    print(scaler.shape)
    arr = arr*scaler

    # extract error grids
    m_err = np.asarray(ds.variables['MEASUREMENT_ERROR'])
    l_err = np.asarray(ds.variables['LEAKAGE_ERROR'])
    t_err = np.sqrt(m_err*m_err + l_err*l_err)

    # compute slopes, coefficients
    print(arr.shape)
    slope_arr = np.zeros(arr.shape[1:])
    r2_arr = np.zeros(arr.shape[1:])
    p_arr = np.zeros(arr.shape[1:])
    print(slope_arr.shape)
    time = np.arange(arr.shape[0])
    print(time.shape)
    for i in range(arr.shape[1]):
        for j in range(arr.shape[2]):
            b1, b0, r2, p, sd = stats.linregress(arr[:,i,j], time)
            slope_arr[i,j]=b1
            r2_arr[i,j]=r2
            p_arr[i,j]=p

    # dump to csv
    np.savetxt(SLOPE,slope_arr,delimiter=',')
    np.savetxt(R2,r2_arr,delimiter=',')
    np.savetxt(P,p_arr,delimiter=',')
    np.savetxt(ERR,t_err,delimiter=',')

    # rescale to WGS84 and dump to tif bands
    rows = arr.shape[1]
    cols = arr.shape[2]
    d = 1
    transform = A.translation(-cols*d/2,-rows*d/2) * A.scale(d,d)
    print(transform)
    slope_arr = np.roll(slope_arr.astype(rio.float64),180)
    r2_arr = np.roll(r2_arr.astype(rio.float64),180)
    p_arr = np.roll(p_arr.astype(rio.float64),180)
    t_err = np.roll(t_err.astype(rio.float64),180)

    with rio.open(OUT, 'w',
                  'GTiff',
                  width=cols,
                  height=rows,
                  dtype=rio.float64,
                  crs={'init': 'EPSG:4326'},
                  transform=transform,
                  count=4) as out:
        out.write_band(1, slope_arr)
        out.write_band(2, r2_arr)
        out.write_band(3, p_arr)
        out.write_band(4, t_err)
Example #22
def __make_rastertiles_Z__(src_dataset: rio.DatasetReader, world_size: float,
                           tile_size: int, zoom: int) -> list:

    # get bands
    src_bands = src_dataset.read()

    # structure for store tiles
    tiles = []

    # get bounds
    src_bbox = src_dataset.bounds
    src_bbox = [src_bbox.left, src_bbox.top, src_bbox.right, src_bbox.bottom]

    # get pixel size
    pixel_size = __pixel_size__(world_size, tile_size, zoom)

    # get all quadrant
    quadrants = __make_quadrants__(src_bbox, zoom, world_size, 1)

    for xmin, ymin, xmax, ymax in quadrants:

        # get bbox of quadrant
        Xmin, Ymin, Xmax, Ymax = list(
            __tile_world_bbox__(xmin, ymin, zoom, world_size, tile_size))

        # get pixel size
        pixel_size = __pixel_size__(world_size, tile_size, zoom)

        # make dst shape (3, tsize, tsize); 3 is fixed because it's an RGB image
        dst_shape = (3, tile_size, tile_size)

        # make transform with orig (Xmin, Ymin) and scale (psize, -psize)
        dst_transform = A.translation(Xmin, Ymin) * A.scale(
            pixel_size, -pixel_size)

        dtype = src_dataset.dtypes[0]

        if dtype == rio.uint8:
            datatype = 1
        elif dtype == rio.uint16:
            datatype = 2
        elif dtype == rio.int16:
            datatype = 3
        elif dtype == rio.uint32:
            datatype = 4
        elif dtype == rio.int32:
            datatype = 5
        elif dtype == rio.float32:
            datatype = 6
        elif dtype == rio.float64:
            datatype = 7
        else:
            assert False

        # init dst bands
        dst_bands = np.zeros(dst_shape, dtype=dtype)

        count = dst_bands.shape[0]
        nodata = 0 if src_dataset.nodata is None else src_dataset.nodata

        # make reprojection for each bands
        for i in range(count):

            try:

                reproject(source=src_bands[i],
                          destination=dst_bands[i],
                          src_transform=src_dataset.transform,
                          src_crs=src_dataset.crs,
                          src_nodata=nodata,
                          dst_transform=dst_transform,
                          dst_crs=src_dataset.crs)

            except IndexError:
                continue

        gdal_bands = [{
            'data': dst_bands[x],
            'nodata_value': nodata
        } for x in range(count)]

        gdal_raster = GDALRaster({
            'srid': WEB_MERCATOR_SRID,
            'width': tile_size,
            'height': tile_size,
            'datatype': datatype,
            'nr_of_bands': count,
            'origin': [Xmin, Ymin],
            'scale': [pixel_size, -pixel_size],
            'bands': gdal_bands
        })

        tiles.append((zoom, xmin, ymin, gdal_raster))

    del src_bands

    # return structure
    return tiles
Example #23
def __make_imagetiles_Z__(src_dataset: rio.DatasetReader, world_size: float,
                          tile_size: int, zoom: int) -> list:

    # structure for store tiles
    tiles = []

    # get bounding box
    src_bbox = src_dataset.bounds
    src_bbox = [src_bbox.left, src_bbox.top, src_bbox.right, src_bbox.bottom]

    # get pixel size
    pixel_size = __pixel_size__(world_size, tile_size, zoom)

    # get all quadrant
    quadrants = __make_quadrants__(src_bbox, zoom, world_size, 1)

    for xmin, ymin, xmax, ymax in quadrants:

        # get bbox of quadrant
        Xmin, Ymin, Xmax, Ymax = list(
            __tile_world_bbox__(xmin, ymin, zoom, world_size, tile_size))

        # get pixel size
        pixel_size = __pixel_size__(world_size, tile_size, zoom)

        # make dst shape (3, tsize, tsize); 3 is fixed because it's an RGB image
        dst_shape = (3, tile_size, tile_size)

        # make transform with orig (Xmin, Ymin) and scale (psize, -psize)
        dst_transform = A.translation(Xmin, Ymin) * A.scale(
            pixel_size, -pixel_size)

        # init dst bands
        dst_bands = np.zeros(dst_shape, dtype=np.uint8)

        # make reprojection for each bands
        for i in range(3):

            reproject(source=src_dataset.read(i + 1),
                      destination=dst_bands[i],
                      src_transform=src_dataset.transform,
                      src_crs=src_dataset.crs,
                      dst_transform=dst_transform,
                      dst_crs=src_dataset.crs)

        # switch from channel-first to channel-last
        dst_bands = np.rollaxis(dst_bands, 0, 3)

        # make alpha band for no data
        dst_sum = np.sum(dst_bands, axis=2)
        alpha = np.zeros((tile_size, tile_size, 3))
        alpha[dst_sum > 0] = np.array([255, 255, 255])

        # convert alpha as pilimage
        pil_alpha = Image.fromarray(alpha.astype(dtype=np.uint8)).convert('L')

        # convert dst_bands as pilimage & put alpha
        pil_tile = Image.fromarray(dst_bands)
        pil_tile.putalpha(pil_alpha)

        # write in a buffer as bytes
        buffer = BytesIO()
        pil_tile.save(fp=buffer, format="PNG")

        # push all in ret structure
        tiles.append((zoom, xmin, ymin, buffer))

    # return structure
    return tiles
Example #24
def export_array(modelgrid, filename, a, nodata=-9999,
                 fieldname='value',
                 **kwargs):
    """Write a numpy array to Arc Ascii grid
    or shapefile with the model reference.
    Parameters
    ----------
    filename : str
        Path of output file. Export format is determined by
        file extension.
        '.asc'  Arc Ascii grid
        '.tif'  GeoTIFF (requires rasterio package)
        '.shp'  Shapefile
    a : 2D numpy.ndarray
        Array to export
    nodata : scalar
        Value to assign to np.nan entries (default -9999)
    fieldname : str
        Attribute field name for array values (shapefile export only).
        (default 'values')
    kwargs:
        keyword arguments to np.savetxt (ascii)
        rasterio.open (GeoTIFF)
        or flopy.export.shapefile_utils.write_grid_shapefile2

    Notes
    -----
    Rotated grids will either be unrotated prior to export,
    using scipy.ndimage.rotate (Arc Ascii format), or have the rotation
    included in their transform property (GeoTiff format). In either case
    the pixels will be displayed in the (unrotated) projected geographic coordinate system,
    so the pixels will no longer align exactly with the model grid
    (as displayed from a shapefile, for example). A key difference between
    Arc Ascii and GeoTiff (besides disk usage) is that the
    unrotated Arc Ascii will have a different grid size, whereas the GeoTiff
    will have the same number of rows and columns as the original.
    """

    if filename.lower().endswith(".asc"):
        if len(np.unique(modelgrid.delr)) != len(np.unique(modelgrid.delc)) != 1 \
                or modelgrid.delr[0] != modelgrid.delc[0]:
            raise ValueError('Arc ascii arrays require a uniform grid.')

        xoffset, yoffset = modelgrid.xoffset, modelgrid.yoffset
        cellsize = modelgrid.delr[0] # * self.length_multiplier
        fmt = kwargs.get('fmt', '%.18e')
        a = a.copy()
        a[np.isnan(a)] = nodata
        if modelgrid.angrot != 0:
            try:
                from scipy.ndimage import rotate
                a = rotate(a, modelgrid.angrot, cval=nodata)
                height_rot, width_rot = a.shape
                xmin, ymin, xmax, ymax = modelgrid.extent
                dx = (xmax - xmin) / width_rot
                dy = (ymax - ymin) / height_rot
                cellsize = np.max((dx, dy))
                # cellsize = np.cos(np.radians(self.rotation)) * cellsize
                xoffset, yoffset = xmin, ymin
            except ImportError:
                print('scipy package required to export rotated grid.')
                pass

        filename = '.'.join(
            filename.split('.')[:-1]) + '.asc'  # enforce .asc ending
        nrow, ncol = a.shape
        a[np.isnan(a)] = nodata
        txt = 'ncols  {:d}\n'.format(ncol)
        txt += 'nrows  {:d}\n'.format(nrow)
        txt += 'xllcorner  {:f}\n'.format(xoffset)
        txt += 'yllcorner  {:f}\n'.format(yoffset)
        txt += 'cellsize  {}\n'.format(cellsize)
        # ensure that nodata fmt consistent w values
        txt += 'NODATA_value  {}\n'.format(fmt) % (nodata)
        with open(filename, 'w') as output:
            output.write(txt)
        with open(filename, 'ab') as output:
            np.savetxt(output, a, **kwargs)
        print('wrote {}'.format(filename))

    elif filename.lower().endswith(".tif"):
        if len(np.unique(modelgrid.delr)) != len(np.unique(modelgrid.delc)) != 1 \
                or modelgrid.delr[0] != modelgrid.delc[0]:
            raise ValueError('GeoTIFF export requires a uniform grid.')
        try:
            import rasterio
            from rasterio import Affine
        except ImportError:
            print('GeoTIFF export requires the rasterio package.')
            return
        dxdy = modelgrid.delc[0] # * self.length_multiplier
        trans = Affine.translation(modelgrid.xoffset, modelgrid.yoffset) * \
                Affine.rotation(modelgrid.angrot) * \
                Affine.scale(dxdy, -dxdy)

        # third dimension is the number of bands
        a = a.copy()
        if len(a.shape) == 2:
            a = np.reshape(a, (1, a.shape[0], a.shape[1]))
        if a.dtype.name == 'int64':
            a = a.astype('int32')
            dtype = rasterio.int32
        elif a.dtype.name == 'int32':
            dtype = rasterio.int32
        elif a.dtype.name == 'float64':
            dtype = rasterio.float64
        elif a.dtype.name == 'float32':
            dtype = rasterio.float32
        else:
            msg = 'ERROR: invalid dtype "{}"'.format(a.dtype.name)
            raise TypeError(msg)

        meta = {'count': a.shape[0],
                'width': a.shape[2],
                'height': a.shape[1],
                'nodata': nodata,
                'dtype': dtype,
                'driver': 'GTiff',
                'crs': modelgrid.proj4,
                'transform': trans
                }
        meta.update(kwargs)
        with rasterio.open(filename, 'w', **meta) as dst:
            dst.write(a)
        print('wrote {}'.format(filename))

    elif filename.lower().endswith(".shp"):
        from ..export.shapefile_utils import write_grid_shapefile2
        epsg = kwargs.get('epsg', None)
        prj = kwargs.get('prj', None)
        if epsg is None and prj is None:
            epsg = modelgrid.epsg
        write_grid_shapefile2(filename, modelgrid, array_dict={fieldname: a},
                              nan_val=nodata,
                              epsg=epsg, prj=prj)
Example #25
def generate_SWATweather(WatershedPath,
                         PRISMfolderPath,
                         OutputFolder,
                         yearstart,
                         yearend,
                         Google_API=None,
                         shapeout=True):
    '''
    Create daily time series of precipitation and temperature (max and min) from the PRISM pixels
        within the watershed. The center point of each pixel is treated as a weather station for
        the SWAT model.

        *** 1. This function does not check the continuity of the time series. Users are advised to use the
            PRISMdownload function to download PRISM data, which guarantees continuity.
        *** 2. If zip files exist, the function will unzip them; if no zipped or unzipped files are found, the
            function will quit, and you need to use PRISMdownload to download the data for your study period.

    Parameters:
        WatershedPath: the path of the watershed shapefile;
        PRISMfolderPath: the PRISM parent folder created by the PRISMdownload function, or one following the structure
                         "./PRISM/daily/ppt or tmin or tmax";
        OutputFolder: the path of the folder storing tables in the format that SWAT requires;
        yearstart: the beginning year of the time series;
        yearend: the end year of the time series;
        Google_API: the API key for the Google Maps Elevation API. Users are advised to get their own key from Google
                    for free; otherwise the function will use the author's key (which can be disabled at any time);
        shapeout: if True, also write the weather station points to a shapefile ("weatherstations.shp").
    '''
    import os
    import rasterio
    from rasterio.mask import mask
    import geopandas as gpd
    from rasterio import Affine
    import numpy as np
    import zipfile

    Google_API = "AIzaSyC61eXnJkcWHqCab4r0VzFfDTldR1dYYZU" if Google_API is None else Google_API
    pptpath = PRISMfolderPath + "/daily/ppt"
    tmaxpath = PRISMfolderPath + "/daily/tmax"
    tminpath = PRISMfolderPath + "/daily/tmin"
    path_list = [pptpath, tmaxpath, tminpath]

    tables_ls = []
    os.chdir(OutputFolder)  #save files in the output folder
    for vari_path in path_list:
        file_rasters = [
            f for f in os.listdir(vari_path) if (f.endswith('.bil') and any(
                str(x) in f for x in range(yearstart, yearend + 1)))
        ]
        if len(file_rasters) < dayofyears(yearstart, yearend):
            ziptest = [
                f for f in os.listdir(vari_path)
                if (f.endswith('.zip') and any(
                    str(x) in f for x in range(yearstart, yearend + 1)))
            ]
            print(len(ziptest))
            if len(ziptest) > 0:
                for fn in ziptest:
                    if os.path.exists(vari_path + '/' + fn[:-3] + "bil"):
                        continue
                    else:
                        fn = vari_path + "/" + fn
                        print(fn)
                        zfile = zipfile.ZipFile(fn)
                        zfile.extractall(vari_path)
                        print("DONE")
                        zfile.close()
                file_rasters = [
                    f for f in os.listdir(vari_path) if f.endswith('.bil')
                ]
                print(len(file_rasters))
            if len(file_rasters) < dayofyears(yearstart, yearend):
                print("Missing some dates' PRISM data.")
                indi = input("\nWant to download missing data? (Y/N)")
                if indi == "Y":
                    email = input(
                        "Enter an email address to download missing PRISM data."
                    )
                    PRISMdownload(email, 'd', [vari_path[vari_path.rfind("/")+1:]], yearstart , yearend, \
                            savedir=PRISMfolderPath[:PRISMfolderPath.rfind("/")], unzip=True, keepzip=True)
                    file_rasters = [
                        f for f in os.listdir(vari_path)
                        if (f.endswith('.bil') and any(
                            str(x) in f
                            for x in range(yearstart, yearend + 1)))
                    ]
                #if vari_path[vari_path.rfind('/')+1:]=='ppt':
                #    raster_year = list(set([int(year[23:27]) for year in file_rasters]))
                #else:
                #    raster_year = list(set([int(year[24:28]) for year in file_rasters]))
                #if raster_year != range(yearstart, yearend+1):
                #    missyear = [i for i in range(yearstart,yearend+1) if i not in raster_year]
                #    print "Found gap years", missyear,
                #    indi = input("\nWant to download missing data? (Y/N)")
                #    if indi == "Y":
                #        email = raw_input("Enter an email address to download missing PRISM data.")
                #        PRISMdownload(email, 'd', vari_path[vari_path.rfind("/")+1:], yearstart , yearend, \
                #            savedir=PRISMfolderPath[:PRISMfolderPath.rfind("/")], unzip=True, keepzip=True)
                else:
                    return
        file_rasters.sort()  # make sure the data list is organized in order of date
        os.chdir(OutputFolder)

        shapefile = gpd.read_file(WatershedPath)
        shapefile1 = shapefile.to_crs(
            {'init': 'epsg:4269'})  # reproject the shapefile to match the PRISM data's CRS
        geoms = shapefile1.geometry.values
        #geometry = geoms[0]  # to check the geometry of watershed

        from shapely.geometry import mapping  #, Point
        geoms = [mapping(geoms[0])]

        for ind, raster in enumerate(file_rasters):
            full_path = vari_path + '/' + raster

            date = raster[-16:-8]
            with rasterio.open(full_path) as src:
                out_image, out_transform = mask(src, geoms, crop=True)

            no_data = src.nodata
            data = out_image.data[0]
            variable = np.extract(data != no_data, data)

            T1 = out_transform * Affine.translation(
                0.5, 0.5)  # reference the pixel center
            rc2xy = lambda r, c: (c, r) * T1

            if ind == 0:
                row, col = np.where(data != no_data)
                d = gpd.GeoDataFrame({'col': col, 'row': row})
                d['x'] = d.apply(lambda row: rc2xy(row.row, row.col)[0],
                                 axis=1)
                d['y'] = d.apply(lambda row: rc2xy(row.row, row.col)[1],
                                 axis=1)
                if shapeout is True:
                    from shapely.geometry import Point
                    d['geometry'] = d.apply(
                        lambda row: Point(row['x'], row['y']), axis=1)
                    d.crs = {'init': 'epsg:4326'}
                    d.to_file(driver='ESRI Shapefile',
                              filename="weatherstations.shp")
                d[date] = variable
            else:
                d[date] = variable
        tables_ls.append(d)

    table_tot = open("tmp.txt", "w")
    line = ','.join(['id', 'name', 'lat', 'long', 'elevation'])
    table_tot.write(line + "\n")
    for i in range(tables_ls[1].count()[0]):  # number of rows
        a = tables_ls[1].loc[[i]]  # get each row for tmax and tmin
        b = tables_ls[2].loc[[i]]
        line1 = ",".join([
            str(i), "t" + str(i),
            str(round(a['y'], 5)),
            str(round(a['x'], 5)),
            str(get_elevation(round(a['y'], 7), round(a['x'], 7)))
        ])
        table_tot.write(line1 + "\n")
        table_station = open("t" + str(i) + ".txt", "w")
        table_station.write(str(a.columns[5]) + "\n")
        for x in a:
            if x.isdigit():
                table_station.write(
                    str(round(a[x], 3)) + "," + str(round(b[x], 3)) + "\n")
        table_station.close()
    table_tot.close()

    table_tot = open("pcp.txt", "w")
    line = ','.join(['id', 'name', 'lat', 'long', 'elevation'])
    table_tot.write(line + "\n")
    for i in range(tables_ls[0].count()[0]):  # number of rows
        a = tables_ls[0].loc[[i]]  # get each row for ppt
        line1 = ",".join([
            str(i), "p" + str(i),
            str(round(a['y'], 5)),
            str(round(a['x'], 5)),
            str(get_elevation(round(a['y'], 7), round(a['x'], 7)))
        ])
        table_tot.write(line1 + "\n")
        table_station = open("p" + str(i) + ".txt", "w")
        table_station.write(str(a.columns[5]) + "\n")
        for x in a:
            if x.isdigit():
                table_station.write(str(round(a[x], 3)) + "\n")
        table_station.close()
    table_tot.close()
    return tables_ls
Example #26
def main():
    # load and average netcdfs
    arr = None
    for f in NETCDFS:
        ds = nc.Dataset(f, 'r')
        if arr is None:
            print(ds.variables.keys())
            arr = np.asarray(ds.variables['lwe_thickness']) / len(NETCDFS)
        else:
            arr += np.asarray(ds.variables['lwe_thickness']) / len(NETCDFS)

    # multiply by scale factor
    ds = nc.Dataset(SCALER, 'r')
    print(ds.variables.keys())
    scaler = np.asarray(ds.variables['SCALE_FACTOR'])
    print(scaler.shape)
    arr = arr * scaler

    # extract error grids
    m_err = np.asarray(ds.variables['MEASUREMENT_ERROR'])
    l_err = np.asarray(ds.variables['LEAKAGE_ERROR'])
    t_err = np.sqrt(m_err * m_err + l_err * l_err)

    # compute slopes, coefficients
    print(arr.shape)
    slope_arr = np.zeros(arr.shape[1:])
    r2_arr = np.zeros(arr.shape[1:])
    p_arr = np.zeros(arr.shape[1:])
    print(slope_arr.shape)
    time = np.arange(arr.shape[0])
    print(time.shape)
    for i in range(arr.shape[1]):
        for j in range(arr.shape[2]):
            b1, b0, r2, p, sd = stats.linregress(arr[:, i, j], time)
            slope_arr[i, j] = b1
            r2_arr[i, j] = r2
            p_arr[i, j] = p

    # dump to csv
    np.savetxt(SLOPE, slope_arr, delimiter=',')
    np.savetxt(R2, r2_arr, delimiter=',')
    np.savetxt(P, p_arr, delimiter=',')
    np.savetxt(ERR, t_err, delimiter=',')

    # rescale to WGS84 and dump to tif bands
    rows = arr.shape[1]
    cols = arr.shape[2]
    d = 1
    transform = A.translation(-cols * d / 2, -rows * d / 2) * A.scale(d, d)
    print(transform)
    slope_arr = np.roll(slope_arr.astype(rio.float64), 180)
    r2_arr = np.roll(r2_arr.astype(rio.float64), 180)
    p_arr = np.roll(p_arr.astype(rio.float64), 180)
    t_err = np.roll(t_err.astype(rio.float64), 180)

    with rio.open(OUT,
                  'w',
                  'GTiff',
                  width=cols,
                  height=rows,
                  dtype=rio.float64,
                  crs={'init': 'EPSG:4326'},
                  transform=transform,
                  count=4) as out:
        out.write_band(1, slope_arr)
        out.write_band(2, r2_arr)
        out.write_band(3, p_arr)
        out.write_band(4, t_err)
Example #27
def get_nearest_point_on_grid(x,
                              y,
                              transform=None,
                              xul=None,
                              yul=None,
                              dx=None,
                              dy=None,
                              rotation=0.,
                              offset='center',
                              op=None):
    """

    Parameters
    ----------
    x : float
        x-coordinate of point
    y : float
        y-coordinate of point
    transform : Affine instance, optional
        Affine object instance describing grid
    xul : float
        x-coordinate of upper left corner of the grid
    yul : float
        y-coordinate of upper left corner of the grid
    dx : float
        grid spacing in the x-direction (along rows)
    dy : float
        grid spacing in the y-direction (along columns)
    rotation : float
        grid rotation about the upper left corner, in degrees clockwise from the x-axis
    offset : str, {'center', 'edge'}
        Whether the point on the grid represents a cell center or corner (edge). This
        argument is only used if xul, yul, dx, dy and rotation are supplied. If
        an Affine transform instance is supplied, it is assumed to already incorporate
        the offset.
    op : function, optional
        Function to convert fractional pixels to whole numbers (np.round, np.floor, np.ceil).
        Defaults to np.round if offset == 'center'; otherwise defaults to np.floor.



    Returns
    -------
    x_nearest, y_nearest : float
        Coordinates of nearest grid cell center.

    """
    # get the closest (fractional) grid cell location
    # (in case the grid is rotated)
    if transform is None:
        transform = Affine(dx, 0., xul,
                           0., dy, yul) * \
                    Affine.rotation(rotation)
        if offset == 'center':
            transform *= Affine.translation(0.5, 0.5)
    x_raster, y_raster = ~transform * (x, y)

    if op is None:
        op = np.round if offset == 'center' else np.floor

    j = int(op(x_raster))
    i = int(op(y_raster))

    x_nearest, y_nearest = transform * (j, i)
    return x_nearest, y_nearest
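A minimal usage sketch with an explicit, unrotated grid definition; numpy and affine.Affine are assumed to be imported as the function requires, and all values are illustrative:

# hypothetical 100 m grid with its upper-left corner at (550000, 7100000)
x_near, y_near = get_nearest_point_on_grid(550437.0, 7099712.0,
                                           xul=550000., yul=7100000.,
                                           dx=100., dy=-100., rotation=0.,
                                           offset='center')
print(x_near, y_near)  # -> (550450.0, 7099750.0)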
    "height": out_image.shape[1],
    "width": out_image.shape[2],
    "transform": out_transform
})

with rasterio.open(cycles_output_crop_raster, "w", **out_meta) as dest:
    dest.write(out_image)

src = rasterio.open(cycles_output_crop_raster)
array = src.read(1)
#print(array.shape)

row, col = np.where(array == 4)  #Crop id value equals 4
elev = np.extract(array == 4, array)  #Crop id value equals 4

T1 = out_transform * Affine.translation(0.5, 0.5)  # reference the pixel centre
rc2xy = lambda r, c: (c, r) * T1

d = gpd.GeoDataFrame({'col': col, 'row': row, 'elev': elev})
d['x'] = d.apply(lambda row: rc2xy(row.row, row.col)[0], axis=1)
d['y'] = d.apply(lambda row: rc2xy(row.row, row.col)[1], axis=1)
d['geometry'] = d.apply(lambda row: Point(row['x'], row['y']), axis=1)
d.to_file(cycles_output_crop_shapefile, driver='ESRI Shapefile')

if (print_messages):
    print('Number of Crop points: ' + str(len(d)))

#-------------------------------------------------------------------
#https://www.isric.org/projects/soil-property-maps-africa-1-km-resolution
#https://www.isric.org/projects/soil-property-maps-africa-250-m-resolution
Example #29
import os

import numpy
import rasterio
from rasterio import Affine as A
# note: RESAMPLING and rasterio.drivers() are the pre-1.0 rasterio API
# (Resampling and rasterio.Env() in rasterio >= 1.0)
from rasterio.warp import reproject, RESAMPLING

tempdir = '/tmp'
tiffname = os.path.join(tempdir, 'example.tif')

with rasterio.drivers():

    # Consider a 512 x 512 raster centered on 0 degrees E and 0 degrees N
    # with each pixel covering 15".
    rows, cols = src_shape = (512, 512)
    dpp = 1.0 / 240  # decimal degrees per pixel
    # The following is equivalent to
    # A(dpp, 0, -cols*dpp/2, 0, -dpp, rows*dpp/2).
    src_transform = A.translation(-cols * dpp / 2, rows * dpp / 2) * A.scale(
        dpp, -dpp)
    src_crs = {'init': 'EPSG:4326'}
    source = numpy.ones(src_shape, numpy.uint8) * 255

    # Prepare to reproject this rasters to a 1024 x 1024 dataset in
    # Web Mercator (EPSG:3857) with origin at -8928592, 2999585.
    dst_shape = (1024, 1024)
    dst_transform = A.from_gdal(-237481.5, 425.0, 0.0, 237536.4, 0.0, -425.0)
    dst_transform = dst_transform.to_gdal()
    dst_crs = {'init': 'EPSG:3857'}
    destination = numpy.zeros(dst_shape, numpy.uint8)

    reproject(source,
              destination,
              src_transform=src_transform,
Example #30
def weighted_means(z, name_list, dist_vars):
    temp_name = name_list[0][z]
    all_pts = gpd.GeoDataFrame.from_file('Movement_Shapefiles/' +
                                         ''.join([temp_name, '_Python.shp']))
    all_pts.crs = {'init': 'epsg:32733'}

    from scipy.stats import gamma
    shape = float(dist_vars[z]['gamma.shape'])
    rate = float(dist_vars[z]['gamma.rate'])
    rad = gamma.ppf(0.975, a=shape, scale=1 / rate)

    big_buffers = all_pts.geometry.buffer(rad)
    #small_buffers = all_pts.geometry.buffer(30)
    #buffers_diff = big_buffers.difference(small_buffers)

    avail_green = []
    avail_wet = []
    avail_roads = []

    all_buff = big_buffers.geometry.values
    sample_iter = range(0, len(all_buff))
    from shapely.geometry import mapping
    for i in sample_iter:
        geoms = [mapping(big_buffers.geometry.values[i])]
        from rasterio.mask import mask
        with rasterio.open("ENP_Predictors/Final_Predictors_2009.tif") as src:
            out_image, out_transform = mask(src, geoms, crop=True)

            no_data = -3.39999995e+38
            Green_band = out_image.data[0]
            Wet_band = out_image.data[1]
            Road_band = out_image.data[2]
            row, col = np.where(Green_band != no_data)
            green = np.extract(Green_band != no_data, Green_band)
            wet = np.extract(Wet_band != no_data, Wet_band)
            roads = np.extract(Road_band != no_data, Road_band)

            from rasterio import Affine  # or from affine import Affine
            T1 = out_transform * Affine.translation(
                0.5, 0.5)  # reference the pixel centre
            rc2xy = lambda r, c: (c, r) * T1

            d = gpd.GeoDataFrame({
                'col': col,
                'row': row,
                'green': green,
                'wet': wet,
                'roads': roads
            })
            # coordinate transformation
            d['x'] = d.apply(lambda row: rc2xy(row.row, row.col)[0], axis=1)
            d['y'] = d.apply(lambda row: rc2xy(row.row, row.col)[1], axis=1)
            # geometry
            from shapely.geometry import Point
            d['geometry'] = d.apply(lambda row: Point(row['x'], row['y']),
                                    axis=1)

            from scipy.stats import gamma
            shape = float(dist_vars[z]['gamma.shape'])
            rate = float(dist_vars[z]['gamma.rate'])

            pt_iter = range(0, len(d))
            temp_weights = []
            temp_green_vals = []
            temp_wet_vals = []
            temp_roads_vals = []
            for j in pt_iter:
                temp_dist = d.loc[j].geometry.distance(all_pts['geometry'][i])
                weight = gamma.pdf(temp_dist, a=shape, scale=1 / rate)
                temp_weights.append(weight)

                temp_green = d.loc[j].green
                temp_wet = d.loc[j].wet
                temp_roads = d.loc[j].roads
                temp_green_vals.append(temp_green)
                temp_wet_vals.append(temp_wet)
                temp_roads_vals.append(temp_roads)

            weighted_green = sum(
                temp_green_vals[g] * temp_weights[g]
                for g in range(len(temp_green_vals))) / sum(temp_weights)
            weighted_wet = sum(
                temp_wet_vals[g] * temp_weights[g]
                for g in range(len(temp_wet_vals))) / sum(temp_weights)
            weighted_roads = sum(
                temp_roads_vals[g] * temp_weights[g]
                for g in range(len(temp_roads_vals))) / sum(temp_weights)
            avail_green.append(weighted_green.mean())
            avail_wet.append(weighted_wet.mean())
            avail_roads.append(weighted_roads.mean())

    import pandas

    gdf = gpd.GeoDataFrame(geometry=all_pts['geometry'])
    x_test = gdf.geometry.apply(lambda p: p.x)
    y_test = gdf.geometry.apply(lambda p: p.y)
    out_df = pandas.DataFrame(
        data={
            "x": x_test,
            "y": y_test,
            "green_avail": avail_green,
            "wet_avail": avail_wet,
            "roads_avail": avail_roads
        })
    return out_df