Example No. 1
        def __call__(self):
            with rasterio.open(input) as src:
                if bands:
                    if sampling == 1:
                        img = src.read_band(bidx)
                        transform = src.transform
                    # Decimate the band.
                    else:
                        img = numpy.zeros(
                            (src.height // sampling, src.width // sampling),
                            dtype=src.dtypes[src.indexes.index(bidx)])
                        img = src.read_band(bidx, img)
                        transform = src.affine * Affine.scale(float(sampling))
                else:
                    if sampling == 1:
                        img = src.read_mask()
                        transform = src.transform
                    # Decimate the mask.
                    else:
                        img = numpy.zeros(
                            (src.height // sampling, src.width // sampling),
                            dtype=numpy.uint8)
                        img = src.read_mask(img)
                        transform = src.affine * Affine.scale(float(sampling))

                bounds = src.bounds
                xs = [bounds[0], bounds[2]]
                ys = [bounds[1], bounds[3]]
                if projected == 'geographic':
                    xs, ys = rasterio.warp.transform(src.crs,
                                                     {'init': 'epsg:4326'}, xs,
                                                     ys)
                if precision >= 0:
                    xs = [round(v, precision) for v in xs]
                    ys = [round(v, precision) for v in ys]
                self._xs = xs
                self._ys = ys

                kwargs = {'transform': transform}
                if not bands and not with_nodata:
                    kwargs['mask'] = (img == 255)
                for g, i in rasterio.features.shapes(img, **kwargs):
                    if projected == 'geographic':
                        g = rasterio.warp.transform_geom(
                            src.crs,
                            'EPSG:4326',
                            g,
                            antimeridian_cutting=True,
                            precision=precision)
                    xs, ys = zip(*coords(g))
                    yield {
                        'type': 'Feature',
                        'id': str(i),
                        'properties': {
                            'val': i
                        },
                        'bbox': [min(xs), min(ys),
                                 max(xs), max(ys)],
                        'geometry': g
                    }
Example No. 2
        def __call__(self):
            with rasterio.open(input) as src:
                img = None
                nodata_mask = None
                if bands:
                    if sampling == 1:
                        img = src.read(bidx, masked=False)
                        transform = src.transform
                    # Decimate the band.
                    else:
                        img = numpy.zeros(
                            (src.height//sampling, src.width//sampling),
                            dtype=src.dtypes[src.indexes.index(bidx)])
                        img = src.read(bidx, img, masked=False)
                        transform = src.affine * Affine.scale(float(sampling))
                if not bands or not with_nodata:
                    if sampling == 1:
                        nodata_mask = src.read_masks(bidx)
                        transform = src.transform
                    # Decimate the mask.
                    else:
                        nodata_mask = numpy.zeros(
                            (src.height//sampling, src.width//sampling),
                            dtype=numpy.uint8)
                        nodata_mask = src.read_masks(bidx, nodata_mask)
                        transform = src.affine * Affine.scale(float(sampling))

                bounds = src.bounds
                xs = [bounds[0], bounds[2]]
                ys = [bounds[1], bounds[3]]
                if projection == 'geographic':
                    xs, ys = rasterio.warp.transform(
                        src.crs, {'init': 'epsg:4326'}, xs, ys)
                if precision >= 0:
                    xs = [round(v, precision) for v in xs]
                    ys = [round(v, precision) for v in ys]
                self._xs = xs
                self._ys = ys

                kwargs = {'transform': transform}
                # Default is to exclude nodata features.
                if nodata_mask is not None:
                    kwargs['mask'] = (nodata_mask==255)
                if img is None:
                    img = nodata_mask
                for g, i in rasterio.features.shapes(img, **kwargs):
                    if projection == 'geographic':
                        g = rasterio.warp.transform_geom(
                            src.crs, 'EPSG:4326', g,
                            antimeridian_cutting=True, precision=precision)
                    xs, ys = zip(*coords(g))
                    yield {
                        'type': 'Feature',
                        'id': str(i),
                        'properties': {'val': i},
                        'bbox': [min(xs), min(ys), max(xs), max(ys)],
                        'geometry': g }
Example No. 3
def _crop_img_to_shp(img: rasterio.DatasetReader, shape: shapefile.Shape,
                     out_path: _OutPath) -> bool:
    # Get the bbox to crop to
    shp_bbox = _BBox(*[round(v) for v in shape.bbox])
    img_bbox = _BBox(*list(img.bounds))
    bbox = shp_bbox.intersect(img_bbox)

    if not bbox.is_valid:
        return False

    # Crop the image
    window = bbox.to_window(img)
    data = img.read(window=window)

    # Write to the output directory
    out_path = out_path.crop_path(shp_bbox.left, shp_bbox.bottom)
    x_res, y_res = img.res
    transform = Affine.translation(bbox.left, bbox.top) * Affine.scale(
        x_res, -y_res)

    profile = img.profile
    profile.update(transform=transform,
                   height=window.height,
                   width=window.width)

    with rasterio.open(out_path, 'w', **profile) as writer:
        writer.write(data)

        # Fixes band 4 being labelled as alpha channel
        writer.colorinterp = img.colorinterp

    print(f'Created: {out_path}')
    return True
Example No. 4
def test_complex_nodata(tmpdir):
    """A complex dataset can be created with a real nodata value"""
    import numpy as np
    import rasterio
    from rasterio.transform import Affine

    x = np.linspace(-4.0, 4.0, 240)
    y = np.linspace(-3.0, 3.0, 180)
    X, Y = np.meshgrid(x, y)
    Z1 = np.ones_like(X) + 1j

    res = (x[-1] - x[0]) / 240.0
    transform1 = Affine.translation(x[0] - res / 2,
                                    y[-1] - res / 2) * Affine.scale(res, -res)

    tempfile = str(tmpdir.join("test.tif"))
    with rasterio.open(tempfile,
                       'w',
                       driver='GTiff',
                       height=Z1.shape[0],
                       width=Z1.shape[1],
                       nodata=0,
                       count=1,
                       dtype=Z1.dtype,
                       crs='+proj=latlong',
                       transform=transform1) as dst:
        dst.write(Z1, 1)

    with rasterio.open(tempfile) as dst:
        assert dst.nodata == 0
Example No. 5
def selectInterestArea(imgName, x, y):
    out_folder = os.path.join(workingDir, 'Split', imgName[:-4])
    with rio.open(os.path.join(imageDir, imgName)) as inds:

        # tile_width, tile_height = int(inds.width/16),int(inds.height/16)
        tile_width, tile_height = 1220, 1220
        inv_transform = Affine.scale(
            1 / inds.transform.a, 1 / inds.transform.e) * Affine.translation(
                -inds.transform.xoff, -inds.transform.yoff)
        meta = inds.meta.copy()
        # Reproject the point of interest from crs0 into the image CRS.
        a = rasterio.warp.transform(crs0, inds.crs, [y], [x])
        xPixel, yPixel = pixel_location = inv_transform * (a[0][0], a[1][0])
        print(pixel_location)
        # print(count_sliding_window(inds.read(1)))
        for window, transform in get_tiles(inds, tile_width, tile_height):
            print(window)
            meta['transform'] = transform
            meta['width'], meta['height'] = window.width, window.height
            outpath = os.path.join(
                out_folder,
                output_filename.format(int(window.col_off),
                                       int(window.row_off)))
            if ((xPixel > window.col_off) &
                (xPixel < window.col_off + window.width) &
                (yPixel > window.row_off) &
                (yPixel < window.row_off + window.height)):
                with rio.open(outpath, 'w', **meta) as outds:
                    outds.write(inds.read(window=window))
Example No. 6
def basic_flattening(target_folder, raster, res, origin, size, tin = False):
    """Reads some pre-determined vector files, tiles them using
    Lisa's code and "burns" them into the output raster. The flat
    elevation of the polygons is estimated by Laplace-interpolating
    at the locations of the polygon vertices. The underlying TIN
    is constructed from the centre points of the raster pixels.
    Rasterisation takes place via rasterio's interface.
    """
    import startin
    from rasterio.features import rasterize
    from rasterio.transform import Affine
    transform = (Affine.translation(origin[0], origin[1])
                 * Affine.scale(size, size))
    x0, x1 = origin[0] + size / 2, origin[0] + ((res[0] - 0.5) * size)
    y0, y1 = origin[1] + size / 2, origin[1] + ((res[1] - 0.5) * size)
    poly_fpaths = [
                     'rest_bodies/bbg_rest_of_the_water.shp',
                     'sea_bodies/bbg_sea_and_big_bodies.shp',
                     # You can add more resources here.
                  ]
    wfs_urls =    [
                     #('http://3dbag.bk.tudelft.nl/data/wfs', 'BAG3D:pand3d'),
                     # You can add more resources here.
                  ]
    in_vecs = []
    for fpath in poly_fpaths:
        vec = vector_prepare([[x0, x1], [y0, y1]], target_folder + fpath)
        if len(vec) != 0: in_vecs.append(vec)
    for wfs in wfs_urls:
        vec = wfs_prepare([[x0, x1], [y0, y1]], wfs[0], wfs[1])
        if len(vec) != 0: in_vecs.append(vec)
    if len(in_vecs) == 0: return
    if tin is False:
        xs, ys = np.linspace(x0, x1, res[0]), np.linspace(y0, y1, res[1])
        xg, yg = np.meshgrid(xs, ys); xg = xg.flatten(); yg = yg.flatten()
        cs = np.vstack((xg, yg, raster.flatten())).transpose()
        data = cs[cs[:,2] != -9999]
        tin = startin.DT(); tin.insert(data)
    elevations = []
    for polys in in_vecs:
        for poly, i in zip(polys, range(len(polys))):
            els = []
            for vx in poly.exterior.coords:
                try: els += [tin.interpolate_laplace(vx[0], vx[1])]
                except: pass
            for interior in poly.interiors:
                for vx in interior.coords:
                    try: els += [tin.interpolate_laplace(vx[0], vx[1])]
                    except: pass
            elevations.append(np.median(els))
    shapes = []
    for polys in in_vecs:
        shapes += [(p, v) for p, v in zip(polys, elevations)]
    raspolys = rasterize(shapes, raster.shape, -9999, transform = transform)
    for yi in range(res[1]):
        for xi in range(res[0]):
            if raspolys[yi, xi] != -9999: raster[yi, xi] = raspolys[yi, xi]
    return tin
Example No. 7
def latlon2transform(lat, lon, cell_center=True):
    lat = np.asarray(lat)
    lon = np.asarray(lon)
    resX = (lon[-1] - lon[0]) / (len(lon) - 1)
    resY = (lat[-1] - lat[0]) / (len(lat) - 1)
    trans = Affine.translation(lon[0] - resX / 2. * cell_center,
                               lat[0] - resY / 2. * cell_center)
    scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
    return trans * scale
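A minimal usage sketch for latlon2transform, assuming regularly spaced coordinate arrays (the values below are illustrative): the returned Affine maps (column, row) indices to (lon, lat) coordinates.

import numpy as np

lat = np.linspace(3.0, -3.0, 180)   # hypothetical, descending latitudes
lon = np.linspace(-4.0, 4.0, 240)   # hypothetical, ascending longitudes
transform = latlon2transform(lat, lon)
print(transform * (0, 0))           # upper-left corner of the first cell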
Example No. 8
def transform_lat_lon(lat, lon):
    """
    Builds an affine transform from latitude and longitude coordinate arrays.
    :param lat: numpy array of latitude coordinates.
    :param lon: numpy array of longitude coordinates.
    :return: affine.Affine mapping (column, row) indices to (lon, lat) coordinates.
    """
    lat = np.asarray(lat)
    lon = np.asarray(lon)
    trans = Affine.translation(lon[0], lat[0])
    scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
    return trans * scale
def save_geotiff(img, coords, filename):
    height, width, channels = img.shape
    xres = (coords[1][0] - coords[0][0]) / width
    yres = (coords[0][1] - coords[1][1]) / height
    transform = Affine.translation(coords[0][0] - xres / 2, coords[0][1] +
                                   yres / 2) * Affine.scale(xres, -yres)
    profile = {
        'driver': 'GTiff',
        'width': width,
        'height': height,
        'count': channels,
        'crs': '+proj=latlong',
        'transform': transform,
        'dtype': img.dtype,
        'compress': 'JPEG'
    }
    with rasterio.open(filename, 'w', **profile) as f:
        f.write(img.transpose(2, 0, 1))
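A hypothetical call to save_geotiff above. The coords layout assumed here, inferred from the code, is [(upper-left lon, upper-left lat), (lower-right lon, lower-right lat)]; the image is a height x width x channels uint8 array.

import numpy as np

img = np.zeros((180, 240, 3), dtype=np.uint8)   # placeholder RGB image
coords = [(-4.0, 3.0), (4.0, -3.0)]             # hypothetical corner coordinates
save_geotiff(img, coords, 'example.tif')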
Example No. 10
def write_geotiff(raster, origin, size, fpath):
    """Writes data in an n by n numpy array to disk as a
    GeoTIFF raster. The header is based on the raster array
    and a manual definition of the coordinate system and an
    affine transform.
    """
    transform = (Affine.translation(origin[0], origin[1]) *
                 Affine.scale(size, size))
    with rasterio.Env():
        with rasterio.open(fpath,
                           'w',
                           driver='GTiff',
                           height=raster.shape[0],
                           width=raster.shape[1],
                           count=1,
                           dtype=rasterio.float32,
                           crs='EPSG:28992',
                           transform=transform) as out_file:
            out_file.write(raster.astype(rasterio.float32), 1)
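A short, hedged usage sketch for write_geotiff; the array, origin and cell size are placeholders (the CRS is fixed to EPSG:28992 inside the function).

import numpy as np

raster = np.random.rand(100, 100).astype(np.float32)   # placeholder elevation grid
write_geotiff(raster, origin=(155000.0, 463000.0), size=0.5, fpath='/tmp/example.tif')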
Example No. 11
def main(args):
    data = np.load(args.in_file).astype(args.type)
    if len(data.shape) > 3 or len(data.shape) < 2:
        raise UnsupportedNumberDimsError(
            f"Number of dims must be 2 or 3. Got {len(data.shape)}."
        )
    elif len(data.shape) == 3:
        nbands = data.shape[0]
    else:
        nbands = 1
        # Add extra axis for iteration purposes
        data = np.expand_dims(data, axis=0)
    trans = REGION_TO_TRANS[args.region]
    if args.mask is not None:
        mask = trans(np.load(args.mask).astype(bool))
        data[..., mask] = args.missing_value
    crs = eg.GRID_NAME_TO_V1_PROJ[eg.ML]
    x, y = [
        trans(xi)
        for xi in eg.v1_lonlat_to_meters(*eg.v1_get_full_grid_lonlat(eg.ML))
    ]
    x = x[0]
    y = y[:, 0]
    xres = (x[-1] - x[0]) / len(x)
    yres = (y[-1] - y[0]) / len(y)
    t = Affine.translation(x[0], y[0]) * Affine.scale(xres, yres)
    ds = rio.open(
        args.out_file,
        "w",
        driver="GTiff",
        height=data.shape[1],
        width=data.shape[2],
        count=nbands,
        dtype=args.type,
        crs=crs.srs,
        transform=t,
        compress="lzw",
        nodata=args.missing_value,
    )
    for i, band in enumerate(data):
        ds.write(band, i + 1)
    ds.close()
    def exec(self) -> dict:
        data_array = self.ndarray[self.data_attr].values
        x_min = min(self.ndarray.coords["X"])
        x_max = max(self.ndarray.coords["X"])
        y_min = min(self.ndarray.coords["Y"])
        y_max = max(self.ndarray.coords["Y"])
        x_res = (x_max - x_min) / len(self.ndarray.coords["X"])
        y_res = (y_max - y_min) / len(self.ndarray.coords["Y"])
        transform = Affine.translation(
            x_min - x_res / 2, y_min - y_res / 2) * Affine.scale(x_res, y_res)
        if self.is_multiple_files:
            os.makedirs(self.output_file, exist_ok=True)
            for i, time in enumerate(self.ndarray.coords["time"].values):
                with rasterio.open(
                        str(self.output_file /
                            f"{time.replace(':', '-')}.tif"),
                        'w',
                        driver='GTiff',
                        height=data_array.shape[2],
                        width=data_array.shape[1],
                        count=1,
                        dtype=data_array.dtype,
                        crs='+init=epsg:4326',
                        transform=transform,
                ) as dst:
                    array = data_array[i].transpose()
                    dst.write(np.expand_dims(array, 0))
        else:
            with rasterio.open(
                    str(self.output_file),
                    'w',
                    driver='GTiff',
                    height=data_array.shape[2],
                    width=data_array.shape[1],
                    count=12,
                    dtype=data_array.dtype,
                    crs='+init=epsg:4326',
                    transform=transform,
            ) as dst:
                dst.write(data_array.swapaxes(1, 2))

        return {"result": True}
Example No. 13
def write_geotiff(raster, origin, size, fpath):
    """Writes the interpolated TIN-linear and Laplace rasters
    to disk using the GeoTIFF format. The header is based on
    the raster array and a manual definition of the coordinate
    system and an identity affine transform.
    """
    import rasterio
    from rasterio.transform import Affine
    transform = (Affine.translation(origin[0], origin[1])
                 * Affine.scale(size, size))
    with rasterio.Env():
        with rasterio.open(fpath, 'w', driver = 'GTiff',
                           height = raster.shape[0],
                           width = raster.shape[1],
                           count = 1,
                           dtype = rasterio.float32,
                           crs='EPSG:28992',
                           transform = transform
                           ) as out_file:
            out_file.write(raster.astype(rasterio.float32), 1)
Example No. 14
def setup_grid(xmin, ymin, xmax, ymax, resolution):
    # First, setup the non-optimized grid.
    x = np.arange(xmin, xmax - resolution, resolution)
    y = np.arange(ymin, ymax - resolution, resolution)
    mx, my = np.meshgrid(x, y)
    # Then, write it to a tmp file.
    temp_path = os.path.join(tempfile.gettempdir(), "grid.tiff")
    raster = mx.astype(np.float32)
    transform = Affine.translation(xmin, ymin) * Affine.scale(
        resolution, resolution)
    raster_to_cog(raster, transform, temp_path)
    # Read it again.
    with rasterio.open(temp_path, 'r') as r:
        new_transform = r.transform
        new_bounds = r.bounds
    # Prepare the grid.
    x = np.arange(new_bounds[0], new_bounds[2], new_transform.a)
    y = np.arange(new_bounds[3], new_bounds[1], new_transform.e)
    mx, my = np.meshgrid(x, y)

    return mx, my, new_bounds, new_transform
Example No. 15
 def to_rasterio(self, output_file=None, src_epsg=27700):
     """ Convert to a rasterio dataset
     
     Args:
         output_file: a string to give output file name
         src_epsg: int scalar to give EPSG code of the coordinate reference
             system of the original dataset, default is 27700 for BNG
     Return:
         ds_rio: a rasterio dataset
     """
     import rasterio
     from rasterio.transform import Affine
     cellsize = self.cellsize
     x00 = self.extent_dict['left']  # upper-left corner of the first pixel
     y00 = self.extent_dict['top']
     transform = Affine.translation(x00, y00) * Affine.scale(
         cellsize, -cellsize)
     if output_file is None:
         filename = '/tmp/new.tif'
     else:
         if output_file.endswith('.tif'):
             filename = output_file
         else:
             filename = output_file + '.tif'
     src_crs = rasterio.crs.CRS.from_epsg(src_epsg)
     ds_rio = rasterio.open(filename,
                            'w+',
                            driver='GTiff',
                            height=self.shape[0],
                            width=self.shape[1],
                            count=1,
                            dtype=self.array.dtype,
                            crs=src_crs,
                            transform=transform,
                            nodata=self.header['NODATA_value'])
     ds_rio.write(self.array, 1)
     if output_file is not None:
         ds_rio.close()
     return ds_rio
def main(AHN_pc_file):
    AHN_pc = read_PC_Data("AHN/" + AHN_pc_file + ".las")
    interpolation_methods = ["IDW", "NN", "Laplace", "TINlinear"]
    file_name = "DSM_" + AHN_pc_file[4:] + "_"
    ff = open("MAE_report_" + AHN_pc_file[4:] + ".txt", "w")

    for method in interpolation_methods:
        raster = method + "/" + file_name + method + ".tif"
        raster_info = read_file(raster)
        differences_values = calculate_differences(AHN_pc, raster_info[0],
                                                   raster_info[1],
                                                   raster_info[2],
                                                   raster_info[3],
                                                   raster_info[4])
        transform = (Affine.translation(raster_info[2][0], raster_info[2][3]) *
                     Affine.scale(raster_info[1][0], raster_info[1][1]))
        with rasterio.Env():
            with rasterio.open(method + "/" + file_name +
                               'verticle_differences.tif',
                               'w',
                               driver='GTiff',
                               height=raster_info[4],
                               width=raster_info[3],
                               count=1,
                               dtype=differences_values.dtype,
                               crs='EPSG:28992',
                               transform=transform) as dst:
                dst.write(differences_values, 1)

        abs_sum = 0
        for col in range(raster_info[3]):
            for row in range(raster_info[4]):
                abs_sum += abs(differences_values[row][col])
        N = (raster_info[3] *
             raster_info[4]) - np.count_nonzero(differences_values == 0)
        MAE = abs_sum / N
        ff.write("MAE for " + method + "_" + file_name + " = " + str(MAE) +
                 "\n")
        print("MAE for " + method + "_" + file_name + " = " + str(MAE))
Example No. 17
def test_complex_int16(tmpdir):
    """A cint16 dataset can be created"""
    import subprocess

    import numpy as np
    import rasterio
    from rasterio.transform import Affine

    x = np.linspace(-4.0, 4.0, 240)
    y = np.linspace(-3.0, 3.0, 180)
    X, Y = np.meshgrid(x, y)
    Z1 = np.ones_like(X) + 1j

    res = (x[-1] - x[0]) / 240.0
    transform1 = Affine.translation(x[0] - res / 2,
                                    y[-1] - res / 2) * Affine.scale(res, -res)

    tempfile = str(tmpdir.join("test.tif"))

    with rasterio.open(
            tempfile,
            "w",
            driver="GTiff",
            height=Z1.shape[0],
            width=Z1.shape[1],
            nodata=None,
            count=1,
            dtype="complex_int16",
            crs="+proj=latlong",
            transform=transform1,
    ) as dst:
        dst.write(Z1, 1)

    assert "Type=CInt16" in subprocess.check_output(["gdalinfo",
                                                     tempfile]).decode("utf-8")

    with rasterio.open(tempfile) as dst:
        assert dst.nodatavals == (None, )
        data = dst.read()
        assert data.dtype == np.complex64
Example No. 18
 def get_meta(self, src_epsg=27700):
     """ Get rasterio meta data
     """
     from rasterio.transform import Affine
     dx = self.cellsize
     x = self.extent_dict['left']  # upper-left corner of the first pixel
     y = self.extent_dict['top']
     transform = Affine.translation(x, y) * Affine.scale(dx, -dx)
     if not hasattr(self, 'crs'):
         crs = rio.crs.CRS.from_epsg(src_epsg)
     else:
         crs = self.crs
     ras_meta = {
         'driver': 'GTiff',
         'dtype': self.array.dtype.name,
         'nodata': self.header['NODATA_value'],
         'width': self.array.shape[1],
         'height': self.array.shape[0],
         'count': 1,
         'crs': crs,
         'transform': transform
     }
     self.meta = ras_meta
Example No. 19
        def __call__(self):
            with rasterio.open(input) as src:
                if bidx is not None and bidx > src.count:
                    raise ValueError('bidx is out of range for raster')

                img = None
                msk = None

                # Adjust transforms.
                transform = src.affine
                if sampling > 1:
                    # Decimation of the raster produces a georeferencing
                    # shift that we correct with a translation.
                    transform *= Affine.translation(
                                    src.width%sampling, src.height%sampling)
                    # And follow by scaling.
                    transform *= Affine.scale(float(sampling))

                # Most of the time, we'll use the valid data mask.
                # We skip reading it if we're extracting every possible
                # feature (even invalid data features) from a band.
                if not band or (band and not as_mask and not with_nodata):
                    if sampling == 1:
                        msk = src.read_masks(bidx)
                    else:
                        msk_shape = (
                            src.height//sampling, src.width//sampling)
                        if bidx is None:
                            msk = numpy.zeros(
                                (src.count,) + msk_shape, 'uint8')
                        else:
                            msk = numpy.zeros(msk_shape, 'uint8')
                        msk = src.read_masks(bidx, msk)

                    if bidx is None:
                        msk = numpy.logical_or.reduce(msk).astype('uint8')

                    # Possibly overridden below.
                    img = msk

                # Read the band data unless the --mask option is given.
                if band:
                    if sampling == 1:
                        img = src.read(bidx, masked=False)
                    else:
                        img = numpy.zeros(
                            (src.height//sampling, src.width//sampling),
                            dtype=src.dtypes[src.indexes.index(bidx)])
                        img = src.read(bidx, img, masked=False)

                # If --as-mask option was given, convert the image
                # to a binary image. This reduces the number of shape
                # categories to 2 and likely reduces the number of
                # shapes.
                if as_mask:
                    tmp = numpy.ones_like(img, 'uint8') * 255
                    tmp[img == 0] = 0
                    img = tmp
                    if not with_nodata:
                        msk = tmp

                # Transform the raster bounds.
                bounds = src.bounds
                xs = [bounds[0], bounds[2]]
                ys = [bounds[1], bounds[3]]
                if projection == 'geographic':
                    xs, ys = rasterio.warp.transform(
                        src.crs, {'init': 'epsg:4326'}, xs, ys)
                if precision >= 0:
                    xs = [round(v, precision) for v in xs]
                    ys = [round(v, precision) for v in ys]
                self._xs = xs
                self._ys = ys

                # Prepare keyword arguments for shapes().
                kwargs = {'transform': transform}
                if not with_nodata:
                    kwargs['mask'] = msk

                src_basename = os.path.basename(src.name)

                # Yield GeoJSON features.
                for i, (g, val) in enumerate(
                        rasterio.features.shapes(img, **kwargs)):
                    if projection == 'geographic':
                        g = rasterio.warp.transform_geom(
                            src.crs, 'EPSG:4326', g,
                            antimeridian_cutting=True, precision=precision)
                    xs, ys = zip(*coords(g))
                    yield {
                        'type': 'Feature',
                        'id': "{0}:{1}".format(src_basename, i),
                        'properties': {
                            'val': val, 'filename': src_basename
                        },
                        'bbox': [min(xs), min(ys), max(xs), max(ys)],
                        'geometry': g
                    }
Example No. 20
def dataset_features(src,
                     bidx=None,
                     sampling=1,
                     band=True,
                     as_mask=False,
                     with_nodata=False,
                     geographic=True,
                     precision=-1):
    """Yield GeoJSON features for the dataset

    The geometries are polygons bounding contiguous regions of the same raster value.

    Parameters
    ----------
    src: Rasterio Dataset

    bidx: int
        band index

    sampling: int (DEFAULT: 1)
        Inverse of the sampling fraction; a value of 10 decimates

    band: boolean (DEFAULT: True)
        extract features from a band (True) or a mask (False)

    as_mask: boolean (DEFAULT: False)
        Interpret band as a mask and output only one class of valid data shapes?

    with_nodata: boolean (DEFAULT: False)
        Include nodata regions?

    geographic: bool (DEFAULT: True)
        Output shapes in EPSG:4326? Otherwise use the native CRS.

    precision: int (DEFAULT: -1)
        Decimal precision of coordinates. -1 for full float precision output

    Yields
    ------
    GeoJSON-like Feature dictionaries for shapes found in the given band
    """
    if bidx is not None and bidx > src.count:
        raise ValueError('bidx is out of range for raster')

    img = None
    msk = None

    # Adjust transforms.
    transform = src.transform
    if sampling > 1:
        # Determine the target shape (to decimate)
        shape = (int(math.ceil(src.height / sampling)),
                 int(math.ceil(src.width / sampling)))

        # Calculate independent sampling factors
        x_sampling = src.width / shape[1]
        y_sampling = src.height / shape[0]

        # Decimation of the raster produces a georeferencing
        # shift that we correct with a translation.
        transform *= Affine.translation(src.width % x_sampling,
                                        src.height % y_sampling)

        # And follow by scaling.
        transform *= Affine.scale(x_sampling, y_sampling)

    # Most of the time, we'll use the valid data mask.
    # We skip reading it if we're extracting every possible
    # feature (even invalid data features) from a band.
    if not band or (band and not as_mask and not with_nodata):
        if sampling == 1:
            msk = src.read_masks(bidx)
        else:
            msk_shape = shape
            if bidx is None:
                msk = np.zeros((src.count, ) + msk_shape, 'uint8')
            else:
                msk = np.zeros(msk_shape, 'uint8')
            msk = src.read_masks(bidx, msk)

        if bidx is None:
            msk = np.logical_or.reduce(msk).astype('uint8')

        # Possibly overridden below.
        img = msk

    # Read the band data unless the --mask option is given.
    if band:
        if sampling == 1:
            img = src.read(bidx, masked=False)
        else:
            img = np.zeros(shape, dtype=src.dtypes[src.indexes.index(bidx)])
            img = src.read(bidx, img, masked=False)

    # If as_mask option was given, convert the image
    # to a binary image. This reduces the number of shape
    # categories to 2 and likely reduces the number of
    # shapes.
    if as_mask:
        tmp = np.ones_like(img, 'uint8') * 255
        tmp[img == 0] = 0
        img = tmp
        if not with_nodata:
            msk = tmp

    # Prepare keyword arguments for shapes().
    kwargs = {'transform': transform}
    if not with_nodata:
        kwargs['mask'] = msk

    src_basename = os.path.basename(src.name)

    # Yield GeoJSON features.
    for i, (g, val) in enumerate(rasterio.features.shapes(img, **kwargs)):
        if geographic:
            g = warp.transform_geom(src.crs,
                                    'EPSG:4326',
                                    g,
                                    antimeridian_cutting=True,
                                    precision=precision)
        xs, ys = zip(*coords(g))
        yield {
            'type': 'Feature',
            'id': "{0}:{1}".format(src_basename, i),
            'properties': {
                'val': val,
                'filename': src_basename
            },
            'bbox': [min(xs), min(ys), max(xs),
                     max(ys)],
            'geometry': g
        }
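A hedged usage sketch for the dataset_features function above; the input path is a placeholder.

import json
import rasterio

with rasterio.open('example.tif') as src:   # hypothetical input raster
    for feat in dataset_features(src, bidx=1, geographic=True, precision=6):
        print(json.dumps(feat))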
Example No. 21
        def __call__(self):
            with rasterio.open(input) as src:
                img = None
                msk = None
                if band:
                    if sampling == 1:
                        img = src.read(bidx, masked=False)
                        transform = src.affine
                    # Decimate the band.
                    else:
                        img = numpy.zeros(
                            (src.height//sampling, src.width//sampling),
                            dtype=src.dtypes[src.indexes.index(bidx)])
                        img = src.read(bidx, img, masked=False)
                        transform = src.affine * Affine.scale(float(sampling))
                    if as_mask:
                        tmp = numpy.ones_like(img, 'uint8') * 255
                        tmp[img == 0] = 0
                        img = tmp
                        msk = tmp
                if not band or not with_nodata:
                    if sampling == 1:
                        msk = src.read_masks(bidx)
                        if bidx is None:
                            msk = numpy.logical_or.reduce(msk).astype('uint8')
                        transform = src.affine
                    # Decimate the mask.
                    else:
                        msk_shape = src.height//sampling, src.width//sampling
                        if bidx is None:
                            msk = numpy.zeros(
                                (src.count,) + msk_shape, 'uint8')
                        else:
                            msk = numpy.zeros(msk_shape, 'uint8')
                        msk = src.read_masks(bidx, msk)
                        if bidx is None:
                            msk = numpy.logical_or.reduce(msk).astype('uint8')
                        transform = src.affine * Affine.scale(float(sampling))

                bounds = src.bounds
                xs = [bounds[0], bounds[2]]
                ys = [bounds[1], bounds[3]]
                if projection == 'geographic':
                    xs, ys = rasterio.warp.transform(
                        src.crs, {'init': 'epsg:4326'}, xs, ys)
                if precision >= 0:
                    xs = [round(v, precision) for v in xs]
                    ys = [round(v, precision) for v in ys]
                self._xs = xs
                self._ys = ys

                kwargs = {'transform': transform}
                # Default is to exclude nodata features.
                if msk is not None:
                    kwargs['mask'] = msk #(msk > 0)
                if img is None:
                    img = msk

                for g, i in rasterio.features.shapes(img, **kwargs):
                    if projection == 'geographic':
                        g = rasterio.warp.transform_geom(
                            src.crs, 'EPSG:4326', g,
                            antimeridian_cutting=True, precision=precision)
                    xs, ys = zip(*coords(g))
                    yield {
                        'type': 'Feature',
                        'id': str(i),
                        'properties': {
                            'val': i, 'filename': os.path.basename(src.name)
                        },
                        'bbox': [min(xs), min(ys), max(xs), max(ys)],
                        'geometry': g
                    }
Example No. 22
def merge(ctx, files, driver, bounds, res, nodata):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm. If the output file exists, its values will be
    overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.
    """
    import numpy as np

    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    logger = logging.getLogger('rio')

    try:
        with rasterio.drivers(CPL_DEBUG=verbosity>2):
            output = files[-1]
            files = files[:-1]

            with rasterio.open(files[0]) as first:
                first_res = first.res
                kwargs = first.meta
                kwargs.pop('affine')
                nodataval = first.nodatavals[0]
                dtype = first.dtypes[0]

            if os.path.exists(output):
                # TODO: prompt user to update existing file (-i option) like:
                # overwrite b.tif? (y/n [n]) n
                # not overwritten
                dst = rasterio.open(output, 'r+')
                nodataval = dst.nodatavals[0]
                dtype = dst.dtypes[0]
                dest = np.zeros((dst.count,) + dst.shape, dtype=dtype)
            else:
                # Create new output file.
                # Extent from option or extent of all inputs.
                if not bounds:
                    # scan input files.
                    xs = []
                    ys = []
                    for f in files:
                        with rasterio.open(f) as src:
                            left, bottom, right, top = src.bounds
                            xs.extend([left, right])
                            ys.extend([bottom, top])
                    bounds = min(xs), min(ys), max(xs), max(ys)
                output_transform = Affine.translation(bounds[0], bounds[3])

                # Resolution/pixel size.
                if not res:
                    res = first_res
                output_transform *= Affine.scale(res[0], -res[1])

                # Dataset shape.
                output_width = int(math.ceil((bounds[2]-bounds[0])/res[0]))
                output_height = int(math.ceil((bounds[3]-bounds[1])/res[1]))

                kwargs['driver'] = driver
                kwargs['transform'] = output_transform
                kwargs['width'] = output_width
                kwargs['height'] = output_height

                logger.debug("Kwargs: %r", kwargs)
                logger.debug("bounds: %r", bounds)
                logger.debug("Res: %r", res)

                dst = rasterio.open(output, 'w', **kwargs)
                dest = np.zeros((first.count, output_height, output_width),
                        dtype=dtype)

                logger.debug("In merge, dest shape: %r", dest.shape)

            if nodata is not None:
                nodataval = nodata

            if nodataval is not None:
                # Only fill if the nodataval is within dtype's range.
                inrange = False
                if np.dtype(dtype).kind in ('i', 'u'):
                    info = np.iinfo(dtype)
                    inrange = (info.min <= nodataval <= info.max)
                elif np.dtype(dtype).kind == 'f':
                    info = np.finfo(dtype)
                    inrange = (info.min <= nodataval <= info.max)
                if inrange:
                    dest.fill(nodataval)
                else:
                    warnings.warn(
                        "Input file's nodata value, %s, is beyond the valid "
                        "range of its data type, %s. Consider overriding it "
                        "using the --nodata option for better results." % (
                            nodataval, dtype))
            else:
                nodataval = 0

            dst_w, dst_s, dst_e, dst_n = dst.bounds

            for fname in reversed(files):
                with rasterio.open(fname) as src:
                    # Real World (tm) use of boundless reads.
                    # This approach uses the maximum amount of memory to solve
                    # the problem. Making it more efficient is a TODO.

                    # 1. Compute spatial intersection of destination
                    #    and source.
                    src_w, src_s, src_e, src_n = src.bounds

                    int_w = src_w if src_w > dst_w else dst_w
                    int_s = src_s if src_s > dst_s else dst_s
                    int_e = src_e if src_e < dst_e else dst_e
                    int_n = src_n if src_n < dst_n else dst_n

                    # 2. Compute the source window.
                    src_window = src.window(int_w, int_s, int_e, int_n)

                    # 3. Compute the destination window.
                    dst_window = dst.window(int_w, int_s, int_e, int_n)

                    # 4. Initialize temp array.
                    temp = np.zeros(
                            (first.count,) + tuple(b - a for a, b in dst_window),
                            dtype=dtype)

                    temp = src.read(
                            out=temp,
                            window=src_window,
                            boundless=False,
                            masked=True)

                    # 5. Copy elements of temp into dest.
                    roff, coff = dst.index(int_w, int_n)
                    h, w = temp.shape[-2:]

                    region = dest[:,roff:roff+h,coff:coff+w]
                    np.copyto(region, temp,
                        where=np.logical_and(
                        region==nodataval, temp.mask==False))

            if dst.mode == 'r+':
                temp = dst.read(masked=True)
                np.copyto(dest, temp,
                    where=np.logical_and(
                    dest==nodataval, temp.mask==False))

            dst.write(dest)
            dst.close()

        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Example No. 23
def merge_rgba_tool(sources, outtif, bounds=None, res=None, precision=7,
                    creation_options={}):
    """A windowed, top-down approach to merging.
    For each block window, it loops through the sources,
    reads the corresponding source window until the block
    is filled with data or we run out of sources.

    Uses more disk IO but is faster* and
    consumes significantly less memory

    * The read efficiency comes from using
    RGBA tifs where we can assume band 4 is the sole
    determinant of nodata. This avoids the use of
    expensive masked reads but, of course, limits
    what data can be used. Hence merge_rgba.
    """
    first = sources[0]
    first_res = first.res
    dtype = first.dtypes[0]
    profile = first.profile

    # Extent from option or extent of all inputs.
    if bounds:
        dst_w, dst_s, dst_e, dst_n = bounds
    else:
        # scan input files.
        # while we're at it, validate assumptions about inputs
        xs = []
        ys = []
        for src in sources:
            left, bottom, right, top = src.bounds
            xs.extend([left, right])
            ys.extend([bottom, top])
            if src.profile['count'] != 4:  # TODO, how to test for alpha?
                raise ValueError("Inputs must be 4-band RGBA rasters")
        dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)
    logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
    output_transform = Affine.translation(dst_w, dst_n)
    logger.debug("Output transform, before scaling: %r", output_transform)

    # Resolution/pixel size.
    if not res:
        res = first_res
    elif not np.iterable(res):
        res = (res, res)
    elif len(res) == 1:
        res = (res[0], res[0])
    output_transform *= Affine.scale(res[0], -res[1])
    logger.debug("Output transform, after scaling: %r", output_transform)

    # Compute output array shape. We guarantee it will cover the output
    # bounds completely.
    output_width = int(math.ceil((dst_e - dst_w) / res[0]))
    output_height = int(math.ceil((dst_n - dst_s) / res[1]))

    # Adjust bounds to fit.
    dst_e, dst_s = output_transform * (output_width, output_height)
    logger.debug("Output width: %d, height: %d", output_width, output_height)
    logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))

    profile['transform'] = output_transform
    profile['height'] = output_height
    profile['width'] = output_width

    profile['nodata'] = None  # rely on alpha mask

    # Creation opts
    profile.update(creation_options)

    # create destination file
    with rasterio.open(outtif, 'w', **profile) as dstrast:

        for idx, dst_window in dstrast.block_windows():

            left, bottom, right, top = dstrast.window_bounds(dst_window)
            blocksize = ((dst_window[0][1] - dst_window[0][0]) *
                         (dst_window[1][1] - dst_window[1][0]))

            # initialize array destined for the block
            dst_count = first.count
            dst_rows, dst_cols = tuple(b - a for a, b in dst_window)
            dst_shape = (dst_count, dst_rows, dst_cols)
            logger.debug("Temp shape: %r", dst_shape)
            dstarr = np.zeros(dst_shape, dtype=dtype)

            # Read up srcs until
            # a. everything is data; i.e. no nodata
            # b. no sources left
            for src in sources:
                # The full_cover behavior is problematic here as it includes
                # extra pixels along the bottom right when the sources are
                # slightly misaligned
                #
                # src_window = get_window(left, bottom, right, top,
                #                         src.transform, precision=precision)
                #
                # With rio merge this just adds an extra row, but when the
                # imprecision occurs at each block, you get artifacts

                # Alternative, custom get_window using rounding
                window_start = rowcol(
                    src.transform, left, top, op=round, precision=precision)
                window_stop = rowcol(
                    src.transform, right, bottom, op=round, precision=precision)
                src_window = tuple(zip(window_start, window_stop))

                temp = np.zeros(dst_shape, dtype=dtype)
                temp = src.read(out=temp, window=src_window,
                                boundless=True, masked=False)

                # pixels without data yet are available to write
                write_region = np.logical_and(
                    (dstarr[3] == 0),  # 0 is nodata
                    (temp[3] != 0))
                np.copyto(dstarr, temp, where=write_region)

                # stop reading sources once the block is completely filled with data
                if np.count_nonzero(dstarr[3]) == blocksize:
                    break

            dstrast.write(dstarr, window=dst_window)

    return output_transform
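A hedged usage sketch for merge_rgba_tool; the paths are placeholders and the inputs must be 4-band RGBA GeoTIFFs opened with rasterio.

import rasterio

paths = ['tile_a.tif', 'tile_b.tif']        # placeholder RGBA inputs
sources = [rasterio.open(p) for p in paths]
try:
    merge_rgba_tool(sources, 'mosaic.tif')
finally:
    for src in sources:
        src.close()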
Example No. 24
def merge(
    datasets,
    bounds=None,
    res=None,
    nodata=None,
    dtype=None,
    precision=None,
    indexes=None,
    output_count=None,
    resampling=Resampling.nearest,
    method="first",
    target_aligned_pixels=False,
    dst_path=None,
    dst_kwds=None,
):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm (default) or another method. If the output file exists,
    its values will be overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.

    Parameters
    ----------
    datasets : list of dataset objects opened in 'r' mode, filenames or PathLike objects
        source datasets to be merged.
    bounds: tuple, optional
        Bounds of the output image (left, bottom, right, top).
        If not set, bounds are determined from bounds of input rasters.
    res: tuple, optional
        Output resolution in units of coordinate reference system. If not set,
        the resolution of the first raster is used. If a single value is passed,
        output pixels will be square.
    nodata: float, optional
        nodata value to use in output file. If not set, uses the nodata value
        in the first input raster.
    dtype: numpy dtype or string
        dtype to use in outputfile. If not set, uses the dtype value in the
        first input raster.
    precision: float, optional
        Number of decimal points of precision when computing inverse transform.
    indexes : list of ints or a single int, optional
        bands to read and merge
    output_count: int, optional
        If using a callable, it may be useful to have additional bands in the output
        in addition to the indexes specified for read
    resampling : Resampling, optional
        Resampling algorithm used when reading input files.
        Default: `Resampling.nearest`.
    method : str or callable
        pre-defined method:
            first: reverse painting
            last: paint valid new on top of existing
            min: pixel-wise min of existing and new
            max: pixel-wise max of existing and new
        or custom callable with signature:

        def function(merged_data, new_data, merged_mask, new_mask, index=None, roff=None, coff=None):

            Parameters
            ----------
            merged_data : array_like
                array to update with new_data
            new_data : array_like
                data to merge
                same shape as merged_data
            merged_mask, new_mask : array_like
                boolean masks where merged/new data pixels are invalid
                same shape as merged_data
            index: int
                index of the current dataset within the merged dataset collection
            roff: int
                row offset in base array
            coff: int
                column offset in base array

    target_aligned_pixels : bool, optional
        Whether to adjust output image bounds so that pixel coordinates
        are integer multiples of pixel size, matching the ``-tap``
        options of GDAL utilities.  Default: False.
    dst_path : str or PathLike, optional
        Path of output dataset
    dst_kwds : dict, optional
        Dictionary of creation options and other parameters that will be
        overlaid on the profile of the output dataset.

    Returns
    -------
    tuple

        Two elements:

            dest: numpy ndarray
                Contents of all input rasters in single array

            out_transform: affine.Affine()
                Information for mapping pixel coordinates in `dest` to another
                coordinate system

    """
    if method in MERGE_METHODS:
        copyto = MERGE_METHODS[method]
    elif callable(method):
        copyto = method
    else:
        raise ValueError(
            'Unknown method {0}, must be one of {1} or callable'.format(
                method, list(MERGE_METHODS.keys())))

    # Create a dataset_opener object to use in several places in this function.
    if isinstance(datasets[0], (str, os.PathLike)):
        dataset_opener = rasterio.open
    else:

        @contextmanager
        def nullcontext(obj):
            try:
                yield obj
            finally:
                pass

        dataset_opener = nullcontext

    with dataset_opener(datasets[0]) as first:
        first_profile = first.profile
        first_res = first.res
        nodataval = first.nodatavals[0]
        dt = first.dtypes[0]

        if indexes is None:
            src_count = first.count
        elif isinstance(indexes, int):
            src_count = indexes
        else:
            src_count = len(indexes)

        try:
            first_colormap = first.colormap(1)
        except ValueError:
            first_colormap = None

    if not output_count:
        output_count = src_count

    # Extent from option or extent of all inputs
    if bounds:
        dst_w, dst_s, dst_e, dst_n = bounds
    else:
        # scan input files
        xs = []
        ys = []
        for dataset in datasets:
            with dataset_opener(dataset) as src:
                left, bottom, right, top = src.bounds
            xs.extend([left, right])
            ys.extend([bottom, top])
        dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)

    # Resolution/pixel size
    if not res:
        res = first_res
    elif not np.iterable(res):
        res = (res, res)
    elif len(res) == 1:
        res = (res[0], res[0])

    if target_aligned_pixels:
        dst_w = math.floor(dst_w / res[0]) * res[0]
        dst_e = math.ceil(dst_e / res[0]) * res[0]
        dst_s = math.floor(dst_s / res[1]) * res[1]
        dst_n = math.ceil(dst_n / res[1]) * res[1]

    # Compute output array shape. We guarantee it will cover the output
    # bounds completely
    output_width = int(round((dst_e - dst_w) / res[0]))
    output_height = int(round((dst_n - dst_s) / res[1]))

    output_transform = Affine.translation(dst_w, dst_n) * Affine.scale(
        res[0], -res[1])

    if dtype is not None:
        dt = dtype
        logger.debug("Set dtype: %s", dt)

    out_profile = first_profile
    out_profile.update(**(dst_kwds or {}))

    out_profile["transform"] = output_transform
    out_profile["height"] = output_height
    out_profile["width"] = output_width
    out_profile["count"] = output_count
    if nodata is not None:
        out_profile["nodata"] = nodata

    # create destination array
    dest = np.zeros((output_count, output_height, output_width), dtype=dt)

    if nodata is not None:
        nodataval = nodata
        logger.debug("Set nodataval: %r", nodataval)

    if nodataval is not None:
        # Only fill if the nodataval is within dtype's range
        inrange = False
        if np.issubdtype(dt, np.integer):
            info = np.iinfo(dt)
            inrange = (info.min <= nodataval <= info.max)
        elif np.issubdtype(dt, np.floating):
            if math.isnan(nodataval):
                inrange = True
            else:
                info = np.finfo(dt)
                inrange = (info.min <= nodataval <= info.max)
        if inrange:
            dest.fill(nodataval)
        else:
            warnings.warn(
                "The nodata value, %s, is beyond the valid "
                "range of the chosen data type, %s. Consider overriding it "
                "using the --nodata option for better results." %
                (nodataval, dt))
    else:
        nodataval = 0

    for idx, dataset in enumerate(datasets):
        with dataset_opener(dataset) as src:
            # Real World (tm) use of boundless reads.
            # This approach uses the maximum amount of memory to solve the
            # problem. Making it more efficient is a TODO.

            if disjoint_bounds((dst_w, dst_s, dst_e, dst_n), src.bounds):
                logger.debug("Skipping source: src=%r, window=%r", src)
                continue

            # 1. Compute spatial intersection of destination and source
            src_w, src_s, src_e, src_n = src.bounds

            int_w = src_w if src_w > dst_w else dst_w
            int_s = src_s if src_s > dst_s else dst_s
            int_e = src_e if src_e < dst_e else dst_e
            int_n = src_n if src_n < dst_n else dst_n

            # 2. Compute the source window
            src_window = windows.from_bounds(int_w,
                                             int_s,
                                             int_e,
                                             int_n,
                                             src.transform,
                                             precision=precision)

            # 3. Compute the destination window
            dst_window = windows.from_bounds(int_w,
                                             int_s,
                                             int_e,
                                             int_n,
                                             output_transform,
                                             precision=precision)

            # 4. Read data in source window into temp
            src_window_rnd_shp = src_window.round_shape(pixel_precision=0)
            dst_window_rnd_shp = dst_window.round_shape(pixel_precision=0)
            dst_window_rnd_off = dst_window_rnd_shp.round_offsets(
                pixel_precision=0)
            temp_height, temp_width = (
                dst_window_rnd_off.height,
                dst_window_rnd_off.width,
            )
            temp_shape = (src_count, temp_height, temp_width)
            temp_src = src.read(
                out_shape=temp_shape,
                window=src_window_rnd_shp,
                boundless=False,
                masked=True,
                indexes=indexes,
                resampling=resampling,
            )

        # 5. Copy elements of temp into dest
        roff, coff = (
            max(0, dst_window_rnd_off.row_off),
            max(0, dst_window_rnd_off.col_off),
        )
        region = dest[:, roff:roff + temp_height, coff:coff + temp_width]

        if math.isnan(nodataval):
            region_mask = np.isnan(region)
        elif np.issubdtype(region.dtype, np.floating):
            region_mask = np.isclose(region, nodataval)
        else:
            region_mask = region == nodataval

        # Ensure common shape, resolving issue #2202.
        temp = temp_src[:, :region.shape[1], :region.shape[2]]
        temp_mask = np.ma.getmask(temp)
        copyto(region,
               temp,
               region_mask,
               temp_mask,
               index=idx,
               roff=roff,
               coff=coff)

    if dst_path is None:
        return dest, output_transform

    else:
        with rasterio.open(dst_path, "w", **out_profile) as dst:
            dst.write(dest)
            if first_colormap:
                dst.write_colormap(1, first_colormap)
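
A minimal usage sketch for the merge function above (file names and output path are hypothetical; it assumes a rasterio version whose merge accepts file paths, as this implementation does):

import rasterio
from rasterio.merge import merge

# Hypothetical inputs: overlapping rasters sharing a CRS, dtype and band count.
sources = ["a.tif", "b.tif"]

mosaic, out_transform = merge(sources, method="first")

with rasterio.open(sources[0]) as src:
    profile = src.profile
profile.update(driver="GTiff", height=mosaic.shape[1], width=mosaic.shape[2],
               count=mosaic.shape[0], transform=out_transform)

with rasterio.open("mosaic.tif", "w", **profile) as dst:
    dst.write(mosaic)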
Exemplo n.º 25
0
def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
    """
    Based on https://github.com/mapbox/rio-merge-rgba/
    Merge orthophotos around cutlines using a blend buffer.
    """
    inputs = []
    bounds = None
    precision = 7

    for o, c in input_ortho_and_ortho_cuts:
        if not io.file_exists(o):
            log.ODM_WARNING(
                "%s does not exist. Will skip from merged orthophoto." % o)
            continue
        if not io.file_exists(c):
            log.ODM_WARNING(
                "%s does not exist. Will skip from merged orthophoto." % c)
            continue
        inputs.append((o, c))

    if len(inputs) == 0:
        log.ODM_WARNING("No input orthophotos, skipping merge.")
        return

    with rasterio.open(inputs[0][0]) as first:
        res = first.res
        dtype = first.dtypes[0]
        profile = first.profile
        num_bands = first.meta['count'] - 1  # minus alpha
        colorinterp = first.colorinterp

    log.ODM_INFO("%s valid orthophoto rasters to merge" % len(inputs))
    sources = [(rasterio.open(o), rasterio.open(c)) for o, c in inputs]

    # scan input files.
    # while we're at it, validate assumptions about inputs
    xs = []
    ys = []
    for src, _ in sources:
        left, bottom, right, top = src.bounds
        xs.extend([left, right])
        ys.extend([bottom, top])
        if src.profile["count"] < 4:
            raise ValueError("Inputs must be at least 4-band rasters")
    dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)
    log.ODM_INFO("Output bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n))

    output_transform = Affine.translation(dst_w, dst_n)
    output_transform *= Affine.scale(res[0], -res[1])

    # Compute output array shape. We guarantee it will cover the output
    # bounds completely.
    output_width = int(math.ceil((dst_e - dst_w) / res[0]))
    output_height = int(math.ceil((dst_n - dst_s) / res[1]))

    # Adjust bounds to fit.
    dst_e, dst_s = output_transform * (output_width, output_height)
    log.ODM_INFO("Output width: %d, height: %d" %
                 (output_width, output_height))
    log.ODM_INFO("Adjusted bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n))

    profile["transform"] = output_transform
    profile["height"] = output_height
    profile["width"] = output_width
    profile["tiled"] = orthophoto_vars.get('TILED', 'YES') == 'YES'
    profile["blockxsize"] = orthophoto_vars.get('BLOCKXSIZE', 512)
    profile["blockysize"] = orthophoto_vars.get('BLOCKYSIZE', 512)
    profile["compress"] = orthophoto_vars.get('COMPRESS', 'LZW')
    profile["predictor"] = orthophoto_vars.get('PREDICTOR', '2')
    profile["bigtiff"] = orthophoto_vars.get('BIGTIFF', 'IF_SAFER')
    profile.update()

    # create destination file
    with rasterio.open(output_orthophoto, "w", **profile) as dstrast:
        dstrast.colorinterp = colorinterp
        for idx, dst_window in dstrast.block_windows():
            left, bottom, right, top = dstrast.window_bounds(dst_window)

            blocksize = dst_window.width
            dst_rows, dst_cols = (dst_window.height, dst_window.width)

            # initialize array destined for the block
            dst_count = first.count
            dst_shape = (dst_count, dst_rows, dst_cols)

            dstarr = np.zeros(dst_shape, dtype=dtype)

            # First pass, write all rasters naively without blending
            for src, _ in sources:
                src_window = tuple(
                    zip(
                        rowcol(src.transform,
                               left,
                               top,
                               op=round,
                               precision=precision),
                        rowcol(src.transform,
                               right,
                               bottom,
                               op=round,
                               precision=precision)))

                temp = np.zeros(dst_shape, dtype=dtype)
                temp = src.read(out=temp,
                                window=src_window,
                                boundless=True,
                                masked=False)

                # pixels without data yet are available to write
                write_region = np.logical_and(
                    (dstarr[-1] == 0),
                    (temp[-1] != 0)  # 0 is nodata
                )
                np.copyto(dstarr, temp, where=write_region)

                # check if dest has any nodata pixels available
                if np.count_nonzero(dstarr[-1]) == blocksize:
                    break

            # Second pass, write all feathered rasters
            # blending the edges
            for src, _ in sources:
                src_window = tuple(
                    zip(
                        rowcol(src.transform,
                               left,
                               top,
                               op=round,
                               precision=precision),
                        rowcol(src.transform,
                               right,
                               bottom,
                               op=round,
                               precision=precision)))

                temp = np.zeros(dst_shape, dtype=dtype)
                temp = src.read(out=temp,
                                window=src_window,
                                boundless=True,
                                masked=False)

                where = temp[-1] != 0
                for b in range(0, num_bands):
                    blended = temp[-1] / 255.0 * temp[b] + (
                        1 - temp[-1] / 255.0) * dstarr[b]
                    np.copyto(dstarr[b],
                              blended,
                              casting='unsafe',
                              where=where)
                dstarr[-1][where] = 255.0

                # check if dest has any nodata pixels available
                if np.count_nonzero(dstarr[-1]) == blocksize:
                    break

            # Third pass, write cut rasters
            # blending the cutlines
            for _, cut in sources:
                src_window = tuple(
                    zip(
                        rowcol(cut.transform,
                               left,
                               top,
                               op=round,
                               precision=precision),
                        rowcol(cut.transform,
                               right,
                               bottom,
                               op=round,
                               precision=precision)))

                temp = np.zeros(dst_shape, dtype=dtype)
                temp = cut.read(out=temp,
                                window=src_window,
                                boundless=True,
                                masked=False)

                # For each band, average alpha values between
                # destination raster and cut raster
                for b in range(0, num_bands):
                    blended = temp[-1] / 255.0 * temp[b] + (
                        1 - temp[-1] / 255.0) * dstarr[b]
                    np.copyto(dstarr[b],
                              blended,
                              casting='unsafe',
                              where=temp[-1] != 0)

            dstrast.write(dstarr, window=dst_window)

    return output_orthophoto
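
The second and third passes above blend each band using the alpha channel as a weight. A small numpy sketch of that per-band blend, with made-up values:

import numpy as np

# One 2x2 block: existing mosaic band, incoming band, incoming alpha (0-255).
dstarr_b = np.array([[10, 10], [10, 10]], dtype=np.uint8)
temp_b = np.array([[200, 200], [200, 200]], dtype=np.uint8)
alpha = np.array([[0, 64], [128, 255]], dtype=np.uint8)

# Weighted average, as in the loops above: alpha/255 of the new pixel plus
# (1 - alpha/255) of what is already in the mosaic.
blended = alpha / 255.0 * temp_b + (1 - alpha / 255.0) * dstarr_b
np.copyto(dstarr_b, blended, casting='unsafe', where=alpha != 0)
print(dstarr_b)  # [[ 10  57] [105 200]]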
Exemplo n.º 26
0
import pandas as pd
import unittest
import rasterio as rio
import numpy as np
from rasterio.transform import Affine
import geopandas as gpd
from shapely.geometry import Polygon

dem_file = rio.MemoryFile()
x_min, y_max = 100, 500
res = 5
height, width = 100, 200
x_max = x_min + width * res
y_min = y_max - height * res
array = np.round(np.random.random((height, width)), 3)
transform = Affine.translation(x_min, y_max) * Affine.scale(res, -res)

with rio.open(
        dem_file,
        'w',
        driver='GTiff',
        height=height,
        width=width,
        count=1,
        dtype=array.dtype,
        transform=transform,
        nodata=-9999
) as dst:
    dst.write(array, 1)
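
The dataset written into the MemoryFile above can be reopened for reading without touching disk; a quick check (assuming the snippet above has already run):

with dem_file.open() as src:
    assert src.shape == (height, width)
    assert src.transform == transform
    assert np.allclose(src.read(1), array)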

Exemplo n.º 27
0
    def rasterize(self, raster_file=None,
                  pixel_size=None,
                  all_touched=False,
                  no_data_value=0,
                  default_value=1,
                  crs=None,
                  cropped=False,
                  classifier_column=None,
                  *args, **kwargs):
        """
        Rasterize (burn) the environment rangemaps (geometrical shapes) into pixels (cells), i.e., a 2-dimensional image array
        of type numpy ndarray. Uses the `Rasterio <https://mapbox.github.io/rasterio/_modules/rasterio/features.html>`_ library
        for this purpose. All the shapes from the ``VectorEnvironmentalLayer`` object data are burned in a single *band* of the image.
        Rasterio datasets can generally have one or more bands, or layers. Following the GDAL convention, these are indexed starting with 1.

        :param string raster_file: The full path to the target GeoTIFF raster file (including the directory and filename in one string).

        :param int pixel_size: The size of the pixel in degrees, i.e., the resolution to use for rasterizing.

        :param bool all_touched: If true, all pixels touched by geometries, will be burned in. If false, only pixels \
        whose center is within the polygon or that are selected by Bresenham's line algorithm, will be burned in.

        :param int no_data_value: Used as value of the pixels which are not burned in. Default is 0.

        :param int default_value: Used as value of the pixels which are burned in. Default is 1.

        :param crs: The Coordinate Reference System to use. Default is "ESPG:4326"

        :param bool cropped: If true, the resulting pixel array (image) is cropped to the region borders, which contain \
        the burned pixels (i.e., an envelope within the range). Otherwise, a "global world map" is used, i.e., the boundaries \
        are set to (-180, -90, 180, 90) for the resulting array.

        :returns: The rasterized data as a numpy ndarray. When ``classifier_column`` is given, \
        the result is a stacked array with one layer per category.

        :rtype: numpy.ndarray

        """
        if not (pixel_size and raster_file):
            raise AttributeError("Please provide both a pixel_size and a target raster_file.")

        if not hasattr(self, 'data_full'):
            raise AttributeError("You have not loaded the data.")

        if crs is None:
            crs = {'init': "EPSG:4326"}

        # crop to the boundaries of the shape?
        if cropped:
            # cascaded_union_geometry = shapely.ops.cascaded_union(self.data_full.geometry)
            x_min, y_min, x_max, y_max = self.data_full.geometry.total_bounds
        # else global map
        else:
            x_min, y_min, x_max, y_max = -180, -90, 180, 90

        x_res = int((x_max - x_min) / pixel_size)
        y_res = int((y_max - y_min) / pixel_size)
        logger.info("Will rasterize using pixel_size=%s, all_touched=%s, no_data_value=%s, fill_value=%s "
                    % (pixel_size, all_touched, no_data_value, default_value))
        transform = Affine.translation(x_min, y_max) * Affine.scale(pixel_size, -pixel_size)
        stacked_layers = None
        if classifier_column:
            logger.info("Will rasterize using classifier: %s." % classifier_column)
            classifier_categories = self.data_full[classifier_column].unique()
            stacked_layers = []
            for category_name in classifier_categories:
                if category_name:
                    logger.info("Rasterizing category %s " % category_name)
                    result = features.rasterize(self.data_full.geometry[self.data_full[classifier_column] == category_name],
                                                transform=transform,
                                                out_shape=(y_res, x_res),
                                                all_touched=all_touched,
                                                fill=no_data_value,
                                                default_value=default_value
                                                )
                    stacked_layers.append(result)

            stacked_layers = np.stack(stacked_layers)

            # Open the output once and write each category into its own band;
            # reopening the file in 'w' mode inside the loop would truncate it each time.
            with rasterio.open(raster_file, 'w', driver='GTiff', width=x_res, height=y_res,
                               count=stacked_layers.shape[0],
                               dtype=np.uint8,
                               nodata=no_data_value,
                               transform=transform,
                               crs=crs) as out:
                for i, band in enumerate(stacked_layers, 1):
                    out.write(band.astype(np.uint8), indexes=i)
            result_final = stacked_layers
        else:
            logger.info("Will rasterize everything on a single band.")
            result_final = features.rasterize(self.data_full.geometry,
                                              transform=transform,
                                              out_shape=(y_res, x_res),
                                              all_touched=all_touched,
                                              fill=no_data_value,
                                              default_value=default_value
                                              )

            with rasterio.open(raster_file, 'w', driver='GTiff', width=x_res, height=y_res,
                               count=1,
                               dtype=np.uint8,
                               nodata=no_data_value,
                               transform=transform,
                               crs=crs) as out:
                out.write(result_final.astype(np.uint8), indexes=1)
        logger.info("RASTERIO: Data rasterized into file %s " % raster_file)
        logger.info("RASTERIO: Resolution: x_res={0} y_res={1}".format(x_res, y_res))
        self.raster_file = raster_file
        self.raster_affine = transform
        self.stacked_layers = stacked_layers
        return result_final
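
For reference, the core call this method wraps is rasterio.features.rasterize. A self-contained sketch with a hypothetical square polygon and a made-up grid:

import numpy as np
from rasterio import features
from rasterio.transform import Affine
from shapely.geometry import box

pixel_size = 0.5
x_min, y_min, x_max, y_max = -10, -10, 10, 10
x_res = int((x_max - x_min) / pixel_size)
y_res = int((y_max - y_min) / pixel_size)
transform = Affine.translation(x_min, y_max) * Affine.scale(pixel_size, -pixel_size)

# Burn one square into a y_res x x_res grid: 1 inside, 0 (nodata) outside.
square = box(-5, -5, 5, 5)
burned = features.rasterize([square], out_shape=(y_res, x_res),
                            transform=transform, fill=0, default_value=1,
                            all_touched=False, dtype=np.uint8)
print(burned.shape, burned.sum())  # (40, 40) 400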
Exemplo n.º 28
0
def merge(sources, bounds=None, res=None, nodata=None, precision=7):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm. If the output file exists, its values will be
    overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.

    Parameters
    ----------
    sources: list of source datasets
        Open rasterio RasterReader objects to be merged.
    bounds: tuple, optional
        Bounds of the output image (left, bottom, right, top).
        If not set, bounds are determined from bounds of input rasters.
    res: tuple, optional
        Output resolution in units of coordinate reference system. If not set,
        the resolution of the first raster is used. If a single value is passed,
        output pixels will be square.
    nodata: float, optional
        nodata value to use in output file. If not set, uses the nodata value
        in the first input raster.

    Returns
    -------
    dest: numpy ndarray
        Contents of all input rasters in single array.
    out_transform: affine object
        Information for mapping pixel coordinates in `dest` to another
        coordinate system
    """
    first = sources[0]
    first_res = first.res
    nodataval = first.nodatavals[0]
    dtype = first.dtypes[0]

    # Extent from option or extent of all inputs.
    if bounds:
        dst_w, dst_s, dst_e, dst_n = bounds
    else:
        # scan input files.
        xs = []
        ys = []
        for src in sources:
            left, bottom, right, top = src.bounds
            xs.extend([left, right])
            ys.extend([bottom, top])
        dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)

    logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
    output_transform = Affine.translation(dst_w, dst_n)
    logger.debug("Output transform, before scaling: %r", output_transform)

    # Resolution/pixel size.
    if not res:
        res = first_res
    elif not np.iterable(res):
        res = (res, res)
    elif len(res) == 1:
        res = (res[0], res[0])
    output_transform *= Affine.scale(res[0], -res[1])
    logger.debug("Output transform, after scaling: %r", output_transform)

    # Compute output array shape. We guarantee it will cover the output
    # bounds completely.
    output_width = int(math.ceil((dst_e - dst_w) / res[0]))
    output_height = int(math.ceil((dst_n - dst_s) / res[1]))

    # Adjust bounds to fit.
    dst_e, dst_s = output_transform * (output_width, output_height)
    logger.debug("Output width: %d, height: %d", output_width, output_height)
    logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))

    # create destination array
    dest = np.zeros((first.count, output_height, output_width), dtype=dtype)

    if nodata is not None:
        nodataval = nodata
        logger.debug("Set nodataval: %r", nodataval)

    if nodataval is not None:
        # Only fill if the nodataval is within dtype's range.
        inrange = False
        if np.dtype(dtype).kind in ('i', 'u'):
            info = np.iinfo(dtype)
            inrange = (info.min <= nodataval <= info.max)
        elif np.dtype(dtype).kind == 'f':
            info = np.finfo(dtype)
            inrange = (info.min <= nodataval <= info.max)
        if inrange:
            dest.fill(nodataval)
        else:
            warnings.warn(
                "Input file's nodata value, %s, is beyond the valid "
                "range of its data type, %s. Consider overriding it "
                "using the --nodata option for better results." % (
                    nodataval, dtype))
    else:
        nodataval = 0

    for src in sources:
        # Real World (tm) use of boundless reads.
        # This approach uses the maximum amount of memory to solve the problem.
        # Making it more efficient is a TODO.

        # 1. Compute spatial intersection of destination and source.
        src_w, src_s, src_e, src_n = src.bounds

        int_w = src_w if src_w > dst_w else dst_w
        int_s = src_s if src_s > dst_s else dst_s
        int_e = src_e if src_e < dst_e else dst_e
        int_n = src_n if src_n < dst_n else dst_n

        # 2. Compute the source window.
        src_window = get_window(
            int_w, int_s, int_e, int_n, src.affine, precision=precision)
        logger.debug("Src %s window: %r", src.name, src_window)

        # 3. Compute the destination window.
        dst_window = get_window(
            int_w, int_s, int_e, int_n, output_transform, precision=precision)
        logger.debug("Dst window: %r", dst_window)

        # 4. Initialize temp array.
        tcount = first.count
        trows, tcols = tuple(b - a for a, b in dst_window)

        temp_shape = (tcount, trows, tcols)
        logger.debug("Temp shape: %r", temp_shape)

        temp = np.zeros(temp_shape, dtype=dtype)
        temp = src.read(out=temp, window=src_window, boundless=False,
                        masked=True)

        # 5. Copy elements of temp into dest.
        roff, coff = dst_window[0][0], dst_window[1][0]

        region = dest[:, roff:roff + trows, coff:coff + tcols]
        np.copyto(
            region, temp,
            where=np.logical_and(region == nodataval, temp.mask == False))

    return dest, output_transform
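
A worked example of the shape arithmetic above, with made-up numbers: a 25-unit extent at a 10-unit pixel size is rounded up to 3 pixels, and the south/east bounds are then pushed out to fit the grid.

import math
from rasterio.transform import Affine

dst_w, dst_s, dst_e, dst_n = 0.0, 0.0, 25.0, 25.0
res = (10.0, 10.0)

output_width = int(math.ceil((dst_e - dst_w) / res[0]))   # ceil(2.5) -> 3
output_height = int(math.ceil((dst_n - dst_s) / res[1]))  # ceil(2.5) -> 3

output_transform = Affine.translation(dst_w, dst_n) * Affine.scale(res[0], -res[1])
dst_e, dst_s = output_transform * (output_width, output_height)
print(output_width, output_height, (dst_w, dst_s, dst_e, dst_n))
# 3 3 (0.0, -5.0, 30.0, 25.0)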
Exemplo n.º 29
0
def transform_lat_lon(lat, lon):
    lat = np.asarray(lat)
    lon = np.asarray(lon)
    trans = Affine.translation(lon[0], lat[0])
    scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
    return trans * scale
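
A usage sketch for the helper above, assuming evenly spaced 1-D coordinate arrays (e.g. from a gridded NetCDF file); the cell size of the resulting transform is the coordinate spacing.

import numpy as np

lat = np.linspace(50.0, 40.0, 11)   # descending, 1-degree spacing
lon = np.linspace(-5.0, 5.0, 11)    # ascending, 1-degree spacing

transform = transform_lat_lon(lat, lon)
print(transform * (0, 0))  # (-5.0, 50.0): the first lon/lat pair
print(transform * (1, 1))  # (-4.0, 49.0): one cell east and one cell south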
Exemplo n.º 30
0
    print("width, height: ", width, height)

    if True:
        #bbox = Window( px_a, py_a, width, height)
        bbox = from_bounds(lon_a, lat_b, lon_b, lat_a,
                           dataset.transform)  #left, bottom, right, top
        window = dataset.read(window=bbox)
        print(window.shape)
        res_x = (lon_b - lon_a) / width
        res_y = (lat_b - lat_a) / height
        # this is the point (A) scaled, so we can locate coords inside.
        # Affine.scale(res_x, res_x) should be Affine.scale(res_x, res_y), but that
        # produces non-square pixels and inserts dx,dy values that some software
        # does not support. Tested with GlobalMapper, and it works fine.
        transform = Affine.translation(
            lon_a + res_x, lat_a + res_y) * Affine.scale(res_x, res_x)

        with rasterio.open(
                SUBSET,
                'w',
                driver='GTiff',
                height=height,
                width=width,
                count=window.shape[0],
                transform=transform,
                # specific for format
                blockxsize=256,
                blockysize=256,
                compress='lzw',
                dtype=window.dtype,
                interleave='band',
Exemplo n.º 31
0
plt.clf()
plt.imshow(imgOut, cmap=plt.get_cmap('terrain'))
plt.colorbar()
plt.tight_layout()  # DEBUG
plt.savefig(outputDir + 'out-color.png')

## Write the GeoTIFF

# For actual heightmap output, set 'ocean' to the nodata value
imgOut[imgOut == oceanFloor] = -5000.0
imgOut = np.flipud(imgOut)  # Adjust to GeoTIFF coordinate system

projection = f'+proj=ortho +lat_0={latitude} +lon_0={longitude}'  # Adjust lat_0 and lon_0 for location
transform = Affine.scale(*(shore.rasterShape[1]*resolution/outputResolution,shore.rasterShape[0]*resolution/outputResolution)) * \
            Affine.translation(-outputResolution*0.5,-outputResolution*0.5)
new_dataset = rasterio.open(outputDir + '/out-geo.tif',
                            'w',
                            driver='GTiff',
                            height=imgOut.shape[0],
                            width=imgOut.shape[1],
                            count=1,
                            dtype=imgOut.dtype,
                            crs=projection,
                            transform=transform,
                            nodata=-5000.0)
new_dataset.write(imgOut, 1)
print(new_dataset.meta)
new_dataset.close()
Exemplo n.º 32
0
import rasterio
from rasterio.transform import Affine

img = rasterio.open("color.tif")
angle = -85
res = 360/5928
transform = Affine.translation(0,angle) * Affine.scale(res, res)
new = rasterio.open(f"out_neg85.tif", "w", driver="GTiff", height=2963, width=5926, count=3, transform=transform, crs="+proj=latlong", dtype=img.read().dtype, nodata=None, interleave="pixel", compress="lzw")
new.write(img.read())
new.close()
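
A quick check of what the transform in this snippet does (a hedged note: with a positive y scale the rows run south-to-north, unlike the usual north-up GeoTIFF layout):

# Pixel (col, row) maps to world coordinates via the affine transform.
print(transform * (0, 0))        # (0.0, -85.0): the translation origin
print(transform * (5926, 2963))  # roughly (359.9, 94.9): opposite corner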
Exemplo n.º 33
0
def dataset_features(
        src,
        bidx=None,
        sampling=1,
        band=True,
        as_mask=False,
        with_nodata=False,
        geographic=True,
        precision=-1):
    """Yield GeoJSON features for the dataset

    The geometries are polygons bounding contiguous regions of the same raster value.

    Parameters
    ----------
    src: Rasterio Dataset

    bidx: int
        band index

    sampling: int (DEFAULT: 1)
        Inverse of the sampling fraction; a value of 10 decimates

    band: boolean (DEFAULT: True)
        extract features from a band (True) or a mask (False)

    as_mask: boolean (DEFAULT: False)
        Interpret band as a mask and output only one class of valid data shapes?

    with_nodata: boolean (DEFAULT: False)
        Include nodata regions?

    geographic: bool (DEFAULT: True)
        Output shapes in EPSG:4326? Otherwise use the native CRS.

    precision: int (DEFAULT: -1)
        Decimal precision of coordinates. -1 for full float precision output

    Yields
    ------
    GeoJSON-like Feature dictionaries for shapes found in the given band
    """
    if bidx is not None and bidx > src.count:
        raise ValueError('bidx is out of range for raster')

    img = None
    msk = None

    # Adjust transforms.
    transform = src.transform
    if sampling > 1:
        # Determine the target shape (to decimate)
        shape = (int(math.ceil(src.height / sampling)),
                 int(math.ceil(src.width / sampling)))

        # Calculate independent sampling factors
        x_sampling = src.width / shape[1]
        y_sampling = src.height / shape[0]

        # Decimation of the raster produces a georeferencing
        # shift that we correct with a translation.
        transform *= Affine.translation(
            src.width % x_sampling, src.height % y_sampling)

        # And follow by scaling.
        transform *= Affine.scale(x_sampling, y_sampling)

    # Most of the time, we'll use the valid data mask.
    # We skip reading it if we're extracting every possible
    # feature (even invalid data features) from a band.
    if not band or (band and not as_mask and not with_nodata):
        if sampling == 1:
            msk = src.read_masks(bidx)
        else:
            msk_shape = shape
            if bidx is None:
                msk = np.zeros(
                    (src.count,) + msk_shape, 'uint8')
            else:
                msk = np.zeros(msk_shape, 'uint8')
            msk = src.read_masks(bidx, msk)

        if bidx is None:
            msk = np.logical_or.reduce(msk).astype('uint8')

        # Possibly overridden below.
        img = msk

    # Read the band data unless the --mask option is given.
    if band:
        if sampling == 1:
            img = src.read(bidx, masked=False)
        else:
            img = np.zeros(
                shape,
                dtype=src.dtypes[src.indexes.index(bidx)])
            img = src.read(bidx, img, masked=False)

    # If as_mask option was given, convert the image
    # to a binary image. This reduces the number of shape
    # categories to 2 and likely reduces the number of
    # shapes.
    if as_mask:
        tmp = np.ones_like(img, 'uint8') * 255
        tmp[img == 0] = 0
        img = tmp
        if not with_nodata:
            msk = tmp

    # Prepare keyword arguments for shapes().
    kwargs = {'transform': transform}
    if not with_nodata:
        kwargs['mask'] = msk

    src_basename = os.path.basename(src.name)

    # Yield GeoJSON features.
    for i, (g, val) in enumerate(
            rasterio.features.shapes(img, **kwargs)):
        if geographic:
            g = warp.transform_geom(
                src.crs, 'EPSG:4326', g,
                antimeridian_cutting=True, precision=precision)
        xs, ys = zip(*coords(g))
        yield {
            'type': 'Feature',
            'id': "{0}:{1}".format(src_basename, i),
            'properties': {
                'val': val,
                'filename': src_basename
            },
            'bbox': [min(xs), min(ys), max(xs), max(ys)],
            'geometry': g
        }
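
A usage sketch for dataset_features: collect the yielded features into a GeoJSON FeatureCollection (the input and output paths are hypothetical):

import json
import rasterio

with rasterio.open("example.tif") as src:
    collection = {
        "type": "FeatureCollection",
        "features": list(dataset_features(src, bidx=1, as_mask=True,
                                           geographic=True, precision=6)),
    }

with open("shapes.geojson", "w") as f:
    json.dump(collection, f)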
Exemplo n.º 34
0
        def __call__(self):
            with rasterio.open(input) as src:
                img = None
                msk = None

                # Adjust transforms.
                if sampling == 1:
                    transform = src.affine
                else:
                    transform = src.affine * Affine.scale(float(sampling))

                # Most of the time, we'll use the valid data mask.
                # We skip reading it if we're extracting every possible
                # feature (even invalid data features) from a band.
                if not band or (band and not as_mask and not with_nodata):
                    if sampling == 1:
                        msk = src.read_masks(bidx)
                    else:
                        msk_shape = (src.height // sampling,
                                     src.width // sampling)
                        if bidx is None:
                            msk = numpy.zeros((src.count, ) + msk_shape,
                                              'uint8')
                        else:
                            msk = numpy.zeros(msk_shape, 'uint8')
                        msk = src.read_masks(bidx, msk)

                    if bidx is None:
                        msk = numpy.logical_or.reduce(msk).astype('uint8')

                    # Possibly overridden below.
                    img = msk

                # Read the band data unless the --mask option is given.
                if band:
                    if sampling == 1:
                        img = src.read(bidx, masked=False)
                    else:
                        img = numpy.zeros(
                            (src.height // sampling, src.width // sampling),
                            dtype=src.dtypes[src.indexes.index(bidx)])
                        img = src.read(bidx, img, masked=False)

                # If --as-mask option was given, convert the image
                # to a binary image. This reduces the number of shape
                # categories to 2 and likely reduces the number of
                # shapes.
                if as_mask:
                    tmp = numpy.ones_like(img, 'uint8') * 255
                    tmp[img == 0] = 0
                    img = tmp
                    if not with_nodata:
                        msk = tmp

                # Transform the raster bounds.
                bounds = src.bounds
                xs = [bounds[0], bounds[2]]
                ys = [bounds[1], bounds[3]]
                if projection == 'geographic':
                    xs, ys = rasterio.warp.transform(src.crs,
                                                     {'init': 'epsg:4326'}, xs,
                                                     ys)
                if precision >= 0:
                    xs = [round(v, precision) for v in xs]
                    ys = [round(v, precision) for v in ys]
                self._xs = xs
                self._ys = ys

                # Prepare keyword arguments for shapes().
                kwargs = {'transform': transform}
                if not with_nodata:
                    kwargs['mask'] = msk

                # Yield GeoJSON features.
                for g, i in rasterio.features.shapes(img, **kwargs):
                    if projection == 'geographic':
                        g = rasterio.warp.transform_geom(
                            src.crs,
                            'EPSG:4326',
                            g,
                            antimeridian_cutting=True,
                            precision=precision)
                    xs, ys = zip(*coords(g))
                    yield {
                        'type': 'Feature',
                        'id': str(i),
                        'properties': {
                            'val': i,
                            'filename': os.path.basename(src.name)
                        },
                        'bbox': [min(xs), min(ys),
                                 max(xs), max(ys)],
                        'geometry': g
                    }
Exemplo n.º 35
0
def affine_from_corner(ulx, uly, dx, dy):
    return Affine.translation(ulx, uly) * Affine.scale(dx, -dy)
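
A short usage sketch: with a 30 m pixel and an upper-left corner at (500000, 4600000), the helper gives the usual north-up transform.

t = affine_from_corner(500000, 4600000, 30, 30)
print(t * (0, 0))    # (500000.0, 4600000.0): upper-left corner
print(t * (10, 10))  # (500300.0, 4599700.0): ten pixels right and down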
Exemplo n.º 36
0
def merge(
    datasets,
    bounds=None,
    res=None,
    nodata=None,
    dtype=None,
    precision=10,
    indexes=None,
    output_count=None,
    resampling=Resampling.nearest,
    method="first",
    dst_path=None,
    dst_kwds=None,
):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm (default) or another method. If the output file exists,
    its values will be overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.

    Parameters
    ----------
    datasets : list of dataset objects opened in 'r' mode, filenames or pathlib.Path objects
        source datasets to be merged.
    bounds: tuple, optional
        Bounds of the output image (left, bottom, right, top).
        If not set, bounds are determined from bounds of input rasters.
    res: tuple, optional
        Output resolution in units of coordinate reference system. If not set,
        the resolution of the first raster is used. If a single value is passed,
        output pixels will be square.
    nodata: float, optional
        nodata value to use in output file. If not set, uses the nodata value
        in the first input raster.
    dtype: numpy dtype or string
        dtype to use in the output file. If not set, uses the dtype value in the
        first input raster.
    precision: float, optional
        Number of decimal points of precision when computing inverse transform.
    indexes : list of ints or a single int, optional
        bands to read and merge
    output_count: int, optional
        If using callable it may be useful to have additional bands in the output
        in addition to the indexes specified for read
    resampling : Resampling, optional
        Resampling algorithm used when reading input files.
        Default: `Resampling.nearest`.
    method : str or callable
        pre-defined method:
            first: reverse painting
            last: paint valid new on top of existing
            min: pixel-wise min of existing and new
            max: pixel-wise max of existing and new
        or custom callable with signature:

        def function(old_data, new_data, old_nodata, new_nodata, index=None, roff=None, coff=None):

            Parameters
            ----------
            old_data : array_like
                array to update with new_data
            new_data : array_like
                data to merge
                same shape as old_data
            old_nodata, new_nodata : array_like
                boolean masks where old/new data is nodata
                same shape as old_data
            index: int
                index of the current dataset within the merged dataset collection
            roff: int
                row offset in base array
            coff: int
                column offset in base array
    dst_path : str or Pathlike, optional
        Path of output dataset
    dst_kwds : dict, optional
        Dictionary of creation options and other parameters that will be
        overlaid on the profile of the output dataset.

    Returns
    -------
    tuple

        Two elements:

            dest: numpy ndarray
                Contents of all input rasters in single array

            out_transform: affine.Affine()
                Information for mapping pixel coordinates in `dest` to another
                coordinate system

    """
    if method not in MERGE_METHODS and not callable(method):
        raise ValueError(
            'Unknown method {0}, must be one of {1} or callable'.format(
                method, MERGE_METHODS))

    # Create a dataset_opener object to use in several places in this function.
    if isinstance(datasets[0], string_types) or isinstance(datasets[0], Path):
        dataset_opener = rasterio.open
    else:

        @contextmanager
        def nullcontext(obj):
            try:
                yield obj
            finally:
                pass

        dataset_opener = nullcontext

    with dataset_opener(datasets[0]) as first:
        first_profile = first.profile
        first_res = first.res
        nodataval = first.nodatavals[0]
        dt = first.dtypes[0]

        if indexes is None:
            src_count = first.count
        elif isinstance(indexes, int):
            src_count = indexes
        else:
            src_count = len(indexes)

        try:
            first_colormap = first.colormap(1)
        except ValueError:
            first_colormap = None

    if not output_count:
        output_count = src_count

    # Extent from option or extent of all inputs
    if bounds:
        dst_w, dst_s, dst_e, dst_n = bounds
    else:
        # scan input files
        xs = []
        ys = []
        for dataset in datasets:
            with dataset_opener(dataset) as src:
                left, bottom, right, top = src.bounds
            xs.extend([left, right])
            ys.extend([bottom, top])
        dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)

    logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
    output_transform = Affine.translation(dst_w, dst_n)
    logger.debug("Output transform, before scaling: %r", output_transform)

    # Resolution/pixel size
    if not res:
        res = first_res
    elif not np.iterable(res):
        res = (res, res)
    elif len(res) == 1:
        res = (res[0], res[0])
    output_transform *= Affine.scale(res[0], -res[1])
    logger.debug("Output transform, after scaling: %r", output_transform)

    # Compute output array shape. We guarantee it will cover the output
    # bounds completely
    output_width = int(math.ceil((dst_e - dst_w) / res[0]))
    output_height = int(math.ceil((dst_n - dst_s) / res[1]))

    # Adjust bounds to fit
    dst_e, dst_s = output_transform * (output_width, output_height)
    logger.debug("Output width: %d, height: %d", output_width, output_height)
    logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))

    if dtype is not None:
        dt = dtype
        logger.debug("Set dtype: %s", dt)

    out_profile = first_profile
    out_profile.update(**(dst_kwds or {}))

    out_profile["transform"] = output_transform
    out_profile["height"] = output_height
    out_profile["width"] = output_width
    out_profile["count"] = output_count
    if nodata is not None:
        out_profile["nodata"] = nodata

    # create destination array
    dest = np.zeros((output_count, output_height, output_width), dtype=dt)

    if nodata is not None:
        nodataval = nodata
        logger.debug("Set nodataval: %r", nodataval)

    if nodataval is not None:
        # Only fill if the nodataval is within dtype's range
        inrange = False
        if np.dtype(dt).kind in ('i', 'u'):
            info = np.iinfo(dt)
            inrange = (info.min <= nodataval <= info.max)
        elif np.dtype(dt).kind == 'f':
            info = np.finfo(dt)
            if np.isnan(nodataval):
                inrange = True
            else:
                inrange = (info.min <= nodataval <= info.max)
        if inrange:
            dest.fill(nodataval)
        else:
            warnings.warn("Input file's nodata value, %s, is beyond the valid "
                          "range of its data type, %s. Consider overriding it "
                          "using the --nodata option for better results." %
                          (nodataval, dt))
    else:
        nodataval = 0

    if method == 'first':

        def copyto(old_data, new_data, old_nodata, new_nodata, **kwargs):
            mask = np.logical_and(old_nodata, ~new_nodata)
            old_data[mask] = new_data[mask]

    elif method == 'last':

        def copyto(old_data, new_data, old_nodata, new_nodata, **kwargs):
            mask = ~new_nodata
            old_data[mask] = new_data[mask]

    elif method == 'min':

        def copyto(old_data, new_data, old_nodata, new_nodata, **kwargs):
            mask = np.logical_and(~old_nodata, ~new_nodata)
            old_data[mask] = np.minimum(old_data[mask], new_data[mask])

            mask = np.logical_and(old_nodata, ~new_nodata)
            old_data[mask] = new_data[mask]

    elif method == 'max':

        def copyto(old_data, new_data, old_nodata, new_nodata, **kwargs):
            mask = np.logical_and(~old_nodata, ~new_nodata)
            old_data[mask] = np.maximum(old_data[mask], new_data[mask])

            mask = np.logical_and(old_nodata, ~new_nodata)
            old_data[mask] = new_data[mask]

    elif callable(method):
        copyto = method

    else:
        raise ValueError(method)

    for idx, dataset in enumerate(datasets):
        with dataset_opener(dataset) as src:
            # Real World (tm) use of boundless reads.
            # This approach uses the maximum amount of memory to solve the
            # problem. Making it more efficient is a TODO.

            # 1. Compute spatial intersection of destination and source
            src_w, src_s, src_e, src_n = src.bounds

            int_w = src_w if src_w > dst_w else dst_w
            int_s = src_s if src_s > dst_s else dst_s
            int_e = src_e if src_e < dst_e else dst_e
            int_n = src_n if src_n < dst_n else dst_n

            # 2. Compute the source window
            src_window = windows.from_bounds(int_w,
                                             int_s,
                                             int_e,
                                             int_n,
                                             src.transform,
                                             precision=precision)
            logger.debug("Src %s window: %r", src.name, src_window)

            src_window = src_window.round_shape()

            # 3. Compute the destination window
            dst_window = windows.from_bounds(int_w,
                                             int_s,
                                             int_e,
                                             int_n,
                                             output_transform,
                                             precision=precision)

            # 4. Read data in source window into temp
            trows, tcols = (int(round(dst_window.height)),
                            int(round(dst_window.width)))
            temp_shape = (src_count, trows, tcols)
            temp = src.read(
                out_shape=temp_shape,
                window=src_window,
                boundless=False,
                masked=True,
                indexes=indexes,
                resampling=resampling,
            )

        # 5. Copy elements of temp into dest
        roff, coff = (int(round(dst_window.row_off)),
                      int(round(dst_window.col_off)))

        region = dest[:, roff:roff + trows, coff:coff + tcols]
        if np.isnan(nodataval):
            region_nodata = np.isnan(region)
            temp_nodata = np.isnan(temp)
        else:
            region_nodata = region == nodataval
            temp_nodata = temp.mask

        copyto(region,
               temp,
               region_nodata,
               temp_nodata,
               index=idx,
               roff=roff,
               coff=coff)

    if dst_path is None:
        return dest, output_transform

    else:
        with rasterio.open(dst_path, "w", **out_profile) as dst:
            dst.write(dest)
            if first_colormap:
                dst.write_colormap(1, first_colormap)
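
The method argument also accepts a custom callable with the signature documented above. A hedged sketch of a "sum overlapping pixels" merge; the output dtype and file names are assumptions:

import numpy as np

def sum_method(old_data, new_data, old_nodata, new_nodata,
               index=None, roff=None, coff=None):
    # Where both mosaic and source are valid, accumulate; where only the
    # source is valid, take its value as-is.
    both = np.logical_and(~old_nodata, ~new_nodata)
    old_data[both] += new_data[both]
    only_new = np.logical_and(old_nodata, ~new_nodata)
    old_data[only_new] = new_data[only_new]

# Hypothetical call; a float dtype avoids overflow while summing.
# dest, out_transform = merge(["a.tif", "b.tif"], dtype="float64",
#                             method=sum_method)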
Exemplo n.º 37
0
def merge(ctx, files, driver, bounds, res, nodata):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm. If the output file exists, its values will be
    overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.
    """
    import numpy as np

    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    logger = logging.getLogger('rio')

    try:
        with rasterio.drivers(CPL_DEBUG=verbosity > 2):
            output = files[-1]
            files = files[:-1]

            with rasterio.open(files[0]) as first:
                first_res = first.res
                kwargs = first.meta
                kwargs.pop('affine')
                nodataval = first.nodatavals[0]
                dtype = first.dtypes[0]

            if os.path.exists(output):
                # TODO: prompt user to update existing file (-i option) like:
                # overwrite b.tif? (y/n [n]) n
                # not overwritten
                dst = rasterio.open(output, 'r+')
                nodataval = dst.nodatavals[0]
                dtype = dst.dtypes[0]
                dest = np.zeros((dst.count, ) + dst.shape, dtype=dtype)
            else:
                # Create new output file.
                # Extent from option or extent of all inputs.
                if not bounds:
                    # scan input files.
                    xs = []
                    ys = []
                    for f in files:
                        with rasterio.open(f) as src:
                            left, bottom, right, top = src.bounds
                            xs.extend([left, right])
                            ys.extend([bottom, top])
                    bounds = min(xs), min(ys), max(xs), max(ys)
                output_transform = Affine.translation(bounds[0], bounds[3])

                # Resolution/pixel size.
                if not res:
                    res = first_res
                output_transform *= Affine.scale(res[0], -res[1])

                # Dataset shape.
                output_width = int(math.ceil((bounds[2] - bounds[0]) / res[0]))
                output_height = int(math.ceil(
                    (bounds[3] - bounds[1]) / res[1]))

                kwargs['driver'] = driver
                kwargs['transform'] = output_transform
                kwargs['width'] = output_width
                kwargs['height'] = output_height

                logger.debug("Kwargs: %r", kwargs)
                logger.debug("bounds: %r", bounds)
                logger.debug("Res: %r", res)

                dst = rasterio.open(output, 'w', **kwargs)
                dest = np.zeros((first.count, output_height, output_width),
                                dtype=dtype)

                logger.debug("In merge, dest shape: %r", dest.shape)

            if nodata is not None:
                nodataval = nodata

            if nodataval is not None:
                # Only fill if the nodataval is within dtype's range.
                inrange = False
                if np.dtype(dtype).kind in ('i', 'u'):
                    info = np.iinfo(dtype)
                    inrange = (info.min <= nodataval <= info.max)
                elif np.dtype(dtype).kind == 'f':
                    info = np.finfo(dtype)
                    inrange = (info.min <= nodataval <= info.max)
                if inrange:
                    dest.fill(nodataval)
                else:
                    warnings.warn(
                        "Input file's nodata value, %s, is beyond the valid "
                        "range of its data type, %s. Consider overriding it "
                        "using the --nodata option for better results." %
                        (nodataval, dtype))
            else:
                nodataval = 0

            dst_w, dst_s, dst_e, dst_n = dst.bounds

            for fname in reversed(files):
                with rasterio.open(fname) as src:
                    # Real World (tm) use of boundless reads.
                    # This approach uses the maximum amount of memory to solve
                    # the problem. Making it more efficient is a TODO.

                    # 1. Compute spatial intersection of destination
                    #    and source.
                    src_w, src_s, src_e, src_n = src.bounds

                    int_w = src_w if src_w > dst_w else dst_w
                    int_s = src_s if src_s > dst_s else dst_s
                    int_e = src_e if src_e < dst_e else dst_e
                    int_n = src_n if src_n < dst_n else dst_n

                    # 2. Compute the source window.
                    src_window = src.window(int_w, int_s, int_e, int_n)

                    # 3. Compute the destination window.
                    dst_window = dst.window(int_w, int_s, int_e, int_n)

                    # 4. Initialize temp array.
                    temp = np.zeros(
                        (first.count, ) + tuple(b - a for a, b in dst_window),
                        dtype=dtype)

                    temp = src.read(out=temp,
                                    window=src_window,
                                    boundless=False,
                                    masked=True)

                    # 5. Copy elements of temp into dest.
                    roff, coff = dst.index(int_w, int_n)
                    h, w = temp.shape[-2:]

                    region = dest[:, roff:roff + h, coff:coff + w]
                    np.copyto(region,
                              temp,
                              where=np.logical_and(region == nodataval,
                                                   temp.mask == False))

            if dst.mode == 'r+':
                temp = dst.read(masked=True)
                np.copyto(dest,
                          temp,
                          where=np.logical_and(dest == nodataval,
                                               temp.mask == False))

            dst.write(dest)
            dst.close()

        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Exemplo n.º 38
0
def merge(sources, bounds=None, res=None, nodata=None, precision=7):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm. If the output file exists, its values will be
    overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.

    Parameters
    ----------
    sources: list of source datasets
        Open rasterio RasterReader objects to be merged.
    bounds: tuple, optional
        Bounds of the output image (left, bottom, right, top).
        If not set, bounds are determined from bounds of input rasters.
    res: tuple, optional
        Output resolution in units of coordinate reference system. If not set,
        the resolution of the first raster is used. If a single value is passed,
        output pixels will be square.
    nodata: float, optional
        nodata value to use in output file. If not set, uses the nodata value
        in the first input raster.

    Returns
    -------
    dest: numpy ndarray
        Contents of all input rasters in single array.
    out_transform: affine object
        Information for mapping pixel coordinates in `dest` to another
        coordinate system
    """
    first = sources[0]
    first_res = first.res
    nodataval = first.nodatavals[0]
    dtype = first.dtypes[0]

    # Extent from option or extent of all inputs.
    if bounds:
        dst_w, dst_s, dst_e, dst_n = bounds
    else:
        # scan input files.
        xs = []
        ys = []
        for src in sources:
            left, bottom, right, top = src.bounds
            xs.extend([left, right])
            ys.extend([bottom, top])
        dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)

    logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
    output_transform = Affine.translation(dst_w, dst_n)
    logger.debug("Output transform, before scaling: %r", output_transform)

    # Resolution/pixel size.
    if not res:
        res = first_res
    elif not np.iterable(res):
        res = (res, res)
    elif len(res) == 1:
        res = (res[0], res[0])
    output_transform *= Affine.scale(res[0], -res[1])
    logger.debug("Output transform, after scaling: %r", output_transform)

    # Compute output array shape. We guarantee it will cover the output
    # bounds completely.
    output_width = int(math.ceil((dst_e - dst_w) / res[0]))
    output_height = int(math.ceil((dst_n - dst_s) / res[1]))

    # Adjust bounds to fit.
    dst_e, dst_s = output_transform * (output_width, output_height)
    logger.debug("Output width: %d, height: %d", output_width, output_height)
    logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))

    # create destination array
    dest = np.zeros((first.count, output_height, output_width), dtype=dtype)

    if nodata is not None:
        nodataval = nodata
        logger.debug("Set nodataval: %r", nodataval)

    if nodataval is not None:
        # Only fill if the nodataval is within dtype's range.
        inrange = False
        if np.dtype(dtype).kind in ('i', 'u'):
            info = np.iinfo(dtype)
            inrange = (info.min <= nodataval <= info.max)
        elif np.dtype(dtype).kind == 'f':
            info = np.finfo(dtype)
            inrange = (info.min <= nodataval <= info.max)
        if inrange:
            dest.fill(nodataval)
        else:
            warnings.warn("Input file's nodata value, %s, is beyond the valid "
                          "range of its data type, %s. Consider overriding it "
                          "using the --nodata option for better results." %
                          (nodataval, dtype))
    else:
        nodataval = 0

    for src in sources:
        # Real World (tm) use of boundless reads.
        # This approach uses the maximum amount of memory to solve the problem.
        # Making it more efficient is a TODO.

        # 1. Compute spatial intersection of destination and source.
        src_w, src_s, src_e, src_n = src.bounds

        int_w = src_w if src_w > dst_w else dst_w
        int_s = src_s if src_s > dst_s else dst_s
        int_e = src_e if src_e < dst_e else dst_e
        int_n = src_n if src_n < dst_n else dst_n

        # 2. Compute the source window.
        src_window = get_window(int_w,
                                int_s,
                                int_e,
                                int_n,
                                src.affine,
                                precision=precision)
        logger.debug("Src %s window: %r", src.name, src_window)

        # 3. Compute the destination window.
        dst_window = get_window(int_w,
                                int_s,
                                int_e,
                                int_n,
                                output_transform,
                                precision=precision)
        logger.debug("Dst window: %r", dst_window)

        # 4. Initialize temp array.
        tcount = first.count
        trows, tcols = tuple(b - a for a, b in dst_window)

        temp_shape = (tcount, trows, tcols)
        logger.debug("Temp shape: %r", temp_shape)

        temp = np.zeros(temp_shape, dtype=dtype)
        temp = src.read(out=temp,
                        window=src_window,
                        boundless=False,
                        masked=True)

        # 5. Copy elements of temp into dest.
        roff, coff = dst_window[0][0], dst_window[1][0]

        region = dest[:, roff:roff + trows, coff:coff + tcols]
        np.copyto(region,
                  temp,
                  where=np.logical_and(region == nodataval,
                                       ~temp.mask))

    return dest, output_transform
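A minimal usage sketch (not part of the original example): assuming the merge routine above is exposed as a callable named merge_tool that accepts the open sources and returns (dest, output_transform) as shown, the mosaic could be written back to disk with rasterio roughly as follows. The names merge_tool, paths and mosaic.tif are illustrative.

import rasterio

# Illustrative inputs; any set of rasters sharing a CRS would work.
paths = ['a.tif', 'b.tif', 'c.tif']
sources = [rasterio.open(p) for p in paths]

# merge_tool is a stand-in name for the routine above.
dest, output_transform = merge_tool(sources)

# Describe the mosaic and write all bands in one call.
meta = {
    'driver': 'GTiff',
    'height': dest.shape[1],
    'width': dest.shape[2],
    'count': dest.shape[0],
    'dtype': dest.dtype.name,
    'crs': sources[0].crs,
    'transform': output_transform,
}
with rasterio.open('mosaic.tif', 'w', **meta) as dst:
    dst.write(dest)

for src in sources:
    src.close()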
Exemplo n.º 39
0
def INSAR_to_rasterio(grd_file, desc, out_file):
    """
    Reads in a UAVSAR interferometry file and writes its real and (for
    interferograms) imaginary components to GeoTIFFs. Requires a .ann file in
    the same directory to describe the data.

    Args:
        grd_file: File containing the UAVSAR data
        desc: Dictionary parsed from the annotation (.ann) file
        out_file: Output file path used to build the names of the converted files
    """
    log = get_logger('insar_2_raster')

    data_map = {
        'int': 'interferogram',
        'amp1': 'amplitude of pass 1',
        'amp2': 'amplitude of pass 2',
        'cor': 'correlation'
    }

    # Grab just the filename and make a list splitting it on periods
    fparts = basename(grd_file).split('.')
    fkey = fparts[0]
    ftype = fparts[-2]
    dname = data_map[ftype]
    log.info('Processing {} file...'.format(dname))

    # Grab the metadata for building our georeference
    nrow = desc['ground range data latitude lines']['value']
    ncol = desc['ground range data longitude samples']['value']

    # Find starting latitude, longitude already at the center
    lat1 = desc['ground range data starting latitude']['value']
    lon1 = desc['ground range data starting longitude']['value']

    # Delta latitude and longitude
    dlat = desc['ground range data latitude spacing']['value']
    dlon = desc['ground range data longitude spacing']['value']
    log.debug('Expecting data to be shaped {} x {}'.format(nrow, ncol))

    log.info('Using Deltas for lat/long = {} / {} degrees'.format(dlat, dlon))

    # Read in the data as a tuple representing the real and imaginary
    # components
    log.info('Reading {} and converting it from binary...'.format(
        basename(grd_file)))

    nbytes = desc['{} bytes per pixel'.format(dname.split(' ')[0])]['value']
    log.info('{} bytes per pixel = {}'.format(dname, nbytes))

    # Form the datatypes
    if dname == 'interferogram':
        # Little Endian (<) + real values (float) + 4 bytes (32 bits) = <f4
        dtype = np.dtype([('real', '<f4'), ('imaginary', '<f4')])
    else:
        dtype = np.dtype([('real', '<f{}'.format(nbytes))])
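        # e.g. with 4 bytes per pixel this yields np.dtype([('real', '<f4')]).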

    # Read in the data according to the annotation file and bytes
    z = np.fromfile(grd_file, dtype=dtype)

    # Reshape it to match what the text file says the image is
    z = z.reshape(nrow, ncol)

    # Build the transform and CRS
    crs = CRS.from_user_input("EPSG:4326")

    # lat1/lon1 already refer to the pixel center, so for the GeoTIFF we're good to go.
    t = Affine.translation(lon1, lat1) * Affine.scale(dlon, dlat)
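    # Illustrative check (values are assumptions, not from the example): with
    # lon1=-122.0, lat1=37.0, dlon=5.556e-05 and dlat=-5.556e-05,
    # t * (col, row) == (-122.0 + dlon * col, 37.0 + dlat * row),
    # i.e. columns step east and rows step south.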
    ext = out_file.split('.')[-1]
    fbase = join(dirname(out_file),
                 '.'.join(basename(out_file).split('.')[0:-1]) + '.{}.{}')

    for comp in ['real', 'imaginary']:
        if comp in z.dtype.names:
            d = z[comp]
            out = fbase.format(comp, ext)
            log.info('Writing to {}...'.format(out))
            dataset = rasterio.open(
                out,
                'w+',
                driver='GTiff',
                height=d.shape[0],
                width=d.shape[1],
                count=1,
                dtype=d.dtype,
                crs=crs,
                transform=t,
            )
            # Write out the data
            dataset.write(d, 1)

            # show(new_dataset.read(1), vmax=0.1, vmin=-0.1)
            # for stat in ['min','max','mean','std']:
            #     log.info('{} {} = {}'.format(comp, stat, getattr(d, stat)()))
            dataset.close()
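A minimal usage sketch (not from the original example): the desc dictionary is assumed to follow the structure the function reads above, where each annotation field maps to a dict holding a 'value' key. The file names and numeric values below are purely illustrative.

# Illustrative annotation dictionary; in practice it would be parsed from the
# .ann file that accompanies the .grd data.
desc = {
    'ground range data latitude lines': {'value': 1000},
    'ground range data longitude samples': {'value': 1200},
    'ground range data starting latitude': {'value': 37.0},
    'ground range data starting longitude': {'value': -122.0},
    'ground range data latitude spacing': {'value': -5.556e-05},
    'ground range data longitude spacing': {'value': 5.556e-05},
    'interferogram bytes per pixel': {'value': 8},
}

# For 'uavsar.int.grd' the file type resolves to 'interferogram', so this call
# would write ./output/uavsar.int.real.tif and ./output/uavsar.int.imaginary.tif.
INSAR_to_rasterio('uavsar.int.grd', desc, './output/uavsar.int.tif')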