Code example #1
def test_fail_overwrite(tmpdir):
    """Unforced overwrite of existing file fails."""
    foo_tif = tmpdir.join('foo.tif')
    foo_tif.write("content")
    with pytest.raises(FileOverwriteError) as excinfo:
        helpers.resolve_inout(files=[str(x) for x in tmpdir.listdir()])
    assert "file exists and won't be overwritten without use of the " in str(excinfo.value)
Code example #2
def test_force_overwrite(tmpdir):
    """Forced overwrite of existing file succeeds."""
    foo_tif = tmpdir.join('foo.tif')
    foo_tif.write("content")
    output, inputs = helpers.resolve_inout(
        files=[str(x) for x in tmpdir.listdir()], force_overwrite=True)
    assert output == str(foo_tif)
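
Together, the two tests above pin down the overwrite contract of resolve_inout: an existing output path raises FileOverwriteError unless the overwrite flag is passed. Below is a minimal sketch of how a Click command might wire this up, following the pattern of the merge and rasterize examples further down this page; the --overwrite option name and the helper's import path are assumptions.

# A minimal sketch, not rasterio's actual CLI wiring; the --overwrite option
# and the import path of resolve_inout are assumptions for illustration.
import click

from rasterio.rio.helpers import resolve_inout  # assumed module path


@click.command()
@click.argument('files', nargs=-1, type=click.Path())
@click.option('--overwrite', is_flag=True, default=False,
              help="Allow an existing output file to be replaced.")
def tool(files, overwrite):
    # The helper returns (output, inputs); an existing output only resolves
    # when overwrite is set, otherwise FileOverwriteError is raised.
    output, inputs = resolve_inout(files=files, overwrite=overwrite)
    click.echo('output=%s inputs=%s' % (output, inputs))


if __name__ == '__main__':
    tool()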
Code example #3
File: merge.py Project: DanLipsitt/rasterio
def merge(ctx, files, output, driver, bounds, res, nodata, bidx, overwrite,
          precision, creation_options):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm. If the output file exists, its values will be
    overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.

    Note: --res changed from 2 parameters in 0.25.

    \b
      --res 0.1 0.1  => --res 0.1 (square)
      --res 0.1 0.2  => --res 0.1 --res 0.2  (rectangular)
    """
    from rasterio.merge import merge as merge_tool

    output, files = resolve_inout(
        files=files, output=output, overwrite=overwrite)

    with ctx.obj['env']:
        datasets = [rasterio.open(f) for f in files]
        dest, output_transform = merge_tool(datasets, bounds=bounds, res=res,
                                            nodata=nodata, precision=precision,
                                            indexes=(bidx or None))

        profile = datasets[0].profile
        profile['transform'] = output_transform
        profile['height'] = dest.shape[1]
        profile['width'] = dest.shape[2]
        profile['driver'] = driver
        profile['count'] = dest.shape[0]

        if nodata is not None:
            profile['nodata'] = nodata

        profile.update(**creation_options)

        with rasterio.open(output, 'w', **profile) as dst:
            dst.write(dest)

            # uses the colormap in the first input raster.
            try:
                colormap = datasets[0].colormap(1)
                dst.write_colormap(1, colormap)
            except ValueError:
                pass
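
For comparison, the same merge can be driven from Python directly; a short sketch that mirrors the calls made in the command above (the input paths are placeholders).

# Programmatic sketch mirroring the merge command above; the input paths
# are placeholders and the keyword arguments match the call made above.
import rasterio
from rasterio.merge import merge as merge_tool

paths = ['a.tif', 'b.tif']  # hypothetical inputs
datasets = [rasterio.open(p) for p in paths]

# Reverse painter's algorithm: earlier datasets win where inputs overlap.
dest, output_transform = merge_tool(datasets, bounds=None, res=None,
                                    nodata=None, indexes=None)

profile = datasets[0].profile
profile.update(transform=output_transform, height=dest.shape[1],
               width=dest.shape[2], count=dest.shape[0])

with rasterio.open('merged.tif', 'w', **profile) as dst:
    dst.write(dest)

for ds in datasets:
    ds.close()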
Code example #4
File: cli.py Project: mapbox/rio-merge-rgba
def merge_rgba(files, output, bounds, res, force_overwrite,
               precision, creation_options):

    output, files = resolve_inout(files=files, output=output)

    if os.path.exists(output) and not force_overwrite:
        raise click.ClickException(
            "Output exists and won't be overwritten without the "
            "`-f` option")

    sources = [rasterio.open(f) for f in files]
    merge_rgba_tool(sources, output, bounds=bounds, res=res,
                    precision=precision,
                    creation_options=creation_options)
Code example #5
File: convert.py Project: RodrigoGonzalez/rasterio
def convert(
        ctx, files, output, driver, dtype, scale_ratio, scale_offset,
        photometric, creation_options):
    """Copy and convert raster datasets to other data types and formats.

    Data values may be linearly scaled when copying by using the
    --scale-ratio and --scale-offset options. Destination raster values
    are calculated as

      dst = scale_ratio * src + scale_offset

    For example, to scale uint16 data with an actual range of 0-4095 to
    0-255 as uint8:

      $ rio convert in16.tif out8.tif --dtype uint8 --scale-ratio 0.0625

    Format specific creation options may also be passed using --co. To
    tile a new GeoTIFF output file, do the following.

      --co tiled=true --co blockxsize=256 --co blockysize=256

    To compress it using the LZW method, add

      --co compress=LZW

    """
    with ctx.obj['env']:

        outputfile, files = resolve_inout(files=files, output=output)
        inputfile = files[0]

        with rasterio.open(inputfile) as src:

            # Use the input file's profile, updated by CLI
            # options, as the profile for the output file.
            profile = src.profile

            if driver:
                profile['driver'] = driver

            if dtype:
                profile['dtype'] = dtype
            dst_dtype = profile['dtype']

            if photometric:
                creation_options['photometric'] = photometric

            profile.update(**creation_options)

            with rasterio.open(outputfile, 'w', **profile) as dst:

                data = src.read()

                if scale_ratio:
                    # Cast to float64 before multiplying.
                    data = data.astype('float64', casting='unsafe', copy=False)
                    np.multiply(
                        data, scale_ratio, out=data, casting='unsafe')

                if scale_offset:
                    # My understanding of copy=False is that this is a
                    # no-op if the array was cast for multiplication.
                    data = data.astype('float64', casting='unsafe', copy=False)
                    np.add(
                        data, scale_offset, out=data, casting='unsafe')

                # Cast to the output dtype and write.
                result = data.astype(dst_dtype, casting='unsafe', copy=False)
                dst.write(result)
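
The scaling above is simply dst = scale_ratio * src + scale_offset computed in float64 and cast back to the target dtype. Here is a standalone check of the uint16-to-uint8 example from the docstring (the sample values are illustrative).

# Standalone check of the docstring's scaling example: uint16 values in
# 0-4095 scaled by 0.0625 into the uint8 range. Sample values are made up.
import numpy as np

data = np.array([0, 2048, 4095], dtype='uint16')
scale_ratio, scale_offset = 0.0625, 0.0

scaled = data.astype('float64', casting='unsafe', copy=False)
np.multiply(scaled, scale_ratio, out=scaled, casting='unsafe')
np.add(scaled, scale_offset, out=scaled, casting='unsafe')

result = scaled.astype('uint8', casting='unsafe', copy=False)
print(result)  # [  0 128 255]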
Code example #6
def test_resolve_files_inout__input():
    assert helpers.resolve_inout(input='in') == (None, ['in'])
Code example #7
File: rasterize.py Project: DanLipsitt/rasterio
def rasterize(
        ctx,
        files,
        output,
        driver,
        like,
        bounds,
        dimensions,
        res,
        src_crs,
        all_touched,
        default_value,
        fill,
        prop,
        overwrite,
        nodata,
        creation_options):
    """Rasterize GeoJSON into a new or existing raster.

    If the output raster exists, rio-rasterize will rasterize feature
    values into all bands of that raster.  The GeoJSON is assumed to be
    in the same coordinate reference system as the output unless
    --src-crs is provided.

    --default_value or property values when using --property must be
    using a data type valid for the data type of that raster.

    If a template raster is provided using the --like option, the affine
    transform and data type from that raster will be used to create the
    output.  Only a single band will be output.

    The GeoJSON is assumed to be in the same coordinate reference system
    as the --like raster unless --src-crs is provided.

    --default_value or property values when using --property must be
    using a data type valid for the data type of that raster.

    --driver, --bounds, --dimensions, --res, --nodata are ignored when
    output exists or --like raster is provided

    If the output does not exist and --like raster is not provided, the
    input GeoJSON will be used to determine the bounds of the output
    unless provided using --bounds.

    --dimensions or --res are required in this case.

    If --res is provided, the bottom and right coordinates of bounds are
    ignored.

    Note
    ----

    The GeoJSON is not projected to match the coordinate reference
    system of the output or --like rasters at this time.  This
    functionality may be added in the future.

    """

    from rasterio.crs import CRS
    from rasterio.features import rasterize
    from rasterio.features import bounds as calculate_bounds

    output, files = resolve_inout(
        files=files, output=output, overwrite=overwrite)

    bad_param = click.BadParameter('invalid CRS.  Must be an EPSG code.',
                                   ctx, param=src_crs, param_hint='--src_crs')
    has_src_crs = src_crs is not None
    try:
        src_crs = CRS.from_string(src_crs) if has_src_crs else CRS.from_string('EPSG:4326')
    except CRSError:
        raise bad_param

    # If values are actually meant to be integers, we need to cast them
    # as such or rasterize creates floating point outputs
    if default_value == int(default_value):
        default_value = int(default_value)
    if fill == int(fill):
        fill = int(fill)

    with ctx.obj['env']:

        def feature_value(feature):
            if prop and 'properties' in feature:
                return feature['properties'].get(prop, default_value)
            return default_value

        with click.open_file(files.pop(0) if files else '-') as gj_f:
            geojson = json.loads(gj_f.read())
        if 'features' in geojson:
            geometries = []
            for f in geojson['features']:
                geometries.append((f['geometry'], feature_value(f)))
        elif 'geometry' in geojson:
            geometries = ((geojson['geometry'], feature_value(geojson)), )
        else:
            raise click.BadParameter('Invalid GeoJSON', param=input,
                                     param_hint='input')

        geojson_bounds = geojson.get('bbox', calculate_bounds(geojson))

        if rasterio.shutil.exists(output):
            with rasterio.open(output, 'r+') as out:
                if has_src_crs and src_crs != out.crs:
                    raise click.BadParameter('GeoJSON does not match crs of '
                                             'existing output raster',
                                             param='input', param_hint='input')

                if disjoint_bounds(geojson_bounds, out.bounds):
                    click.echo("GeoJSON outside bounds of existing output "
                               "raster. Are they in different coordinate "
                               "reference systems?",
                               err=True)

                meta = out.meta

                result = rasterize(
                    geometries,
                    out_shape=(meta['height'], meta['width']),
                    transform=meta.get('affine', meta['transform']),
                    all_touched=all_touched,
                    dtype=meta.get('dtype', None),
                    default_value=default_value,
                    fill=fill)

                for bidx in range(1, meta['count'] + 1):
                    data = out.read(bidx, masked=True)
                    # Burn in any non-fill pixels, and update mask accordingly
                    ne = result != fill
                    data[ne] = result[ne]
                    if data.mask.any():
                        data.mask[ne] = False
                    out.write(data, indexes=bidx)

        else:
            if like is not None:
                template_ds = rasterio.open(like)

                if has_src_crs and src_crs != template_ds.crs:
                    raise click.BadParameter('GeoJSON does not match crs of '
                                             '--like raster',
                                             param='input', param_hint='input')

                if disjoint_bounds(geojson_bounds, template_ds.bounds):
                    click.echo("GeoJSON outside bounds of --like raster. "
                               "Are they in different coordinate reference "
                               "systems?",
                               err=True)

                kwargs = template_ds.profile
                kwargs['count'] = 1
                kwargs['transform'] = template_ds.transform

                template_ds.close()

            else:
                bounds = bounds or geojson_bounds

                if src_crs.is_geographic:
                    if (bounds[0] < -180 or bounds[2] > 180 or
                            bounds[1] < -80 or bounds[3] > 80):
                        raise click.BadParameter(
                            "Bounds are beyond the valid extent for "
                            "EPSG:4326.",
                            ctx, param=bounds, param_hint='--bounds')

                if dimensions:
                    width, height = dimensions
                    res = (
                        (bounds[2] - bounds[0]) / float(width),
                        (bounds[3] - bounds[1]) / float(height)
                    )

                else:
                    if not res:
                        raise click.BadParameter(
                            'pixel dimensions are required',
                            ctx, param=res, param_hint='--res')

                    elif len(res) == 1:
                        res = (res[0], res[0])

                    width = max(int(ceil((bounds[2] - bounds[0]) /
                                float(res[0]))), 1)
                    height = max(int(ceil((bounds[3] - bounds[1]) /
                                 float(res[1]))), 1)

                kwargs = {
                    'count': 1,
                    'crs': src_crs,
                    'width': width,
                    'height': height,
                    'transform': Affine(res[0], 0, bounds[0], 0, -res[1],
                                        bounds[3]),
                    'driver': driver
                }

            kwargs.update(**creation_options)

            if nodata is not None:
                kwargs['nodata'] = nodata

            result = rasterize(
                geometries,
                out_shape=(kwargs['height'], kwargs['width']),
                transform=kwargs['transform'],
                all_touched=all_touched,
                dtype=kwargs.get('dtype', None),
                default_value=default_value,
                fill=fill)

            if 'dtype' not in kwargs:
                kwargs['dtype'] = result.dtype

            with rasterio.open(output, 'w', **kwargs) as out:
                out.write(result, indexes=1)
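
Both branches above ultimately call rasterio.features.rasterize on an iterable of (geometry, value) pairs. A minimal standalone sketch on a hypothetical 10x10 unit grid follows (the polygon and burn value are made up).

# Minimal sketch of the rasterize call used above, on a hypothetical
# 10x10 unit grid; the polygon and burn value are made up.
from affine import Affine
from rasterio.features import rasterize

square = {
    'type': 'Polygon',
    'coordinates': [[(2, 2), (2, 6), (6, 6), (6, 2), (2, 2)]],
}

result = rasterize(
    [(square, 1)],                 # iterable of (geometry, burn value) pairs
    out_shape=(10, 10),
    transform=Affine.identity(),   # row/col == y/x on this toy grid
    all_touched=False,
    fill=0,
    dtype='uint8',
)

print(result.sum())  # count of burned pixels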
Code example #8
File: calc.py Project: mapbox/rasterio
def calc(ctx, command, files, output, name, dtype, masked, overwrite, mem_limit, creation_options):
    """A raster data calculator

    Evaluates an expression using input datasets and writes the result
    to a new dataset.

    Command syntax is lisp-like. An expression consists of an operator
    or function name and one or more strings, numbers, or expressions
    enclosed in parentheses. Functions include ``read`` (gets a raster
    array) and ``asarray`` (makes a 3-D array from 2-D arrays).

    \b
        * (read i) evaluates to the i-th input dataset (a 3-D array).
        * (read i j) evaluates to the j-th band of the i-th dataset (a
          2-D array).
        * (take foo j) evaluates to the j-th band of a dataset named foo
          (see help on the --name option above).
        * Standard numpy array operators (+, -, *, /) are available.
        * When the final result is a list of arrays, a multiple band
          output file is written.
        * When the final result is a single array, a single band output
          file is written.

    Example:

    \b
         $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\
         > /tmp/out.tif

    The command above produces a 3-band GeoTIFF with all values scaled
    by 0.95 and incremented by 2.

    \b
        $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\
        > tests/data/shade.tif /tmp/out.tif

    The command above produces a 3-band RGB GeoTIFF, with red levels
    incremented by 125, from the single-band input.

    The maximum amount of memory used to perform calculations defaults to
    64 MB. This number can be increased to improve the speed of calculation.

    """
    import numpy as np

    try:
        with ctx.obj['env']:
            output, files = resolve_inout(files=files, output=output,
                                          overwrite=overwrite)
            inputs = ([tuple(n.split('=')) for n in name] +
                      [(None, n) for n in files])
            sources = [rasterio.open(path) for name, path in inputs]

            first = sources[0]
            kwargs = first.profile
            kwargs.update(**creation_options)
            dtype = dtype or first.meta['dtype']
            kwargs['dtype'] = dtype

            # Extend snuggs.
            snuggs.func_map['read'] = _read_array
            snuggs.func_map['band'] = lambda d, i: _get_bands(inputs, sources, d, i)
            snuggs.func_map['bands'] = lambda d: _get_bands(inputs, sources, d)
            snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)
            snuggs.func_map['sieve'] = lambda *args: sieve(*args)

            dst = None

            # The windows iterator is initialized with a single sample.
            # The actual work windows will be added in the second
            # iteration of the loop.
            work_windows = [(None, Window(0, 0, 16, 16))]

            for ij, window in work_windows:

                ctxkwds = OrderedDict()

                for i, ((name, path), src) in enumerate(zip(inputs, sources)):

                    # Using the class method instead of instance
                    # method. Latter raises
                    #
                    # TypeError: astype() got an unexpected keyword
                    # argument 'copy'
                    #
                    # possibly something to do with the instance being
                    # a masked array.
                    ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window)

                res = snuggs.eval(command, **ctxkwds)

                if (isinstance(res, np.ma.core.MaskedArray) and (
                        tuple(LooseVersion(np.__version__).version) < (1, 9) or
                        tuple(LooseVersion(np.__version__).version) > (1, 10))):
                    res = res.filled(kwargs['nodata'])

                if len(res.shape) == 3:
                    results = np.ndarray.astype(res, dtype, copy=False)
                else:
                    results = np.asanyarray(
                        [np.ndarray.astype(res, dtype, copy=False)])

                # The first iteration is only to get sample results and from them
                # compute some properties of the output dataset.
                if dst is None:
                    kwargs['count'] = results.shape[0]
                    dst = rasterio.open(output, 'w', **kwargs)
                    work_windows.extend(_chunk_output(dst.width, dst.height, dst.count, np.dtype(dst.dtypes[0]).itemsize, mem_limit=mem_limit))

                # In subsequent iterations we write results.
                else:
                    dst.write(results, window=window)

    except snuggs.ExpressionError as err:
        click.echo("Expression Error:")
        click.echo('  %s' % err.text)
        click.echo(' ' + ' ' * err.offset + "^")
        click.echo(err)
        raise click.Abort()

    finally:
        if dst:
            dst.close()
        for src in sources:
            src.close()
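
The snuggs expression in the docstring is just prefix notation over numpy arrays; "(+ 2 (* 0.95 (read 1)))" amounts to the following, where bands stands in for the array returned by (read 1).

# Numpy equivalent of the expression "(+ 2 (* 0.95 (read 1)))" from the
# docstring above; `bands` stands in for the array produced by (read 1).
import numpy as np

bands = np.arange(12, dtype='float64').reshape(3, 2, 2)  # fake 3-band raster

result = 2 + 0.95 * bands  # the arithmetic rio calc evaluates per window
print(result.shape)        # (3, 2, 2) -> written as a 3-band output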
Code example #9
def test_resolve_files_inout__inout_files_output_o():
    assert helpers.resolve_inout(
        files=('a', 'b', 'c'), output='out') == ('out', ['a', 'b', 'c'])
Code example #10
def test_implicit_overwrite(tmpdir):
    """Implicit overwrite of existing file succeeds."""
    foo_tif = tmpdir.join('foo.tif')
    foo_tif.write("content")
    output, inputs = helpers.resolve_inout(output=str(foo_tif))
    assert output == str(foo_tif)
Code example #11
File: cli.py Project: david-murr/rio-mbtiles
def mbtiles(ctx, files, output_opt, title, description, layer_type, img_format,
            zoom_levels, image_dump, num_workers):
    """Export a dataset to MBTiles (version 1.1) in a SQLite file.

    The input dataset may have any coordinate reference system. It must
    have at least three bands, which will become the red, blue, and
    green bands of the output image tiles.

    If no zoom levels are specified, the defaults are the zoom levels
    nearest to the one at which one tile may contain the entire source
    dataset.

    If a title or description for the output file are not provided,
    they will be taken from the input dataset's filename.

    This command is suited for small to medium (~1 GB) sized sources.

    Python package: rio-mbtiles (https://github.com/mapbox/rio-mbtiles).
    """

    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    logger = logging.getLogger('rio')

    output, files = resolve_inout(files=files, output=output_opt)
    inputfile = files[0]

    with rasterio.drivers(CPL_DEBUG=verbosity > 2):

        # Read metadata from the source dataset.
        with rasterio.open(inputfile) as src:

            # Name and description.
            title = title or os.path.basename(src.name)
            description = description or src.name

            # Compute the geographic bounding box of the dataset.
            (west, east), (south, north) = transform(src.crs, 'EPSG:4326',
                                                     src.bounds[::2],
                                                     src.bounds[1::2])

        # Resolve the minimum and maximum zoom levels for export.
        if zoom_levels:
            minzoom, maxzoom = map(int, zoom_levels.split('..'))
        else:
            zw = int(round(math.log(360.0 / (east - west), 2.0)))
            zh = int(round(math.log(170.1022 / (north - south), 2.0)))
            minzoom = min(zw, zh)
            maxzoom = max(zw, zh)
        logger.debug("Zoom range: %d..%d", minzoom, maxzoom)

        # Parameters for creation of tile images.
        base_kwds = {
            'driver': img_format.upper(),
            'dtype': 'uint8',
            'nodata': 0,
            'height': 256,
            'width': 256,
            'count': 3,
            'crs': 'EPSG:3857'
        }

        img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'

        # Initialize the sqlite db.
        if os.path.exists(output):
            os.unlink(output)
        conn = sqlite3.connect(output)
        cur = conn.cursor()
        cur.execute("CREATE TABLE tiles "
                    "(zoom_level integer, tile_column integer, "
                    "tile_row integer, tile_data blob);")
        cur.execute("CREATE TABLE metadata (name text, value text);")

        # Insert mbtiles metadata into db.
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("name", title))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("type", layer_type))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("version", "1.1"))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("description", description))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("format", img_ext))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("bounds", "%f,%f,%f,%f" % (west, south, east, north)))

        conn.commit()

        # Create a pool of workers to process tile tasks.
        pool = Pool(num_workers, init_worker, (inputfile, base_kwds), 100)

        # Constrain bounds.
        EPS = 1.0e-10
        west = max(-180 + EPS, west)
        south = max(-85.051129, south)
        east = min(180 - EPS, east)
        north = min(85.051129, north)

        # Initialize iterator over output tiles.
        tiles = mercantile.tiles(west, south, east, north,
                                 range(minzoom, maxzoom + 1))

        for tile, contents in pool.imap_unordered(process_tile, tiles):

            # MBTiles has a different origin than Mercantile/tilebelt.
            tiley = int(math.pow(2, tile.z)) - tile.y - 1

            # Optional image dump.
            if image_dump:
                img_name = '%d-%d-%d.%s' % (tile.x, tiley, tile.z, img_ext)
                img_path = os.path.join(image_dump, img_name)
                with open(img_path, 'wb') as img:
                    img.write(contents)

            # Insert tile into db.
            cur.execute(
                "INSERT INTO tiles "
                "(zoom_level, tile_column, tile_row, tile_data) "
                "VALUES (?, ?, ?, ?);",
                (tile.z, tile.x, tiley, buffer(contents)))

            conn.commit()

        conn.close()
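
When no zoom levels are given, the command derives them from the geographic extent: the zoom at which a single tile spans the dataset's width (360 degrees) or its height (170.1022 degrees of Web Mercator latitude). A standalone sketch of that heuristic follows (the bounds are placeholders).

# Standalone sketch of the default zoom heuristic used above; the bounds
# are placeholders for a dataset's geographic bounding box.
import math


def default_zoom_range(west, south, east, north):
    """Zoom levels nearest to the one where a single tile covers the bounds."""
    zw = int(round(math.log(360.0 / (east - west), 2.0)))
    zh = int(round(math.log(170.1022 / (north - south), 2.0)))
    return min(zw, zh), max(zw, zh)


print(default_zoom_range(-78.9, 23.5, -76.6, 25.0))  # (7, 7)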
Code example #13
File: cli.py Project: soiqualang/rio-mbtiles
def mbtiles(ctx, files, output, overwrite, title, description, layer_type,
            img_format, tile_size, zoom_levels, image_dump, num_workers,
            src_nodata, dst_nodata, resampling, rgba):
    """Export a dataset to MBTiles (version 1.1) in a SQLite file.

    The input dataset may have any coordinate reference system. It must
    have at least three bands, which will become the red, blue, and
    green bands of the output image tiles.

    An optional fourth alpha band may be copied to the output tiles by
    using the --rgba option in combination with the PNG format. This
    option requires that the input dataset has at least 4 bands.

    If no zoom levels are specified, the defaults are the zoom levels
    nearest to the one at which one tile may contain the entire source
    dataset.

    If a title or description for the output file are not provided,
    they will be taken from the input dataset's filename.

    This command is suited for small to medium (~1 GB) sized sources.

    Python package: rio-mbtiles (https://github.com/mapbox/rio-mbtiles).
    """
    output, files = resolve_inout(files=files,
                                  output=output,
                                  overwrite=overwrite)
    inputfile = files[0]

    log = logging.getLogger(__name__)

    with ctx.obj['env']:

        # Read metadata from the source dataset.
        with rasterio.open(inputfile) as src:

            validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))
            base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}

            if src_nodata is not None:
                base_kwds.update(nodata=src_nodata)

            if dst_nodata is not None:
                base_kwds.update(nodata=dst_nodata)

            # Name and description.
            title = title or os.path.basename(src.name)
            description = description or src.name

            # Compute the geographic bounding box of the dataset.
            (west, east), (south, north) = transform(src.crs, 'EPSG:4326',
                                                     src.bounds[::2],
                                                     src.bounds[1::2])

        # Resolve the minimum and maximum zoom levels for export.
        if zoom_levels:
            minzoom, maxzoom = map(int, zoom_levels.split('..'))
        else:
            zw = int(round(math.log(360.0 / (east - west), 2.0)))
            zh = int(round(math.log(170.1022 / (north - south), 2.0)))
            minzoom = min(zw, zh)
            maxzoom = max(zw, zh)

        log.debug("Zoom range: %d..%d", minzoom, maxzoom)

        if rgba:
            if img_format == 'JPEG':
                raise click.BadParameter(
                    "RGBA output is not possible with JPEG format.")
            else:
                count = 4
        else:
            count = 3

        # Parameters for creation of tile images.
        base_kwds.update({
            'driver': img_format.upper(),
            'dtype': 'uint8',
            'nodata': 0,
            'height': tile_size,
            'width': tile_size,
            'count': count,
            'crs': TILES_CRS
        })

        img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'

        # Initialize the sqlite db.
        if os.path.exists(output):
            os.unlink(output)

        # workaround for bug here: https://bugs.python.org/issue27126
        sqlite3.connect(':memory:').close()

        conn = sqlite3.connect(output)
        cur = conn.cursor()
        cur.execute("CREATE TABLE tiles "
                    "(zoom_level integer, tile_column integer, "
                    "tile_row integer, tile_data blob);")
        cur.execute("CREATE TABLE metadata (name text, value text);")

        # Insert mbtiles metadata into db.
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("name", title))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("type", layer_type))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("version", "1.1"))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("description", description))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("format", img_ext))
        cur.execute("INSERT INTO metadata (name, value) VALUES (?, ?);",
                    ("bounds", "%f,%f,%f,%f" % (west, south, east, north)))

        conn.commit()

        # Create a pool of workers to process tile tasks.
        pool = Pool(num_workers, init_worker,
                    (inputfile, base_kwds, resampling), 100)

        # Constrain bounds.
        EPS = 1.0e-10
        west = max(-180 + EPS, west)
        south = max(-85.051129, south)
        east = min(180 - EPS, east)
        north = min(85.051129, north)

        # Initialize iterator over output tiles.
        tiles = mercantile.tiles(west, south, east, north,
                                 range(minzoom, maxzoom + 1))

        for tile, contents in pool.imap_unordered(process_tile, tiles):

            if contents is None:
                log.info("Tile %r is empty and will be skipped", tile)
                continue

            # MBTiles have a different origin than Mercantile/tilebelt.
            tiley = int(math.pow(2, tile.z)) - tile.y - 1

            # Optional image dump.
            if image_dump:
                img_name = '%d-%d-%d.%s' % (tile.x, tiley, tile.z, img_ext)
                img_path = os.path.join(image_dump, img_name)
                with open(img_path, 'wb') as img:
                    img.write(contents)

            # Insert tile into db.
            cur.execute(
                "INSERT INTO tiles "
                "(zoom_level, tile_column, tile_row, tile_data) "
                "VALUES (?, ?, ?, ?);",
                (tile.z, tile.x, tiley, sqlite3.Binary(contents)))

            conn.commit()

        conn.close()
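
Two details of the tile loop are easy to miss: mercantile enumerates tiles in XYZ order, while MBTiles stores rows in TMS order, hence the 2**z - y - 1 flip. A small sketch of that skeleton follows (bounds and zoom range are placeholders).

# Sketch of the tile-loop skeleton used above: mercantile yields XYZ tiles
# and the row index is flipped into the TMS order MBTiles expects.
# Bounds and zoom range are placeholders.
import mercantile

west, south, east, north = -78.9, 23.5, -76.6, 25.0
minzoom, maxzoom = 7, 8

for tile in mercantile.tiles(west, south, east, north,
                             range(minzoom, maxzoom + 1)):
    tiley = int(2 ** tile.z) - tile.y - 1   # XYZ row -> TMS row
    print(tile.z, tile.x, tiley)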
Code example #14
def convert(ctx, files, output, driver, dtype, scale_ratio, scale_offset,
            photometric, creation_options):
    """Copy and convert raster datasets to other data types and formats.

    Data values may be linearly scaled when copying by using the
    --scale-ratio and --scale-offset options. Destination raster values
    are calculated as

      dst = scale_ratio * src + scale_offset

    For example, to scale uint16 data with an actual range of 0-4095 to
    0-255 as uint8:

      $ rio convert in16.tif out8.tif --dtype uint8 --scale-ratio 0.0625

    Format specific creation options may also be passed using --co. To
    tile a new GeoTIFF output file, do the following.

      --co tiled=true --co blockxsize=256 --co blockysize=256

    To compress it using the LZW method, add

      --co compress=LZW

    """
    with ctx.obj['env']:

        outputfile, files = resolve_inout(files=files, output=output)
        inputfile = files[0]

        with rasterio.open(inputfile) as src:

            # Use the input file's profile, updated by CLI
            # options, as the profile for the output file.
            profile = src.profile

            if driver:
                profile['driver'] = driver

            if dtype:
                profile['dtype'] = dtype
            dst_dtype = profile['dtype']

            if photometric:
                creation_options['photometric'] = photometric

            profile.update(**creation_options)

            with rasterio.open(outputfile, 'w', **profile) as dst:

                data = src.read()

                if scale_ratio:
                    # Cast to float64 before multiplying.
                    data = data.astype('float64', casting='unsafe', copy=False)
                    np.multiply(data, scale_ratio, out=data, casting='unsafe')

                if scale_offset:
                    # My understanding of copy=False is that this is a
                    # no-op if the array was cast for multiplication.
                    data = data.astype('float64', casting='unsafe', copy=False)
                    np.add(data, scale_offset, out=data, casting='unsafe')

                # Cast to the output dtype and write.
                result = data.astype(dst_dtype, casting='unsafe', copy=False)
                dst.write(result)
Code example #15
def test_implicit_overwrite(tmpdir):
    """Implicit overwrite of existing file fails."""
    foo_tif = tmpdir.join('foo.tif')
    foo_tif.write("content")
    with pytest.raises(FileOverwriteError):
        helpers.resolve_inout(output=str(foo_tif))
Code example #16
def test_resolve_files_too_many_inputs():
    with pytest.raises(click.BadParameter) as excinfo:
        helpers.resolve_inout(files=["a", "b", "c"], num_inputs=1)
    assert "Too many inputs" in str(excinfo.value)
Code example #17
def test_resolve_files_insufficient_inputs():
    with pytest.raises(click.BadParameter) as excinfo:
        helpers.resolve_inout(files=["a"], num_inputs=1)
    assert "Insufficient inputs" in str(excinfo.value)
Code example #18
def stack(ctx, files, output, driver, bidx, photometric, overwrite,
          creation_options):
    """Stack a number of bands from one or more input files into a
    multiband dataset.

    Input datasets must be of a kind: same data type, dimensions, etc. The
    output is cloned from the first input.

    By default, rio-stack will take all bands from each input and write them
    in same order to the output. Optionally, bands for each input may be
    specified using a simple syntax:

      --bidx N takes the Nth band from the input (first band is 1).

      --bidx M,N,O takes bands M, N, and O.

      --bidx M..O takes bands M-O, inclusive.

      --bidx ..N takes all bands up to and including N.

      --bidx N.. takes all bands from N to the end.

    Examples, using the Rasterio testing dataset, which produce a copy.

      rio stack RGB.byte.tif -o stacked.tif

      rio stack RGB.byte.tif --bidx 1,2,3 -o stacked.tif

      rio stack RGB.byte.tif --bidx 1..3 -o stacked.tif

      rio stack RGB.byte.tif --bidx ..2 RGB.byte.tif --bidx 3.. -o stacked.tif

    """
    logger = logging.getLogger(__name__)
    try:
        with ctx.obj['env']:
            output, files = resolve_inout(files=files, output=output,
                                          overwrite=overwrite)
            output_count = 0
            indexes = []
            for path, item in zip_longest(files, bidx, fillvalue=None):
                with rasterio.open(path) as src:
                    src_indexes = src.indexes
                if item is None:
                    indexes.append(src_indexes)
                    output_count += len(src_indexes)
                elif '..' in item:
                    start, stop = map(
                        lambda x: int(x) if x else None, item.split('..'))
                    if start is None:
                        start = 1
                    indexes.append(src_indexes[slice(start - 1, stop)])
                    output_count += len(src_indexes[slice(start - 1, stop)])
                else:
                    parts = list(map(int, item.split(',')))
                    if len(parts) == 1:
                        indexes.append(parts[0])
                        output_count += 1
                    else:
                        parts = list(parts)
                        indexes.append(parts)
                        output_count += len(parts)

            with rasterio.open(files[0]) as first:
                kwargs = first.meta
                kwargs.update(**creation_options)

            kwargs.update(
                driver=driver,
                count=output_count)

            if photometric:
                kwargs['photometric'] = photometric

            with rasterio.open(output, 'w', **kwargs) as dst:
                dst_idx = 1
                for path, index in zip(files, indexes):
                    with rasterio.open(path) as src:
                        if isinstance(index, int):
                            data = src.read(index)
                            dst.write(data, dst_idx)
                            dst_idx += 1
                        elif isinstance(index, Iterable):
                            data = src.read(index)
                            dst.write(data, range(dst_idx, dst_idx + len(index)))
                            dst_idx += len(index)

    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
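
The --bidx grammar described in the docstring (N, M,N,O, M..O, ..N, N..) maps onto the parsing branch inside the loop above; it is pulled out here as a standalone helper for clarity (an illustrative refactor, not rasterio's code).

# Illustrative standalone version of the --bidx parsing done in the loop
# above; not rasterio's actual code. src_indexes is the band index tuple
# of the corresponding input dataset.
def parse_bidx(item, src_indexes):
    if item is None:                    # no --bidx: take every band
        return list(src_indexes)
    if '..' in item:                    # "M..O", "..N" or "N.."
        start, stop = (int(x) if x else None for x in item.split('..'))
        start = 1 if start is None else start
        return list(src_indexes[slice(start - 1, stop)])
    return [int(x) for x in item.split(',')]   # "N" or "M,N,O"


print(parse_bidx('1..3', (1, 2, 3)))  # [1, 2, 3]
print(parse_bidx('..2', (1, 2, 3)))   # [1, 2]
print(parse_bidx('3..', (1, 2, 3)))   # [3]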
Code example #19
File: calc.py Project: quexiang/Fast-STWR
def calc(ctx, command, files, output, name, dtype, masked, overwrite,
         creation_options):
    """A raster data calculator

    Evaluates an expression using input datasets and writes the result
    to a new dataset.

    Command syntax is lisp-like. An expression consists of an operator
    or function name and one or more strings, numbers, or expressions
    enclosed in parentheses. Functions include ``read`` (gets a raster
    array) and ``asarray`` (makes a 3-D array from 2-D arrays).

    \b
        * (read i) evaluates to the i-th input dataset (a 3-D array).
        * (read i j) evaluates to the j-th band of the i-th dataset (a 2-D
          array).
        * (take foo j) evaluates to the j-th band of a dataset named foo (see
          help on the --name option above).
        * Standard numpy array operators (+, -, *, /) are available.
        * When the final result is a list of arrays, a multi band output
          file is written.
        * When the final result is a single array, a single band output
          file is written.

    Example:

    \b
         $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\
         > /tmp/out.tif

    Produces a 3-band GeoTIFF with all values scaled by 0.95 and
    incremented by 2.

    \b
        $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\
        > tests/data/shade.tif /tmp/out.tif

    Produces a 3-band RGB GeoTIFF, with red levels incremented by 125,
    from the single-band input.

    """
    import numpy as np

    try:
        with ctx.obj['env']:
            output, files = resolve_inout(files=files,
                                          output=output,
                                          overwrite=overwrite)

            inputs = ([tuple(n.split('=')) for n in name] + [(None, n)
                                                             for n in files])

            with rasterio.open(inputs[0][1]) as first:
                kwargs = first.meta
                kwargs.update(**creation_options)
                dtype = dtype or first.meta['dtype']
                kwargs['dtype'] = dtype

            ctxkwds = OrderedDict()
            for i, (name, path) in enumerate(inputs):
                with rasterio.open(path) as src:
                    # Using the class method instead of instance
                    # method. Latter raises
                    #
                    # TypeError: astype() got an unexpected keyword
                    # argument 'copy'
                    #
                    # possibly something to do with the instance being
                    # a masked array.
                    ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked)

            # Extend snuggs.
            snuggs.func_map['read'] = read_array
            snuggs.func_map['band'] = lambda d, i: get_bands(inputs, d, i)
            snuggs.func_map['bands'] = lambda d: get_bands(inputs, d)
            snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)
            snuggs.func_map['sieve'] = lambda *args: sieve(*args)

            res = snuggs.eval(command, ctxkwds)

            if (isinstance(res, np.ma.core.MaskedArray) and
                (tuple(LooseVersion(np.__version__).version) <
                 (1, 9) or tuple(LooseVersion(np.__version__).version) >
                 (1, 10))):
                res = res.filled(kwargs['nodata'])

            if len(res.shape) == 3:
                results = np.ndarray.astype(res, dtype, copy=False)
            else:
                results = np.asanyarray(
                    [np.ndarray.astype(res, dtype, copy=False)])

            kwargs['count'] = results.shape[0]

            with rasterio.open(output, 'w', **kwargs) as dst:
                dst.write(results)

    except snuggs.ExpressionError as err:
        click.echo("Expression Error:")
        click.echo('  %s' % err.text)
        click.echo(' ' + ' ' * err.offset + "^")
        click.echo(err)
        raise click.Abort()
Code example #20
File: merge.py Project: tfeuvrier-cs/rasterio
def merge(ctx, files, output, driver, bounds, res, resampling,
          nodata, bidx, overwrite, precision, creation_options):
    """Copy valid pixels from input files to an output file.

    All files must have the same number of bands, data type, and
    coordinate reference system.

    Input files are merged in their listed order using the reverse
    painter's algorithm. If the output file exists, its values will be
    overwritten by input values.

    Geospatial bounds and resolution of a new output file in the
    units of the input file coordinate reference system may be provided
    and are otherwise taken from the first input file.

    Note: --res changed from 2 parameters in 0.25.

    \b
      --res 0.1 0.1  => --res 0.1 (square)
      --res 0.1 0.2  => --res 0.1 --res 0.2  (rectangular)
    """
    from rasterio.merge import merge as merge_tool

    output, files = resolve_inout(
        files=files, output=output, overwrite=overwrite)

    resampling = Resampling[resampling]

    with ctx.obj["env"]:
        dest, output_transform = merge_tool(
            files,
            bounds=bounds,
            res=res,
            nodata=nodata,
            precision=precision,
            indexes=(bidx or None),
            resampling=resampling,
        )

        with rasterio.open(files[0]) as first:
            profile = first.profile
            profile["transform"] = output_transform
            profile["height"] = dest.shape[1]
            profile["width"] = dest.shape[2]
            profile["count"] = dest.shape[0]
            profile.pop("driver", None)
            if driver:
                profile["driver"] = driver
            if nodata is not None:
                profile["nodata"] = nodata

            profile.update(**creation_options)

            with rasterio.open(output, "w", **profile) as dst:
                dst.write(dest)

                # uses the colormap in the first input raster.
                try:
                    colormap = first.colormap(1)
                    dst.write_colormap(1, colormap)
                except ValueError:
                    pass
Code example #21
def warp(ctx, files, output, driver, like, dst_crs, dimensions, src_bounds,
         dst_bounds, res, resampling, src_nodata, dst_nodata, threads,
         check_invert_proj, overwrite, creation_options,
         target_aligned_pixels):
    """
    Warp a raster dataset.

    If a template raster is provided using the --like option, the
    coordinate reference system, affine transform, and dimensions of
    that raster will be used for the output.  In this case --dst-crs,
    --bounds, --res, and --dimensions options are not applicable and
    an exception will be raised.

    \b
        $ rio warp input.tif output.tif --like template.tif

    The output coordinate reference system may be either a PROJ.4 or
    EPSG:nnnn string,

    \b
        --dst-crs EPSG:4326
        --dst-crs '+proj=longlat +ellps=WGS84 +datum=WGS84'

    or a JSON text-encoded PROJ.4 object.

    \b
        --dst-crs '{"proj": "utm", "zone": 18, ...}'

    If --dimensions are provided, --res and --bounds are not applicable and an
    exception will be raised.
    Resolution is calculated based on the relationship between the
    raster bounds in the target coordinate system and the dimensions,
    and may produce rectangular rather than square pixels.

    \b
        $ rio warp input.tif output.tif --dimensions 100 200 \\
        > --dst-crs EPSG:4326

    If --bounds are provided, --res is required if --dst-crs is provided
    (defaults to source raster resolution otherwise).

    \b
        $ rio warp input.tif output.tif \\
        > --bounds -78 22 -76 24 --res 0.1 --dst-crs EPSG:4326

    """
    output, files = resolve_inout(files=files,
                                  output=output,
                                  overwrite=overwrite)

    resampling = Resampling[resampling]  # get integer code for method

    if not len(res):
        # Click sets this as an empty tuple if not provided
        res = None
    else:
        # Expand one value to two if needed
        res = (res[0], res[0]) if len(res) == 1 else res

    if target_aligned_pixels:
        if not res:
            raise click.BadParameter(
                '--target-aligned-pixels requires a specified resolution')
        if src_bounds or dst_bounds:
            raise click.BadParameter(
                '--target-aligned-pixels cannot be used with '
                '--src-bounds or --dst-bounds')

    # Check invalid parameter combinations
    if like:
        invalid_combos = (dimensions, dst_bounds, dst_crs, res)
        if any(p for p in invalid_combos if p is not None):
            raise click.BadParameter(
                "--like cannot be used with any of --dimensions, --bounds, "
                "--dst-crs, or --res")

    elif dimensions:
        invalid_combos = (dst_bounds, res)
        if any(p for p in invalid_combos if p is not None):
            raise click.BadParameter(
                "--dimensions cannot be used with --bounds or --res")

    with ctx.obj['env']:
        setenv(CHECK_WITH_INVERT_PROJ=check_invert_proj)

        with rasterio.open(files[0]) as src:
            l, b, r, t = src.bounds
            out_kwargs = src.profile.copy()
            out_kwargs['driver'] = driver

            # Sort out the bounds options.
            if src_bounds and dst_bounds:
                raise click.BadParameter(
                    "--src-bounds and destination --bounds may not be "
                    "specified simultaneously.")

            if like:
                with rasterio.open(like) as template_ds:
                    dst_crs = template_ds.crs
                    dst_transform = template_ds.transform
                    dst_height = template_ds.height
                    dst_width = template_ds.width

            elif dst_crs is not None:
                try:
                    dst_crs = CRS.from_string(dst_crs)
                except ValueError as err:
                    raise click.BadParameter(str(err),
                                             param='dst_crs',
                                             param_hint='dst_crs')

                if dimensions:
                    # Calculate resolution appropriate for dimensions
                    # in target.
                    dst_width, dst_height = dimensions
                    bounds = src_bounds or src.bounds
                    try:
                        xmin, ymin, xmax, ymax = transform_bounds(
                            src.crs, dst_crs, *bounds)
                    except CRSError as err:
                        raise click.BadParameter(str(err),
                                                 param='dst_crs',
                                                 param_hint='dst_crs')
                    dst_transform = Affine(
                        (xmax - xmin) / float(dst_width), 0, xmin, 0,
                        (ymin - ymax) / float(dst_height), ymax)

                elif src_bounds or dst_bounds:
                    if not res:
                        raise click.BadParameter(
                            "Required when using --bounds.",
                            param='res',
                            param_hint='res')

                    if src_bounds:
                        try:
                            xmin, ymin, xmax, ymax = transform_bounds(
                                src.crs, dst_crs, *src_bounds)
                        except CRSError as err:
                            raise click.BadParameter(str(err),
                                                     param='dst_crs',
                                                     param_hint='dst_crs')
                    else:
                        xmin, ymin, xmax, ymax = dst_bounds

                    dst_transform = Affine(res[0], 0, xmin, 0, -res[1], ymax)
                    dst_width = max(int(ceil((xmax - xmin) / res[0])), 1)
                    dst_height = max(int(ceil((ymax - ymin) / res[1])), 1)

                else:
                    try:
                        if src.transform.is_identity and src.gcps:
                            src_crs = src.gcps[1]
                            kwargs = {'gcps': src.gcps[0]}
                        else:
                            src_crs = src.crs
                            kwargs = src.bounds._asdict()
                        dst_transform, dst_width, dst_height = calcdt(
                            src_crs,
                            dst_crs,
                            src.width,
                            src.height,
                            resolution=res,
                            **kwargs)
                    except CRSError as err:
                        raise click.BadParameter(str(err),
                                                 param='dst_crs',
                                                 param_hint='dst_crs')

            elif dimensions:
                # Same projection, different dimensions, calculate resolution.
                dst_crs = src.crs
                dst_width, dst_height = dimensions
                l, b, r, t = src_bounds or (l, b, r, t)
                dst_transform = Affine((r - l) / float(dst_width), 0, l, 0,
                                       (b - t) / float(dst_height), t)

            elif src_bounds or dst_bounds:
                # Same projection, different dimensions and possibly
                # different resolution.
                if not res:
                    res = (src.transform.a, -src.transform.e)

                dst_crs = src.crs
                xmin, ymin, xmax, ymax = (src_bounds or dst_bounds)
                dst_transform = Affine(res[0], 0, xmin, 0, -res[1], ymax)
                dst_width = max(int(ceil((xmax - xmin) / res[0])), 1)
                dst_height = max(int(ceil((ymax - ymin) / res[1])), 1)

            elif res:
                # Same projection, different resolution.
                dst_crs = src.crs
                dst_transform = Affine(res[0], 0, l, 0, -res[1], t)
                dst_width = max(int(ceil((r - l) / res[0])), 1)
                dst_height = max(int(ceil((t - b) / res[1])), 1)

            else:
                dst_crs = src.crs
                dst_transform = src.transform
                dst_width = src.width
                dst_height = src.height

            if target_aligned_pixels:
                dst_transform, dst_width, dst_height = aligned_target(
                    dst_transform, dst_width, dst_height, res)

            # If src_nodata is not None, update the dst metadata NODATA
            # value to src_nodata (will be overridden by dst_nodata if it is not None
            if src_nodata is not None:
                # Update the dst nodata value
                out_kwargs.update({'nodata': src_nodata})

            # Validate a manually set destination NODATA value
            # against the input datatype.
            if dst_nodata is not None:
                if src_nodata is None and src.meta['nodata'] is None:
                    raise click.BadParameter(
                        "--src-nodata must be provided because dst-nodata is not None"
                    )
                else:
                    # Update the dst nodata value
                    out_kwargs.update({'nodata': dst_nodata})

            # When the bounds option is misused, extreme values of
            # destination width and height may result.
            if (dst_width < 0 or dst_height < 0 or dst_width > MAX_OUTPUT_WIDTH
                    or dst_height > MAX_OUTPUT_HEIGHT):
                raise click.BadParameter(
                    "Invalid output dimensions: {0}.".format(
                        (dst_width, dst_height)))

            out_kwargs.update({
                'crs': dst_crs,
                'transform': dst_transform,
                'width': dst_width,
                'height': dst_height
            })

            # Adjust block size if necessary.
            if ('blockxsize' in out_kwargs
                    and dst_width < out_kwargs['blockxsize']):
                del out_kwargs['blockxsize']
            if ('blockysize' in out_kwargs
                    and dst_height < out_kwargs['blockysize']):
                del out_kwargs['blockysize']

            out_kwargs.update(**creation_options)

            with rasterio.open(output, 'w', **out_kwargs) as dst:
                reproject(source=rasterio.band(src,
                                               list(range(1, src.count + 1))),
                          destination=rasterio.band(
                              dst, list(range(1, src.count + 1))),
                          src_transform=src.transform,
                          src_crs=src.crs,
                          src_nodata=src_nodata,
                          dst_transform=out_kwargs['transform'],
                          dst_crs=out_kwargs['crs'],
                          dst_nodata=dst_nodata,
                          resampling=resampling,
                          num_threads=threads)
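
All of the bounds-plus-resolution branches above build the destination grid the same way: an affine transform with the pixel sizes on the diagonal (negative in y) anchored at the upper-left corner, and dimensions from the ceiling of extent over resolution. A standalone sketch with placeholder numbers follows.

# Standalone sketch of the destination grid computation used in the
# bounds/res branches above; bounds and resolution are placeholders.
from math import ceil

from affine import Affine

xmin, ymin, xmax, ymax = -78.0, 22.0, -76.0, 24.0  # hypothetical dst bounds
res = (0.1, 0.1)                                   # hypothetical pixel size

dst_transform = Affine(res[0], 0, xmin, 0, -res[1], ymax)
dst_width = max(int(ceil((xmax - xmin) / res[0])), 1)
dst_height = max(int(ceil((ymax - ymin) / res[1])), 1)

print(dst_transform)
print(dst_width, dst_height)  # 20 20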
Code example #22
File: cli.py Project: clustergis/rio-mbtiles
def mbtiles(ctx, files, output_opt, title, description, layer_type,
            img_format, zoom_levels, image_dump, num_workers):
    """Export a dataset to MBTiles (version 1.1) in a SQLite file.

    The input dataset may have any coordinate reference system. It must
    have at least three bands, which will become the red, blue, and
    green bands of the output image tiles.

    If no zoom levels are specified, the defaults are the zoom levels
    nearest to the one at which one tile may contain the entire source
    dataset.

    If a title or description for the output file are not provided,
    they will be taken from the input dataset's filename.

    This command is suited for small to medium (~1 GB) sized sources.

    Python package: rio-mbtiles (https://github.com/mapbox/rio-mbtiles).
    """

    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    logger = logging.getLogger('rio')

    output, files = resolve_inout(files=files, output=output_opt)
    inputfile = files[0]

    with rasterio.drivers(CPL_DEBUG=verbosity > 2):

        # Read metadata from the source dataset.
        with rasterio.open(inputfile) as src:

            # Name and description.
            title = title or os.path.basename(src.name)
            description = description or src.name

            # Compute the geographic bounding box of the dataset.
            (west, east), (south, north) = transform(
                src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])

        # Resolve the minimum and maximum zoom levels for export.
        if zoom_levels:
            minzoom, maxzoom = map(int, zoom_levels.split('..'))
        else:
            zw = int(round(math.log(360.0/(east-west), 2.0)))
            zh = int(round(math.log(170.1022/(north-south), 2.0)))
            minzoom = min(zw, zh)
            maxzoom = max(zw, zh)
        logger.debug("Zoom range: %d..%d", minzoom, maxzoom)

        # Parameters for creation of tile images.
        base_kwds = {
            'driver': img_format.upper(),
            'dtype': 'uint8',
            'nodata': 0,
            'height': 256,
            'width': 256,
            'count': 3,
            'crs': 'EPSG:3857'}

        img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'

        # Initialize the sqlite db.
        if os.path.exists(output):
            os.unlink(output)
        conn = sqlite3.connect(output)
        cur = conn.cursor()
        cur.execute(
            "CREATE TABLE tiles "
            "(zoom_level integer, tile_column integer, "
            "tile_row integer, tile_data blob);")
        cur.execute(
            "CREATE TABLE metadata (name text, value text);")

        # Insert mbtiles metadata into db.
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("name", title))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("type", layer_type))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("version", "1.1"))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("description", description))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("format", img_ext))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("bounds", "%f,%f,%f,%f" % (west, south, east, north)))

        conn.commit()

        # Create a pool of workers to process tile tasks.
        pool = Pool(num_workers, init_worker, (inputfile, base_kwds), 100)

        # Constrain bounds.
        EPS = 1.0e-10
        west = max(-180+EPS, west)
        south = max(-85.051129, south)
        east = min(180-EPS, east)
        north = min(85.051129, north)

        # Initialize iterator over output tiles.
        tiles = mercantile.tiles(
            west, south, east, north, range(minzoom, maxzoom+1))

        for tile, contents in pool.imap_unordered(process_tile, tiles):

            # MBTiles has a different origin than Mercantile/tilebelt.
            tiley = int(math.pow(2, tile.z)) - tile.y - 1

            # Optional image dump.
            if image_dump:
                img_name = '%d-%d-%d.%s' % (
                    tile.x, tiley, tile.z, img_ext)
                img_path = os.path.join(image_dump, img_name)
                with open(img_path, 'wb') as img:
                    img.write(contents)

            # Insert tile into db.
            cur.execute(
                "INSERT INTO tiles "
                "(zoom_level, tile_column, tile_row, tile_data) "
                "VALUES (?, ?, ?, ?);",
                (tile.z, tile.x, tiley, sqlite3.Binary(contents)))

            conn.commit()

        conn.close()
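
A quick worked sketch of the default zoom selection and the TMS row flip used in the example above. The bounding box and tile row below are made up purely for illustration; only the formulas come from the function.

import math

# Hypothetical 1-degree by 1-degree bounding box (west, south, east, north).
west, south, east, north = -80.0, 25.0, -79.0, 26.0

# Zoom levels at which one tile roughly spans the dataset in x and in y.
zw = int(round(math.log(360.0 / (east - west), 2.0)))       # log2(360/1)   -> 8
zh = int(round(math.log(170.1022 / (north - south), 2.0)))  # log2(170.1/1) -> 7
minzoom, maxzoom = min(zw, zh), max(zw, zh)                  # export zooms 7..8

# MBTiles counts rows from the bottom (TMS), so the XYZ row is flipped.
z, y = 8, 91
tiley = int(math.pow(2, z)) - y - 1                          # 256 - 91 - 1 = 164
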
Code example #23
0
File: stack.py Project: RodrigoGonzalez/rasterio
def stack(ctx, files, output, driver, bidx, photometric, force_overwrite,
          creation_options):
    """Stack a number of bands from one or more input files into a
    multiband dataset.

    Input datasets must all be of the same kind: same data type, dimensions,
    and so on. The output is cloned from the first input.

    By default, rio-stack will take all bands from each input and write them
    in same order to the output. Optionally, bands for each input may be
    specified using a simple syntax:

      --bidx N takes the Nth band from the input (first band is 1).

      --bidx M,N,O takes bands M, N, and O.

      --bidx M..O takes bands M-O, inclusive.

      --bidx ..N takes all bands up to and including N.

      --bidx N.. takes all bands from N to the end.

    Examples using the Rasterio testing dataset, each of which produces a copy:

      rio stack RGB.byte.tif -o stacked.tif

      rio stack RGB.byte.tif --bidx 1,2,3 -o stacked.tif

      rio stack RGB.byte.tif --bidx 1..3 -o stacked.tif

      rio stack RGB.byte.tif --bidx ..2 RGB.byte.tif --bidx 3.. -o stacked.tif

    """
    logger = logging.getLogger('rio')
    try:
        with ctx.obj['env']:
            output, files = resolve_inout(files=files, output=output,
                                          force_overwrite=force_overwrite)
            output_count = 0
            indexes = []
            for path, item in zip_longest(files, bidx, fillvalue=None):
                with rasterio.open(path) as src:
                    src_indexes = src.indexes
                if item is None:
                    indexes.append(src_indexes)
                    output_count += len(src_indexes)
                elif '..' in item:
                    start, stop = map(
                        lambda x: int(x) if x else None, item.split('..'))
                    if start is None:
                        start = 1
                    indexes.append(src_indexes[slice(start - 1, stop)])
                    output_count += len(src_indexes[slice(start - 1, stop)])
                else:
                    parts = list(map(int, item.split(',')))
                    if len(parts) == 1:
                        indexes.append(parts[0])
                        output_count += 1
                    else:
                        parts = list(parts)
                        indexes.append(parts)
                        output_count += len(parts)

            with rasterio.open(files[0]) as first:
                kwargs = first.meta
                kwargs.update(**creation_options)

            kwargs.update(
                driver=driver,
                count=output_count)

            if photometric:
                kwargs['photometric'] = photometric

            with rasterio.open(output, 'w', **kwargs) as dst:
                dst_idx = 1
                for path, index in zip(files, indexes):
                    with rasterio.open(path) as src:
                        if isinstance(index, int):
                            data = src.read(index)
                            dst.write(data, dst_idx)
                            dst_idx += 1
                        elif isinstance(index, collections.Iterable):
                            data = src.read(index)
                            dst.write(data, range(dst_idx, dst_idx + len(index)))
                            dst_idx += len(index)

    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
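
The --bidx handling in the loop above can be summarized by a small stand-alone helper. parse_bidx and its arguments are hypothetical names introduced here only to illustrate the parsing; src_indexes stands in for rasterio's src.indexes tuple.

def parse_bidx(item, src_indexes):
    """Resolve one --bidx value the way the stack command above does."""
    if item is None:                       # no --bidx: take every band
        return list(src_indexes)
    if '..' in item:                       # range syntax: "1..3", "..2", "2.."
        start, stop = (int(x) if x else None for x in item.split('..'))
        return list(src_indexes[slice((start or 1) - 1, stop)])
    parts = [int(p) for p in item.split(',')]
    return parts[0] if len(parts) == 1 else parts  # single index or explicit list

assert parse_bidx(None, (1, 2, 3)) == [1, 2, 3]
assert parse_bidx('1..3', (1, 2, 3)) == [1, 2, 3]
assert parse_bidx('..2', (1, 2, 3)) == [1, 2]
assert parse_bidx('3..', (1, 2, 3)) == [3]
assert parse_bidx('1,3', (1, 2, 3)) == [1, 3]
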
Code example #24
0
def calc(ctx, command, files, output, name, dtype, masked, overwrite,
         mem_limit, creation_options):
    """A raster data calculator

    Evaluates an expression using input datasets and writes the result
    to a new dataset.

    Command syntax is lisp-like. An expression consists of an operator
    or function name and one or more strings, numbers, or expressions
    enclosed in parentheses. Functions include ``read`` (gets a raster
    array) and ``asarray`` (makes a 3-D array from 2-D arrays).

    \b
        * (read i) evaluates to the i-th input dataset (a 3-D array).
        * (read i j) evaluates to the j-th band of the i-th dataset (a
          2-D array).
        * (take foo j) evaluates to the j-th band of a dataset named foo
          (see help on the --name option above).
        * Standard numpy array operators (+, -, *, /) are available.
        * When the final result is a list of arrays, a multi-band output
          file is written.
        * When the final result is a single array, a single band output
          file is written.

    Example:

    \b
         $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\
         > /tmp/out.tif

    The command above produces a 3-band GeoTIFF with all values scaled
    by 0.95 and incremented by 2.

    \b
        $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\
        > tests/data/shade.tif /tmp/out.tif

    The command above produces a 3-band RGB GeoTIFF, with red levels
    incremented by 125, from the single-band input.

    The maximum amount of memory used to perform calculations defaults to
    64 MB. This number can be increased to improve the speed of calculation.

    """
    import numpy as np

    try:
        with ctx.obj['env']:
            output, files = resolve_inout(files=files,
                                          output=output,
                                          overwrite=overwrite)
            inputs = ([tuple(n.split('=')) for n in name] + [(None, n)
                                                             for n in files])
            sources = [rasterio.open(path) for name, path in inputs]

            first = sources[0]
            kwargs = first.profile
            kwargs.update(**creation_options)
            dtype = dtype or first.meta['dtype']
            kwargs['dtype'] = dtype

            # Extend snuggs.
            snuggs.func_map['read'] = _read_array
            snuggs.func_map['band'] = lambda d, i: _get_bands(
                inputs, sources, d, i)
            snuggs.func_map['bands'] = lambda d: _get_bands(inputs, sources, d)
            snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)
            snuggs.func_map['sieve'] = lambda *args: sieve(*args)

            dst = None

            # The windows iterator is initialized with a single sample.
            # The actual work windows will be added in the second
            # iteration of the loop.
            work_windows = [(None, Window(0, 0, 16, 16))]

            for ij, window in work_windows:

                ctxkwds = OrderedDict()

                for i, ((name, path), src) in enumerate(zip(inputs, sources)):

                    # Using the class method instead of instance
                    # method. Latter raises
                    #
                    # TypeError: astype() got an unexpected keyword
                    # argument 'copy'
                    #
                    # possibly something to do with the instance being
                    # a masked array.
                    ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked,
                                                                 window=window)

                res = snuggs.eval(command, **ctxkwds)

                if (isinstance(res, np.ma.core.MaskedArray) and
                    (tuple(LooseVersion(np.__version__).version) <
                     (1, 9) or tuple(LooseVersion(np.__version__).version) >
                     (1, 10))):
                    res = res.filled(kwargs['nodata'])

                if len(res.shape) == 3:
                    results = np.ndarray.astype(res, dtype, copy=False)
                else:
                    results = np.asanyarray(
                        [np.ndarray.astype(res, dtype, copy=False)])

                # The first iteration is only to get sample results and from them
                # compute some properties of the output dataset.
                if dst is None:
                    kwargs['count'] = results.shape[0]
                    dst = rasterio.open(output, 'w', **kwargs)
                    work_windows.extend(
                        _chunk_output(dst.width,
                                      dst.height,
                                      dst.count,
                                      np.dtype(dst.dtypes[0]).itemsize,
                                      mem_limit=mem_limit))

                # In subsequent iterations we write results.
                else:
                    dst.write(results, window=window)

    except snuggs.ExpressionError as err:
        click.echo("Expression Error:")
        click.echo('  %s' % err.text)
        click.echo(' ' + ' ' * err.offset + "^")
        click.echo(err)
        raise click.Abort()

    finally:
        if dst:
            dst.close()
        for src in sources:
            src.close()
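
For readers new to the lisp-like syntax, the first docstring example reduces to plain numpy arithmetic over the 3-D array returned by read. The sketch below assumes the rasterio test file mentioned in the docstring is available locally.

import rasterio

# "(+ 2 (* 0.95 (read 1)))" applied to the single input dataset:
with rasterio.open('tests/data/RGB.byte.tif') as src:
    arr = src.read()           # what (read 1) evaluates to: a (bands, rows, cols) array
    result = 2 + 0.95 * arr    # the whole expression, evaluated band by band

print(result.shape)            # three bands in, three bands out
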
Code example #25
0
def test_implicit_overwrite(tmpdir):
    """Implicit overwrite of existing file fails."""
    foo_tif = tmpdir.join('foo.tif')
    foo_tif.write("content")
    with pytest.raises(FileOverwriteError):
        helpers.resolve_inout(output=str(foo_tif))
Code example #26
0
File: rasterize.py Project: yangmaoer/rasterio
def rasterize(ctx, files, output, driver, like, bounds, dimensions, res,
              src_crs, all_touched, default_value, fill, prop, force_overwrite,
              creation_options):
    """Rasterize GeoJSON into a new or existing raster.

    If the output raster exists, rio-rasterize will rasterize feature values
    into all bands of that raster.  The GeoJSON is assumed to be in the same
    coordinate reference system as the output unless --src-crs is provided.

    --default_value, or property values when using --property, must use a
    data type valid for the data type of that raster.


    If a template raster is provided using the --like option, the affine
    transform and data type from that raster will be used to create the output.
    Only a single band will be output.

    --driver, --bounds, --dimensions, and --res are ignored when the output
    exists or a --like raster is provided.


    If the output does not exist and --like raster is not provided, the input
    GeoJSON will be used to determine the bounds of the output unless
    provided using --bounds.

    --dimensions or --res are required in this case.

    If --res is provided, the bottom and right coordinates of bounds are
    ignored.


    Note:
    The GeoJSON is not projected to match the coordinate reference system
    of the output or --like rasters at this time.  This functionality may be
    added in the future.
    """
    from rasterio.crs import CRS
    from rasterio.features import rasterize
    from rasterio.features import bounds as calculate_bounds

    output, files = resolve_inout(files=files,
                                  output=output,
                                  force_overwrite=force_overwrite)

    bad_param = click.BadParameter('invalid CRS.  Must be an EPSG code.',
                                   ctx,
                                   param=src_crs,
                                   param_hint='--src_crs')
    has_src_crs = src_crs is not None
    try:
        src_crs = CRS.from_string(src_crs) if has_src_crs else CRS.from_string(
            'EPSG:4326')
    except CRSError:
        raise bad_param

    # If values are actually meant to be integers, we need to cast them
    # as such or rasterize creates floating point outputs
    if default_value == int(default_value):
        default_value = int(default_value)
    if fill == int(fill):
        fill = int(fill)

    with ctx.obj['env']:

        def feature_value(feature):
            if prop and 'properties' in feature:
                return feature['properties'].get(prop, default_value)
            return default_value

        with click.open_file(files.pop(0) if files else '-') as gj_f:
            geojson = json.loads(gj_f.read())
        if 'features' in geojson:
            geometries = []
            for f in geojson['features']:
                geometries.append((f['geometry'], feature_value(f)))
        elif 'geometry' in geojson:
            geometries = ((geojson['geometry'], feature_value(geojson)), )
        else:
            raise click.BadParameter('Invalid GeoJSON',
                                     param=input,
                                     param_hint='input')

        geojson_bounds = geojson.get('bbox', calculate_bounds(geojson))

        if rasterio.shutil.exists(output):
            with rasterio.open(output, 'r+') as out:
                if has_src_crs and src_crs != out.crs:
                    raise click.BadParameter(
                        'GeoJSON does not match crs of '
                        'existing output raster',
                        param='input',
                        param_hint='input')

                if disjoint_bounds(geojson_bounds, out.bounds):
                    click.echo(
                        "GeoJSON outside bounds of existing output "
                        "raster. Are they in different coordinate "
                        "reference systems?",
                        err=True)

                meta = out.meta.copy()

                result = rasterize(geometries,
                                   out_shape=(meta['height'], meta['width']),
                                   transform=meta.get('affine',
                                                      meta['transform']),
                                   all_touched=all_touched,
                                   dtype=meta.get('dtype', None),
                                   default_value=default_value,
                                   fill=fill)

                for bidx in range(1, meta['count'] + 1):
                    data = out.read(bidx, masked=True)
                    # Burn in any non-fill pixels, and update mask accordingly
                    ne = result != fill
                    data[ne] = result[ne]
                    data.mask[ne] = False
                    out.write(data, indexes=bidx)

        else:
            if like is not None:
                template_ds = rasterio.open(like)

                if has_src_crs and src_crs != template_ds.crs:
                    raise click.BadParameter(
                        'GeoJSON does not match crs of '
                        '--like raster',
                        param='input',
                        param_hint='input')

                if disjoint_bounds(geojson_bounds, template_ds.bounds):
                    click.echo(
                        "GeoJSON outside bounds of --like raster. "
                        "Are they in different coordinate reference "
                        "systems?",
                        err=True)

                kwargs = template_ds.meta.copy()
                kwargs['count'] = 1
                kwargs['transform'] = template_ds.transform

                template_ds.close()

            else:
                bounds = bounds or geojson_bounds

                if src_crs.is_geographic:
                    if (bounds[0] < -180 or bounds[2] > 180 or bounds[1] < -80
                            or bounds[3] > 80):
                        raise click.BadParameter(
                            "Bounds are beyond the valid extent for "
                            "EPSG:4326.",
                            ctx,
                            param=bounds,
                            param_hint='--bounds')

                if dimensions:
                    width, height = dimensions
                    res = ((bounds[2] - bounds[0]) / float(width),
                           (bounds[3] - bounds[1]) / float(height))

                else:
                    if not res:
                        raise click.BadParameter(
                            'pixel dimensions are required',
                            ctx,
                            param=res,
                            param_hint='--res')

                    elif len(res) == 1:
                        res = (res[0], res[0])

                    width = max(
                        int(ceil((bounds[2] - bounds[0]) / float(res[0]))), 1)
                    height = max(
                        int(ceil((bounds[3] - bounds[1]) / float(res[1]))), 1)

                kwargs = {
                    'count':
                    1,
                    'crs':
                    src_crs,
                    'width':
                    width,
                    'height':
                    height,
                    'transform':
                    Affine(res[0], 0, bounds[0], 0, -res[1], bounds[3]),
                    'driver':
                    driver
                }
                kwargs.update(**creation_options)

            result = rasterize(geometries,
                               out_shape=(kwargs['height'], kwargs['width']),
                               transform=kwargs['transform'],
                               all_touched=all_touched,
                               dtype=kwargs.get('dtype', None),
                               default_value=default_value,
                               fill=fill)

            if 'dtype' not in kwargs:
                kwargs['dtype'] = result.dtype

            kwargs['nodata'] = fill

            with rasterio.open(output, 'w', **kwargs) as out:
                out.write(result, indexes=1)
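
When the output does not yet exist and no --like raster is given, the width, height, and transform are derived from bounds and resolution exactly as in the --res branch above. A small numeric sketch with made-up bounds (west, south, east, north) and a 0.5-degree pixel, using the affine package rasterio already depends on:

from math import ceil
from affine import Affine

bounds = (-78.0, 22.0, -76.0, 24.0)   # hypothetical bounds, 2 x 2 degrees
res = (0.5, 0.5)                      # pixel size in CRS units

width = max(int(ceil((bounds[2] - bounds[0]) / float(res[0]))), 1)   # 4
height = max(int(ceil((bounds[3] - bounds[1]) / float(res[1]))), 1)  # 4
transform = Affine(res[0], 0, bounds[0], 0, -res[1], bounds[3])

# The transform anchors the upper-left corner at (west, north) with a
# negative y pixel size, i.e. a north-up grid.
print(width, height)
print(transform)
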
Code example #27
0
def test_resolve_files_inout__inout_files():
    assert helpers.resolve_inout(files=('a', 'b', 'c')) == ('c', ['a', 'b'])
Code example #29
0
def test_resolve_files_inout__output():
    assert helpers.resolve_inout(input='in', output='out') == ('out', ['in'])
Code example #30
0
def test_resolve_files_inout__inout_files_output_o():
    assert helpers.resolve_inout(
        files=('a', 'b', 'c'), output='out') == ('out', ['a', 'b', 'c'])
Code example #31
0
File: calc.py Project: DanLipsitt/rasterio
def calc(ctx, command, files, output, name, dtype, masked, overwrite,
         creation_options):
    """A raster data calculator

    Evaluates an expression using input datasets and writes the result
    to a new dataset.

    Command syntax is lisp-like. An expression consists of an operator
    or function name and one or more strings, numbers, or expressions
    enclosed in parentheses. Functions include ``read`` (gets a raster
    array) and ``asarray`` (makes a 3-D array from 2-D arrays).

    \b
        * (read i) evaluates to the i-th input dataset (a 3-D array).
        * (read i j) evaluates to the j-th band of the i-th dataset (a 2-D
          array).
        * (take foo j) evaluates to the j-th band of a dataset named foo (see
          help on the --name option above).
        * Standard numpy array operators (+, -, *, /) are available.
        * When the final result is a list of arrays, a multi-band output
          file is written.
        * When the final result is a single array, a single band output
          file is written.

    Example:

    \b
         $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\
         > /tmp/out.tif

    Produces a 3-band GeoTIFF with all values scaled by 0.95 and
    incremented by 2.

    \b
        $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\
        > tests/data/shade.tif /tmp/out.tif

    Produces a 3-band RGB GeoTIFF, with red levels incremented by 125,
    from the single-band input.

    """
    import numpy as np

    try:
        with ctx.obj['env']:
            output, files = resolve_inout(files=files, output=output,
                                          overwrite=overwrite)

            inputs = ([tuple(n.split('=')) for n in name] +
                      [(None, n) for n in files])

            with rasterio.open(inputs[0][1]) as first:
                kwargs = first.meta
                kwargs.update(**creation_options)
                dtype = dtype or first.meta['dtype']
                kwargs['dtype'] = dtype

            ctxkwds = OrderedDict()
            for i, (name, path) in enumerate(inputs):
                with rasterio.open(path) as src:
                    # Using the class method instead of instance
                    # method. Latter raises
                    #
                    # TypeError: astype() got an unexpected keyword
                    # argument 'copy'
                    #
                    # possibly something to do with the instance being
                    # a masked array.
                    ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked)

            # Extend snuggs.
            snuggs.func_map['read'] = read_array
            snuggs.func_map['band'] = lambda d, i: get_bands(inputs, d, i)
            snuggs.func_map['bands'] = lambda d: get_bands(inputs, d)
            snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)
            snuggs.func_map['sieve'] = lambda *args: sieve(*args)

            res = snuggs.eval(command, ctxkwds)

            if (isinstance(res, np.ma.core.MaskedArray) and (
                    tuple(LooseVersion(np.__version__).version) < (1, 9) or
                    tuple(LooseVersion(np.__version__).version) > (1, 10))):
                res = res.filled(kwargs['nodata'])

            if len(res.shape) == 3:
                results = np.ndarray.astype(res, dtype, copy=False)
            else:
                results = np.asanyarray(
                    [np.ndarray.astype(res, dtype, copy=False)])

            kwargs['count'] = results.shape[0]

            with rasterio.open(output, 'w', **kwargs) as dst:
                dst.write(results)

    except snuggs.ExpressionError as err:
        click.echo("Expression Error:")
        click.echo('  %s' % err.text)
        click.echo(' ' + ' ' * err.offset + "^")
        click.echo(err)
        raise click.Abort()
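
How the --name bindings and positional files end up in the inputs list above can be seen with a couple of made-up values; the paths below are hypothetical and only the list construction mirrors the code.

# Each --name option is "alias=path"; positional files get no alias.
name = ('foo=tests/data/shade.tif',)
files = ('tests/data/RGB.byte.tif',)

inputs = ([tuple(n.split('=')) for n in name] +
          [(None, n) for n in files])

print(inputs)
# [('foo', 'tests/data/shade.tif'), (None, 'tests/data/RGB.byte.tif')]
# "(take foo 1)" then refers to band 1 of the aliased dataset, while the
# unnamed input is bound to the placeholder name "_i2" during evaluation.
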
Code example #33
0
File: warp.py Project: ocefpaf/rasterio
def warp(ctx, files, output, driver, like, dst_crs, dimensions, src_bounds,
         dst_bounds, res, resampling, src_nodata, dst_nodata, threads,
         check_invert_proj, overwrite, creation_options,
         target_aligned_pixels):
    """
    Warp a raster dataset.

    If a template raster is provided using the --like option, the
    coordinate reference system, affine transform, and dimensions of
    that raster will be used for the output.  In this case --dst-crs,
    --bounds, --res, and --dimensions options are not applicable and
    an exception will be raised.

    \b
        $ rio warp input.tif output.tif --like template.tif

    The output coordinate reference system may be either a PROJ.4 or
    EPSG:nnnn string,

    \b
        --dst-crs EPSG:4326
        --dst-crs '+proj=longlat +ellps=WGS84 +datum=WGS84'

    or a JSON text-encoded PROJ.4 object.

    \b
        --dst-crs '{"proj": "utm", "zone": 18, ...}'

    If --dimensions are provided, --res and --bounds are not applicable and an
    exception will be raised.
    Resolution is calculated based on the relationship between the
    raster bounds in the target coordinate system and the dimensions,
    and may produce rectangular rather than square pixels.

    \b
        $ rio warp input.tif output.tif --dimensions 100 200 \\
        > --dst-crs EPSG:4326

    If --bounds are provided, --res is required if --dst-crs is provided
    (defaults to source raster resolution otherwise).

    \b
        $ rio warp input.tif output.tif \\
        > --bounds -78 22 -76 24 --res 0.1 --dst-crs EPSG:4326

    """
    output, files = resolve_inout(
        files=files, output=output, overwrite=overwrite)

    resampling = Resampling[resampling]  # get integer code for method

    if not len(res):
        # Click sets this as an empty tuple if not provided
        res = None
    else:
        # Expand one value to two if needed
        res = (res[0], res[0]) if len(res) == 1 else res

    if target_aligned_pixels:
        if not res:
            raise click.BadParameter(
                '--target-aligned-pixels requires a specified resolution')
        if src_bounds or dst_bounds:
            raise click.BadParameter(
                '--target-aligned-pixels cannot be used with '
                '--src-bounds or --dst-bounds')

    # Check invalid parameter combinations
    if like:
        invalid_combos = (dimensions, dst_bounds, dst_crs, res)
        if any(p for p in invalid_combos if p is not None):
            raise click.BadParameter(
                "--like cannot be used with any of --dimensions, --bounds, "
                "--dst-crs, or --res")

    elif dimensions:
        invalid_combos = (dst_bounds, res)
        if any(p for p in invalid_combos if p is not None):
            raise click.BadParameter(
                "--dimensions cannot be used with --bounds or --res")

    with ctx.obj['env']:
        setenv(CHECK_WITH_INVERT_PROJ=check_invert_proj)

        with rasterio.open(files[0]) as src:
            l, b, r, t = src.bounds
            out_kwargs = src.profile.copy()
            out_kwargs['driver'] = driver

            # Sort out the bounds options.
            if src_bounds and dst_bounds:
                raise click.BadParameter(
                    "--src-bounds and destination --bounds may not be "
                    "specified simultaneously.")

            if like:
                with rasterio.open(like) as template_ds:
                    dst_crs = template_ds.crs
                    dst_transform = template_ds.transform
                    dst_height = template_ds.height
                    dst_width = template_ds.width

            elif dst_crs is not None:
                try:
                    dst_crs = CRS.from_string(dst_crs)
                except ValueError as err:
                    raise click.BadParameter(
                        str(err), param='dst_crs', param_hint='dst_crs')

                if dimensions:
                    # Calculate resolution appropriate for dimensions
                    # in target.
                    dst_width, dst_height = dimensions
                    try:
                        xmin, ymin, xmax, ymax = transform_bounds(
                            src.crs, dst_crs, *src.bounds)
                    except CRSError as err:
                        raise click.BadParameter(
                            str(err), param='dst_crs', param_hint='dst_crs')
                    dst_transform = Affine(
                        (xmax - xmin) / float(dst_width),
                        0, xmin, 0,
                        (ymin - ymax) / float(dst_height),
                        ymax
                    )

                elif src_bounds or dst_bounds:
                    if not res:
                        raise click.BadParameter(
                            "Required when using --bounds.",
                            param='res', param_hint='res')

                    if src_bounds:
                        try:
                            xmin, ymin, xmax, ymax = transform_bounds(
                                src.crs, dst_crs, *src_bounds)
                        except CRSError as err:
                            raise click.BadParameter(
                                str(err), param='dst_crs',
                                param_hint='dst_crs')
                    else:
                        xmin, ymin, xmax, ymax = dst_bounds

                    dst_transform = Affine(res[0], 0, xmin, 0, -res[1], ymax)
                    dst_width = max(int(ceil((xmax - xmin) / res[0])), 1)
                    dst_height = max(int(ceil((ymax - ymin) / res[1])), 1)

                else:
                    try:
                        if src.transform.is_identity and src.gcps:
                            src_crs = src.gcps[1]
                            kwargs = {'gcps': src.gcps[0]}
                        else:
                            src_crs = src.crs
                            kwargs = src.bounds._asdict()
                        dst_transform, dst_width, dst_height = calcdt(
                            src_crs, dst_crs, src.width, src.height,
                            resolution=res, **kwargs)
                    except CRSError as err:
                        raise click.BadParameter(
                            str(err), param='dst_crs', param_hint='dst_crs')

            elif dimensions:
                # Same projection, different dimensions, calculate resolution.
                dst_crs = src.crs
                dst_width, dst_height = dimensions
                dst_transform = Affine(
                    (r - l) / float(dst_width),
                    0, l, 0,
                    (b - t) / float(dst_height),
                    t
                )

            elif src_bounds or dst_bounds:
                # Same projection, different dimensions and possibly
                # different resolution.
                if not res:
                    res = (src.transform.a, -src.transform.e)

                dst_crs = src.crs
                xmin, ymin, xmax, ymax = (src_bounds or dst_bounds)
                dst_transform = Affine(res[0], 0, xmin, 0, -res[1], ymax)
                dst_width = max(int(ceil((xmax - xmin) / res[0])), 1)
                dst_height = max(int(ceil((ymax - ymin) / res[1])), 1)

            elif res:
                # Same projection, different resolution.
                dst_crs = src.crs
                dst_transform = Affine(res[0], 0, l, 0, -res[1], t)
                dst_width = max(int(ceil((r - l) / res[0])), 1)
                dst_height = max(int(ceil((t - b) / res[1])), 1)

            else:
                dst_crs = src.crs
                dst_transform = src.transform
                dst_width = src.width
                dst_height = src.height

            if target_aligned_pixels:
                dst_transform, dst_width, dst_height = aligned_target(dst_transform, dst_width, dst_height, res)

            # If src_nodata is not None, update the dst metadata NODATA
            # value to src_nodata (will be overridden by dst_nodata if it is not None).
            if src_nodata is not None:
                # Update the dst nodata value
                out_kwargs.update({
                    'nodata': src_nodata
                })

            # Validate a manually set destination NODATA value
            # against the input datatype.
            if dst_nodata is not None:
                if src_nodata is None and src.meta['nodata'] is None:
                    raise click.BadParameter(
                        "--src-nodata must be provided because dst-nodata is not None")
                else:
                    # Update the dst nodata value
                    out_kwargs.update({'nodata': dst_nodata})

            # When the bounds option is misused, extreme values of
            # destination width and height may result.
            if (dst_width < 0 or dst_height < 0 or
                    dst_width > MAX_OUTPUT_WIDTH or
                    dst_height > MAX_OUTPUT_HEIGHT):
                raise click.BadParameter(
                    "Invalid output dimensions: {0}.".format(
                        (dst_width, dst_height)))

            out_kwargs.update({
                'crs': dst_crs,
                'transform': dst_transform,
                'width': dst_width,
                'height': dst_height
            })

            # Adjust block size if necessary.
            if ('blockxsize' in out_kwargs and
                    dst_width < out_kwargs['blockxsize']):
                del out_kwargs['blockxsize']
            if ('blockysize' in out_kwargs and
                    dst_height < out_kwargs['blockysize']):
                del out_kwargs['blockysize']

            out_kwargs.update(**creation_options)

            with rasterio.open(output, 'w', **out_kwargs) as dst:
                reproject(
                    source=rasterio.band(src, list(range(1, src.count + 1))),
                    destination=rasterio.band(
                        dst, list(range(1, src.count + 1))),
                    src_transform=src.transform,
                    src_crs=src.crs,
                    src_nodata=src_nodata,
                    dst_transform=out_kwargs['transform'],
                    dst_crs=out_kwargs['crs'],
                    dst_nodata=dst_nodata,
                    resampling=resampling,
                    num_threads=threads)
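
The --dimensions branch above builds the destination transform directly from the reprojected bounds and the requested pixel grid. A minimal sketch with hypothetical bounds assumed to be already expressed in the destination CRS:

from affine import Affine

xmin, ymin, xmax, ymax = -78.0, 22.0, -76.0, 24.0   # made-up bounds in dst_crs
dst_width, dst_height = 100, 200                    # requested --dimensions

dst_transform = Affine(
    (xmax - xmin) / float(dst_width),    # pixel width in CRS units
    0, xmin, 0,
    (ymin - ymax) / float(dst_height),   # negative pixel height (north-up)
    ymax)

# Pixel (0, 0) maps back to the upper-left corner of the bounds.
assert dst_transform * (0, 0) == (xmin, ymax)
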
Code example #34
0
File: cli.py Project: mapbox/rio-mbtiles
def mbtiles(ctx, files, output, overwrite, title, description,
            layer_type, img_format, tile_size, zoom_levels, image_dump,
            num_workers, src_nodata, dst_nodata, resampling, rgba):
    """Export a dataset to MBTiles (version 1.1) in a SQLite file.

    The input dataset may have any coordinate reference system. It must
    have at least three bands, which will become the red, blue, and
    green bands of the output image tiles.

    An optional fourth alpha band may be copied to the output tiles by
    using the --rgba option in combination with the PNG format. This
    option requires that the input dataset has at least 4 bands.

    If no zoom levels are specified, the defaults are the zoom levels
    nearest to the one at which one tile may contain the entire source
    dataset.

    If a title or description for the output file is not provided, it
    will be taken from the input dataset's file name.

    This command is suited for small to medium (~1 GB) sized sources.

    Python package: rio-mbtiles (https://github.com/mapbox/rio-mbtiles).
    """
    output, files = resolve_inout(files=files, output=output,
                                  overwrite=overwrite)
    inputfile = files[0]

    log = logging.getLogger(__name__)

    with ctx.obj['env']:

        # Read metadata from the source dataset.
        with rasterio.open(inputfile) as src:

            validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))
            base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}

            if src_nodata is not None:
                base_kwds.update(nodata=src_nodata)

            if dst_nodata is not None:
                base_kwds.update(nodata=dst_nodata)

            # Name and description.
            title = title or os.path.basename(src.name)
            description = description or src.name

            # Compute the geographic bounding box of the dataset.
            (west, east), (south, north) = transform(
                src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])

        # Resolve the minimum and maximum zoom levels for export.
        if zoom_levels:
            minzoom, maxzoom = map(int, zoom_levels.split('..'))
        else:
            zw = int(round(math.log(360.0 / (east - west), 2.0)))
            zh = int(round(math.log(170.1022 / (north - south), 2.0)))
            minzoom = min(zw, zh)
            maxzoom = max(zw, zh)

        log.debug("Zoom range: %d..%d", minzoom, maxzoom)

        if rgba:
            if img_format == 'JPEG':
                raise click.BadParameter("RGBA output is not possible with JPEG format.")
            else:
                count = 4
        else:
            count = 3

        # Parameters for creation of tile images.
        base_kwds.update({
            'driver': img_format.upper(),
            'dtype': 'uint8',
            'nodata': 0,
            'height': tile_size,
            'width': tile_size,
            'count': count,
            'crs': TILES_CRS})

        img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'

        # Initialize the sqlite db.
        if os.path.exists(output):
            os.unlink(output)

        # workaround for bug here: https://bugs.python.org/issue27126
        sqlite3.connect(':memory:').close()

        conn = sqlite3.connect(output)
        cur = conn.cursor()
        cur.execute(
            "CREATE TABLE tiles "
            "(zoom_level integer, tile_column integer, "
            "tile_row integer, tile_data blob);")
        cur.execute(
            "CREATE TABLE metadata (name text, value text);")

        # Insert mbtiles metadata into db.
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("name", title))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("type", layer_type))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("version", "1.1"))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("description", description))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("format", img_ext))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("bounds", "%f,%f,%f,%f" % (west, south, east, north)))

        conn.commit()

        # Create a pool of workers to process tile tasks.
        pool = Pool(num_workers, init_worker,
                    (inputfile, base_kwds, resampling), 100)

        # Constrain bounds.
        EPS = 1.0e-10
        west = max(-180 + EPS, west)
        south = max(-85.051129, south)
        east = min(180 - EPS, east)
        north = min(85.051129, north)

        # Initialize iterator over output tiles.
        tiles = mercantile.tiles(
            west, south, east, north, range(minzoom, maxzoom + 1))

        for tile, contents in pool.imap_unordered(process_tile, tiles):

            if contents is None:
                log.info("Tile %r is empty and will be skipped", tile)
                continue

            # MBTiles have a different origin than Mercantile/tilebelt.
            tiley = int(math.pow(2, tile.z)) - tile.y - 1

            # Optional image dump.
            if image_dump:
                img_name = '%d-%d-%d.%s' % (
                    tile.x, tiley, tile.z, img_ext)
                img_path = os.path.join(image_dump, img_name)
                with open(img_path, 'wb') as img:
                    img.write(contents)

            # Insert tile into db.
            cur.execute(
                "INSERT INTO tiles "
                "(zoom_level, tile_column, tile_row, tile_data) "
                "VALUES (?, ?, ?, ?);",
                (tile.z, tile.x, tiley, sqlite3.Binary(contents)))

            conn.commit()

        conn.close()
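
Finally, a minimal sketch (with made-up bounds) of the tile enumeration and the XYZ-to-TMS row flip used above; mercantile.tiles yields Tile(x, y, z) records for every tile touching the bounds at the requested zoom levels.

import math
import mercantile

west, south, east, north = -78.0, 22.0, -76.0, 24.0   # hypothetical bounds

for tile in mercantile.tiles(west, south, east, north, range(5, 7)):
    # MBTiles counts rows from the bottom of the grid, so flip the XYZ row.
    tiley = int(math.pow(2, tile.z)) - tile.y - 1
    print(tile.z, tile.x, tile.y, '->', tiley)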