Code Example #1
File: test_update.py  Project: DanLipsitt/rasterio
def test_update_nodatavals_none(data):
    """GDAL 2.1 does support un-setting nodata values."""
    tiffname = str(data.join('RGB.byte.tif'))
    with rasterio.open(tiffname, 'r+') as f:
        f.nodata = None
    with rasterio.open(tiffname) as f:
        assert f.nodatavals == (None, None, None)
Code Example #2
def test_options(tmpdir):
    """Test that setting CPL_DEBUG=True results in GDAL debug messages.
    """
    logger = logging.getLogger('GDAL')
    logger.setLevel(logging.DEBUG)
    logfile1 = str(tmpdir.join('test_options1.log'))
    fh = logging.FileHandler(logfile1)
    logger.addHandler(fh)
    
    # With CPL_DEBUG=True, expect debug messages from GDAL in
    # logfile1
    with rasterio.drivers(CPL_DEBUG=True):
        with rasterio.open("rasterio/tests/data/RGB.byte.tif") as src:
            pass

    log = open(logfile1).read()
    assert "GDAL: GDALOpen(rasterio/tests/data/RGB.byte.tif" in log
    
    # The GDAL env above having exited, CPL_DEBUG should be OFF.
    logfile2 = str(tmpdir.join('test_options2.log'))
    fh = logging.FileHandler(logfile2)
    logger.addHandler(fh)

    with rasterio.open("rasterio/tests/data/RGB.byte.tif") as src:
        pass
    
    # Expect no debug messages from GDAL.
    log = open(logfile2).read()
    assert "GDAL: GDALOpen(rasterio/tests/data/RGB.byte.tif" not in log
Code Example #3
def test_warp_no_reproject_bounds_res(runner, tmpdir):
    srcname = 'tests/data/shade.tif'
    outputname = str(tmpdir.join('test.tif'))
    out_bounds = [-11850000, 4810000, -11849000, 4812000]
    result = runner.invoke(main_group, [
        'warp', srcname, outputname, '--res', 30, '--bounds'] + out_bounds)
    assert result.exit_code == 0
    assert os.path.exists(outputname)

    with rasterio.open(srcname) as src:
        with rasterio.open(outputname) as output:
            assert output.crs == src.crs
            assert np.allclose(output.bounds, out_bounds)
            assert np.allclose([30, 30], [output.transform.a, -output.transform.e])
            assert output.width == 34
            assert output.height == 67

    # dst-bounds should be an alias to bounds
    outputname = str(tmpdir.join('test2.tif'))
    out_bounds = [-11850000, 4810000, -11849000, 4812000]
    result = runner.invoke(main_group, [
        'warp', srcname, outputname, '--res', 30, '--dst-bounds'] + out_bounds)
    assert result.exit_code == 0
    assert os.path.exists(outputname)
    with rasterio.open(srcname) as src:
        with rasterio.open(outputname) as output:
            assert np.allclose(output.bounds, out_bounds)
Code Example #4
def get_resized_8chan_image_test(image_id, datapath, bs_rgb, bs_mul):
    im = []

    fn = get_test_image_path_from_imageid(image_id, datapath)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
        for chan_i in range(3):
            min_val = bs_rgb[chan_i]['min']
            max_val = bs_rgb[chan_i]['max']
            values[chan_i] = np.clip(values[chan_i], min_val, max_val)
            values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
            im.append(skimage.transform.resize(
                values[chan_i],
                (INPUT_SIZE, INPUT_SIZE)))

    fn = get_test_image_path_from_imageid(image_id, datapath, mul=True)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
        usechannels = [1, 2, 5, 6, 7]
        for chan_i in usechannels:
            min_val = bs_mul[chan_i]['min']
            max_val = bs_mul[chan_i]['max']
            values[chan_i] = np.clip(values[chan_i], min_val, max_val)
            values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
            im.append(skimage.transform.resize(
                values[chan_i],
                (INPUT_SIZE, INPUT_SIZE)))

    im = np.array(im)  # (ch, w, h)
    im = np.swapaxes(im, 0, 2)  # -> (h, w, ch)
    im = np.swapaxes(im, 0, 1)  # -> (w, h, ch)
    return im
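The bs_rgb / bs_mul arguments above are plain per-channel statistics dictionaries used for clipping and rescaling. A minimal sketch of a call, assuming the example project's helpers and INPUT_SIZE are available; the image id, data path, and value ranges are placeholders:

# Hypothetical band statistics: one {'min', 'max'} dict per channel index.
bs_rgb = [{'min': 0.0, 'max': 255.0} for _ in range(3)]
bs_mul = [{'min': 0.0, 'max': 2047.0} for _ in range(8)]

image_id = 'AOI_2_Vegas_img100'   # placeholder image id
datapath = '/data/spacenet'       # placeholder data directory

im = get_resized_8chan_image_test(image_id, datapath, bs_rgb, bs_mul)
# im.shape == (INPUT_SIZE, INPUT_SIZE, 8): 3 RGB channels + 5 selected MUL channels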
Code Example #5
def test_warp_reproject_like(runner, tmpdir):
    likename = str(tmpdir.join('like.tif'))
    kwargs = {
        "crs": {'init': 'epsg:4326'},
        "transform": affine.Affine(0.001, 0, -106.523,
                                   0, -0.001, 39.6395),
        "count": 1,
        "dtype": rasterio.uint8,
        "driver": "GTiff",
        "width": 10,
        "height": 10,
        "nodata": 0
    }

    with rasterio.open(likename, 'w', **kwargs) as dst:
        data = np.zeros((10, 10), dtype=rasterio.uint8)
        dst.write(data, indexes=1)

    srcname = 'tests/data/shade.tif'
    outputname = str(tmpdir.join('test.tif'))
    result = runner.invoke(main_group, [
        'warp', srcname, outputname, '--like', likename])
    assert result.exit_code == 0
    assert os.path.exists(outputname)

    with rasterio.open(outputname) as output:
        assert output.crs == {'init': 'epsg:4326'}
        assert np.allclose(
            [0.001, 0.001], [output.transform.a, -output.transform.e])
        assert output.width == 10
        assert output.height == 10
Code Example #6
def writeRasterFile(outputArray, outputFilename, refRaster):
    
    with rasterio.open(refRaster) as src:
        kwargs = src.meta
        
    with rasterio.open(outputFilename, 'w', **kwargs) as dst:
        dst.write_band(1, outputArray.astype(kwargs['dtype']))
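A minimal usage sketch for writeRasterFile, assuming a single-band reference raster; the paths and the computed array are placeholders:

import numpy as np
import rasterio

refRaster = 'reference.tif'          # placeholder reference raster
with rasterio.open(refRaster) as src:
    arr = src.read(1).astype(np.float32) * 2.0   # any array with the reference shape

writeRasterFile(arr, 'output.tif', refRaster)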
Code Example #7
def tiffs(tmpdir):
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        profile = src.profile

        shadowed_profile = profile.copy()
        shadowed_profile['count'] = 4
        with rasterio.open(
                str(tmpdir.join('shadowed.tif')), 'w',
                **shadowed_profile) as dst:

            for i, band in enumerate(src.read(masked=False), 1):
                dst.write(band, i)
            dst.write(band, 4)

        del profile['nodata']
        with rasterio.open(
                str(tmpdir.join('no-nodata.tif')), 'w',
                **profile) as dst:
            dst.write(src.read(masked=False))

        with rasterio.open(
                str(tmpdir.join('sidecar-masked.tif')), 'w',
                **profile) as dst:
            dst.write(src.read(masked=False))
            mask = np.zeros(src.shape, dtype='uint8')
            dst.write_mask(mask)

    return tmpdir
Code Example #8
File: test_rio_features.py  Project: ozius13/rasterio
def test_rasterize_property_value(tmpdir, runner):
    # Test feature collection property values
    output = str(tmpdir.join('test.tif'))
    result = runner.invoke(features.rasterize,
                           [output,
                            '--res', 1000,
                            '--property', 'val',
                            '--src-crs', 'EPSG:3857'
                           ],
                           input=TEST_MERC_FEATURECOLLECTION)
    assert result.exit_code == 0
    assert os.path.exists(output)
    with rasterio.open(output) as out:
        assert out.count == 1
        data = out.read(1, masked=False)
        assert (data == 0).sum() == 50
        assert (data == 2).sum() == 25
        assert (data == 3).sum() == 25

    # Test feature property values
    output = str(tmpdir.join('test2.tif'))
    result = runner.invoke(features.rasterize,
                           [output, '--res', 0.5, '--property', 'val'],
                           input=TEST_FEATURES)
    assert result.exit_code == 0
    assert os.path.exists(output)
    with rasterio.open(output) as out:
        assert out.count == 1
        data = out.read(1, masked=False)
        assert (data == 0).sum() == 55
        assert (data == 15).sum() == 145
Code Example #9
def test_mask_crop(runner, tmpdir, basic_feature, pixelated_image):
    """
    In order to test --crop option, we need to use a transform more similar to
    a normal raster, with a negative y pixel size.
    """

    image = pixelated_image
    outfilename = str(tmpdir.join('pixelated_image.tif'))
    kwargs = {
        "crs": CRS({'init': 'epsg:4326'}),
        "transform": Affine(1, 0, 0, 0, -1, 0),
        "count": 1,
        "dtype": rasterio.uint8,
        "driver": "GTiff",
        "width": image.shape[1],
        "height": image.shape[0],
        "nodata": 255}
    with rasterio.open(outfilename, 'w', **kwargs) as out:
        out.write(image, indexes=1)

    output = str(tmpdir.join('test.tif'))

    truth = np.zeros((3, 3))
    truth[0:2, 0:2] = 1

    result = runner.invoke(
        main_group,
        ['mask', outfilename, output, '--crop', '--geojson-mask', '-'],
        input=json.dumps(basic_feature))
    assert result.exit_code == 0
    assert os.path.exists(output)
    with rasterio.open(output) as out:
        assert np.array_equal(
            truth,
            out.read(1, masked=True).filled(0))
Code Example #10
File: test_crs.py  Project: clembou/rasterio
def test_write_3857(tmpdir):
    src_path = str(tmpdir.join('lol.tif'))
    subprocess.call([
        'gdalwarp', '-t_srs', 'EPSG:3857', 
        'tests/data/RGB.byte.tif', src_path])
    dst_path = str(tmpdir.join('wut.tif'))
    with rasterio.drivers():
        with rasterio.open(src_path) as src:
            with rasterio.open(dst_path, 'w', **src.meta) as dst:
                assert dst.crs == {'init': 'epsg:3857'}
    info = subprocess.check_output([
        'gdalinfo', dst_path])
    assert """PROJCS["WGS 84 / Pseudo-Mercator",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433],
        AUTHORITY["EPSG","4326"]],
    PROJECTION["Mercator_1SP"],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",1],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext  +no_defs"],
    AUTHORITY["EPSG","3857"]]""" in info.decode('utf-8')
Code Example #11
File: test_rio_features.py  Project: ozius13/rasterio
def test_mask_crop(runner, tmpdir):
    output = str(tmpdir.join('test.tif'))

    with rasterio.open('tests/data/shade.tif') as src:

        result = runner.invoke(
            features.mask,
            [
                'tests/data/shade.tif', output,
                '--crop',
                '--geojson-mask', '-'
            ],
            input=TEST_MERC_FEATURES
        )
        assert result.exit_code == 0
        assert os.path.exists(output)
        with rasterio.open(output) as out:
            assert out.shape[1] == src.shape[1]
            assert out.shape[0] < src.shape[0]
            assert out.shape[0] == 824

    # Adding invert option after crop should be ignored
    result = runner.invoke(
        features.mask,
        [
            'tests/data/shade.tif', output,
            '--crop',
            '--invert',
            '--geojson-mask', '-'
        ],
        input=TEST_MERC_FEATURES
    )
    assert result.exit_code == 0
    assert 'Invert option ignored' in result.output
Code Example #12
File: test_warp.py  Project: clembou/rasterio
def test_warp_from_to_file_multi(tmpdir):
    """File to file"""
    tiffname = str(tmpdir.join('foo.tif'))
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        dst_crs = dict(
            proj='merc',
            a=6378137,
            b=6378137,
            lat_ts=0.0,
            lon_0=0.0,
            x_0=0.0,
            y_0=0,
            k=1.0,
            units='m',
            nadgrids='@null',
            wktext=True,
            no_defs=True)
        kwargs = src.meta.copy()
        kwargs.update(
            transform=DST_TRANSFORM,
            crs=dst_crs)
        with rasterio.open(tiffname, 'w', **kwargs) as dst:
            for i in (1, 2, 3):
                reproject(
                    rasterio.band(src, i),
                    rasterio.band(dst, i),
                    num_threads=2)
Code Example #13
File: test_tags.py  Project: ColinTalbert/rasterio
def test_tags_update(tmpdir):
    tiffname = str(tmpdir.join('foo.tif'))
    with rasterio.open(
            tiffname, 
            'w', 
            driver='GTiff', 
            count=1, 
            dtype=rasterio.uint8, 
            width=10, 
            height=10) as dst:

        dst.update_tags(a='1', b='2')
        dst.update_tags(1, c=3)
        with pytest.raises(IndexError):
            dst.update_tags(4, d=4)

        assert dst.tags() == {'a': '1', 'b': '2'}
        assert dst.tags(1) == {'c': '3' }
        
        # Assert that unicode tags work.
        # Russian text appropriated from pytest issue #319
        # https://bitbucket.org/hpk42/pytest/issue/319/utf-8-output-in-assertion-error-converted
        dst.update_tags(ns='rasterio_testing', rus=u'другая строка')
        assert dst.tags(ns='rasterio_testing') == {'rus': u'другая строка'}

    with rasterio.open(tiffname) as src:
        assert src.tags() == {'a': '1', 'b': '2'}
        assert src.tags(1) == {'c': '3'}
        assert src.tags(ns='rasterio_testing') == {'rus': u'другая строка'}
Code Example #14
File: test_read.py  Project: aashish24/rasterio
    def test_read_basic(self):
        with rasterio.open('tests/data/shade.tif') as s:
            a = s.read(masked=True)  # Gray
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (1, 1024, 1024))
            self.assertTrue(hasattr(a, 'mask'))
            self.assertEqual(a.fill_value, 255)
            self.assertEqual(list(set(s.nodatavals)), [255])
            self.assertEqual(a.dtype, rasterio.ubyte)
            self.assertEqual(a.sum((1, 2)).tolist(), [0])
        with rasterio.open('tests/data/RGB.byte.tif') as s:
            a = s.read(masked=True)  # RGB
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (3, 718, 791))
            self.assertTrue(hasattr(a, 'mask'))
            self.assertEqual(a.fill_value, 0)
            self.assertEqual(list(set(s.nodatavals)), [0])
            self.assertEqual(a.dtype, rasterio.ubyte)
            a = s.read(masked=False)  # no mask
            self.assertFalse(hasattr(a, 'mask'))
            self.assertEqual(list(set(s.nodatavals)), [0])
            self.assertEqual(a.dtype, rasterio.ubyte)
        with rasterio.open('tests/data/float.tif') as s:
            a = s.read(masked=True)  # floating point values
            self.assertEqual(a.ndim, 3)
            self.assertEqual(a.shape, (1, 2, 3))
            self.assert_(hasattr(a, 'mask'))
            self.assertEqual(list(set(s.nodatavals)), [None])
            self.assertEqual(a.dtype, rasterio.float64)
Code Example #15
File: test_update.py  Project: andreas-h/rasterio
def test_update_band(tmpdir):
    tiffname = str(tmpdir.join('foo.tif'))
    shutil.copy('rasterio/tests/data/RGB.byte.tif', tiffname)
    with rasterio.open(tiffname, 'r+') as f:
        f.write_band(1, numpy.zeros(f.shape, dtype=f.dtypes[0]))
    with rasterio.open(tiffname) as f:
        assert not f.read_band(1).any()
Code Example #16
File: test_update.py  Project: ColinTalbert/rasterio
def test_update_mask_false(data):
    """Provide an option to set a uniformly invalid mask."""
    tiffname = str(data.join('RGB.byte.tif'))
    with rasterio.open(tiffname, 'r+') as f:
        f.write_mask(False)
    with rasterio.open(tiffname) as f:
        assert not f.read_masks().any()
Code Example #17
File: test_rio_warp.py  Project: alexatodd/rasterio
def test_warp_reproject_dst_bounds(runner, tmpdir):
    """--x-dst-bounds option works."""
    srcname = 'tests/data/shade.tif'
    outputname = str(tmpdir.join('test.tif'))
    out_bounds = [-106.45036, 39.6138, -106.44136, 39.6278]
    result = runner.invoke(
        warp.warp, [srcname, outputname, '--dst-crs', 'EPSG:4326',
                    '--res', 0.001, '--x-dst-bounds'] + out_bounds)
    assert result.exit_code == 0
    assert os.path.exists(outputname)

    with rasterio.open(srcname) as src:
        with rasterio.open(outputname) as output:
            assert output.crs == {'init': 'epsg:4326'}
            assert np.allclose(output.bounds[0::3],
                                  [-106.45036, 39.6278])
            assert np.allclose([0.001, 0.001],
                                  [output.affine.a, -output.affine.e])

            # XXX: an extra row and column is produced in the dataset
            # because we're using ceil instead of floor internally.
            # Not necessarily a bug, but may change in the future.
            assert np.allclose([output.bounds[2]-0.001, output.bounds[1]+0.001],
                                  [-106.44136, 39.6138])
            assert output.width == 10
            assert output.height == 15
Code Example #18
File: conftest.py  Project: DanLipsitt/rasterio
    def _create_path_multiband_no_colorinterp(count):
        # For GDAL 2.2.2 the first band can be 'undefined', but on older
        # versions it must be 'gray'.
        undefined_ci = [ColorInterp.gray]
        if count > 1:
            undefined_ci += [ColorInterp.undefined] * (count - 1)
        dst_path = str(tmpdir.join('4band-byte-no-ci.tif'))
        profile = {
            'height': 10,
            'width': 10,
            'count': count,
            'dtype': rasterio.ubyte,
            'transform': affine.Affine(1, 0.0, 0,
                                       0.0, -1, 1),
            'driver': 'GTiff',
            'photometric': 'minisblack'
        }

        undefined_ci = tuple(undefined_ci)
        with rasterio.open(dst_path, 'w', **profile) as src:
            src.colorinterp = undefined_ci

        # Ensure override occurred.  Setting color interpretation on an
        # existing file is surrounded by traps and forceful GDAL assumptions,
        # especially on older versions.
        with rasterio.open(dst_path) as src:
            if src.colorinterp != undefined_ci:
                raise ValueError(
                    "Didn't properly set color interpretation.  GDAL can "
                    "forcefully make assumptions.")

        return dst_path
Code Example #19
File: test_err.py  Project: alexatodd/rasterio
def test_io_error(tmpdir):
    """RasterioIOError is raised when a disk file can't be opened.
    Newlines are removed from GDAL error messages."""
    with pytest.raises(RasterioIOError) as exc_info:
        rasterio.open(str(tmpdir.join('foo.tif')))
    msg, = exc_info.value.args
    assert "\n" not in msg
Code Example #20
File: test_profile.py  Project: DanLipsitt/rasterio
def test_blockysize_guard(tmpdir):
    """blockysize can't be greater than image height."""
    tiffname = str(tmpdir.join('foo.tif'))
    with pytest.raises(ValueError):
        profile = default_gtiff_profile.copy()
        profile.update(count=1, width=256, height=128)
        rasterio.open(tiffname, 'w', **profile)
Code Example #21
File: test_vfs.py  Project: coolbole/rasterio
def test_update_vfs(tmpdir, mode):
    """VFS datasets can not be created or updated"""
    with pytest.raises(TypeError):
        rasterio.open(
            'zip://{0}'.format(tmpdir), mode,
            **default_gtiff_profile(
                count=1, width=1, height=1))
Code Example #22
def warp_tif(combined_tif_path, warped_tif_path, dst_crs={
        'init': 'EPSG:3857'
}):
    logger.info('Warping tif to web mercator: %s', combined_tif_path)
    with rasterio.open(combined_tif_path) as src:
        meta = src.meta
        new_meta = meta.copy()
        transform, width, height = calculate_default_transform(
            src.crs, dst_crs, src.width, src.height, *src.bounds)
        new_meta.update({
            'crs': dst_crs,
            'transform': transform,
            'width': width,
            'height': height,
            'nodata': -28762
        })
        with rasterio.open(
                warped_tif_path, 'w', compress='DEFLATE', tiled=True,
                **new_meta) as dst:
            for i in range(1, src.count + 1):  # bands are 1-indexed; include the last band
                reproject(
                    source=rasterio.band(src, i),
                    destination=rasterio.band(dst, i),
                    src_transform=src.transform,
                    src_crs=src.crs,
                    dst_transform=transform,
                    dst_crs=dst_crs,
                    resampling=Resampling.nearest,
                    src_nodata=-28762
                )
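A hedged usage sketch for warp_tif; the paths are placeholders, and it assumes the module-level logger plus the rasterio.warp imports (calculate_default_transform, reproject, Resampling) used above are available:

warp_tif('combined.tif', 'combined_3857.tif')

with rasterio.open('combined_3857.tif') as chk:
    print(chk.crs, chk.width, chk.height)   # reprojected to Web Mercator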
Code Example #23
File: test_gcps.py  Project: RodrigoGonzalez/rasterio
def test_write_read_gcps(tmpdir):
    tiffname = str(tmpdir.join('test.tif'))
    gcps = [GroundControlPoint(1, 1, 100.0, 1000.0, z=0.0)]

    with rasterio.open(tiffname, 'w', driver='GTiff', dtype='uint8', count=1,
                       width=10, height=10, crs='epsg:4326', gcps=gcps) as dst:
        pass

    with rasterio.open(tiffname, 'r+') as dst:
        gcps, crs = dst.gcps
        assert crs['init'] == 'epsg:4326'
        assert len(gcps) == 1
        point = gcps[0]
        assert (1, 1) == (point.row, point.col)
        assert (100.0, 1000.0, 0.0) == (point.x, point.y, point.z)

        dst.gcps = [
            GroundControlPoint(1, 1, 100.0, 1000.0, z=0.0),
            GroundControlPoint(2, 2, 200.0, 2000.0, z=0.0)], crs

        gcps, crs = dst.gcps

        assert crs['init'] == 'epsg:4326'
        assert len(gcps) == 2
        point = gcps[1]
        assert (2, 2) == (point.row, point.col)
        assert (200.0, 2000.0, 0.0) == (point.x, point.y, point.z)
Code Example #24
File: test_rio_merge.py  Project: aashish24/rasterio
def tiffs(tmpdir):

    data = numpy.ones((1, 1, 1), 'uint8')

    kwargs = {'count': '1',
              'driver': 'GTiff',
              'dtype': 'uint8',
              'height': 1,
              'width': 1}

    kwargs['transform'] = Affine( 1, 0, 1,
                                  0,-1, 1)
    with rasterio.open(str(tmpdir.join('a-sw.tif')), 'w', **kwargs) as r:
        r.write(data * 40)

    kwargs['transform'] = Affine( 1, 0, 2,
                                  0,-1, 2)
    with rasterio.open(str(tmpdir.join('b-ct.tif')), 'w', **kwargs) as r:
        r.write(data * 60)

    kwargs['transform'] = Affine( 2, 0, 3,
                                  0,-2, 4)
    with rasterio.open(str(tmpdir.join('c-ne.tif')), 'w', **kwargs) as r:
        r.write(data * 90)

    kwargs['transform'] = Affine( 2, 0, 2,
                                  0,-2, 4)
    with rasterio.open(str(tmpdir.join('d-ne.tif')), 'w', **kwargs) as r:
        r.write(data * 120)

    return tmpdir
Code Example #25
File: test_rio_merge.py  Project: aashish24/rasterio
def test_data_dir_2(tmpdir):
    kwargs = {
        "crs": {'init': 'epsg:4326'},
        "transform": (-114, 0.2, 0, 46, 0, -0.1),
        "count": 1,
        "dtype": rasterio.uint8,
        "driver": "GTiff",
        "width": 10,
        "height": 10
        # these files have undefined nodata.
    }

    with rasterio.drivers():

        with rasterio.open(str(tmpdir.join('b.tif')), 'w', **kwargs) as dst:
            data = numpy.zeros((10, 10), dtype=rasterio.uint8)
            data[0:6, 0:6] = 255
            dst.write_band(1, data)

        with rasterio.open(str(tmpdir.join('a.tif')), 'w', **kwargs) as dst:
            data = numpy.zeros((10, 10), dtype=rasterio.uint8)
            data[4:8, 4:8] = 254
            dst.write_band(1, data)

    return tmpdir
Code Example #26
def calc_monthly_average(basepath, filename, layers_by_month, epsg="3857"):
    print "-----Averages"

    if not os.path.exists(basepath):
        os.makedirs(basepath)

    for month in layers_by_month:
        output_path = basepath + "/" + filename + "_" + month + "_" + epsg + ".tif"

        data = None
        kwargs = None

        print "Processing: ", str(month)
        for f in layers_by_month[month]:
            print "Reading: ",  f
            r = rasterio.open(f)

            if data is None:
                data, kwargs = initialize_rasterio_raster(r, rasterio.float32)

            data = data + r.read_band(1).astype(float)

        data = data / len(layers_by_month[month])

        # writing
        print "Writing: ", output_path
        with rasterio.open(output_path, 'w', **kwargs) as dst:
            dst.write_band(1, data.astype(rasterio.float32))
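The layers_by_month argument above is a month-keyed mapping of raster paths. A hypothetical sketch of the expected input, assuming the project's initialize_rasterio_raster helper is importable; the paths are placeholders:

layers_by_month = {
    "01": ["temp_2000_01.tif", "temp_2001_01.tif"],
    "02": ["temp_2000_02.tif", "temp_2001_02.tif"],
}
calc_monthly_average("/tmp/monthly_averages", "temp_avg", layers_by_month, epsg="3857")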
Code Example #27
    def run(self):
        out_fname = pjoin(self.out_dir,
                          CONFIG.get('outputs', 'rasterise_filename'))
        ds_list_fname = pjoin(self.out_dir,
                              CONFIG.get('outputs', 'query_filename'))

        with open(ds_list_fname, 'r') as infile:
            ds_list = pickle.load(infile)

        vector_fname = CONFIG.get('work', 'vector_filename')

        img_fname = ds_list[0].datasets[DatasetType.FC25].path
        with rasterio.open(img_fname) as src:
            crs = src.crs
            transform = src.affine
            height = src.height
            width = src.width

        res = rasterise_vector(vector_fname, shape=(height, width),
                               transform=transform, crs=crs)

        kwargs = {'count': 1,
                  'width': width,
                  'height': height,
                  'crs': crs,
                  'transform': transform,
                  'dtype': res.dtype.name,
                  'nodata': 0}

        with rasterio.open(out_fname, 'w', **kwargs) as src:
            src.write(res, 1)

        # We could just set the image as the Luigi completion target...
        with self.output().open('w') as outf:
            outf.write('Complete')
Code Example #28
File: loader.py  Project: idil77soltahanov/hugin-1
    def __next__(self):
        length = len(self)
        if length == 0:
            raise StopIteration()
        if self._curent_position == length:
            if self._loop:
                self.reset()
            else:
                raise StopIteration()

        entry = self._datasets[self._curent_position]
        env = getattr(self, 'rasterio_env', {})
        self._curent_position += 1
        entry_name, entry_components = entry
        new_components = {}
        cache_data = self._cache_data
        use_tensorflow_io = False
        for component_name, component_path in entry_components.items():
            if isinstance(component_path, DatasetReader):
                component_path = component_path.name
            local_component_path = component_path
            url_components = urlparse(component_path)
            if not url_components.scheme:
                cache_data = False
                if url_components.path.startswith('/vsigs/'):
                    cache_data = True  # We should check if we run inside GCP ML Engine
                    use_tensorflow_io = True
                    component_path = url_components.path[6:]
                    component_path = "gs:/" + component_path
            else:
                if url_components.scheme == 'file':
                    local_component_path = url_components.path
                    use_tensorflow_io = False
                    cache_data = False

            with rasterio.Env(**env):
                if use_tensorflow_io:
                    real_path = component_path
                    data = IOUtils.open_file(real_path, "rb").read()
                    if cache_data:
                        hash = sha224(component_path.encode("utf8")).hexdigest()
                        hash_part = "/".join(list(hash)[:3])
                        dataset_path = os.path.join(self._temp_dir, hash_part)
                        if not IOUtils.file_exists(dataset_path):
                            IOUtils.recursive_create_dir(dataset_path)
                        dataset_path = os.path.join(dataset_path, os.path.basename(component_path))
                        if not IOUtils.file_exists(dataset_path):
                            f = IOUtils.open_file(dataset_path, "wb")
                            f.write(data)
                            f.close()
                        component_src = rasterio.open(dataset_path)
                    else:
                        with NamedTemporaryFile() as tmpfile:
                            tmpfile.write(data)
                            tmpfile.flush()
                            component_src = rasterio.open(tmpfile.name)
                else:
                    component_src = rasterio.open(local_component_path)
                new_components[component_name] = component_src
        return (entry_name, new_components)
Code Example #29
File: test_warp.py  Project: robintw/rasterio
def test_warp_from_to_file(tmpdir):
    """File to file"""
    tiffname = str(tmpdir.join('foo.tif'))
    with rasterio.open('rasterio/tests/data/RGB.byte.tif') as src:
        dst_transform = [-8789636.708, 300.0, 0.0, 2943560.235, 0.0, -300.0]
        dst_crs = dict(
                    proj='merc',
                    a=6378137,
                    b=6378137,
                    lat_ts=0.0,
                    lon_0=0.0,
                    x_0=0.0,
                    y_0=0,
                    k=1.0,
                    units='m',
                    nadgrids='@null',
                    wktext=True,
                    no_defs=True)
        kwargs = src.meta.copy()
        kwargs.update(
            transform=dst_transform,
            crs=dst_crs)
        with rasterio.open(tiffname, 'w', **kwargs) as dst:
            for i in (1, 2, 3):
                reproject(rasterio.band(src, i), rasterio.band(dst, i))
Code Example #30
def run( args ):
	# unpack some args being passed into the run wrapper
	x = args['x']
	y = args['y']
	z = args['z']
	meshgrid_10min = args['meshgrid_10min']
	output_filename_10min = args['output_filename_10min']
	meta_10min = args['meta_10min']
	meta_10min.update( compress='lzw' )

	# interpolate to a global (unmasked) 10 min output
	new_grid = interpolate_to_grid( x, y, z, meshgrid_10min, output_filename_10min, meta_10min )
	new_grid = rasterio.open( output_filename_10min ).read( 1 ) # read it back in << this is a hack!

	# unpack some args being passed in
	template_raster = args['template_raster']
	src_affine = args['meta_10min']['affine']
	dst_affine = template_raster.affine
	method = 'cubic_spline'
	output_filename = args['output_filename']
	src_crs = {'init':'epsg:4326'}
	dst_crs = {'init':'epsg:3338'}
	meta_akcan = template_raster.meta
	meta_akcan.update( compress='lzw', crs=dst_crs  )
	mask = args['mask']

	# regrid to 3338 and AKCAN extent -- currently hardwired
	akcan = regrid( new_grid, template_raster.read( 1 ), src_affine, src_crs, dst_affine, dst_crs, method=method )
	with rasterio.open( output_filename, 'w', **meta_akcan ) as out:
		if mask is not None:
			akcan[ mask == 0 ] = meta_akcan[ 'nodata' ]
		out.write( akcan.astype( np.float32 ), 1 ) # watch this hardwired type!
	return output_filename
Code Example #31
File: decimate.py  Project: v0lat1le/rasterio
import os.path
import subprocess
import tempfile

import rasterio

with rasterio.Env():

    with rasterio.open('tests/data/RGB.byte.tif') as src:
        b, g, r = (src.read(k) for k in (1, 2, 3))
        meta = src.meta

    tmpfilename = os.path.join(tempfile.mkdtemp(), 'decimate.tif')

    meta.update(width=src.width // 2, height=src.height // 2)

    with rasterio.open(tmpfilename, 'w', **meta) as dst:
        for k, a in [(1, b), (2, g), (3, r)]:
            dst.write(a, indexes=k)

    outfilename = os.path.join(tempfile.mkdtemp(), 'decimate.jpg')

    rasterio.copy(tmpfilename, outfilename, driver='JPEG', quality='30')

info = subprocess.call(['open', outfilename])
Code Example #32
File: io.py  Project: waynedou/datacube-core
def write_gtiff(fname,
                pix,
                crs='epsg:3857',
                resolution=(10, -10),
                offset=(0.0, 0.0),
                nodata=None,
                overwrite=False,
                blocksize=None,
                gbox=None,
                **extra_rio_opts):
    """ Write ndarray to GeoTiff file.

    Geospatial info can be supplied either via
    - resolution, offset, crs
    or
    - gbox (takes precedence if supplied)
    """
    # pylint: disable=too-many-locals

    from affine import Affine
    import rasterio
    from pathlib import Path

    if pix.ndim == 2:
        h, w = pix.shape
        nbands = 1
        band = 1
    elif pix.ndim == 3:
        nbands, h, w = pix.shape
        band = tuple(i for i in range(1, nbands + 1))
    else:
        raise ValueError('Need 2d or 3d ndarray on input')

    if not isinstance(fname, Path):
        fname = Path(fname)

    if fname.exists():
        if overwrite:
            fname.unlink()
        else:
            raise IOError("File exists")

    if gbox is not None:
        assert gbox.shape == (h, w)

        A = gbox.transform
        crs = str(gbox.crs)
    else:
        sx, sy = resolution
        tx, ty = offset

        A = Affine(sx, 0, tx, 0, sy, ty)

    rio_opts = dict(width=w,
                    height=h,
                    count=nbands,
                    dtype=pix.dtype.name,
                    crs=crs,
                    transform=A,
                    predictor=2,
                    compress='DEFLATE')

    if blocksize is not None:
        rio_opts.update(tiled=True,
                        blockxsize=min(blocksize, w),
                        blockysize=min(blocksize, h))

    if nodata is not None:
        rio_opts.update(nodata=nodata)

    rio_opts.update(extra_rio_opts)

    with rasterio.open(str(fname), 'w', driver='GTiff', **rio_opts) as dst:
        dst.write(pix, band)
        meta = dst.meta

    meta['gbox'] = gbox if gbox is not None else rio_geobox(meta)
    meta['path'] = fname
    return SimpleNamespace(**meta)
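A minimal sketch of calling write_gtiff with a synthetic single-band array; the file name, CRS, and pixel values are placeholders:

import numpy as np

pix = np.zeros((256, 256), dtype='uint8')          # synthetic single-band image
meta = write_gtiff('example.tif', pix,
                   crs='epsg:3857',
                   resolution=(10, -10),
                   offset=(0.0, 0.0),
                   nodata=0,
                   blocksize=128)
print(meta.path, meta.width, meta.height)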
Code Example #33
File: geoio.py  Project: WIEQLI/uncover-ml
def resample(input_tif, output_tif, ratio, resampling=5):
    """
    Parameters
    ----------
    input_tif: str or rasterio.io.DatasetReader
        input file path or rasterio.io.DatasetReader object
    output_tif: str
        output file path
    ratio: float
        ratio by which to shrink/expand
        ratio > 1 means shrink
    resampling: int, optional
        default is 5 (average) resampling. Other options are as follows:
        nearest = 0
        bilinear = 1
        cubic = 2
        cubic_spline = 3
        lanczos = 4
        average = 5
        mode = 6
        gauss = 7
        max = 8
        min = 9
        med = 10
        q1 = 11
        q3 = 12
    """
    src = rasterio.open(input_tif, mode='r')

    nodatavals = src.get_nodatavals()
    new_shape = round(src.height / ratio), round(src.width / ratio)
    # adjust the new affine transform to the smaller cell size
    aff = Affine.from_gdal(*src.get_transform())
    newaff = aff * Affine.scale(ratio)

    dest = rasterio.open(output_tif,
                         'w',
                         driver='GTiff',
                         height=new_shape[0],
                         width=new_shape[1],
                         count=src.count,
                         dtype=rasterio.float32,
                         crs=src.crs,
                         transform=newaff,
                         nodata=nodatavals[0])

    for b in range(src.count):
        arr = src.read(b + 1)
        new_arr = np.empty(shape=new_shape, dtype=arr.dtype)
        reproject(arr,
                  new_arr,
                  src_transform=aff,
                  dst_transform=newaff,
                  src_crs=src.crs,
                  src_nodata=nodatavals[b],
                  dst_crs=src.crs,
                  dst_nodata=nodatavals[b],
                  resampling=resampling)
        dest.write(new_arr, b + 1)
    src.close()
    dest.close()
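A hedged usage sketch for resample, shrinking a raster by a factor of two with the default average resampling (5); the file names are placeholders and rasterio is assumed to be imported:

resample('covariate.tif', 'covariate_half.tif', ratio=2.0, resampling=5)

with rasterio.open('covariate_half.tif') as chk:
    print(chk.width, chk.height)   # roughly half of the input dimensions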
Code Example #34
File: geoio.py  Project: WIEQLI/uncover-ml
    def __init__(self,
                 shape,
                 bbox,
                 crs,
                 name,
                 n_subchunks,
                 outpath,
                 band_tags=None,
                 independent=False,
                 **kwargs):
        """
        pass in additional geotif write options in kwargs
        """
        # affine
        self.A, _, _ = image.bbox2affine(bbox[1, 0], bbox[0, 0], bbox[0, 1],
                                         bbox[1, 1], shape[0], shape[1])
        self.shape = shape
        self.outbands = len(band_tags)
        self.bbox = bbox
        self.name = name
        self.outpath = outpath
        self.n_subchunks = n_subchunks
        self.independent = independent  # mpi control
        self.sub_starts = [
            k[0]
            for k in np.array_split(np.arange(self.shape[1]), mpiops.chunks *
                                    self.n_subchunks)
        ]

        # file tags don't have spaces
        if band_tags:
            file_tags = ["_".join(k.lower().split()) for k in band_tags]
        else:
            file_tags = [str(k) for k in range(self.outbands)]
            band_tags = file_tags

        files = []
        file_names = []

        if mpiops.chunk_index == 0:
            for band in range(self.outbands):
                output_filename = self.outpath.format(file_tags[band])
                f = rasterio.open(output_filename,
                                  'w',
                                  driver='GTiff',
                                  width=self.shape[0],
                                  height=self.shape[1],
                                  dtype=np.float32,
                                  count=1,
                                  crs=crs,
                                  transform=self.A,
                                  nodata=self.nodata_value,
                                  **kwargs)
                f.update_tags(1, image_type=band_tags[band])
                files.append(f)
                file_names.append(output_filename)

        if independent:
            self.files = files
        else:
            if mpiops.chunk_index == 0:
                # create a file for each band
                self.files = files
                self.file_names = file_names
            else:
                self.file_names = []

            self.file_names = mpiops.comm.bcast(self.file_names, root=0)
Code Example #35
def generate_tfrecords(shapefile,
                       HSI_sensor_path,
                       RGB_sensor_path,
                       site,
                       elevation,
                       species_label_dict,
                       chunk_size=1000,
                       savedir=".",
                       HSI_size=40,
                       RGB_size=40,
                       classes=20,
                       number_of_sites=23,
                       train=True,
                       extend_box=0,
                       shuffle=True):
    """Yield one instance of data with one hot labels
    Args:
        chunk_size: number of windows per tfrecord
        savedir: directory to save tfrecords
        site: metadata site label as integer
        elevation: height above sea level in meters
        label_dict: taxonID -> numeric label
        RGB_size: size in pixels of one side of image
        HSI_size: size in pixels of one side of image
        train: training mode to include yielded labels
        number_of_sites: total number of sites used for one-hot encoding
        extend_box: units in meters to expand DeepForest bounding box to give crop more context
    Returns:
        filename: tfrecords path
    """
    gdf = geopandas.read_file(shapefile)
    basename = os.path.splitext(os.path.basename(shapefile))[0]
    HSI_src = rasterio.open(HSI_sensor_path)
    RGB_src = rasterio.open(RGB_sensor_path)

    gdf["box_index"] = ["{}_{}".format(basename, x) for x in gdf.index.values]
    labels = []
    HSI_crops = []
    RGB_crops = []
    indices = []

    for index, row in gdf.iterrows():
        #Add training label, ignore unclassified 0 class
        if train:
            labels.append(row["label"])

        try:
            HSI_crop = crop(HSI_src, row["geometry"], extend_box)
            RGB_crop = crop(RGB_src, row["geometry"], extend_box)
        except ValueError:
            continue

        HSI_crops.append(HSI_crop)
        RGB_crops.append(RGB_crop)
        indices.append(row["box_index"])

    #If no species label dict is passed
    if species_label_dict is None:
        #Create and save a new species and site label dict
        unique_species_labels = np.unique(labels)
        species_label_dict = {}
        for index, label in enumerate(unique_species_labels):
            species_label_dict[label] = index
        pd.DataFrame(species_label_dict.items(),
                     columns=["taxonID", "label"]).to_csv(
                         "{}/species_class_labels.csv".format(savedir))

    numeric_species_labels = [species_label_dict[x] for x in labels]

    #shuffle before writing to help with validation data split
    if shuffle:
        if train:
            z = list(zip(HSI_crops, RGB_crops, indices,
                         numeric_species_labels))
            random.shuffle(z)
            HSI_crops, RGB_crops, indices, numeric_species_labels = zip(*z)

    #get keys and divide into chunks for a single tfrecord
    filenames = []
    counter = 0
    for i in range(0, len(HSI_crops) + 1, chunk_size):
        chunk_HSI_crops = HSI_crops[i:i + chunk_size]
        chunk_RGB_crops = RGB_crops[i:i + chunk_size]
        chunk_index = indices[i:i + chunk_size]

        #All records in a single shapefile are the same site
        chunk_sites = np.repeat(site, len(chunk_index))
        chunk_elevations = np.repeat(elevation, len(chunk_index))

        if train:
            chunk_labels = numeric_species_labels[i:i + chunk_size]
        else:
            chunk_labels = None

        #resize crops
        resized_HSI_crops = [
            resize(x, HSI_size, HSI_size).astype("int16")
            for x in chunk_HSI_crops
        ]
        resized_RGB_crops = [
            resize(x, RGB_size, RGB_size).astype("int16")
            for x in chunk_RGB_crops
        ]

        filename = "{}/{}_{}.tfrecord".format(savedir, basename, counter)

        write_tfrecord(filename=filename,
                       HSI_images=resized_HSI_crops,
                       RGB_images=resized_RGB_crops,
                       labels=chunk_labels,
                       sites=chunk_sites,
                       elevations=chunk_elevations,
                       indices=chunk_index,
                       number_of_sites=number_of_sites,
                       classes=classes)

        filenames.append(filename)
        counter += 1

    return filenames
Code Example #36
def optimize_rasters(raster_files: Sequence[Sequence[Path]],
                     output_folder: Path,
                     overwrite: bool = False,
                     resampling_method: str = 'average',
                     reproject: bool = False,
                     in_memory: bool = None,
                     compression: str = 'auto',
                     quiet: bool = False) -> None:
    """Optimize a collection of raster files for use with Terracotta.

    First argument is a list of input files or glob patterns.

    Example:

        $ terracotta optimize-rasters rasters/*.tif -o cloud-optimized/

    Note that all rasters may only contain a single band.
    """
    raster_files_flat = sorted(set(itertools.chain.from_iterable(raster_files)))

    if not raster_files_flat:
        click.echo('No files given')
        return

    rs_method = RESAMPLING_METHODS[resampling_method]

    if compression == 'auto':
        compression = _prefered_compression_method()

    total_pixels = 0
    for f in raster_files_flat:
        if not f.is_file():
            raise click.BadParameter(f'Input raster {f!s} is not a file')

        with rasterio.open(str(f), 'r') as src:
            if src.count > 1 and not quiet:
                click.echo(
                    f'Warning: raster file {f!s} has more than one band. '
                    'Only the first one will be used.', err=True
                )
            total_pixels += src.height * src.width

    output_folder.mkdir(exist_ok=True)

    if not quiet:
        # insert newline for nicer progress bar style
        click.echo('')

    sub_pbar_args = dict(
        disable=quiet,
        leave=False,
        bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}'
    )

    with contextlib.ExitStack() as outer_env:
        pbar = outer_env.enter_context(tqdm.tqdm(
            total=total_pixels, smoothing=0, disable=quiet,
            bar_format='{l_bar}{bar}| [{elapsed}<{remaining}{postfix}]',
            desc='Optimizing rasters'
        ))
        outer_env.enter_context(rasterio.Env(**GDAL_CONFIG))

        for input_file in raster_files_flat:
            if len(input_file.name) > 30:
                short_name = input_file.name[:13] + '...' + input_file.name[-13:]
            else:
                short_name = input_file.name

            pbar.set_postfix(file=short_name)

            output_file = output_folder / input_file.with_suffix('.tif').name

            if not overwrite and output_file.is_file():
                raise click.BadParameter(
                    f'Output file {output_file!s} exists (use --overwrite to ignore)'
                )

            with contextlib.ExitStack() as es, warnings.catch_warnings():
                warnings.filterwarnings('ignore', message='invalid value encountered.*')

                src = es.enter_context(rasterio.open(str(input_file)))

                if reproject:
                    vrt = es.enter_context(_get_vrt(src, rs_method=rs_method))
                else:
                    vrt = src

                profile = vrt.profile.copy()
                profile.update(COG_PROFILE)

                if in_memory is None:
                    in_memory = vrt.width * vrt.height < IN_MEMORY_THRESHOLD

                if in_memory:
                    memfile = es.enter_context(MemoryFile())
                    dst = es.enter_context(memfile.open(**profile))
                else:
                    tempraster = es.enter_context(TemporaryRasterFile(basedir=output_folder))
                    dst = es.enter_context(rasterio.open(tempraster, 'w', **profile))

                # iterate over blocks
                windows = list(dst.block_windows(1))

                for _, w in tqdm.tqdm(windows, desc='Reading', **sub_pbar_args):
                    block_data = vrt.read(window=w, indexes=[1])
                    dst.write(block_data, window=w)
                    block_mask = vrt.dataset_mask(window=w).astype('uint8')
                    dst.write_mask(block_mask, window=w)

                # add overviews
                if not in_memory:
                    # work around bug mapbox/rasterio#1497
                    dst.close()
                    dst = es.enter_context(rasterio.open(tempraster, 'r+'))

                max_overview_level = math.ceil(math.log2(max(
                    dst.height // profile['blockysize'],
                    dst.width // profile['blockxsize']
                )))

                overviews = [2 ** j for j in range(1, max_overview_level + 1)]
                with tqdm.tqdm(desc='Creating overviews', total=1, **sub_pbar_args):
                    dst.build_overviews(overviews, rs_method)

                dst.update_tags(ns='rio_overview', resampling=rs_method.value)

                # copy to destination (this is necessary to push overviews to start of file)
                with tqdm.tqdm(desc='Compressing', total=1, **sub_pbar_args):
                    copy(
                        dst, str(output_file), copy_src_overviews=True,
                        compress=compression, **COG_PROFILE
                    )

            pbar.update(dst.height * dst.width)
Code Example #37
File: plotting.py  Project: hugovk/contextily
def add_basemap(ax,
                zoom=ZOOM,
                url=sources.ST_TERRAIN,
                interpolation=INTERPOLATION,
                attribution_text=ATTRIBUTION,
                **extra_imshow_args):
    '''
    Add a (web/local) basemap to `ax`
    ...

    Arguments
    ---------
    ax                  : AxesSubplot
                          Matplotlib axis with `x_lim` and `y_lim` set in Web
                          Mercator (EPSG=3857)
    zoom                : int/'auto'
                          [Optional. Default='auto'] Level of detail for the
                          basemap. If 'auto', it is calculated automatically.
                          Ignored if `url` is a local file.
    url                 : str
                          [Optional. Default: 'http://tile.stamen.com/terrain/tileZ/tileX/tileY.png']
                          Source url for web tiles, or path to local file. If
                          local, the file is read with `rasterio` and all
                          bands are loaded into the basemap.
    interpolation       : str
                          [Optional. Default='bilinear'] Interpolation
                          algorithm to be passed to `imshow`. See
                          `matplotlib.pyplot.imshow` for further details.
    attribution_text    : str
                          [Optional. Default=''] Text to be added at the
                          bottom of the axis.
    **extra_imshow_args : dict
                          Other parameters to be passed to `imshow`.

    Returns
    -------
    ax                  : AxesSubplot
                          Matplotlib axis with `x_lim` and `y_lim` set in Web
                          Mercator (EPSG=3857) containing the basemap

    Example
    -------

    >>> db = gpd.read_file(ps.examples.get_path('virginia.shp'))\
                .to_crs(epsg=3857)

    Add a web basemap:

    >>> ax = db.plot(alpha=0.5, color='k', figsize=(6, 6))
    >>> ax = ctx.add_basemap(ax, url=url)
    >>> plt.show()

    Or download a basemap to a local file and then plot it:

    >>> url = 'virginia.tiff'
    >>> _ = ctx.bounds2raster(*db.total_bounds, zoom=6, path=url)
    >>> ax = db.plot(alpha=0.5, color='k', figsize=(6, 6))
    >>> ax = ctx.add_basemap(ax, url=url)
    >>> plt.show()

    '''
    # If web source
    if url[:4] == 'http':
        # Extent
        left, right = ax.get_xlim()
        bottom, top = ax.get_ylim()
        # Zoom
        if isinstance(zoom, str) and (zoom.lower() == 'auto'):
            min_ll = _sm2ll(left, bottom)
            max_ll = _sm2ll(right, top)
            zoom = _calculate_zoom(*min_ll, *max_ll)
        image, extent = bounds2img(left,
                                   bottom,
                                   right,
                                   top,
                                   zoom=zoom,
                                   url=url,
                                   ll=False)
    # If local source
    else:
        import rasterio as rio
        # Read extent
        raster = rio.open(url)
        image = np.array([ band for band in raster.read() ])\
                  .transpose(1, 2, 0)
        bb = raster.bounds
        extent = bb.left, bb.right, bb.bottom, bb.top
    # Plotting
    ax.imshow(image,
              extent=extent,
              interpolation=interpolation,
              **extra_imshow_args)
    return ax
Code Example #38
def main(tile_path, label_path, tiles_bbox, car_geometries_json,
         tile_to_car_json):
    car_geometries_file = open(car_geometries_json, "r")
    car_geometries = json.load(car_geometries_file)
    car_geometries_file.close()

    tile_to_car_file = open(tile_to_car_json, "r")
    tile_to_car = json.load(tile_to_car_file)
    tile_to_car_file.close()

    tiles_file = open(tiles_bbox, "r")
    tiles = json.load(tiles_file)
    tiles_file.close()

    for tile in tile_to_car:

        t = find_tile_geometry(tile, tiles)
        t_shape = shape(t)
        src = rasterio.open(os.path.join(tile_path, tile))

        # print("TSHAPE:",t_shape)

        transformer = Transformer.from_crs("epsg:4326", src.crs)
        cars = tile_to_car[tile]
        labels = []
        for car in cars:
            # print("CAR:",car)

            car_shape = shape(car_geometries[car])

            # break
            # print(t_shape.contains(car_shape))
            if car_shape.geom_type == 'Point':

                min_lon, min_lat, max_lon, max_lat = car_shape.bounds

                center_y, center_x = coords_to_pixels(transformer, src,
                                                      min_lon, min_lat)

                pixel_size = 16
                width = pixel_size
                height = pixel_size

                labels.append("0 {} {} {} {}".format(
                    float(center_x / src.width), float(center_y / src.height),
                    float(width / src.width), float(height / src.height)))
            else:

                min_lon, min_lat, max_lon, max_lat = car_shape.bounds
                top_py, top_px = coords_to_pixels(transformer, src, min_lon,
                                                  min_lat)
                bottom_py, bottom_px = coords_to_pixels(
                    transformer, src, max_lon, max_lat)

                print("BOUNDS:", car_shape.bounds)
                print("min max:", min_lon, min_lat, max_lon, max_lat)
                print(top_px, top_py, bottom_px, bottom_py)

                center_x = (top_px + bottom_px) / 2
                center_y = (top_py + bottom_py) / 2

                width = abs(top_px - bottom_px)
                height = abs(top_py - bottom_py)

                # print("width {} height {}".format(width,height))
                # width = top_px - bottom_px
                # height = bottom_py - top_py

                labels.append("0 {} {} {} {}".format(
                    float(center_x / src.width), float(center_y / src.height),
                    float(width / src.width), float(height / src.height)))
            # print("0 {} {} {} {}".format(
            #     float(center_x / src.width),
            #     float(center_y / src.height),
            #     float(width / src.width),
            #     float(height / src.height)
            # ))

        label_file_name = os.path.join(label_path,
                                       tile.replace(".tif", ".txt"))
        print("writing to ", label_file_name, " lines ", len(labels))
        label_file = open(label_file_name, "w")
        label_file.write("\n".join(labels))
        label_file.close()
Code Example #39
File: test_rio_merge.py  Project: dmwelch/rasterio
def test_merge_tiny_intres(tiffs):
    from rasterio.merge import merge
    inputs = [str(x) for x in tiffs.listdir()]
    inputs.sort()
    sources = [rasterio.open(x) for x in inputs]
    merge(sources, res=2)
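For context, a minimal sketch (separate from the test above) of how the tuple returned by rasterio.merge.merge is usually captured and written back out; the input and output paths are placeholders:

import rasterio
from rasterio.merge import merge

sources = [rasterio.open(p) for p in ['a-sw.tif', 'b-ct.tif']]   # placeholder inputs
dest, out_transform = merge(sources, res=2)    # dest has shape (bands, rows, cols)

profile = sources[0].profile.copy()
profile.update(driver='GTiff', count=dest.shape[0], height=dest.shape[1],
               width=dest.shape[2], transform=out_transform)

with rasterio.open('mosaic.tif', 'w', **profile) as dst:
    dst.write(dest)

for src in sources:
    src.close()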
Code Example #40
def worker(data):
    image_file, cars, car_geometries = data
    src = rasterio.open("./raw/tif/" + image_file)

    labels = []
    # transformer = Transformer.from_crs(4326,25833)
    transformer = Transformer.from_crs("epsg:4326", src.crs)
    count = 0
    for cars in car_geometries:
        # print("CAR",cars,car_geometries[cars])

        car_shape = shape(car_geometries[cars])

        for tile in tile_shapes:

            print("\t\t {} contains {} -> {} ".format(
                tile["image"], cars, tile["shape"].contains(car_shape)))

        # min_x, min_y, max_x, max_y = get_bounds(car_geometries[cars])

        min_lon, min_lat, max_lon, max_lat = car_shape.bounds
        top_px, top_py = coords_to_pixels(transformer, src, min_lon, min_lat)
        bottom_px, bottom_py = coords_to_pixels(transformer, src, max_lon,
                                                max_lat)

        # print("BOUNDS:", car_shape.bounds)
        # print("min max:",min_lon,min_lat,max_lon,max_lat)
        # print(top_px,top_py, bottom_px,bottom_py)

        center_x = (top_px + bottom_px) / 2
        center_y = (top_py + bottom_py) / 2

        width = abs(top_px - bottom_px)
        height = abs(top_py - bottom_py)

        # print("width {} height {}".format(width,height))
        # width = top_px - bottom_px
        # height = bottom_py - top_py

        labels.append("0 {} {} {} {}".format(float(center_x / src.width),
                                             float(center_y / src.height),
                                             float(width / src.width),
                                             float(height / src.height)))
        count += 1
        # if count > 0:
        #     break

        # if debug:
        #     print("center x{} center y{}".format(center_x, center_y))
        #     print("width {} height {}".format(width, height))
        #     print('Top Pixel coords: {}, {}'.format(top_px, top_py))
        #     print('Bottom Pixel coords: {}, {}'.format(bottom_px, bottom_py))
        #     window = rasterio.windows.Window(top_px, bottom_py, width, height)
        #     # window = rasterio.windows.from_bounds(min_x,min_y,max_x,max_y,src.transform,width,height)
        #     clip = src.read(window=window)

        #     # You can then write out a new file
        #     meta = {}
        #     meta["driver"] = "PNG"
        #     meta['width'] = width
        #     meta['height'] = height
        #     meta["count"] = 3
        #     meta["dtype"] = "uint8"

        #     dst = rasterio.open('clipped-{}.png'.format(cars), 'w', **meta)
        #     dst.write(clip)
        #     dst.close()

    label_file = open("./raw/labels/" + image_file.replace(".tif", ".txt"),
                      "w")
    for line in labels:
        label_file.write(line + "\n")
    label_file.close()
コード例 #41
ファイル: inputs.py プロジェクト: nguyetlm/Hapi
    def ExtractParametersBoundaries(Basin):
        """
        =====================================================
            ExtractParametersBoundaries(Basin)
        =====================================================

        Parameters
        ----------
        Basin : [Geodataframe]
            geodataframe of the catchment polygon. Make sure that the geodataframe
            contains one row only; if not, merge all the polygons in the shapefile first.

        Returns
        -------
        UB : [list]
            list of the upper bound of the parameters.
        LB : [list]
            list of the lower bound of the parameters.

        the parameters are
            ["tt", "rfcf", "sfcf", "cfmax", "cwh", "cfr", "fc", "beta", "etf",
             "lp", "k0", "k1", "k2", "uzl", "perc", "maxbas", "K_muskingum",
             "x_muskingum"]
        """
        ParametersPath = os.path.dirname(Hapi.__file__)
        ParametersPath = ParametersPath + "/Parameters"
        ParamList = [
            "01_tt", "02_rfcf", "03_sfcf", "04_cfmax", "05_cwh", "06_cfr",
            "07_fc", "08_beta", "09_etf", "10_lp", "11_k0", "12_k1", "13_k2",
            "14_uzl", "15_perc", "16_maxbas", "17_K_muskingum",
            "18_x_muskingum"
        ]

        raster = rasterio.open(ParametersPath + "/max/" + ParamList[0] +
                               ".tif")
        Basin = Basin.to_crs(crs=raster.crs)
        # max values
        UB = list()
        for i in range(len(ParamList)):
            raster = rasterio.open(ParametersPath + "/max/" + ParamList[i] +
                                   ".tif")
            array = raster.read(1)
            affine = raster.transform
            UB.append(
                zonal_stats(Basin, array, affine=affine, stats=['max'])[0]
                ['max'])  #stats=['min', 'max', 'mean', 'median', 'majority']

        # min values
        LB = list()
        for i in range(len(ParamList)):
            raster = rasterio.open(ParametersPath + "/min/" + ParamList[i] +
                                   ".tif")
            array = raster.read(1)
            affine = raster.transform
            LB.append(
                zonal_stats(Basin, array, affine=affine,
                            stats=['min'])[0]['min'])

        Par = pd.DataFrame(index=ParamList)

        Par['UB'] = UB
        Par['LB'] = LB
        # plot the given basin with the parameters raster
        ax = show((raster, 1), with_bounds=True)
        Basin.plot(facecolor='None', edgecolor='blue', linewidth=2, ax=ax)
        # ax.set_xbound([Basin.bounds.loc[0,'minx']-10,Basin.bounds.loc[0,'maxx']+10])
        # ax.set_ybound([Basin.bounds.loc[0,'miny']-1, Basin.bounds.loc[0,'maxy']+1])

        return Par
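
A minimal usage sketch for the method above, assuming it is exposed on Hapi's Inputs class; the import path and the shapefile name are hypothetical:

import geopandas as gpd
from Hapi.inputs import Inputs  # assumed import path

basin = gpd.read_file("basin.shp")  # hypothetical single-polygon catchment
Par = Inputs.ExtractParametersBoundaries(basin)  # DataFrame indexed by parameter name
print(Par.loc["07_fc", ["UB", "LB"]])  # upper and lower bound for field capacity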
コード例 #42
import numpy as np
import rasterio as rio
import matplotlib.pyplot as plt

# private
import read_POLSAR as pol

# * * * *  * * # * * * *  * * # * * * *  * *# # * *   define input files   # * * * *  * * # * * * *  * * # * * * *  * *# # ** * *  * * # * * * *  * * # * * * *  * *#

#open the original image as array
#set files Area1
fileVV = '/mnt/usr1/home/prakhar/Research/AQM_research/Data/Data_process/PALSAR2/Subset/subset_0_of_ALOS2-HBQR1_5RUA-ORBIT__ALOS2105520569-160506_Spk.data/Amplitude_VV.img'
fileHV = '/mnt/usr1/home/prakhar/Research/AQM_research/Data/Data_process/PALSAR2/Subset/subset_0_of_ALOS2-HBQR1_5RUA-ORBIT__ALOS2105520569-160506_Spk.data/Amplitude_HV.img'
fileHH = '/mnt/usr1/home/prakhar/Research/AQM_research/Data/Data_process/PALSAR2/Subset/subset_0_of_ALOS2-HBQR1_5RUA-ORBIT__ALOS2105520569-160506_Spk.data/Amplitude_HH.img'

#set empty base image
baseimage = np.zeros(
    [3,
     rio.open(fileVV).read(1).shape[0],
     rio.open(fileVV).read(1).shape[1]])

#set values in baseimage
baseimage[0, :] = rio.open(fileHH).read(1)
baseimage[1, :] = rio.open(fileHV).read(1)
baseimage[2, :] = (baseimage[0, :] - baseimage[1, :]) / (baseimage[0, :] +
                                                         baseimage[1, :])

arrimg = np.load('baseimageA1.npy')
plt.figure()
plt.imshow(arrimg)

#open the stored segmented image
arrseg = np.load('segmented_meanshiftA1.npy')
plt.figure()
コード例 #43
ファイル: bands.py プロジェクト: ozak/rasterio
def stack(ctx, files, driver, bidx, photometric):
    """Stack a number of bands from one or more input files into a
    multiband dataset.

    Input datasets must be of a kind: same data type, dimensions, etc. The
    output is cloned from the first input.

    By default, rio-stack will take all bands from each input and write them
    in same order to the output. Optionally, bands for each input may be
    specified using a simple syntax:

      --bidx N takes the Nth band from the input (first band is 1).

      --bidx M,N,O takes bands M, N, and O.

      --bidx M..O takes bands M-O, inclusive.

      --bidx ..N takes all bands up to and including N.

      --bidx N.. takes all bands from N to the end.

    Examples using the Rasterio testing dataset, each producing a copy:

      rio stack RGB.byte.tif -o stacked.tif

      rio stack RGB.byte.tif --bidx 1,2,3 -o stacked.tif

      rio stack RGB.byte.tif --bidx 1..3 -o stacked.tif

      rio stack RGB.byte.tif --bidx ..2 RGB.byte.tif --bidx 3.. -o stacked.tif

    """
    import numpy as np

    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 2
    logger = logging.getLogger('rio')
    try:
        with rasterio.drivers(CPL_DEBUG=verbosity > 2):
            output = files[-1]
            files = files[:-1]
            output_count = 0
            indexes = []
            for path, item in zip_longest(files, bidx, fillvalue=None):
                with rasterio.open(path) as src:
                    src_indexes = src.indexes
                if item is None:
                    indexes.append(src_indexes)
                    output_count += len(src_indexes)
                elif '..' in item:
                    start, stop = map(lambda x: int(x) if x else None,
                                      item.split('..'))
                    if start is None:
                        start = 1
                    indexes.append(src_indexes[slice(start - 1, stop)])
                    output_count += len(src_indexes[slice(start - 1, stop)])
                else:
                    parts = list(map(int, item.split(',')))
                    if len(parts) == 1:
                        indexes.append(parts[0])
                        output_count += 1
                    else:
                        indexes.append(parts)
                        output_count += len(parts)

            with rasterio.open(files[0]) as first:
                kwargs = first.meta
                kwargs['transform'] = kwargs.pop('affine')

            kwargs.update(driver=driver, count=output_count)

            if photometric:
                kwargs['photometric'] = photometric

            with rasterio.open(output, 'w', **kwargs) as dst:
                dst_idx = 1
                for path, index in zip(files, indexes):
                    with rasterio.open(path) as src:
                        if isinstance(index, int):
                            data = src.read(index)
                            dst.write(data, dst_idx)
                            dst_idx += 1
                        elif isinstance(index, list):
                            data = src.read(index)
                            dst.write(data, range(dst_idx,
                                                  dst_idx + len(index)))
                            dst_idx += len(index)

        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
コード例 #44
ファイル: inputs.py プロジェクト: nguyetlm/Hapi
    def ExtractParameters(src, scenario, AsRaster=False, SaveTo=''):
        """
        =====================================================
            ExtractParameters(Basin)
        =====================================================
        The ExtractParameters method extracts the parameter rasters at the location
        of the source raster. There are 13 sets of parameters: 10 sets from
        Beck et al. (2016) plus the max, min and average of all sets.


        Beck, H. E., van Dijk, A. I. J. M., de Roo, A., Miralles, D. G.,
        McVicar, T. R., Schellekens, J. and Bruijnzeel, L. A. (2016) Global-scale
        regionalization of hydrologic model parameters, 3599-3622.
        doi:10.1002/2015WR018247

        Parameters
        ----------
        src : [Geodataframe]
            geodataframe of the catchment polygon. Make sure that the geodataframe
            contains one row only; if not, merge all the polygons in the shapefile first.

        scenario : [str]
            name of the parameter set, there are 13 sets of parameters
            ["1","2","3","4","5","6","7","8","9","10","avg","max","min"]

        Returns
        -------
        Parameters : [list]
            list of the parameter values extracted at the catchment location.

        the parameters are
            ["tt", "rfcf", "sfcf", "cfmax", "cwh", "cfr", "fc", "beta", "etf",
             "lp", "k0", "k1", "k2", "uzl", "perc", "maxbas", "K_muskingum",
             "x_muskingum"]
        """
        ParametersPath = os.path.dirname(Hapi.__file__)
        ParametersPath = ParametersPath + "/Parameters/" + scenario
        ParamList = [
            "01_tt", "02_rfcf", "03_sfcf", "04_cfmax", "05_cwh", "06_cfr",
            "07_fc", "08_beta", "09_etf", "10_lp", "11_k0", "12_k1", "13_k2",
            "14_uzl", "15_perc", "16_maxbas", "17_K_muskingum",
            "18_x_muskingum"
        ]

        if not AsRaster:
            raster = rasterio.open(ParametersPath + "/" + ParamList[0] +
                                   ".tif")
            src = src.to_crs(crs=raster.crs)
            # max values
            Par = list()
            for i in range(len(ParamList)):
                raster = rasterio.open(ParametersPath + "/" + ParamList[i] +
                                       ".tif")
                array = raster.read(1)
                affine = raster.transform
                Par.append(
                    zonal_stats(src, array, affine=affine,
                                stats=['max'])[0]['max']
                )  #stats=['min', 'max', 'mean', 'median', 'majority']

            # plot the given basin with the parameters raster

            # Plot DEM
            ax = show((raster, 1), with_bounds=True)
            src.plot(facecolor='None', edgecolor='blue', linewidth=2, ax=ax)
            # ax.set_xbound([Basin.bounds.loc[0,'minx']-10,Basin.bounds.loc[0,'maxx']+10])
            # ax.set_ybound([Basin.bounds.loc[0,'miny']-1, Basin.bounds.loc[0,'maxy']+1])

            return Par
        else:
            Inputs.PrepareInputs(src, ParametersPath + "/", SaveTo)
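
Similarly, a hedged usage sketch for this method; the Inputs class name and file path are assumptions, as above:

import geopandas as gpd
from Hapi.inputs import Inputs  # assumed import path

basin = gpd.read_file("basin.shp")  # hypothetical catchment polygon
values = Inputs.ExtractParameters(basin, "avg")  # one value per name in ParamList
print(len(values))  # expected: 18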
コード例 #45
    "data/cold-springs-fire/landsat_collect/LC080340322016072301T1-SC20180214145802/crop/*band*.tif"
)

"this compiles and sorts the landsat bands"
all_landsat_bands.sort()
"""setting our output file path"""
landsat_post_fire_path = "data/cold-springs-fire/outputs/landsat_post_fire.tif"
"""es.stack will stack all bands on top of one another.
making raster values more easily comparable."""
es.stack_raster_tifs(all_landsat_bands, landsat_post_fire_path)

# we can clip the files but in this case we use .read()
"""the next 5 lines will open the file, set up profile, set up bounding box,
our plotting extent, and validates that our data has values assigned to array"""

with rio.open(landsat_post_fire_path) as src:
    landsat_post_fire = src.read(masked=True)
    landsat_post_meta = src.profile
    landsat_post_bounds = src.bounds
    landsat_extent = plotting_extent(src)

# Open fire boundary layer and reproject it to match the Landsat data
fire_boundary_path = "data/cold-springs-fire/vector_layers/fire-boundary-geomac/co_cold_springs_20160711_2200_dd83.shp"
fire_boundary = gpd.read_file(fire_boundary_path)

# If the CRS' are not the same be sure to reproject
"""this reprojects the coordinate reference system for the fire boundary"""
fire_bound_utmz13 = fire_boundary.to_crs(landsat_post_meta['crs'])

# calculate the post-fire NBR and plot the results
コード例 #46
#!/usr/bin/env python

import os
import subprocess
import numpy
import rasterio
from rasterio.features import sieve, shapes

os.chdir('/projectnb/landsat/projects/Colombia/Mosaics/M2B/')

# Register GDAL and OGR drivers.
with rasterio.drivers():

    # Read a raster to be sieved.
    with rasterio.open('2001-01-01_seq.tif') as src:
        shade = src.read(1)

    # Sieve out features 4 pixels or smaller.
    sieved = sieve(shade, 4, out=numpy.zeros(src.shape, src.dtypes[0]))

    # Print the number of shapes in the sieved raster.
    print("Sieved (4) shapes: %d" % len(list(shapes(sieved))))

    # Write out the sieved raster.
    kwargs = src.meta
    kwargs['transform'] = kwargs.pop('affine')
    with rasterio.open('example-sieved.tif', 'w', **kwargs) as dst:
        dst.write(sieved, indexes=1)

# Dump out gdalinfo's report card and open (or "eog") the TIFF.
#print(subprocess.check_output(
コード例 #47
def WriteOutput(var, var_name, in_folder):
    # this function writes tif files based on a model file, here "Oa01"
    # it opens the file for writing

    with rio.open(in_folder + var_name + '.tif', 'w+', **meta) as dst:
        dst.write(var.astype('float32'), 1)
コード例 #48
import rasterio, rasterio.mask, fiona, argparse

# create the arguments for the algorithm
parser = argparse.ArgumentParser()

# set arguments
parser.add_argument('-r','--inputRaster', help='Input raster', type=str, required=True)
parser.add_argument('-v','--inputVector', help='Input vector', type=str, required=True)
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = vars(parser.parse_args())


# set argument
raster = args["inputRaster"]
shp = args["inputVector"]

# read the clip geometries from the vector file
print("Clipping raster...")
with fiona.open(shp) as vector:
    shapes = [feature["geometry"] for feature in vector]

# open and crop raster
with rasterio.open(raster) as src:
    img, transform = rasterio.mask.mask(src, shapes, crop=True)
    meta = src.meta.copy()
    baseName = src.name

# update the metadata to match the cropped array
meta.update(height=img.shape[1], width=img.shape[2], transform=transform)

# output name
output = baseName[:-4] + "_mask.tif"

# save
with rasterio.open(output, 'w', **meta) as dst:
    dst.write(img)
print("Done!")
コード例 #49
]

#pick one item in the maplist at the time
myMap = maplist[int(sys.argv[1])]

#loop through the utm10 polygons
for index, row in utm.iterrows():
    #is the polygon on the coastline?
    if row['LEHTITUNNU'][0:2] == myMap:
        # construct the file path of the DEM file
        filepath = os.path.join(rootdir, row.LEHTITUNNU[0:2],
                                row.LEHTITUNNU[0:3], row.LEHTITUNNU + '.tif')
        # check whether the DEM file exists at that location
        if os.path.isfile(filepath):
            # open the demfile and mask it with the intersecting sea polygons
            with rasterio.open(filepath) as demdata:
                bounds = demdata.bounds
                with fiona.open(seafp, layer='meri') as sea:
                    hits = sea.items(bbox=(bounds[0], bounds[1], bounds[2],
                                           bounds[3]))
                    items = [i for i in hits]
                if len(items) > 0:
                    geoms = [item[1]['geometry'] for item in items]
                    demarr, out_transform = mask(demdata, geoms, invert=True)
                else:
                    demarr = demdata.read()
                demarr[demarr < 0] = 0
                # If, after masking, the min value of the km2 file is below 7.3 m and the max value is above 0, continue
                if demarr.min() <= 7.3 and demarr.max() > 0:
                    # use the bounds of the DEM file to create a grid and interpolate the isostacy data to it
                    print(row.LEHTITUNNU, 'in km2')
コード例 #50
            'init': 'epsg:4326'
        },
        'count': count,
        'height': height,
        'width': width,
        'driver': 'GTiff',
        'dtype': 'float64',
        'affine': affine
    }

    output_filename = os.path.join(
        output_path,
        '{}_{}_{}_multiband_anomalies.tif'.format(variable, model, scenario))

    # write anom to disk
    with rasterio.open(output_filename, 'w', **meta) as rst:
        rst.write(anomalies.values)

    # write climatologies to disk
    # make some metadata
    count, height, width = climatology.shape
    affine = transform_from_latlon(ds.lat, ds.lon)
    meta = {
        'crs': {
            'init': 'epsg:4326'
        },
        'count': count,
        'height': height,
        'width': width,
        'driver': 'GTiff',
        'dtype': 'float64',
コード例 #51
ファイル: test_roi.py プロジェクト: valpasq/yatsm
def training_raster():
    # dataset is 1 band and all values == 255
    ds = rasterio.open(TRAINING_RASTER)
    return ds
コード例 #52
import numpy as np
from numpy import genfromtxt
import sice_lib as sl
import rasterio as rio
import time
import sys
from constants import w, bai, sol1_clean, sol2, sol3_clean, sol1_pol, sol3_pol, asol
np.seterr(invalid='ignore')

start_time = time.process_time()

InputFolder = sys.argv[1] + '/'

#%% ========= input tif ================
Oa01 = rio.open(InputFolder + 'r_TOA_01.tif')
meta = Oa01.meta
with rio.Env():
    meta.update(compress='DEFLATE')


def WriteOutput(var, var_name, in_folder):
    # this function writes tif files based on a model file, here "Oa01"
    # it opens the file for writing

    with rio.open(in_folder + var_name + '.tif', 'w+', **meta) as dst:
        dst.write(var.astype('float32'), 1)


toa = np.tile(Oa01.read(1) * np.nan, (21, 1, 1))
コード例 #53
ファイル: prepare.py プロジェクト: gitter-badger/ap-latam
def get_raster_band_count(raster_path):
    """Return band count of +raster_path+"""
    with rasterio.open(raster_path) as dataset:
        return dataset.count
コード例 #54
def mock_rasterio_open(band):
    """Mock rasterio Open for Sentinel2 dataset."""
    assert band.startswith("s3://sentinel-s2-l")
    band = band.replace("s3://sentinel-s2", SENTINEL_BUCKET)
    return rasterio.open(band)
コード例 #55
def sample_single_channel_raster_file(path_raster, x, y, xy_crs):
    """Sample data from a geotiff file taking care to transform
    coordinates. Returns numpy array (Npos, Mchannels)."""
    with rasterio.open(path_raster) as raster:
        return sample_raster(raster, x, y, xy_crs).T[0]
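
The sample_raster helper is not shown in this snippet; a minimal sketch of what such a helper might look like, assuming rasterio.warp.transform for the coordinate conversion and DatasetReader.sample for the point queries:

import numpy as np
from rasterio.warp import transform

def sample_raster(raster, x, y, xy_crs):
    # reproject the query points into the raster's CRS, then read all bands there
    xs, ys = transform(xy_crs, raster.crs, np.atleast_1d(x), np.atleast_1d(y))
    return np.array(list(raster.sample(zip(xs, ys))))  # shape (Npos, Nbands)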
コード例 #56
import sys

import numpy as np
import rasterio as rio

bands = []
for i in sys.argv[1:]:
    with rio.open('nyc/B%s.TIF' % i) as raster:
        bands.append(raster.read(1))
        profile = raster.meta


def correct(img):
    mask = np.logical_and(img > 0, img < 65535)
    top = np.percentile(img[mask], 99.9)
    bottom = np.percentile(img[mask], 0.01)
    scaled = (img - bottom) / float(top - bottom) * 65535
    return np.clip(scaled, 0, 65535).astype(np.uint16)

img = correct(np.array(bands))

profile['count'] = img.shape[0]
profile['photometric'] = 'RGB'
with rio.open('nyc/%s.tif' % (''.join(sys.argv[1:])), 'w', **profile) as dest:
    dest.write(img)
コード例 #57
import pyproj
from shapely.ops import transform
from shapely.geometry import Point, LineString
import pickle
from functools import partial
from bulletin import Bulletin
import itertools
import numpy as np
import pandas as pd
import rasterio
import xarray as xr
from tqdm import tqdm

from geodesiclinestogis.geodesicline2gisfile import GeodesicLine2Gisfile
line_builder = GeodesicLine2Gisfile(loglevel='ERROR')

colors = {'WARM': 1, 'COLD': 2, 'STNRY': 3, 'OCFNT': 4, 'TROF': 5}

with rasterio.open('example.grib') as example:
    crs = example.crs
    tf = example.transform
    width = example.width
    height = example.height
project = partial(
    pyproj.transform,
    pyproj.Proj(init='epsg:4326'),  # source coordinate system
    pyproj.Proj(crs))  # destination coordinate system
with open('data.bin', 'rb') as inf:
    bulletins = pickle.load(inf)
data = []
for date, bulletin in tqdm(bulletins.items()):
    result = np.zeros((height, width), dtype=np.uint8)
    for front in bulletin.fronts:
        if front[0] == 'TROF':
コード例 #58
def rasterio_geo_crop(outpath,
                      inpath,
                      ulx,
                      uly,
                      lrx,
                      lry,
                      epsg=None,
                      output_type=None,
                      debug=False):
    """
    Write a crop to disk from an input image, given the coordinates of the geographical
    bounding box.

    Args:
        outpath (str): path to the output crop
        inpath (str): path to the input image
        ulx, uly, lrx, lry (float): geographical coordinates of the crop bounding box
        epsg (int): EPSG code of the coordinate system in which the bounding box
            coordinates are expressed. If None, it is assumed that the coordinates
            are expressed in the CRS of the input image.
        output_type (str): output type of the crop
    """
    gdal_options = dict()

    # these GDAL configuration options speed up the access to remote files
    if inpath.startswith(("http://", "https://", "s3://")):
        _, file_ext = os.path.splitext(inpath)
        file_ext = file_ext[1:]  # Remove the leading dot from file_ext
        gdal_options["CPL_VSIL_CURL_ALLOWED_EXTENSIONS"] = file_ext
        gdal_options["GDAL_DISABLE_READDIR_ON_OPEN"] = "EMPTY_DIR"
        gdal_options["VSI_CACHE"] = "TRUE"
        gdal_options[
            "GDAL_HTTP_MAX_RETRY"] = "100"  # needed for storage.googleapis.com 503
        gdal_options["GDAL_HTTP_RETRY_DELAY"] = "1"

    if debug:
        left = ulx
        bottom = lry
        right = lrx
        top = uly
        print(
            'AWS_REQUEST_PAYER=requester rio clip {} {} --bounds "{} {} {} {}" --geographic'
            .format(inpath, outpath, left, bottom, right, top))
        #print('AWS_REQUEST_PAYER=requester gdal_translate /vsis3/{} {} -projwin {} {} {} {}'.format(inpath[5:], outpath, ulx, uly, lrx, lry))

    if inpath.startswith("s3://"):
        session = rasterio.session.AWSSession(requester_pays=True)
    else:
        session = None

    bounds = ulx, lry, lrx, uly
    with rasterio.Env(session=session, **gdal_options):
        try:
            with rasterio.open(inpath) as src:

                # Convert the bounds to the CRS of inpath if epsg is given
                if epsg:
                    bounds = rasterio.warp.transform_bounds(
                        epsg, src.crs, *bounds)

                # Get the pixel coordinates of the bounds in inpath
                window = src.window(*bounds)

                # Do a "floor" operation on offsets to match what gdal_translate does
                window = window.round_offsets()

                # Do a "round" operation on lengths to match what gdal_translate does
                width = round(window.width)
                height = round(window.height)
                window = rasterio.windows.Window(window.col_off,
                                                 window.row_off, width, height)

                profile = src.profile
                transform = src.window_transform(window)
                crop = rasterio_window_crop(src, window.col_off,
                                            window.row_off, width, height)

        except rasterio.errors.RasterioIOError:
            print("WARNING: download of {} failed".format(inpath))
            return

        profile.update({
            "driver": "GTiff",
            "compress": "deflate",
            "height": height,
            "width": width,
            "transform": transform
        })
        if output_type:
            profile["dtype"] = output_type.lower()

        with rasterio.open(outpath, "w", **profile) as out:
            out.write(crop)
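
A hedged invocation sketch for rasterio_geo_crop; the URL, bounding box and data type below are made up for illustration:

rasterio_geo_crop(
    outpath="crop.tif",
    inpath="https://example.com/scene.tif",  # hypothetical remote GeoTIFF
    ulx=-105.60, uly=40.10, lrx=-105.30, lry=39.90,  # lon/lat corners
    epsg=4326,
    output_type="uint16",
)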
コード例 #59
ファイル: downscale.py プロジェクト: paolof89/HRM
def downscale(config, request):

    country = request.form['country']
    algorithm = request.form['algorithm']

    # country raster --------------------------------------
    # use the country 2 raster app to generate new ones: https://countrytoraster.herokuapp.com/
    raster = '{}_0.01_4326_1.tif'.format(country)
    local_raster = 'temp/' + raster
    print('-> getting raster ', raster)
    # download from AWS S3
    import boto3
    bucket_name = config['rasters_bucket']
    s3 = boto3.resource('s3')
    s3.Bucket(bucket_name).download_file(raster, local_raster)
    print('-> raster loaded.')

    # load dataset from input -------------------------------
    print('-> loading dataset from input form...')
    data = pd.read_csv(request.files['file'])

    # load relative raster
    print('-> loading raster ', local_raster)
    GRID = RasterGrid(local_raster)
    try:
        data['i'], data['j'] = GRID.get_gridcoordinates(data)
    except IndexError:
        print('ERROR: raster and data are not from the same country!')
        raise
    # ------------------------------------

    # Grouping clusters that belong to the same tile.
    cluster_N = 'countbyEA'
    print("Number of clusters: {} ".format(len(data)))

    def wavg(g, df, weight_series):
        # pandas removed .ix; use .loc to select the weights
        w = df.loc[g.index, weight_series]
        return (g * w).sum() / w.sum()

    import functools
    fnc = functools.partial(wavg, df=data, weight_series=cluster_N)

    try:
        data = data.groupby(["i", "j"]).agg({
            'Indicator': fnc,
            'gpsLatitude': fnc,
            'gpsLongitude': fnc
        }).reset_index()
    except Exception:
        print("No weights column, taking the average per i and j")
        data = data[['gpsLatitude', 'gpsLongitude',
                     'Indicator']].groupby(["i", "j"]).mean().reset_index()

    print("Number of unique tiles: {} ".format(len(data)))

    # train model ------------------------------------
    X = pd.DataFrame({"i": data["i"], "j": data["j"]})
    y = data.Indicator.values

    from model import IndicatorScaler

    model = IndicatorScaler(algorithm, X, y)

    # all country predictions ------------
    print('-> loading all grid points in the country')
    import rasterio
    src = rasterio.open(local_raster)
    list_j, list_i = np.where(src.read()[0] > 0)
    src.close()

    # also add the gps coordinates to the data for later use
    coords_i, coords_j = GRID.get_gpscoordinates(list_i, list_j)
    res = pd.DataFrame({
        "i": list_i,
        "j": list_j,
        "gpsLongitude": coords_i,
        "gpsLatitude": coords_j
    })

    # ------------------------------------

    # filter on built areas -------------
    # use the WorldPop layer to filter on inhabited locations.
    pop_raster = '{}_worldpop.tif'.format(country)
    local_pop_raster = 'temp/' + pop_raster
    print('-> getting population from WorldPop ({})'.format(local_pop_raster))
    if not os.path.exists(local_pop_raster):
        s3.Bucket(bucket_name).download_file(pop_raster, local_pop_raster)

    from img_utils import getRastervalue
    res = getRastervalue(res, local_pop_raster)
    # ------------------------------------

    # predictions for all data left -------
    print('-> running predictions...')
    res['yhat'] = model.model.predict(res[['i', 'j']])
    # ------------------------------------

    # saves to disk ---------------------
    # tifgenerator presumably rasterizes the predictions back onto the template raster grid
    from exporter import tifgenerator
    outfile = "temp/scalerout_{}_{}.tif".format(country, algorithm)
    tifgenerator(outfile=outfile, raster_path=local_raster, df=res)
    # -------------------------------------

    print('-> return file to client.')
    return send_file('../' + outfile,
                     mimetype='image/tiff',
                     as_attachment=True,
                     attachment_filename=country + "_" + algorithm + ".tif")
コード例 #60
def extract_arcticdem(
        adem='/srv/home/8675309/AW/arctic_dem/slope.img',
        region='NovayaZemlya',
        regional_mask='/srv/home/8675309/AW/masks/NovayaZemlya.tif',
        outpath='/srv/home/8675309/AW/',
        verbose=True):
    '''
    INPUTS:
        adem: path of ArcticDEM .tif file [string]
        region: region to clip [string]
        regional_mask: path of the mask associated to the selected region [.tif]
        outpath: folder where to save the clipped ArcticDEM [string]
        verbose: set to True to print details about processing [boolean]
        
    OUTPUTS:
        {outpath}/{region}_arcticdem_slope.tif: clipped ArcticDEM (EPSG: 3413) [.tif]
        {outpath}/{region}_mask_resampled.tif: resampled mask to fit clipped 
                                               ArcticDEM resolution [.tif]
    
    '''

    import rasterio
    from shapely.geometry import box
    import geopandas as gpd
    from rasterio.mask import mask
    from rasterio.warp import calculate_default_transform, reproject, Resampling
    from IPython import get_ipython
    import os
    from osgeo import gdal, gdalconst

    #clear all variables to enable file deletion
    get_ipython().magic('reset -sf')

    #check if running slope or aspect
    var = adem.split('/')[-1].split('.')[0]

    if verbose:
        if var == 'slope':
            print('\n')
            print('Running extract_arcticdem for %s... [SLOPE]' % region)
        elif var == 'aspect':
            print('\n')
            print('Running extract_arcticdem for %s... [ASPECT]' % region)
        else:
            print('\n')
            print(
                'ERROR: Wrong ArcticDEM input file. Rename to slope.img/aspect.img or modify the code'
            )

    #initialize output name
    target_crs = 'EPSG: 3413'
    target_crs_name = target_crs.split(':')[1]

    out_tif = outpath + region + '_adem_' + var + '.tif'
    out_tif_3413 = outpath + region + '_arcticdem_' + var + '_temp' + '.tif'

    #delete outputs if already exists
    if os.path.exists(out_tif):
        if verbose:
            if var == 'slope':
                print(
                    'WARNING: Clipped ArcticDEM derived slopes already exist...'
                )
                print('Deleting Clipped ArcticDEM derived slopes...')
            elif var == 'aspect':
                print(
                    'WARNING: Clipped ArcticDEM derived slope aspects already exist...'
                )
                print('Deleting Clipped ArcticDEM derived slope aspects...')
        os.remove(out_tif)

    if os.path.exists(out_tif_3413):
        if verbose:
            if var == 'slope':
                print(
                    'WARNING: Reprojected and clipped ArcticDEM derived slopes already exist...'
                )
                print(
                    'Deleting reprojected and clipped ArcticDEM derived slopes...'
                )
            elif var == 'aspect':
                print(
                    'WARNING: Reprojected and clipped ArcticDEM derived slope aspect already exist...'
                )
                print(
                    'Deleting reprojected and clipped ArcticDEM derived slope aspect...'
                )
        os.remove(out_tif_3413)

    data = rasterio.open(adem)
    regional_mask_path = regional_mask
    regional_mask = rasterio.open(regional_mask)

    #create the bbox with regional_mask dimensions
    lower_right_corner = regional_mask.transform * (regional_mask.width,
                                                    regional_mask.height)
    upper_left_corner = regional_mask.transform * (0, 0)
    bbox = box(upper_left_corner[0], lower_right_corner[1],
               lower_right_corner[0], upper_left_corner[1])

    #insert the bbox into a GeoDataFrame
    geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=data.crs)

    #coordinates of the geometry so that rasterio handles it
    def getFeatures(gdf):
        """Function to parse features from GeoDataFrame in such a manner that rasterio wants them"""
        import json
        return [json.loads(gdf.to_json())['features'][0]['geometry']]

    if verbose:
        print('Creating mask...')
    coords = getFeatures(geo)

    #clip source image using coords
    if verbose:
        print('Clipping input...')
    out_img, out_transform = mask(dataset=data, shapes=coords, crop=True)

    #mask output image
    out_img[regional_mask == 255] = 0

    #copy the metadata
    out_meta = data.meta.copy()

    out_meta.update({
        "driver": "GTiff",
        "height": out_img.shape[1],
        "width": out_img.shape[2],
        "transform": out_transform,
        "crs": regional_mask.crs
    })
    if verbose:
        print('Saving output...')
    with rasterio.open(out_tif, "w", compress='deflate', **out_meta) as dest:
        dest.write(out_img)

    if verbose:
        print('Reprojecting output...')
    dst_crs = regional_mask.crs
    with rasterio.open(out_tif) as src:
        transform, width, height = calculate_default_transform(
            src.crs, dst_crs, src.width, src.height, *src.bounds)
        kwargs = src.meta.copy()
        kwargs.update({
            'crs': dst_crs,
            'transform': transform,
            'width': width,
            'height': height
        })

        with rasterio.open(out_tif_3413, 'w', compress='deflate',
                           **kwargs) as dst:
            reproject(source=rasterio.band(src, 1),
                      destination=rasterio.band(dst, 1),
                      src_transform=src.transform,
                      src_crs=src.crs,
                      dst_transform=transform,
                      dst_crs=dst_crs,
                      resampling=Resampling.nearest)

    if verbose:
        print('Resampling mask to fit output resolution...')
    #source
    src_filename = regional_mask_path
    src = gdal.Open(src_filename, gdalconst.GA_ReadOnly)
    src_proj = src.GetProjection()
    src_geotrans = src.GetGeoTransform()

    #raster to match
    match_filename = out_tif_3413
    match_ds = gdal.Open(match_filename, gdalconst.GA_ReadOnly)
    match_proj = match_ds.GetProjection()
    match_geotrans = match_ds.GetGeoTransform()
    wide = match_ds.RasterXSize
    high = match_ds.RasterYSize

    #output/destination
    dst_filename = outpath + region + 'mask_resampled_temp.tif'
    dst = gdal.GetDriverByName('Gtiff').Create(dst_filename, wide, high, 1,
                                               gdalconst.GDT_Float32)
    dst.SetGeoTransform(match_geotrans)
    dst.SetProjection(match_proj)

    #run
    gdal.ReprojectImage(src, dst, src_proj, match_proj,
                        gdalconst.GRA_NearestNeighbour)

    del dst  # Flush

    if verbose:
        print('Masking output...')

    mask = rasterio.open(outpath + region + 'mask_resampled_temp.tif')
    mask_data = mask.read(1)
    output = rasterio.open(out_tif_3413)
    output_data = output.read(1)
    profile_mask = mask.profile
    profile_output = output.profile  #saving metadata
    output_data[mask.read(1) == 0] = 0
    mask_data[mask_data == 0] = 255
    profile_mask.update(nodata=255)

    if var == 'slope':
        with rasterio.open(outpath + region + '_arcticdem_' + var + '.tif',
                           'w', **profile_output) as dst:
            dst.write(output_data, 1)
        with rasterio.open(outpath + region + '_mask_resampled.tif', 'w',
                           **profile_output) as dst:
            dst.write(mask_data, 1)

    if var == 'aspect':
        with rasterio.open(outpath + region + '_arcticdem_' + var + '.tif',
                           'w', **profile_output) as dst:
            dst.write(output_data, 1)

    temp = outpath + region + 'mask_resampled_temp.tif'

    return out_tif, out_tif_3413, temp
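
A hedged call example based on the docstring above; every path and the region name are made up:

slope_tif, slope_3413_tif, temp_mask = extract_arcticdem(
    adem="/data/arctic_dem/slope.img",         # hypothetical ArcticDEM slope raster
    region="Svalbard",                         # hypothetical region name
    regional_mask="/data/masks/Svalbard.tif",  # hypothetical regional mask
    outpath="/data/out/",
    verbose=True)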