# Example no. 1
    def test_simplify_geometry_lines(self):
        """HRA: test _simplify_geometry does not alter geometry given lines."""
        from natcap.invest.hra import _simplify_geometry
        from natcap.invest.utils import _assert_vectors_equal

        # Write a single diagonal line to a GeoPackage in the test SRS.
        spatial_ref = osr.SpatialReference()
        spatial_ref.ImportFromEPSG(EPSG_CODE)
        wkt = spatial_ref.ExportToWkt()
        source_path = os.path.join(self.workspace_dir, 'base_lines.gpkg')
        pygeoprocessing.shapely_geometry_to_vector(
            [LineString([(0.0, 0.0), (10.0, 10.0)])],
            source_path, wkt, 'GPKG', ogr_geom_type=ogr.wkbLineString)

        simplified_path = os.path.join(
            self.workspace_dir, 'simplified_vector.gpkg')

        # Simplifying with a 3000 m tolerance should leave line
        # geometry untouched.
        _simplify_geometry(source_path, 3000, simplified_path)

        _assert_vectors_equal(simplified_path, source_path)
# Example no. 2
    def test_hra_regression_euclidean_linear(self):
        """HRA: regression testing synthetic data w/ linear, euclidean eqn."""
        import natcap.invest.hra
        from natcap.invest.utils import _assert_vectors_equal

        args = HraRegressionTests.generate_base_args(self.workspace_dir)
        # Also exercise the GeoJSON outputs used for visualization.
        args['visualize_outputs'] = True

        # Also exercise relative file paths in the Info CSV file.
        _make_info_csv(
            args['info_table_path'], args['workspace_dir'], rel_path=True)
        _make_criteria_csv(
            args['criteria_table_path'], args['workspace_dir'],
            extra_metadata=True)
        _make_aoi_vector(args['aoi_vector_path'])
        args['n_workers'] = ''  # tests empty string for ``n_workers``

        natcap.invest.hra.execute(args)

        raster_names = [
            'TOTAL_RISK_habitat_0', 'TOTAL_RISK_habitat_1',
            'TOTAL_RISK_Ecosystem', 'RECLASS_RISK_habitat_0',
            'RECLASS_RISK_habitat_1', 'RECLASS_RISK_Ecosystem']
        vector_names = [
            'RECLASS_RISK_habitat_0', 'RECLASS_RISK_habitat_1',
            'RECLASS_RISK_Ecosystem', 'STRESSOR_stressor_0',
            'STRESSOR_stressor_1']

        # Pair each model output raster with its expected regression raster.
        raster_pairs = [
            (os.path.join(args['workspace_dir'], 'outputs', name + '.tif'),
             os.path.join(TEST_DATA, name + '_euc_lin.tif'))
            for name in raster_names]

        # Include an intermediate raster to test the linear decay equation.
        raster_pairs.append((
            os.path.join(args['workspace_dir'], 'intermediate_outputs',
                         'C_habitat_0_stressor_1.tif'),
            os.path.join(TEST_DATA, 'C_habitat_0_stressor_1_euc_lin.tif')))

        # Assert rasters are equal.
        for model_path, expected_path in raster_pairs:
            numpy.testing.assert_allclose(
                pygeoprocessing.raster_to_numpy_array(model_path),
                pygeoprocessing.raster_to_numpy_array(expected_path))

        # Assert GeoJSON vectors are equal.
        for name in vector_names:
            _assert_vectors_equal(
                os.path.join(args['workspace_dir'], 'visualization_outputs',
                             name + '.geojson'),
                os.path.join(TEST_DATA, name + '_euc_lin.geojson'),
                field_value_atol=1e-6)

        # Assert summary statistics CSVs are equal.
        pandas.testing.assert_frame_equal(
            pandas.read_csv(os.path.join(
                args['workspace_dir'], 'outputs', 'SUMMARY_STATISTICS.csv')),
            pandas.read_csv(os.path.join(
                TEST_DATA, 'SUMMARY_STATISTICS_euc_lin.csv')))
# Example no. 3
    def test_clip_vector_by_vector_polygons(self):
        """WaveEnergy: testing clipping polygons from polygons."""
        from natcap.invest import wave_energy
        from natcap.invest.utils import _assert_vectors_equal

        projection_wkt = osr.SRS_WKT_WGS84_LAT_LONG
        x0, y0 = (-62.00, 44.00)

        def build_vector(geoms, path, fields, attributes):
            # Single helper for all three shapefiles written below.
            pygeoprocessing.shapely_geometry_to_vector(
                geoms, path, projection_wkt, 'ESRI Shapefile',
                fields=fields, attribute_list=attributes)

        # 2x2-degree square AOI polygon.
        aoi_geoms = [
            Polygon([(x0, y0), (x0 + 2, y0), (x0 + 2, y0 - 2),
                     (x0, y0 - 2), (x0, y0)])]
        aoi_path = os.path.join(self.workspace_dir, 'aoi.shp')
        build_vector(aoi_geoms, aoi_path, {'id': ogr.OFTInteger}, [{'id': 1}])

        # Data polygon covering the whole AOI, so the clipped result
        # should match the AOI geometry while keeping the data fields.
        data_geoms = [
            Polygon([(x0 - 2, y0 + 2), (x0 + 6, y0 - 2),
                     (x0 + 6, y0 - 4), (x0 - 2, y0 - 6),
                     (x0 - 2, y0 + 2)])]
        data_path = os.path.join(self.workspace_dir, 'data.shp')
        build_vector(
            data_geoms, data_path,
            {'id': ogr.OFTInteger, 'myattr': ogr.OFTString},
            [{'id': 1, 'myattr': 'hello'}])

        result_path = os.path.join(self.workspace_dir, 'aoi_clipped.shp')
        wave_energy._clip_vector_by_vector(
            data_path, aoi_path, result_path, projection_wkt,
            self.workspace_dir)

        # Expected result: the AOI geometry carrying the data attributes.
        expected_path = os.path.join(self.workspace_dir, 'expected.shp')
        build_vector(
            aoi_geoms, expected_path,
            {'id': ogr.OFTInteger, 'myattr': ogr.OFTString},
            [{'id': 1, 'myattr': 'hello'}])

        _assert_vectors_equal(expected_path, result_path)
# Example no. 4
    def test_archive_extraction(self):
        """Datastack: test archive extraction."""
        from natcap.invest import datastack
        from natcap.invest import utils

        workspace = self.workspace
        params = {
            'blank': '',
            'a': 1,
            'b': 'hello there',
            'c': 'plain bytestring',
            'foo': os.path.join(workspace, 'foo.txt'),
            'bar': os.path.join(workspace, 'foo.txt'),
            'data_dir': os.path.join(workspace, 'data_dir'),
            'raster': os.path.join(DATA_DIR, 'dem'),
            'vector': os.path.join(DATA_DIR, 'watersheds.shp'),
            'simple_table': os.path.join(DATA_DIR, 'carbon_pools_samp.csv'),
            'spatial_table': os.path.join(workspace, 'spatial_table.csv'),
        }
        # Synthesize the sample data the parameters refer to.
        os.makedirs(params['data_dir'])
        for name in ('foo.txt', 'bar.txt', 'baz.txt'):
            with open(os.path.join(params['data_dir'], name), 'w') as textfile:
                textfile.write(name)

        with open(params['foo'], 'w') as textfile:
            textfile.write('hello world!')

        # Spatial table: rows 1-2 reference pre-existing data; rows 3-4
        # reference a raster and vector that exist only via this CSV.
        with open(params['spatial_table'], 'w') as spatial_csv:
            spatial_csv.write('ID,path\n')
            spatial_csv.write(f"1,{params['raster']}\n")
            spatial_csv.write(f"2,{params['vector']}\n")

            # Create a raster only referenced by the CSV.
            target_csv_raster_path = os.path.join(workspace, 'new_raster.tif')
            pygeoprocessing.new_raster_from_base(
                params['raster'], target_csv_raster_path,
                gdal.GDT_UInt16, [0])
            spatial_csv.write(f'3,{target_csv_raster_path}\n')

            # Create a vector only referenced by the CSV.
            target_csv_vector_path = os.path.join(
                workspace, 'new_vector.geojson')
            pygeoprocessing.shapely_geometry_to_vector(
                [shapely.geometry.Point(100, 100)],
                target_csv_vector_path,
                pygeoprocessing.get_raster_info(
                    params['raster'])['projection_wkt'],
                'GeoJSON',
                ogr_geom_type=ogr.wkbPoint)
            spatial_csv.write(f'4,{target_csv_vector_path}\n')

        # Round-trip: build the datastack archive, extract it elsewhere.
        archive_path = os.path.join(workspace, 'archive.invs.tar.gz')
        datastack.build_datastack_archive(
            params, 'test_datastack_modules.archive_extraction', archive_path)
        archive_params = datastack.extract_datastack_archive(
            archive_path, os.path.join(workspace, 'extracted_archive'))

        # Extracted spatial data, tables, and scalars all match the inputs.
        numpy.testing.assert_allclose(
            pygeoprocessing.raster_to_numpy_array(archive_params['raster']),
            pygeoprocessing.raster_to_numpy_array(params['raster']))
        utils._assert_vectors_equal(archive_params['vector'], params['vector'])
        pandas.testing.assert_frame_equal(
            pandas.read_csv(archive_params['simple_table']),
            pandas.read_csv(params['simple_table']))
        for key in ('blank', 'a', 'b', 'c'):
            self.assertEqual(archive_params[key], params[key],
                             f'Params differ for key {key}')

        for key in ('foo', 'bar'):
            self.assertTrue(
                filecmp.cmp(archive_params[key], params[key], shallow=False))

        # The raster and vector referenced only by the CSV were archived too.
        spatial_csv_dict = utils.build_lookup_from_csv(
            archive_params['spatial_table'], 'ID', to_lower=True)
        spatial_csv_dir = os.path.dirname(archive_params['spatial_table'])
        numpy.testing.assert_allclose(
            pygeoprocessing.raster_to_numpy_array(
                os.path.join(spatial_csv_dir, spatial_csv_dict[3]['path'])),
            pygeoprocessing.raster_to_numpy_array(target_csv_raster_path))
        utils._assert_vectors_equal(
            os.path.join(spatial_csv_dir, spatial_csv_dict[4]['path']),
            target_csv_vector_path)
# Example no. 5
    def test_archive_extraction(self):
        """Datastack: test archive extraction."""
        from natcap.invest import datastack
        from natcap.invest.utils import _assert_vectors_equal

        params = {
            'blank': '',
            'a': 1,
            'b': 'hello there',
            'c': 'plain bytestring',
            'foo': os.path.join(self.workspace, 'foo.txt'),
            'bar': os.path.join(self.workspace, 'foo.txt'),
            'file_list': [
                os.path.join(self.workspace, 'file1.txt'),
                os.path.join(self.workspace, 'file2.txt'),
            ],
            'data_dir': os.path.join(self.workspace, 'data_dir'),
            'raster': os.path.join(DATA_DIR, 'dem'),
            'vector': os.path.join(DATA_DIR, 'watersheds.shp'),
            'table': os.path.join(DATA_DIR, 'carbon_pools_samp.csv'),
        }
        # Synthesize the sample data the parameters refer to.
        os.makedirs(params['data_dir'])
        for name in ('foo.txt', 'bar.txt', 'baz.txt'):
            with open(os.path.join(params['data_dir'], name), 'w') as textfile:
                textfile.write(name)

        with open(params['foo'], 'w') as textfile:
            textfile.write('hello world!')

        # Each listed file's content is its own path.
        for path in params['file_list']:
            with open(path, 'w') as textfile:
                textfile.write(path)

        # Collect parameters into an archive, then extract it elsewhere.
        archive_path = os.path.join(self.workspace, 'archive.invs.tar.gz')
        datastack.build_datastack_archive(params, 'sample_model', archive_path)
        archive_params = datastack.extract_datastack_archive(
            archive_path, os.path.join(self.workspace, 'extracted_archive'))

        # Extracted data and scalar parameters all match the originals.
        numpy.testing.assert_allclose(
            pygeoprocessing.raster_to_numpy_array(archive_params['raster']),
            pygeoprocessing.raster_to_numpy_array(params['raster']))
        _assert_vectors_equal(archive_params['vector'], params['vector'])
        pandas.testing.assert_frame_equal(
            pandas.read_csv(archive_params['table']),
            pandas.read_csv(params['table']))
        for key in ('blank', 'a', 'b', 'c'):
            self.assertEqual(archive_params[key], params[key],
                             f'Params differ for key {key}')

        for key in ('foo', 'bar'):
            self.assertTrue(
                filecmp.cmp(archive_params[key], params[key], shallow=False))

        for expected_file, archive_file in zip(params['file_list'],
                                               archive_params['file_list']):
            self.assertTrue(
                filecmp.cmp(expected_file, archive_file, shallow=False))