Code Example #1
    def test_tag_regions_by_flood(self):
        """Regions can be tagged correctly with data from flood forecasts
        """

        threshold = 0.3
        label = 'affected'

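        # Convert the netCDF flood forecast into a GeoTIFF grid, using 24
        # bands (hours) of data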
        tif_filename = convert_netcdf2tif(self.nc_filename, 24, verbose=False)
        region_filename = os.path.join(TESTDATA, 'rw_jakarta_singlepart.shp')

        grid = read_layer(tif_filename)
        polygons = read_layer(region_filename)

        res = tag_polygons_by_grid(polygons, grid,
                                   threshold=threshold,
                                   tag=label)
        os.remove(tif_filename)
        geom = res.get_geometry()
        data = res.get_data()

        # Check correctness of affected regions
        affected_geom = []
        affected_data = []
        for i, d in enumerate(data):
            if d[label]:
                g = geom[i]
                affected_geom.append(g)
                affected_data.append(d)

        assert len(affected_geom) == 37
        assert len(affected_data) == 37

        # Check that every grid point exceeding threshold lies inside
        # one of the polygons marked as affected
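        # to_vector_points() returns the grid cells as point geometries P
        # together with their corresponding cell values V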
        P, V = grid.to_vector_points()

        flooded_points_geom = []
        flooded_points_data = []
        for i, point in enumerate(P):
            val = V[i]
            if val > threshold:
                # Point that is flooded must be in one of the tagged polygons
                found = False
                for polygon in affected_geom:
                    if is_inside_polygon(point, polygon):
                        found = True
                        break
                msg = ('No affected polygon was found for point [%f, %f] '
                       'with value %f' % (point[0], point[1], val))
                verify(found, msg)

                # Collect flooded points for visualisation
                flooded_points_geom.append(point)
                flooded_points_data.append({'depth': val})

        # Generate files for visual inspection. See
        # https://raw.github.com/AIFDR/inasafe/master/files/flood_tagging_test.png
        # https://github.com/AIFDR/inasafe/blob/master/files/flood_tagging_test.tgz

        tmp_filename = unique_filename(prefix='grid', suffix='.tif')
        grid.write_to_file(tmp_filename)
        #print 'Grid written to ', tmp_filename

        tmp_filename = unique_filename(prefix='regions', suffix='.shp')
        res.write_to_file(tmp_filename)
        #print 'Regions written to ', tmp_filename

        tmp_filename = unique_filename(prefix='flooded_points', suffix='.shp')
        v = Vector(geometry=flooded_points_geom, data=flooded_points_data)
        v.write_to_file(tmp_filename)
Code Example #2
    args = parser.parse_args()
    print args
    print

    tif_filename = convert_netcdf2tif(args.filename, args.hours,
                                      verbose=True)

    # Tag each polygon with Y if it contains at least one pixel
    # exceeding a specific threshold (e.g. 0.3m).
    if args.regions is not None:
        print 'Tagging %s as "affected" or not' % args.regions
        polygons = read_layer(args.regions)
        grid = read_layer(tif_filename)
        res = tag_polygons_by_grid(polygons, grid,
                                   threshold=0.3,
                                   tag='affected')

        # Keep only those that are affected (speeds things up a lot,
        # but will reduce overall bounding box for buildings under
        # consideration)
        # geom = res.get_geometry()
        # data = res.get_data()
        # new_geom = []
        # new_data = []
        #
        # for i, d in enumerate(data):
        #    if d['affected']:
        #        g = geom[i]
        #        new_geom.append(g)
        #        new_data.append(d)
Code Example #3
                              'flooded or not'))

    args = parser.parse_args()
    print args
    print

    tif_filename = convert_netcdf2tif(args.filename, args.hours, verbose=True)

    # Tag each polygon with Y if it contains at least one pixel
    # exceeding a specific threshold (e.g. 0.3m).
    if args.regions is not None:
        print 'Tagging %s as "affected" or not' % args.regions
        polygons = read_layer(args.regions)
        grid = read_layer(tif_filename)
        res = tag_polygons_by_grid(polygons,
                                   grid,
                                   threshold=0.3,
                                   tag='affected')

        # Keep only those that are affected (speeds things up a lot,
        # but will reduce overall bounding box for buildings under
        # consideration)
        #geom = res.get_geometry()
        #data = res.get_data()
        #new_geom = []
        #new_data = []
        #
        #for i, d in enumerate(data):
        #    if d['affected']:
        #        g = geom[i]
        #        new_geom.append(g)
        #        new_data.append(d)
Code Example #4
def process_flood_event(netcdf_file=None, hours=24):
    """A function to process this_netcdf_file to a forecast file.

    :param netcdf_file: The netcdf file. If it's None the download it.

    :param hours: Positive integer determining how many bands to use.
    :type hours: int
    """
    print 'Start flood forecasting'

    if netcdf_file is None:
        # retrieve data from the web
        netcdf_file = download_file_url(netcdf_url, forecast_directory)
    else:
        netcdf_file = download_file_url(netcdf_url,
                                        name=netcdf_file,
                                        download_directory=forecast_directory)
    print 'Do flood forecasting for %s ...' % netcdf_file

    # Check whether a forecast file has already been created:
    # is_exist, polyforecast_filepath = get_result_file_name(netcdf_file,
    #                                                        hours)
    #
    # if is_exist:
    #     print 'Current flood forecasting has already been created.'
    #     print 'You can find it at %s' % polyforecast_filepath
    #     return

    # convert to tif
    # tif_file = polyforecast_filepath.replace('_regions.shp', '.tif')
    tif_filename = convert_netcdf2tif(netcdf_file,
                                      hours,
                                      verbose=False,
                                      output_dir=flood_directory)
    print 'tif_file', tif_filename
    tif_file = read_layer(tif_filename)

    # check if there is another file with the same name
    # if so, do not do the forecasting
    polyforecast_filepath = tif_filename.replace('.tif', '_regions.shp')
    zip_filename = polyforecast_filepath.replace('.shp', '.zip')
    if os.path.isfile(zip_filename):
        print('File %s already exists, so the forecasting is skipped' %
              zip_filename)
    else:
        polygons = read_layer(polygons_path)
        result = tag_polygons_by_grid(polygons,
                                      tif_file,
                                      threshold=0.3,
                                      tag='affected')

        new_geom = result.get_geometry()
        new_data = result.get_data()

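        # The forecast date is assumed to be the leading, underscore-delimited
        # token of the netCDF file name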
        date = os.path.split(netcdf_file)[-1].split('_')[0]

        v = Vector(geometry=new_geom,
                   data=new_data,
                   projection=result.projection,
                   keywords={'category': 'hazard',
                             'subcategory': 'flood',
                             'title': ('%d hour flood forecast regions '
                                       'in Jakarta at %s' % (hours, date))})

        print 'polyforecast_filepath', polyforecast_filepath
        v.write_to_file(polyforecast_filepath)
        print 'Wrote tagged polygons to %s' % polyforecast_filepath

    # zip all files
    if os.path.isfile(zip_filename):
        print 'Already zipped to %s' % zip_filename
    else:
        zip_shp(polyforecast_filepath,
                extra_ext=['.keywords'],
                remove_file=True)
        print 'Zipped to %s' % zip_filename
Code Example #5
def processFloodEvent(netcdf_file=None, hours=24):
    """A function to process netcdf_file to a forecast file.
    """
    print 'Start flood forecasting'

    if netcdf_file is None:
        # retrieve data from the web
        netcdf_file = download_file_url(netcdf_url, forecast_directory)
    else:
        netcdf_file = download_file_url(netcdf_url, name=netcdf_file,
            download_directory=forecast_directory)
    print 'Do flood forecasting for %s ...' % netcdf_file

#    # check if a forecasting file has been created or not
#    is_exist, polyforecast_filepath = get_result_file_name(netcdf_file, hours)
#
#    if is_exist:
#        print 'Current flood forecasting has already been created.'
#        print 'You can find it at %s' % polyforecast_filepath
#        return

    # convert to tif
#    tif_file = polyforecast_filepath.replace('_regions.shp', '.tif')
    tif_filename = convert_netcdf2tif(netcdf_file, hours,
            verbose=False, output_dir=flood_directory)
    print 'tif_file', tif_filename
    tif_file = read_layer(tif_filename)

    # check if there is another file with the same name
    # if so, do not do the forecasting
    polyforecast_filepath = tif_filename.replace('.tif', '_regions.shp')
    zip_filename = polyforecast_filepath.replace('.shp', '.zip')
    if os.path.isfile(zip_filename):
        print('File %s already exists, so the forecasting is skipped'
              % zip_filename)
    else:
        my_polygons = read_layer(polygons_path)
        my_result = tag_polygons_by_grid(my_polygons, tif_file, threshold=0.3,
            tag='affected')

        new_geom = my_result.get_geometry()
        new_data = my_result.get_data()

        date = os.path.split(netcdf_file)[-1].split('_')[0]

        v = Vector(geometry=new_geom, data=new_data,
            projection=my_result.projection,
            keywords={'category': 'hazard',
                      'subcategory': 'flood',
                      'title': ('%d hour flood forecast regions '
                                'in Jakarta at %s' % (hours,
                                                      date))})

        print 'polyforecast_filepath', polyforecast_filepath
        v.write_to_file(polyforecast_filepath)
        print 'Wrote tagged polygons to %s' % polyforecast_filepath

    # zip all files
    if os.path.isfile(zip_filename):
        print 'Already zipped to %s' % zip_filename
    else:
        zip_shp(polyforecast_filepath, extra_ext=['.keywords'],
            remove_file=True)
        print 'Zipped to %s' % zip_filename
Code Example #6
File: netcdf2tif.py    Project: zzpwelkin/inasafe
        "--regions", metavar="regions", type=str, help=("Administrative areas to be flagged as " "flooded or not")
    )

    args = parser.parse_args()
    print args
    print

    tif_filename = convert_netcdf2tif(args.filename, args.hours)

    # Tag each polygon with Y if it contains at least one pixel
    # exceeding a specific threshold (e.g. 0.3m).
    if args.regions is not None:
        print 'Tagging %s as "Flooded" or not' % args.regions
        polygons = read_layer(args.regions)
        grid = read_layer(tif_filename)
        res = tag_polygons_by_grid(polygons, grid, threshold=0.3, tag="Flooded")

        # Keep only those that are affected (speeds things up a lot,
        # but will reduce overall bounding box for buildings under
        # consideration)
        # geom = res.get_geometry()
        # data = res.get_data()
        # new_geom = []
        # new_data = []
        #
        # for i, d in enumerate(data):
        #    if d['Flooded']:
        #        g = geom[i]
        #        new_geom.append(g)
        #        new_data.append(d)
Code Example #7
File: netcdf2tif.py    Project: zzpwelkin/inasafe
                              'flooded or not'))

    args = parser.parse_args()
    print args
    print

    tif_filename = convert_netcdf2tif(args.filename, args.hours)

    # Tag each polygon with Y if it contains at least one pixel
    # exceeding a specific threshold (e.g. 0.3m).
    if args.regions is not None:
        print 'Tagging %s as "Flooded" or not' % args.regions
        polygons = read_layer(args.regions)
        grid = read_layer(tif_filename)
        res = tag_polygons_by_grid(polygons,
                                   grid,
                                   threshold=0.3,
                                   tag='Flooded')

        # Keep only those that are affected (speeds things up a lot,
        # but will reduce overall bounding box for buildings under
        # consideration)
        #geom = res.get_geometry()
        #data = res.get_data()
        #new_geom = []
        #new_data = []
        #
        #for i, d in enumerate(data):
        #    if d['Flooded']:
        #        g = geom[i]
        #        new_geom.append(g)
        #        new_data.append(d)
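All of the examples above share the same pipeline: convert the netCDF flood forecast to a GeoTIFF, tag every administrative polygon that contains at least one pixel above a depth threshold and, in the fuller examples, write the tagged regions out as a shapefile. The sketch below condenses that flow into a single helper for orientation only. It reuses the functions that appear in the examples (convert_netcdf2tif, read_layer, tag_polygons_by_grid, unique_filename); their import statements are not shown in the snippets, so they are left as an assumption to be resolved against the InaSAFE version in use.

# NOTE: convert_netcdf2tif, read_layer, tag_polygons_by_grid and
# unique_filename are the InaSAFE helpers used in the examples above;
# their import statements are not shown there, so import them from
# wherever the InaSAFE version in use provides them.


def tag_regions_by_forecast(nc_filename, region_filename,
                            hours=24, threshold=0.3, tag='affected'):
    """Tag administrative regions against a netCDF flood forecast.

    Returns the path of a shapefile containing the tagged polygons.
    """
    # Rasterise the forecast, using the requested number of hourly bands
    tif_filename = convert_netcdf2tif(nc_filename, hours, verbose=False)

    grid = read_layer(tif_filename)          # flood depth grid
    polygons = read_layer(region_filename)   # administrative regions

    # Mark every polygon that contains at least one pixel above the threshold
    result = tag_polygons_by_grid(polygons, grid,
                                  threshold=threshold, tag=tag)

    out_filename = unique_filename(prefix='regions', suffix='.shp')
    result.write_to_file(out_filename)
    return out_filename

The test in Code Example #1 then verifies such a result by checking that every grid point above the threshold falls inside one of the polygons tagged as affected.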