Example #1
def write_vector_data(data, projection, geometry, filename, keywords=None):
    """Write point data and any associated attributes to vector file

    Input:
        data: List of N dictionaries each with M fields where
              M is the number of attributes.
              A value of None is acceptable.
        projection: WKT projection information
        geometry: List of points or polygons.
        filename: Output filename
        keywords: Optional dictionary

    Note: The only formats implemented are GML and SHP, so the extension
    must be either .gml or .shp

    # FIXME (Ole): When the GML driver is used,
    #              the spatial reference is not stored.
    #              I suspect this is a bug in OGR.

    Background:
    * http://www.gdal.org/ogr/ogr_apitut.html (last example)
    * http://invisibleroads.com/tutorials/gdal-shapefile-points-save.html
    """

    V = Vector(data, projection, geometry, keywords=keywords)
    V.write_to_file(filename)
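
Below is a minimal, hedged usage sketch for write_vector_data: it writes two points with one attribute dictionary each to a shapefile. The import path, the shortened WGS 84 WKT string, and the file names are assumptions for illustration, not taken from the project.

# Hedged sketch: the import path below is an assumption, not shown in the example.
from safe.storage.utilities import write_vector_data  # hypothetical module path

# Shortened WGS 84 WKT string (illustrative; any valid WKT works here).
WGS84_WKT = (
    'GEOGCS["WGS 84",DATUM["WGS_1984",'
    'SPHEROID["WGS 84",6378137,298.257223563]],'
    'PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]')

# Two points with one attribute dictionary each; None values are acceptable.
geometry = [(106.80, -6.17), (106.82, -6.19)]
data = [{'name': 'site A', 'depth': 1.2},
        {'name': 'site B', 'depth': None}]

# The extension must be .shp or .gml per the docstring above.
write_vector_data(data, WGS84_WKT, geometry, 'sites.shp',
                  keywords={'category': 'exposure'})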
Example #2
File: test_vector.py Project: gijs/inasafe
 def testSqliteWriting(self):
     """Test that writing a dataset to sqlite works."""
     keywords = read_keywords(SHP_BASE + '.keywords')
     layer = Vector(data=SHP_BASE + '.shp', keywords=keywords)
     test_dir = temp_dir(sub_dir='test')
     test_file = unique_filename(suffix='.sqlite', dir=test_dir)
     layer.write_to_file(test_file, sublayer='foo')
Example #3
 def test_sqlite_writing(self):
     """Test that writing a dataset to sqlite works."""
     keywords = read_keywords(SHP_BASE + ".keywords")
     layer = Vector(data=SHP_BASE + ".shp", keywords=keywords)
     test_dir = temp_dir(sub_dir="test")
     test_file = unique_filename(suffix=".sqlite", dir=test_dir)
     layer.write_to_file(test_file, sublayer="foo")
     self.assertTrue(os.path.exists(test_file))
Example #4
 def test_sqlite_writing(self):
     """Test that writing a dataset to sqlite works."""
     keywords = {}
     layer = Vector(data=SHP_BASE + '.shp', keywords=keywords)
     test_dir = temp_dir(sub_dir='test')
     test_file = unique_filename(suffix='.sqlite', dir=test_dir)
     layer.write_to_file(test_file, sublayer='foo')
     self.assertTrue(os.path.exists(test_file))
Example #5
def interpolate_raster_vector(source,
                              target,
                              layer_name=None,
                              attribute_name=None,
                              mode='linear'):
    """Interpolate from raster layer to vector data

    Args:
        * source: Raster data set (grid)
        * target: Vector data set (points or polygons)
        * layer_name: Optional name of returned interpolated layer.
              If None the name of target is used for the returned layer.
        * attribute_name: Name for new attribute.
              If None (default) the name of source is used
        * mode: Interpolation mode passed to
              interpolate_raster_vector_points ('linear' by default)

    Returns:
        I: Vector data set; points located as target with values
           interpolated from source

    Note: If target geometry is polygon, data will be interpolated to
    its centroids and the output is a point data set.
    """

    # Input checks
    verify(source.is_raster)
    verify(target.is_vector)

    if target.is_point_data:
        # Interpolate from raster to point data
        R = interpolate_raster_vector_points(
            source,
            target,
            layer_name=layer_name,
            attribute_name=attribute_name,
            mode=mode)
    #elif target.is_line_data:
    # TBA - issue https://github.com/AIFDR/inasafe/issues/36
    #
    elif target.is_polygon_data:
        # Use centroids, in case of polygons
        P = convert_polygons_to_centroids(target)
        R = interpolate_raster_vector_points(
            source, P, layer_name=layer_name, attribute_name=attribute_name)
        # In case of polygon data, restore the polygon geometry
        # Do this by setting the geometry of the returned set to
        # that of the original polygon
        R = Vector(
            data=R.get_data(),
            projection=R.get_projection(),
            geometry=target.get_geometry(),
            name=R.get_name())
    else:
        msg = ('Unknown datatype for raster2vector interpolation: '
               'I got %s' % str(target))
        raise InaSAFEError(msg)

    # Return interpolated vector layer
    return R
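
A short, hedged usage sketch of interpolate_raster_vector as defined above. The read_layer import path and the input file names are assumptions for illustration.

# Hedged sketch: import path and file names are placeholders.
from safe.storage.core import read_layer  # assumed import path

source = read_layer('flood_depth.tif')   # raster grid of hazard values
target = read_layer('buildings.shp')     # point or polygon exposure layer

interpolated = interpolate_raster_vector(
    source,
    target,
    layer_name='buildings_with_depth',
    attribute_name='depth',
    mode='linear')

# Each feature now carries an interpolated 'depth' value taken from the raster.
for row in interpolated.get_data()[:5]:
    print row['depth']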
Example #6
    def test_convert_to_qgis_vector_layer(self):
        """Test that converting to QgsVectorLayer works."""
        if QGIS_IS_AVAILABLE:
            # Create vector layer
            keywords = read_keywords(SHP_BASE + ".keywords")
            layer = Vector(data=SHP_BASE + ".shp", keywords=keywords)

            # Convert to QgsVectorLayer
            qgis_layer = layer.as_qgis_native()
            provider = qgis_layer.dataProvider()
            count = provider.featureCount()
            self.assertEqual(count, 250, "Expected 250 features, got %s" % count)
Example #7
    def test_convert_to_qgis_vector_layer(self):
        """Test that converting to QgsVectorLayer works."""
        if qgis_imported:
            # Create vector layer
            keywords = read_keywords(SHP_BASE + '.keywords')
            layer = Vector(data=SHP_BASE + '.shp', keywords=keywords)

            # Convert to QgsVectorLayer
            qgis_layer = layer.as_qgis_native()
            provider = qgis_layer.dataProvider()
            count = provider.featureCount()
            self.assertEqual(
                count, 250, 'Expected 250 features, got %s' % count)
Example #8
    def run(self):
        """Run the impact function.

        :returns: A new line layer with inundated roads marked.
        :rtype: Vector
        """
        # Thresholds for tsunami hazard zone breakdown.
        low_max = self.parameters['low_threshold'].value
        medium_max = self.parameters['medium_threshold'].value
        high_max = self.parameters['high_threshold'].value

        target_field = self.target_field
        # Get parameters from layer's keywords
        road_class_field = self.exposure.keyword('road_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        # reproject self.extent to the hazard projection
        hazard_crs = self.hazard.layer.crs()
        hazard_authid = hazard_crs.authid()

        if hazard_authid == 'EPSG:4326':
            viewport_extent = self.requested_extent
        else:
            geo_crs = QgsCoordinateReferenceSystem()
            geo_crs.createFromSrid(4326)
            viewport_extent = extent_to_geo_array(
                QgsRectangle(*self.requested_extent), geo_crs, hazard_crs)

        # Clip hazard raster
        small_raster = align_clip_raster(self.hazard.layer, viewport_extent)

        # Create vector features from the flood raster
        # For each raster cell there is one rectangular polygon
        # The data is also spatially indexed for faster operations
        ranges = ranges_according_thresholds(low_max, medium_max, high_max)

        index, flood_cells_map = _raster_to_vector_cells(
            small_raster,
            ranges,
            self.exposure.layer.crs())

        # Filter geometry and data using the extent
        ct = QgsCoordinateTransform(
            QgsCoordinateReferenceSystem("EPSG:4326"),
            self.exposure.layer.crs())
        extent = ct.transformBoundingBox(QgsRectangle(*self.requested_extent))
        request = QgsFeatureRequest()
        request.setFilterRect(extent)

        # create template for the output layer
        line_layer_tmp = create_layer(self.exposure.layer)
        new_field = QgsField(target_field, QVariant.Int)
        line_layer_tmp.dataProvider().addAttributes([new_field])
        line_layer_tmp.updateFields()

        # create empty output layer and load it
        filename = unique_filename(suffix='.shp')
        QgsVectorFileWriter.writeAsVectorFormat(
            line_layer_tmp, filename, "utf-8", None, "ESRI Shapefile")
        line_layer = QgsVectorLayer(filename, "flooded roads", "ogr")

        # Do the heavy work - for each road get flood polygon for that area and
        # do the intersection/difference to find out which parts are flooded
        _intersect_lines_with_vector_cells(
            self.exposure.layer,
            request,
            index,
            flood_cells_map,
            line_layer,
            target_field)

        target_field_index = line_layer.dataProvider().\
            fieldNameIndex(target_field)

        # Generate simple impact report
        epsg = get_utm_epsg(self.requested_extent[0], self.requested_extent[1])
        output_crs = QgsCoordinateReferenceSystem(epsg)
        transform = QgsCoordinateTransform(
            self.exposure.layer.crs(), output_crs)

        # Roads breakdown
        self.init_report_var(self.hazard_classes)

        if line_layer.featureCount() < 1:
            raise ZeroImpactException()

        roads_data = line_layer.getFeatures()
        road_type_field_index = line_layer.fieldNameIndex(road_class_field)

        for road in roads_data:
            attributes = road.attributes()

            affected = attributes[target_field_index]
            if isinstance(affected, QPyNullVariant):
                continue
            else:
                hazard_zone = self.hazard_classes[affected]

            usage = attributes[road_type_field_index]
            usage = main_type(usage, exposure_value_mapping)

            geom = road.geometry()
            geom.transform(transform)
            length = geom.length()

            affected = False
            num_classes = len(self.hazard_classes)
            if attributes[target_field_index] in range(num_classes):
                affected = True
            self.classify_feature(hazard_zone, usage, length, affected)

        self.reorder_dictionaries()

        style_classes = [
            # FIXME 0 - 0.1
            dict(
                label=self.hazard_classes[0] + ': 0m',
                value=0,
                colour='#00FF00',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[1] + ': >0 - %.1f m' % low_max,
                value=1,
                colour='#FFFF00',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[2] + ': %.1f - %.1f m' % (
                    low_max + 0.1, medium_max),
                value=2,
                colour='#FFB700',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[3] + ': %.1f - %.1f m' % (
                    medium_max + 0.1, high_max),
                value=3,
                colour='#FF6F00',
                transparency=0,
                size=1
            ),

            dict(
                label=self.hazard_classes[4] + ' > %.1f m' % high_max,
                value=4,
                colour='#FF0000',
                transparency=0,
                size=1
            ),
        ]
        style_info = dict(
            target_field=target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'map_title': self.metadata().key('map_title'),
            'legend_title': self.metadata().key('legend_title'),
            'target_field': target_field
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Convert QgsVectorLayer to inasafe layer and return it
        impact_layer = Vector(
            data=line_layer,
            name=self.metadata().key('layer_name'),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
Example #9
    def run(self, layers):
        """Risk plugin for volcano hazard on building/structure.

        Counts the number of buildings exposed to each volcano hazard zone.

        :param layers: List of layers expected to contain:
                * hazard_layer: Hazard layer of volcano
                * exposure_layer: Vector layer of structure data on
                  the same grid as hazard_layer

        :returns: Map of building exposed to volcanic hazard zones.
                  Table with number of buildings affected
        :rtype: dict
        """
        # Parameters
        not_affected_value = self.parameters['Not affected value']
        radii = self.parameters['distances [km]']
        target_field = self.parameters['target field']
        name_attribute = self.parameters['name attribute']
        hazard_zone_attribute = self.parameters['hazard zone attribute']

        # Identify hazard and exposure layers
        hazard_layer = get_hazard_layer(layers)  # Volcano hazard layer
        exposure_layer = get_exposure_layer(layers)  # Building exposure layer

        # Get question
        question = get_question(
            hazard_layer.get_name(), exposure_layer.get_name(), self)

        # Input checks
        if not hazard_layer.is_vector:
            message = ('Input hazard %s was not a vector layer as expected '
                       % hazard_layer.get_name())
            raise Exception(message)

        if not (hazard_layer.is_polygon_data or hazard_layer.is_point_data):
            message = (
                'Input hazard must be a polygon or point layer. I got %s with '
                'layer type %s' %
                (hazard_layer.get_name(), hazard_layer.get_geometry_name()))
            raise Exception(message)

        if hazard_layer.is_point_data:
            # Use concentric circles
            centers = hazard_layer.get_geometry()
            attributes = hazard_layer.get_data()
            radii_meter = [x * 1000 for x in radii]  # Convert to meters
            hazard_layer = buffer_points(
                centers,
                radii_meter,
                hazard_zone_attribute,
                data_table=attributes)
            # To check
            category_names = radii_meter
        else:
            # FIXME (Ole): Change to English and use translation system
            # FIXME (Ismail) : Or simply use the values from the hazard layer
            category_names = ['Kawasan Rawan Bencana III',
                              'Kawasan Rawan Bencana II',
                              'Kawasan Rawan Bencana I']

        category_names.append(not_affected_value)

        # Get names of volcanoes considered
        if name_attribute in hazard_layer.get_attribute_names():
            volcano_name_list = set()
            for row in hazard_layer.get_data():
                # Run through all polygons and get unique names
                volcano_name_list.add(row[name_attribute])
            volcano_names = ', '.join(volcano_name_list)
        else:
            volcano_names = tr('Not specified in data')

        # Check if category_title exists in hazard_layer
        if hazard_zone_attribute not in hazard_layer.get_attribute_names():
            message = (
                'Hazard data %s did not contain expected attribute %s ' %
                (hazard_layer.get_name(), hazard_zone_attribute))
            # noinspection PyExceptionInherit
            raise InaSAFEError(message)

        # Find the target field name that has no conflict with default
        # target
        attribute_names = hazard_layer.get_attribute_names()
        target_field = get_non_conflicting_attribute_name(
            target_field, attribute_names)

        # Run interpolation function for polygon2raster
        interpolated_layer = assign_hazard_values_to_exposure_data(
            hazard_layer, exposure_layer, attribute_name=None)

        # Extract relevant exposure data
        attribute_names = interpolated_layer.get_attribute_names()
        attribute_names_lower = [
            attribute_name.lower() for attribute_name in attribute_names]
        attributes = interpolated_layer.get_data()
        interpolate_size = len(interpolated_layer)

        building_per_category = {}
        building_usages = []
        other_sum = {}

        for category_name in category_names:
            building_per_category[category_name] = {}
            building_per_category[category_name]['total'] = 0
            other_sum[category_name] = 0

        # Building attribute that should be looked up to get the usage
        building_type_attributes = [
            'type',
            'amenity',
            'building_t',
            'office',
            'tourism',
            'leisure',
            'use',
        ]

        for i in range(interpolate_size):
            hazard_value = attributes[i][hazard_zone_attribute]
            if not hazard_value:
                hazard_value = not_affected_value
            attributes[i][target_field] = hazard_value

            if hazard_value in building_per_category.keys():
                building_per_category[hazard_value]['total'] += 1
            elif not hazard_value:
                building_per_category[not_affected_value]['total'] += 1
            else:
                building_per_category[hazard_value] = {}
                building_per_category[hazard_value]['total'] = 1

            # Count affected buildings by usage type if available
            usage = None
            for building_type_attribute in building_type_attributes:
                if (
                        building_type_attribute in attribute_names_lower and (
                            usage is None or usage == 0)):
                    attribute_index = attribute_names_lower.index(
                        building_type_attribute)
                    field_name = attribute_names[attribute_index]
                    usage = attributes[i][field_name]

            if (
                    'building' in attribute_names_lower and (
                        usage is None or usage == 0)):
                attribute_index = attribute_names_lower.index('building')
                field_name = attribute_names[attribute_index]
                usage = attributes[i][field_name]
                if usage == 'yes':
                    usage = 'building'

            if usage is None or usage == 0:
                usage = tr('unknown')

            if usage not in building_usages:
                building_usages.append(usage)
                for building in building_per_category.values():
                    building[usage] = 0

            building_per_category[hazard_value][usage] += 1

        # Generate simple impact report
        blank_cell = ''
        table_body = [question,
                      TableRow([tr('Volcanoes considered'),
                                '%s' % volcano_names, blank_cell],
                               header=True)]

        table_headers = [tr('Building type')]
        table_headers += [tr(x) for x in category_names]
        table_headers += [tr('Total')]

        table_body += [TableRow(table_headers, header=True)]

        for building_usage in building_usages:
            building_usage_good = building_usage.replace('_', ' ')
            building_usage_good = building_usage_good.capitalize()

            building_sum = sum([
                building_per_category[category_name][building_usage] for
                category_name in category_names
            ])

            # Only report building types with at least 25 buildings
            if building_sum >= 25:
                row = [tr(building_usage_good)]
                building_sum = 0
                for category_name in category_names:
                    building_sub_sum = building_per_category[category_name][
                        building_usage]
                    row.append(format_int(building_sub_sum))
                    building_sum += building_sub_sum

                row.append(format_int(building_sum))
                table_body.append(row)

            else:
                for category_name in category_names:
                    if category_name in other_sum.keys():
                        other_sum[category_name] += building_per_category[
                            category_name][building_usage]
                    else:
                        other_sum[category_name] = building_per_category[
                            category_name][building_usage]

        # Add the remaining building types to the report as 'Other'.
        other_row = [tr('Other')]
        other_building_total = 0
        for category_name in category_names:
            other_building_sum = other_sum[category_name]
            other_row.append(format_int(other_building_sum))
            other_building_total += other_building_sum

        other_row.append(format_int(other_building_total))
        table_body.append(other_row)

        all_row = [tr('Total')]
        all_row += [format_int(building_per_category[category_name]['total'])
                    for category_name in category_names]
        total = sum([building_per_category[category_name]['total'] for
                     category_name in category_names])
        all_row += [format_int(total)]

        table_body.append(TableRow(all_row, header=True))

        table_body += [TableRow(tr('Map shows buildings affected in each of '
                                   'volcano hazard polygons.'))]

        impact_table = Table(table_body).toNewlineFreeString()
        impact_summary = impact_table

        # Extend impact report for on-screen display
        table_body.extend([TableRow(tr('Notes'), header=True),
                           tr('Total number of buildings %s in the viewable '
                              'area') % format_int(total),
                           tr('Only buildings available in OpenStreetMap '
                              'are considered.')])

        # Create style
        colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
                   '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
        colours = colours[::-1]  # flip

        colours = colours[:len(category_names)]

        style_classes = []

        i = 0
        for category_name in category_names:
            style_class = dict()
            style_class['label'] = tr(category_name)
            style_class['transparency'] = 0
            style_class['value'] = category_name
            style_class['size'] = 1

            if i >= len(category_names):
                i = len(category_names) - 1
            style_class['colour'] = colours[i]
            i += 1

            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # For printing map purpose
        map_title = tr('Buildings affected by volcanic hazard zone')
        legend_notes = tr('Thousand separator is represented by %s' %
                          get_thousand_separator())
        legend_units = tr('(building)')
        legend_title = tr('Building count')

        # Create vector layer and return
        impact_layer = Vector(
            data=attributes,
            projection=interpolated_layer.get_projection(),
            geometry=interpolated_layer.get_geometry(as_geometry_objects=True),
            name=tr('Buildings affected by volcanic hazard zone'),
            keywords={'impact_summary': impact_summary,
                      'impact_table': impact_table,
                      'target_field': target_field,
                      'map_title': map_title,
                      'legend_notes': legend_notes,
                      'legend_units': legend_units,
                      'legend_title': legend_title},
            style_info=style_info)
        return impact_layer
Example #10
    def run(self, layers):
        """Experimental impact function.

        Input
          layers: List of layers expected to contain
              H: Polygon layer of inundation areas
              E: Vector layer of buildings
        """
        target_field = self.parameters['target_field']
        building_type_field = self.parameters['building_type_field']
        affected_field = self.parameters['affected_field']
        affected_value = self.parameters['affected_value']

        # Extract data
        H = get_hazard_layer(layers)    # Flood
        E = get_exposure_layer(layers)  # Roads

        question = get_question(
            H.get_name(), E.get_name(), self)

        H = H.get_layer()
        h_provider = H.dataProvider()
        affected_field_index = h_provider.fieldNameIndex(affected_field)
        if affected_field_index == -1:
            message = tr('''Parameter "Affected Field"(='%s')
                is not present in the
                attribute table of the hazard layer.''' % (affected_field, ))
            raise GetDataError(message)

        E = E.get_layer()
        srs = E.crs().toWkt()
        e_provider = E.dataProvider()
        fields = e_provider.fields()
        # If target_field does not exist, add it:
        if fields.indexFromName(target_field) == -1:
            e_provider.addAttributes(
                [QgsField(target_field, QVariant.Int)])
        target_field_index = e_provider.fieldNameIndex(target_field)
        fields = e_provider.fields()

        # Create a layer to store the building polygons from E within the extent
        building_layer = QgsVectorLayer(
            'Polygon?crs=' + srs, 'impact_buildings', 'memory')
        building_provider = building_layer.dataProvider()

        # Set attributes
        building_provider.addAttributes(fields.toList())
        building_layer.startEditing()
        building_layer.commitChanges()

        # Filter geometry and data using the extent
        extent = QgsRectangle(*self.extent)
        request = QgsFeatureRequest()
        request.setFilterRect(extent)

        # Split building_layer by H and save as result:
        #   1) Filter from H inundated features
        #   2) Mark buildings as inundated (1) or not inundated (0)

        affected_field_type = h_provider.fields()[
            affected_field_index].typeName()
        if affected_field_type in ['Real', 'Integer']:
            affected_value = float(affected_value)

        h_data = H.getFeatures(request)
        hazard_poly = None
        for mpolygon in h_data:
            attributes = mpolygon.attributes()
            if attributes[affected_field_index] != affected_value:
                continue
            if hazard_poly is None:
                hazard_poly = QgsGeometry(mpolygon.geometry())
            else:
                # Make geometry union of inundated polygons

                # But some mpolygon.geometry() could be invalid, skip them
                tmp_geometry = hazard_poly.combine(mpolygon.geometry())
                try:
                    if tmp_geometry.isGeosValid():
                        hazard_poly = tmp_geometry
                except AttributeError:
                    pass

        if hazard_poly is None:
            message = tr(
                '''There are no objects in the hazard layer with "Affected
                value"='%s'. Please check the value or use other extent.''' %
                (affected_value, ))
            raise GetDataError(message)

        e_data = E.getFeatures(request)
        for feat in e_data:
            building_geom = feat.geometry()
            attributes = feat.attributes()
            l_feat = QgsFeature()
            l_feat.setGeometry(building_geom)
            l_feat.setAttributes(attributes)
            if hazard_poly.intersects(building_geom):
                l_feat.setAttribute(target_field_index, 1)
            else:
                l_feat.setAttribute(target_field_index, 0)
            (_, __) = building_layer.dataProvider().addFeatures([l_feat])
        building_layer.updateExtents()

        # Generate simple impact report

        building_count = flooded_count = 0  # Count of buildings
        buildings_by_type = dict()      # Count of flooded buildings by type

        buildings_data = building_layer.getFeatures()
        building_type_field_index = building_layer.fieldNameIndex(
            building_type_field)
        for building in buildings_data:
            building_count += 1
            attributes = building.attributes()
            building_type = attributes[building_type_field_index]
            if building_type in [None, 'NULL', 'null', 'Null']:
                building_type = 'Unknown type'
            if building_type not in buildings_by_type:
                buildings_by_type[building_type] = {'flooded': 0, 'total': 0}
            buildings_by_type[building_type]['total'] += 1

            if attributes[target_field_index] == 1:
                flooded_count += 1
                buildings_by_type[building_type]['flooded'] += 1

        table_body = [
            question,
            TableRow(
                [tr('Building Type'), tr('Flooded'), tr('Total')],
                header=True),
            TableRow(
                [tr('All'), int(flooded_count), int(building_count)]),
            TableRow(
                tr('Breakdown by building type'), header=True)]
        for t, v in buildings_by_type.iteritems():
            table_body.append(
                TableRow([t, int(v['flooded']), int(v['total'])]))

        impact_summary = Table(table_body).toNewlineFreeString()
        map_title = tr('Buildings inundated')

        style_classes = [
            dict(label=tr('Not Inundated'), value=0, colour='#1EFC7C',
                 transparency=0, size=0.5),
            dict(label=tr('Inundated'), value=1, colour='#F31A1C',
                 transparency=0, size=0.5)]
        style_info = dict(
            target_field=target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        # Convert QgsVectorLayer to inasafe layer and return it.
        building_layer = Vector(
            data=building_layer,
            name=tr('Flooded buildings'),
            keywords={
                'impact_summary': impact_summary,
                'map_title': map_title,
                'target_field': target_field},
            style_info=style_info)
        return building_layer
Example #11
    def run(self, layers):
        """Experimental impact function for flood polygons on roads.

        :param layers: List of layers expected to contain:
            H: Polygon layer of inundation areas
            E: Vector layer of roads
        """
        target_field = self.parameters['target_field']
        road_type_field = self.parameters['road_type_field']
        affected_field = self.parameters['affected_field']
        affected_value = self.parameters['affected_value']

        # Extract data
        hazard = get_hazard_layer(layers)  # Flood
        exposure = get_exposure_layer(layers)  # Roads

        question = get_question(hazard.get_name(), exposure.get_name(), self)

        hazard = hazard.get_layer()
        hazard_provider = hazard.dataProvider()
        affected_field_index = hazard_provider.fieldNameIndex(affected_field)
        # see #818: should still work if there is no valid attribute
        if affected_field_index == -1:
            pass
            # message = tr('''Parameter "Affected Field"(='%s')
            #     is not present in the attribute table of the hazard layer.
            #     ''' % (affected_field, ))
            # raise GetDataError(message)

        LOGGER.info('Affected field: %s' % affected_field)
        LOGGER.info('Affected field index: %s' % affected_field_index)

        exposure = exposure.get_layer()
        # Filter geometry and data using the extent
        extent = QgsRectangle(*self.extent)
        request = QgsFeatureRequest()
        request.setFilterRect(extent)

        # Split line_layer by hazard and save as result:
        #   1) Filter from hazard inundated features
        #   2) Mark roads as inundated (1) or not inundated (0)

        if affected_field_index != -1:
            affected_field_type = hazard_provider.fields(
            )[affected_field_index].typeName()
            if affected_field_type in ['Real', 'Integer']:
                affected_value = float(affected_value)

        #################################
        #           REMARK 1
        #  In qgis 2.2 we can use request to filter inundated
        #  polygons directly (it allows QgsExpression). Then
        #  we can delete the lines and call
        #
        #  request = ....
        #  hazard_poly = union_geometry(H, request)
        #
        ################################

        hazard_features = hazard.getFeatures(request)
        hazard_poly = None
        for feature in hazard_features:
            attributes = feature.attributes()
            if affected_field_index != -1:
                if attributes[affected_field_index] != affected_value:
                    continue
            if hazard_poly is None:
                hazard_poly = QgsGeometry(feature.geometry())
            else:
                # Make geometry union of inundated polygons
                # But some feature.geometry() could be invalid, skip them
                tmp_geometry = hazard_poly.combine(feature.geometry())
                try:
                    if tmp_geometry.isGeosValid():
                        hazard_poly = tmp_geometry
                except AttributeError:
                    pass

        ###############################################
        # END REMARK 1
        ###############################################

        if hazard_poly is None:
            message = tr(
                '''There are no objects in the hazard layer with "Affected
                value"='%s'. Please check the value or use a different
                extent.''' % (affected_value, ))
            raise GetDataError(message)

        # Clip exposure by the extent
        extent_as_polygon = QgsGeometry().fromRect(extent)
        line_layer = clip_by_polygon(exposure, extent_as_polygon)
        # Find inundated roads, mark them
        line_layer = split_by_polygon(line_layer,
                                      hazard_poly,
                                      request,
                                      mark_value=(target_field, 1))

        # Generate simple impact report
        epsg = get_utm_epsg(self.extent[0], self.extent[1])
        destination_crs = QgsCoordinateReferenceSystem(epsg)
        transform = QgsCoordinateTransform(exposure.crs(), destination_crs)
        road_len = flooded_len = 0  # Length of roads
        roads_by_type = dict()  # Length of flooded roads by type

        roads_data = line_layer.getFeatures()
        road_type_field_index = line_layer.fieldNameIndex(road_type_field)
        target_field_index = line_layer.fieldNameIndex(target_field)

        for road in roads_data:
            attributes = road.attributes()
            road_type = attributes[road_type_field_index]
            if road_type.__class__.__name__ == 'QPyNullVariant':
                road_type = tr('Other')
            geom = road.geometry()
            geom.transform(transform)
            length = geom.length()
            road_len += length

            if road_type not in roads_by_type:
                roads_by_type[road_type] = {'flooded': 0, 'total': 0}
            roads_by_type[road_type]['total'] += length

            if attributes[target_field_index] == 1:
                flooded_len += length
                roads_by_type[road_type]['flooded'] += length
        table_body = [
            question,
            TableRow([
                tr('Road Type'),
                tr('Temporarily closed (m)'),
                tr('Total (m)')
            ],
                     header=True),
            TableRow([tr('All'), int(flooded_len),
                      int(road_len)]),
            TableRow(tr('Breakdown by road type'), header=True)
        ]
        for road_type, value in roads_by_type.iteritems():
            table_body.append(
                TableRow(
                    [road_type,
                     int(value['flooded']),
                     int(value['total'])]))

        impact_summary = Table(table_body).toNewlineFreeString()
        map_title = tr('Roads inundated')

        style_classes = [
            dict(label=tr('Not Inundated'),
                 value=0,
                 colour='#1EFC7C',
                 transparency=0,
                 size=0.5),
            dict(label=tr('Inundated'),
                 value=1,
                 colour='#F31A1C',
                 transparency=0,
                 size=0.5)
        ]
        style_info = dict(target_field=target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # Convert QgsVectorLayer to inasafe layer and return it
        line_layer = Vector(data=line_layer,
                            name=tr('Flooded roads'),
                            keywords={
                                'impact_summary': impact_summary,
                                'map_title': map_title,
                                'target_field': target_field
                            },
                            style_info=style_info)
        return line_layer
Example #12
    def run(self):
        """Classified hazard impact to buildings (e.g. from Open Street Map).
        """
        self.validate()
        self.prepare()

        # Value from layer's keywords
        # Try to get the value from the keywords; if it does not exist,
        # fall back to the old get_osm_building_usage
        try:
            structure_class_field = self.exposure.keyword(
                'structure_class_field')
        except KeywordNotFoundError:
            structure_class_field = None

        # The 3 classes
        categorical_hazards = self.parameters['Categorical hazards'].value
        low_t = categorical_hazards[0].value
        medium_t = categorical_hazards[1].value
        high_t = categorical_hazards[2].value

        # Determine attribute name for hazard levels
        if self.hazard.layer.is_raster:
            hazard_attribute = 'level'
        else:
            hazard_attribute = None

        interpolated_result = assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=hazard_attribute,
            mode='constant')

        # Extract relevant exposure data
        attribute_names = interpolated_result.get_attribute_names()
        attributes = interpolated_result.get_data()

        buildings_total = len(interpolated_result)
        # Calculate building impact
        self.buildings = {}
        self.affected_buildings = OrderedDict([
            (tr('High Hazard Class'), {}),
            (tr('Medium Hazard Class'), {}),
            (tr('Low Hazard Class'), {})
        ])
        for i in range(buildings_total):

            if (structure_class_field and
                    structure_class_field in attribute_names):
                usage = attributes[i][structure_class_field]
            else:
                usage = get_osm_building_usage(attribute_names, attributes[i])

            if usage is None or usage == 0:
                usage = 'unknown'

            if usage not in self.buildings:
                self.buildings[usage] = 0
                for category in self.affected_buildings.keys():
                    self.affected_buildings[category][usage] = OrderedDict([
                        (tr('Buildings Affected'), 0)])

            # Count all buildings by type
            self.buildings[usage] += 1
            attributes[i][self.target_field] = 0
            attributes[i][self.affected_field] = 0
            level = float(attributes[i]['level'])
            level = float(numpy_round(level))
            if level == high_t:
                impact_level = tr('High Hazard Class')
            elif level == medium_t:
                impact_level = tr('Medium Hazard Class')
            elif level == low_t:
                impact_level = tr('Low Hazard Class')
            else:
                continue

            # Add calculated impact to existing attributes
            attributes[i][self.target_field] = {
                tr('High Hazard Class'): 3,
                tr('Medium Hazard Class'): 2,
                tr('Low Hazard Class'): 1
            }[impact_level]
            attributes[i][self.affected_field] = 1
            # Count affected buildings by type
            self.affected_buildings[impact_level][usage][
                tr('Buildings Affected')] += 1

        # Consolidate small building usage groups (fewer than 25) into 'other'
        self._consolidate_to_other()

        # Create style
        style_classes = [dict(label=tr('High'),
                              value=3,
                              colour='#F31A1C',
                              transparency=0,
                              size=2,
                              border_color='#969696',
                              border_width=0.2),
                         dict(label=tr('Medium'),
                              value=2,
                              colour='#F4A442',
                              transparency=0,
                              size=2,
                              border_color='#969696',
                              border_width=0.2),
                         dict(label=tr('Low'),
                              value=1,
                              colour='#EBF442',
                              transparency=0,
                              size=2,
                              border_color='#969696',
                              border_width=0.2),
                         dict(label=tr('Not Affected'),
                              value=None,
                              colour='#1EFC7C',
                              transparency=0,
                              size=2,
                              border_color='#969696',
                              border_width=0.2)]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        impact_table = impact_summary = self.html_report()

        # For printing map purpose
        map_title = tr('Buildings affected')
        legend_title = tr('Structure inundated status')
        legend_units = tr('(Low, Medium, High)')

        # Create vector layer and return
        vector_layer = Vector(
            data=attributes,
            projection=self.exposure.layer.get_projection(),
            geometry=self.exposure.layer.get_geometry(),
            name=tr('Estimated buildings affected'),
            keywords={
                'impact_summary': impact_summary,
                'impact_table': impact_table,
                'target_field': self.affected_field,
                'map_title': map_title,
                'legend_units': legend_units,
                'legend_title': legend_title,
                'buildings_total': buildings_total,
                'buildings_affected': self.total_affected_buildings},
            style_info=style_info)
        self._impact = vector_layer
        return vector_layer
Example #13
    def run(self):
        """Tsunami raster impact to buildings (e.g. from Open Street Map)."""

        # Thresholds for tsunami hazard zone breakdown.
        low_max = self.parameters['low_threshold'].value
        medium_max = self.parameters['medium_threshold'].value
        high_max = self.parameters['high_threshold'].value

        # Interpolate hazard level to building locations
        interpolated_layer = assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=self.target_field)

        # Extract relevant exposure data
        features = interpolated_layer.get_data()
        total_features = len(interpolated_layer)

        structure_class_field = self.exposure.keyword('structure_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        self.init_report_var(self.hazard_classes)

        for i in range(total_features):
            # Get the interpolated depth
            water_depth = float(features[i][self.target_field])
            if water_depth <= 0:
                inundated_status = 0
            elif 0 < water_depth <= low_max:
                inundated_status = 1  # low
            elif low_max < water_depth <= medium_max:
                inundated_status = 2  # medium
            elif medium_max < water_depth <= high_max:
                inundated_status = 3  # high
            elif high_max < water_depth:
                inundated_status = 4  # very high
            # Not a number or some other non-numeric value.
            else:
                inundated_status = 0

            usage = features[i].get(structure_class_field, None)
            usage = main_type(usage, exposure_value_mapping)

            # Add calculated impact to existing attributes
            features[i][self.target_field] = inundated_status
            category = self.categories[inundated_status]

            self.classify_feature(category, usage, True)

        self.reorder_dictionaries()

        style_classes = [
            dict(
                label=self.hazard_classes[0] + ': 0 m',
                value=0,
                colour='#00FF00',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[1] + ': >0 - %.1f m' % low_max,
                value=1,
                colour='#FFFF00',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[2] + ': %.1f - %.1f m' % (
                    low_max + 0.1, medium_max),
                value=2,
                colour='#FFB700',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[3] + ': %.1f - %.1f m' % (
                    medium_max + 0.1, high_max),
                value=3,
                colour='#FF6F00',
                transparency=0,
                size=1
            ),

            dict(
                label=self.hazard_classes[4] + ' > %.1f m' % high_max,
                value=4,
                colour='#FF0000',
                transparency=0,
                size=1
            ),
        ]

        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'target_field': self.target_field,
            'map_title': self.map_title(),
            'legend_title': self.metadata().key('legend_title'),
            'legend_units': self.metadata().key('legend_units'),
            'buildings_total': total_features,
            'buildings_affected': self.total_affected_buildings
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        impact_layer = Vector(
            data=features,
            projection=interpolated_layer.get_projection(),
            geometry=interpolated_layer.get_geometry(),
            name=self.map_title(),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
Example #14
    def run(self):
        """Experimental impact function for flood polygons on roads."""
        self.validate()
        self.prepare()

        # Get parameters from layer's keywords
        self.hazard_class_attribute = self.hazard.keyword('field')
        self.hazard_class_mapping = self.hazard.keyword('value_map')
        self.exposure_class_attribute = self.exposure.keyword(
            'road_class_field')

        hazard_provider = self.hazard.layer.dataProvider()
        affected_field_index = hazard_provider.fieldNameIndex(
            self.hazard_class_attribute)
        # see #818: should still work if there is no valid attribute
        if affected_field_index == -1:
            pass
            # message = tr('''Parameter "Affected Field"(='%s')
            # is not present in the attribute table of the hazard layer.
            #     ''' % (affected_field, ))
            # raise GetDataError(message)

        LOGGER.info('Affected field: %s' % self.hazard_class_attribute)
        LOGGER.info('Affected field index: %s' % affected_field_index)

        # Filter geometry and data using the extent
        requested_extent = QgsRectangle(*self.requested_extent)
        # This is a hack - we should be setting the extent CRS
        # in the IF base class via safe/engine/core.py:calculate_impact
        # for now we assume the extent is in 4326 because it
        # is set to that from geo_extent
        # See issue #1857
        transform = QgsCoordinateTransform(
            QgsCoordinateReferenceSystem('EPSG:%i' %
                                         self._requested_extent_crs),
            self.hazard.layer.crs())
        projected_extent = transform.transformBoundingBox(requested_extent)
        request = QgsFeatureRequest()
        request.setFilterRect(projected_extent)

        # Split line_layer by hazard and save as result:
        #   1) Filter from hazard inundated features
        #   2) Mark roads as inundated (1) or not inundated (0)

        #################################
        #           REMARK 1
        #  In qgis 2.2 we can use request to filter inundated
        #  polygons directly (it allows QgsExpression). Then
        #  we can delete the lines and call
        #
        #  request = ....
        #  hazard_poly = union_geometry(H, request)
        #
        ################################

        hazard_features = self.hazard.layer.getFeatures(request)
        hazard_poly = None
        for feature in hazard_features:
            attributes = feature.attributes()
            if affected_field_index != -1:
                value = attributes[affected_field_index]
                if value not in self.hazard_class_mapping[self.wet]:
                    continue
            if hazard_poly is None:
                hazard_poly = QgsGeometry(feature.geometry())
            else:
                # Make geometry union of inundated polygons
                # But some feature.geometry() could be invalid, skip them
                tmp_geometry = hazard_poly.combine(feature.geometry())
                try:
                    if tmp_geometry.isGeosValid():
                        hazard_poly = tmp_geometry
                except AttributeError:
                    pass

        ###############################################
        # END REMARK 1
        ###############################################

        if hazard_poly is None:
            message = tr(
                'There are no objects in the hazard layer with %s (Affected '
                'Field) matching %s (Affected Values). Please check the value or use '
                'a different extent.' % (self.hazard_class_attribute,
                                         self.hazard_class_mapping[self.wet]))
            raise GetDataError(message)

        # Clip exposure by the extent
        extent_as_polygon = QgsGeometry().fromRect(requested_extent)
        line_layer = clip_by_polygon(self.exposure.layer, extent_as_polygon)
        # Find inundated roads, mark them
        line_layer = split_by_polygon(line_layer,
                                      hazard_poly,
                                      request,
                                      mark_value=(self.target_field, 1))

        # Generate simple impact report
        epsg = get_utm_epsg(self.requested_extent[0], self.requested_extent[1])
        destination_crs = QgsCoordinateReferenceSystem(epsg)
        transform = QgsCoordinateTransform(self.exposure.layer.crs(),
                                           destination_crs)

        roads_data = line_layer.getFeatures()
        road_type_field_index = line_layer.fieldNameIndex(
            self.exposure_class_attribute)
        target_field_index = line_layer.fieldNameIndex(self.target_field)
        flooded_keyword = tr('Temporarily closed (m)')
        self.affected_road_categories = [flooded_keyword]
        self.affected_road_lengths = OrderedDict([(flooded_keyword, {})])
        self.road_lengths = OrderedDict()

        for road in roads_data:
            attributes = road.attributes()
            road_type = attributes[road_type_field_index]
            if road_type.__class__.__name__ == 'QPyNullVariant':
                road_type = tr('Other')
            geom = road.geometry()
            geom.transform(transform)
            length = geom.length()

            if road_type not in self.road_lengths:
                self.affected_road_lengths[flooded_keyword][road_type] = 0
                self.road_lengths[road_type] = 0

            self.road_lengths[road_type] += length
            if attributes[target_field_index] == 1:
                self.affected_road_lengths[flooded_keyword][
                    road_type] += length

        impact_summary = self.html_report()

        # For printing map purpose
        map_title = tr('Roads inundated')
        legend_title = tr('Road inundated status')

        style_classes = [
            dict(label=tr('Not Inundated'),
                 value=0,
                 colour='#1EFC7C',
                 transparency=0,
                 size=0.5),
            dict(label=tr('Inundated'),
                 value=1,
                 colour='#F31A1C',
                 transparency=0,
                 size=0.5)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # Convert QgsVectorLayer to inasafe layer and return it
        if line_layer.featureCount() == 0:
            # Raising an exception seems poor semantics here....
            raise ZeroImpactException(
                tr('No roads are flooded in this scenario.'))
        line_layer = Vector(data=line_layer,
                            name=tr('Flooded roads'),
                            keywords={
                                'impact_summary': impact_summary,
                                'map_title': map_title,
                                'legend_title': legend_title,
                                'target_field': self.target_field
                            },
                            style_info=style_info)

        self._impact = line_layer

        return line_layer
Example #15
File: mappings.py Project: maning/inasafe
def sigab2padang(E):
    """Map SIGAB attributes to Padang vulnerability classes

    Input
        E: Vector object representing the SIGAB data

    Output:
        Vector object like E, but with one new attribute ('VCLASS')
        representing the vulnerability class used in the Padang dataset

    """

    # Input check
    required = ['Struktur_B', 'Lantai', 'Atap', 'Dinding', 'Tingkat']
    actual = E.get_attribute_names()

    msg = ('Input data to sigab2padang must have attributes %s. '
           'It has %s' % (str(required), str(actual)))
    for attribute in required:
        verify(attribute in actual, msg)

    # Start mapping
    N = len(E)
    attributes = E.get_data()
    for i in range(N):
        levels = E.get_data('Tingkat', i).lower()
        structure = E.get_data('Struktur_B', i).lower()
        #roof_type = E.get_data('Atap', i).lower()
        #wall_type = E.get_data('Dinding', i).lower()
        #floor_type = E.get_data('Lantai', i).lower()
        if levels == 'none' or structure == 'none':
            vulnerability_class = 2
        else:
            if int(levels) >= 2:
                vulnerability_class = 7  # RC low
            else:
                # Low
                if structure in ['beton bertulang']:
                    vulnerability_class = 6  # Concrete shear
                elif structure.startswith('rangka'):
                    vulnerability_class = 8  # Confined
                elif 'kayu' in structure or 'wood' in structure:
                    vulnerability_class = 9  # Wood
                else:
                    vulnerability_class = 2  # URM

        # Store new attribute value
        attributes[i]['VCLASS'] = vulnerability_class

        # Self-check for use with osm_080811.shp
        if E.get_name() == 'osm_080811':
            if levels != 'none' and int(levels) > 0:
                msg = ('Got %s expected %s. levels = %s, structure = %s'
                       % (vulnerability_class,
                          attributes[i]['TestBLDGCl'],
                          levels,
                          structure))
                verify(numpy.allclose(attributes[i]['TestBLDGCl'],
                                      vulnerability_class), msg)

    # Create new vector instance and return
    V = Vector(data=attributes,
               projection=E.get_projection(),
               geometry=E.get_geometry(),
               name=E.get_name() + ' mapped to Padang vulnerability classes',
               keywords=E.get_keywords())
    return V
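The mapping above reduces to a small decision table on the 'Tingkat' (levels) and 'Struktur_B' (structure) attributes. Below is a minimal standalone sketch of those rules using plain dictionaries instead of a Vector layer; the helper name and the sample records are invented for illustration.

def sigab_vulnerability_class(levels, structure):
    """Return a Padang vulnerability class for one SIGAB record.

    levels and structure are the raw string values of the 'Tingkat'
    and 'Struktur_B' attributes.
    """
    levels = levels.lower()
    structure = structure.lower()
    if levels == 'none' or structure == 'none':
        return 2                      # Unknown -> URM with metal roof
    if int(levels) >= 2:
        return 7                      # RC low rise
    if structure == 'beton bertulang':
        return 6                      # Concrete shear wall
    if structure.startswith('rangka'):
        return 8                      # Confined masonry
    if 'kayu' in structure or 'wood' in structure:
        return 9                      # Wood
    return 2                          # URM with metal roof


if __name__ == '__main__':
    records = [
        {'Tingkat': '1', 'Struktur_B': 'beton bertulang'},
        {'Tingkat': '3', 'Struktur_B': 'kayu'},
        {'Tingkat': 'none', 'Struktur_B': 'rangka baja'},
    ]
    for record in records:
        record['VCLASS'] = sigab_vulnerability_class(
            record['Tingkat'], record['Struktur_B'])
        print(record)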
Example #16
0
    def test_clip_points_by_polygons_with_holes0(self):
        """Points can be clipped by polygons with holes
        """

        # Define an outer ring
        outer_ring = numpy.array([[106.79, -6.233],
                                  [106.80, -6.24],
                                  [106.78, -6.23],
                                  [106.77, -6.21],
                                  [106.79, -6.233]])

        # Define inner rings
        inner_rings = [numpy.array([[106.77827, -6.2252],
                                    [106.77775, -6.22378],
                                    [106.78, -6.22311],
                                    [106.78017, -6.22530],
                                    [106.77827, -6.2252]])[::-1],
                       numpy.array([[106.78652, -6.23215],
                                    [106.78642, -6.23075],
                                    [106.78746, -6.23143],
                                    [106.78831, -6.23307],
                                    [106.78652, -6.23215]])[::-1]]

        v = Vector(geometry=[Polygon(outer_ring=outer_ring,
                                     inner_rings=inner_rings)])
        assert v.is_polygon_data

        # Write it to file
        tmp_filename = unique_filename(suffix='.shp')
        v.write_to_file(tmp_filename)

        # Read polygon it back
        L = read_layer(tmp_filename)
        P = L.get_geometry(as_geometry_objects=True)[0]

        outer_ring = P.outer_ring
        inner_ring0 = P.inner_rings[0]
        inner_ring1 = P.inner_rings[1]

        # Make some test points
        points = generate_random_points_in_bbox(outer_ring, 1000, seed=13)

        # Clip to outer ring, excluding holes
        indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)

        # Sanity
        for point in points[indices, :]:
            # Must be inside outer ring
            assert is_inside_polygon(point, outer_ring)

            # But not in any of the inner rings
            assert not is_inside_polygon(point, inner_ring0)
            assert not is_inside_polygon(point, inner_ring1)

        if False:
            # Store for visual check
            pol = Vector(geometry=[P])
            tmp_filename = unique_filename(suffix='.shp')
            pol.write_to_file(tmp_filename)
            print 'Polygon with holes written to %s' % tmp_filename

            pts = Vector(geometry=points[indices, :])
            tmp_filename = unique_filename(suffix='.shp')
            pts.write_to_file(tmp_filename)
            print 'Clipped points written to %s' % tmp_filename
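The inside-the-outer-ring-but-not-in-a-hole check exercised by this test can be cross-checked independently with Shapely, which accepts interior rings directly. The sketch below reuses the ring coordinates from the test; Shapely is an outside library here, not something the test itself depends on.

import numpy
from shapely.geometry import Point, Polygon as ShapelyPolygon

outer_ring = [(106.79, -6.233), (106.80, -6.24), (106.78, -6.23),
              (106.77, -6.21), (106.79, -6.233)]
inner_rings = [
    [(106.77827, -6.2252), (106.77775, -6.22378), (106.78, -6.22311),
     (106.78017, -6.22530), (106.77827, -6.2252)],
    [(106.78652, -6.23215), (106.78642, -6.23075), (106.78746, -6.23143),
     (106.78831, -6.23307), (106.78652, -6.23215)]]

# A polygon with holes: outer shell plus a list of interior rings
polygon = ShapelyPolygon(outer_ring, inner_rings)

# Random points in the bounding box of the outer ring
numpy.random.seed(13)
minx, miny, maxx, maxy = polygon.bounds
points = numpy.column_stack([
    numpy.random.uniform(minx, maxx, 1000),
    numpy.random.uniform(miny, maxy, 1000)])

# contains() is False for points that fall inside one of the holes
inside = [p for p in points if polygon.contains(Point(p[0], p[1]))]
print('%d of %d points are inside the polygon but outside its holes'
      % (len(inside), len(points)))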
Example #17
0
def osm2padang(E):
    """Map OSM attributes to Padang vulnerability classes

    This maps attributes collected in the OpenStreetMap exposure data
    (data.kompetisiosm.org) to 9 vulnerability classes identified by
    Geoscience Australia and ITB in the post 2009 Padang earthquake
    survey (http://trove.nla.gov.au/work/38470066).
    The mapping was developed by Abigail Baca, World Bank-GFDRR.

    Input
        E: Vector object representing the OSM data

    Output:
        Vector object like E, but with one new attribute ('VCLASS')
        representing the vulnerability class used in the Padang dataset


    Algorithm

    1. Class the "levels" field into height bands where 1-3 = low,
       4-10 = mid, >10 = high
    2. Where height band = mid then building type = 4
       "RC medium rise Frame with Masonry in-fill walls"
    3. Where height band = high then building type = 6
       "Concrete Shear wall high rise* Hazus C2H"
    4. Where height band = low and structure = (plastered or
       reinforced_masonry) then building type = 7
       "RC low rise Frame with Masonry in-fill walls"
    5. Where height band = low and structure = confined_masonry then
       building type = 8 "Confined Masonry"
    6. Where height band = low and structure = unreinforced_masonry then
       building type = 2 "URM with Metal Roof"
    """

    # Input check
    required = ['levels', 'structure']
    actual = E.get_attribute_names()
    msg = ('Input data to osm2padang must have attributes %s. '
           'It has %s' % (str(required), str(actual)))
    for attribute in required:
        verify(attribute in actual, msg)

    # Start mapping
    N = len(E)
    attributes = E.get_data()
    count = 0
    for i in range(N):
        levels = E.get_data('levels', i)
        structure = E.get_data('structure', i)
        if levels is None or structure is None:
            vulnerability_class = 2
            count += 1
        else:
            # Map string variable levels to integer
            if levels.endswith('+'):
                levels = 100

            try:
                levels = int(levels)
            except:
                # E.g. 'ILP jalan'
                vulnerability_class = 2
                count += 1
            else:
                # Start mapping depending on levels
                if levels >= 10:
                    # High
                    vulnerability_class = 6  # Concrete shear
                elif 4 <= levels < 10:
                    # Mid
                    vulnerability_class = 4  # RC mid
                elif 1 <= levels < 4:
                    # Low
                    if structure in [
                            'plastered', 'reinforced masonry',
                            'reinforced_masonry'
                    ]:
                        vulnerability_class = 7  # RC low
                    elif structure == 'confined_masonry':
                        vulnerability_class = 8  # Confined
                    elif 'kayu' in structure or 'wood' in structure:
                        vulnerability_class = 9  # Wood
                    else:
                        vulnerability_class = 2  # URM
                elif numpy.allclose(levels, 0):
                    # A few buildings exist with 0 levels.

                    # In general, we should be assigning here the most
                    # frequent building in the area which could be defined
                    # by admin boundaries.
                    vulnerability_class = 2
                else:
                    msg = 'Unknown number of levels: %s' % levels
                    raise Exception(msg)

        # Store new attribute value
        attributes[i]['VCLASS'] = vulnerability_class

        # Selfcheck for use with osm_080811.shp
        if E.get_name() == 'osm_080811':
            if levels > 0:
                msg = ('Got %s expected %s. levels = %f, structure = %s' %
                       (vulnerability_class, attributes[i]['TestBLDGCl'],
                        levels, structure))
                verify(
                    numpy.allclose(attributes[i]['TestBLDGCl'],
                                   vulnerability_class), msg)

    #print 'Got %i without levels or structure (out of %i total)' % (count, N)

    # Create new vector instance and return
    V = Vector(data=attributes,
               projection=E.get_projection(),
               geometry=E.get_geometry(),
               name=E.get_name() + ' mapped to Padang vulnerability classes',
               keywords=E.get_keywords())
    return V
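The algorithm in the docstring is a two-step lookup: levels are binned into a height band, and the low band is then refined by structure type. Below is a condensed sketch of that decision table on plain values; the function name and the example calls are ours, not part of the module.

def osm_vulnerability_class(levels, structure):
    """Classify one OSM building into a Padang vulnerability class."""
    if levels is None or structure is None:
        return 2                      # Unknown -> URM with metal roof
    if str(levels).endswith('+'):
        levels = 100
    try:
        levels = int(levels)
    except ValueError:
        return 2                      # Non-numeric levels -> URM
    if levels >= 10:
        return 6                      # High band: concrete shear wall
    if levels >= 4:
        return 4                      # Mid band: RC medium rise
    if levels >= 1:
        # Low band: decide on structure type
        if structure in ('plastered', 'reinforced masonry',
                         'reinforced_masonry'):
            return 7                  # RC low rise
        if structure == 'confined_masonry':
            return 8                  # Confined masonry
        if 'kayu' in structure or 'wood' in structure:
            return 9                  # Wood
        return 2                      # URM with metal roof
    return 2                          # 0 levels: fall back to URM


print(osm_vulnerability_class(12, 'confined_masonry'))  # 6
print(osm_vulnerability_class(2, 'plastered'))          # 7
print(osm_vulnerability_class('3+', 'wood frame'))      # 6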
Example #18
0
    def run(self):
        """Experimental impact function."""

        # Get parameters from layer's keywords
        self.hazard_class_attribute = self.hazard.keyword('field')
        self.hazard_class_mapping = self.hazard.keyword('value_map')
        self.exposure_class_attribute = self.exposure.keyword(
            'structure_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        # Prepare Hazard Layer
        hazard_provider = self.hazard.layer.dataProvider()

        # Check affected field exists in the hazard layer
        affected_field_index = hazard_provider.fieldNameIndex(
            self.hazard_class_attribute)
        if affected_field_index == -1:
            message = tr(
                'Field "%s" is not present in the attribute table of the '
                'hazard layer. Please change the Affected Field parameter in '
                'the IF Option.') % self.hazard_class_attribute
            raise GetDataError(message)

        srs = self.exposure.layer.crs().toWkt()
        exposure_provider = self.exposure.layer.dataProvider()
        exposure_fields = exposure_provider.fields()

        # Check self.exposure_class_attribute exists in exposure layer
        building_type_field_index = exposure_provider.fieldNameIndex(
            self.exposure_class_attribute)
        if building_type_field_index == -1:
            message = tr(
                'Field "%s" is not present in the attribute table of '
                'the exposure layer. Please change the Building Type '
                'Field parameter in the IF Option.'
            ) % self.exposure_class_attribute
            raise GetDataError(message)

        # If target_field does not exist, add it:
        if exposure_fields.indexFromName(self.target_field) == -1:
            exposure_provider.addAttributes(
                [QgsField(self.target_field, QVariant.Int)])
        target_field_index = exposure_provider.fieldNameIndex(
            self.target_field)
        exposure_fields = exposure_provider.fields()

        # Create layer to store the buildings from E and extent
        buildings_are_points = is_point_layer(self.exposure.layer)
        if buildings_are_points:
            building_layer = QgsVectorLayer(
                'Point?crs=' + srs, 'impact_buildings', 'memory')
        else:
            building_layer = QgsVectorLayer(
                'Polygon?crs=' + srs, 'impact_buildings', 'memory')
        building_provider = building_layer.dataProvider()

        # Set attributes
        building_provider.addAttributes(exposure_fields.toList())
        building_layer.startEditing()
        building_layer.commitChanges()

        # Filter geometry and data using the requested extent
        requested_extent = QgsRectangle(*self.requested_extent)

        # This is a hack - we should be setting the extent CRS
        # in the IF base class via safe/engine/core.py:calculate_impact
        # for now we assume the extent is in 4326 because it
        # is set to that from geo_extent
        # See issue #1857
        transform = QgsCoordinateTransform(
            self.requested_extent_crs, self.hazard.crs())
        projected_extent = transform.transformBoundingBox(requested_extent)
        request = QgsFeatureRequest()
        request.setFilterRect(projected_extent)

        # Split building_layer by H and save as result:
        #   1) Filter from H inundated features
        #   2) Mark buildings as inundated (1) or not inundated (0)

        # make spatial index of affected polygons
        hazard_index = QgsSpatialIndex()
        hazard_geometries = {}  # key = feature id, value = geometry
        has_hazard_objects = False
        for feature in self.hazard.layer.getFeatures(request):
            value = feature[affected_field_index]
            if value not in self.hazard_class_mapping[self.wet]:
                continue
            hazard_index.insertFeature(feature)
            hazard_geometries[feature.id()] = QgsGeometry(feature.geometry())
            has_hazard_objects = True

        if not has_hazard_objects:
            message = tr(
                'There are no objects in the hazard layer with %s '
                'value in %s. Please check your data or use another '
                'attribute.') % (
                    self.hazard_class_attribute,
                    ', '.join(self.hazard_class_mapping[self.wet]))
            raise GetDataError(message)

        # Filter out just those EXPOSURE features in the analysis extents
        transform = QgsCoordinateTransform(
            self.requested_extent_crs, self.exposure.layer.crs())
        projected_extent = transform.transformBoundingBox(requested_extent)
        request = QgsFeatureRequest()
        request.setFilterRect(projected_extent)

        # We will use this transform to project each exposure feature into
        # the CRS of the Hazard.
        transform = QgsCoordinateTransform(
            self.exposure.crs(), self.hazard.crs())
        features = []
        for feature in self.exposure.layer.getFeatures(request):
            # Make a deep copy as the geometry is passed by reference
            # If we don't do this, subsequent operations will affect the
            # original feature geometry as well as the copy TS
            building_geom = QgsGeometry(feature.geometry())
            # Project the building geometry to hazard CRS
            building_bounds = transform.transform(building_geom.boundingBox())
            affected = False
            # get tentative list of intersecting hazard features
            # only based on intersection of bounding boxes
            ids = hazard_index.intersects(building_bounds)
            # Project the building geometry to the hazard CRS once,
            # before the exact intersection tests below
            building_geom.transform(transform)
            for fid in ids:
                # run (slow) exact intersection test
                if hazard_geometries[fid].intersects(building_geom):
                    affected = True
                    break
            new_feature = QgsFeature()
            # We write out the original feature geom, not the projected one
            new_feature.setGeometry(feature.geometry())
            new_feature.setAttributes(feature.attributes())
            new_feature[target_field_index] = 1 if affected else 0
            features.append(new_feature)

            # every once in a while commit the created features
            # to the output layer
            if len(features) == 1000:
                (_, __) = building_provider.addFeatures(features)
                features = []

        (_, __) = building_provider.addFeatures(features)
        building_layer.updateExtents()

        # Generate simple impact report
        hazard_classes = [tr('Flooded')]
        self.init_report_var(hazard_classes)

        buildings_data = building_layer.getFeatures()
        building_type_field_index = building_layer.fieldNameIndex(
            self.exposure_class_attribute)
        for building in buildings_data:
            record = building.attributes()

            usage = record[building_type_field_index]
            usage = main_type(usage, exposure_value_mapping)

            affected = False
            if record[target_field_index] == 1:
                affected = True

            self.classify_feature(hazard_classes[0], usage, affected)

        self.reorder_dictionaries()

        style_classes = [
            dict(label=tr('Not Inundated'), value=0, colour='#1EFC7C',
                 transparency=0, size=0.5),
            dict(label=tr('Inundated'), value=1, colour='#F31A1C',
                 transparency=0, size=0.5)]
        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        # Convert QgsVectorLayer to inasafe layer and return it.
        if building_layer.featureCount() < 1:
            raise ZeroImpactException(tr(
                'No buildings were impacted by this flood.'))

        impact_data = self.generate_data()

        extra_keywords = {
            'map_title': self.metadata().key('map_title'),
            'legend_title': self.metadata().key('legend_title'),
            'target_field': self.target_field,
            'buildings_total': self.total_buildings,
            'buildings_affected': self.total_affected_buildings
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        impact_layer = Vector(
            data=building_layer,
            name=self.metadata().key('layer_name'),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
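The heart of this function is the two-stage intersection test: a cheap bounding-box query against the spatial index first, then the slow exact test only for the candidates that survive it. Stripped of the QGIS machinery, the pattern looks roughly like the sketch below, with Shapely standing in for QgsGeometry and a plain dictionary standing in for QgsSpatialIndex.

from shapely.geometry import box

# Hazard polygons, keyed by feature id (axis-aligned boxes for brevity)
hazard_geometries = {
    1: box(0.0, 0.0, 2.0, 2.0),
    2: box(5.0, 5.0, 8.0, 8.0),
}
# Poor man's spatial index: feature id -> bounding box
hazard_bboxes = {fid: geom.bounds for fid, geom in hazard_geometries.items()}


def bbox_overlaps(a, b):
    """Cheap overlap test on (minx, miny, maxx, maxy) tuples."""
    return not (a[2] < b[0] or b[2] < a[0] or a[3] < b[1] or b[3] < a[1])


def is_affected(building_geom):
    """Two-stage test: bounding boxes first, exact intersection second."""
    candidates = [fid for fid, bbox in hazard_bboxes.items()
                  if bbox_overlaps(building_geom.bounds, bbox)]
    return any(hazard_geometries[fid].intersects(building_geom)
               for fid in candidates)


print(is_affected(box(1.5, 1.5, 3.0, 3.0)))   # True, overlaps hazard 1
print(is_affected(box(3.0, 3.0, 4.0, 4.0)))   # False, no candidates at all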
Example #19
0
    def run(self):
        """Risk plugin for classified polygon hazard on building/structure.

        Counts the number of buildings exposed to each hazard zone.

        :returns: Impact vector layer of buildings exposed to each hazard
            zone. Table with number of buildings affected
        :rtype: Vector
        """

        # Value from layer's keywords
        self.hazard_class_attribute = self.hazard.keyword('field')
        self.hazard_class_mapping = self.hazard.keyword('value_map')
        self.exposure_class_attribute = self.exposure.keyword(
            'structure_class_field')
        try:
            exposure_value_mapping = self.exposure.keyword('value_mapping')
        except KeywordNotFoundError:
            # Generic IF; the keyword might not be defined (see base.py)
            exposure_value_mapping = {}

        # Retrieve the classification that is used by the hazard layer.
        vector_hazard_classification = self.hazard.keyword(
            'vector_hazard_classification')
        # Get the dictionary that contains the definition of the classification
        vector_hazard_classification = definition(vector_hazard_classification)
        # Get the list classes in the classification
        vector_hazard_classes = vector_hazard_classification['classes']
        # Iterate over vector hazard classes
        hazard_classes = []
        for vector_hazard_class in vector_hazard_classes:
            # Check if the key of class exist in hazard_class_mapping
            if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
                # Replace the key with the name as we need to show the human
                # friendly name in the report.
                self.hazard_class_mapping[vector_hazard_class['name']] = \
                    self.hazard_class_mapping.pop(vector_hazard_class['key'])
                # Adding the class name as a key in affected_building
                hazard_classes.append(vector_hazard_class['name'])

        hazard_zone_attribute_index = self.hazard.layer.fieldNameIndex(
            self.hazard_class_attribute)

        # Check if hazard_zone_attribute exists in hazard_layer
        if hazard_zone_attribute_index < 0:
            message = (
                'Hazard data %s does not contain expected attribute %s ' %
                (self.hazard.layer.name(), self.hazard_class_attribute))
            # noinspection PyExceptionInherit
            raise InaSAFEError(message)

        # Hazard zone categories from hazard layer
        unique_values = self.hazard.layer.uniqueValues(
            hazard_zone_attribute_index)
        # Values might be integer or float, we should have unicode. #2626
        self.hazard_zones = [get_unicode(val) for val in unique_values]

        self.init_report_var(hazard_classes)

        wgs84_extent = QgsRectangle(
            self.requested_extent[0], self.requested_extent[1],
            self.requested_extent[2], self.requested_extent[3])

        # Run interpolation function for polygon2polygon
        interpolated_layer = interpolate_polygon_polygon(
            self.hazard.layer, self.exposure.layer, wgs84_extent)

        new_field = QgsField(self.target_field, QVariant.String)
        interpolated_layer.dataProvider().addAttributes([new_field])
        interpolated_layer.updateFields()

        target_field_index = interpolated_layer.fieldNameIndex(
            self.target_field)
        changed_values = {}

        if interpolated_layer.featureCount() < 1:
            raise ZeroImpactException()

        # Extract relevant interpolated data
        for feature in interpolated_layer.getFeatures():
            # Get the hazard value based on the value mapping in keyword
            hazard_value = get_key_for_value(
                    feature[self.hazard_class_attribute],
                    self.hazard_class_mapping)
            if not hazard_value:
                hazard_value = self._not_affected_value
            changed_values[feature.id()] = {target_field_index: hazard_value}

            usage = feature[self.exposure_class_attribute]
            usage = main_type(usage, exposure_value_mapping)

            affected = False
            if hazard_value in self.hazard_class_mapping.keys():
                affected = True

            self.classify_feature(hazard_value, usage, affected)

        interpolated_layer.dataProvider().changeAttributeValues(changed_values)

        self.reorder_dictionaries()

        # Create style
        categories = self.affected_buildings.keys()
        categories.append(self._not_affected_value)
        colours = color_ramp(len(categories))
        style_classes = []

        for i, hazard_zone in enumerate(self.affected_buildings.keys()):
            style_class = dict()
            style_class['label'] = tr(hazard_zone)
            style_class['transparency'] = 0
            style_class['value'] = hazard_zone
            style_class['size'] = 1
            style_class['colour'] = colours[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol'
        )

        impact_data = self.generate_data()

        extra_keywords = {
            'target_field': self.target_field,
            'map_title': self.map_title(),
            'legend_notes': self.metadata().key('legend_notes'),
            'legend_units': self.metadata().key('legend_units'),
            'legend_title': self.metadata().key('legend_title')
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Create vector layer and return
        impact_layer = Vector(
            data=interpolated_layer,
            name=self.map_title(),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
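The loop that swaps classification keys for display names is just a key rename on the value map. A tiny standalone illustration with an invented classification and value map:

# Classes as they come from a vector hazard classification definition
vector_hazard_classes = [
    {'key': 'wet', 'name': 'Wet zone'},
    {'key': 'dry', 'name': 'Dry zone'},
]

# value_map keyword: class key -> attribute values used in the layer
hazard_class_mapping = {'wet': ['1', 'YES'], 'dry': ['0', 'NO']}

hazard_classes = []
for vector_hazard_class in vector_hazard_classes:
    key = vector_hazard_class['key']
    if key in hazard_class_mapping:
        # Rename the key to the display name used in the report
        hazard_class_mapping[vector_hazard_class['name']] = \
            hazard_class_mapping.pop(key)
        hazard_classes.append(vector_hazard_class['name'])

print(hazard_class_mapping)  # e.g. {'Wet zone': ['1', 'YES'], 'Dry zone': ['0', 'NO']}
print(hazard_classes)        # ['Wet zone', 'Dry zone']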
Example #20
0
    def run(self):
        """Risk plugin for volcano hazard on building/structure.

        Counts the number of buildings exposed to each volcano hazard zone.

        :returns: Map of buildings exposed to volcanic hazard zones.
                  Table with number of buildings affected
        :rtype: Vector
        """

        # Get parameters from layer's keywords
        self.hazard_class_attribute = self.hazard.keyword('field')
        self.name_attribute = self.hazard.keyword('volcano_name_field')
        self.hazard_class_mapping = self.hazard.keyword('value_map')
        self.exposure_class_attribute = self.exposure.keyword(
            'structure_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        # Input checks
        if not self.hazard.layer.is_polygon_data:
            message = (
                'Input hazard must be a polygon. I got %s with '
                'layer type %s' %
                (self.hazard.name, self.hazard.layer.get_geometry_name()))
            raise Exception(message)

        # Check if hazard_zone_attribute exists in hazard_layer
        if (self.hazard_class_attribute not in
                self.hazard.layer.get_attribute_names()):
            message = (
                'Hazard data %s did not contain expected attribute %s ' %
                (self.hazard.name, self.hazard_class_attribute))
            # noinspection PyExceptionInherit
            raise InaSAFEError(message)

        # Get names of volcanoes considered
        if self.name_attribute in self.hazard.layer.get_attribute_names():
            volcano_name_list = set()
            for row in self.hazard.layer.get_data():
                # Run through all polygons and get unique names
                volcano_name_list.add(row[self.name_attribute])
            self.volcano_names = ', '.join(volcano_name_list)
        else:
            self.volcano_names = tr('Not specified in data')

        # Retrieve the classification that is used by the hazard layer.
        vector_hazard_classification = self.hazard.keyword(
            'vector_hazard_classification')
        # Get the dictionary that contains the definition of the classification
        vector_hazard_classification = definition(vector_hazard_classification)
        # Get the list classes in the classification
        vector_hazard_classes = vector_hazard_classification['classes']
        # Initialize OrderedDict of affected buildings
        hazard_class = []
        # Iterate over vector hazard classes
        for vector_hazard_class in vector_hazard_classes:
            # Check if the key of class exist in hazard_class_mapping
            if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
                # Replace the key with the name as we need to show the human
                # friendly name in the report.
                self.hazard_class_mapping[vector_hazard_class['name']] = \
                    self.hazard_class_mapping.pop(vector_hazard_class['key'])
                # Adding the class name as a key in affected_building
                hazard_class.append(vector_hazard_class['name'])

        # Run interpolation function for polygon2raster
        interpolated_layer = assign_hazard_values_to_exposure_data(
            self.hazard.layer, self.exposure.layer)

        # Extract relevant exposure data
        features = interpolated_layer.get_data()

        self.init_report_var(hazard_class)

        for i in range(len(features)):
            # Get the hazard value based on the value mapping in keyword
            hazard_value = get_key_for_value(
                    features[i][self.hazard_class_attribute],
                    self.hazard_class_mapping)
            if not hazard_value:
                hazard_value = self._not_affected_value
            features[i][self.target_field] = get_string(hazard_value)

            usage = features[i][self.exposure_class_attribute]
            usage = main_type(usage, exposure_value_mapping)

            affected = False
            if hazard_value in self.affected_buildings.keys():
                affected = True

            self.classify_feature(hazard_value, usage, affected)

        self.reorder_dictionaries()

        # Create style
        colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
                   '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
        colours = colours[::-1]  # flip

        colours = colours[:len(self.affected_buildings.keys())]

        style_classes = []

        for i, category_name in enumerate(self.affected_buildings.keys()):
            style_class = dict()
            style_class['label'] = tr(category_name)
            style_class['transparency'] = 0
            style_class['value'] = category_name
            style_class['size'] = 1

            if i >= len(self.affected_buildings.keys()):
                i = len(self.affected_buildings.keys()) - 1
            style_class['colour'] = colours[i]

            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'target_field': self.target_field,
            'map_title': self.metadata().key('map_title'),
            'legend_notes': self.metadata().key('legend_notes'),
            'legend_units': self.metadata().key('legend_units'),
            'legend_title': self.metadata().key('legend_title')
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Create vector layer and return
        impact_layer = Vector(
            data=features,
            projection=interpolated_layer.get_projection(),
            geometry=interpolated_layer.get_geometry(),
            name=self.metadata().key('layer_name'),
            keywords=impact_layer_keywords,
            style_info=style_info
        )

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
Example #21
0
    def run(self):
        """Tsunami raster impact to buildings (e.g. from Open Street Map)."""

        # Range for ash hazard
        group_parameters = self.parameters['group_threshold']
        unaffected_max = group_parameters.value_map[
            'unaffected_threshold'].value
        very_low_max = group_parameters.value_map['very_low_threshold'].value
        low_max = group_parameters.value_map['low_threshold'].value
        medium_max = group_parameters.value_map['moderate_threshold'].value
        high_max = group_parameters.value_map['high_threshold'].value

        # Interpolate hazard level to building locations
        interpolated_layer = assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=self.target_field)

        # Extract relevant exposure data
        features = interpolated_layer.get_data()
        total_features = len(interpolated_layer)

        try:
            population_field = self.exposure.keyword('population_field')
        except KeywordNotFoundError:
            population_field = None

        # required for real time
        self.exposure.keyword('name_field')

        structure_class_field = self.exposure.keyword('structure_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        self.init_report_var(self.hazard_classes)

        for i in range(total_features):
            # Get the interpolated depth
            ash_hazard_zone = float(features[i][self.target_field])
            if ash_hazard_zone <= unaffected_max:
                current_hash_zone = 0  # not affected
            elif unaffected_max < ash_hazard_zone <= very_low_max:
                current_hash_zone = 1  # very low
            elif very_low_max < ash_hazard_zone <= low_max:
                current_hash_zone = 2  # low
            elif low_max < ash_hazard_zone <= medium_max:
                current_hash_zone = 2  # medium
            elif medium_max < ash_hazard_zone <= high_max:
                current_hash_zone = 3  # high
            elif high_max < ash_hazard_zone:
                current_hash_zone = 4  # very high
            # NaN or any other value that fails the comparisons above
            else:
                current_hash_zone = 0

            usage = features[i].get(structure_class_field, None)
            usage = main_type(usage, exposure_value_mapping)

            # Add calculated impact to existing attributes
            features[i][self.target_field] = current_hash_zone
            category = self.hazard_classes[current_hash_zone]

            if population_field is not None:
                population = float(features[i][population_field])
            else:
                population = 1

            self.classify_feature(category, usage, population, True)

        self.reorder_dictionaries()

        style_classes = [
            dict(
                label=self.hazard_classes[0] + ': >%.1f - %.1f cm' % (
                    unaffected_max, very_low_max),
                value=0,
                colour='#00FF00',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[1] + ': >%.1f - %.1f cm' % (
                    very_low_max, low_max),
                value=1,
                colour='#FFFF00',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[2] + ': >%.1f - %.1f cm' % (
                    low_max, medium_max),
                value=2,
                colour='#FFB700',
                transparency=0,
                size=1
            ),
            dict(
                label=self.hazard_classes[3] + ': >%.1f - %.1f cm' % (
                    medium_max, high_max),
                value=3,
                colour='#FF6F00',
                transparency=0,
                size=1
            ),

            dict(
                label=self.hazard_classes[4] + ': >%.1f cm' % high_max,
                value=4,
                colour='#FF0000',
                transparency=0,
                size=1
            ),
        ]

        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'target_field': self.target_field,
            'map_title': self.metadata().key('map_title'),
            'legend_title': self.metadata().key('legend_title'),
            'legend_units': self.metadata().key('legend_units'),
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        impact_layer = Vector(
            data=features,
            projection=interpolated_layer.get_projection(),
            geometry=interpolated_layer.get_geometry(),
            name=self.metadata().key('layer_name'),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
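The if/elif threshold cascade above can also be expressed as a single lookup against a sorted list of band boundaries; bisect_left reproduces the 'value <= upper bound' semantics because values equal to a boundary stay in the lower band. The thresholds below are invented, and unlike the chain above this sketch gives each of the six intervals its own band index.

from bisect import bisect_left

# Upper bounds (cm) of the unaffected, very low, low, moderate and high bands
thresholds = [0.01, 0.1, 1.0, 10.0, 30.0]
band_names = ['Unaffected', 'Very Low', 'Low', 'Moderate', 'High', 'Very High']


def ash_band(thickness):
    """Return the band index for an ash thickness value in cm."""
    # bisect_left keeps values equal to an upper bound in the lower band,
    # matching the <= comparisons in the if/elif chain
    return bisect_left(thresholds, thickness)


for value in (0.005, 0.1, 5.0, 50.0):
    print('%6.3f cm -> %s' % (value, band_names[ash_band(value)]))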
Example #22
0
def process_flood_event(netcdf_file=None, hours=24):
    """A function to process this_netcdf_file to a forecast file.

    :param netcdf_file: The netcdf file. If it's None the download it.

    :param hours: Positive integer determining how many bands to use.
    :type hours: int
    """
    print 'Start flood forecasting'

    if netcdf_file is None:
        # retrieve data from the web
        netcdf_file = download_file_url(netcdf_url, forecast_directory)
    else:
        netcdf_file = download_file_url(netcdf_url,
                                        name=netcdf_file,
                                        download_directory=forecast_directory)
    print 'Do flood forecasting for %s ...' % netcdf_file

    ## check if a forecasting file has been created or not
    # is_exist, polyforecast_filepath = get_result_file_name(this_netcdf_file,
    # hours)
    #
    #if is_exist:
    #    print 'Current flood forecasting has been already created.'
    #    print 'You can look it at %s' % polyforecast_filepath
    #    return

    # convert to tif
    # tif_file = polyforecast_filepath.replace('_regions.shp', '.tif')
    tif_filename = convert_netcdf2tif(netcdf_file,
                                      hours,
                                      verbose=False,
                                      output_dir=flood_directory)
    print 'tif_file', tif_filename
    tif_file = read_layer(tif_filename)

    # check if there is another file with the same name
    # if so, do not do the forecasting
    polyforecast_filepath = tif_filename.replace('.tif', '_regions.shp')
    zip_filename = polyforecast_filepath.replace('.shp', '.zip')
    if os.path.isfile(zip_filename):
        print('File %s exists, so we do not do the forecasting' %
              zip_filename)
    else:
        polygons = read_layer(polygons_path)
        result = tag_polygons_by_grid(polygons,
                                      tif_file,
                                      threshold=0.3,
                                      tag='affected')

        new_geom = result.get_geometry()
        new_data = result.get_data()

        date = os.path.split(netcdf_file)[-1].split('_')[0]

        v = Vector(geometry=new_geom,
                   data=new_data,
                   projection=result.projection,
                   keywords={
                       'category':
                       'hazard',
                       'subcategory':
                       'flood',
                       'title': ('%d hour flood forecast regions '
                                 'in Jakarta at %s' % (hours, date))
                   })

        print 'polyforecast_filepath', polyforecast_filepath
        v.write_to_file(polyforecast_filepath)
        print 'Wrote tagged polygons to %s' % polyforecast_filepath

    # zip all file
    if os.path.isfile(zip_filename):
        print 'Has been zipped to %s' % zip_filename
    else:
        zip_shp(polyforecast_filepath,
                extra_ext=['.keywords'],
                remove_file=True)
        print 'Zipped to %s' % zip_filename
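zip_shp bundles the shapefile with its sidecar files; the same step can be approximated with the standard library alone. The sketch below assumes the usual .shp/.shx/.dbf/.prj sidecars plus the .keywords file and is only an illustration, not the project's zip_shp implementation.

import os
import zipfile


def zip_shapefile(shp_path, extra_ext=('.keywords',), remove_file=False):
    """Zip a shapefile together with its sidecar files."""
    base, _ = os.path.splitext(shp_path)
    zip_path = base + '.zip'
    extensions = ['.shp', '.shx', '.dbf', '.prj'] + list(extra_ext)
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as archive:
        for ext in extensions:
            part = base + ext
            if os.path.isfile(part):
                # arcname keeps only the file name, not the full path
                archive.write(part, os.path.basename(part))
                if remove_file:
                    os.remove(part)
    return zip_path

# zip_shapefile('jakarta_flood_regions.shp')  # hypothetical file name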
Example #23
0
    except (BoundsError, InaSAFEError), e:
        msg = (tr('Could not interpolate from raster layer %(raster)s to '
                  'vector layer %(vector)s. Error message: %(error)s') % {
                      'raster': source.get_name(),
                      'vector': target.get_name(),
                      'error': str(e)
                  })
        raise InaSAFEError(msg)

    # Add interpolated attribute to existing attributes and return
    N = len(target)
    for i in range(N):
        attributes[i][attribute_name] = values[i]

    return Vector(data=attributes,
                  projection=target.get_projection(),
                  geometry=coordinates,
                  name=layer_name)


def interpolate_polygon_points(source, target, layer_name=None):
    """Interpolate from polygon vector layer to point vector data

    Args:
        * source: Vector data set (polygon)
        * target: Vector data set (points)
        * layer_name: Optional name of returned interpolated layer.
              If None the name of target is used for the returned layer.

    Output
        I: Vector data set; points located as target with values interpolated
        from source
Example #24
0
    def run(self):
        """Run the impact function.

        :returns: A new line layer with inundated roads marked.
        :rtype: Vector
        """

        target_field = self.target_field

        # Get parameters from layer's keywords
        road_class_field = self.exposure.keyword('road_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        # Get parameters from IF parameter
        threshold_min = self.parameters['min threshold'].value
        threshold_max = self.parameters['max threshold'].value

        if threshold_min > threshold_max:
            message = tr(
                'The minimal threshold is greater than the maximal specified '
                'threshold. Please check the values.')
            raise GetDataError(message)

        # reproject self.extent to the hazard projection
        hazard_crs = self.hazard.layer.crs()
        hazard_authid = hazard_crs.authid()

        if hazard_authid == 'EPSG:4326':
            viewport_extent = self.requested_extent
        else:
            geo_crs = QgsCoordinateReferenceSystem()
            geo_crs.createFromSrid(4326)
            viewport_extent = extent_to_geo_array(
                QgsRectangle(*self.requested_extent), geo_crs, hazard_crs)

        # Clip hazard raster
        small_raster = align_clip_raster(self.hazard.layer, viewport_extent)

        # Filter geometry and data using the extent
        ct = QgsCoordinateTransform(
            QgsCoordinateReferenceSystem("EPSG:4326"),
            self.exposure.layer.crs())
        extent = ct.transformBoundingBox(QgsRectangle(*self.requested_extent))
        request = QgsFeatureRequest()
        request.setFilterRect(extent)

        # create template for the output layer
        line_layer_tmp = create_layer(self.exposure.layer)
        new_field = QgsField(target_field, QVariant.Int)
        line_layer_tmp.dataProvider().addAttributes([new_field])
        line_layer_tmp.updateFields()

        # create empty output layer and load it
        filename = unique_filename(suffix='.shp')
        QgsVectorFileWriter.writeAsVectorFormat(
            line_layer_tmp, filename, "utf-8", None, "ESRI Shapefile")
        line_layer = QgsVectorLayer(filename, "flooded roads", "ogr")

        # Create vector features from the flood raster
        # For each raster cell there is one rectangular polygon
        # Data also get spatially indexed for faster operation
        index, flood_cells_map = _raster_to_vector_cells(
            small_raster,
            threshold_min,
            threshold_max,
            self.exposure.layer.crs())

        if len(flood_cells_map) == 0:
            message = tr(
                'There are no objects in the hazard layer with "value" > %s. '
                'Please check the value or use another extent.' % (
                    threshold_min, ))
            raise GetDataError(message)

        # Do the heavy work - for each road get flood polygon for that area and
        # do the intersection/difference to find out which parts are flooded
        _intersect_lines_with_vector_cells(
            self.exposure.layer,
            request,
            index,
            flood_cells_map,
            line_layer,
            target_field)

        target_field_index = line_layer.dataProvider().\
            fieldNameIndex(target_field)

        # Generate simple impact report
        epsg = get_utm_epsg(self.requested_extent[0], self.requested_extent[1])
        output_crs = QgsCoordinateReferenceSystem(epsg)
        transform = QgsCoordinateTransform(
            self.exposure.layer.crs(), output_crs)

        classes = [tr('Flooded in the threshold (m)')]
        self.init_report_var(classes)

        if line_layer.featureCount() < 1:
            raise ZeroImpactException()

        roads_data = line_layer.getFeatures()
        road_type_field_index = line_layer.fieldNameIndex(road_class_field)

        for road in roads_data:
            attributes = road.attributes()

            usage = attributes[road_type_field_index]
            usage = main_type(usage, exposure_value_mapping)

            geom = road.geometry()
            geom.transform(transform)
            length = geom.length()

            affected = False
            if attributes[target_field_index] == 1:
                affected = True

            self.classify_feature(classes[0], usage, length, affected)

        self.reorder_dictionaries()

        style_classes = [
            dict(
                label=tr('Not Inundated'), value=0,
                colour='#1EFC7C', transparency=0, size=0.5),
            dict(
                label=tr('Inundated'), value=1,
                colour='#F31A1C', transparency=0, size=0.5)]
        style_info = dict(
            target_field=target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'map_title': self.metadata().key('map_title'),
            'legend_title': self.metadata().key('legend_title'),
            'target_field': target_field
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Convert QgsVectorLayer to inasafe layer and return it
        impact_layer = Vector(
            data=line_layer,
            name=self.metadata().key('layer_name'),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
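_raster_to_vector_cells first selects the cells whose depth lies between the two thresholds before polygonising them. That selection is a simple band-pass mask; the sketch below shows it on an invented depth grid with numpy, and whether the bounds are inclusive is an assumption made here.

import numpy

# Toy flood depth grid in metres (rows x columns)
depth = numpy.array([[0.0, 0.2, 1.5],
                     [0.6, 2.5, 0.9],
                     [0.0, 1.1, 3.0]])

threshold_min = 0.5
threshold_max = 2.0

# Cells considered flooded: depth within [threshold_min, threshold_max]
flooded = (depth >= threshold_min) & (depth <= threshold_max)

print(flooded)
print('%d flooded cells out of %d' % (flooded.sum(), flooded.size))
# Row/column indices of the flooded cells, e.g. to build one polygon per cell
rows, cols = numpy.nonzero(flooded)
print(list(zip(rows.tolist(), cols.tolist())))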
Example #25
0
def interpolate_polygon_points(source, target, layer_name=None):
    """Interpolate from polygon vector layer to point vector data

    Args:
        * source: Vector data set (polygon)
        * target: Vector data set (points)
        * layer_name: Optional name of returned interpolated layer.
              If None the name of target is used for the returned layer.

    Output
        I: Vector data set; points located as target with values interpolated
        from source

    Note
        All attribute names from polygons are transferred to the points
        that are inside them.
    """

    msg = ('Vector layer to interpolate to must be point geometry. '
           'I got OGR geometry type %s' %
           geometrytype2string(target.geometry_type))
    verify(target.is_point_data, msg)

    msg = ('Name must be either a string or None. I got %s' %
           (str(type(layer_name)))[1:-1])
    verify(layer_name is None or isinstance(layer_name, basestring), msg)

    attribute_names = source.get_attribute_names()

    #----------------
    # Start algorithm
    #----------------

    # Extract point features
    points = ensure_numeric(target.get_geometry())
    attributes = target.get_data()
    original_geometry = target.get_geometry()  # Geometry for returned data

    # Extract polygon features
    geom = source.get_geometry(as_geometry_objects=True)
    data = source.get_data()
    verify(len(geom) == len(data))

    # Include polygon_id as attribute
    attribute_names.append('polygon_id')
    attribute_names.append(DEFAULT_ATTRIBUTE)

    # Augment point features with empty attributes from polygon
    for a in attributes:
        # Create all attributes that exist in source
        for key in attribute_names:
            a[key] = None

    # Traverse polygons and assign attributes to points that fall inside
    for i, polygon in enumerate(geom):
        # Carry all attributes across from source
        poly_attr = data[i]

        # Assign default attribute to indicate points inside
        poly_attr[DEFAULT_ATTRIBUTE] = True

        # Clip data points by polygons and add polygon attributes
        indices = inside_polygon(points,
                                 polygon.outer_ring,
                                 holes=polygon.inner_rings)

        for k in indices:
            for key in poly_attr:
                # Assign attributes from polygon to points
                attributes[k][key] = poly_attr[key]
            attributes[k]['polygon_id'] = i  # Store id for associated polygon

    # Create new Vector instance and return
    V = Vector(data=attributes,
               projection=target.get_projection(),
               geometry=original_geometry,
               name=layer_name)
    return V
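The attribute-transfer rule documented above (a point inside a polygon inherits that polygon's attributes plus a polygon_id) can be sketched independently with Shapely and plain dictionaries; the data and names below are invented and this is not the inasafe API.

from shapely.geometry import Point, Polygon

# Source: polygons with attributes
polygons = [Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
            Polygon([(5, 5), (9, 5), (9, 9), (5, 9)])]
polygon_data = [{'affected': 'yes'}, {'affected': 'no'}]

# Target: points with their own attributes
points = [(1.0, 1.0), (6.0, 6.0), (10.0, 10.0)]
point_data = [{'name': 'A'}, {'name': 'B'}, {'name': 'C'}]

for i, polygon in enumerate(polygons):
    for k, (x, y) in enumerate(points):
        if polygon.contains(Point(x, y)):
            # Carry polygon attributes across and remember the polygon id
            point_data[k].update(polygon_data[i])
            point_data[k]['polygon_id'] = i

print(point_data)
# e.g. [{'name': 'A', 'affected': 'yes', 'polygon_id': 0},
#       {'name': 'B', 'affected': 'no', 'polygon_id': 1},
#       {'name': 'C'}]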
Example #26
0
    def run(self):
        """Classified hazard impact to buildings (e.g. from Open Street Map).
        """

        # Value from layer's keywords

        structure_class_field = self.exposure.keyword('structure_class_field')
        try:
            exposure_value_mapping = self.exposure.keyword('value_mapping')
        except KeywordNotFoundError:
            # Generic IF; the keyword might not be defined (see base.py)
            exposure_value_mapping = {}

        # The 3 classes
        categorical_hazards = self.parameters['Categorical hazards'].value
        low_t = categorical_hazards[0].value
        medium_t = categorical_hazards[1].value
        high_t = categorical_hazards[2].value

        # Determine attribute name for hazard levels
        if self.hazard.layer.is_raster:
            hazard_attribute = 'level'
        else:
            hazard_attribute = None

        interpolated_result = assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=hazard_attribute,
            mode='constant')

        # Extract relevant exposure data
        attributes = interpolated_result.get_data()

        buildings_total = len(interpolated_result)

        hazard_classes = [
            tr('Low Hazard Class'),
            tr('Medium Hazard Class'),
            tr('High Hazard Class')
        ]
        self.init_report_var(hazard_classes)

        for i in range(buildings_total):
            usage = attributes[i][structure_class_field]
            usage = main_type(usage, exposure_value_mapping)

            # Count all buildings by type
            attributes[i][self.target_field] = 0
            attributes[i][self.affected_field] = 0
            level = float(attributes[i]['level'])
            level = float(numpy_round(level))
            if level == high_t:
                impact_level = tr('High Hazard Class')
            elif level == medium_t:
                impact_level = tr('Medium Hazard Class')
            elif level == low_t:
                impact_level = tr('Low Hazard Class')
            else:
                continue

            # Add calculated impact to existing attributes
            attributes[i][self.target_field] = {
                tr('High Hazard Class'): 3,
                tr('Medium Hazard Class'): 2,
                tr('Low Hazard Class'): 1
            }[impact_level]
            attributes[i][self.affected_field] = 1
            # Count affected buildings by type
            self.classify_feature(impact_level, usage, True)

        self.reorder_dictionaries()

        # Consolidate the small building usage groups < 25 to other
        # Building threshold #2468
        postprocessors = self.parameters['postprocessors']
        building_postprocessors = postprocessors['BuildingType'][0]
        self.building_report_threshold = building_postprocessors.value[0].value
        self._consolidate_to_other()

        # Create style
        style_classes = [
            dict(
                label=tr('Not Affected'),
                value=None,
                colour='#1EFC7C',
                transparency=0,
                size=2,
                border_color='#969696',
                border_width=0.2),
            dict(
                label=tr('Low'),
                value=1,
                colour='#EBF442',
                transparency=0,
                size=2,
                border_color='#969696',
                border_width=0.2),
            dict(
                label=tr('Medium'),
                value=2,
                colour='#F4A442',
                transparency=0,
                size=2,
                border_color='#969696',
                border_width=0.2),
            dict(
                label=tr('High'),
                value=3,
                colour='#F31A1C',
                transparency=0,
                size=2,
                border_color='#969696',
                border_width=0.2),
        ]
        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'target_field': self.affected_field,
            'map_title': self.metadata().key('map_title'),
            'legend_units': self.metadata().key('legend_units'),
            'legend_title': self.metadata().key('legend_title'),
            'buildings_total': buildings_total,
            'buildings_affected': self.total_affected_buildings
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Create impact layer and return
        impact_layer = Vector(
            data=attributes,
            projection=self.exposure.layer.get_projection(),
            geometry=self.exposure.layer.get_geometry(),
            name=self.metadata().key('layer_name'),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
Example #27
0
def interpolate_polygon_lines(source, target, layer_name=None):
    """Interpolate from polygon vector layer to line vector data

    Args:
        * source: Vector data set (polygon)
        * target: Vector data set (lines)
        * layer_name: Optional name of returned interpolated layer.
              If None the name of target is used for the returned layer.

    Returns:
        Vector data set of lines inside polygons
           Attributes are combined from polygon they fall into and
           line that was clipped.

           Lines not in any polygon are ignored.
    """

    # Extract line features
    lines = target.get_geometry()
    line_attributes = target.get_data()
    N = len(target)
    verify(len(lines) == N)
    verify(len(line_attributes) == N)

    # Extract polygon features
    polygons = source.get_geometry()
    polygon_attributes = source.get_data()
    verify(len(polygons) == len(polygon_attributes))

    # Data structure for resulting line segments
    #clipped_geometry = []
    #clipped_attributes = []

    # Clip line lines to polygons
    lines_covered = clip_lines_by_polygons(lines, polygons)

    # Create one new line data layer with joined attributes
    # from polygons and lines
    new_geometry = []
    new_attributes = []
    for i in range(len(polygons)):
        # Loop over polygons

        for j in lines_covered[i]:
            # Loop over parent lines

            lines = lines_covered[i][j]
            for line in lines:
                # Loop over lines clipped from line j by polygon i

                # Associated polygon and line attributes
                attr = polygon_attributes[i].copy()
                attr.update(line_attributes[j].copy())
                attr['polygon_id'] = i  # Store id for associated polygon
                attr['parent_line_id'] = j  # Store id for parent line
                attr[DEFAULT_ATTRIBUTE] = True

                # Store new line feature
                new_geometry.append(line)
                new_attributes.append(attr)

    R = Vector(data=new_attributes,
               projection=source.get_projection(),
               geometry=new_geometry,
               geometry_type='line',
               name=layer_name)
    return R
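clip_lines_by_polygons does the geometric half of this join; per polygon it is a plain intersection. Below is a compact Shapely sketch of clipping one line against one polygon and collecting the pieces, again as an outside illustration rather than the inasafe implementation.

from shapely.geometry import LineString, MultiLineString, Polygon

polygon = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
line = LineString([(-2, 2), (2, 2), (2, 6)])  # enters and leaves the polygon

clipped = line.intersection(polygon)

# The result may be empty, a single LineString or a MultiLineString
if clipped.is_empty:
    pieces = []
elif isinstance(clipped, MultiLineString):
    pieces = list(clipped.geoms)
else:
    pieces = [clipped]

for piece in pieces:
    print(list(piece.coords))
# Each piece would then get the polygon's attributes joined onto the
# original line's attributes, as in the loop above.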
Example #28
0
    def run(self):
        """Counts number of building exposed to each volcano hazard zones.

        :returns: Map of building exposed to volcanic hazard zones.
                  Table with number of buildings affected
        :rtype: dict
        """
        # Parameters
        radii = self.parameters['distances'].value

        # Get parameters from layer's keywords
        volcano_name_attribute = self.hazard.keyword('volcano_name_field')
        self.exposure_class_attribute = self.exposure.keyword(
            'structure_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        # Category names for the impact zone
        category_names = radii
        # In kilometers
        self._affected_categories_volcano = [
            tr('Radius %.1f km') % key for key in radii[::]]

        # Get names of volcanoes considered
        if volcano_name_attribute in self.hazard.layer.get_attribute_names():
            for row in self.hazard.layer.get_data():
                # Run through all polygons and get unique names
                self.volcano_names.add(row[volcano_name_attribute])

        # Find the target field name that has no conflict with the attribute
        # names in the hazard layer
        hazard_attribute_names = self.hazard.layer.get_attribute_names()
        target_field = get_non_conflicting_attribute_name(
            self.target_field, hazard_attribute_names)

        # Run interpolation function for polygon2polygon
        interpolated_layer = assign_hazard_values_to_exposure_data(
            self.hazard.layer, self.exposure.layer)

        # Extract relevant interpolated layer data
        features = interpolated_layer.get_data()

        self.init_report_var(radii)

        # Iterate the interpolated building layer
        for i in range(len(features)):
            hazard_value = features[i][self.hazard_zone_attribute]
            if not hazard_value:
                hazard_value = self._not_affected_value
            features[i][target_field] = hazard_value

            # Count affected buildings by usage type if available
            usage = features[i][self.exposure_class_attribute]
            usage = main_type(usage, exposure_value_mapping)

            affected = False
            if hazard_value in self.affected_buildings.keys():
                affected = True

            self.classify_feature(hazard_value, usage, affected)

        self.reorder_dictionaries()

        # Adding 'km'.
        affected_building_keys = self.affected_buildings.keys()
        for key in affected_building_keys:
            self.affected_buildings[tr('Radius %.1f km' % key)] = \
                self.affected_buildings.pop(key)

        # Create style
        colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
                   '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
        colours = colours[::-1]  # flip
        colours = colours[:len(category_names)]
        style_classes = []

        for i, category_name in enumerate(category_names):
            style_class = dict()
            style_class['label'] = tr('Radius %s km') % tr(category_name)
            style_class['transparency'] = 0
            style_class['value'] = category_name
            style_class['size'] = 1

            if i >= len(category_names):
                i = len(category_names) - 1
            style_class['colour'] = colours[i]

            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(
            target_field=target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'target_field': target_field,
            'map_title': self.map_title(),
            'legend_notes': self.metadata().key('legend_notes'),
            'legend_units': self.metadata().key('legend_units'),
            'legend_title': self.metadata().key('legend_title')
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Create vector layer and return
        impact_layer = Vector(
            data=features,
            projection=interpolated_layer.get_projection(),
            geometry=interpolated_layer.get_geometry(),
            name=self.map_title(),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
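The 'Radius x.x km' step above re-keys the per-radius breakdown from raw distances to human-readable labels. A self-contained sketch of that re-keying, with plain '%' formatting standing in for tr() and illustrative counts:

radii = [3.0, 5.0, 10.0]
affected_buildings = {3.0: 12, 5.0: 30, 10.0: 55}  # buildings per radius

for key in list(affected_buildings.keys()):
    # Re-key from the raw distance to a human readable label
    affected_buildings['Radius %.1f km' % key] = affected_buildings.pop(key)

assert affected_buildings == {
    'Radius 3.0 km': 12, 'Radius 5.0 km': 30, 'Radius 10.0 km': 55}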
Example #29
    def run(self, layers):
        """
        Experimental impact function

        Input
          layers: List of layers expected to contain
              H: Polygon layer of inundation areas
              E: Vector layer of roads
        """
        target_field = self.parameters['target_field']
        road_type_field = self.parameters['road_type_field']
        threshold_min = self.parameters['min threshold [m]']
        threshold_max = self.parameters['max threshold [m]']

        if threshold_min > threshold_max:
            message = tr(
                'The minimum threshold is greater than the maximum '
                'threshold. Please check the values.')
            raise GetDataError(message)

        # Extract data
        H = get_hazard_layer(layers)  # Flood
        E = get_exposure_layer(layers)  # Roads

        question = get_question(H.get_name(), E.get_name(), self)

        H = H.get_layer()
        E = E.get_layer()

        # Get necessary width and height of raster
        height = (self.extent[3] - self.extent[1]) / H.rasterUnitsPerPixelY()
        height = int(height)
        width = (self.extent[2] - self.extent[0]) / H.rasterUnitsPerPixelX()
        width = int(width)

        # Align raster extent and self.extent
        raster_extent = H.dataProvider().extent()
        xmin = raster_extent.xMinimum()
        xmax = raster_extent.xMaximum()
        ymin = raster_extent.yMinimum()
        ymax = raster_extent.yMaximum()

        x_delta = (xmax - xmin) / H.width()
        x = xmin
        for i in range(H.width()):
            if abs(x - self.extent[0]) < x_delta:
                # We have found the aligned raster boundary
                break
            x += x_delta
            _ = i

        y_delta = (ymax - ymin) / H.height()
        y = ymin
        for i in range(H.height()):
            if abs(y - self.extent[1]) < y_delta:
                # We have found the aligned raster boundary
                break
            y += y_delta
        clip_extent = [x, y, x + width * x_delta, y + height * y_delta]

        # Clip and polygonize
        small_raster = clip_raster(H, width, height,
                                   QgsRectangle(*clip_extent))
        flooded_polygon = polygonize(small_raster, threshold_min,
                                     threshold_max)

        # Filter geometry and data using the extent
        extent = QgsRectangle(*self.extent)
        request = QgsFeatureRequest()
        request.setFilterRect(extent)

        if flooded_polygon is None:
            message = tr(
                'There are no objects in the hazard layer with "value" '
                'greater than %s. Please check the threshold or use '
                'another extent.') % (threshold_min, )
            raise GetDataError(message)

        # Clip exposure by the extent
        extent_as_polygon = QgsGeometry().fromRect(extent)
        line_layer = clip_by_polygon(E, extent_as_polygon)
        # Find inundated roads, mark them
        line_layer = split_by_polygon(line_layer,
                                      flooded_polygon,
                                      request,
                                      mark_value=(target_field, 1))

        target_field_index = line_layer.dataProvider().\
            fieldNameIndex(target_field)

        # Generate simple impact report
        epsg = get_utm_epsg(self.extent[0], self.extent[1])
        output_crs = QgsCoordinateReferenceSystem(epsg)
        transform = QgsCoordinateTransform(E.crs(), output_crs)
        road_len = flooded_len = 0  # Length of roads
        roads_by_type = dict()  # Length of flooded roads by types

        roads_data = line_layer.getFeatures()
        road_type_field_index = line_layer.fieldNameIndex(road_type_field)
        for road in roads_data:
            attributes = road.attributes()
            road_type = attributes[road_type_field_index]
            if road_type.__class__.__name__ == 'QPyNullVariant':
                road_type = tr('Other')
            geom = road.geometry()
            geom.transform(transform)
            length = geom.length()
            road_len += length

            if road_type not in roads_by_type:
                roads_by_type[road_type] = {'flooded': 0, 'total': 0}
            roads_by_type[road_type]['total'] += length

            if attributes[target_field_index] == 1:
                flooded_len += length
                roads_by_type[road_type]['flooded'] += length
        table_body = [
            question,
            TableRow([
                tr('Road Type'),
                tr('Flooded in the threshold (m)'),
                tr('Total (m)')
            ],
                     header=True),
            TableRow([tr('All'), int(flooded_len),
                      int(road_len)])
        ]
        table_body.append(TableRow(tr('Breakdown by road type'), header=True))
        for t, v in roads_by_type.iteritems():
            table_body.append(TableRow([t,
                                        int(v['flooded']),
                                        int(v['total'])]))

        impact_summary = Table(table_body).toNewlineFreeString()
        map_title = tr('Roads inundated')

        style_classes = [
            dict(label=tr('Not Inundated'),
                 value=0,
                 colour='#1EFC7C',
                 transparency=0,
                 size=0.5),
            dict(label=tr('Inundated'),
                 value=1,
                 colour='#F31A1C',
                 transparency=0,
                 size=0.5)
        ]
        style_info = dict(target_field=target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # Convert QgsVectorLayer to inasafe layer and return it
        line_layer = Vector(data=line_layer,
                            name=tr('Flooded roads'),
                            keywords={
                                'impact_summary': impact_summary,
                                'map_title': map_title,
                                'target_field': target_field
                            },
                            style_info=style_info)
        return line_layer
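The report section above accumulates road length twice per feature: once into the grand totals and once into a per-type breakdown, adding to the 'flooded' bucket only when the target field equals 1. A standalone sketch of that bookkeeping with made-up features (no QGIS types involved):

# Each tuple: (road type, length in metres, flooded flag from target_field)
features = [('primary', 120.0, 1), ('primary', 80.0, 0),
            ('residential', 50.0, 1), (None, 30.0, 0)]

road_len = flooded_len = 0.0
roads_by_type = {}
for road_type, length, flooded in features:
    if road_type is None:  # stands in for the QPyNullVariant check
        road_type = 'Other'
    road_len += length
    if road_type not in roads_by_type:
        roads_by_type[road_type] = {'flooded': 0.0, 'total': 0.0}
    roads_by_type[road_type]['total'] += length
    if flooded == 1:
        flooded_len += length
        roads_by_type[road_type]['flooded'] += length

assert roads_by_type['primary'] == {'flooded': 120.0, 'total': 200.0}
assert int(flooded_len) == 170 and int(road_len) == 280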
Example #30
    def run(self, layers):
        """Risk plugin for tsunami population
        """

        # Extract data
        H = get_hazard_layer(layers)  # Depth
        E = get_exposure_layer(layers)  # Building locations

        # Interpolate hazard level to building locations
        Hi = H.interpolate(E, attribute_name='depth')

        # Extract relevant numerical data
        coordinates = Hi.get_geometry()
        depth = Hi.get_data()
        N = len(depth)

        # List attributes to carry forward to result layer
        attributes = E.get_attribute_names()

        # Calculate building impact according to guidelines
        count3 = 0
        count1 = 0
        count0 = 0
        population_impact = []
        for i in range(N):

            if H.is_raster:
                # Get depth
                dep = float(depth[i]['depth'])

                # Classify buildings according to depth
                if dep >= 3:
                    affected = 3  # FIXME: Colour upper bound is 100 but
                    count3 += 1  # does not catch affected == 100
                elif 1 <= dep < 3:
                    affected = 2
                    count1 += 1
                else:
                    affected = 1
                    count0 += 1
            elif H.is_vector:
                dep = 0  # Just put something here
                cat = depth[i]['Affected']
                if cat is True:
                    affected = 3
                    count3 += 1
                else:
                    affected = 1
                    count0 += 1

            # Collect depth and calculated damage
            result_dict = {self.target_field: affected, 'DEPTH': dep}

            # Carry all original attributes forward
            # FIXME: This should be done in interpolation. Check.
            #for key in attributes:
            #    result_dict[key] = E.get_data(key, i)

            # Record result for this feature
            population_impact.append(result_dict)

        # Create report
        Hname = H.get_name()
        Ename = E.get_name()
        if H.is_raster:
            impact_summary = ('<b>In case of "%s" the estimated impact to '
                              '"%s" '
                              'is&#58;</b><br><br><p>' % (Hname, Ename))
            impact_summary += (
                '<table border="0" width="320px">'
                '   <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                '   <tr></tr>'
                '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                '</table>' %
                (tr('Impact'), tr('Number of buildings'), tr('Low'), count0,
                 tr('Medium'), count1, tr('High'), count3))
        else:
            impact_summary = (
                '<table border="0" width="320px">'
                '   <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                '   <tr></tr>'
                '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                '</table>' %
                ('Terdampak oleh tsunami', 'Jumlah gedung', 'Terdampak',
                 count3, 'Tidak terdampak', count0, 'Semua', N))

        impact_summary += '<br>'  # Blank separation row
        impact_summary += '<b>' + tr('Assumption') + '&#58;</b><br>'
        impact_summary += ('Levels of impact are defined by BNPB\'s '
                           '<i>Pengkajian Risiko Bencana</i>')
        impact_summary += ('<table border="0" width="320px">'
                           '   <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                           '   <tr></tr>'
                           '   <tr><td>%s&#58;</td><td>%s&#58;</td></tr>'
                           '   <tr><td>%s&#58;</td><td>%s&#58;</td></tr>'
                           '   <tr><td>%s&#58;</td><td>%s&#58;</td></tr>'
                           '</table>' %
                           (tr('Impact'), tr('Tsunami height'), tr('Low'),
                            '<1 m', tr('Medium'), '1-3 m', tr('High'), '>3 m'))

        # Create style
        style_classes = [
            dict(label='< 1 m',
                 min=0,
                 max=1,
                 colour='#1EFC7C',
                 transparency=0,
                 size=1),
            dict(label='1 - 3 m',
                 min=1,
                 max=2,
                 colour='#FFA500',
                 transparency=0,
                 size=1),
            dict(label='> 3 m',
                 min=2,
                 max=4,
                 colour='#F31A1C',
                 transparency=0,
                 size=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes)

        # Create vector layer and return
        if Hi.is_line_data:
            name = 'Roads flooded'
        elif Hi.is_point_data:
            name = 'Buildings flooded'

        V = Vector(data=population_impact,
                   projection=E.get_projection(),
                   geometry=coordinates,
                   keywords={'impact_summary': impact_summary},
                   geometry_type=Hi.geometry_type,
                   name=name,
                   style_info=style_info)
        return V
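The raster branch above classifies each building by interpolated depth: 3 m and above is high impact, 1-3 m medium, anything shallower low. The same thresholds as a small stand-alone helper (class codes follow the snippet; the labels are only for readability):

def classify_depth(depth_m):
    """Return (impact class, label) for an interpolated tsunami depth."""
    if depth_m >= 3:
        return 3, 'High'
    elif 1 <= depth_m < 3:
        return 2, 'Medium'
    return 1, 'Low'

assert classify_depth(4.2) == (3, 'High')
assert classify_depth(1.0) == (2, 'Medium')
assert classify_depth(0.4) == (1, 'Low')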
Example #31
    def run(layers):
        """Risk plugin for tsunami building damage
        """

        # Extract data
        H = get_hazard_layer(layers)    # Ground shaking
        E = get_exposure_layer(layers)  # Building locations

        # Interpolate hazard level to building locations
        H = assign_hazard_values_to_exposure_data(
            H, E, attribute_name='depth')

        # Extract relevant numerical data
        coordinates = E.get_geometry()
        inundation = H.get_data()

        # Calculate
        N = len(H)
        impact = []
        for i in range(N):

            #-------------------
            # Extract parameters
            #-------------------
            depth = float(inundation[i]['depth'])
            #shore_distance = E.get_data('SHORE_DIST', i)

            # FIXME: Get rid of the type casting when
            #        issue #66 is done
            number_of_people_in_building = int(E.get_data('NEXIS_PEOP', i))
            wall_type = E.get_data('WALL_TYPE', i)
            contents_value = E.get_data('CONT_VALUE', i)
            structure_value = E.get_data('STR_VALUE', i)

            #------------------------
            # Compute people affected
            #------------------------
            if 0.01 < depth < 1.0:
                people_affected = number_of_people_in_building
            else:
                people_affected = 0

            if depth >= 1.0:
                people_severely_affected = number_of_people_in_building
            else:
                people_severely_affected = 0

            #----------------------------------------
            # Compute impact on buildings and contents
            #----------------------------------------
            depth_floor = depth - 0.3  # Adjust for floor height

            if depth_floor >= 0.0:
                buildings_inundated = 1
            else:
                buildings_inundated = 0

            if depth_floor < 0.0:
                structural_damage = contents_damage = 0.0
            else:
                # Water is deep enough to cause damage
                if wall_type in struct_damage_curve:
                    curve = struct_damage_curve[wall_type]
                else:
                    # Establish default for unknown wall type
                    curve = struct_damage_curve['Brick veneer']

                structural_damage = curve(depth_floor)
                contents_damage = contents_damage_curve(depth_floor)

            #---------------
            # Compute losses
            #---------------
            structural_loss = structural_damage * structure_value
            contents_loss = contents_damage * contents_value

            #-------
            # Return
            #-------
            impact.append({'NEXIS_PEOP': number_of_people_in_building,
                           'PEOPLE_AFFECTED': people_affected,
                           'PEOPLE_SEV_AFFECTED': people_severely_affected,
                           'STRUCT_INUNDATED': buildings_inundated,
                           'STRUCT_DAMAGE_fraction': structural_damage,
                           'CONTENTS_DAMAGE_fraction': contents_damage,
                           'STRUCT_LOSS_AUD': structural_loss,
                           'CONTENTS_LOSS_AUD': contents_loss,
                           'DEPTH': depth})

        # FIXME (Ole): Need helper to generate new layer using
        #              correct spatial reference
        #              (i.e. sensibly wrap the following lines)
        V = Vector(data=impact, projection=E.get_projection(),
                   geometry=coordinates,
                   name='Estimated tsunami impact')
        return V
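A sketch of the loss calculation above: adjust depth for floor height, look up a depth-damage curve by wall type (falling back to a default when the wall type is unknown), then multiply the damage fractions by the structure and contents values. The linear curves below are placeholders, not the calibrated struct_damage_curve / contents_damage_curve objects from the library:

# Placeholder depth-damage curves: damage fraction vs depth above floor,
# capped at 1.0.
struct_damage_curve = {
    'Brick veneer': lambda d: min(1.0, 0.4 * d),
    'Timber': lambda d: min(1.0, 0.3 * d),
}

def contents_damage_curve(d):
    return min(1.0, 0.5 * d)

def tsunami_losses(depth, wall_type, structure_value, contents_value):
    depth_floor = depth - 0.3  # adjust for floor height
    if depth_floor < 0.0:
        return 0.0, 0.0
    # Establish default for unknown wall type
    curve = struct_damage_curve.get(wall_type,
                                    struct_damage_curve['Brick veneer'])
    structural_damage = curve(depth_floor)
    contents_damage = contents_damage_curve(depth_floor)
    return (structural_damage * structure_value,
            contents_damage * contents_value)

assert tsunami_losses(0.1, 'Timber', 100000, 20000) == (0.0, 0.0)
struct_loss, contents_loss = tsunami_losses(1.3, 'Unknown', 100000, 20000)
assert round(struct_loss) == 40000 and round(contents_loss) == 10000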
Example #32
    def run(self):
        """Run the impact function.

        :returns: A new line layer with inundated roads marked.
        :rtype: Vector
        """
        # Thresholds for tsunami hazard zone breakdown.
        low_max = self.parameters['low_threshold'].value
        medium_max = self.parameters['medium_threshold'].value
        high_max = self.parameters['high_threshold'].value

        target_field = self.target_field
        # Get parameters from layer's keywords
        road_class_field = self.exposure.keyword('road_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        # reproject self.extent to the hazard projection
        hazard_crs = self.hazard.layer.crs()
        hazard_authid = hazard_crs.authid()

        if hazard_authid == 'EPSG:4326':
            viewport_extent = self.requested_extent
        else:
            geo_crs = QgsCoordinateReferenceSystem()
            geo_crs.createFromSrid(4326)
            viewport_extent = extent_to_geo_array(
                QgsRectangle(*self.requested_extent), geo_crs, hazard_crs)

        # Clip hazard raster
        small_raster = align_clip_raster(self.hazard.layer, viewport_extent)

        # Create vector features from the flood raster
        # For each raster cell there is one rectangular polygon
        # Data also get spatially indexed for faster operation
        ranges = ranges_according_thresholds(low_max, medium_max, high_max)

        index, flood_cells_map = _raster_to_vector_cells(
            small_raster, ranges, self.exposure.layer.crs())

        # Filter geometry and data using the extent
        ct = QgsCoordinateTransform(QgsCoordinateReferenceSystem("EPSG:4326"),
                                    self.exposure.layer.crs())
        extent = ct.transformBoundingBox(QgsRectangle(*self.requested_extent))
        request = QgsFeatureRequest()
        request.setFilterRect(extent)

        # create template for the output layer
        line_layer_tmp = create_layer(self.exposure.layer)
        new_field = QgsField(target_field, QVariant.Int)
        line_layer_tmp.dataProvider().addAttributes([new_field])
        line_layer_tmp.updateFields()

        # create empty output layer and load it
        filename = unique_filename(suffix='.shp')
        QgsVectorFileWriter.writeAsVectorFormat(line_layer_tmp, filename,
                                                "utf-8", None,
                                                "ESRI Shapefile")
        line_layer = QgsVectorLayer(filename, "flooded roads", "ogr")

        # Do the heavy work - for each road get flood polygon for that area and
        # do the intersection/difference to find out which parts are flooded
        _intersect_lines_with_vector_cells(self.exposure.layer, request, index,
                                           flood_cells_map, line_layer,
                                           target_field)

        target_field_index = line_layer.dataProvider().\
            fieldNameIndex(target_field)

        # Generate simple impact report
        epsg = get_utm_epsg(self.requested_extent[0], self.requested_extent[1])
        output_crs = QgsCoordinateReferenceSystem(epsg)
        transform = QgsCoordinateTransform(self.exposure.layer.crs(),
                                           output_crs)

        # Roads breakdown
        self.init_report_var(self.hazard_classes)

        if line_layer.featureCount() < 1:
            raise ZeroImpactException()

        roads_data = line_layer.getFeatures()
        road_type_field_index = line_layer.fieldNameIndex(road_class_field)

        for road in roads_data:
            attributes = road.attributes()

            affected = attributes[target_field_index]
            if isinstance(affected, QPyNullVariant):
                continue
            else:
                hazard_zone = self.hazard_classes[affected]

            usage = attributes[road_type_field_index]
            usage = main_type(usage, exposure_value_mapping)

            geom = road.geometry()
            geom.transform(transform)
            length = geom.length()

            affected = False
            num_classes = len(self.hazard_classes)
            if attributes[target_field_index] in range(num_classes):
                affected = True
            self.classify_feature(hazard_zone, usage, length, affected)

        self.reorder_dictionaries()

        style_classes = [
            # FIXME 0 - 0.1
            dict(label=self.hazard_classes[0] + ': 0m',
                 value=0,
                 colour='#00FF00',
                 transparency=0,
                 size=1),
            dict(label=self.hazard_classes[1] + ': >0 - %.1f m' % low_max,
                 value=1,
                 colour='#FFFF00',
                 transparency=0,
                 size=1),
            dict(label=self.hazard_classes[2] + ': %.1f - %.1f m' %
                 (low_max + 0.1, medium_max),
                 value=2,
                 colour='#FFB700',
                 transparency=0,
                 size=1),
            dict(label=self.hazard_classes[3] + ': %.1f - %.1f m' %
                 (medium_max + 0.1, high_max),
                 value=3,
                 colour='#FF6F00',
                 transparency=0,
                 size=1),
            dict(label=self.hazard_classes[4] + ' > %.1f m' % high_max,
                 value=4,
                 colour='#FF0000',
                 transparency=0,
                 size=1),
        ]
        style_info = dict(target_field=target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'map_title': self.map_title(),
            'legend_title': self.metadata().key('legend_title'),
            'target_field': target_field
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Convert QgsVectorLayer to inasafe layer and return it
        impact_layer = Vector(data=line_layer,
                              name=self.map_title(),
                              keywords=impact_layer_keywords,
                              style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
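The five style classes above correspond to hazard values 0-4 derived from the low/medium/high depth thresholds. The banding itself happens in ranges_according_thresholds and the raster-to-vector helpers; the sketch below only illustrates the idea, and the inclusive/exclusive boundary handling is an assumption:

def hazard_class(depth, low_max, medium_max, high_max):
    """Return 0 (dry) .. 4 (very high) for a depth in metres."""
    if depth <= 0:
        return 0
    elif depth <= low_max:
        return 1
    elif depth <= medium_max:
        return 2
    elif depth <= high_max:
        return 3
    return 4

assert hazard_class(0.0, 1.0, 3.0, 8.0) == 0
assert hazard_class(0.5, 1.0, 3.0, 8.0) == 1
assert hazard_class(2.0, 1.0, 3.0, 8.0) == 2
assert hazard_class(9.0, 1.0, 3.0, 8.0) == 4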
Example #33
    def run(self, layers):
        """Risk plugin for volcano hazard on building/structure

        Input
          layers: List of layers expected to contain
              my_hazard: Hazard layer of volcano
              my_exposure: Vector layer of structure data on
              the same grid as my_hazard

        Counts number of building exposed to each volcano hazard zones.

        Return
          Map of building exposed to volcanic hazard zones
          Table with number of buildings affected
        """

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Volcano hazard layer
        my_exposure = get_exposure_layer(layers)
        is_point_data = False

        question = get_question(my_hazard.get_name(),
                                my_exposure.get_name(),
                                self)

        # Input checks
        if not my_hazard.is_vector:
            msg = ('Input hazard %s was not a vector layer as expected'
                   % my_hazard.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon or point layer. I got %s '
               'with layer type %s' %
               (my_hazard.get_name(), my_hazard.get_geometry_name()))
        if not (my_hazard.is_polygon_data or my_hazard.is_point_data):
            raise Exception(msg)

        if my_hazard.is_point_data:
            # Use concentric circles
            radii = self.parameters['distances [km]']
            is_point_data = True

            centers = my_hazard.get_geometry()
            attributes = my_hazard.get_data()
            rad_m = [x * 1000 for x in radii]  # Convert to meters
            Z = make_circular_polygon(centers, rad_m, attributes=attributes)
            # To check
            category_title = 'Radius'
            my_hazard = Z

            category_names = rad_m
            name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
        else:
            # Use hazard map
            category_title = 'KRB'

            # FIXME (Ole): Change to English and use translation system
            category_names = ['Kawasan Rawan Bencana III',
                              'Kawasan Rawan Bencana II',
                              'Kawasan Rawan Bencana I']
            name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map

        # Get names of volcanos considered
        if name_attribute in my_hazard.get_attribute_names():
            D = {}
            for att in my_hazard.get_data():
                # Run through all polygons and get unique names
                D[att[name_attribute]] = None

            volcano_names = ''
            for name in D:
                volcano_names += '%s, ' % name
            volcano_names = volcano_names[:-2]  # Strip trailing ', '
        else:
            volcano_names = tr('Not specified in data')

        if category_title not in my_hazard.get_attribute_names():
            msg = ('Hazard data %s did not contain expected '
                   'attribute %s ' % (my_hazard.get_name(), category_title))
            # noinspection PyExceptionInherit
            raise InaSAFEError(msg)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(my_hazard, my_exposure)

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a building count of zero
        new_attributes = my_hazard.get_data()

        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            cat = attr[category_title]
            categories[cat] = 0

        # Count impacted building per polygon and total
        for attr in P.get_data():

            # Update building count for associated polygon
            poly_id = attr['polygon_id']
            if poly_id is not None:
                new_attributes[poly_id][self.target_field] += 1

                # Update building count for each category
                cat = new_attributes[poly_id][category_title]
                categories[cat] += 1

        # Count totals
        total = len(my_exposure)

        # Generate simple impact report
        blank_cell = ''
        table_body = [question,
                      TableRow([tr('Volcanos considered'),
                                '%s' % volcano_names, blank_cell],
                               header=True),
                      TableRow([tr('Distance [km]'), tr('Total'),
                                tr('Cumulative')],
                               header=True)]

        cum = 0
        for name in category_names:
            # prevent key error
            count = categories.get(name, 0)
            cum += count
            if is_point_data:
                name = int(name) / 1000
            table_body.append(TableRow([name, format_int(count),
                                        format_int(cum)]))

        table_body.append(TableRow(tr('Map shows buildings affected in '
                                      'each of volcano hazard polygons.')))
        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend([TableRow(tr('Notes'), header=True),
                           tr('Total number of buildings %s in the viewable '
                              'area') % format_int(total),
                           tr('Only buildings available in OpenStreetMap '
                              'are considered.')])

        impact_summary = Table(table_body).toNewlineFreeString()
        building_counts = [x[self.target_field] for x in new_attributes]

        if max(building_counts) == 0 == min(building_counts):
            table_body = [
                question,
                TableRow([tr('Number of buildings affected'),
                          '%s' % format_int(cum), blank_cell],
                         header=True)]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
                   '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
        classes = create_classes(building_counts, len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 100
                style_class['min'] = 0
            else:
                transparency = 30
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('Buildings affected by volcanic hazard zone')
        legend_notes = tr('Thousand separator is represented by %s' %
                          get_thousand_separator())
        legend_units = tr('(building)')
        legend_title = tr('Building count')

        # Create vector layer and return
        V = Vector(data=new_attributes,
                   projection=my_hazard.get_projection(),
                   geometry=my_hazard.get_geometry(as_geometry_objects=True),
                   name=tr('Buildings affected by volcanic hazard zone'),
                   keywords={'impact_summary': impact_summary,
                             'impact_table': impact_table,
                             'target_field': self.target_field,
                             'map_title': map_title,
                             'legend_notes': legend_notes,
                             'legend_units': legend_units,
                             'legend_title': legend_title},
                   style_info=style_info)
        return V
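The counting loop above increments the building count stored on the hazard polygon that each interpolated building falls in, and the running total for that polygon's hazard category. A self-contained sketch of that bookkeeping ('NBUILDINGS' stands in for self.target_field; the data are illustrative):

# Hazard polygons with a category attribute and a zeroed building count
new_attributes = [
    {'KRB': 'Kawasan Rawan Bencana III', 'NBUILDINGS': 0},
    {'KRB': 'Kawasan Rawan Bencana II', 'NBUILDINGS': 0},
]
categories = {'Kawasan Rawan Bencana III': 0, 'Kawasan Rawan Bencana II': 0}

# Interpolated buildings: polygon_id points back to the enclosing polygon
interpolated = [{'polygon_id': 0}, {'polygon_id': 0},
                {'polygon_id': 1}, {'polygon_id': None}]

for attr in interpolated:
    poly_id = attr['polygon_id']
    if poly_id is not None:
        new_attributes[poly_id]['NBUILDINGS'] += 1
        cat = new_attributes[poly_id]['KRB']
        categories[cat] += 1

assert new_attributes[0]['NBUILDINGS'] == 2
assert categories['Kawasan Rawan Bencana II'] == 1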
Example #34
    def run(self, layers):
        """Risk plugin for Padang building survey
        """

        # Extract data
        H = get_hazard_layer(layers)  # Ground shaking
        E = get_exposure_layer(layers)  # Building locations

        datatype = E.get_keywords()['datatype']
        vclass_tag = 'ITB_Class'
        if datatype.lower() == 'osm':
            # Map from OSM attributes to the ITB building classes
            #            Emap = osm2itb(E)
            print 'osm2itb has not been implemented'
        elif datatype.lower() == 'sigab':
            #            Emap = sigabitb(E)
            print 'sigab2itb has not been implemented'
        elif datatype.lower() == 'itb':
            Emap = E

        # Interpolate hazard level to building locations
        Hi = assign_hazard_values_to_exposure_data(H,
                                                   Emap,
                                                   attribute_name='MMI')

        # Extract relevant numerical data
        coordinates = Emap.get_geometry()
        shaking = Hi.get_data()
        N = len(shaking)

        # List attributes to carry forward to result layer
        attributes = Emap.get_attribute_names()
        # Calculate building damage
        count50 = 0
        count25 = 0
        count10 = 0
        count0 = 0
        building_damage = []
        for i in range(N):
            mmi = float(shaking[i]['MMI'])

            building_class = Emap.get_data(vclass_tag, i)

            building_type = str(building_class)
            damage_params = vul_curves[building_type]
            beta = damage_params['beta']
            median = damage_params['median']

            msg = 'Invalid parameter value for ' + building_type
            verify(beta + median > 0.0, msg)
            percent_damage = lognormal_cdf(mmi, median=median,
                                           sigma=beta) * 100

            # Collect shake level and calculated damage
            result_dict = {self.target_field: percent_damage, 'MMI': mmi}

            # Carry all original attributes forward
            for key in attributes:
                result_dict[key] = Emap.get_data(key, i)

            # Record result for this feature
            building_damage.append(result_dict)

            # Debugging
            #if percent_damage > 0.01:
            #    print mmi, percent_damage

            # Calculate statistics
            if percent_damage < 10:
                count0 += 1

            if 10 <= percent_damage < 33:
                count10 += 1

            if 33 <= percent_damage < 66:
                count25 += 1

            if 66 <= percent_damage:
                count50 += 1

        # Create report
        Hname = H.get_name()
        Ename = E.get_name()
        impact_summary = ('<b>In case of "%s" the estimated impact to '
                          '"%s" '
                          'is&#58;</b><br><br><p>' % (Hname, Ename))
        impact_summary += (
            '<table border="0" width="320px">'
            '   <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
            '   <tr></tr>'
            '   <tr><td>%s&#58;</td><td>%i</td></tr>'
            '   <tr><td>%s (<10%%)&#58;</td><td>%i</td></tr>'
            '   <tr><td>%s (10-33%%)&#58;</td><td>%i</td></tr>'
            '   <tr><td>%s (33-66%%)&#58;</td><td>%i</td></tr>'
            '   <tr><td>%s (66-100%%)&#58;</td><td>%i</td></tr>'
            '</table>' %
            (_('Buildings'), _('Total'), _('All'), N, _('No damage'), count0,
             _('Low damage'), count10, _('Medium damage'), count25,
             _('High damage'), count50))
        impact_summary += '<br>'  # Blank separation row
        impact_summary += '<b>' + _('Assumption') + '&#58;</b><br>'
        # This is the proper text:
        #_('Levels of impact are defined by post 2009 '
        #  'Padang earthquake survey conducted by Geoscience '
        #  'Australia and Institute of Teknologi Bandung.'))
        #_('Unreinforced masonry is assumed where no '
        #  'structural information is available.'))
        impact_summary += _('Levels of impact are defined by post 2009 '
                            'Padang earthquake survey conducted by Geoscience '
                            'Australia and Institute of Teknologi Bandung.')
        impact_summary += _('Unreinforced masonry is assumed where no '
                            'structural information is available.')
        # Create style
        style_classes = [
            dict(label=_('No damage'),
                 min=0,
                 max=10,
                 colour='#00ff00',
                 transparency=1),
            dict(label=_('Low damage'),
                 min=10,
                 max=33,
                 colour='#ffff00',
                 transparency=1),
            dict(label=_('Medium damage'),
                 min=33,
                 max=66,
                 colour='#ffaa00',
                 transparency=1),
            dict(label=_('High damage'),
                 min=66,
                 max=100,
                 colour='#ff0000',
                 transparency=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes)

        # Create vector layer and return
        V = Vector(data=building_damage,
                   projection=E.get_projection(),
                   geometry=coordinates,
                   name='Estimated pct damage',
                   keywords={'impact_summary': impact_summary},
                   style_info=style_info)
        return V
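The damage model above is a lognormal fragility curve: percent damage equals 100 times the lognormal CDF of the shaking intensity (MMI), with a class-specific median and log-standard deviation (beta). A numpy-free sketch of such a lognormal_cdf using the error function; the median/beta values below are illustrative, not the calibrated ITB parameters:

import math

def lognormal_cdf(x, median=1.0, sigma=1.0):
    """CDF of a lognormal distribution parameterised by its median."""
    if x <= 0:
        return 0.0
    return 0.5 * (1.0 + math.erf(
        (math.log(x) - math.log(median)) / (sigma * math.sqrt(2.0))))

# Illustrative fragility parameters for one building class
median, beta = 7.5, 0.3
mmi = 7.5
percent_damage = lognormal_cdf(mmi, median=median, sigma=beta) * 100
assert abs(percent_damage - 50.0) < 1e-6  # at the median, damage is 50%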
Example #35
    def run(self):
        """Classified hazard impact to buildings (e.g. from Open Street Map).
        """

        # Value from layer's keywords
        structure_class_field = self.exposure.keyword("structure_class_field")
        try:
            exposure_value_mapping = self.exposure.keyword("value_mapping")
        except KeywordNotFoundError:
            # Generic IF: the keyword might not be defined in base.py
            exposure_value_mapping = {}
        self.hazard_class_mapping = self.hazard.keyword("value_map")

        keys = [x["key"] for x in generic_raster_hazard_classes["classes"]]
        names = [x["name"] for x in generic_raster_hazard_classes["classes"]]
        classes = OrderedDict()
        for i in range(len(keys)):
            classes[keys[i]] = names[i]

        # Determine attribute name for hazard class
        hazard_class_attribute = get_non_conflicting_attribute_name(
            "haz_class", [x.keys() for x in self.exposure.layer.data][0]
        )

        interpolated_result = assign_hazard_values_to_exposure_data(
            self.hazard.layer, self.exposure.layer, attribute_name=hazard_class_attribute, mode="constant"
        )

        # Extract relevant exposure data
        attributes = interpolated_result.get_data()

        # Number of building in the interpolated layer
        buildings_total = len(interpolated_result)

        # Inverse the order from low to high
        self.init_report_var(classes.values()[::-1])

        for i in range(buildings_total):
            # Get the usage of the building
            usage = attributes[i][structure_class_field]
            usage = main_type(usage, exposure_value_mapping)

            # Initialize value as Not affected
            attributes[i][self.target_field] = tr("Not affected")
            attributes[i][self.affected_field] = 0

            # Get the hazard level of the building
            level = float(attributes[i][hazard_class_attribute])
            level = float(numpy_round(level))

            # Find the class according the building's level
            for k, v in self.hazard_class_mapping.items():
                if level in v:
                    impact_class = classes[k]
                    # Set the impact level
                    attributes[i][self.target_field] = impact_class
                    # Set to affected
                    attributes[i][self.affected_field] = 1
                    break

            # Count affected buildings by type
            self.classify_feature(attributes[i][self.target_field], usage, bool(attributes[i][self.affected_field]))

        self.reorder_dictionaries()

        # Create style
        # Low, Medium and High are translated in the attribute table.
        # "Not affected" is not translated in the attribute table.
        style_classes = [
            dict(
                label=tr("Not Affected"),
                value="Not affected",
                colour="#1EFC7C",
                transparency=0,
                size=2,
                border_color="#969696",
                border_width=0.2,
            ),
            dict(
                label=tr("Low"),
                value=tr("Low hazard zone"),
                colour="#EBF442",
                transparency=0,
                size=2,
                border_color="#969696",
                border_width=0.2,
            ),
            dict(
                label=tr("Medium"),
                value=tr("Medium hazard zone"),
                colour="#F4A442",
                transparency=0,
                size=2,
                border_color="#969696",
                border_width=0.2,
            ),
            dict(
                label=tr("High"),
                value=tr("High hazard zone"),
                colour="#F31A1C",
                transparency=0,
                size=2,
                border_color="#969696",
                border_width=0.2,
            ),
        ]
        style_info = dict(target_field=self.target_field, style_classes=style_classes, style_type="categorizedSymbol")
        LOGGER.debug("target field : " + self.target_field)

        impact_data = self.generate_data()

        extra_keywords = {
            "target_field": self.affected_field,
            "map_title": self.map_title(),
            "legend_units": self.metadata().key("legend_units"),
            "legend_title": self.metadata().key("legend_title"),
            "buildings_total": buildings_total,
            "buildings_affected": self.total_affected_buildings,
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Create impact layer and return
        impact_layer = Vector(
            data=attributes,
            projection=self.exposure.layer.get_projection(),
            geometry=self.exposure.layer.get_geometry(),
            name=self.map_title(),
            keywords=impact_layer_keywords,
            style_info=style_info,
        )

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
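The class lookup above rounds the interpolated raster value and searches the hazard value_map (class key -> list of raster values) for a match, marking the building affected only when a class is found. A sketch with an assumed three-class mapping (the built-in round stands in for numpy_round):

classes = {'low': 'Low hazard zone',
           'medium': 'Medium hazard zone',
           'high': 'High hazard zone'}
# Assumed keyword value_map: raster values belonging to each class
hazard_class_mapping = {'low': [1], 'medium': [2], 'high': [3]}

def classify_building(raster_value):
    level = float(round(raster_value))
    for key, values in hazard_class_mapping.items():
        if level in values:
            return classes[key], 1  # impact class, affected flag
    return 'Not affected', 0

assert classify_building(2.2) == ('Medium hazard zone', 1)
assert classify_building(0.0) == ('Not affected', 0)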
Example #36
    def test_clip_raster_by_polygons(self):
        """Raster grids can be clipped by polygon layers

        # See qgis project in test data: raster_point_and_clipping_test.qgs
        """

        # Name input files
        poly = join(TESTDATA, 'kabupaten_jakarta_singlepart.shp')
        grid = join(TESTDATA, 'population_5x5_jakarta.asc')

        # Get layers using API
        P = read_layer(poly)
        R = read_layer(grid)

        M = len(P)
        N = len(R)
        assert N == 56

        # Clip
        C = clip_raster_by_polygons(R, P)
        assert len(C) == M

        # Check points inside polygon
        tot = 0
        for c in C:
            tot += len(c)
        assert tot == 14

        # Check that points are inside the right polygon
        for i, polygon in enumerate(P.get_geometry()):

            points = C[i][0]
            values = C[i][1]

            # Sanity first
            for point in points:
                assert is_inside_polygon(point, polygon)

            # Specific tests against raster pixel values inside polygons
            # The values are read from qgis
            if i == 0:
                assert len(points) == 6
                assert numpy.allclose(values[0], 200951)
                assert numpy.allclose(values[1], 283237)
                assert numpy.allclose(values[2], 278385)
                assert numpy.allclose(values[3], 516061)
                assert numpy.allclose(values[4], 207414)
                assert numpy.allclose(values[5], 344466)

            elif i == 1:
                assert len(points) == 2
                msg = ('Got wrong coordinates %s, expected %s'
                       % (str(points[0, :]), str([106.8125, -6.1875])))
                assert numpy.allclose(points[0, :], [106.8125, -6.1875]), msg
                assert numpy.allclose(points[1, :], [106.8541667, -6.1875])
                assert numpy.allclose(values[0], 331942)
                assert numpy.allclose(values[1], 496446)
            elif i == 2:
                assert len(points) == 7
                assert numpy.allclose(values[0], 268579)
                assert numpy.allclose(values[1], 155795)
                assert numpy.allclose(values[2], 403674)
                assert numpy.allclose(values[3], 259280)
                assert numpy.allclose(values[4], 284526)
                assert numpy.allclose(values[5], 334370)
                assert numpy.allclose(values[6], 143325)
            elif i == 3:
                assert len(points) == 0  # Degenerate
            elif i == 4:
                assert len(points) == 0  # Degenerate
            elif i == 5:
                assert len(points) == 8
                assert numpy.allclose(values[0], 279103)
                assert numpy.allclose(values[1], 205762)
                assert numpy.allclose(values[2], 428705)
                assert numpy.allclose(values[3], 331093)
                assert numpy.allclose(values[4], 227514)
                assert numpy.allclose(values[5], 249308)
                assert numpy.allclose(values[6], 215739)
                assert numpy.allclose(values[7], 147447)
            elif i == 6:
                assert len(points) == 6
                assert numpy.allclose(values[0], 61836.4)
                assert numpy.allclose(values[1], 165723)
                assert numpy.allclose(values[2], 151307)
                assert numpy.allclose(values[3], 343787)
                assert numpy.allclose(values[4], 303627)
                assert numpy.allclose(values[5], 225232)

            # Generate layer objects
            values = [{'value': x} for x in C[i][1]]
            point_layer = Vector(data=values, geometry=points,
                                 projection=P.get_projection())

            if len(point_layer) > 0:
                # Geometry is only defined for layers that are not degenerate
                assert point_layer.is_point_data

            polygon_layer = Vector(geometry=[polygon],
                                   projection=P.get_projection())
            assert polygon_layer.is_polygon_data

            # Generate spatial data for visualisation with e.g. QGIS
            if False:
                point_layer.write_to_file('points_%i.shp' % i)
                polygon_layer.write_to_file('polygon_%i.shp' % i)
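The sanity check above relies on a point-in-polygon test. A minimal even-odd (ray casting) implementation, standing in for the library's is_inside_polygon, for readers who want to reproduce the check without the InaSAFE helpers:

def point_in_polygon(point, polygon):
    """Even-odd rule: True if point lies inside the closed polygon.

    point: (x, y); polygon: list of (x, y) vertices (unclosed ring).
    """
    x, y = point
    inside = False
    n = len(polygon)
    j = n - 1
    for i in range(n):
        xi, yi = polygon[i]
        xj, yj = polygon[j]
        # Does a horizontal ray from (x, y) cross the edge (j, i)?
        if ((yi > y) != (yj > y)) and \
                (x < (xj - xi) * (y - yi) / (yj - yi) + xi):
            inside = not inside
        j = i
    return inside

square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
assert point_in_polygon((0.5, 0.5), square)
assert not point_in_polygon((1.5, 0.5), square)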
Example #37
    def run(self, layers):
        """Risk plugin for flood population evacuation

        Input:
          layers: List of layers expected to contain

              my_hazard : Vector polygon layer of flood depth

              my_exposure : Raster layer of population data on the same
                grid as my_hazard

        Counts number of people exposed to areas identified as flood prone

        Return
          Map of population exposed to flooding

          Table with number of people evacuated and supplies required
        """
        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Flood inundation
        my_exposure = get_exposure_layer(layers)

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Check that hazard is polygon type
        if not my_hazard.is_vector:
            msg = ('Input hazard %s was not a vector layer as expected' %
                   my_hazard.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon layer. I got %s with layer '
               'type %s' %
               (my_hazard.get_name(), my_hazard.get_geometry_name()))
        if not my_hazard.is_polygon_data:
            raise Exception(msg)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(my_hazard,
                                                  my_exposure,
                                                  attribute_name='population')

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a population count of zero
        new_attributes = my_hazard.get_data()
        category_title = 'affected'  # FIXME: Should come from keywords
        deprecated_category_title = 'FLOODPRONE'
        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            try:
                cat = attr[category_title]
            except KeyError:
                try:
                    cat = attr[deprecated_category_title]
                    categories[cat] = 0
                except KeyError:
                    pass

        # Count affected population per polygon, per category and total
        affected_population = 0
        for attr in P.get_data():

            affected = False
            if 'affected' in attr:
                res = attr['affected']
                if res is None:
                    x = False
                else:
                    x = bool(res)
                affected = x
            elif 'FLOODPRONE' in attr:
                # If there isn't an 'affected' attribute,
                res = attr['FLOODPRONE']
                if res is not None:
                    affected = res.lower() == 'yes'
            elif 'Affected' in attr:
                # Check the default attribute assigned for points
                # covered by a polygon
                res = attr['Affected']
                if res is None:
                    x = False
                else:
                    x = res
                affected = x
            else:
                #assume that every polygon is affected (see #816)
                affected = True
                # there is no flood related attribute
                #msg = ('No flood related attribute found in %s. '
                #       'I was looking for either "Flooded", "FLOODPRONE" '
                #       'or "Affected". The latter should have been '
                #       'automatically set by call to '
                #       'assign_hazard_values_to_exposure_data(). '
                #       'Sorry I can\'t help more.')
                #raise Exception(msg)

            if affected:
                # Get population at this location
                pop = float(attr['population'])

                # Update population count for associated polygon
                poly_id = attr['polygon_id']
                new_attributes[poly_id][self.target_field] += pop

                # Update population count for each category
                if len(categories) > 0:
                    try:
                        cat = new_attributes[poly_id][category_title]
                    except KeyError:
                        cat = new_attributes[poly_id][
                            deprecated_category_title]
                    categories[cat] += pop

                # Update total
                affected_population += pop

        affected_population = round_thousand(affected_population)
        # Estimate number of people in need of evacuation
        evacuated = (affected_population *
                     self.parameters['evacuation_percentage'] / 100.0)

        total = int(numpy.sum(my_exposure.get_data(nan=0, scaling=False)))

        # Don't show digits less than a 1000
        total = round_thousand(total)
        evacuated = round_thousand(evacuated)

        # Calculate estimated minimum needs
        minimum_needs = self.parameters['minimum needs']
        tot_needs = evacuated_population_weekly_needs(evacuated, minimum_needs)

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([
                tr('People affected'),
                '%s%s' % (format_int(int(affected_population)),
                          ('*' if affected_population >= 1000 else ''))
            ],
                     header=True),
            TableRow([
                tr('People needing evacuation'),
                '%s%s' % (format_int(int(evacuated)),
                          ('*' if evacuated >= 1000 else ''))
            ],
                     header=True),
            TableRow([
                TableCell(tr('* Number is rounded to the nearest 1000'),
                          col_span=2)
            ],
                     header=False),
            TableRow([
                tr('Evacuation threshold'),
                '%s%%' % format_int(self.parameters['evacuation_percentage'])
            ],
                     header=True),
            TableRow(
                tr('Map shows population affected in each flood'
                   ' prone area')),
            TableRow(
                tr('Table below shows the weekly minimum needs '
                   'for all evacuated people')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), format_int(tot_needs['rice'])],
            [
                tr('Drinking Water [l]'),
                format_int(tot_needs['drinking_water'])
            ], [tr('Clean Water [l]'),
                format_int(tot_needs['water'])],
            [tr('Family Kits'),
             format_int(tot_needs['family_kits'])],
            [tr('Toilets'), format_int(tot_needs['toilets'])]
        ]
        impact_table = Table(table_body).toNewlineFreeString()

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(TableRow(tr('How will warnings be disseminated?')))
        table_body.append(TableRow(tr('How will we reach stranded people?')))
        table_body.append(TableRow(tr('Do we have enough relief items?')))
        table_body.append(
            TableRow(
                tr('If yes, where are they located and how '
                   'will we distribute them?')))
        table_body.append(
            TableRow(
                tr('If no, where can we obtain additional '
                   'relief items from and how will we '
                   'transport them to here?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People need evacuation if in area identified '
               'as "Flood Prone"'),
            tr('Minimum needs are defined in BNPB '
               'regulation 7/2008')
        ])
        impact_summary = Table(table_body).toNewlineFreeString()

        # Create style
        # Define classes for legend for flooded population counts
        colours = [
            '#08FFDA', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]

        population_counts = [x[self.target_field] for x in new_attributes]
        classes = create_classes(population_counts, len(colours))
        interval_classes = humanize_class(classes)

        # Define style info for output polygons showing population counts
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 0
                style_class['min'] = 0
            else:
                transparency = 0
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('People affected by flood prone areas')
        legend_notes = tr('Thousand separator is represented by \'.\'')
        legend_units = tr('(people per polygon)')
        legend_title = tr('Population Count')

        # Create vector layer and return
        V = Vector(data=new_attributes,
                   projection=my_hazard.get_projection(),
                   geometry=my_hazard.get_geometry(),
                   name=tr('Population affected by flood prone areas'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title,
                       'affected_population': affected_population,
                       'total_population': total
                   },
                   style_info=style_info)
        return V
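A minimal, self-contained sketch of the graduated-class construction used above, assuming simple equal-interval breaks in place of InaSAFE's create_classes()/humanize_class()/create_label(); the helper name and the sample population counts are illustrative only.

def make_graduated_style_classes(values, colours):
    """Build one style class dict per colour over the value range."""
    low, high = float(min(values)), float(max(values))
    step = (high - low) / len(colours)
    breaks = [low + step * (i + 1) for i in range(len(colours))]
    style_classes = []
    for i, colour in enumerate(colours):
        style_classes.append({
            'label': '%.0f - %.0f' % (breaks[i] - step, breaks[i]),
            'min': 0 if i == 0 else breaks[i - 1],
            'max': breaks[i],
            'colour': colour,
            'transparency': 0,
        })
    return style_classes

palette = ['#08FFDA', '#38A800', '#79C900', '#CEED00',
           '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
print(make_graduated_style_classes([120, 500, 4300, 980, 15000], palette)[0])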
예제 #38
0
    def run(self, layers):
        """Flood impact to buildings (e.g. from Open Street Map)
        """

        threshold = self.parameters['threshold [m]']  # Flood threshold [m]

        verify(isinstance(threshold, float),
               'Expected threshold to be a float. Got %s' % str(threshold))

        # Extract data
        H = get_hazard_layer(layers)  # Depth
        E = get_exposure_layer(layers)  # Building locations

        question = get_question(H.get_name(), E.get_name(), self)

        # Determine attribute name for hazard levels
        if H.is_raster:
            mode = 'grid'
            hazard_attribute = 'depth'
        else:
            mode = 'regions'
            hazard_attribute = None

        # Interpolate hazard level to building locations
        I = assign_hazard_values_to_exposure_data(
            H, E, attribute_name=hazard_attribute)

        # Extract relevant exposure data
        attribute_names = I.get_attribute_names()
        attributes = I.get_data()
        N = len(I)
        # Calculate building impact
        count = 0
        buildings = {}
        affected_buildings = {}
        for i in range(N):
            if mode == 'grid':
                # Get the interpolated depth
                x = float(attributes[i]['depth'])
                x = x >= threshold
            elif mode == 'regions':
                # Use interpolated polygon attribute
                atts = attributes[i]

                # FIXME (Ole): Need to agree whether to use one or the
                # other as this can be very confusing!
                # For now look for 'affected' first
                if 'affected' in atts:
                    # E.g. from flood forecast
                    # Assume that building is wet if inside polygon
                    # as flagged by attribute Flooded
                    res = atts['affected']
                    if res is None:
                        x = False
                    else:
                        x = bool(res)

                elif 'FLOODPRONE' in atts:
                    res = atts['FLOODPRONE']
                    if res is None:
                        x = False
                    else:
                        x = res.lower() == 'yes'
                elif DEFAULT_ATTRIBUTE in atts:
                    # Check the default attribute assigned for points
                    # covered by a polygon
                    res = atts[DEFAULT_ATTRIBUTE]
                    if res is None:
                        x = False
                    else:
                        x = res
                else:
                    # there is no flood related attribute
                    msg = ('No flood related attribute found in %s. '
                           'I was looking for either "affected", "FLOODPRONE" '
                           'or "inapolygon". The latter should have been '
                           'automatically set by call to '
                           'assign_hazard_values_to_exposure_data(). '
                           'Sorry I can\'t help more.')
                    raise Exception(msg)
            else:
                msg = (tr(
                    'Unknown hazard type %s. Must be either "depth" or "grid"')
                       % mode)
                raise Exception(msg)

            # Count affected buildings by usage type if available
            if 'type' in attribute_names:
                usage = attributes[i]['type']
            else:
                usage = None
            if 'amenity' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['amenity']
            if 'building_t' in attribute_names and (usage is None
                                                    or usage == 0):
                usage = attributes[i]['building_t']
            if 'office' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['office']
            if 'tourism' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['tourism']
            if 'leisure' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['leisure']
            if 'building' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['building']
                if usage == 'yes':
                    usage = 'building'

            if usage is not None and usage != 0:
                key = usage
            else:
                key = 'unknown'

            if key not in buildings:
                buildings[key] = 0
                affected_buildings[key] = 0

            # Count all buildings by type
            buildings[key] += 1
            if x is True:
                # Count affected buildings by type
                affected_buildings[key] += 1

                # Count total affected buildings
                count += 1

            # Add calculated impact to existing attributes
            attributes[i][self.target_field] = x

        # Lump small entries and 'unknown' into 'other' category
        for usage in buildings.keys():
            x = buildings[usage]
            if x < 25 or usage == 'unknown':
                if 'other' not in buildings:
                    buildings['other'] = 0
                    affected_buildings['other'] = 0

                buildings['other'] += x
                affected_buildings['other'] += affected_buildings[usage]
                del buildings[usage]
                del affected_buildings[usage]

        # Generate simple impact report
        table_body = [
            question,
            TableRow([tr('Building type'),
                      tr('Number flooded'),
                      tr('Total')],
                     header=True),
            TableRow([tr('All'), format_int(count),
                      format_int(N)])
        ]

        school_closed = 0
        hospital_closed = 0
        # Generate breakdown by building usage type if available
        list_type_attribute = [
            'type', 'amenity', 'building_t', 'office', 'tourism', 'leisure',
            'building'
        ]
        intersect_type = set(attribute_names) & set(list_type_attribute)
        if len(intersect_type) > 0:
            # Make list of building types
            building_list = []
            for usage in buildings:
                building_type = usage.replace('_', ' ')

                # Lookup internationalised value if available
                building_type = tr(building_type)
                building_list.append([
                    building_type.capitalize(),
                    format_int(affected_buildings[usage]),
                    format_int(buildings[usage])
                ])
                if building_type == 'school':
                    school_closed = affected_buildings[usage]
                if building_type == 'hospital':
                    hospital_closed = affected_buildings[usage]

            # Sort alphabetically
            building_list.sort()

            table_body.append(
                TableRow(tr('Breakdown by building type'), header=True))
            for row in building_list:
                s = TableRow(row)
                table_body.append(s)

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(
            TableRow(tr('Are the critical facilities still open?')))
        table_body.append(
            TableRow(
                tr('Which structures have warning capacity (e.g. sirens, '
                   'speakers, etc.)?')))
        table_body.append(
            TableRow(tr('Which buildings will be evacuation centres?')))
        table_body.append(
            TableRow(tr('Where will we locate the operations centre?')))
        table_body.append(
            TableRow(
                tr('Where will we locate warehouse and/or distribution centres?'
                   )))

        if school_closed > 0:
            table_body.append(
                TableRow(
                    tr('Where will the students from the %s closed schools go to '
                       'study?') % format_int(school_closed)))

        if hospital_closed > 0:
            table_body.append(
                TableRow(
                    tr('Where will the patients from the %s closed hospitals go '
                       'for treatment and how will we transport them?') %
                    format_int(hospital_closed)))

        table_body.append(TableRow(tr('Notes'), header=True))
        assumption = tr('Buildings are said to be flooded when ')
        if mode == 'grid':
            assumption += tr('flood levels exceed %.1f m') % threshold
        else:
            assumption += tr('in regions marked as affected')
        table_body.append(assumption)

        # Result
        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # Create style
        style_classes = [
            dict(label=tr('Not Inundated'),
                 value=0,
                 colour='#1EFC7C',
                 transparency=0,
                 size=1),
            dict(label=tr('Inundated'),
                 value=1,
                 colour='#F31A1C',
                 transparency=0,
                 size=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # For map printing purposes
        map_title = tr('Buildings inundated')
        legend_units = tr('(inundated or not inundated)')
        legend_title = tr('Structure inundated status')

        # Create vector layer and return
        V = Vector(data=attributes,
                   projection=I.get_projection(),
                   geometry=I.get_geometry(),
                   name=tr('Estimated buildings affected'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return V
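A standalone restatement of the flooded test used in the loop above, as a sketch; the plain-dict features, the 'inapolygon' stand-in for DEFAULT_ATTRIBUTE and the helper name are assumptions for illustration.

def building_is_flooded(feature, mode, threshold, default_attribute='inapolygon'):
    """Return True if a building counts as flooded under the given mode."""
    if mode == 'grid':
        # Compare the interpolated depth against the threshold [m]
        return float(feature['depth']) >= threshold
    if mode == 'regions':
        if 'affected' in feature:         # e.g. tagged flood forecast polygons
            return bool(feature['affected'])
        if 'FLOODPRONE' in feature:       # e.g. flood prone mapping
            value = feature['FLOODPRONE']
            return value is not None and value.lower() == 'yes'
        if default_attribute in feature:  # set by the interpolation step
            return bool(feature[default_attribute])
        raise ValueError('No flood related attribute found')
    raise ValueError('Unknown mode: %s' % mode)

print(building_is_flooded({'depth': 1.2}, 'grid', threshold=1.0))           # True
print(building_is_flooded({'FLOODPRONE': 'No'}, 'regions', threshold=1.0))  # False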
예제 #39
0
    def run(self, layers):
        """Earthquake impact to buildings (e.g. from Open Street Map)
        """

        LOGGER.debug('Running earthquake building impact')

        # Thresholds for mmi breakdown
        t0 = self.parameters['low_threshold']
        t1 = self.parameters['medium_threshold']
        t2 = self.parameters['high_threshold']

        class_1 = tr('Low')
        class_2 = tr('Medium')
        class_3 = tr('High')

        # Extract data
        H = get_hazard_layer(layers)  # Ground shaking (MMI)
        E = get_exposure_layer(layers)  # Building locations

        question = get_question(H.get_name(), E.get_name(), self)

        # Define attribute name for hazard levels
        hazard_attribute = 'mmi'

        # Determine if exposure data have NEXIS attributes
        attribute_names = E.get_attribute_names()
        if ('FLOOR_AREA' in attribute_names and 'BUILDING_C' in attribute_names
                and 'CONTENTS_C' in attribute_names):
            is_NEXIS = True
        else:
            is_NEXIS = False

        # Interpolate hazard level to building locations
        I = assign_hazard_values_to_exposure_data(
            H, E, attribute_name=hazard_attribute)

        # Extract relevant exposure data
        #attribute_names = I.get_attribute_names()
        attributes = I.get_data()

        N = len(I)

        # Calculate building impact
        lo = 0
        me = 0
        hi = 0
        building_values = {}
        contents_values = {}
        for key in range(4):
            building_values[key] = 0
            contents_values[key] = 0

        for i in range(N):
            # Classify building according to shake level
            # and calculate dollar losses

            if is_NEXIS:
                try:
                    area = float(attributes[i]['FLOOR_AREA'])
                except (ValueError, KeyError):
                    #print 'Got area', attributes[i]['FLOOR_AREA']
                    area = 0.0

                try:
                    building_value_density = float(attributes[i]['BUILDING_C'])
                except (ValueError, KeyError):
                    #print 'Got bld value', attributes[i]['BUILDING_C']
                    building_value_density = 0.0

                try:
                    contents_value_density = float(attributes[i]['CONTENTS_C'])
                except (ValueError, KeyError):
                    #print 'Got cont value', attributes[i]['CONTENTS_C']
                    contents_value_density = 0.0

                building_value = building_value_density * area
                contents_value = contents_value_density * area

            x = float(attributes[i][hazard_attribute])  # MMI
            if t0 <= x < t1:
                lo += 1
                cls = 1
            elif t1 <= x < t2:
                me += 1
                cls = 2
            elif t2 <= x:
                hi += 1
                cls = 3
            else:
                # Not reported for less than level t0
                cls = 0

            attributes[i][self.target_field] = cls

            if is_NEXIS:
                # Accumulate dollar values (converted to $M units below)
                building_values[cls] += building_value
                contents_values[cls] += contents_value

        if is_NEXIS:
            # Convert to units of one million dollars
            for key in range(4):
                building_values[key] = int(building_values[key] / 1000000)
                contents_values[key] = int(contents_values[key] / 1000000)

        if is_NEXIS:
            # Generate simple impact report for NEXIS type buildings
            table_body = [
                question,
                TableRow([
                    tr('Hazard Level'),
                    tr('Buildings Affected'),
                    tr('Buildings value ($M)'),
                    tr('Contents value ($M)')
                ],
                         header=True),
                TableRow([class_1, lo, building_values[1],
                          contents_values[1]]),
                TableRow([class_2, me, building_values[2],
                          contents_values[2]]),
                TableRow([class_3, hi, building_values[3], contents_values[3]])
            ]
        else:
            # Generate simple impact report for unspecific buildings
            table_body = [
                question,
                TableRow([tr('Hazard Level'),
                          tr('Buildings Affected')],
                         header=True),
                TableRow([class_1, str(lo)]),
                TableRow([class_2, str(me)]),
                TableRow([class_3, str(hi)])
            ]

        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(
            tr('High hazard is defined as shake levels greater '
               'than %i on the MMI scale.') % t2)
        table_body.append(
            tr('Medium hazard is defined as shake levels '
               'between %i and %i on the MMI scale.') % (t1, t2))
        table_body.append(
            tr('Low hazard is defined as shake levels '
               'between %i and %i on the MMI scale.') % (t0, t1))
        if is_NEXIS:
            table_body.append(
                tr('Values are in units of 1 million Australian '
                   'Dollars'))

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary
        map_title = tr('Buildings affected')

        # Create style
        style_classes = [
            dict(label=class_1, min=1, max=1, colour='#ffff00',
                 transparency=1),
            dict(label=class_2, min=2, max=2, colour='#ffaa00',
                 transparency=1),
            dict(label=class_3, min=3, max=3, colour='#ff0000', transparency=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes)

        # Create vector layer and return
        V = Vector(data=attributes,
                   projection=I.get_projection(),
                   geometry=I.get_geometry(),
                   name=tr('Estimated buildings affected'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'map_title': map_title,
                       'target_field': self.target_field
                   },
                   style_info=style_info)

        LOGGER.debug('Created vector layer  %s' % str(V))
        return V
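A sketch of the MMI classification used above, with illustrative threshold values standing in for the low/medium/high_threshold parameters.

def classify_mmi(mmi, t0=6.0, t1=7.0, t2=8.0):
    """Map an MMI shake level to impact class 0 (not reported) to 3 (high)."""
    if t0 <= mmi < t1:
        return 1      # Low
    elif t1 <= mmi < t2:
        return 2      # Medium
    elif mmi >= t2:
        return 3      # High
    return 0          # Below the reporting threshold

counts = {0: 0, 1: 0, 2: 0, 3: 0}
for shake in [5.2, 6.4, 7.1, 8.3, 9.0]:
    counts[classify_mmi(shake)] += 1
print(counts)  # class 0: 1, class 1: 1, class 2: 1, class 3: 2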
예제 #40
0
def osm2bnpb(E, target_attribute='VCLASS'):
    """Map OSM attributes to BNPB vulnerability classes

    This maps attributes collected in the OpenStreetMap exposure data
    (data.kompetisiosm.org) to 2 vulnerability classes identified by
    BNPB in Kajian Risiko Gempabumi VERS 1.0, 2011. They are
    URM: Unreinforced Masonry and RM: Reinforced Masonry

    Input
        E: Vector object representing the OSM data
        target_attribute: Optional name of the attribute containing
                          the mapped vulnerability class. Default
                          value is 'VCLASS'

    Output:
        Vector object like E, but with one new attribute (e.g. 'VCLASS')
        representing the vulnerability class used in the guidelines
    """

    # Input check
    required = ['levels', 'structure']
    actual = E.get_attribute_names()
    msg = ('Input data to osm2bnpb must have attributes %s. '
           'It has %s' % (str(required), str(actual)))
    for attribute in required:
        verify(attribute in actual, msg)

    # Start mapping
    N = len(E)
    attributes = E.get_data()
    count = 0
    for i in range(N):
        levels = E.get_data('levels', i)
        structure = E.get_data('structure', i)
        if levels is None or structure is None:
            vulnerability_class = 'URM'
            count += 1
        else:
            # Map string variable levels to integer
            if levels.endswith('+'):
                levels = 100

            try:
                levels = int(levels)
            except ValueError:
                # E.g. 'ILP jalan'
                vulnerability_class = 'URM'
                count += 1
            else:
                # Start mapping depending on levels
                if levels >= 4:
                    # High
                    vulnerability_class = 'RM'
                elif 1 <= levels < 4:
                    # Low
                    if structure in ['reinforced_masonry', 'confined_masonry']:
                        vulnerability_class = 'RM'
                    elif 'kayu' in structure or 'wood' in structure:
                        vulnerability_class = 'RM'
                    else:
                        vulnerability_class = 'URM'
                elif numpy.allclose(levels, 0):
                    # A few buildings exist with 0 levels.

                    # In general, we should assign the most frequent
                    # building type in the area, which could be defined
                    # by admin boundaries.
                    vulnerability_class = 'URM'
                else:
                    msg = 'Unknown number of levels: %s' % levels
                    raise Exception(msg)

        # Store new attribute value
        attributes[i][target_attribute] = vulnerability_class

    #print 'Got %i without levels or structure (out of %i total)' % (count, N)

    # Create new vector instance and return
    V = Vector(data=attributes,
               projection=E.get_projection(),
               geometry=E.get_geometry(),
               name=E.get_name() + ' mapped to BNPB vulnerability classes',
               keywords=E.get_keywords())
    return V
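A sketch of the level/structure mapping applied above as a pure function; it covers only the class decision, and the sample calls are made up.

def bnpb_vulnerability_class(levels, structure):
    """Map OSM 'levels' and 'structure' values to 'URM' or 'RM'."""
    if levels is None or structure is None:
        return 'URM'
    if str(levels).endswith('+'):
        levels = 100
    try:
        levels = int(levels)
    except ValueError:
        return 'URM'          # free-text values such as 'ILP jalan'
    if levels >= 4:
        return 'RM'           # high rise
    if levels < 1:
        return 'URM'          # a few buildings are recorded with 0 levels
    if structure in ('reinforced_masonry', 'confined_masonry'):
        return 'RM'
    if 'kayu' in structure or 'wood' in structure:
        return 'RM'
    return 'URM'

print(bnpb_vulnerability_class('5+', 'unknown'))           # RM
print(bnpb_vulnerability_class('2', 'confined_masonry'))   # RM
print(bnpb_vulnerability_class('1', 'brick'))              # URM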
예제 #41
0
    def run(self):
        """Flood impact to buildings (e.g. from Open Street Map)."""

        threshold = self.parameters['threshold'].value  # Flood threshold [m]

        verify(isinstance(threshold, float),
               'Expected threshold to be a float. Got %s' % str(threshold))

        # Determine attribute name for hazard levels
        hazard_attribute = 'depth'

        # Interpolate hazard level to building locations
        interpolated_layer = assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=hazard_attribute)

        # Extract relevant exposure data
        features = interpolated_layer.get_data()
        total_features = len(interpolated_layer)

        structure_class_field = self.exposure.keyword('structure_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        hazard_classes = [tr('Flooded'), tr('Wet'), tr('Dry')]
        self.init_report_var(hazard_classes)

        for i in range(total_features):
            # Get the interpolated depth
            water_depth = float(features[i]['depth'])
            if water_depth <= 0:
                inundated_status = 0  # dry
            elif water_depth >= threshold:
                inundated_status = 1  # inundated
            else:
                inundated_status = 2  # wet

            usage = features[i].get(structure_class_field, None)
            usage = main_type(usage, exposure_value_mapping)

            # Add calculated impact to existing attributes
            features[i][self.target_field] = inundated_status
            category = [tr('Dry'), tr('Flooded'), tr('Wet')][inundated_status]
            self.classify_feature(category, usage, True)

        self.reorder_dictionaries()

        style_classes = [
            dict(label=tr('Dry (<= 0 m)'),
                 value=0,
                 colour='#1EFC7C',
                 transparency=0,
                 size=1),
            dict(label=tr('Wet (0 m - %.1f m)') % threshold,
                 value=2,
                 colour='#FF9900',
                 transparency=0,
                 size=1),
            dict(label=tr('Flooded (>= %.1f m)') % threshold,
                 value=1,
                 colour='#F31A1C',
                 transparency=0,
                 size=1)
        ]

        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'target_field': self.target_field,
            'map_title': self.map_title(),
            'legend_title': self.metadata().key('legend_title'),
            'legend_units': self.metadata().key('legend_units'),
            'buildings_total': total_features,
            'buildings_affected': self.total_affected_buildings
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        impact_layer = Vector(data=features,
                              projection=interpolated_layer.get_projection(),
                              geometry=interpolated_layer.get_geometry(),
                              name=self.map_title(),
                              keywords=impact_layer_keywords,
                              style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
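A sketch of the depth-to-status mapping used above; the 1.0 m threshold and the sample depths are illustrative.

def inundation_status(water_depth, threshold):
    """Return (code, label): 0 dry, 1 flooded (>= threshold), 2 wet."""
    if water_depth <= 0:
        return 0, 'Dry'
    if water_depth >= threshold:
        return 1, 'Flooded'
    return 2, 'Wet'

for depth in [0.0, 0.4, 1.7]:
    print('%.1f m -> %s' % (depth, inundation_status(depth, threshold=1.0)[1]))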
예제 #42
0
    def run(self):
        """Run the impact function.

        :returns: A new line layer with inundated roads marked.
        :rtype: Vector
        """

        target_field = self.target_field

        # Get parameters from layer's keywords
        road_class_field = self.exposure.keyword('road_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        # Get parameters from IF parameter
        threshold_min = self.parameters['min threshold'].value
        threshold_max = self.parameters['max threshold'].value

        if threshold_min > threshold_max:
            message = tr(
                'The minimal threshold is greater than the maximal specified '
                'threshold. Please check the values.')
            raise GetDataError(message)

        # reproject self.extent to the hazard projection
        hazard_crs = self.hazard.layer.crs()
        hazard_authid = hazard_crs.authid()

        if hazard_authid == 'EPSG:4326':
            viewport_extent = self.requested_extent
        else:
            geo_crs = QgsCoordinateReferenceSystem()
            geo_crs.createFromSrid(4326)
            viewport_extent = extent_to_geo_array(
                QgsRectangle(*self.requested_extent), geo_crs, hazard_crs)

        # Clip hazard raster
        small_raster = align_clip_raster(self.hazard.layer, viewport_extent)

        # Filter geometry and data using the extent
        ct = QgsCoordinateTransform(
            QgsCoordinateReferenceSystem("EPSG:4326"),
            self.exposure.layer.crs())
        extent = ct.transformBoundingBox(QgsRectangle(*self.requested_extent))
        request = QgsFeatureRequest()
        request.setFilterRect(extent)

        # create template for the output layer
        line_layer_tmp = create_layer(self.exposure.layer)
        new_field = QgsField(target_field, QVariant.Int)
        line_layer_tmp.dataProvider().addAttributes([new_field])
        line_layer_tmp.updateFields()

        # create empty output layer and load it
        filename = unique_filename(suffix='.shp')
        QgsVectorFileWriter.writeAsVectorFormat(
            line_layer_tmp, filename, "utf-8", None, "ESRI Shapefile")
        line_layer = QgsVectorLayer(filename, "flooded roads", "ogr")

        # Create vector features from the flood raster
        # For each raster cell there is one rectangular polygon
        # Data also gets spatially indexed for faster operations
        index, flood_cells_map = _raster_to_vector_cells(
            small_raster,
            threshold_min,
            threshold_max,
            self.exposure.layer.crs())

        if len(flood_cells_map) == 0:
            message = tr(
                'There are no objects in the hazard layer with "value" > %s. '
                'Please check the value or use a different extent.' % (
                    threshold_min, ))
            raise GetDataError(message)

        # Do the heavy work - for each road get flood polygon for that area and
        # do the intersection/difference to find out which parts are flooded
        _intersect_lines_with_vector_cells(
            self.exposure.layer,
            request,
            index,
            flood_cells_map,
            line_layer,
            target_field)

        target_field_index = line_layer.dataProvider().\
            fieldNameIndex(target_field)

        # Generate simple impact report
        epsg = get_utm_epsg(self.requested_extent[0], self.requested_extent[1])
        output_crs = QgsCoordinateReferenceSystem(epsg)
        transform = QgsCoordinateTransform(
            self.exposure.layer.crs(), output_crs)

        classes = [tr('Flooded within the thresholds (m)')]
        self.init_report_var(classes)

        if line_layer.featureCount() < 1:
            raise ZeroImpactException()

        roads_data = line_layer.getFeatures()
        road_type_field_index = line_layer.fieldNameIndex(road_class_field)

        for road in roads_data:
            attributes = road.attributes()

            usage = attributes[road_type_field_index]
            usage = main_type(usage, exposure_value_mapping)

            geom = road.geometry()
            geom.transform(transform)
            length = geom.length()

            affected = False
            if attributes[target_field_index] == 1:
                affected = True

            self.classify_feature(classes[0], usage, length, affected)

        self.reorder_dictionaries()

        style_classes = [
            dict(
                label=tr('Not Inundated'), value=0,
                colour='#1EFC7C', transparency=0, size=0.5),
            dict(
                label=tr('Inundated'), value=1,
                colour='#F31A1C', transparency=0, size=0.5)]
        style_info = dict(
            target_field=target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'map_title': self.map_title(),
            'legend_title': self.metadata().key('legend_title'),
            'target_field': target_field
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Convert QgsVectorLayer to inasafe layer and return it
        impact_layer = Vector(
            data=line_layer,
            name=self.map_title(),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
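A plain-dictionary sketch of the per-class length bookkeeping that init_report_var()/classify_feature() perform above; the road records and lengths are made up.

roads = [
    {'class': 'primary', 'length_m': 420.0, 'flooded': 1},
    {'class': 'residential', 'length_m': 150.0, 'flooded': 0},
    {'class': 'primary', 'length_m': 80.0, 'flooded': 1},
]

totals, affected = {}, {}
for road in roads:
    usage = road['class']
    totals[usage] = totals.get(usage, 0.0) + road['length_m']
    if road['flooded'] == 1:
        affected[usage] = affected.get(usage, 0.0) + road['length_m']

print(totals)    # primary: 500.0, residential: 150.0
print(affected)  # primary: 500.0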
예제 #43
0
    def run(layers):
        """Risk plugin for tephra impact
        """

        # Extract data
        H = get_hazard_layer(layers)  # Ash load
        E = get_exposure_layer(layers)  # Building locations

        # Interpolate hazard level to building locations
        H = assign_hazard_values_to_exposure_data(H, E, attribute_name='load')

        # Calculate building damage
        count3 = 0
        count2 = 0
        count1 = 0
        count0 = 0
        result = []
        for i in range(len(E)):

            #-------------------
            # Extract parameters
            #-------------------
            load = H.get_data('load', i)

            #------------------------
            # Compute damage level
            #------------------------

            # FIXME: The thresholds have been greatly reduced
            # for the purpose of demonstration. Any real analysis
            # should bring them back to 0, 90, 150, 300
            if 0.01 <= load < 0.5:
                # Loss of crops and livestock
                impact = 0
                count0 += 1
            elif 0.5 <= load < 2.0:
                # Cosmetic damage
                impact = 1
                count1 += 1
            elif 2.0 <= load < 10.0:
                # Partial building collapse
                impact = 2
                count2 += 1
            elif load >= 10.0:
                # Complete building collapse
                impact = 3
                count3 += 1
            else:
                impact = 0
                count0 += 1

            result.append({'DAMAGE': impact, 'ASHLOAD': load})

        # Create report
        impact_summary = ('<font size="3"> <table border="0" width="320px">'
                          '   <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                          '   <tr></tr>'
                          '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                          '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                          '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                          '   <tr><td>%s&#58;</td><td>%i</td></tr>'
                          '</table></font>' %
                          ('Beban abu', 'Gedung dampak', '< 0.5 kg/m2', count0,
                           '0.5 - 2 kg/m2', count1, '2 - 10 kg/m2', count2,
                           '> 10 kg/m2', count3))
        #'</table>' %
        # ('Beban abu', 'Gedung dampak',
        # 'Gangguan (< 90 kg/m2)', count0,
        # 'Kerusakan kosmetik (90 - 150 kg/m2', count1,
        # 'parsial runtuhnya (150 - 300 kg/m2', count2,
        # 'runtuhnya lengkap (> 300 kg/m2', count3))

        V = Vector(data=result,
                   projection=E.get_projection(),
                   geometry=E.get_geometry(),
                   name='Estimated ashload damage',
                   keywords={'impact_summary': impact_summary})
        return V
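A sketch of the ash-load classification above, using the reduced demonstration thresholds from the comments; as noted there, a real analysis would use 0, 90, 150 and 300 kg/m2 instead.

def ashload_damage_class(load):
    """Map ash load [kg/m2] to a damage level 0 (crops only) to 3 (collapse)."""
    if 0.5 <= load < 2.0:
        return 1   # Cosmetic damage
    if 2.0 <= load < 10.0:
        return 2   # Partial building collapse
    if load >= 10.0:
        return 3   # Complete building collapse
    return 0       # Loss of crops and livestock only

print([ashload_damage_class(x) for x in [0.1, 1.0, 5.0, 20.0]])  # [0, 1, 2, 3]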
    def run(self, layers):
        """Impact plugin for hazard impact
        """

        # Extract data
        H = get_hazard_layer(layers)  # Value
        E = get_exposure_layer(layers)  # Building locations

        question = get_question(H.get_name(), E.get_name(), self)

        # Interpolate hazard level to building locations
        H = assign_hazard_values_to_exposure_data(H,
                                                  E,
                                                  attribute_name='hazard_lev',
                                                  mode='constant')

        # Extract relevant numerical data
        coordinates = H.get_geometry()
        category = H.get_data()
        N = len(category)

        # List attributes to carry forward to result layer
        # attributes = E.get_attribute_names()

        # Calculate building impact according to guidelines
        count2 = 0
        count1 = 0
        count0 = 0
        building_impact = []
        for i in range(N):
            # Get category value
            val = float(category[i]['hazard_lev'])

            # Classify buildings according to value
            # if val >= 2.0 / 3:
            #     affected = 2
            #     count2 += 1
            # elif 1.0 / 3 <= val < 2.0 / 3:
            #     affected = 1
            #     count1 += 1
            # else:
            #     affected = 0
            #     count0 += 1
            # FIXME it would be good if the affected were words not numbers
            # FIXME need to read hazard layer and see category or keyword
            if val == 3:
                affected = 3
                count2 += 1
            elif val == 2:
                affected = 2
                count1 += 1
            elif val == 1:
                affected = 1
                count0 += 1
            else:
                affected = 'None'

            # Collect depth and calculated damage
            result_dict = {self.target_field: affected, 'CATEGORY': val}

            # Record result for this feature
            building_impact.append(result_dict)

        # Create impact report
        # Generate impact summary
        table_body = [
            question,
            TableRow([tr('Category'), tr('Affected')], header=True),
            TableRow([tr('High'), format_int(count2)]),
            TableRow([tr('Medium'), format_int(count1)]),
            TableRow([tr('Low'), format_int(count0)]),
            TableRow([tr('All'), format_int(N)])
        ]

        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(
            tr('Categorised hazard has only 3'
               ' classes: high, medium and low.'))

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary
        map_title = tr('Categorised hazard impact on buildings')

        # FIXME it would be great to do categorized rather than graduated
        # Create style
        style_classes = [
            dict(label=tr('Low'),
                 min=1,
                 max=1,
                 colour='#1EFC7C',
                 transparency=0,
                 size=1),
            dict(label=tr('Medium'),
                 min=2,
                 max=2,
                 colour='#FFA500',
                 transparency=0,
                 size=1),
            dict(label=tr('High'),
                 min=3,
                 max=3,
                 colour='#F31A1C',
                 transparency=0,
                 size=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes)

        # Create vector layer and return
        name = 'Buildings Affected'

        V = Vector(data=building_impact,
                   projection=E.get_projection(),
                   geometry=coordinates,
                   geometry_type=E.geometry_type,
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'map_title': map_title,
                       'target_field': self.target_field,
                       'statistics_type': self.statistics_type,
                       'statistics_classes': self.statistics_classes
                   },
                   name=name,
                   style_info=style_info)
        return V
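A sketch of the per-category counting done in the loop above, using a plain dictionary; the category values are illustrative.

CATEGORY_LABELS = {3: 'High', 2: 'Medium', 1: 'Low'}

counts = {'High': 0, 'Medium': 0, 'Low': 0, 'None': 0}
for value in [3, 2, 2, 1, 0]:
    counts[CATEGORY_LABELS.get(value, 'None')] += 1
print(counts)  # High: 1, Medium: 2, Low: 1, None: 1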
    def run(self, layers):
        """Earthquake impact to buildings (e.g. from OpenStreetMap)
        :param layers: All the input layers (Hazard Layer and Exposure Layer)
        """

        LOGGER.debug('Running earthquake building impact')

        # merely initialize
        building_value = 0
        contents_value = 0

        # Thresholds for mmi breakdown
        t0 = self.parameters['low_threshold']
        t1 = self.parameters['medium_threshold']
        t2 = self.parameters['high_threshold']

        # Class Attribute and Label

        class_1 = {'label': tr('Low'), 'class': 1}
        class_2 = {'label': tr('Medium'), 'class': 2}
        class_3 = {'label': tr('High'), 'class': 3}

        # Extract data
        my_hazard = get_hazard_layer(layers)  # Ground shaking (MMI)
        my_exposure = get_exposure_layer(layers)  # Building locations

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Define attribute name for hazard levels
        hazard_attribute = 'mmi'

        # Determine if exposure data have NEXIS attributes
        attribute_names = my_exposure.get_attribute_names()
        if ('FLOOR_AREA' in attribute_names and 'BUILDING_C' in attribute_names
                and 'CONTENTS_C' in attribute_names):
            is_nexis = True
        else:
            is_nexis = False

        # Interpolate hazard level to building locations
        my_interpolate_result = assign_hazard_values_to_exposure_data(
            my_hazard, my_exposure, attribute_name=hazard_attribute)

        # Extract relevant exposure data
        #attribute_names = my_interpolate_result.get_attribute_names()
        attributes = my_interpolate_result.get_data()

        interpolate_size = len(my_interpolate_result)

        # Calculate building impact
        lo = 0
        me = 0
        hi = 0
        building_values = {}
        contents_values = {}
        for key in range(4):
            building_values[key] = 0
            contents_values[key] = 0
        for i in range(interpolate_size):
            # Classify building according to shake level
            # and calculate dollar losses

            if is_nexis:
                try:
                    area = float(attributes[i]['FLOOR_AREA'])
                except (ValueError, KeyError):
                    #print 'Got area', attributes[i]['FLOOR_AREA']
                    area = 0.0

                try:
                    building_value_density = float(attributes[i]['BUILDING_C'])
                except (ValueError, KeyError):
                    #print 'Got bld value', attributes[i]['BUILDING_C']
                    building_value_density = 0.0

                try:
                    contents_value_density = float(attributes[i]['CONTENTS_C'])
                except (ValueError, KeyError):
                    #print 'Got cont value', attributes[i]['CONTENTS_C']
                    contents_value_density = 0.0

                building_value = building_value_density * area
                contents_value = contents_value_density * area

            try:
                x = float(attributes[i][hazard_attribute])  # MMI
            except TypeError:
                x = 0.0
            if t0 <= x < t1:
                lo += 1
                cls = 1
            elif t1 <= x < t2:
                me += 1
                cls = 2
            elif t2 <= x:
                hi += 1
                cls = 3
            else:
                # Not reported for less than level t0
                cls = 0

            attributes[i][self.target_field] = cls

            if is_nexis:
                # Accumulate dollar values (converted to $M units below)
                building_values[cls] += building_value
                contents_values[cls] += contents_value

        if is_nexis:
            # Convert to units of one million dollars
            for key in range(4):
                building_values[key] = int(building_values[key] / 1000000)
                contents_values[key] = int(contents_values[key] / 1000000)

        if is_nexis:
            # Generate simple impact report for NEXIS type buildings
            table_body = [
                question,
                TableRow([
                    tr('Hazard Level'),
                    tr('Buildings Affected'),
                    tr('Buildings value ($M)'),
                    tr('Contents value ($M)')
                ],
                         header=True),
                TableRow([
                    class_1['label'],
                    format_int(lo),
                    format_int(building_values[1]),
                    format_int(contents_values[1])
                ]),
                TableRow([
                    class_2['label'],
                    format_int(me),
                    format_int(building_values[2]),
                    format_int(contents_values[2])
                ]),
                TableRow([
                    class_3['label'],
                    format_int(hi),
                    format_int(building_values[3]),
                    format_int(contents_values[3])
                ])
            ]
        else:
            # Generate simple impact report for unspecific buildings
            table_body = [
                question,
                TableRow([tr('Hazard Level'),
                          tr('Buildings Affected')],
                         header=True),
                TableRow([class_1['label'], format_int(lo)]),
                TableRow([class_2['label'], format_int(me)]),
                TableRow([class_3['label'], format_int(hi)])
            ]

        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(
            tr('High hazard is defined as shake levels greater '
               'than %i on the MMI scale.') % t2)
        table_body.append(
            tr('Medium hazard is defined as shake levels '
               'between %i and %i on the MMI scale.') % (t1, t2))
        table_body.append(
            tr('Low hazard is defined as shake levels '
               'between %i and %i on the MMI scale.') % (t0, t1))
        if is_nexis:
            table_body.append(
                tr('Values are in units of 1 million Australian '
                   'Dollars'))

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # Create style
        style_classes = [
            dict(label=class_1['label'],
                 value=class_1['class'],
                 colour='#ffff00',
                 transparency=1),
            dict(label=class_2['label'],
                 value=class_2['class'],
                 colour='#ffaa00',
                 transparency=1),
            dict(label=class_3['label'],
                 value=class_3['class'],
                 colour='#ff0000',
                 transparency=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # For map printing purposes
        map_title = tr('Buildings affected by earthquake')
        legend_notes = tr('The impact level is based on the thresholds '
                          'entered by the user.')
        legend_units = tr('(mmi)')
        legend_title = tr('Impact level')

        # Create vector layer and return
        result_layer = Vector(
            data=attributes,
            projection=my_interpolate_result.get_projection(),
            geometry=my_interpolate_result.get_geometry(),
            name=tr('Estimated buildings affected'),
            keywords={
                'impact_summary': impact_summary,
                'impact_table': impact_table,
                'map_title': map_title,
                'legend_notes': legend_notes,
                'legend_units': legend_units,
                'legend_title': legend_title,
                'target_field': self.target_field,
                'statistics_type': self.statistics_type,
                'statistics_classes': self.statistics_classes
            },
            style_info=style_info)

        LOGGER.debug('Created vector layer  %s' % str(result_layer))
        return result_layer
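A sketch of the NEXIS dollar-loss accumulation above, with made-up floor areas and value densities; only the arithmetic is shown.

buildings = [
    {'FLOOR_AREA': 1200.0, 'BUILDING_C': 2500.0, 'class': 3},
    {'FLOOR_AREA': 800.0, 'BUILDING_C': 1500.0, 'class': 2},
]

building_values = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0}
for building in buildings:
    value = building['FLOOR_AREA'] * building['BUILDING_C']
    building_values[building['class']] += value

# Convert to units of one million dollars, as in the report above
for cls in building_values:
    building_values[cls] = int(building_values[cls] / 1000000)
print(building_values)  # class 3 -> 3, class 2 -> 1 ($M)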
예제 #46
0
    def run(self):
        """Experimental impact function for flood polygons on roads."""

        # Get parameters from layer's keywords
        self.hazard_class_attribute = self.hazard.keyword('field')
        self.hazard_class_mapping = self.hazard.keyword('value_map')
        self.exposure_class_attribute = self.exposure.keyword(
            'road_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        hazard_provider = self.hazard.layer.dataProvider()
        affected_field_index = hazard_provider.fieldNameIndex(
            self.hazard_class_attribute)
        # see #818: should still work if there is no valid attribute
        if affected_field_index == -1:
            pass
            # message = tr('''Parameter "Affected Field"(='%s')
            # is not present in the attribute table of the hazard layer.
            #     ''' % (affected_field, ))
            # raise GetDataError(message)

        # LOGGER.info('Affected field: %s' % self.hazard_class_attribute)
        # LOGGER.info('Affected field index: %s' % affected_field_index)

        # Filter geometry and data using the extent
        requested_extent = QgsRectangle(*self.requested_extent)
        # This is a hack - we should be setting the extent CRS
        # in the IF base class via safe/engine/core.py:calculate_impact
        # for now we assume the extent is in 4326 because it
        # is set to that from geo_extent
        # See issue #1857
        transform = QgsCoordinateTransform(
            self.requested_extent_crs, self.hazard.crs())

        projected_extent = transform.transformBoundingBox(requested_extent)
        request = QgsFeatureRequest()
        request.setFilterRect(projected_extent)

        # Split line_layer by hazard and save as result:
        #   1) Filter inundated polygons from the hazard layer
        #   2) Mark roads as inundated (1) or not inundated (0)

        #################################
        #           REMARK 1
        #  In qgis 2.2 we can use request to filter inundated
        #  polygons directly (it allows QgsExpression). Then
        #  we can delete the lines and call
        #
        #  request = ....
        #  hazard_poly = union_geometry(H, request)
        #
        ################################

        hazard_features = self.hazard.layer.getFeatures(request)
        hazard_poly = None
        for feature in hazard_features:
            attributes = feature.attributes()
            if affected_field_index != -1:
                value = attributes[affected_field_index]
                if value not in self.hazard_class_mapping[self.wet]:
                    continue
            if hazard_poly is None:
                hazard_poly = QgsGeometry(feature.geometry())
            else:
                # Make geometry union of inundated polygons
                # But some feature.geometry() could be invalid, skip them
                tmp_geometry = hazard_poly.combine(feature.geometry())
                try:
                    if tmp_geometry.isGeosValid():
                        hazard_poly = tmp_geometry
                except AttributeError:
                    pass

        ###############################################
        # END REMARK 1
        ###############################################

        if hazard_poly is None:
            message = tr(
                'There are no objects in the hazard layer with %s (Affected '
                'Field) in %s (Affected Value). Please check the value or use '
                'a different extent.' % (
                    self.hazard_class_attribute,
                    self.hazard_class_mapping[self.wet]))
            raise GetDataError(message)

        # Clip exposure by the extent
        extent_as_polygon = QgsGeometry().fromRect(requested_extent)
        line_layer = clip_by_polygon(self.exposure.layer, extent_as_polygon)
        # Find inundated roads, mark them
        line_layer = split_by_polygon(
            line_layer,
            hazard_poly,
            request,
            mark_value=(self.target_field, 1))

        # Generate simple impact report
        epsg = get_utm_epsg(self.requested_extent[0], self.requested_extent[1])
        destination_crs = QgsCoordinateReferenceSystem(epsg)
        transform = QgsCoordinateTransform(
            self.exposure.layer.crs(), destination_crs)

        roads_data = line_layer.getFeatures()
        road_type_field_index = line_layer.fieldNameIndex(
            self.exposure_class_attribute)
        target_field_index = line_layer.fieldNameIndex(self.target_field)

        classes = [tr('Temporarily closed')]
        self.init_report_var(classes)

        for road in roads_data:
            attributes = road.attributes()

            usage = attributes[road_type_field_index]
            usage = main_type(usage, exposure_value_mapping)

            geom = road.geometry()
            geom.transform(transform)
            length = geom.length()

            affected = False
            if attributes[target_field_index] == 1:
                affected = True

            self.classify_feature(classes[0], usage, length, affected)

        self.reorder_dictionaries()

        style_classes = [dict(label=tr('Not Inundated'), value=0,
                              colour='#1EFC7C', transparency=0, size=0.5),
                         dict(label=tr('Inundated'), value=1,
                              colour='#F31A1C', transparency=0, size=0.5)]
        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        # Convert QgsVectorLayer to inasafe layer and return it
        if line_layer.featureCount() == 0:
            # Raising an exception seems poor semantics here....
            raise ZeroImpactException(
                tr('No roads are flooded in this scenario.'))

        impact_data = self.generate_data()

        extra_keywords = {
            'map_title': self.metadata().key('map_title'),
            'legend_title': self.metadata().key('legend_title'),
            'target_field': self.target_field
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        impact_layer = Vector(
            data=line_layer,
            name=self.metadata().key('layer_name'),
            keywords=impact_layer_keywords,
            style_info=style_info
        )

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
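A sketch of the keyword-driven wet/dry filter used above (features whose value is not in the wet mapping are skipped), restated with plain dictionaries; the attribute name and the value map are assumptions.

hazard_class_attribute = 'state'
hazard_class_mapping = {'wet': ['wet', 'flooded', 'inundated']}

features = [
    {'state': 'dry'}, {'state': 'flooded'}, {'state': 'wet'}, {'state': 'dry'},
]

wet_features = [
    feature for feature in features
    if feature[hazard_class_attribute] in hazard_class_mapping['wet']
]
print(len(wet_features))  # 2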
예제 #47
0
def processFloodEvent(netcdf_file=None, hours=24):
    """A function to process netcdf_file to a forecast file.
    """
    print 'Start flood forecasting'

    if netcdf_file is None:
        # retrieve data from the web
        netcdf_file = download_file_url(netcdf_url, forecast_directory)
    else:
        netcdf_file = download_file_url(netcdf_url, name=netcdf_file,
            download_directory=forecast_directory)
    print 'Do flood forecasting for %s ...' % netcdf_file

#    # check if a forecasting file has been created or not
#    is_exist, polyforecast_filepath = get_result_file_name(netcdf_file, hours)
#
#    if is_exist:
#        print 'Current flood forecasting has been already created.'
#        print 'You can look it at %s' % polyforecast_filepath
#        return

    # convert to tif
#    tif_file = polyforecast_filepath.replace('_regions.shp', '.tif')
    tif_filename = convert_netcdf2tif(netcdf_file, hours,
            verbose=False, output_dir=flood_directory)
    print 'tif_file', tif_filename
    tif_file = read_layer(tif_filename)

    # check if there is another file with the same name
    # if so, do not do the forecasting
    polyforecast_filepath = tif_filename.replace('.tif', '_regions.shp')
    zip_filename = polyforecast_filepath.replace('.shp', '.zip')
    if os.path.isfile(zip_filename):
        print ('File %s already exists, so the forecasting is skipped'
               % zip_filename)
    else:
        my_polygons = read_layer(polygons_path)
        my_result = tag_polygons_by_grid(my_polygons, tif_file, threshold=0.3,
            tag='affected')

        new_geom = my_result.get_geometry()
        new_data = my_result.get_data()

        date = os.path.split(netcdf_file)[-1].split('_')[0]

        v = Vector(geometry=new_geom, data=new_data,
            projection=my_result.projection,
            keywords={'category': 'hazard',
                      'subcategory': 'flood',
                      'title': ('%d hour flood forecast regions '
                                'in Jakarta at %s' % (hours,
                                                      date))})

        print 'polyforecast_filepath', polyforecast_filepath
        v.write_to_file(polyforecast_filepath)
        print 'Wrote tagged polygons to %s' % polyforecast_filepath

    # Zip all files
    if os.path.isfile(zip_filename):
        print 'Already zipped to %s' % zip_filename
    else:
        zip_shp(polyforecast_filepath, extra_ext=['.keywords'],
            remove_file=True)
        print 'Zipped to %s' % zip_filename
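A sketch of the filename chain used above, with a made-up netCDF name; in the function itself the .tif name is supplied by convert_netcdf2tif(), so the splitext step here is an assumption.

import os

netcdf_file = '/tmp/forecast/20120201000000_Jakarta_flood.nc'
tif_filename = os.path.splitext(netcdf_file)[0] + '.tif'
polyforecast_filepath = tif_filename.replace('.tif', '_regions.shp')
zip_filename = polyforecast_filepath.replace('.shp', '.zip')
date = os.path.split(netcdf_file)[-1].split('_')[0]

print(polyforecast_filepath)  # /tmp/forecast/20120201000000_Jakarta_flood_regions.shp
print(zip_filename)           # /tmp/forecast/20120201000000_Jakarta_flood_regions.zip
print(date)                   # 20120201000000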
예제 #48
0
    def test_clip_points_by_polygons_with_holes_real(self):
        """Points can be clipped by polygons with holes (real data)
        """

        # Read real polygon with holes
        filename = '%s/%s' % (TESTDATA, 'donut.shp')
        L = read_layer(filename)

        # --------------------------------------------
        # Pick one polygon that has 2 inner rings
        P = L.get_geometry(as_geometry_objects=True)[1]

        outer_ring = P.outer_ring
        inner_ring0 = P.inner_rings[0]
        inner_ring1 = P.inner_rings[1]

        # Make some test points
        points_in_bbox = generate_random_points_in_bbox(outer_ring, 1000)
        points_in_inner_ring0 = populate_polygon(inner_ring0, 2, seed=13)
        points_in_inner_ring1 = populate_polygon(inner_ring1, 2, seed=17)
        points = numpy.concatenate((points_in_bbox,
                                    points_in_inner_ring0,
                                    points_in_inner_ring1))

        # Clip
        indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)

        # Sanity
        for point in points[indices, :]:
            # Must be inside outer ring
            assert is_inside_polygon(point, outer_ring)

            # But not in any of the inner rings
            assert not is_inside_polygon(point, inner_ring0)
            assert not is_inside_polygon(point, inner_ring1)

        # ---------------------------------------------------------
        # Pick a polygon that has 1 inner ring (nice visualisation)
        P = L.get_geometry(as_geometry_objects=True)[9]

        outer_ring = P.outer_ring
        inner_ring = P.inner_rings[0]

        # Make some test points
        points = generate_random_points_in_bbox(outer_ring, 500)

        # Clip
        indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)

        # Sanity
        for point in points[indices, :]:
            # Must be inside outer ring
            assert is_inside_polygon(point, outer_ring)

            # But not in the inner ring
            assert not is_inside_polygon(point, inner_ring)

        # Store for visual check (nice one!)
        # Comment out os.remove if you want to see the layers
        pol = Vector(geometry=[P])
        tmp_filename = unique_filename(suffix='.shp')
        pol.write_to_file(tmp_filename)
        # print 'Polygon with holes written to %s' % tmp_filename
        os.remove(tmp_filename)

        pts = Vector(geometry=points[indices, :])
        tmp_filename = unique_filename(suffix='.shp')
        pts.write_to_file(tmp_filename)
        # print 'Clipped points written to %s' % tmp_filename
        os.remove(tmp_filename)
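
The clipping pattern used in this test reduces to a small helper: keep only the points that fall inside the outer ring but outside every inner ring. A minimal sketch, assuming the same inside_polygon(points, outer_ring, holes=...) helper and Polygon objects (with outer_ring and inner_rings attributes) used above; the function name is hypothetical.

def clip_points_to_polygon_with_holes(points, polygon):
    """Return the points inside polygon's outer ring but outside its holes."""
    # inside_polygon is assumed to be importable from the project's
    # polygon utilities, as in the test above.
    indices = inside_polygon(
        points, polygon.outer_ring, holes=polygon.inner_rings)
    return points[indices, :]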
Example #49
0
        # but will reduce overall bounding box for buildings under
        # consideration)
        # geom = res.get_geometry()
        # data = res.get_data()
        # new_geom = []
        # new_data = []
        #
        # for i, d in enumerate(data):
        #    if d['affected']:
        #        g = geom[i]
        #        new_geom.append(g)
        #        new_data.append(d)

        # Keep all polygons
        new_geom = res.get_geometry()
        new_data = res.get_data()

        date = os.path.split(args.filename)[-1].split('_')[0]
        v = Vector(geometry=new_geom, data=new_data,
                   projection=res.projection,
                   keywords={'category': 'hazard',
                             'subcategory': 'flood',
                             'title': ('%d hour flood forecast regions '
                                       'in Jakarta at %s' % (args.hours,
                                                             date))})

        polyforecast_filename = (os.path.splitext(tif_filename)[0] +
                                 '_regions.shp')
        v.write_to_file(polyforecast_filename)
        print 'Wrote tagged polygons to %s' % polyforecast_filename
Example #50
0
    def run(self):
        """Risk plugin for classified polygon hazard on building/structure.

        Counts number of building exposed to each hazard zones.

        :returns: Map of building exposed to each hazard zones.
                  Table with number of buildings affected
        :rtype: dict
        """
        self.validate()
        self.prepare()

        # Value from layer's keywords
        self.hazard_class_attribute = self.hazard.keyword('field')
        # Try to get the value from the keywords; if it is missing, do not
        # fail but fall back to the old get_osm_building_usage()
        try:
            self.exposure_class_attribute = self.exposure.keyword(
                'structure_class_field')
        except KeywordNotFoundError:
            self.exposure_class_attribute = None

        hazard_zone_attribute_index = self.hazard.layer.fieldNameIndex(
            self.hazard_class_attribute)

        # Check if hazard_zone_attribute exists in hazard_layer
        if hazard_zone_attribute_index < 0:
            message = (
                'Hazard data %s does not contain expected attribute %s ' %
                (self.hazard.layer.name(), self.hazard_class_attribute))
            # noinspection PyExceptionInherit
            raise InaSAFEError(message)

        # Hazard zone categories from hazard layer
        self.hazard_zones = self.hazard.layer.uniqueValues(
            hazard_zone_attribute_index)

        self.buildings = {}
        self.affected_buildings = OrderedDict()
        for hazard_zone in self.hazard_zones:
            self.affected_buildings[hazard_zone] = {}

        wgs84_extent = QgsRectangle(
            self.requested_extent[0], self.requested_extent[1],
            self.requested_extent[2], self.requested_extent[3])

        # Run interpolation function for polygon2polygon
        interpolated_layer = interpolate_polygon_polygon(
            self.hazard.layer, self.exposure.layer, wgs84_extent)

        new_field = QgsField(self.target_field, QVariant.String)
        interpolated_layer.dataProvider().addAttributes([new_field])
        interpolated_layer.updateFields()

        attribute_names = [
            field.name() for field in interpolated_layer.pendingFields()]
        target_field_index = interpolated_layer.fieldNameIndex(
            self.target_field)
        changed_values = {}

        if interpolated_layer.featureCount() < 1:
            raise ZeroImpactException()

        # Extract relevant interpolated data
        for feature in interpolated_layer.getFeatures():
            hazard_value = feature[self.hazard_class_attribute]
            if not hazard_value:
                hazard_value = self._not_affected_value
            changed_values[feature.id()] = {target_field_index: hazard_value}

            if (self.exposure_class_attribute and
                    self.exposure_class_attribute in attribute_names):
                usage = feature[self.exposure_class_attribute]
            else:
                usage = get_osm_building_usage(attribute_names, feature)

            if usage is None:
                usage = tr('Unknown')
            if usage not in self.buildings:
                self.buildings[usage] = 0
                for category in self.affected_buildings.keys():
                    self.affected_buildings[category][usage] = OrderedDict(
                        [(tr('Buildings Affected'), 0)])
            self.buildings[usage] += 1
            if hazard_value in self.affected_buildings.keys():
                self.affected_buildings[hazard_value][usage][
                    tr('Buildings Affected')] += 1

        interpolated_layer.dataProvider().changeAttributeValues(changed_values)

        # Lump small entries and 'unknown' into 'other' category
        self._consolidate_to_other()

        # Generate simple impact report
        impact_summary = impact_table = self.html_report()

        # Create style
        categories = self.hazard_zones
        categories.append(self._not_affected_value)
        colours = color_ramp(len(categories))
        style_classes = []

        for i, hazard_zone in enumerate(self.hazard_zones):
            style_class = dict()
            style_class['label'] = tr(hazard_zone)
            style_class['transparency'] = 0
            style_class['value'] = hazard_zone
            style_class['size'] = 1
            style_class['colour'] = colours[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # For printing map purpose
        map_title = tr('Buildings affected by each hazard zone')
        legend_title = tr('Building count')
        legend_units = tr('(building)')
        legend_notes = tr('Thousand separator is represented by %s') % (
            get_thousand_separator())

        # Create vector layer and return
        impact_layer = Vector(
            data=interpolated_layer,
            name=tr('Buildings affected by each hazard zone'),
            keywords={'impact_summary': impact_summary,
                      'impact_table': impact_table,
                      'target_field': self.target_field,
                      'map_title': map_title,
                      'legend_notes': legend_notes,
                      'legend_units': legend_units,
                      'legend_title': legend_title},
            style_info=style_info)

        self._impact = impact_layer
        return impact_layer
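
The per-zone, per-usage bookkeeping above can be isolated into a pure-Python tally that is easier to unit test than the full impact function. A minimal sketch; the function name and the records argument (an iterable of (usage, hazard_zone) pairs) are hypothetical, and tr() is omitted.

from collections import OrderedDict


def tally_buildings(records, hazard_zones,
                    affected_label='Buildings Affected'):
    """Count buildings per usage and per (hazard zone, usage) pair."""
    buildings = {}
    affected = OrderedDict((zone, {}) for zone in hazard_zones)
    for usage, zone in records:
        if usage not in buildings:
            # First time this usage is seen: register it under every zone
            buildings[usage] = 0
            for known_zone in affected:
                affected[known_zone][usage] = OrderedDict(
                    [(affected_label, 0)])
        buildings[usage] += 1
        if zone in affected:
            affected[zone][usage][affected_label] += 1
    return buildings, affected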
Example #51
0
    def run(self):
        """Flood impact to buildings (e.g. from Open Street Map)."""

        threshold = self.parameters['threshold'].value  # Flood threshold [m]

        verify(isinstance(threshold, float),
               'Expected threshold to be a float. Got %s' % str(threshold))

        # Determine attribute name for hazard levels
        hazard_attribute = 'depth'

        # Interpolate hazard level to building locations
        interpolated_layer = assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=hazard_attribute)

        # Extract relevant exposure data
        features = interpolated_layer.get_data()
        total_features = len(interpolated_layer)

        structure_class_field = self.exposure.keyword('structure_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')

        hazard_classes = [tr('Flooded'), tr('Wet'), tr('Dry')]
        self.init_report_var(hazard_classes)

        for i in range(total_features):
            # Get the interpolated depth
            water_depth = float(features[i]['depth'])
            if water_depth <= 0:
                inundated_status = 0  # dry
            elif water_depth >= threshold:
                inundated_status = 1  # inundated
            else:
                inundated_status = 2  # wet

            usage = features[i].get(structure_class_field, None)
            usage = main_type(usage, exposure_value_mapping)

            # Add calculated impact to existing attributes
            features[i][self.target_field] = inundated_status
            category = [
                tr('Dry'),
                tr('Flooded'),
                tr('Wet')][inundated_status]
            self.classify_feature(category, usage, True)

        self.reorder_dictionaries()

        style_classes = [
            dict(
                label=tr('Dry (<= 0 m)'),
                value=0,
                colour='#1EFC7C',
                transparency=0,
                size=1
            ),
            dict(
                label=tr('Wet (0 m - %.1f m)') % threshold,
                value=2,
                colour='#FF9900',
                transparency=0,
                size=1
            ),
            dict(
                label=tr('Flooded (>= %.1f m)') % threshold,
                value=1,
                colour='#F31A1C',
                transparency=0,
                size=1
            )]

        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')

        impact_data = self.generate_data()

        extra_keywords = {
            'target_field': self.target_field,
            'map_title': self.map_title(),
            'legend_title': self.metadata().key('legend_title'),
            'legend_units': self.metadata().key('legend_units'),
            'buildings_total': total_features,
            'buildings_affected': self.total_affected_buildings
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        impact_layer = Vector(
            data=features,
            projection=interpolated_layer.get_projection(),
            geometry=interpolated_layer.get_geometry(),
            name=self.map_title(),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
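
The depth banding used above is simple enough to pull out as a standalone function. A minimal sketch (function name hypothetical) matching the status codes written to the target field: 0 = dry, 1 = flooded, 2 = wet.

def classify_depth(water_depth, threshold):
    """Return the inundation status code for an interpolated depth."""
    if water_depth <= 0:
        return 0  # dry
    if water_depth >= threshold:
        return 1  # flooded (inundated)
    return 2  # wet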
Example #52
0
    def test_tag_regions_by_flood(self):
        """Regions can be tagged correctly with data from flood forecasts
        """

        threshold = 0.3
        label = 'affected'

        tif_filename = convert_netcdf2tif(self.nc_filename, 24, verbose=False)
        region_filename = os.path.join(TESTDATA, 'rw_jakarta_singlepart.shp')

        grid = read_layer(tif_filename)
        polygons = read_layer(region_filename)

        res = tag_polygons_by_grid(polygons, grid,
                                   threshold=threshold,
                                   tag=label)
        os.remove(tif_filename)
        geom = res.get_geometry()
        data = res.get_data()

        # Check correctness of affected regions
        affected_geom = []
        affected_data = []
        for i, d in enumerate(data):
            if d[label]:
                g = geom[i]
                affected_geom.append(g)
                affected_data.append(d)

        assert len(affected_geom) == 37
        assert len(affected_data) == 37

        # Check that every grid point exceeding threshold lies inside
        # one of the polygons marked as affected
        P, V = grid.to_vector_points()

        flooded_points_geom = []
        flooded_points_data = []
        for i, point in enumerate(P):
            val = V[i]
            if val > threshold:
                # Point that is flooded must be in one of the tagged polygons
                found = False
                for polygon in affected_geom:
                    if is_inside_polygon(point, polygon):
                        found = True
                msg = ('No affected polygon was found for point [%f, %f] '
                       'with value %f' % (point[0], point[1], val))
                verify(found, msg)

                # Collect flooded points for visualisation
                flooded_points_geom.append(point)
                flooded_points_data.append({'depth': val})

        # To generate files for visual inspection.
        # See
# https://raw.github.com/AIFDR/inasafe/master/files/flood_tagging_test.png
# https://github.com/AIFDR/inasafe/blob/master/files/flood_tagging_test.tgz

        tmp_filename = unique_filename(prefix='grid', suffix='.tif')
        grid.write_to_file(tmp_filename)
        #print 'Grid written to ', tmp_filename

        tmp_filename = unique_filename(prefix='regions', suffix='.shp')
        res.write_to_file(tmp_filename)
        #print 'Regions written to ', tmp_filename

        tmp_filename = unique_filename(prefix='flooded_points', suffix='.shp')
        v = Vector(geometry=flooded_points_geom, data=flooded_points_data)
        v.write_to_file(tmp_filename)
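
The sanity check in this test, that every grid point above the threshold falls inside at least one tagged polygon, can be expressed as a small predicate. A minimal sketch assuming the same is_inside_polygon(point, polygon) helper used above; the function name is hypothetical.

def all_flooded_points_covered(points, values, affected_polygons, threshold):
    """True if every point with value above threshold lies in some polygon."""
    for point, value in zip(points, values):
        if value <= threshold:
            continue
        if not any(is_inside_polygon(point, polygon)
                   for polygon in affected_polygons):
            return False
    return True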
Example #53
0
    def run(self, layers):
        """Risk plugin for volcano population evacuation

        :param layers: List of layers, of which two are expected to be
            present:

            * my_hazard: Vector polygon layer of volcano impact zones
            * my_exposure: Raster layer of population data on the same grid as
              my_hazard

        Counts the number of people exposed to the volcano event.

        :returns: Map of population exposed to the volcano hazard zone.
            The returned dict includes a table with the number of people
            to be evacuated and the supplies required.
        :rtype: dict
        """

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Volcano KRB
        my_exposure = get_exposure_layer(layers)

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Input checks
        if not my_hazard.is_vector:
            msg = ('Input hazard %s was not a vector layer as expected' %
                   my_hazard.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon or point layer. I got %s with '
               'layer type %s' %
               (my_hazard.get_name(), my_hazard.get_geometry_name()))
        if not (my_hazard.is_polygon_data or my_hazard.is_point_data):
            raise Exception(msg)

        if my_hazard.is_point_data:
            # Use concentric circles
            radii = self.parameters['distance [km]']

            centers = my_hazard.get_geometry()
            attributes = my_hazard.get_data()
            rad_m = [x * 1000 for x in radii]  # Convert to meters
            my_hazard = make_circular_polygon(centers,
                                              rad_m,
                                              attributes=attributes)

            category_title = 'Radius'
            category_header = tr('Distance [km]')
            category_names = radii

            name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
        else:
            # Use hazard map
            category_title = 'KRB'
            category_header = tr('Category')

            # FIXME (Ole): Change to English and use translation system
            category_names = [
                'Kawasan Rawan Bencana III', 'Kawasan Rawan Bencana II',
                'Kawasan Rawan Bencana I'
            ]

            name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map
            attributes = my_hazard.get_data()

        # Get names of volcanos considered
        if name_attribute in my_hazard.get_attribute_names():
            D = {}
            for att in my_hazard.get_data():
                # Run through all polygons and get unique names
                D[att[name_attribute]] = None

            volcano_names = ', '.join('%s' % name for name in D)
        else:
            volcano_names = tr('Not specified in data')

        if category_title not in my_hazard.get_attribute_names():
            msg = ('Hazard data %s did not contain expected '
                   'attribute %s ' % (my_hazard.get_name(), category_title))
            # noinspection PyExceptionInherit
            raise InaSAFEError(msg)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(my_hazard,
                                                  my_exposure,
                                                  attribute_name='population')

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a population count of zero
        new_attributes = my_hazard.get_data()

        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            cat = attr[category_title]
            categories[cat] = 0

        # Count affected population per polygon and total
        evacuated = 0
        for attr in P.get_data():
            # Get population at this location
            pop = float(attr['population'])

            # Update population count for associated polygon
            poly_id = attr['polygon_id']
            new_attributes[poly_id][self.target_field] += pop

            # Update population count for each category
            cat = new_attributes[poly_id][category_title]
            categories[cat] += pop

        # Count totals
        total = int(numpy.sum(my_exposure.get_data(nan=0)))

        # Don't show digits less than a thousand
        total = round_thousand(total)

        # Count number and cumulative for each zone
        cum = 0
        pops = {}
        cums = {}
        for name in category_names:
            if category_title == 'Radius':
                key = name * 1000  # Convert to meters
            else:
                key = name
            # Use .get to prevent a KeyError for missing categories
            pop = int(categories.get(key, 0))

            pop = round_thousand(pop)

            cum += pop
            cum = round_thousand(cum)

            pops[name] = pop
            cums[name] = cum

        # Use final accumulation as total number needing evac
        evacuated = cum

        tot_needs = evacuated_population_weekly_needs(evacuated)

        # Generate impact report for the pdf map
        blank_cell = ''
        table_body = [
            question,
            TableRow(
                [tr('Volcanos considered'),
                 '%s' % volcano_names, blank_cell],
                header=True),
            TableRow([
                tr('People needing evacuation'),
                '%s' % format_int(evacuated), blank_cell
            ],
                     header=True),
            TableRow(
                [category_header,
                 tr('Total'), tr('Cumulative')], header=True)
        ]

        for name in category_names:
            table_body.append(
                TableRow(
                    [name,
                     format_int(pops[name]),
                     format_int(cums[name])]))

        table_body.extend([
            TableRow(
                tr('Map shows population affected in '
                   'each of volcano hazard polygons.')),
            TableRow([tr('Needs per week'),
                      tr('Total'), blank_cell],
                     header=True),
            [tr('Rice [kg]'),
             format_int(tot_needs['rice']), blank_cell],
            [
                tr('Drinking Water [l]'),
                format_int(tot_needs['drinking_water']), blank_cell
            ],
            [
                tr('Clean Water [l]'),
                format_int(tot_needs['water']), blank_cell
            ],
            [
                tr('Family Kits'),
                format_int(tot_needs['family_kits']), blank_cell
            ], [tr('Toilets'),
                format_int(tot_needs['toilets']), blank_cell]
        ])
        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population %s in the exposure layer') %
            format_int(total),
            tr('People need evacuation if they are within the '
               'volcanic hazard zones.')
        ])

        population_counts = [x[self.target_field] for x in new_attributes]
        impact_summary = Table(table_body).toNewlineFreeString()

        # check for zero impact
        if numpy.nanmax(population_counts) == 0 == numpy.nanmin(
                population_counts):
            table_body = [
                question,
                TableRow([
                    tr('People needing evacuation'),
                    '%s' % format_int(evacuated), blank_cell
                ],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        classes = create_classes(population_counts, len(colours))
        interval_classes = humanize_class(classes)
        # Define style info for output polygons showing population counts
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 100
                style_class['min'] = 0
            else:
                transparency = 30
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('People affected by volcanic hazard zone')
        legend_notes = tr('Thousand separator is represented by %s') % (
            get_thousand_separator())
        legend_units = tr('(people)')
        legend_title = tr('Population count')

        # Create vector layer and return
        V = Vector(data=new_attributes,
                   projection=my_hazard.get_projection(),
                   geometry=my_hazard.get_geometry(as_geometry_objects=True),
                   name=tr('Population affected by volcanic hazard zone'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return V
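
The per-category and cumulative totals computed above reduce to a small pure-Python helper. A minimal sketch with hypothetical names; the thousand-rounding applied by the impact function is omitted here.

def cumulative_counts(categories, ordered_names):
    """Return per-category and cumulative population counts."""
    pops = {}
    cums = {}
    running = 0
    for name in ordered_names:
        pop = int(categories.get(name, 0))
        running += pop
        pops[name] = pop
        cums[name] = running
    return pops, cums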
Example #54
0
def buffer_points(centers, radii, hazard_zone_attribute, data_table=None):
    """Buffer points for each center with defined radii.

    If data_table is given, its rows are copied to the result. This
    function is used for making buffers around volcano point hazards.

    :param centers: Center of each point as a (longitude, latitude) pair.
    :type centers: list

    :param radii: Desired approximate radii in meters (must be
        monotonically ascending). Can be a single number or a list of
        numbers.
    :type radii: int, list

    :param hazard_zone_attribute: The name of the attributes representing
        hazard zone.
    :type hazard_zone_attribute: str

    :param data_table: Data for each center (optional)
    :type data_table: list

    :return: Vector polygon layer representing circles in WGS84.
    :rtype: Vector
    """
    if not isinstance(radii, list):
        radii = [radii]

    # Check that radii are monotonically increasing
    monotonically_increasing_flag = all(x < y
                                        for x, y in zip(radii, radii[1:]))
    if not monotonically_increasing_flag:
        raise RadiiException(RadiiException.suggestion)

    circles = []
    new_data_table = []
    for i, center in enumerate(centers):
        p = Point(longitude=center[0], latitude=center[1])
        inner_rings = None
        for radius in radii:
            # Generate circle polygon
            C = p.generate_circle(radius)
            circles.append(Polygon(outer_ring=C, inner_rings=inner_rings))

            # Use the current circle as the inner ring of the next polygon
            inner_rings = [C]

            # Carry attributes for this center forward (copied per ring)
            row = {}
            if data_table is not None:
                for key in data_table[i]:
                    row[key] = data_table[i][key]

            # Add radius to this ring
            row[hazard_zone_attribute] = radius

            new_data_table.append(row)

    circular_polygon = Vector(
        geometry=circles,  # List with circular polygons
        data=new_data_table,  # Associated attributes
        geometry_type='polygon')

    return circular_polygon
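
A hedged usage sketch for buffer_points: two concentric hazard rings around a single volcano point. The coordinates, radii, attribute name and attribute values below are illustrative only.

centers = [[106.8, -6.2]]  # (longitude, latitude)
radii = [3000, 10000]  # meters, monotonically ascending
data_table = [{'NAME': 'Hypothetical volcano'}]

rings = buffer_points(centers, radii, 'radius_m', data_table=data_table)
# rings is a Vector polygon layer in WGS84; each ring carries its center's
# attributes plus its own radius under the 'radius_m' attribute.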
Example #55
0
    def run(self):
        """Earthquake impact to buildings (e.g. from OpenStreetMap)."""

        LOGGER.debug('Running earthquake building impact')

        # merely initialize
        building_value = 0
        contents_value = 0

        # Thresholds for mmi breakdown.
        t0 = self.parameters['low_threshold'].value
        t1 = self.parameters['medium_threshold'].value
        t2 = self.parameters['high_threshold'].value

        # Class Attribute and Label.
        class_1 = {'label': tr('Low'), 'class': 1}
        class_2 = {'label': tr('Medium'), 'class': 2}
        class_3 = {'label': tr('High'), 'class': 3}

        # Define attribute name for hazard levels.
        hazard_attribute = 'mmi'

        # Determine if exposure data have NEXIS attributes.
        attribute_names = self.exposure.layer.get_attribute_names()
        if (
                'FLOOR_AREA' in attribute_names and
                'BUILDING_C' in attribute_names and
                'CONTENTS_C' in attribute_names):
            self.is_nexis = True
        else:
            self.is_nexis = False

        # Interpolate hazard level to building locations.
        interpolate_result = assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=hazard_attribute
        )

        # Get parameters from layer's keywords
        structure_class_field = self.exposure.keyword('structure_class_field')
        exposure_value_mapping = self.exposure.keyword('value_mapping')
        attributes = interpolate_result.get_data()

        interpolate_size = len(interpolate_result)

        hazard_classes = [tr('Low'), tr('Medium'), tr('High')]
        self.init_report_var(hazard_classes)

        removed = []
        for i in range(interpolate_size):
            # Classify building according to shake level
            # and calculate dollar losses

            if self.is_nexis:
                try:
                    area = float(attributes[i]['FLOOR_AREA'])
                except (ValueError, KeyError):
                    # print 'Got area', attributes[i]['FLOOR_AREA']
                    area = 0.0

                try:
                    building_value_density = float(attributes[i]['BUILDING_C'])
                except (ValueError, KeyError):
                    # print 'Got bld value', attributes[i]['BUILDING_C']
                    building_value_density = 0.0

                try:
                    contents_value_density = float(attributes[i]['CONTENTS_C'])
                except (ValueError, KeyError):
                    # print 'Got cont value', attributes[i]['CONTENTS_C']
                    contents_value_density = 0.0

                building_value = building_value_density * area
                contents_value = contents_value_density * area

            usage = attributes[i].get(structure_class_field, None)
            usage = main_type(usage, exposure_value_mapping)

            if usage not in self.buildings:
                self.buildings[usage] = 0
                for category in self.affected_buildings.keys():
                    if self.is_nexis:
                        self.affected_buildings[category][usage] = OrderedDict(
                            [
                                (tr('Buildings Affected'), 0),
                                (tr('Buildings value ($M)'), 0),
                                (tr('Contents value ($M)'), 0)])
                    else:
                        self.affected_buildings[category][usage] = \
                            OrderedDict([(tr('Buildings Affected'), 0)])
            self.buildings[usage] += 1
            try:
                mmi = float(attributes[i][hazard_attribute])  # MMI
            except TypeError:
                mmi = 0.0
            if t0 <= mmi < t1:
                cls = 1
                category = tr('Low')
            elif t1 <= mmi < t2:
                cls = 2
                category = tr('Medium')
            elif t2 <= mmi:
                cls = 3
                category = tr('High')
            else:
                # Not reported for MMI below t0.
                # RMN: We still need to add the target_field attribute,
                # so set it to None.
                attributes[i][self.target_field] = None
                continue

            attributes[i][self.target_field] = cls
            self.affected_buildings[
                category][usage][tr('Buildings Affected')] += 1
            if self.is_nexis:
                self.affected_buildings[category][usage][
                    tr('Buildings value ($M)')] += building_value / 1000000.0
                self.affected_buildings[category][usage][
                    tr('Contents value ($M)')] += contents_value / 1000000.0

        self.reorder_dictionaries()

        # Remove un-categorized elements. Note: nothing is appended to
        # `removed` above, so this block is currently a no-op.
        removed.reverse()
        geometry = interpolate_result.get_geometry()
        for i in range(0, len(removed)):
            del attributes[removed[i]]
            del geometry[removed[i]]

        if len(attributes) < 1:
            raise ZeroImpactException()

        # Create style
        style_classes = [
            dict(
                label=class_1['label'],
                value=class_1['class'],
                colour='#ffff00',
                transparency=1),
            dict(
                label=class_2['label'],
                value=class_2['class'],
                colour='#ffaa00',
                transparency=1),
            dict(
                label=class_3['label'],
                value=class_3['class'],
                colour='#ff0000',
                transparency=1)]
        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol'
        )

        impact_data = self.generate_data()

        extra_keywords = {
            'map_title': self.map_title(),
            'legend_notes': self.metadata().key('legend_notes'),
            'legend_units': self.metadata().key('legend_units'),
            'legend_title': self.metadata().key('legend_title'),
            'target_field': self.target_field,
        }

        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

        # Create vector layer and return
        impact_layer = Vector(
            data=attributes,
            projection=interpolate_result.get_projection(),
            geometry=geometry,
            name=self.map_title(),
            keywords=impact_layer_keywords,
            style_info=style_info)

        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
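
The MMI banding above can likewise be expressed as a standalone function. A minimal sketch with hypothetical default thresholds; the impact function reads low_threshold, medium_threshold and high_threshold from its parameters.

def classify_mmi(mmi, low=6.0, medium=7.0, high=8.0):
    """Return (class, label) for an MMI value, or (None, None) below low."""
    if low <= mmi < medium:
        return 1, 'Low'
    if medium <= mmi < high:
        return 2, 'Medium'
    if mmi >= high:
        return 3, 'High'
    return None, None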