def test_parseInput(self):
        myInput = {
            'thresholds': lambda: [1.0],
            'postprocessors': {
                'Gender': {'on': lambda: True},
                'Age': {
                    'on': lambda: True,
                    'params': {
                        'youth_ratio': lambda: 0.263,
                        'elder_ratio': lambda: 0.078,
                        'adult_ratio': lambda: 0.659}}}}

        myDialog = FunctionOptionsDialog(None)
        myResult = myDialog.parseInput(myInput)
        print myResult
        assert myResult == OrderedDict([
            ('thresholds', [1.0]),
            ('postprocessors', OrderedDict([
                ('Gender', OrderedDict([('on', True)])),
                ('Age', OrderedDict([
                    ('on', True),
                    ('params', OrderedDict([
                        ('youth_ratio', 0.263),
                        ('elder_ratio', 0.078),
                        ('adult_ratio', 0.659)]))]))]))])
Example #2
    def buildPostProcessorForm(self, theParams):
        """Build Post Processor Tab

        :param theParams: A dictionary containing the form elements.
        """

        # create postprocessors tab
        myTab = QWidget()
        myFormLayout = QFormLayout(myTab)
        myFormLayout.setLabelAlignment(Qt.AlignLeft)
        self.tabWidget.addTab(myTab, self.tr('Postprocessors'))
        self.tabWidget.tabBar().setVisible(True)

        # create element for the tab
        myValues = OrderedDict()
        for myLabel, myOptions in theParams.items():
            myInputValues = OrderedDict()

            # NOTE (gigih): 'params' is assumed to be a dictionary
            if 'params' in myOptions:
                myGroupBox = QGroupBox()
                myGroupBox.setCheckable(True)
                myGroupBox.setTitle(get_postprocessor_human_name(myLabel))

                # NOTE (gigih): does 'on' always exist?
                myGroupBox.setChecked(myOptions.get('on'))
                myInputValues['on'] = self.bind(myGroupBox, 'checked', bool)

                myLayout = QFormLayout(myGroupBox)
                myGroupBox.setLayout(myLayout)

                # create widget element from 'params'
                myInputValues['params'] = OrderedDict()
                for myKey, myValue in myOptions['params'].items():
                    myInputValues['params'][myKey] = self.buildWidget(
                        myLayout, myKey, myValue)

                myFormLayout.addRow(myGroupBox, None)

            elif 'on' in myOptions:
                myCheckBox = QCheckBox()
                myCheckBox.setText(get_postprocessor_human_name(myLabel))
                myCheckBox.setChecked(myOptions['on'])

                myInputValues['on'] = self.bind(myCheckBox, 'checked', bool)
                myFormLayout.addRow(myCheckBox, None)
            else:
                raise NotImplementedError('This case is not handled yet')

            myValues[myLabel] = myInputValues

        self.values['postprocessors'] = myValues
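The form stores zero-argument callables produced by self.bind rather than raw values, so parseInput (Example #9 below) can read each widget's current state lazily when the dialog is accepted. A minimal, purely illustrative sketch of such a bind helper, not the actual InaSAFE implementation:

def bind(widget, property_name, cast):
    # Hypothetical stand-in for FunctionOptionsDialog.bind(): return a
    # zero-argument callable that reads the widget's current value for the
    # named Qt property and casts it to a Python type. parseInput() later
    # evaluates these callables to collect the form state.
    # Assumes a PyQt API level where property() returns plain Python values.
    return lambda: cast(widget.property(property_name))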
Example #3
def default_minimum_needs():
    """Helper to get the default minimum needs.

    .. note:: Key names will be translated.
    """
    rice = tr('Rice')
    drinking_water = tr('Drinking Water')
    water = tr('Water')
    family_kits = tr('Family Kits')
    toilets = tr('Toilets')
    minimum_needs = OrderedDict([(rice, 2.8), (drinking_water, 17.5),
                                 (water, 105), (family_kits, 0.2),
                                 (toilets, 0.05)])
    return minimum_needs
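The values read naturally as weekly per-person quantities (for instance 2.8 kg of rice per evacuee per week), which is how the flood evacuation function later in this listing consumes them via evacuated_population_weekly_needs. A hedged usage sketch under that assumption; the real helper evidently also normalises the keys, since the flood function reads tot_needs['rice'], tot_needs['drinking_water'] and so on:

from collections import OrderedDict

def weekly_needs_sketch(evacuated, minimum_needs):
    # Multiply each per-person weekly rate by the number of evacuees.
    # Illustrative only; key handling differs in the real helper.
    return OrderedDict(
        (item, rate * evacuated) for item, rate in minimum_needs.items())

# weekly_needs_sketch(1000, default_minimum_needs())
# -> OrderedDict([('Rice', 2800.0), ('Drinking Water', 17500.0), ...])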
Example #4
    def __init__(self, theParent=None):
        """Constructor for the dialog.

        This dialog will show the user the form for editing impact functions
        parameters if any.

        :param theParent: Optional widget to use as parent
        """
        QtGui.QDialog.__init__(self, theParent)
        self.setupUi(self)
        self.setWindowTitle(self.tr('InaSAFE impact function configuration'))
        self.tabWidget.tabBar().setVisible(False)

        self._result = None
        self.values = OrderedDict()
Example #5
    def test_setSubcategoryList(self):
        """Test set subcategory list works"""
        myDialog = KeywordsDialog(PARENT, IFACE)
        myList = OrderedDict([('population [density]', 'population [density]'),
                              ('population [count]', 'population [count]'),
                              ('building', 'building'),
                              ('building [osm]', 'building [osm]'),
                              ('building [sigab]', 'building [sigab]'),
                              ('roads', 'roads')])
        mySelectedItem = 'building'
        myDialog.set_subcategory_list(myList, mySelectedItem)
        myResult = str(myDialog.cboSubcategory.currentText())
        myMessage = ('\nGot: %s\nExpected: %s\n' % (myResult, mySelectedItem))

        assert myResult == mySelectedItem, myMessage
Example #6
    def test_set_subcategory_list(self):
        """Test set subcategory list works"""
        dialog = KeywordsDialog(PARENT, IFACE)
        subcategory_list = OrderedDict([
            ('population [density]', 'population [density]'),
            ('population [count]', 'population [count]'),
            ('building', 'building'), ('building [osm]', 'building [osm]'),
            ('building [sigab]', 'building [sigab]'), ('road', 'road')
        ])
        selected_item = 'building'
        dialog.set_subcategory_list(subcategory_list, selected_item)
        result = str(dialog.cboSubcategory.currentText())
        message = ('\nGot: %s\nExpected: %s\n' % (result, selected_item))

        self.assertTrue(result == selected_item, message)
Example #7
    def setup(self, params):
        """Abstract method to be called from the concrete implementation
         with AbstractPostprocessor.setup(self, None) it takes care of results
        being initialized

        Args:
            params: dict of parameters to pass to the post processor
        Returns:
            None
        Raises:
            None
        """
        del params
        if self._results is not None:
            self._raise_error('clear needs to be called before setup')
        self._results = OrderedDict()
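As the docstring says, a concrete postprocessor is expected to call this base implementation with None so that self._results is initialised exactly once per setup/clear cycle. A hedged sketch of that calling pattern; the subclass name and its parameter are illustrative, and AbstractPostprocessor is assumed to be importable from the project:

class ExamplePostprocessor(AbstractPostprocessor):
    def setup(self, params):
        # Delegate to the base class first: it initialises self._results
        # and raises if clear() was not called since the previous setup().
        AbstractPostprocessor.setup(self, None)
        self.population = params['population']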
Example #8
    def buildMinimumNeedsForm(self, theParams):
        """Build minimum needs tab

        :param theParams: A dictionary containing the form elements.
        """
        # create minimum needs tab
        myTab = QWidget()
        myFormLayout = QFormLayout(myTab)
        myFormLayout.setLabelAlignment(Qt.AlignLeft)
        self.tabWidget.addTab(myTab, self.tr('Minimum Needs'))
        self.tabWidget.tabBar().setVisible(True)

        myWidget = QWidget()
        myLayout = QFormLayout(myWidget)
        myWidget.setLayout(myLayout)

        myValues = OrderedDict()
        for myLabel, myValue in theParams.items():
            myValues[myLabel] = self.buildWidget(myLayout, myLabel, myValue)

        myFormLayout.addRow(myWidget, None)
        self.values['minimum needs'] = myValues
Example #9
    def parseInput(self, theInput):
        """Parse the input value of widget.

        :param theInput: Dictionary that holds the values of all form
            elements.

        :returns: Dictionary that can be consumed for impact functions.

        :raises:
            * ValueError - occurs when some input cannot be converted
                           to a suitable type.
        """

        myResult = OrderedDict()
        for myName, myValue in theInput.items():
            if hasattr(myValue, '__call__'):
                myResult[myName] = myValue()
            elif isinstance(myValue, dict):
                myResult[myName] = self.parseInput(myValue)
            else:
                myResult[myName] = myValue

        return myResult
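The recursion is independent of Qt: callables are evaluated, nested dicts are parsed recursively, and everything else passes through unchanged, which is exactly what the test at the top of this listing asserts. A standalone restatement of that logic:

from collections import OrderedDict

def evaluate_widget_values(values):
    """Evaluate callables and recurse into nested dicts (no Qt needed)."""
    result = OrderedDict()
    for name, value in values.items():
        if callable(value):
            result[name] = value()  # e.g. a getter returned by bind()
        elif isinstance(value, dict):
            result[name] = evaluate_widget_values(value)
        else:
            result[name] = value  # plain values pass through
    return result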
class FloodEvacuationFunctionVectorHazard(FunctionProvider):
    """Impact function for vector flood evacuation

    :author AIFDR
    :rating 4

    :param requires category=='hazard' and \
                    subcategory in ['flood', 'tsunami'] and \
                    layertype=='vector'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """

    title = tr('Need evacuation')
    # Function documentation
    synopsis = tr(
        'To assess the impacts of (flood or tsunami) inundation in vector '
        'format on population.')
    actions = tr(
        'Provide details about how many people would likely need to be '
        'evacuated, where they are located and what resources would be '
        'required to support them.')

    detailed_description = tr(
        'The population subject to inundation is determined by whether it '
        'is inside an affected area or not. You can also set an evacuation '
        'percentage to calculate what percentage of the total affected '
        'population should be evacuated. This number is used to estimate '
        'needs based on the minimum assistance (bantuan minimum) defined in '
        'BNPB Perka 7/2008.')

    hazard_input = tr(
        'A hazard vector layer which has an "affected" attribute whose '
        'value is either 1 or 0.')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr(
        'A vector layer containing the affected population and the minimum '
        'needs based on the evacuation percentage.')

    target_field = 'population'
    defaults = get_defaults()

    # Configurable parameters
    # TODO: Share the minimum needs and make another default value
    parameters = OrderedDict([
        ('evacuation_percentage', 1),  # Percent of affected needing evacuation
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elder_ratio', defaults['ELDER_RATIO'])])
             }),
             ('MinimumNeeds', {
                 'on': True
             }),
         ])),
        ('minimum needs', default_minimum_needs())
    ])

    def run(self, layers):
        """Risk plugin for flood population evacuation

        Input:
          layers: List of layers expected to contain

              my_hazard : Vector polygon layer of flood depth

              my_exposure : Raster layer of population data on the same
                grid as my_hazard

        Counts number of people exposed to areas identified as flood prone

        Return
          Map of population exposed to flooding

          Table with number of people evacuated and supplies required
        """
        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Flood inundation
        my_exposure = get_exposure_layer(layers)

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Check that hazard is polygon type
        if not my_hazard.is_vector:
            msg = ('Input hazard %s was not a vector layer as expected' %
                   my_hazard.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon layer. I got %s with layer '
               'type %s' %
               (my_hazard.get_name(), my_hazard.get_geometry_name()))
        if not my_hazard.is_polygon_data:
            raise Exception(msg)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(my_hazard,
                                                  my_exposure,
                                                  attribute_name='population')

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a population count of zero
        new_attributes = my_hazard.get_data()
        category_title = 'affected'  # FIXME: Should come from keywords
        deprecated_category_title = 'FLOODPRONE'
        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            try:
                cat = attr[category_title]
            except KeyError:
                cat = attr['FLOODPRONE']
            categories[cat] = 0

        # Count affected population per polygon, per category and total
        affected_population = 0
        for attr in P.get_data():

            affected = False
            if 'affected' in attr:
                res = attr['affected']
                if res is None:
                    x = False
                else:
                    x = bool(res)
                affected = x
            elif 'FLOODPRONE' in attr:
                # If there is no 'affected' attribute, fall back to FLOODPRONE
                res = attr['FLOODPRONE']
                if res is not None:
                    affected = res.lower() == 'yes'
            elif 'Affected' in attr:
                # Check the default attribute assigned for points
                # covered by a polygon
                res = attr['Affected']
                if res is None:
                    x = False
                else:
                    x = res
                affected = x
            else:
                # there is no flood related attribute
                msg = ('No flood related attribute found in %s. '
                       'I was looking for either "affected", "FLOODPRONE" '
                       'or "Affected". The latter should have been '
                       'automatically set by a call to '
                       'assign_hazard_values_to_exposure_data(). '
                       'Sorry I can\'t help more.' % my_hazard.get_name())
                raise Exception(msg)

            if affected:
                # Get population at this location
                pop = float(attr['population'])

                # Update population count for associated polygon
                poly_id = attr['polygon_id']
                new_attributes[poly_id][self.target_field] += pop

                # Update population count for each category
                try:
                    cat = new_attributes[poly_id][category_title]
                except KeyError:
                    cat = new_attributes[poly_id][deprecated_category_title]
                categories[cat] += pop

                # Update total
                affected_population += pop

        affected_population = round_thousand(affected_population)
        # Estimate number of people in need of evacuation
        evacuated = (affected_population *
                     self.parameters['evacuation_percentage'] / 100.0)

        total = int(numpy.sum(my_exposure.get_data(nan=0, scaling=False)))

        # Don't show digits less than a thousand
        total = round_thousand(total)
        evacuated = round_thousand(evacuated)

        # Calculate estimated minimum needs
        minimum_needs = self.parameters['minimum needs']
        tot_needs = evacuated_population_weekly_needs(evacuated, minimum_needs)

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([
                tr('People affected'),
                '%s%s' % (format_int(int(affected_population)),
                          ('*' if affected_population >= 1000 else ''))
            ],
                     header=True),
            TableRow([
                tr('People needing evacuation'),
                '%s%s' % (format_int(int(evacuated)),
                          ('*' if evacuated >= 1000 else ''))
            ],
                     header=True),
            TableRow([
                TableCell(tr('* Number is rounded to the nearest 1000'),
                          col_span=2)
            ],
                     header=False),
            TableRow([
                tr('Evacuation threshold'),
                '%s%%' % format_int(self.parameters['evacuation_percentage'])
            ],
                     header=True),
            TableRow(
                tr('Map shows population affected in each flood'
                   ' prone area')),
            TableRow(
                tr('Table below shows the weekly minimum needs '
                   'for all evacuated people')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), format_int(tot_needs['rice'])],
            [
                tr('Drinking Water [l]'),
                format_int(tot_needs['drinking_water'])
            ], [tr('Clean Water [l]'),
                format_int(tot_needs['water'])],
            [tr('Family Kits'),
             format_int(tot_needs['family_kits'])],
            [tr('Toilets'), format_int(tot_needs['toilets'])]
        ]
        impact_table = Table(table_body).toNewlineFreeString()

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(TableRow(tr('How will warnings be disseminated?')))
        table_body.append(TableRow(tr('How will we reach stranded people?')))
        table_body.append(TableRow(tr('Do we have enough relief items?')))
        table_body.append(
            TableRow(
                tr('If yes, where are they located and how '
                   'will we distribute them?')))
        table_body.append(
            TableRow(
                tr('If no, where can we obtain additional '
                   'relief items from and how will we '
                   'transport them to here?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People need evacuation if in area identified '
               'as "Flood Prone"'),
            tr('Minimum needs are defined in BNPB '
               'regulation 7/2008')
        ])
        impact_summary = Table(table_body).toNewlineFreeString()

        # Create style
        # Define classes for legend for flooded population counts
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        population_counts = [x['population'] for x in new_attributes]
        classes = create_classes(population_counts, len(colours))
        interval_classes = humanize_class(classes)

        # Define style info for output polygons showing population counts
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 100
                style_class['min'] = 0
            else:
                transparency = 0
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('People affected by flood prone areas')
        legend_notes = tr('Thousand separator is represented by \'.\'')
        legend_units = tr('(people per polygon)')
        legend_title = tr('Population Count')

        # Create vector layer and return
        V = Vector(data=new_attributes,
                   projection=my_hazard.get_projection(),
                   geometry=my_hazard.get_geometry(),
                   name=tr('Population affected by flood prone areas'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return V
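The report above rounds population figures with round_thousand and marks rounded values with an asterisk. A hedged sketch of what such a helper could do, consistent with the '* Number is rounded to the nearest 1000' footnote but not necessarily the actual implementation:

def round_thousand_sketch(value):
    # Values of 1000 or more are rounded to the nearest thousand so the
    # report does not suggest spurious precision; smaller values pass
    # through unchanged.
    if value >= 1000:
        return int(round(value / 1000.0)) * 1000
    return value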
class CategorisedHazardBuildingImpactFunction(FunctionProvider):
    """Impact plugin for categorising hazard impact on building data

    :author AIFDR
    :rating 2
    :param requires category=='hazard' and \
                    unit=='normalised' and \
                    layertype=='raster'

    :param requires category=='exposure' and \
                    subcategory=='structure' and \
                    layertype=='vector'
    """

    target_field = 'ICLASS'
    # Function documentation
    title = tr('Be affected')
    synopsis = tr('To assess the impacts of a categorised hazard in raster '
                  'format on a structure/building vector layer.')
    actions = tr('Provide details about how many buildings would likely be '
                 'affected in each category.')
    hazard_input = tr('A hazard raster layer where each cell represents '
                      'the category of the hazard. There should be 3 '
                      'categories: 1, 2, and 3.')
    exposure_input = \
        tr('Vector polygon layer which can be extracted from OSM '
           'where each polygon represents the footprint of a building.')
    output = tr('Map of structures exposed to the high hazard category and '
                'a table with the number of structures in each category.')
    detailed_description = \
        tr('This function will calculate how many buildings will be affected '
           'for each category in the hazard layer. Currently there should be '
           '3 categories in the hazard layer. After that it will show the '
           'result and the total number of buildings that will be affected '
           'for the given hazard.')
    limitation = tr('The number of categories is three.')
    statistics_type = 'class_count'
    statistics_classes = ['None', 1, 2, 3]
    parameters = OrderedDict([('postprocessors',
                               OrderedDict([('AggregationCategorical', {
                                   'on': True
                               })]))])

    def run(self, layers):
        """Impact plugin for hazard impact
        """

        # Extract data
        H = get_hazard_layer(layers)  # Value
        E = get_exposure_layer(layers)  # Building locations

        question = get_question(H.get_name(), E.get_name(), self)

        # Interpolate hazard level to building locations
        H = assign_hazard_values_to_exposure_data(H,
                                                  E,
                                                  attribute_name='hazard_lev',
                                                  mode='constant')

        # Extract relevant numerical data
        coordinates = H.get_geometry()
        category = H.get_data()
        N = len(category)

        # List attributes to carry forward to result layer
        #attributes = E.get_attribute_names()

        # Calculate building impact according to guidelines
        count2 = 0
        count1 = 0
        count0 = 0
        building_impact = []
        for i in range(N):
            # Get category value
            val = float(category[i]['hazard_lev'])

            # Classify buildings according to value
            ##            if val >= 2.0 / 3:
            ##                affected = 2
            ##                count2 += 1
            ##            elif 1.0 / 3 <= val < 2.0 / 3:
            ##                affected = 1
            ##                count1 += 1
            ##            else:
            ##                affected = 0
            ##                count0 += 1
            ## FIXME it would be good if the affected were words not numbers
            ## FIXME need to read hazard layer and see category or keyword
            if val == 3:
                affected = 3
                count2 += 1
            elif val == 2:
                affected = 2
                count1 += 1
            elif val == 1:
                affected = 1
                count0 += 1
            else:
                affected = 'None'

            # Collect depth and calculated damage
            result_dict = {self.target_field: affected, 'CATEGORY': val}

            # Record result for this feature
            building_impact.append(result_dict)

        # Create impact report
        # Generate impact summary
        table_body = [
            question,
            TableRow([tr('Category'), tr('Affected')], header=True),
            TableRow([tr('High'), format_int(count2)]),
            TableRow([tr('Medium'), format_int(count1)]),
            TableRow([tr('Low'), format_int(count0)]),
            TableRow([tr('All'), format_int(N)])
        ]

        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(
            tr('Categorised hazard has only 3'
               ' classes, high, medium and low.'))

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary
        map_title = tr('Categorised hazard impact on buildings')

        # FIXME it would be great to do categorised rather than graduated
        # Create style
        style_classes = [
            dict(label=tr('Low'),
                 min=1,
                 max=1,
                 colour='#1EFC7C',
                 transparency=0,
                 size=1),
            dict(label=tr('Medium'),
                 min=2,
                 max=2,
                 colour='#FFA500',
                 transparency=0,
                 size=1),
            dict(label=tr('High'),
                 min=3,
                 max=3,
                 colour='#F31A1C',
                 transparency=0,
                 size=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes)

        # Create vector layer and return
        name = 'Buildings Affected'

        V = Vector(data=building_impact,
                   projection=E.get_projection(),
                   geometry=coordinates,
                   geometry_type=E.geometry_type,
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'map_title': map_title,
                       'target_field': self.target_field,
                       'statistics_type': self.statistics_type,
                       'statistics_classes': self.statistics_classes
                   },
                   name=name,
                   style_info=style_info)
        return V
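The classification in the loop above maps raster category values straight to impact classes; note that count2 tallies category 3 ('High'), count1 category 2 ('Medium') and count0 category 1 ('Low'). An illustrative, compact restatement of that mapping:

def classify_category(val):
    # Values other than 1, 2 or 3 are reported as 'None', as in the loop.
    return int(val) if val in (1.0, 2.0, 3.0) else 'None'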
Example #12
    def __init__(self, parent, iface, theDock=None, theLayer=None):
        """Constructor for the dialog.
        .. note:: In QtDesigner the advanced editor's predefined keywords
           list should be shown in English always, so when adding entries to
           cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
           the :safe_qgis:`translatable` property.

        Args:
           * parent - parent widget of this dialog
           * iface - a Quantum GIS QGisAppInterface instance.
           * theDock - Optional dock widget instance that we can notify of
             changes to the keywords.

        Returns:
           not applicable
        Raises:
           no exceptions explicitly raised
        """

        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setWindowTitle(self.tr(
            'InaSAFE %1 Keywords Editor').arg(get_version()))
        self.keywordIO = KeywordIO()
        # note the keys should remain untranslated as we need to write
        # English to the keywords file. The keys will be written as user data
        # in the combo entries.
        # .. seealso:: http://www.voidspace.org.uk/python/odict.html
        self.standardExposureList = OrderedDict([('population',
                                      self.tr('population')),
                                     ('structure', self.tr('structure')),
                                     ('road', self.tr('road')),
                                     ('Not Set', self.tr('Not Set'))])
        self.standardHazardList = OrderedDict([('earthquake [MMI]',
                                    self.tr('earthquake [MMI]')),
                                   ('tsunami [m]', self.tr('tsunami [m]')),
                                   ('tsunami [wet/dry]',
                                    self.tr('tsunami [wet/dry]')),
                                   ('tsunami [feet]',
                                    self.tr('tsunami [feet]')),
                                   ('flood [m]', self.tr('flood [m]')),
                                   ('flood [wet/dry]',
                                    self.tr('flood [wet/dry]')),
                                   ('flood [feet]', self.tr('flood [feet]')),
                                   ('tephra [kg2/m2]',
                                    self.tr('tephra [kg2/m2]')),
                                   ('volcano', self.tr('volcano')),
                                   ('Not Set', self.tr('Not Set'))])
        # Save reference to the QGIS interface and parent
        self.iface = iface
        self.parent = parent
        self.dock = theDock

        QtCore.QObject.connect(self.lstKeywords,
                               QtCore.SIGNAL("itemClicked(QListWidgetItem *)"),
                               self.makeKeyValueEditable)

        # Set up help dialog showing logic.
        self.helpDialog = None
        myButton = self.buttonBox.button(QtGui.QDialogButtonBox.Help)
        QtCore.QObject.connect(myButton, QtCore.SIGNAL('clicked()'),
                               self.showHelp)

        # set some initial ui state:
        self.defaults = getDefaults()
        self.pbnAdvanced.setChecked(True)
        self.pbnAdvanced.toggle()
        self.radPredefined.setChecked(True)
        self.dsbFemaleRatioDefault.blockSignals(True)
        self.dsbFemaleRatioDefault.setValue(self.defaults[
                                            'FEM_RATIO'])
        self.dsbFemaleRatioDefault.blockSignals(False)
        #myButton = self.buttonBox.button(QtGui.QDialogButtonBox.Ok)
        #myButton.setEnabled(False)
        if theLayer is None:
            self.layer = self.iface.activeLayer()
        else:
            self.layer = theLayer
        if self.layer:
            self.loadStateFromKeywords()

        #add a reload from keywords button
        myButton = self.buttonBox.addButton(self.tr('Reload'),
                                            QtGui.QDialogButtonBox.ActionRole)
        QtCore.QObject.connect(myButton, QtCore.SIGNAL('clicked()'),
                               self.loadStateFromKeywords)
Example #13
class PAGFatalityFunction(ITBFatalityFunction):
    """
    Population Vulnerability Model Pager
    Loss ratio(MMI) = standard normal distribution(1 / BETA * ln(MMI / THETA)).
    Reference:
    Jaiswal, K. S., Wald, D. J., and Hearne, M. (2009a).
    Estimating casualties for large worldwide earthquakes using an empirical
    approach. U.S. Geological Survey Open-File Report 2009-1136.

    :author Helen Crowley
    :rating 3

    :param requires category=='hazard' and \
                    subcategory=='earthquake' and \
                    layertype=='raster' and \
                    unit=='MMI'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """
    synopsis = tr('To assess the impact of an earthquake on population based '
                  'on the Population Vulnerability Model Pager.')
    citations = \
        tr(' * Jaiswal, K. S., Wald, D. J., and Hearne, M. (2009a). '
           '   Estimating casualties for large worldwide earthquakes using '
           '   an empirical approach. U.S. Geological Survey Open-File '
           '   Report 2009-1136.')
    limitation = ''
    detailed_description = ''
    title = tr('Die or be displaced according to the Pager model')
    defaults = get_defaults()

    # see https://github.com/AIFDR/inasafe/issues/628
    default_needs = default_minimum_needs()
    default_needs[tr('Water')] = 67

    parameters = OrderedDict([
        ('Theta', 11.067),
        ('Beta', 0.106),  # Model coefficients
        # Rates of people displaced for each MMI level
        ('displacement_rate', {
            1: 0, 1.5: 0, 2: 0, 2.5: 0, 3: 0,
            3.5: 0, 4: 0, 4.5: 0, 5: 0, 5.5: 0,
            6: 1.0, 6.5: 1.0, 7: 1.0, 7.5: 1.0,
            8: 1.0, 8.5: 1.0, 9: 1.0, 9.5: 1.0,
            10: 1.0}),
        ('mmi_range', list(numpy.arange(2, 10, 0.5))),
        ('step', 0.25),
        # Threshold below which layer should be transparent
        ('tolerance', 0.01),
        ('calculate_displaced_people', True),
        ('postprocessors', OrderedDict([
            ('Gender', {'on': True}),
            ('Age', {
                'on': True,
                'params': OrderedDict([
                    ('youth_ratio', defaults['YOUTH_RATIO']),
                    ('adult_ratio', defaults['ADULT_RATIO']),
                    ('elder_ratio', defaults['ELDER_RATIO'])])}),
            ('MinimumNeeds', {'on': True})])),
        ('minimum needs', default_needs)])

    def fatality_rate(self, mmi):
        """Pager method to compute fatality rate"""

        N = math.sqrt(2 * math.pi)
        THETA = self.parameters['Theta']
        BETA = self.parameters['Beta']

        x = math.log(mmi / THETA) / BETA
        return math.exp(-x * x / 2.0) / N
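With the class defaults (Theta = 11.067, Beta = 0.106) the value returned above is the standard normal density evaluated at x = ln(mmi / Theta) / Beta. A quick standalone check of the arithmetic:

import math

theta, beta = 11.067, 0.106
for mmi in (8.0, 9.0, 10.0):
    x = math.log(mmi / theta) / beta
    rate = math.exp(-x * x / 2.0) / math.sqrt(2 * math.pi)
    print('MMI %.1f -> fatality rate %.6f' % (mmi, rate))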
Example #14
    def __init__(self, parent, iface, dock=None, layer=None):
        """Constructor for the dialog.

        .. note:: In QtDesigner the advanced editor's predefined keywords
           list should be shown in English always, so when adding entries to
           cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
           the :safe_qgis:`translatable` property.

        :param parent: Parent widget of this dialog.
        :type parent: QWidget

        :param iface: Quantum GIS QGisAppInterface instance.
        :type iface: QGisAppInterface

        :param dock: Dock widget instance that we can notify of changes to
            the keywords. Optional.
        :type dock: Dock
        """
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setWindowTitle(
            self.tr('InaSAFE %s Keywords Editor') % get_version())
        # Save reference to the QGIS interface and parent
        self.iface = iface
        self.parent = parent
        self.dock = dock

        if layer is None:
            self.layer = iface.activeLayer()
        else:
            self.layer = layer

        self.keyword_io = KeywordIO()

        # note the keys should remain untranslated as we need to write
        # English to the keywords file. The keys will be written as user data
        # in the combo entries.
        # .. seealso:: http://www.voidspace.org.uk/python/odict.html
        self.standard_exposure_list = OrderedDict([
            ('population', self.tr('population')),
            ('structure', self.tr('structure')), ('road', self.tr('road')),
            ('Not Set', self.tr('Not Set'))
        ])
        self.standard_hazard_list = OrderedDict([
            ('earthquake [MMI]', self.tr('earthquake [MMI]')),
            ('tsunami [m]', self.tr('tsunami [m]')),
            ('tsunami [wet/dry]', self.tr('tsunami [wet/dry]')),
            ('tsunami [feet]', self.tr('tsunami [feet]')),
            ('flood [m]', self.tr('flood [m]')),
            ('flood [wet/dry]', self.tr('flood [wet/dry]')),
            ('flood [feet]', self.tr('flood [feet]')),
            ('tephra [kg2/m2]', self.tr('tephra [kg2/m2]')),
            ('volcano', self.tr('volcano')), ('Not Set', self.tr('Not Set'))
        ])

        self.lstKeywords.itemClicked.connect(self.edit_key_value_pair)

        # Set up help dialog showing logic.
        help_button = self.buttonBox.button(QtGui.QDialogButtonBox.Help)
        help_button.clicked.connect(self.show_help)

        # set some initial ui state:
        self.defaults = breakdown_defaults()
        self.pbnAdvanced.setChecked(False)
        self.radPredefined.setChecked(True)
        self.dsbFemaleRatioDefault.blockSignals(True)
        self.dsbFemaleRatioDefault.setValue(self.defaults['FEM_RATIO'])
        self.dsbFemaleRatioDefault.blockSignals(False)

        if self.layer:
            self.load_state_from_keywords()

        # add a reload from keywords button
        reload_button = self.buttonBox.addButton(
            self.tr('Reload'), QtGui.QDialogButtonBox.ActionRole)
        reload_button.clicked.connect(self.load_state_from_keywords)
        self.grpAdvanced.setVisible(False)
        self.resize_dialog()
Example #15
    def _aggregateVectorImpact(self, theQGISImpactLayer, theSafeImpactLayer):
        """Performs Aggregation postprocessing step on vector impact layers.

        Args:
            myQGISImpactLayer a valid QgsRasterLayer

        Returns:
            None
        """
        #TODO (MB) implement line aggregation

        myAggrFieldMap = {}
        myAggrFieldIndex = None

        try:
            self.targetField = self.keywordIO.read_keywords(theQGISImpactLayer,
                                                           'target_field')
        except KeywordNotFoundError:
            myMessage = m.Paragraph(
                self.tr(
                    'No "target_field" keyword found in the impact layer %1 '
                    'keywords. The impact function should define this.').arg(
                        theQGISImpactLayer.name()))
            LOGGER.debug('Skipping postprocessing due to: %s' % myMessage)
            self.errorMessage = myMessage
            return
        myImpactProvider = theQGISImpactLayer.dataProvider()
        myTargetFieldIndex = theQGISImpactLayer.fieldNameIndex(
            self.targetField)
        # if the impact layer has no field with the target field name
        if myTargetFieldIndex == -1:
            myMessage = m.Paragraph(
                self.tr('No attribute "%1" was found in the attribute table '
                        'for layer "%2". The impact function must define this'
                        ' attribute for postprocessing to work.').arg(
                            self.targetField, theQGISImpactLayer.name()))
            LOGGER.debug('Skipping postprocessing due to: %s' % myMessage)
            self.errorMessage = myMessage
            return

        # start data retrieval: fetch no geometry and
        # 1 attr for each feature
        myImpactProvider.select([myTargetFieldIndex], QgsRectangle(), False)
        myTotal = 0

        myAggregationProvider = self.layer.dataProvider()
        self.layer.startEditing()

        if self.statisticsType == 'class_count':
            #add the class count fields to the layer
            myFields = [QgsField('%s_%s' % (f, self.targetField),
                                 QtCore.QVariant.String) for f in
                        self.statisticsClasses]
            myAggregationProvider.addAttributes(myFields)
            self.layer.commitChanges()

            myTmpAggrFieldMap = myAggregationProvider.fieldNameMap()
            for k, v in myTmpAggrFieldMap.iteritems():
                myAggrFieldMap[str(k)] = v

        elif self.statisticsType == 'sum':
            #add the total field to the layer
            myAggrField = self._sumFieldName()
            myAggregationProvider.addAttributes([QgsField(
                myAggrField, QtCore.QVariant.Int)])

            self.layer.commitChanges()
            myAggrFieldIndex = self.layer.fieldNameIndex(
                myAggrField)

        self.layer.startEditing()

        myImpactGeoms = theSafeImpactLayer.get_geometry()
        myImpactValues = theSafeImpactLayer.get_data()

        if not self.aoiMode:
            myAggregationUnits = self.safeLayer.get_geometry()

            if (theSafeImpactLayer.is_point_data or
                    theSafeImpactLayer.is_polygon_data):
                LOGGER.debug('Doing point in polygon aggregation')

                myRemainingValues = myImpactValues

                if theSafeImpactLayer.is_polygon_data:
                    # Using centroids to do polygon in polygon aggregation
                    # this is always ok because
                    # deintersect() took care of splitting
                    # polygons that span across multiple postprocessing
                    # polygons. After deintersect()
                    # each impact polygon will never be contained by more than
                    # one aggregation polygon

                    # Calculate points for each polygon
                    myCentroids = []
                    for myPolygon in myImpactGeoms:
                        if hasattr(myPolygon, 'outer_ring'):
                            outer_ring = myPolygon.outer_ring
                        else:
                            # Assume it is an array
                            outer_ring = myPolygon
                        c = calculate_polygon_centroid(outer_ring)
                        myCentroids.append(c)
                    myRemainingPoints = myCentroids

                else:
                    # these are already point data
                    myRemainingPoints = myImpactGeoms

                #iterate over the aggregation units
                for myPolygonIndex, myPolygon in enumerate(myAggregationUnits):
                    if hasattr(myPolygon, 'outer_ring'):
                        outer_ring = myPolygon.outer_ring
                        inner_rings = myPolygon.inner_rings
                    else:
                        # Assume it is an array
                        outer_ring = myPolygon
                        inner_rings = None

                    inside, outside = points_in_and_outside_polygon(
                        myRemainingPoints,
                        outer_ring,
                        holes=inner_rings,
                        closed=True,
                        check_input=True)

                    #self.impactLayerAttributes is a list of list of dict
                    #[
                    #   [{...},{...},{...}],
                    #   [{...},{...},{...}]
                    #]
                    self.impactLayerAttributes.append([])
                    if self.statisticsType == 'class_count':
                        myResults = OrderedDict()
                        for myClass in self.statisticsClasses:
                            myResults[myClass] = 0

                        for i in inside:
                            myKey = myRemainingValues[i][self.targetField]
                            try:
                                myResults[myKey] += 1
                            except KeyError:
                                myError = ('StatisticsClasses %s does not '
                                           'include the %s class which was '
                                           'found in the data. This is a '
                                           'problem in the %s '
                                           'statistics_classes definition' %
                                           (self.statisticsClasses,
                                            myKey,
                                            self.getFunctionID()))
                                raise KeyError(myError)

                            self.impactLayerAttributes[myPolygonIndex].append(
                                myRemainingValues[i])
                        myAttrs = {}
                        for k, v in myResults.iteritems():
                            myKey = '%s_%s' % (k, self.targetField)
                            #FIXME (MB) remove next line when we get rid of
                            #shape files as internal format
                            myKey = myKey[:10]
                            myAggrFieldIndex = myAggrFieldMap[myKey]
                            myAttrs[myAggrFieldIndex] = QtCore.QVariant(v)

                    elif self.statisticsType == 'sum':
                        # by default sum the attributes
                        myTotal = 0
                        for i in inside:
                            try:
                                myTotal += myRemainingValues[i][
                                    self.targetField]
                            except TypeError:
                                pass

                            #add all attributes to the impactLayerAttributes
                            self.impactLayerAttributes[myPolygonIndex].append(
                                myRemainingValues[i])
                        myAttrs = {myAggrFieldIndex: QtCore.QVariant(myTotal)}

                    # Add features inside this polygon
                    myFID = myPolygonIndex
                    myAggregationProvider.changeAttributeValues(
                        {myFID: myAttrs})

                    # make outside points the input to the next iteration
                    # this could maybe be done more quickly using numpy
                    # arrays directly, like this:
                    # myRemainingPoints = myRemainingPoints[outside]
                    # myRemainingValues =
                    # [myRemainingValues[i] for i in outside]
                    myTmpPoints = []
                    myTmpValues = []
                    for i in outside:
                        myTmpPoints.append(myRemainingPoints[i])
                        myTmpValues.append(myRemainingValues[i])
                    myRemainingPoints = myTmpPoints
                    myRemainingValues = myTmpValues

                    # LOGGER.debug('Before: ' + str(len(myRemainingValues)))
                    # LOGGER.debug('After: ' + str(len(myRemainingValues)))
                    # LOGGER.debug('Inside: ' + str(len(inside)))
                    # LOGGER.debug('Outside: ' + str(len(outside)))

            elif theSafeImpactLayer.is_line_data:
                LOGGER.debug('Doing line in polygon aggregation')

            else:
                myMessage = m.Paragraph(
                    self.tr(
                        'Aggregation on vector impact layers other than '
                        'points or polygons is not implemented yet. '
                        'Called on %1').arg(theQGISImpactLayer.name()))
                LOGGER.debug('Skipping postprocessing due to: %s' % myMessage)
                self.errorMessage = myMessage
                self.layer.commitChanges()
                return
        else:
            if self.statisticsType == 'class_count':
                #loop over all features in impact layer
                myResults = OrderedDict()
                for myClass in self.statisticsClasses:
                    myResults[myClass] = 0

                self.impactLayerAttributes.append([])
                for myImpactValueList in myImpactValues:
                    myKey = myImpactValueList[self.targetField]
                    try:
                        myResults[myKey] += 1
                    except KeyError:
                        myError = ('StatisticsClasses %s does not '
                                   'include the %s class which was '
                                   'found in the data. This is a '
                                   'problem in the %s '
                                   'statistics_classes definition' %
                                   (self.statisticsClasses,
                                    myKey,
                                    self.getFunctionID()))
                        raise KeyError(myError)

                    self.impactLayerAttributes[0].append(myImpactValueList)

                myAttrs = {}
                for k, v in myResults.iteritems():
                    myKey = '%s_%s' % (k, self.targetField)
                    #FIXME (MB) remove next line when we get rid of
                    #shape files as internal format
                    myKey = myKey[:10]
                    myAggrFieldIndex = myAggrFieldMap[myKey]
                    myAttrs[myAggrFieldIndex] = QtCore.QVariant(v)

            elif self.statisticsType == 'sum':
                #loop over all features in impact layer
                self.impactLayerAttributes.append([])
                for myImpactValueList in myImpactValues:
                    if myImpactValueList[self.targetField] == 'None':
                        myImpactValueList[self.targetField] = None
                    try:
                        myTotal += myImpactValueList[self.targetField]
                    except TypeError:
                        pass
                    self.impactLayerAttributes[0].append(myImpactValueList)
                myAttrs = {myAggrFieldIndex: QtCore.QVariant(myTotal)}

            # apply to the single whole-area feature
            myFID = 0
            myAggregationProvider.changeAttributeValues({myFID: myAttrs})

        self.layer.commitChanges()
        return
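Stripped of the QGIS plumbing, the per-polygon loop above is a winnowing pass: points (or polygon centroids) assigned to one aggregation polygon are removed from the candidate set before the next polygon is tested, so each impact feature is counted at most once. A minimal sketch of that idea, where the contains predicate stands in for points_in_and_outside_polygon:

def tally_per_polygon(polygons, points, contains):
    """Count candidate points per polygon, winnowing assigned points out
    of the candidate set as we go."""
    counts = []
    remaining = list(points)
    for polygon in polygons:
        inside = [p for p in remaining if contains(polygon, p)]
        remaining = [p for p in remaining if not contains(polygon, p)]
        counts.append(len(inside))
    return counts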
Example #16
class EarthquakeBuildingImpactFunction(FunctionProvider):
    """Earthquake impact on building data

    :param requires category=='hazard' and \
                    subcategory=='earthquake'

    :param requires category=='exposure' and \
                    subcategory=='structure' and \
                    layertype=='vector'
    """

    target_field = 'Shake_cls'
    statistics_type = 'class_count'
    statistics_classes = [0, 1, 2, 3]
    title = tr('Be affected')
    parameters = OrderedDict([('low_threshold', 6), ('medium_threshold', 7),
                              ('high_threshold', 8),
                              ('postprocessors',
                               OrderedDict([('AggregationCategorical', {
                                   'on': True
                               })]))])

    def run(self, layers):
        """Earthquake impact to buildings (e.g. from OpenStreetMap)
        :param layers: All the input layers (Hazard Layer and Exposure Layer)
        """

        LOGGER.debug('Running earthquake building impact')

        # merely initialize
        building_value = 0
        contents_value = 0

        # Thresholds for mmi breakdown
        t0 = self.parameters['low_threshold']
        t1 = self.parameters['medium_threshold']
        t2 = self.parameters['high_threshold']

        # Class Attribute and Label

        class_1 = {'label': tr('Low'), 'class': 1}
        class_2 = {'label': tr('Medium'), 'class': 2}
        class_3 = {'label': tr('High'), 'class': 3}

        # Extract data
        my_hazard = get_hazard_layer(layers)  # Depth
        my_exposure = get_exposure_layer(layers)  # Building locations

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Define attribute name for hazard levels
        hazard_attribute = 'mmi'

        # Determine if exposure data have NEXIS attributes
        attribute_names = my_exposure.get_attribute_names()
        if ('FLOOR_AREA' in attribute_names and 'BUILDING_C' in attribute_names
                and 'CONTENTS_C' in attribute_names):
            is_nexis = True
        else:
            is_nexis = False

        # Interpolate hazard level to building locations
        my_interpolate_result = assign_hazard_values_to_exposure_data(
            my_hazard, my_exposure, attribute_name=hazard_attribute)

        # Extract relevant exposure data
        #attribute_names = my_interpolate_result.get_attribute_names()
        attributes = my_interpolate_result.get_data()

        interpolate_size = len(my_interpolate_result)

        # Calculate building impact
        lo = 0
        me = 0
        hi = 0
        building_values = {}
        contents_values = {}
        for key in range(4):
            building_values[key] = 0
            contents_values[key] = 0
        for i in range(interpolate_size):
            # Classify building according to shake level
            # and calculate dollar losses

            if is_nexis:
                try:
                    area = float(attributes[i]['FLOOR_AREA'])
                except (ValueError, KeyError):
                    #print 'Got area', attributes[i]['FLOOR_AREA']
                    area = 0.0

                try:
                    building_value_density = float(attributes[i]['BUILDING_C'])
                except (ValueError, KeyError):
                    #print 'Got bld value', attributes[i]['BUILDING_C']
                    building_value_density = 0.0

                try:
                    contents_value_density = float(attributes[i]['CONTENTS_C'])
                except (ValueError, KeyError):
                    #print 'Got cont value', attributes[i]['CONTENTS_C']
                    contents_value_density = 0.0

                building_value = building_value_density * area
                contents_value = contents_value_density * area

            try:
                x = float(attributes[i][hazard_attribute])  # MMI
            except TypeError:
                x = 0.0
            if t0 <= x < t1:
                lo += 1
                cls = 1
            elif t1 <= x < t2:
                me += 1
                cls = 2
            elif t2 <= x:
                hi += 1
                cls = 3
            else:
                # Not reported for less than level t0
                cls = 0

            attributes[i][self.target_field] = cls

            if is_nexis:
                # Accumulate values (converted to millions of dollars below)
                building_values[cls] += building_value
                contents_values[cls] += contents_value

        if is_nexis:
            # Convert to units of one million dollars
            for key in range(4):
                building_values[key] = int(building_values[key] / 1000000)
                contents_values[key] = int(contents_values[key] / 1000000)

        if is_nexis:
            # Generate simple impact report for NEXIS type buildings
            table_body = [
                question,
                TableRow([
                    tr('Hazard Level'),
                    tr('Buildings Affected'),
                    tr('Buildings value ($M)'),
                    tr('Contents value ($M)')
                ],
                         header=True),
                TableRow([
                    class_1['label'],
                    format_int(lo),
                    format_int(building_values[1]),
                    format_int(contents_values[1])
                ]),
                TableRow([
                    class_2['label'],
                    format_int(me),
                    format_int(building_values[2]),
                    format_int(contents_values[2])
                ]),
                TableRow([
                    class_3['label'],
                    format_int(hi),
                    format_int(building_values[3]),
                    format_int(contents_values[3])
                ])
            ]
        else:
            # Generate simple impact report for unspecific buildings
            table_body = [
                question,
                TableRow([tr('Hazard Level'),
                          tr('Buildings Affected')],
                         header=True),
                TableRow([class_1['label'], format_int(lo)]),
                TableRow([class_2['label'], format_int(me)]),
                TableRow([class_3['label'], format_int(hi)])
            ]

        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(
            tr('High hazard is defined as shake levels greater '
               'than %i on the MMI scale.') % t2)
        table_body.append(
            tr('Medium hazard is defined as shake levels '
               'between %i and %i on the MMI scale.') % (t1, t2))
        table_body.append(
            tr('Low hazard is defined as shake levels '
               'between %i and %i on the MMI scale.') % (t0, t1))
        if is_nexis:
            table_body.append(
                tr('Values are in units of 1 million Australian '
                   'Dollars'))

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # Create style
        style_classes = [
            dict(label=class_1['label'],
                 value=class_1['class'],
                 colour='#ffff00',
                 transparency=1),
            dict(label=class_2['label'],
                 value=class_2['class'],
                 colour='#ffaa00',
                 transparency=1),
            dict(label=class_3['label'],
                 value=class_3['class'],
                 colour='#ff0000',
                 transparency=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # For printing map purpose
        map_title = tr('Buildings affected by earthquake')
        legend_notes = tr('The impact level is based on the thresholds '
                          'entered by the user.')
        legend_units = tr('(mmi)')
        legend_title = tr('Impact level')

        # Create vector layer and return
        result_layer = Vector(
            data=attributes,
            projection=my_interpolate_result.get_projection(),
            geometry=my_interpolate_result.get_geometry(),
            name=tr('Estimated buildings affected'),
            keywords={
                'impact_summary': impact_summary,
                'impact_table': impact_table,
                'map_title': map_title,
                'legend_notes': legend_notes,
                'legend_units': legend_units,
                'legend_title': legend_title,
                'target_field': self.target_field,
                'statistics_type': self.statistics_type,
                'statistics_classes': self.statistics_classes
            },
            style_info=style_info)

        LOGGER.debug('Created vector layer %s' % str(result_layer))
        return result_layer
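# A minimal, standalone sketch (not part of the impact function above): it
# mirrors the MMI classification loop in run(), binning buildings into
# low/medium/high classes using thresholds t0, t1 and t2. The threshold
# values and the sample MMI readings below are illustrative assumptions only.
def classify_mmi(values, t0=6.0, t1=7.0, t2=8.0):
    """Return (lo, me, hi) counts plus a class code per value (0 = ignored)."""
    lo = me = hi = 0
    classes = []
    for x in values:
        try:
            x = float(x)
        except (TypeError, ValueError):
            x = 0.0
        if t0 <= x < t1:
            lo += 1
            cls = 1
        elif t1 <= x < t2:
            me += 1
            cls = 2
        elif t2 <= x:
            hi += 1
            cls = 3
        else:
            cls = 0  # Not reported for shaking below t0
        classes.append(cls)
    return lo, me, hi, classes

# Three buildings shaken at MMI 6.2, 7.5 and 8.9 fall in classes 1, 2 and 3
print(classify_mmi([6.2, 7.5, 8.9, None]))  # -> (1, 1, 1, [1, 2, 3, 0])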
class CategorisedHazardPopulationImpactFunction(FunctionProvider):
    """Plugin for impact of population as derived by categorised hazard

    :author AIFDR
    :rating 2
    :param requires category=='hazard' and \
                    unit=='normalised' and \
                    layertype=='raster'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """
    # Function documentation
    title = tr('Be impacted')
    synopsis = tr('To assess the impacts of categorised hazards in raster '
                  'format on a population raster layer.')
    actions = tr('Provide details about how many people would likely be '
                 'impacted in each category.')
    hazard_input = tr('A hazard raster layer where each cell represents '
                      'the category of the hazard. There should be 3 '
                      'categories: 1, 2, and 3.')
    exposure_input = tr('An exposure raster layer where each cell represents '
                        'a population count.')
    output = tr('Map of population exposed to the high hazard category and a '
                'table with the number of people in each category')
    detailed_description = \
        tr('This function will calculate how many people will be impacted '
           'in each category for all categories in the hazard layer. '
           'Currently there should be 3 categories in the hazard layer. '
           'It will then report the result per category and the total '
           'number of people that will be impacted for the given hazard.')
    limitation = tr('The number of hazard categories is fixed at three.')

    # Configurable parameters
    defaults = get_defaults()
    parameters = OrderedDict([
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elder_ratio', defaults['ELDER_RATIO'])])
             })
         ]))
    ])

    def run(self, layers):
        """Plugin for impact of population as derived by categorised hazard

        Input
          layers: List of layers expected to contain
              my_hazard: Raster layer of categorised hazard
              my_exposure: Raster layer of population data

        Counts number of people exposed to each category of the hazard

        Return
          Map of population exposed to high category
          Table with number of people in each category
        """

        # The 3 category thresholds
        high_t = 1
        medium_t = 0.66
        low_t = 0.34

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Categorised Hazard
        my_exposure = get_exposure_layer(layers)  # Population Raster

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Extract data as numeric arrays
        C = my_hazard.get_data(nan=0.0)  # Category

        # Calculate impact as population exposed to each category
        P = my_exposure.get_data(nan=0.0, scaling=True)
        H = numpy.where(C == high_t, P, 0)
        M = numpy.where(C > medium_t, P, 0)
        L = numpy.where(C < low_t, P, 0)

        # Count totals
        total = int(numpy.sum(P))
        high = int(numpy.sum(H))
        medium = int(numpy.sum(M)) - int(numpy.sum(H))
        low = int(numpy.sum(L)) - int(numpy.sum(M))
        total_impact = high + medium + low

        # Don't show digits below a thousand
        total = round_thousand(total)
        total_impact = round_thousand(total_impact)
        high = round_thousand(high)
        medium = round_thousand(medium)
        low = round_thousand(low)

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([tr('People impacted '),
                      '%s' % format_int(total_impact)],
                     header=True),
            TableRow(
                [tr('People in high hazard area '),
                 '%s' % format_int(high)],
                header=True),
            TableRow([
                tr('People in medium hazard area '),
                '%s' % format_int(medium)
            ],
                     header=True),
            TableRow([tr('People in low hazard area'),
                      '%s' % format_int(low)],
                     header=True)
        ]

        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Map shows population density in high or medium '
               'hazard area'),
            tr('Total population: %s') % format_int(total)
        ])
        impact_summary = Table(table_body).toNewlineFreeString()
        map_title = tr('People in high hazard areas')

        # Generate 8 equidistant classes across the range of affected population
        # 8 is the number of classes in the predefined flood population style
        # as imported
        # noinspection PyTypeChecker
        classes = numpy.linspace(numpy.nanmin(M.flat[:]),
                                 numpy.nanmax(M.flat[:]), 8)

        # Modify labels in existing flood style to show quantities
        style_classes = style_info['style_classes']

        style_classes[1]['label'] = tr('Low [%i people/cell]') % classes[1]
        style_classes[4]['label'] = tr('Medium [%i people/cell]') % classes[4]
        style_classes[7]['label'] = tr('High [%i people/cell]') % classes[7]

        style_info['legend_title'] = tr('Population Density')

        # Create raster object and return
        R = Raster(M,
                   projection=my_hazard.get_projection(),
                   geotransform=my_hazard.get_geotransform(),
                   name=tr('Population which %s') %
                   (get_function_title(self).lower()),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'map_title': map_title
                   },
                   style_info=style_info)
        return R
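# A minimal sketch (not part of the impact function above) of the
# masking-and-summing step used in run(): toy 2x2 arrays stand in for the
# hazard (C) and population (P) rasters, and the medium/low totals are
# obtained by differencing the cumulative sums, exactly as in the code above.
import numpy

high_t, medium_t, low_t = 1, 0.66, 0.34
C = numpy.array([[1.0, 0.9], [0.5, 0.1]])      # hazard category per cell
P = numpy.array([[100., 200.], [300., 400.]])  # population per cell

H = numpy.where(C == high_t, P, 0)   # population in the high category
M = numpy.where(C > medium_t, P, 0)  # population above the medium threshold
L = numpy.where(C < low_t, P, 0)     # population below the low threshold

high = int(numpy.sum(H))
medium = int(numpy.sum(M)) - int(numpy.sum(H))
low = int(numpy.sum(L)) - int(numpy.sum(M))
print((high, medium, low))  # -> (100, 200, 100)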
Example #18
    def _generate_tables(self):
        """Parses the postprocessing output as one table per postprocessor.

        TODO: This should rather return json and then have a helper method to
        make html from the JSON.

        :returns: The html.
        :rtype: str
        """
        message = m.Message()

        for processor, results_list in self.output.iteritems():

            self.current_output_postprocessor = processor
            # results_list is for example:
            # [
            #    (PyQt4.QtCore.QString(u'Entire area'), OrderedDict([
            #        (u'Total', {'value': 977536, 'metadata': {}}),
            #        (u'Female population', {'value': 508319, 'metadata': {}}),
            #        (u'Weekly hygiene packs', {'value': 403453, 'metadata': {
            #         'description': 'Females hygiene packs for weekly use'}})
            #    ]))
            #]

            # Sort using the first indicator of a postprocessor
            sorted_results = sorted(
                results_list,
                key=self._sort_no_data,
                reverse=True)

            # Initialise the table
            has_no_data = False
            table = m.Table(
                style_class='table table-condensed table-striped')
            table.caption = self.tr('Detailed %s report') % (safeTr(
                get_postprocessor_human_name(processor)).lower())

            header = m.Row()
            header.add(str(self.attribute_title).capitalize())
            for calculation_name in sorted_results[0][1]:
                header.add(self.tr(calculation_name))
            table.add(header)

            # used to calculate the totals row as per issue #690
            postprocessor_totals = OrderedDict()

            for zone_name, calc in sorted_results:
                row = m.Row(zone_name)

                for indicator, calculation_data in calc.iteritems():
                    value = calculation_data['value']
                    if value == self.aggregator.defaults['NO_DATA']:
                        has_no_data = True
                        value += ' *'
                        try:
                            postprocessor_totals[indicator] += 0
                        except KeyError:
                            postprocessor_totals[indicator] = 0
                    else:
                        try:
                            postprocessor_totals[indicator] += int(value)
                        except KeyError:
                            postprocessor_totals[indicator] = int(value)
                    row.add(value)
                table.add(row)

            # add the totals row
            row = m.Row(self.tr('Total in aggregation areas'))
            for _, total in postprocessor_totals.iteritems():
                row.add(str(total))
            table.add(row)

            # add table to message
            message.add(table)
            if has_no_data:
                message.add(m.EmphasizedText(self.tr(
                    '* "%s" values mean that there where some problems while '
                    'calculating them. This did not affect the other '
                    'values.') % (self.aggregator.defaults['NO_DATA'])))

        return message
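# A minimal sketch (not part of the class above) of the totals-row
# accumulation used in _generate_tables(), without the Qt and messaging
# machinery. The 'NO DATA' marker and the per-zone figures are illustrative
# assumptions standing in for self.aggregator.defaults['NO_DATA'].
from collections import OrderedDict

NO_DATA = 'NO DATA'
zone_results = [
    ('Zone A', OrderedDict([('Total', {'value': 1000}),
                            ('Female population', {'value': 520})])),
    ('Zone B', OrderedDict([('Total', {'value': NO_DATA}),
                            ('Female population', {'value': 480})])),
]

totals = OrderedDict()
for zone_name, calc in zone_results:
    for indicator, data in calc.items():
        value = data['value']
        # A no-data cell contributes zero to its column total
        increment = 0 if value == NO_DATA else int(value)
        totals[indicator] = totals.get(indicator, 0) + increment

print(totals)  # OrderedDict([('Total', 1000), ('Female population', 1000)])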
class VolcanoPolygonHazardPopulation(FunctionProvider):
    """Impact function for volcano hazard zones impact on population

    :author AIFDR
    :rating 4
    :param requires category=='hazard' and \
                    subcategory in ['volcano'] and \
                    layertype=='vector'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """

    title = tr('Need evacuation')
    target_field = 'population'
    defaults = get_defaults()
    # Function documentation
    synopsis = tr('To assess the impacts of a volcano eruption on population.')
    actions = tr(
        'Provide details about how many people would likely be affected by '
        'each hazard zone.')
    hazard_input = tr(
        'A hazard vector layer can be polygon or point. If polygon, it must '
        'have a "KRB" attribute whose values are "Kawasan Rawan Bencana I", '
        '"Kawasan Rawan Bencana II" or "Kawasan Rawan Bencana III". If you '
        'want to see the name of the volcano in the result, you need to add '
        'a "NAME" attribute for point data or a "GUNUNG" attribute for '
        'polygon data.')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr(
        'Vector layer containing the affected population and the minimum '
        'needs based on the affected population.')

    parameters = OrderedDict([
        ('distance [km]', [3, 5, 10]),
        ('minimum needs', default_minimum_needs()),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elder_ratio', defaults['ELDER_RATIO'])])
             }), ('MinimumNeeds', {
                 'on': True
             })
         ]))
    ])

    def run(self, layers):
        """Risk plugin for volcano population evacuation

        :param layers: List of layers expected to contain where two layers
            should be present.

            * my_hazard: Vector polygon layer of volcano impact zones
            * my_exposure: Raster layer of population data on the same grid as
              my_hazard

        Counts number of people exposed to volcano event.

        :returns: Map of population exposed to the volcano hazard zone.
            The returned dict will include a table with number of people
            evacuated and supplies required.
        :rtype: dict
        """

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Volcano KRB
        my_exposure = get_exposure_layer(layers)

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Input checks
        if not my_hazard.is_vector:
            msg = ('Input hazard %s was not a vector layer as expected' %
                   my_hazard.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon or point layer. I got %s with '
               'layer type %s' %
               (my_hazard.get_name(), my_hazard.get_geometry_name()))
        if not (my_hazard.is_polygon_data or my_hazard.is_point_data):
            raise Exception(msg)

        if my_hazard.is_point_data:
            # Use concentric circles
            radii = self.parameters['distance [km]']

            centers = my_hazard.get_geometry()
            attributes = my_hazard.get_data()
            rad_m = [x * 1000 for x in radii]  # Convert to meters
            my_hazard = make_circular_polygon(centers,
                                              rad_m,
                                              attributes=attributes)

            category_title = 'Radius'
            category_header = tr('Distance [km]')
            category_names = radii

            name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
        else:
            # Use hazard map
            category_title = 'KRB'
            category_header = tr('Category')

            # FIXME (Ole): Change to English and use translation system
            category_names = [
                'Kawasan Rawan Bencana III', 'Kawasan Rawan Bencana II',
                'Kawasan Rawan Bencana I'
            ]

            name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map
            attributes = my_hazard.get_data()

        # Get names of volcanos considered
        if name_attribute in my_hazard.get_attribute_names():
            D = {}
            for att in my_hazard.get_data():
                # Run through all polygons and get unique names
                D[att[name_attribute]] = None

            volcano_names = ''
            for name in D:
                volcano_names += '%s, ' % name
            volcano_names = volcano_names[:-2]  # Strip trailing ', '
        else:
            volcano_names = tr('Not specified in data')

        if category_title not in my_hazard.get_attribute_names():
            msg = ('Hazard data %s did not contain expected '
                   'attribute %s ' % (my_hazard.get_name(), category_title))
            # noinspection PyExceptionInherit
            raise InaSAFEError(msg)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(my_hazard,
                                                  my_exposure,
                                                  attribute_name='population')

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a population count of zero
        new_attributes = my_hazard.get_data()

        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            cat = attr[category_title]
            categories[cat] = 0

        # Count affected population per polygon and total
        evacuated = 0
        for attr in P.get_data():
            # Get population at this location
            pop = float(attr['population'])

            # Update population count for associated polygon
            poly_id = attr['polygon_id']
            new_attributes[poly_id][self.target_field] += pop

            # Update population count for each category
            cat = new_attributes[poly_id][category_title]
            categories[cat] += pop

        # Count totals
        total = int(numpy.sum(my_exposure.get_data(nan=0)))

        # Don't show digits below a thousand
        total = round_thousand(total)

        # Count number and cumulative for each zone
        cum = 0
        pops = {}
        cums = {}
        for name in category_names:
            if category_title == 'Radius':
                key = name * 1000  # Convert to meters
            else:
                key = name
            # prevent key error
            pop = int(categories.get(key, 0))

            pop = round_thousand(pop)

            cum += pop
            cum = round_thousand(cum)

            pops[name] = pop
            cums[name] = cum

        # Use final accumulation as total number needing evac
        evacuated = cum

        tot_needs = evacuated_population_weekly_needs(evacuated)

        # Generate impact report for the pdf map
        blank_cell = ''
        table_body = [
            question,
            TableRow(
                [tr('Volcanos considered'),
                 '%s' % volcano_names, blank_cell],
                header=True),
            TableRow([
                tr('People needing evacuation'),
                '%s' % format_int(evacuated), blank_cell
            ],
                     header=True),
            TableRow(
                [category_header,
                 tr('Total'), tr('Cumulative')], header=True)
        ]

        for name in category_names:
            table_body.append(
                TableRow(
                    [name,
                     format_int(pops[name]),
                     format_int(cums[name])]))

        table_body.extend([
            TableRow(
                tr('Map shows population affected in '
                   'each of the volcano hazard polygons.')),
            TableRow([tr('Needs per week'),
                      tr('Total'), blank_cell],
                     header=True),
            [tr('Rice [kg]'),
             format_int(tot_needs['rice']), blank_cell],
            [
                tr('Drinking Water [l]'),
                format_int(tot_needs['drinking_water']), blank_cell
            ],
            [
                tr('Clean Water [l]'),
                format_int(tot_needs['water']), blank_cell
            ],
            [
                tr('Family Kits'),
                format_int(tot_needs['family_kits']), blank_cell
            ], [tr('Toilets'),
                format_int(tot_needs['toilets']), blank_cell]
        ])
        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population %s in the exposure layer') %
            format_int(total),
            tr('People need evacuation if they are within the '
               'volcanic hazard zones.')
        ])

        population_counts = [x[self.target_field] for x in new_attributes]
        impact_summary = Table(table_body).toNewlineFreeString()

        # check for zero impact
        if numpy.nanmax(population_counts) == 0 == numpy.nanmin(
                population_counts):
            table_body = [
                question,
                TableRow([
                    tr('People needing evacuation'),
                    '%s' % format_int(evacuated), blank_cell
                ],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        classes = create_classes(population_counts, len(colours))
        interval_classes = humanize_class(classes)
        # Define style info for output polygons showing population counts
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 100
                style_class['min'] = 0
            else:
                transparency = 30
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('People affected by volcanic hazard zone')
        legend_notes = tr('Thousand separator is represented by %s') % \
            get_thousand_separator()
        legend_units = tr('(people)')
        legend_title = tr('Population count')

        # Create vector layer and return
        V = Vector(data=new_attributes,
                   projection=my_hazard.get_projection(),
                   geometry=my_hazard.get_geometry(as_geometry_objects=True),
                   name=tr('Population affected by volcanic hazard zone'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return V
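# A minimal sketch (not part of the impact function above) of the per-zone
# and cumulative counting step used in the volcano run() method. The
# population per KRB zone is an illustrative assumption; rounding to the
# nearest thousand (round_thousand) is omitted here.
category_names = ['Kawasan Rawan Bencana III', 'Kawasan Rawan Bencana II',
                  'Kawasan Rawan Bencana I']
categories = {'Kawasan Rawan Bencana III': 1200,
              'Kawasan Rawan Bencana II': 5400,
              'Kawasan Rawan Bencana I': 20300}

pops = {}
cums = {}
cum = 0
for name in category_names:
    pop = int(categories.get(name, 0))  # .get() avoids a KeyError
    cum += pop
    pops[name] = pop
    cums[name] = cum

# The final accumulation is the total number of people needing evacuation
print((pops['Kawasan Rawan Bencana I'], cums['Kawasan Rawan Bencana I'], cum))
# -> (20300, 26900, 26900)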
Example #20
class ITBFatalityFunction(FunctionProvider):
    """Indonesian Earthquake Fatality Model

    This model was developed by Institut Teknologi Bandung (ITB) and
    implemented by Dr. Hadi Ghasemi, Geoscience Australia.


    Reference:

    Indonesian Earthquake Building-Damage and Fatality Models and
    Post Disaster Survey Guidelines Development,
    Bali, 27-28 February 2012, 54pp.


    Algorithm:

    In this study, the same functional form as Allen (2009) is adopted
    to express fatality rate as a function of intensity (see Eq. 10 in the
    report). The Matlab built-in function (fminsearch) for the Nelder-Mead
    algorithm was used to estimate the model parameters. The objective
    function (L2G norm) that is minimised during the optimisation is the
    same as the one used by Jaiswal et al. (2010).

    The coefficients used in the Indonesian model are
    x=0.62275231, y=8.03314466, zeta=2.15

    Allen, T. I., Wald, D. J., Earle, P. S., Marano, K. D., Hotovec, A. J.,
    Lin, K., and Hearne, M., 2009. An Atlas of ShakeMaps and population
    exposure catalog for earthquake loss modeling, Bull. Earthq. Eng. 7,
    701-718.

    Jaiswal, K., and Wald, D., 2010. An empirical model for global earthquake
    fatality estimation, Earthq. Spectra 26, 1017-1037.


    Caveats and limitations:

    The current model is the result of the above mentioned workshop and
    reflects the best available information. However, the current model
    has a number of issues listed below and is expected to evolve further
    over time.

    1 - The model is based on a limited number of observed fatality
        rates during 4 past fatal events.
    2 - The model clearly over-predicts the fatality rates at
        intensities higher than VIII.
    3 - The model only estimates the expected fatality rate for a given
        intensity level; however the associated uncertainty for the proposed
        model is not addressed.
    4 - There are a few known mistakes in developing the current model:
        - rounding MMI values to the nearest 0.5,
        - Implementing Finite-Fault models of candidate events, and
        - consistency between selected GMPEs with those in use by BMKG.
          These issues will be addressed by ITB team in the final report.

    Note: Because of these caveats, decisions should not be made solely on
    the information presented here and should always be verified by ground
    truthing and other reliable information sources.

    :author Hadi Ghasemi
    :rating 3

    :param requires category=='hazard' and \
                    subcategory=='earthquake' and \
                    layertype=='raster' and \
                    unit=='MMI'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'

    """

    title = tr('Die or be displaced')
    synopsis = tr(
        'To assess the impact of an earthquake on population based on the '
        'earthquake model developed by ITB')
    citations = tr(
        ' * Indonesian Earthquake Building-Damage and Fatality Models and '
        '   Post Disaster Survey Guidelines Development Bali, 27-28 '
        '   February 2012, 54pp.\n'
        ' * Allen, T. I., Wald, D. J., Earle, P. S., Marano, K. D., '
        '   Hotovec, A. J., Lin, K., and Hearne, M., 2009. An Atlas '
        '   of ShakeMaps and population exposure catalog for '
        '   earthquake loss modeling, Bull. Earthq. Eng. 7, 701-718.\n'
        ' * Jaiswal, K., and Wald, D., 2010. An empirical model for '
        '   global earthquake fatality estimation, Earthq. Spectra '
        '   26, 1017-1037.\n')
    limitation = tr(
        ' - The model is based on a limited number of observed fatality '
        '   rates during 4 past fatal events. \n'
        ' - The model clearly over-predicts the fatality rates at '
        '   intensities higher than VIII.\n'
        ' - The model only estimates the expected fatality rate '
        '   for a given intensity level; however the associated '
        '   uncertainty for the proposed model is not addressed.\n'
        ' - There are a few known mistakes in developing the current '
        '   model:\n\n'
        '   * rounding MMI values to the nearest 0.5,\n'
        '   * Implementing Finite-Fault models of candidate events, and\n'
        '   * consistency between selected GMPEs with those in use by '
        '     BMKG.\n')
    actions = tr(
        'Provide details about how many people will likely die or be '
        'displaced.')
    detailed_description = tr(
        'This model was developed by Institut Teknologi Bandung (ITB) '
        'and implemented by Dr. Hadi Ghasemi, Geoscience Australia\n'
        'Algorithm:\n'
        'In this study, the same functional form as Allen (2009) is '
        'adopted to express fatality rate as a function of intensity '
        '(see Eq. 10 in the report). The Matlab built-in function '
        '(fminsearch) for the Nelder-Mead algorithm was used to estimate '
        'the model parameters. The objective function (L2G norm) that '
        'is minimized during the optimisation is the same as the one '
        'used by Jaiswal et al. (2010).\n'
        'The coefficients used in the Indonesian model are x=0.62275231, '
        'y=8.03314466, zeta=2.15')
    defaults = get_defaults()

    parameters = OrderedDict([
        ('x', 0.62275231),
        ('y', 8.03314466),  # Model coefficients
        # Rates of people displaced for each MMI level
        ('displacement_rate', {
            1: 0,
            2: 0,
            3: 0,
            4: 0,
            5: 0,
            6: 1.0,
            7: 1.0,
            8: 1.0,
            9: 1.0,
            10: 1.0
        }),
        ('mmi_range', range(2, 10)),
        ('step', 0.5),
        # Threshold below which layer should be transparent
        ('tolerance', 0.01),
        ('calculate_displaced_people', True),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elder_ratio', defaults['ELDER_RATIO'])])
             }), ('MinimumNeeds', {
                 'on': True
             })
         ])),
        ('minimum needs', default_minimum_needs())
    ])

    def fatality_rate(self, mmi):
        """
        ITB method to compute fatality rate
        :param mmi:
        """
        # As per email discussion with Ole, Trevor, Hadi, mmi < 4 will have
        # a fatality rate of 0 - Tim
        if mmi < 4:
            return 0

        x = self.parameters['x']
        y = self.parameters['y']
        return numpy.power(10.0, x * mmi - y)

    def run(self, layers):
        """Indonesian Earthquake Fatality Model

        Input:

        :param layers: List of layers expected to contain,

                my_hazard: Raster layer of MMI ground shaking

                my_exposure: Raster layer of population density
        """

        displacement_rate = self.parameters['displacement_rate']

        # Tolerance for transparency
        tolerance = self.parameters['tolerance']

        # Extract input layers
        intensity = get_hazard_layer(layers)
        population = get_exposure_layer(layers)

        question = get_question(intensity.get_name(), population.get_name(),
                                self)

        # Extract data grids
        my_hazard = intensity.get_data()  # Ground Shaking
        my_exposure = population.get_data(scaling=True)  # Population Density

        # Calculate population affected by each MMI level
        # FIXME (Ole): this range is 2-9. Should 10 be included?

        mmi_range = self.parameters['mmi_range']
        number_of_exposed = {}
        number_of_displaced = {}
        number_of_fatalities = {}

        # Calculate fatality rates for observed intensity values (my_hazard)
        # based on the ITB power model
        R = numpy.zeros(my_hazard.shape)
        for mmi in mmi_range:
            # Identify cells where MMI is in class i and
            # count population affected by this shake level
            I = numpy.where((my_hazard > mmi - self.parameters['step']) *
                            (my_hazard <= mmi + self.parameters['step']),
                            my_exposure, 0)

            # Calculate expected number of fatalities per level
            fatality_rate = self.fatality_rate(mmi)

            F = fatality_rate * I

            # Calculate expected number of displaced people per level
            try:
                D = displacement_rate[mmi] * I
            except KeyError, e:
                msg = 'mmi = %i, I = %s, Error msg: %s' % (mmi, str(I), str(e))
                # noinspection PyExceptionInherit
                raise InaSAFEError(msg)

            # Adjust displaced people to disregard fatalities.
            # Set to zero if there are more fatalities than displaced.
            D = numpy.where(D > F, D - F, 0)

            # Sum up numbers for map
            R += D  # Displaced

            # Generate text with result for this study
            # This is what is used in the real time system exposure table
            number_of_exposed[mmi] = numpy.nansum(I.flat)
            number_of_displaced[mmi] = numpy.nansum(D.flat)
            # noinspection PyUnresolvedReferences
            number_of_fatalities[mmi] = numpy.nansum(F.flat)

        # Set resulting layer to NaN when less than a threshold. This is to
        # achieve transparency (see issue #126).
        R[R < tolerance] = numpy.nan

        # Total statistics
        total = int(round(numpy.nansum(my_exposure.flat) / 1000) * 1000)

        # Compute number of fatalities
        fatalities = int(
            round(numpy.nansum(number_of_fatalities.values()) / 1000)) * 1000
        # As per email discussion with Ole, Trevor, Hadi, total fatalities < 50
        # will be rounded down to 0 - Tim
        if fatalities < 50:
            fatalities = 0

        # Compute number of people displaced due to building collapse
        displaced = int(
            round(numpy.nansum(number_of_displaced.values()) / 1000)) * 1000

        # Generate impact report
        table_body = [question]

        # Add total fatality estimate
        s = format_int(fatalities)
        table_body.append(
            TableRow([tr('Number of fatalities'), s], header=True))

        if self.parameters['calculate_displaced_people']:
            # Add total estimate of people displaced
            s = format_int(displaced)
            table_body.append(
                TableRow([tr('Number of people displaced'), s], header=True))
        else:
            displaced = 0

        # Add estimate of total population in area
        s = format_int(int(total))
        table_body.append(
            TableRow([tr('Total number of people'), s], header=True))

        # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
        # FIXME: Refactor and share
        minimum_needs = self.parameters['minimum needs']
        needs = evacuated_population_weekly_needs(displaced, minimum_needs)

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([tr('Fatalities'),
                      '%s' % format_int(fatalities)],
                     header=True),
            TableRow([tr('People displaced'),
                      '%s' % format_int(displaced)],
                     header=True),
            TableRow(
                tr('Map shows density estimate of '
                   'displaced population')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), format_int(needs['rice'])],
            [tr('Drinking Water [l]'),
             format_int(needs['drinking_water'])],
            [tr('Clean Water [l]'),
             format_int(needs['water'])],
            [tr('Family Kits'),
             format_int(needs['family_kits'])],
            TableRow(tr('Action Checklist:'), header=True)
        ]

        if fatalities > 0:
            table_body.append(
                tr('Are there enough victim identification '
                   'units available for %s people?') % format_int(fatalities))
        if displaced > 0:
            table_body.append(
                tr('Are there enough shelters and relief items '
                   'available for %s people?') % format_int(displaced))
            table_body.append(
                TableRow(
                    tr('If yes, where are they located and '
                       'how will we distribute them?')))
            table_body.append(
                TableRow(
                    tr('If no, where can we obtain '
                       'additional relief items from and '
                       'how will we transport them?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People are considered to be displaced if they experience and '
               'survive a shake level of more than 5 on the MMI scale.'),
            tr('Minimum needs are defined in BNPB '
               'regulation 7/2008'),
            tr('The fatality calculation assumes that '
               'no fatalities occur for shake levels below 4 '
               'and fatality counts of less than 50 are '
               'disregarded.'),
            tr('All values are rounded up to the nearest '
               'integer in order to avoid representing human '
               'lives as fractions.')
        ])

        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(
            tr('Fatality model is from '
               'Institut Teknologi Bandung 2012.'))
        table_body.append(tr('Population numbers rounded to nearest 1000.'))

        # Result
        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # check for zero impact
        if numpy.nanmax(R) == 0 == numpy.nanmin(R):
            table_body = [
                question,
                TableRow([tr('Fatalities'),
                          '%s' % format_int(fatalities)],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = ['#EEFFEE', '#FFFF7F', '#E15500', '#E4001B', '#730000']
        classes = create_classes(R.flat[:], len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            style_class['quantity'] = classes[i]
            if i == 0:
                transparency = 100
            else:
                transparency = 30
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_classes.append(style_class)

        style_info = dict(target_field=None,
                          style_classes=style_classes,
                          style_type='rasterStyle')

        # For printing map purpose
        map_title = tr('Earthquake impact on population')
        legend_notes = tr('Thousand separator is represented by %s') % \
            get_thousand_separator()
        legend_units = tr('(people per cell)')
        legend_title = tr('Population density')

        # Create raster object and return
        L = Raster(R,
                   projection=population.get_projection(),
                   geotransform=population.get_geotransform(),
                   keywords={
                       'impact_summary': impact_summary,
                       'total_population': total,
                       'total_fatalities': fatalities,
                       'fatalites_per_mmi': number_of_fatalities,
                       'exposed_per_mmi': number_of_exposed,
                       'displaced_per_mmi': number_of_displaced,
                       'impact_table': impact_table,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   name=tr('Estimated displaced population per cell'),
                   style_info=style_info)

        return L
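# A minimal sketch (not part of the impact function above) of the ITB
# fatality-rate formula implemented in fatality_rate(): rate = 10**(x*MMI - y),
# forced to zero below MMI 4, using the coefficients quoted in the docstring.
import numpy

X = 0.62275231
Y = 8.03314466

def itb_fatality_rate(mmi, x=X, y=Y):
    """Expected fatality rate for a given MMI level (0 below MMI 4)."""
    if mmi < 4:
        return 0.0
    return float(numpy.power(10.0, x * mmi - y))

for mmi in range(4, 11):
    print('MMI %2i -> fatality rate %.6f' % (mmi, itb_fatality_rate(mmi)))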
Example #21
class FloodBuildingImpactFunction(FunctionProvider):
    """Inundation impact on building data

    :author Ole Nielsen, Kristy van Putten
    # this rating below is only for testing a function, not the real one
    :rating 0
    :param requires category=='hazard' and \
                    subcategory in ['flood', 'tsunami']

    :param requires category=='exposure' and \
                    subcategory=='structure' and \
                    layertype=='vector'
    """

    # Function documentation
    target_field = 'INUNDATED'
    title = tr('Be flooded')
    synopsis = tr(
        'To assess the impacts of (flood or tsunami) inundation on building '
        'footprints originating from OpenStreetMap (OSM).')
    actions = tr(
        'Provide details about where critical infrastructure might be flooded')
    detailed_description = tr(
        'The inundation status is calculated for each building (using the '
        'centroid if it is a polygon) based on the hazard levels provided. If '
        'the hazard is given as a raster, a threshold of 1 meter is used. This '
        'is configurable through the InaSAFE interface. If the hazard is '
        'given as a vector polygon layer buildings are considered to be '
        'impacted depending on the value of hazard attributes (in order) '
        '"affected" or "FLOODPRONE": If a building is in a region that has '
        'attribute "affected" set to True (or 1) it is impacted. If attribute '
        '"affected" does not exist but "FLOODPRONE" does, then the building '
        'is considered impacted if "FLOODPRONE" is "yes". If neither '
        '"affected" nor "FLOODPRONE" is available, a building will be '
        'impacted if it belongs to any polygon. The latter behaviour is '
        'implemented through the attribute "inapolygon" which is automatically'
        ' assigned.')
    hazard_input = tr(
        'A hazard raster layer where each cell represents flood depth (in '
        'meters), or a vector polygon layer where each polygon represents an '
        'inundated area. In the latter case, the following attributes are '
        'recognised (in order): "affected" (True or False) or "FLOODPRONE" '
        '(Yes or No). (True may be represented as 1, False as 0.)')
    exposure_input = tr(
        'Vector polygon layer extracted from OSM where each polygon '
        'represents the footprint of a building.')
    output = tr(
        'Vector layer containing buildings estimated to be flooded and a '
        'breakdown of the buildings by type.')
    limitation = tr(
        'This function only flags buildings as impacted or not, either based '
        'on a fixed threshold in the case of a raster hazard or on the '
        'attributes mentioned under input in the case of a vector hazard.')

    # parameters
    parameters = OrderedDict([('threshold [m]', 1.0),
                              ('postprocessors',
                               OrderedDict([('BuildingType', {
                                   'on': True
                               })]))])

    def run(self, layers):
        """Flood impact to buildings (e.g. from Open Street Map).
        """

        threshold = self.parameters['threshold [m]']  # Flood threshold [m]

        verify(isinstance(threshold, float),
               'Expected threshold to be a float. Got %s' % str(threshold))

        # Extract data
        H = get_hazard_layer(layers)  # Depth
        E = get_exposure_layer(layers)  # Building locations

        question = get_question(H.get_name(), E.get_name(), self)

        # Determine attribute name for hazard levels
        if H.is_raster:
            mode = 'grid'
            hazard_attribute = 'depth'
        else:
            mode = 'regions'
            hazard_attribute = None

        # Interpolate hazard level to building locations
        I = assign_hazard_values_to_exposure_data(
            H, E, attribute_name=hazard_attribute)

        # Extract relevant exposure data
        attribute_names = I.get_attribute_names()
        attributes = I.get_data()
        N = len(I)
        # Calculate building impact
        count = 0
        buildings = {}
        affected_buildings = {}
        for i in range(N):
            if mode == 'grid':
                # Get the interpolated depth
                x = float(attributes[i]['depth'])
                x = x >= threshold
            elif mode == 'regions':
                # Use interpolated polygon attribute
                atts = attributes[i]

                # FIXME (Ole): Need to agree whether to use one or the
                # other as this can be very confusing!
                # For now look for 'affected' first
                if 'affected' in atts:
                    # E.g. from flood forecast
                    # Assume that building is wet if inside polygon
                    # as flagged by attribute Flooded
                    res = atts['affected']
                    if res is None:
                        x = False
                    else:
                        x = bool(res)

                elif 'FLOODPRONE' in atts:
                    res = atts['FLOODPRONE']
                    if res is None:
                        x = False
                    else:
                        x = res.lower() == 'yes'
                elif DEFAULT_ATTRIBUTE in atts:
                    # Check the default attribute assigned for points
                    # covered by a polygon
                    res = atts[DEFAULT_ATTRIBUTE]
                    if res is None:
                        x = False
                    else:
                        x = res
                else:
                    # there is no flood related attribute
                    msg = ('No flood related attribute found in %s. '
                           'I was looking for either "affected", "FLOODPRONE" '
                           'or "inapolygon". The latter should have been '
                           'automatically set by call to '
                           'assign_hazard_values_to_exposure_data(). '
                           'Sorry I can\'t help more.')
                    raise Exception(msg)
            else:
                msg = (tr(
                    'Unknown hazard mode %s. Must be either "grid" or "regions"')
                       % mode)
                raise Exception(msg)

            # Count affected buildings by usage type if available
            if 'type' in attribute_names:
                usage = attributes[i]['type']
            elif 'TYPE' in attribute_names:
                usage = attributes[i]['TYPE']
            else:
                usage = None
            if 'amenity' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['amenity']
            if 'building_t' in attribute_names and (usage is None
                                                    or usage == 0):
                usage = attributes[i]['building_t']
            if 'office' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['office']
            if 'tourism' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['tourism']
            if 'leisure' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['leisure']
            if 'building' in attribute_names and (usage is None or usage == 0):
                usage = attributes[i]['building']
                if usage == 'yes':
                    usage = 'building'

            if usage is not None and usage != 0:
                key = usage
            else:
                key = 'unknown'

            if key not in buildings:
                buildings[key] = 0
                affected_buildings[key] = 0

            # Count all buildings by type
            buildings[key] += 1
            if x is True:
                # Count affected buildings by type
                affected_buildings[key] += 1

                # Count total affected buildings
                count += 1

            # Add calculated impact to existing attributes
            attributes[i][self.target_field] = x

        # Lump small entries and 'unknown' into 'other' category
        for usage in buildings.keys():
            x = buildings[usage]
            if x < 25 or usage == 'unknown':
                if 'other' not in buildings:
                    buildings['other'] = 0
                    affected_buildings['other'] = 0

                buildings['other'] += x
                affected_buildings['other'] += affected_buildings[usage]
                del buildings[usage]
                del affected_buildings[usage]

        # Generate simple impact report
        table_body = [
            question,
            TableRow([tr('Building type'),
                      tr('Number flooded'),
                      tr('Total')],
                     header=True),
            TableRow([tr('All'), format_int(count),
                      format_int(N)])
        ]

        school_closed = 0
        hospital_closed = 0
        # Generate breakdown by building usage type if available
        list_type_attribute = [
            'TYPE', 'type', 'amenity', 'building_t', 'office', 'tourism',
            'leisure', 'building'
        ]
        intersect_type = set(attribute_names) & set(list_type_attribute)
        if len(intersect_type) > 0:
            # Make list of building types
            building_list = []
            for usage in buildings:
                building_type = usage.replace('_', ' ')

                # Lookup internationalised value if available
                building_type = tr(building_type)
                building_list.append([
                    building_type.capitalize(),
                    format_int(affected_buildings[usage]),
                    format_int(buildings[usage])
                ])
                if building_type == 'school':
                    school_closed = affected_buildings[usage]
                if building_type == 'hospital':
                    hospital_closed = affected_buildings[usage]

            # Sort alphabetically
            building_list.sort()

            table_body.append(
                TableRow(tr('Breakdown by building type'), header=True))
            for row in building_list:
                s = TableRow(row)
                table_body.append(s)

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(
            TableRow(tr('Are the critical facilities still open?')))
        table_body.append(
            TableRow(
                tr('Which structures have warning capacity (e.g. sirens, speakers, '
                   'etc.)?')))
        table_body.append(
            TableRow(tr('Which buildings will be evacuation centres?')))
        table_body.append(
            TableRow(tr('Where will we locate the operations centre?')))
        table_body.append(
            TableRow(
                tr('Where will we locate warehouse and/or distribution centres?'
                   )))

        if school_closed > 0:
            table_body.append(
                TableRow(
                    tr('Where will the students from the %s closed schools go to '
                       'study?') % format_int(school_closed)))

        if hospital_closed > 0:
            table_body.append(
                TableRow(
                    tr('Where will the patients from the %s closed hospitals go '
                       'for treatment and how will we transport them?') %
                    format_int(hospital_closed)))

        table_body.append(TableRow(tr('Notes'), header=True))
        assumption = tr('Buildings are said to be flooded when ')
        if mode == 'grid':
            assumption += tr('flood levels exceed %.1f m') % threshold
        else:
            assumption += tr('they are in regions marked as affected')
        table_body.append(assumption)

        # Result
        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # Create style
        style_classes = [
            dict(label=tr('Not Inundated'),
                 value=0,
                 colour='#1EFC7C',
                 transparency=0,
                 size=1),
            dict(label=tr('Inundated'),
                 value=1,
                 colour='#F31A1C',
                 transparency=0,
                 size=1)
        ]
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='categorizedSymbol')

        # For printing map purpose
        map_title = tr('Buildings inundated')
        legend_units = tr('(inundated or not inundated)')
        legend_title = tr('Structure inundated status')

        # Create vector layer and return
        V = Vector(data=attributes,
                   projection=I.get_projection(),
                   geometry=I.get_geometry(),
                   name=tr('Estimated buildings affected'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return V
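# A minimal sketch (not part of the impact function above) of the per-building
# flood flag used in run(): the 'affected' attribute wins, then 'FLOODPRONE',
# then (in grid mode) the interpolated depth against the threshold. The
# attribute dictionaries below are made-up examples, and 'inapolygon' is
# assumed here to be the automatically assigned DEFAULT_ATTRIBUTE.
def is_flooded(atts, mode='regions', threshold=1.0):
    if mode == 'grid':
        return float(atts.get('depth', 0.0)) >= threshold
    if 'affected' in atts:
        return bool(atts['affected'])
    if 'FLOODPRONE' in atts:
        res = atts['FLOODPRONE']
        return res is not None and res.lower() == 'yes'
    # Fall back to the in-polygon flag set during interpolation
    return bool(atts.get('inapolygon', False))

print(is_flooded({'affected': 1}))              # True
print(is_flooded({'FLOODPRONE': 'No'}))         # False
print(is_flooded({'depth': 1.4}, mode='grid'))  # True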
class FloodEvacuationFunction(FunctionProvider):
    """Impact function for flood evacuation

    :author AIFDR
    :rating 4
    :param requires category=='hazard' and \
                    subcategory in ['flood', 'tsunami'] and \
                    layertype=='raster' and \
                    unit=='m'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """

    title = tr('Need evacuation')
    defaults = get_defaults()

    # Function documentation
    synopsis = tr(
        'To assess the impacts of (flood or tsunami) inundation in raster '
        'format on population.')
    actions = tr(
        'Provide details about how many people would likely need to be '
        'evacuated, where they are located and what resources would be '
        'required to support them.')
    detailed_description = tr(
        'The population subject to inundation exceeding a threshold '
        '(default 1m) is calculated and returned as a raster layer. In '
        'addition the total number and the required needs in terms of the '
        'BNPB (Perka 7) are reported. The threshold can be changed and even '
        'contain multiple numbers in which case evacuation and needs are '
        'calculated using the largest number with population breakdowns '
        'provided for the smaller numbers. The population raster is resampled '
        'to the resolution of the hazard raster and is rescaled so that the '
        'resampled population counts reflect estimates of population count '
        'per resampled cell. The resulting impact layer has the same '
        'resolution and reflects population count per cell which are affected '
        'by inundation.')
    hazard_input = tr(
        'A hazard raster layer where each cell represents flood depth '
        '(in meters).')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr(
        'Raster layer containing the affected population and the minimum '
        'needs based on the affected population.')
    limitation = tr(
        'The default threshold of 1 meter was selected based on consensus, '
        'not hard evidence.')

    # Configurable parameters
    # TODO: Share the minimum needs and make another default value
    parameters = OrderedDict([
        ('thresholds [m]', [1.0]),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elder_ratio', defaults['ELDER_RATIO'])])
             })
         ])),
        ('minimum needs',
         OrderedDict([('Rice', 2.8), ('Drinking Water', 17.5), ('Water', 105),
                      ('Family Kits', 0.2), ('Toilets', 0.05)]))
    ])

    def run(self, layers):
        """Risk plugin for flood population evacuation

        Input
          layers: List of layers expected to contain
              my_hazard: Raster layer of flood depth
              my_exposure: Raster layer of population data on the same grid
              as my_hazard

        Counts number of people exposed to flood levels exceeding
        specified threshold.

        Return
          Map of population exposed to flood levels exceeding the threshold
          Table with number of people evacuated and supplies required
        """

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Flood inundation [m]
        my_exposure = get_exposure_layer(layers)

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Determine depths above which people are regarded affected [m]
        # Use thresholds from inundation layer if specified
        thresholds = self.parameters['thresholds [m]']

        verify(isinstance(thresholds, list),
               'Expected thresholds to be a list. Got %s' % str(thresholds))

        # Extract data as numeric arrays
        D = my_hazard.get_data(nan=0.0)  # Depth

        # Calculate impact as population exposed to depths > max threshold
        P = my_exposure.get_data(nan=0.0, scaling=True)

        # Calculate impact to intermediate thresholds
        counts = []
        # merely initialize
        my_impact = None
        for i, lo in enumerate(thresholds):
            if i == len(thresholds) - 1:
                # The last threshold
                my_impact = M = numpy.where(D >= lo, P, 0)
            else:
                # Intermediate thresholds
                hi = thresholds[i + 1]
                M = numpy.where((D >= lo) * (D < hi), P, 0)

            # Count
            val = int(numpy.sum(M))

            # Round to the nearest thousand (hide insignificant digits)
            val = round_thousand(val)
            counts.append(val)

        # Count totals
        evacuated = counts[-1]
        total = int(numpy.sum(P))
        # Round to the nearest thousand (hide insignificant digits)
        total = round_thousand(total)

        # Calculate estimated minimum needs
        # The default value of each logistic is based on BNPB Perka 7/2008
        # minimum bantuan
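        # Each value is the weekly allocation per evacuated person, e.g.
        # 2.8 kg of rice; family kits (0.2) and toilets (0.05) amount to
        # one kit per five people and one toilet per twenty people.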
        minimum_needs = self.parameters['minimum needs']
        mn_rice = minimum_needs['Rice']
        mn_drinking_water = minimum_needs['Drinking Water']
        mn_water = minimum_needs['Water']
        mn_family_kits = minimum_needs['Family Kits']
        mn_toilets = minimum_needs['Toilets']

        rice = int(evacuated * mn_rice)
        drinking_water = int(evacuated * mn_drinking_water)
        water = int(evacuated * mn_water)
        family_kits = int(evacuated * mn_family_kits)
        toilets = int(evacuated * mn_toilets)

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([(tr('People in %.1f m of water') % thresholds[-1]),
                      '%s*' % format_int(evacuated)],
                     header=True),
            TableRow(tr('* Number is rounded to the nearest 1000'),
                     header=False),
            TableRow(tr('Map shows population density needing evacuation')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), format_int(rice)],
            [tr('Drinking Water [l]'), format_int(drinking_water)],
            [tr('Clean Water [l]'), format_int(water)],
            [tr('Family Kits'), format_int(family_kits)],
            [tr('Toilets'), format_int(toilets)]
        ]

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(TableRow(tr('How will warnings be disseminated?')))
        table_body.append(TableRow(tr('How will we reach stranded people?')))
        table_body.append(TableRow(tr('Do we have enough relief items?')))
        table_body.append(
            TableRow(
                tr('If yes, where are they located and how '
                   'will we distribute them?')))
        table_body.append(
            TableRow(
                tr('If no, where can we obtain additional relief items '
                   'from and how will we transport them here?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People need evacuation if flood levels exceed %(eps).1f m') % {
                'eps': thresholds[-1]
            },
            tr('Minimum needs are defined in BNPB regulation 7/2008'),
            tr('All values are rounded up to the nearest integer in order to '
               'avoid representing human lives as fractions.')
        ])

        if len(counts) > 1:
            table_body.append(TableRow(tr('Detailed breakdown'), header=True))

            for i, val in enumerate(counts[:-1]):
                s = (tr('People in %(lo).1f m to %(hi).1f m of water: %(val)s')
                     % {
                         'lo': thresholds[i],
                         'hi': thresholds[i + 1],
                         'val': format_int(val)
                     })
                table_body.append(TableRow(s, header=False))

        # Result
        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # check for zero impact
        if numpy.nanmax(my_impact) == 0 == numpy.nanmin(my_impact):
            table_body = [
                question,
                TableRow([(tr('People in %.1f m of water') % thresholds[-1]),
                          '%s' % format_int(evacuated)],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        classes = create_classes(my_impact.flat[:], len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []

        for i in xrange(len(colours)):
            style_class = dict()
            if i == 1:
                label = create_label(interval_classes[i], 'Low')
            elif i == 4:
                label = create_label(interval_classes[i], 'Medium')
            elif i == 7:
                label = create_label(interval_classes[i], 'High')
            else:
                label = create_label(interval_classes[i])
            style_class['label'] = label
            style_class['quantity'] = classes[i]
            if i == 0:
                transparency = 100
            else:
                transparency = 0
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_classes.append(style_class)

        style_info = dict(target_field=None,
                          style_classes=style_classes,
                          style_type='rasterStyle')

        # For printing map purpose
        map_title = tr('People in need of evacuation')
        legend_notes = (tr('Thousand separator is represented by %s')
                        % get_thousand_separator())
        legend_units = tr('(people per cell)')
        legend_title = tr('Population density')

        # Create raster object and return
        R = Raster(my_impact,
                   projection=my_hazard.get_projection(),
                   geotransform=my_hazard.get_geotransform(),
                   name=tr('Population which %s') % get_function_title(self),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return R
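
# A rough usage sketch (hypothetical paths; read_layer is assumed to come
# from the InaSAFE storage API, which normally loads the layers for you):
#
#     flood_layer = read_layer('flood_depth.tif')      # hazard raster [m]
#     population_layer = read_layer('population.tif')  # exposure raster
#     impact = FloodEvacuationFunction().run(
#         [flood_layer, population_layer])
#     print impact.get_name()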
Example 23
class VolcanoBuildingImpact(FunctionProvider):
    """Risk plugin for volcano building impact

    :author AIFDR
    :rating 4
    :param requires category=='hazard' and \
                    subcategory in ['volcano'] and \
                    layertype=='vector'

    :param requires category=='exposure' and \
                    subcategory=='structure' and \
                    layertype=='vector'
    """

    title = tr('Be affected')
    target_field = 'buildings'

    # Function documentations
    synopsis = tr('To assess the impacts of a volcano eruption on buildings.')
    actions = tr(
        'Provide details about how many buildings would likely be affected '
        'by each hazard zone.')
    hazard_input = tr(
        'A hazard vector layer can be polygon or point. If polygon, it must '
        'have a "KRB" attribute whose values are "Kawasan Rawan Bencana I", '
        '"Kawasan Rawan Bencana II" or "Kawasan Rawan Bencana III". If you '
        'want to see the name of the volcano in the result, you need to add '
        'a "NAME" attribute for point data or a "GUNUNG" attribute for '
        'polygon data.')
    exposure_input = tr(
        'Vector polygon layer extracted from OSM where each polygon '
        'represents the footprint of a building.')
    output = tr(
        'Vector layer containing a map of buildings exposed to volcanic '
        'hazard zones for each Kawasan Rawan Bencana or radius.')

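    # Radii in kilometres used to buffer a point (crater) hazard layer into
    # concentric circular hazard zones; ignored when the hazard layer is
    # already a KRB polygon map (see run() below).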
    parameters = OrderedDict([('distances [km]', [3, 5, 10])])

    def run(self, layers):
        """Risk plugin for volcano hazard on building/structure

        Input
          layers: List of layers expected to contain
              my_hazard: Vector layer of volcano hazard (polygon or point)
              my_exposure: Vector layer of building (structure) footprints

        Counts the number of buildings exposed to each volcano hazard zone.

        Return
          Map of buildings exposed to volcanic hazard zones
          Table with number of buildings affected
        """

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Volcano hazard layer
        my_exposure = get_exposure_layer(layers)
        is_point_data = False

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Input checks
        if not my_hazard.is_vector:
            msg = ('Input hazard %s was not a vector layer as expected' %
                   my_hazard.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon or point layer. I got %s '
               'with layer type %s' %
               (my_hazard.get_name(), my_hazard.get_geometry_name()))
        if not (my_hazard.is_polygon_data or my_hazard.is_point_data):
            raise Exception(msg)

        if my_hazard.is_point_data:
            # Use concentric circles
            radii = self.parameters['distances [km]']
            is_point_data = True

            centers = my_hazard.get_geometry()
            attributes = my_hazard.get_data()
            rad_m = [x * 1000 for x in radii]  # Convert to meters
            Z = make_circular_polygon(centers, rad_m, attributes=attributes)
            # To check
            category_title = 'Radius'
            my_hazard = Z

            category_names = rad_m
            name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
        else:
            # Use hazard map
            category_title = 'KRB'

            # FIXME (Ole): Change to English and use translation system
            category_names = [
                'Kawasan Rawan Bencana III', 'Kawasan Rawan Bencana II',
                'Kawasan Rawan Bencana I'
            ]
            name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map

        # Get names of volcanos considered
        if name_attribute in my_hazard.get_attribute_names():
            D = {}
            for att in my_hazard.get_data():
                # Run through all polygons and get unique names
                D[att[name_attribute]] = None

            volcano_names = ''
            for name in D:
                volcano_names += '%s, ' % name
            volcano_names = volcano_names[:-2]  # Strip trailing ', '
        else:
            volcano_names = tr('Not specified in data')

        if category_title not in my_hazard.get_attribute_names():
            msg = ('Hazard data %s did not contain expected '
                   'attribute %s ' % (my_hazard.get_name(), category_title))
            # noinspection PyExceptionInherit
            raise InaSAFEError(msg)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(my_hazard, my_exposure)
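        # Each interpolated building feature carries a 'polygon_id'
        # attribute indicating which hazard polygon (if any) it falls in;
        # it is used below to accumulate the per-zone building counts.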

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a building count of zero
        new_attributes = my_hazard.get_data()

        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            cat = attr[category_title]
            categories[cat] = 0

        # Count impacted building per polygon and total
        for attr in P.get_data():

            # Update building count for associated polygon
            poly_id = attr['polygon_id']
            if poly_id is not None:
                new_attributes[poly_id][self.target_field] += 1

                # Update building count for each category
                cat = new_attributes[poly_id][category_title]
                categories[cat] += 1

        # Count totals
        total = len(my_exposure)

        # Generate simple impact report
        blank_cell = ''
        table_body = [
            question,
            TableRow(
                [tr('Volcanos considered'),
                 '%s' % volcano_names, blank_cell],
                header=True),
            TableRow([tr('Distance [km]'),
                      tr('Total'),
                      tr('Cumulative')],
                     header=True)
        ]

        cum = 0
        for name in category_names:
            # prevent key error
            count = categories.get(name, 0)
            cum += count
            if is_point_data:
                name = int(name) / 1000
            table_body.append(
                TableRow([name, format_int(count),
                          format_int(cum)]))

        table_body.append(
            TableRow(
                tr('Map shows buildings affected in '
                   'each of the volcano hazard polygons.')))
        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total number of buildings %s in the viewable '
               'area') % format_int(total),
            tr('Only buildings available in OpenStreetMap '
               'are considered.')
        ])

        impact_summary = Table(table_body).toNewlineFreeString()
        building_counts = [x[self.target_field] for x in new_attributes]

        if max(building_counts) == 0 == min(building_counts):
            table_body = [
                question,
                TableRow([
                    tr('Number of buildings affected'),
                    '%s' % format_int(cum), blank_cell
                ],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        classes = create_classes(building_counts, len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 100
                style_class['min'] = 0
            else:
                transparency = 30
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('Buildings affected by volcanic hazard zone')
        legend_notes = (tr('Thousand separator is represented by %s')
                        % get_thousand_separator())
        legend_units = tr('(building)')
        legend_title = tr('Building count')

        # Create vector layer and return
        V = Vector(data=new_attributes,
                   projection=my_hazard.get_projection(),
                   geometry=my_hazard.get_geometry(as_geometry_objects=True),
                   name=tr('Buildings affected by volcanic hazard zone'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return V
Example 24
def get_metadata(func):
    """Collect metadata for an impact function and return it as a dictionary.

    :param func: Name of the impact function as produced by
        pretty_function_name.

    :returns: None if the function is not registered, otherwise a
        dictionary that may contain:
        * unique_identifier : the name passed in
        * author : string (identified by :author in the docstring)
        * rating : string (identified by :rating in the docstring)
        * title : the translated function title
        * synopsis, actions, citations, detailed_description,
          hazard_input, exposure_input, output and limitation, taken
          from the corresponding function attributes when present
    :rtype: dict or None
    """
    retval = OrderedDict()
    retval['unique_identifier'] = func

    plugins_dict = dict([(pretty_function_name(p), p)
                         for p in FunctionProvider.plugins])
    if func not in plugins_dict:
        return None
    else:
        func = plugins_dict[func]

    author_tag = ':author'
    rating_tag = ':rating'

    # attributes
    synopsis = 'synopsis'
    actions = 'actions'
    # citations must be a list
    citations = 'citations'
    detailed_description = 'detailed_description'
    hazard_input = 'hazard_input'
    exposure_input = 'exposure_input'
    limitation = 'limitation'
    output = 'output'

    if hasattr(func, '__doc__') and func.__doc__:
        doc_str = func.__doc__
        for line in doc_str.split('\n'):
            doc_line = remove_double_spaces(line)
            doc_line = doc_line.strip()

            if doc_line.startswith(author_tag):
                retval['author'] = remove_double_spaces(
                    doc_line[len(author_tag) + 1:])
            elif doc_line.startswith(rating_tag):
                retval['rating'] = doc_line[len(rating_tag) + 1:]
    retval['title'] = get_function_title(func)

    if hasattr(func, synopsis):
        retval[synopsis] = func.synopsis
    if hasattr(func, actions):
        retval[actions] = func.actions
    if hasattr(func, citations):
        retval[citations] = func.citations
    if hasattr(func, detailed_description):
        retval[detailed_description] = func.detailed_description
    if hasattr(func, hazard_input):
        retval[hazard_input] = func.hazard_input
    if hasattr(func, exposure_input):
        retval[exposure_input] = func.exposure_input
    if hasattr(func, output):
        retval[output] = func.output
    if hasattr(func, limitation):
        retval[limitation] = func.limitation
    return retval
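
# A minimal usage sketch (the function name below is hypothetical; any key
# produced by pretty_function_name for a registered FunctionProvider works):
#
#     metadata = get_metadata('Flood Evacuation Function')
#     if metadata is not None:
#         print metadata['title'], metadata.get('rating')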


class ITBFatalityFunctionConfigurable(FunctionProvider):
    """Indonesian Earthquake Fatality Model

    This model was developed by Institut Teknologi Bandung (ITB) and
    implemented by Dr Hadi Ghasemi, Geoscience Australia


    Reference:

    Indonesian Earthquake Building-Damage and Fatality Models and
    Post Disaster Survey Guidelines Development,
    Bali, 27-28 February 2012, 54pp.


    Algorithm:

    In this study, the same functional form as Allen (2009) is adopted
    to express fatality rate as a function of intensity (see Eq. 10 in the
    report). The Matlab built-in function (fminsearch) for the Nelder-Mead
    algorithm is used to estimate the model parameters. The objective
    function (L2G norm) that is minimised during the optimisation is the
    same as the one used by Jaiswal et al. (2010).

    The coefficients used in the Indonesian model are
    x=0.62275231, y=8.03314466, zeta=2.15

    Allen, T. I., Wald, D. J., Earle, P. S., Marano, K. D., Hotovec, A. J.,
    Lin, K., and Hearne, M., 2009. An Atlas of ShakeMaps and population
    exposure catalog for earthquake loss modeling, Bull. Earthq. Eng. 7,
    701-718.

    Jaiswal, K., and Wald, D., 2010. An empirical model for global earthquake
    fatality estimation, Earthq. Spectra 26, 1017-1037.


    Caveats and limitations:

    The current model is the result of the above mentioned workshop and
    reflects the best available information. However, the current model
    has a number of issues listed below and is expected to evolve further
    over time.

    1 - The model is based on a limited number of observed fatality
        rates from 4 past fatal events.
    2 - The model clearly over-predicts the fatality rates at
        intensities higher than VIII.
    3 - The model only estimates the expected fatality rate for a given
        intensity level; however the associated uncertainty for the proposed
        model is not addressed.
    4 - There are a few known mistakes in the development of the current
        model:
        - rounding MMI values to the nearest 0.5,
        - implementing Finite-Fault models of candidate events, and
        - consistency between the selected GMPEs and those in use by BMKG.
          These issues will be addressed by the ITB team in the final
          report.


    :author Hadi Ghasemi
    :rating 3

    :param requires category=='hazard' and \
                    subcategory=='earthquake' and \
                    layertype=='raster' and \
                    unit=='MMI'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'

    """

    title = tr('Die or be displaced')
    defaults = get_defaults()
    parameters = OrderedDict([
        ('x', 0.62275231),
        ('y', 8.03314466),  # Model coefficients
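        # Fatality rate per MMI level follows the ITB power model applied
        # in run(): fatality_rate = 10 ** (x * MMI - y)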
        # Rates of people displaced for each MMI level
        ('displacement_rate', {
            1: 0,
            2: 0,
            3: 0,
            4: 0,
            5: 0,
            6: 1.0,
            7: 1.0,
            8: 1.0,
            9: 1.0,
            10: 1.0
        }),
        # Threshold below which layer should be transparent
        ('tolerance', 0.01),
        ('calculate_displaced_people', True),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on': True,
                 'params': OrderedDict([
                     ('youth_ratio', defaults['YOUTH_RATIO']),
                     ('adult_ratio', defaults['ADULT_RATIO']),
                     ('elder_ratio', defaults['ELDER_RATIO'])])
             })
         ]))
    ])

    def run(self, layers):
        """Indonesian Earthquake Fatality Model

        Input
          layers: List of layers expected to contain
              H: Raster layer of MMI ground shaking
              P: Raster layer of population density

        """

        # Establish model coefficients
        x = self.parameters['x']
        y = self.parameters['y']

        # Define percentages of people being displaced at each mmi level
        displacement_rate = self.parameters['displacement_rate']

        # Tolerance for transparency
        tolerance = self.parameters['tolerance']

        # Extract input layers
        intensity = get_hazard_layer(layers)
        population = get_exposure_layer(layers)

        question = get_question(intensity.get_name(), population.get_name(),
                                self)

        # Extract data grids
        H = intensity.get_data()  # Ground Shaking
        P = population.get_data(scaling=True)  # Population Density

        # Calculate population affected by each MMI level
        # FIXME (Ole): this range is 2-9. Should 10 be included?
        mmi_range = range(2, 10)
        number_of_exposed = {}
        number_of_displaced = {}
        number_of_fatalities = {}

        # Calculate fatality rates for observed intensity values (H)
        # based on the ITB power model
        R = numpy.zeros(H.shape)
        for mmi in mmi_range:

            # Identify cells where MMI is in class i
            mask = (H > mmi - 0.5) * (H <= mmi + 0.5)

            # Count population affected by this shake level
            I = numpy.where(mask, P, 0)

            # Calculate expected number of fatalities per level
            fatality_rate = numpy.power(10.0, x * mmi - y)
            F = fatality_rate * I

            # Calculate expected number of displaced people per level
            try:
                D = displacement_rate[mmi] * I
            except KeyError, e:
                msg = 'mmi = %i, I = %s, Error msg: %s' % (mmi, str(I), str(e))
                raise InaSAFEError(msg)

            # Adjust displaced people to disregard fatalities.
            # Set to zero if there are more fatalities than displaced.
            D = numpy.where(D > F, D - F, 0)

            # Sum up numbers for map
            R += D  # Displaced

            # Generate text with result for this study
            # This is what is used in the real time system exposure table
            number_of_exposed[mmi] = numpy.nansum(I.flat)
            number_of_displaced[mmi] = numpy.nansum(D.flat)
            number_of_fatalities[mmi] = numpy.nansum(F.flat)

        # Set resulting layer to NaN when less than a threshold. This is to
        # achieve transparency (see issue #126).
        R[R < tolerance] = numpy.nan

        # Total statistics
        total = int(round(numpy.nansum(P.flat) / 1000) * 1000)

        # Compute number of fatalities
        fatalities = int(
            round(numpy.nansum(number_of_fatalities.values()) / 1000)) * 1000

        # Compute number of people displaced due to building collapse
        displaced = int(
            round(numpy.nansum(number_of_displaced.values()) / 1000)) * 1000

        # Generate impact report
        table_body = [question]

        # Add total fatality estimate
        s = str(int(fatalities)).rjust(10)
        table_body.append(
            TableRow([tr('Number of fatalities'), s], header=True))

        if self.parameters['calculate_displaced_people']:
            # Add total estimate of people displaced
            s = str(int(displaced)).rjust(10)
            table_body.append(
                TableRow([tr('Number of people displaced'), s], header=True))
        else:
            displaced = 0

        # Add estimate of total population in area
        s = str(int(total)).rjust(10)
        table_body.append(
            TableRow([tr('Total number of people'), s], header=True))

        # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
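        # (2.8 kg of rice and 17.5 l of drinking water per person per week;
        # one family kit per five people and one toilet per twenty people)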
        rice = displaced * 2.8
        drinking_water = displaced * 17.5
        water = displaced * 67
        family_kits = displaced / 5
        toilets = displaced / 20

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([tr('Fatalities'), '%i' % fatalities], header=True),
            TableRow([tr('People displaced'),
                      '%i' % displaced], header=True),
            TableRow(
                tr('Map shows density estimate of '
                   'displaced population')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), int(rice)],
            [tr('Drinking Water [l]'), int(drinking_water)],
            [tr('Clean Water [l]'), int(water)],
            [tr('Family Kits'), int(family_kits)],
            [tr('Toilets'), int(toilets)]
        ]
        impact_table = Table(table_body).toNewlineFreeString()

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        if fatalities > 0:
            table_body.append(
                tr('Are there enough victim identification '
                   'units available for %i people?') % fatalities)
        if displaced > 0:
            table_body.append(
                tr('Are there enough shelters and relief items '
                   'available for %i people?') % displaced)
            table_body.append(
                TableRow(
                    tr('If yes, where are they located and '
                       'how will we distribute them?')))
            table_body.append(
                TableRow(
                    tr('If no, where can we obtain '
                       'additional relief items from and '
                       'how will we transport them?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %i') % total,
            tr('People are considered to be displaced if '
               'they experience and survive a shake level '
               'of more than 5 on the MMI scale'),
            tr('Minimum needs are defined in BNPB '
               'regulation 7/2008')
        ])

        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(
            tr('Fatality model is from '
               'Institut Teknologi Bandung 2012.'))
        table_body.append(tr('Population numbers rounded to nearest 1000.'))

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary
        map_title = tr('Earthquake impact to population')

        # Create style info dynamically
        classes = numpy.linspace(numpy.nanmin(R.flat[:]),
                                 numpy.nanmax(R.flat[:]), 5)

        style_classes = [
            dict(colour='#EEFFEE',
                 quantity=classes[0],
                 transparency=100,
                 label=tr('%.2f people/cell') % classes[0]),
            dict(colour='#FFFF7F', quantity=classes[1], transparency=30),
            dict(colour='#E15500',
                 quantity=classes[2],
                 transparency=30,
                 label=tr('%.2f people/cell') % classes[2]),
            dict(colour='#E4001B', quantity=classes[3], transparency=30),
            dict(colour='#730000',
                 quantity=classes[4],
                 transparency=30,
                 label=tr('%.2f people/cell') % classes[4])
        ]
        style_info = dict(target_field=None, style_classes=style_classes)

        # Create new layer and return
        L = Raster(R,
                   projection=population.get_projection(),
                   geotransform=population.get_geotransform(),
                   keywords={
                       'impact_summary': impact_summary,
                       'total_population': total,
                       'total_fatalities': fatalities,
                       'impact_table': impact_table,
                       'map_title': map_title
                   },
                   name=tr('Estimated displaced population'),
                   style_info=style_info)

        # Maybe return a shape file with contours instead
        return L