Code Example #1
File: test_defaults.py  Project: neogis-de/inasafe
    def test_get_defaults(self):
        """Test we can get the defaults.
        """
        expected = {
            'ADULT_RATIO_KEY': 'adult ratio default',
            'ADULT_RATIO_ATTR_KEY': 'adult ratio attribute',
            'ADULT_RATIO': 0.659,
            'FEMALE_RATIO_KEY': 'female ratio default',
            'FEMALE_RATIO_ATTR_KEY': 'female ratio attribute',
            'FEMALE_RATIO': 0.5,
            'ELDERLY_RATIO_ATTR_KEY': 'elderly ratio attribute',
            'ELDERLY_RATIO_KEY': 'elderly ratio default',
            'ELDERLY_RATIO': 0.078,
            'YOUTH_RATIO': 0.263,
            'YOUTH_RATIO_ATTR_KEY': 'youth ratio attribute',
            'YOUTH_RATIO_KEY': 'youth ratio default',
            'NO_DATA': u'No data',
            'AGGR_ATTR_KEY': 'aggregation attribute',
            'ISO19115_EMAIL': '*****@*****.**',
            'ISO19115_LICENSE': 'Free use with accreditation',
            'ISO19115_ORGANIZATION': 'InaSAFE.org',
            'ISO19115_TITLE': 'InaSAFE analysis result',
            'ISO19115_URL': 'http://inasafe.org'
        }

        actual = get_defaults()
        self.maxDiff = None
        self.assertDictEqual(expected, actual)
Code Example #2
    def test_check_aggregation_single_attribute(self):
        """Aggregation attribute is chosen correctly when there is only
        one attr available."""
        layer_path = os.path.join(
            TESTDATA, 'kabupaten_jakarta_singlepart_1_good_attr.shp')
        file_list = [layer_path]
        # add additional layers
        load_layers(file_list, clear_flag=False)
        attribute_key = get_defaults('AGGR_ATTR_KEY')

        # with 1 good aggregation attribute using
        # kabupaten_jakarta_singlepart_1_good_attr.shp
        result, message = setup_scenario(
            DOCK,
            hazard='A flood in Jakarta like in 2007',
            exposure='People',
            function_id='Flood Evacuation Function',
            aggregation_layer='kabupaten jakarta singlepart 1 good attr')
        set_jakarta_extent(dock=DOCK)
        assert result, message
        # Press RUN
        # noinspection PyCallByClass,PyTypeChecker
        DOCK.accept()
        DOCK.runtime_keywords_dialog.accept()
        print attribute_key
        print DOCK.analysis.aggregator.attributes
        attribute = DOCK.analysis.aggregator.attributes[attribute_key]
        message = (
            'The aggregation should be KAB_NAME. Found: %s' % attribute)
        self.assertEqual(attribute, 'KAB_NAME', message)
Code Example #3
    def setup(self, params):
        """Concrete implementation to ensure needed parameters are initialized.

        :param params: Dict of parameters to pass to the post processor.
        :type params: dict

        """
        AbstractPostprocessor.setup(self, None)
        if self.impact_total is not None:
            self._raise_error('clear needs to be called before setup')

        self.impact_total = params['impact_total']
        try:
            # either all 3 ratios are custom set or we use defaults
            self.youth_ratio = params['youth_ratio']
            self.adult_ratio = params['adult_ratio']
            self.elderly_ratio = params['elderly_ratio']

            ratios_total = (self.youth_ratio + self.adult_ratio +
                            self.elderly_ratio)
            if ratios_total > 1:
                self._raise_error('Age ratios should sum up to 1. Found: '
                                  '%s + %s + %s = %s ' %
                                  (self.youth_ratio, self.adult_ratio,
                                   self.elderly_ratio, ratios_total))
        except KeyError:
            self._log_message('either all 3 age ratios are custom set or we'
                              ' use defaults')
            defaults = get_defaults()
            self.youth_ratio = defaults['YOUTH_RATIO']
            self.adult_ratio = defaults['ADULT_RATIO']
            self.elderly_ratio = defaults['ELDERLY_RATIO']
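
A short standalone sketch of the pattern used above may help: either all three
age ratios come from params, or the first missing key makes all of them fall
back to get_defaults(). The resolve_age_ratios() helper and its DEFAULTS dict
below are illustrative only, not part of InaSAFE; the default values are the
ones shown in the test_get_defaults() examples.

DEFAULTS = {'YOUTH_RATIO': 0.263, 'ADULT_RATIO': 0.659, 'ELDERLY_RATIO': 0.078}


def resolve_age_ratios(params):
    """Return (youth, adult, elderly) ratios from params or the defaults."""
    try:
        # Either all three ratios are custom set...
        ratios = (params['youth_ratio'],
                  params['adult_ratio'],
                  params['elderly_ratio'])
    except KeyError:
        # ...or any missing key means we use the defaults for all of them.
        ratios = (DEFAULTS['YOUTH_RATIO'],
                  DEFAULTS['ADULT_RATIO'],
                  DEFAULTS['ELDERLY_RATIO'])
    if sum(ratios) > 1:
        raise ValueError(
            'Age ratios should sum up to 1. Found: %s' % sum(ratios))
    return ratios


print(resolve_age_ratios({'youth_ratio': 0.2, 'adult_ratio': 0.7,
                          'elderly_ratio': 0.1}))   # -> (0.2, 0.7, 0.1)
print(resolve_age_ratios({'impact_total': 1000}))   # -> the defaults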
Code Example #4
    def test_check_aggregation_single_attribute(self):
        """Aggregation attribute is chosen correctly when there is only
        one attr available."""
        layer_path = os.path.join(
            TESTDATA, 'kabupaten_jakarta_singlepart_1_good_attr.shp')
        file_list = [layer_path]
        # add additional layers
        load_layers(file_list, clear_flag=False)
        attribute_key = get_defaults('AGGR_ATTR_KEY')

        # with 1 good aggregation attribute using
        # kabupaten_jakarta_singlepart_1_good_attr.shp
        result, message = setup_scenario(
            DOCK,
            hazard='Continuous Flood',
            exposure='Population',
            function_id='FloodEvacuationRasterHazardFunction',
            aggregation_layer='kabupaten jakarta singlepart 1 good attr')
        set_jakarta_extent(dock=DOCK)
        assert result, message
        # Press RUN
        # noinspection PyCallByClass,PyTypeChecker
        DOCK.accept()
        print attribute_key
        print DOCK.analysis.aggregator.attributes
        attribute = DOCK.analysis.aggregator.attributes[attribute_key]
        message = (
            'The aggregation should be KAB_NAME. Found: %s' % attribute)
        self.assertEqual(attribute, 'KAB_NAME', message)
Code Example #5
    def test_check_aggregation_no_attributes(self):
        """Aggregation attribute chosen correctly when no attr available."""
        layer_path = os.path.join(
            TESTDATA, 'kabupaten_jakarta_singlepart_0_good_attr.shp')
        file_list = [layer_path]
        # add additional layers
        load_layers(file_list, clear_flag=False)
        attribute_key = get_defaults('AGGR_ATTR_KEY')
        # with no good aggregation attribute using
        # kabupaten_jakarta_singlepart_0_good_attr.shp
        result, message = setup_scenario(
            self.DOCK,
            hazard='Continuous Flood',
            exposure='Population',
            function_id='FloodEvacuationRasterHazardFunction',
            aggregation_layer='kabupaten jakarta singlepart 0 good attr')
        set_jakarta_extent(dock=self.DOCK)
        self.assertTrue(result, message)
        # Press RUN
        self.DOCK.accept()
        aggregator = self.DOCK.impact_function.aggregator
        attribute = aggregator.attributes[attribute_key]
        message = (
            'The aggregation should be None. Found: %s' % attribute)
        self.assertIsNone(attribute, message)
Code Example #6
    def setUp(self):
        """Fixture run before all tests"""
        register_impact_functions()
        self.maxDiff = None  # show full diff for assert errors
        os.environ['LANG'] = 'en'
        self.DOCK.show_only_visible_layers_flag = True
        load_standard_layers(self.DOCK)
        self.DOCK.cboHazard.setCurrentIndex(0)
        self.DOCK.cboExposure.setCurrentIndex(0)
        self.DOCK.cboFunction.setCurrentIndex(0)
        self.DOCK.run_in_thread_flag = False
        self.DOCK.show_only_visible_layers_flag = False
        self.DOCK.set_layer_from_title_flag = False
        self.DOCK.zoom_to_impact_flag = False
        self.DOCK.hide_exposure_flag = False
        self.DOCK.show_intermediate_layers = False
        set_jakarta_extent()

        self._keywordIO = KeywordIO()
        self._defaults = get_defaults()

        # Set extent as Jakarta extent
        geo_crs = QgsCoordinateReferenceSystem()
        geo_crs.createFromSrid(4326)
        self.extent = extent_to_geo_array(CANVAS.extent(), geo_crs)
Code Example #7
    def setup(self, params):
        """concrete implementation it takes care of the needed parameters being
         initialized

        Args:
            params: dict of parameters to pass to the post processor
        Returns:
            None
        Raises:
            None
        """
        AbstractPostprocessor.setup(self, None)
        if self.impact_total is not None:
            self._raise_error('clear needs to be called before setup')

        self.impact_total = params['impact_total']
        try:
            # either all 3 ratios are custom set or we use defaults
            self.youth_ratio = params['youth_ratio']
            self.adult_ratio = params['adult_ratio']
            self.elder_ratio = params['elder_ratio']
        except KeyError:
            self._log_message('either all 3 age ratios are custom set or we'
                              ' use defaults')
            defaults = get_defaults()
            self.youth_ratio = defaults['YOUTH_RATIO']
            self.adult_ratio = defaults['ADULT_RATIO']
            self.elder_ratio = defaults['ELDER_RATIO']
Code Example #8
    def test_check_aggregation_single_attribute(self):
        """Aggregation attribute is chosen correctly when there is only
        one attr available."""
        layer_path = os.path.join(
            TESTDATA, 'kabupaten_jakarta_singlepart_1_good_attr.shp')
        file_list = [layer_path]
        # add additional layers
        load_layers(file_list, clear_flag=False)
        attribute_key = get_defaults('AGGR_ATTR_KEY')

        # with 1 good aggregation attribute using
        # kabupaten_jakarta_singlepart_1_good_attr.shp
        result, message = setup_scenario(
            self.DOCK,
            hazard='Continuous Flood',
            exposure='Population',
            function_id='FloodEvacuationRasterHazardFunction',
            aggregation_layer='kabupaten jakarta singlepart 1 good attr')
        set_jakarta_extent(dock=self.DOCK)
        assert result, message
        # Press RUN
        # noinspection PyCallByClass,PyTypeChecker
        self.DOCK.accept()
        aggregator = self.DOCK.impact_function.aggregator
        attribute = aggregator.attributes[attribute_key]
        message = (
            'The aggregation should be KAB_NAME. Found: %s' % attribute)
        self.assertEqual(attribute, 'KAB_NAME', message)
Code Example #9
    def test_get_defaults(self):
        """Test we can get the defaults.
        """
        expected = {
            'ADULT_RATIO_KEY': 'adult ratio default',
            'ADULT_RATIO_ATTR_KEY': 'adult ratio attribute',
            'ADULT_RATIO': 0.659,

            'FEMALE_RATIO_KEY': 'female ratio default',
            'FEMALE_RATIO_ATTR_KEY': 'female ratio attribute',
            'FEMALE_RATIO': 0.5,

            'ELDERLY_RATIO_ATTR_KEY': 'elderly ratio attribute',
            'ELDERLY_RATIO_KEY': 'elderly ratio default',
            'ELDERLY_RATIO': 0.078,

            'YOUTH_RATIO': 0.263,
            'YOUTH_RATIO_ATTR_KEY': 'youth ratio attribute',
            'YOUTH_RATIO_KEY': 'youth ratio default',

            'NO_DATA': u'No data',

            'AGGR_ATTR_KEY': 'aggregation attribute',
            'ISO19115_EMAIL': '*****@*****.**',
            'ISO19115_LICENSE': 'Free use with accreditation',
            'ISO19115_ORGANIZATION': 'InaSAFE.org',
            'ISO19115_TITLE': 'InaSAFE analysis result',
            'ISO19115_URL': 'http://inasafe.org'}

        actual = get_defaults()
        self.maxDiff = None
        self.assertDictEqual(expected, actual)
Code Example #10
    def test_check_aggregation_no_attributes(self):
        """Aggregation attribute chosen correctly when no attr available."""
        layer_path = os.path.join(
            TESTDATA, 'kabupaten_jakarta_singlepart_0_good_attr.shp')
        file_list = [layer_path]
        # add additional layers
        load_layers(file_list, clear_flag=False)
        attribute_key = get_defaults('AGGR_ATTR_KEY')
        # with no good aggregation attribute using
        # kabupaten_jakarta_singlepart_0_good_attr.shp
        result, message = setup_scenario(
            DOCK,
            hazard='A flood in Jakarta like in 2007',
            exposure='People',
            function_id='Flood Evacuation Function',
            aggregation_layer='kabupaten jakarta singlepart 0 good attr')
        set_jakarta_extent(dock=DOCK)
        assert result, message
        # Press RUN
        DOCK.accept()
        DOCK.runtime_keywords_dialog.accept()
        attribute = DOCK.analysis.aggregator.attributes[attribute_key]
        message = (
            'The aggregation should be None. Found: %s' % attribute)
        assert attribute is None, message
Code Example #11
    def setUp(self):
        """Fixture run before all tests"""
        register_impact_functions()
        self.maxDiff = None  # show full diff for assert errors
        os.environ['LANG'] = 'en'
        DOCK.show_only_visible_layers_flag = True
        load_standard_layers(DOCK)
        DOCK.cboHazard.setCurrentIndex(0)
        DOCK.cboExposure.setCurrentIndex(0)
        DOCK.cboFunction.setCurrentIndex(0)
        DOCK.run_in_thread_flag = False
        DOCK.show_only_visible_layers_flag = False
        DOCK.set_layer_from_title_flag = False
        DOCK.zoom_to_impact_flag = False
        DOCK.hide_exposure_flag = False
        DOCK.show_intermediate_layers = False
        set_jakarta_extent()

        self._keywordIO = KeywordIO()
        self._defaults = get_defaults()

        # Set extent as Jakarta extent
        geo_crs = QgsCoordinateReferenceSystem()
        geo_crs.createFromSrid(4326)
        self.extent = extent_to_geo_array(CANVAS.extent(), geo_crs)
Code Example #12
File: test_defaults.py  Project: Mloweedgar/inasafe
    def test_get_defaults(self):
        """Test we can get the defaults.
        """
        expected = {
            "ADULT_RATIO_KEY": "adult ratio default",
            "ADULT_RATIO_ATTR_KEY": "adult ratio attribute",
            "ADULT_RATIO": 0.659,
            "FEMALE_RATIO_KEY": "female ratio default",
            "FEMALE_RATIO_ATTR_KEY": "female ratio attribute",
            "FEMALE_RATIO": 0.5,
            "ELDERLY_RATIO_ATTR_KEY": "elderly ratio attribute",
            "ELDERLY_RATIO_KEY": "elderly ratio default",
            "ELDERLY_RATIO": 0.078,
            "YOUTH_RATIO": 0.263,
            "YOUTH_RATIO_ATTR_KEY": "youth ratio attribute",
            "YOUTH_RATIO_KEY": "youth ratio default",
            "NO_DATA": u"No data",
            "AGGR_ATTR_KEY": "aggregation attribute",
            "ISO19115_EMAIL": "*****@*****.**",
            "ISO19115_LICENSE": "Free use with accreditation",
            "ISO19115_ORGANIZATION": "InaSAFE.org",
            "ISO19115_TITLE": "InaSAFE analysis result",
            "ISO19115_URL": "http://inasafe.org",
        }

        actual = get_defaults()
        self.maxDiff = None
        self.assertDictEqual(expected, actual)
Code Example #13
    def test_on_dsb_female_ratio_default_value_changed(self):
        """Test hazard radio button toggle behaviour works"""
        layer = clone_shp_layer(name='district_osm_jakarta',
                                include_keywords=True,
                                source_directory=test_data_path('boundaries'))
        defaults = get_defaults()
        dialog = KeywordsDialog(PARENT, IFACE, layer=layer)
        button = dialog.radPostprocessing
        button.setChecked(False)
        button.click()
        female_ratio_box = dialog.cboFemaleRatioAttribute

        # set to Don't use
        index = female_ratio_box.findText(dialog.do_not_use_string)
        message = (dialog.do_not_use_string + ' not found')
        self.assertNotEqual(index, -1, message)
        female_ratio_box.setCurrentIndex(index)

        message = ('Toggling the female ratio attribute combo to'
                   ' "Don\'t use" did not add it to the keywords list.')
        self.assertEqual(
            dialog.get_value_for_key(defaults['FEMALE_RATIO_ATTR_KEY']),
            dialog.do_not_use_string, message)

        message = ('Toggling the female ratio attribute combo to'
                   ' "Don\'t use" did not disable dsbFemaleRatioDefault.')
        is_enabled = dialog.dsbFemaleRatioDefault.isEnabled()
        assert not is_enabled, message

        message = ('Toggling the female ratio attribute combo to'
                   ' "Don\'t use" did not remove the keyword.')
        assert (dialog.get_value_for_key(defaults['FEMALE_RATIO']) is None), \
            message

        # set to PEREMPUAN attribute
        attribute = 'PEREMPUAN'
        index = female_ratio_box.findText(attribute)
        message = 'The attribute %s is not found in the layer' % attribute
        self.assertNotEqual(index, -1, message)

        female_ratio_box.setCurrentIndex(index)
        message = ('Toggling the female ratio attribute combo to %s'
                   ' did not add it to the keywords list.') % attribute
        self.assertEqual(
            dialog.get_value_for_key(defaults['FEMALE_RATIO_ATTR_KEY']),
            attribute, message)

        message = ('Toggling the female ratio attribute combo to %s'
                   ' did not disable dsbFemaleRatioDefault.') % attribute
        is_enabled = dialog.dsbFemaleRatioDefault.isEnabled()
        self.assertFalse(is_enabled, message)

        message = ('Toggling the female ratio attribute combo to %s'
                   ' did not remove the keyword.') % attribute
        self.assertIsNone(dialog.get_value_for_key(defaults['FEMALE_RATIO']),
                          message)
Code Example #14
    def set_widgets(self):
        """Set widgets on the aggregation tab."""
        # Set values based on existing keywords (if already assigned)
        defaults = get_defaults()

        female_ratio_default = self.parent.get_existing_keyword(
            female_ratio_default_key)
        if female_ratio_default:
            self.dsbFemaleRatioDefault.setValue(
                float(female_ratio_default))
        else:
            self.dsbFemaleRatioDefault.setValue(defaults['FEMALE_RATIO'])

        youth_ratio_default = self.parent.get_existing_keyword(
            youth_ratio_default_key)
        if youth_ratio_default:
            self.dsbYouthRatioDefault.setValue(float(youth_ratio_default))
        else:
            self.dsbYouthRatioDefault.setValue(defaults['YOUTH_RATIO'])

        adult_ratio_default = self.parent.get_existing_keyword(
            adult_ratio_default_key)
        if adult_ratio_default:
            self.dsbAdultRatioDefault.setValue(float(adult_ratio_default))
        else:
            self.dsbAdultRatioDefault.setValue(defaults['ADULT_RATIO'])

        elderly_ratio_default = self.parent.get_existing_keyword(
            elderly_ratio_default_key)
        if elderly_ratio_default:
            self.dsbElderlyRatioDefault.setValue(float(elderly_ratio_default))
        else:
            self.dsbElderlyRatioDefault.setValue(
                defaults['ELDERLY_RATIO'])

        ratio_attribute_keys = [
            female_ratio_attribute_key,
            youth_ratio_attribute_key,
            adult_ratio_attribute_key,
            elderly_ratio_attribute_key]

        cbo_ratio_attributes = [
            self.cboFemaleRatioAttribute,
            self.cboYouthRatioAttribute,
            self.cboAdultRatioAttribute,
            self.cboElderlyRatioAttribute]

        for i in range(len(cbo_ratio_attributes)):
            self.populate_cbo_aggregation_attribute(
                ratio_attribute_keys[i], cbo_ratio_attributes[i])
Code Example #15
File: options_dialog.py  Project: dynaryu/inasafe
    def __init__(self, iface, dock=None, parent=None):
        """Constructor for the dialog.

        :param iface: A Quantum GIS QGisAppInterface instance.
        :type iface: QGisAppInterface

        :param parent: Parent widget of this dialog
        :type parent: QWidget

        :param dock: Optional dock widget instance that we can notify of
            changes to the keywords.
        :type dock: Dock
        """

        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setWindowTitle(self.tr('InaSAFE %s Options' % get_version()))
        # Save reference to the QGIS interface and parent
        self.iface = iface
        self.parent = parent
        self.dock = dock
        self.keyword_io = KeywordIO()
        self.defaults = get_defaults()

        # Set up things for context help
        self.help_button = self.button_box.button(QtGui.QDialogButtonBox.Help)
        # Allow toggling the help button
        self.help_button.setCheckable(True)
        self.help_button.toggled.connect(self.help_toggled)
        self.main_stacked_widget.setCurrentIndex(1)

        self.grpNotImplemented.hide()
        self.adjustSize()
        self.restore_state()
        # Hack: keep the 'use thread' checkbox checked but hidden, see #557
        self.cbxUseThread.setChecked(True)
        self.cbxUseThread.setVisible(False)

        # Set up listener for various UI
        self.custom_org_logo_checkbox.toggled.connect(
            self.set_organisation_logo)
        self.custom_north_arrow_checkbox.toggled.connect(self.set_north_arrow)
        self.custom_UseUserDirectory_checkbox.toggled.connect(
            self.set_user_dir)
        self.custom_templates_dir_checkbox.toggled.connect(
            self.set_templates_dir)
        self.custom_org_disclaimer_checkbox.toggled.connect(
            self.set_org_disclaimer)
Code Example #16
File: options_dialog.py  Project: vck/inasafe
    def __init__(self, iface, dock=None, parent=None):
        """Constructor for the dialog.

        :param iface: A Quantum GIS QGisAppInterface instance.
        :type iface: QGisAppInterface

        :param parent: Parent widget of this dialog
        :type parent: QWidget

        :param dock: Optional dock widget instance that we can notify of
            changes to the keywords.
        :type dock: Dock
        """

        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setWindowTitle(self.tr('InaSAFE %s Options' % get_version()))
        # Save reference to the QGIS interface and parent
        self.iface = iface
        self.parent = parent
        self.dock = dock
        self.keyword_io = KeywordIO()
        self.defaults = get_defaults()

        # Set up things for context help
        self.help_button = self.button_box.button(QtGui.QDialogButtonBox.Help)
        # Allow toggling the help button
        self.help_button.setCheckable(True)
        self.help_button.toggled.connect(self.help_toggled)
        self.main_stacked_widget.setCurrentIndex(1)

        self.grpNotImplemented.hide()
        self.adjustSize()
        self.restore_state()
        # Hack: keep the 'use thread' checkbox checked but hidden, see #557
        self.cbxUseThread.setChecked(True)
        self.cbxUseThread.setVisible(False)

        # Set up listener for various UI
        self.custom_org_logo_checkbox.toggled.connect(
            self.set_organisation_logo)
        self.custom_north_arrow_checkbox.toggled.connect(self.set_north_arrow)
        self.custom_UseUserDirectory_checkbox.toggled.connect(
            self.set_user_dir)
        self.custom_templates_dir_checkbox.toggled.connect(
            self.set_templates_dir)
        self.custom_org_disclaimer_checkbox.toggled.connect(
            self.set_org_disclaimer)
Code Example #17
    def set_widgets(self):
        """Set widgets on the aggregation tab."""
        # Set values based on existing keywords (if already assigned)
        defaults = get_defaults()

        female_ratio_default = self.parent.get_existing_keyword(
            female_ratio_default_key)
        if female_ratio_default:
            self.dsbFemaleRatioDefault.setValue(float(female_ratio_default))
        else:
            self.dsbFemaleRatioDefault.setValue(defaults['FEMALE_RATIO'])

        youth_ratio_default = self.parent.get_existing_keyword(
            youth_ratio_default_key)
        if youth_ratio_default:
            self.dsbYouthRatioDefault.setValue(float(youth_ratio_default))
        else:
            self.dsbYouthRatioDefault.setValue(defaults['YOUTH_RATIO'])

        adult_ratio_default = self.parent.get_existing_keyword(
            adult_ratio_default_key)
        if adult_ratio_default:
            self.dsbAdultRatioDefault.setValue(float(adult_ratio_default))
        else:
            self.dsbAdultRatioDefault.setValue(defaults['ADULT_RATIO'])

        elderly_ratio_default = self.parent.get_existing_keyword(
            elderly_ratio_default_key)
        if elderly_ratio_default:
            self.dsbElderlyRatioDefault.setValue(float(elderly_ratio_default))
        else:
            self.dsbElderlyRatioDefault.setValue(defaults['ELDERLY_RATIO'])

        ratio_attribute_keys = [
            female_ratio_attribute_key, youth_ratio_attribute_key,
            adult_ratio_attribute_key, elderly_ratio_attribute_key
        ]

        cbo_ratio_attributes = [
            self.cboFemaleRatioAttribute, self.cboYouthRatioAttribute,
            self.cboAdultRatioAttribute, self.cboElderlyRatioAttribute
        ]

        for i in range(len(cbo_ratio_attributes)):
            self.populate_cbo_aggregation_attribute(ratio_attribute_keys[i],
                                                    cbo_ratio_attributes[i])
Code Example #18
    def test_on_rad_postprocessing_toggled(self):
        """Test postprocessing radio button toggle behaviour works"""
        layer = clone_shp_layer(
            name='district_osm_jakarta',
            include_keywords=True,
            source_directory=test_data_path('boundaries'))
        defaults = get_defaults()
        dialog = KeywordsDialog(PARENT, IFACE, layer=layer)
        # Click hazard/exposure button first so that it won't take default
        # from keywords file
        dialog.radExposure.click()

        # Now click the postprocessing button
        button = dialog.radPostprocessing
        button.click()
        message = (
            'Toggling the postprocessing radio did not add a '
            'category to the keywords list.')
        self.assertEqual(
            dialog.get_value_for_key('category'), 'postprocessing', message)

        message = (
            'Toggling the postprocessing radio did not add an '
            'aggregation attribute to the keywords list.')
        self.assertEqual(
            dialog.get_value_for_key(defaults['AGGR_ATTR_KEY']),
            'KAB_NAME',
            message)

        message = (
            'Toggling the postprocessing radio did not add a '
            'female ratio attribute to the keywords list.')
        self.assertEqual(
            dialog.get_value_for_key(defaults['FEMALE_RATIO_ATTR_KEY']),
            dialog.global_default_string,
            message)

        message = (
            'Toggling the postprocessing radio did not add a '
            'female ratio default value to the keywords list.')
        self.assertEqual(
            float(dialog.get_value_for_key(defaults['FEMALE_RATIO_KEY'])),
            defaults['FEMALE_RATIO'],
            message)
Code Example #19
File: test_aggregator.py  Project: vck/inasafe
    def test_aggregation_attribute_in_keywords(self):
        """Aggregation attribute is chosen correctly when present in keywords.
        """
        attribute_key = get_defaults('AGGR_ATTR_KEY')

        # with KAB_NAME aggregation attribute defined in .keyword using
        # kabupaten_jakarta_singlepart.shp
        result, message = setup_scenario(
            self.DOCK,
            hazard='Continuous Flood',
            exposure='Population',
            function_id='FloodEvacuationRasterHazardFunction',
            aggregation_layer=u"Dístríct's of Jakarta",
            aggregation_enabled_flag=True)
        set_jakarta_extent(dock=self.DOCK)
        assert result, message
        # Press RUN
        self.DOCK.accept()
        attribute = self.DOCK.analysis.aggregator.attributes[attribute_key]
        message = ('The aggregation should be KAB_NAME. Found: %s' % attribute)
        self.assertEqual(attribute, 'KAB_NAME', message)
Code Example #20
File: test_clipper.py  Project: sopac/inasafe
    def test_clip_vector_with_unicode(self):
        """Test clipping vector layer with unicode attribute in feature.

        This issue is described at Github #2262 and #2233
        TODO: FIXME:
        This is a hacky fix. See above ticket for further explanation.
        To fix this, we should be able to specify UTF-8 encoding for
        QgsVectorFileWriter
        """
        # this layer contains unicode values in its attributes
        layer_path = standard_data_path('boundaries',
                                        'district_osm_jakarta.shp')
        vector_layer = QgsVectorLayer(layer_path, 'District Jakarta', 'ogr')
        keyword_io = KeywordIO()
        aggregation_keyword = get_defaults()['AGGR_ATTR_KEY']
        aggregation_attribute = keyword_io.read_keywords(
            vector_layer, keyword=aggregation_keyword)
        source_extent = vector_layer.extent()
        extent = [
            source_extent.xMinimum(),
            source_extent.yMinimum(),
            source_extent.xMaximum(),
            source_extent.yMaximum()
        ]
        clipped_layer = clip_layer(layer=vector_layer,
                                   extent=extent,
                                   explode_flag=True,
                                   explode_attribute=aggregation_attribute)

        # cross check vector layer attribute in clipped layer
        vector_values = []
        for f in vector_layer.getFeatures():
            vector_values.append(f.attributes())

        clipped_values = []
        for f in clipped_layer.getFeatures():
            clipped_values.append(f.attributes())

        for val in clipped_values:
            self.assertIn(val, vector_values)
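
The docstring above notes that the proper fix is to write the layer out with
an explicit UTF-8 encoding. A minimal sketch of doing that with
QgsVectorFileWriter follows; it assumes the QGIS 2.x Python API, and the input
and output paths are placeholders rather than files from the test data set.

from qgis.core import QgsVectorFileWriter, QgsVectorLayer

layer = QgsVectorLayer('district_osm_jakarta.shp', 'District Jakarta', 'ogr')
error = QgsVectorFileWriter.writeAsVectorFormat(
    layer,               # layer to write out
    'clipped_utf8.shp',  # output path (placeholder)
    'UTF-8',             # file encoding - the point of the fix
    layer.crs(),         # keep the source CRS
    'ESRI Shapefile')    # OGR driver name
assert error == QgsVectorFileWriter.NoError, 'Writing the layer failed'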
Code Example #21
    def test_aggregation_attribute_in_keywords(self):
        """Aggregation attribute is chosen correctly when present in keywords.
        """
        attribute_key = get_defaults('AGGR_ATTR_KEY')

        # with KAB_NAME aggregation attribute defined in .keyword using
        # kabupaten_jakarta_singlepart.shp
        result, message = setup_scenario(
            DOCK,
            hazard='A flood in Jakarta like in 2007',
            exposure='People',
            function_id='Flood Evacuation Function',
            aggregation_layer='kabupaten jakarta singlepart',
            aggregation_enabled_flag=True)
        set_jakarta_extent(dock=DOCK)
        assert result, message
        # Press RUN
        DOCK.accept()
        DOCK.runtime_keywords_dialog.accept()
        attribute = DOCK.analysis.aggregator.attributes[attribute_key]
        message = ('The aggregation should be KAB_NAME. Found: %s' % attribute)
        self.assertEqual(attribute, 'KAB_NAME', message)
Code Example #22
File: test_aggregator.py  Project: vck/inasafe
    def test_check_aggregation_none_in_keywords(self):
        """Aggregation attribute is chosen correctly when None in keywords."""
        layer_path = os.path.join(
            TESTDATA, 'kabupaten_jakarta_singlepart_with_None_keyword.shp')
        file_list = [layer_path]
        # add additional layers
        load_layers(file_list, clear_flag=False)
        attribute_key = get_defaults('AGGR_ATTR_KEY')
        # with None aggregation attribute defined in .keyword using
        # kabupaten_jakarta_singlepart_with_None_keyword.shp
        result, message = setup_scenario(
            self.DOCK,
            hazard='Continuous Flood',
            exposure='Population',
            function_id='FloodEvacuationRasterHazardFunction',
            aggregation_layer='kabupaten jakarta singlepart with None keyword')
        set_jakarta_extent(dock=self.DOCK)
        assert result, message
        # Press RUN
        self.DOCK.accept()
        attribute = self.DOCK.analysis.aggregator.attributes[attribute_key]
        message = ('The aggregation should be None. Found: %s' % attribute)
        assert attribute is None, message
Code Example #23
File: test_clipper.py  Project: easmetz/inasafe
    def test_clip_vector_with_unicode(self):
        """Test clipping vector layer with unicode attribute in feature.

        This issue is described at Github #2262 and #2233
        TODO: FIXME:
        This is a hacky fix. See above ticket for further explanation.
        To fix this, we should be able to specify UTF-8 encoding for
        QgsVectorFileWriter
        """
        # this layer contains unicode values in its attributes
        layer_path = standard_data_path(
            'boundaries', 'district_osm_jakarta.shp')
        vector_layer = QgsVectorLayer(layer_path, 'District Jakarta', 'ogr')
        keyword_io = KeywordIO()
        aggregation_keyword = get_defaults()['AGGR_ATTR_KEY']
        aggregation_attribute = keyword_io.read_keywords(
            vector_layer, keyword=aggregation_keyword)
        source_extent = vector_layer.extent()
        extent = [source_extent.xMinimum(), source_extent.yMinimum(),
                  source_extent.xMaximum(), source_extent.yMaximum()]
        clipped_layer = clip_layer(
            layer=vector_layer,
            extent=extent,
            explode_flag=True,
            explode_attribute=aggregation_attribute)

        # cross check vector layer attribute in clipped layer
        vector_values = []
        for f in vector_layer.getFeatures():
            vector_values.append(f.attributes())

        clipped_values = []
        for f in clipped_layer.getFeatures():
            clipped_values.append(f.attributes())

        for val in clipped_values:
            self.assertIn(val, vector_values)
Code Example #24
File: test_aggregator.py  Project: sopac/inasafe
    def test_check_aggregation_no_attributes(self):
        """Aggregation attribute chosen correctly when no attr available."""
        layer_path = os.path.join(
            TESTDATA, 'kabupaten_jakarta_singlepart_0_good_attr.shp')
        file_list = [layer_path]
        # add additional layers
        load_layers(file_list, clear_flag=False)
        attribute_key = get_defaults('AGGR_ATTR_KEY')
        # with no good aggregation attribute using
        # kabupaten_jakarta_singlepart_0_good_attr.shp
        result, message = setup_scenario(
            self.DOCK,
            hazard='Continuous Flood',
            exposure='Population',
            function_id='FloodEvacuationRasterHazardFunction',
            aggregation_layer='kabupaten jakarta singlepart 0 good attr')
        set_jakarta_extent(dock=self.DOCK)
        self.assertTrue(result, message)
        # Press RUN
        self.DOCK.accept()
        aggregator = self.DOCK.impact_function.aggregator
        attribute = aggregator.attributes[attribute_key]
        message = ('The aggregation should be None. Found: %s' % attribute)
        self.assertIsNone(attribute, message)
Code Example #25
def generate_iso_metadata(keywords=None):
    """Make a valid ISO 19115 XML using the values of safe.get_defaults

    This method will create XML based on the iso_19115_template.py template
    The $placeholders there will be replaced by the values returned from
    safe.defaults.get_defaults. Note that get_defaults takes care of using the
    values set in QGIS settings if available.

    :param keywords: The metadata keywords to write (which should be
            provided as a dict of key value pairs).
    :type keywords: dict

    :return: str valid XML
    """

    # get defaults from settings
    template_replacements = copy.copy(get_defaults())

    # create runtime based replacement values
    template_replacements['ISO19115_TODAY_DATE'] = time.strftime("%Y-%m-%d")
    if keywords is not None:
        template_replacements['INASAFE_KEYWORDS'] = '<![CDATA[%s]]>' % \
                                                    json.dumps(keywords)
        try:
            template_replacements['ISO19115_TITLE'] = keywords['title']
        except KeyError:
            pass
        try:
            template_replacements['ISO19115_LINEAGE'] = keywords['source']
        except KeyError:
            template_replacements['ISO19115_LINEAGE'] = ''

    else:
        template_replacements['INASAFE_KEYWORDS'] = ''

    return ISO_METADATA_XML_TEMPLATE.safe_substitute(template_replacements)
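
The substitution step relies on the standard library string.Template: every
$PLACEHOLDER in the XML template is replaced by the matching key from the
defaults, and safe_substitute() leaves unknown placeholders untouched instead
of raising. A tiny illustration with a made-up two-line template (the tags
below are not the real iso_19115_template.py content) follows.

from string import Template

template = Template(
    '<organisationName>$ISO19115_ORGANIZATION</organisationName>\n'
    '<dateStamp>$ISO19115_TODAY_DATE</dateStamp>')

replacements = {
    'ISO19115_ORGANIZATION': 'InaSAFE.org',
    'ISO19115_TODAY_DATE': '2015-01-01',
}

# Unknown placeholders (none here) would simply be left as-is in the output.
print(template.safe_substitute(replacements))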
Code Example #26
def generate_iso_metadata(keywords=None):
    """Make a valid ISO 19115 XML using the values of safe.get_defaults

    This method will create XML based on the iso_19115_template.py template
    The $placeholders there will be replaced by the values returned from
    safe.defaults.get_defaults. Note that get_defaults takes care of using the
    values set in QGIS settings if available.

    :param keywords: The metadata keywords to write (which should be
            provided as a dict of key value pairs).
    :type keywords: dict

    :return: str valid XML
    """

    # get defaults from settings
    template_replacements = copy.copy(get_defaults())

    # create runtime based replacement values
    template_replacements['ISO19115_TODAY_DATE'] = time.strftime("%Y-%m-%d")
    if keywords is not None:
        template_replacements['INASAFE_KEYWORDS'] = '<![CDATA[%s]]>' % \
                                                    json.dumps(keywords)
        try:
            template_replacements['ISO19115_TITLE'] = keywords['title']
        except KeyError:
            pass
        try:
            template_replacements['ISO19115_LINEAGE'] = keywords['source']
        except KeyError:
            template_replacements['ISO19115_LINEAGE'] = ''

    else:
        template_replacements['INASAFE_KEYWORDS'] = ''

    return ISO_METADATA_XML_TEMPLATE.safe_substitute(template_replacements)
Code Example #27
    def setup(self, params):
        """Concrete implementation to ensure needed parameters are initialized.

        :param params: Dict of parameters to pass to the post processor.
        :type params: dict

        """
        AbstractPostprocessor.setup(self, None)
        if self.impact_total is not None:
            self._raise_error('clear needs to be called before setup')

        self.impact_total = params['impact_total']
        try:
            # either all 3 ratios are custom set or we use defaults
            self.youth_ratio = params['youth_ratio']
            self.adult_ratio = params['adult_ratio']
            self.elderly_ratio = params['elderly_ratio']

            ratios_total = (self.youth_ratio +
                            self.adult_ratio +
                            self.elderly_ratio)
            if ratios_total > 1:
                self._raise_error(
                    'Age ratios should sum up to 1. Found: '
                    '%s + %s + %s = %s ' % (
                        self.youth_ratio,
                        self.adult_ratio,
                        self.elderly_ratio,
                        ratios_total))
        except KeyError:
            self._log_message('either all 3 age ratios are custom set or we'
                              ' use defaults')
            defaults = get_defaults()
            self.youth_ratio = defaults['YOUTH_RATIO']
            self.adult_ratio = defaults['ADULT_RATIO']
            self.elderly_ratio = defaults['ELDERLY_RATIO']
Code Example #28
File: keywords_dialog.py  Project: kant/inasafe
from safe.common.version import get_version
from safe.common.exceptions import InaSAFEError
from safe.defaults import get_defaults
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.help import show_context_help
from safe.utilities.utilities import get_error_message
from safe.utilities.gis import layer_attribute_names, is_polygon_layer
from safe.utilities.resources import get_ui_class
from safe.common.exceptions import (InvalidParameterError, HashNotFoundError,
                                    NoKeywordsFoundError)
from safe import definitions
from safe.utilities.unicode import get_unicode

# Aggregations' keywords
DEFAULTS = get_defaults()
female_ratio_attribute_key = DEFAULTS['FEMALE_RATIO_ATTR_KEY']
female_ratio_default_key = DEFAULTS['FEMALE_RATIO_KEY']
youth_ratio_attribute_key = DEFAULTS['YOUTH_RATIO_ATTR_KEY']
youth_ratio_default_key = DEFAULTS['YOUTH_RATIO_KEY']
adult_ratio_attribute_key = DEFAULTS['ADULT_RATIO_ATTR_KEY']
adult_ratio_default_key = DEFAULTS['ADULT_RATIO_KEY']
elderly_ratio_attribute_key = DEFAULTS['ELDERLY_RATIO_ATTR_KEY']
elderly_ratio_default_key = DEFAULTS['ELDERLY_RATIO_KEY']

LOGGER = logging.getLogger('InaSAFE')

FORM_CLASS = get_ui_class('keywords_dialog_base.ui')


class KeywordsDialog(QtGui.QDialog, FORM_CLASS):
Code Example #29
class FloodEvacuationFunction(FunctionProvider):
    """Impact function for flood evacuation

    :author AIFDR
    :rating 4
    :param requires category=='hazard' and \
                    subcategory in ['flood', 'tsunami'] and \
                    layertype=='raster' and \
                    unit=='m'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """

    title = tr('Need evacuation')
    defaults = get_defaults()

    # Function documentation
    synopsis = tr(
        'To assess the impacts of (flood or tsunami) inundation in raster '
        'format on population.')
    actions = tr(
        'Provide details about how many people would likely need to be '
        'evacuated, where they are located and what resources would be '
        'required to support them.')
    detailed_description = tr(
        'The population subject to inundation exceeding a threshold '
        '(default 1m) is calculated and returned as a raster layer. In '
        'addition the total number and the required needs in terms of the '
        'BNPB (Perka 7) are reported. The threshold can be changed and even '
        'contain multiple numbers in which case evacuation and needs are '
        'calculated using the largest number with population breakdowns '
        'provided for the smaller numbers. The population raster is resampled '
        'to the resolution of the hazard raster and is rescaled so that the '
        'resampled population counts reflect estimates of population count '
        'per resampled cell. The resulting impact layer has the same '
        'resolution and reflects population count per cell which are affected '
        'by inundation.')
    hazard_input = tr(
        'A hazard raster layer where each cell represents flood depth '
        '(in meters).')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr(
        'Raster layer contains population affected and the minimum needs '
        'based on the population affected.')
    limitation = tr(
        'The default threshold of 1 meter was selected based on consensus, '
        'not hard evidence.')

    # Configurable parameters
    # TODO: Share the minimum needs and make another default value
    parameters = OrderedDict([
        ('thresholds [m]', [1.0]),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elder_ratio', defaults['ELDER_RATIO'])])
             }),
             ('MinimumNeeds', {
                 'on': True
             }),
         ])), ('minimum needs', default_minimum_needs())
    ])

    def run(self, layers):
        """Risk plugin for flood population evacuation

        Input
          layers: List of layers expected to contain
              my_hazard: Raster layer of flood depth
              my_exposure: Raster layer of population data on the same grid
              as my_hazard

        Counts number of people exposed to flood levels exceeding
        specified threshold.

        Return
          Map of population exposed to flood levels exceeding the threshold
          Table with number of people evacuated and supplies required
        """

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Flood inundation [m]
        my_exposure = get_exposure_layer(layers)

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Determine depths above which people are regarded affected [m]
        # Use thresholds from inundation layer if specified
        thresholds = self.parameters['thresholds [m]']

        verify(isinstance(thresholds, list),
               'Expected thresholds to be a list. Got %s' % str(thresholds))

        # Extract data as numeric arrays
        D = my_hazard.get_data(nan=0.0)  # Depth

        # Calculate impact as population exposed to depths > max threshold
        P = my_exposure.get_data(nan=0.0, scaling=True)

        # Calculate impact to intermediate thresholds
        counts = []
        # merely initialize
        my_impact = None
        for i, lo in enumerate(thresholds):
            if i == len(thresholds) - 1:
                # The last threshold
                my_impact = M = numpy.where(D >= lo, P, 0)
            else:
                # Intermediate thresholds
                hi = thresholds[i + 1]
                M = numpy.where((D >= lo) * (D < hi), P, 0)

            # Count
            val = int(numpy.sum(M))

            # Don't show digits less than a 1000
            val = round_thousand(val)
            counts.append(val)

        # Count totals
        evacuated = counts[-1]
        total = int(numpy.sum(P))
        # Don't show digits less than a 1000
        total = round_thousand(total)

        # Calculate estimated minimum needs
        # The default value of each logistic is based on BNPB Perka 7/2008
        # minimum bantuan
        minimum_needs = self.parameters['minimum needs']

        tot_needs = evacuated_population_weekly_needs(evacuated, minimum_needs)

        # Generate impact report for the pdf map
        # noinspection PyListCreation
        table_body = [
            question,
            TableRow([(tr('People in %.1f m of water') % thresholds[-1]),
                      '%s%s' % (format_int(evacuated),
                                ('*' if evacuated >= 1000 else ''))],
                     header=True),
            TableRow(tr('* Number is rounded to the nearest 1000'),
                     header=False),
            TableRow(tr('Map shows population density needing evacuation')),
            TableRow(
                tr('Table below shows the weekly minimum needs for all '
                   'evacuated people')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), format_int(tot_needs['rice'])],
            [
                tr('Drinking Water [l]'),
                format_int(tot_needs['drinking_water'])
            ], [tr('Clean Water [l]'),
                format_int(tot_needs['water'])],
            [tr('Family Kits'),
             format_int(tot_needs['family_kits'])],
            [tr('Toilets'), format_int(tot_needs['toilets'])]
        ]

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(TableRow(tr('How will warnings be disseminated?')))
        table_body.append(TableRow(tr('How will we reach stranded people?')))
        table_body.append(TableRow(tr('Do we have enough relief items?')))
        table_body.append(
            TableRow(
                tr('If yes, where are they located and how '
                   'will we distribute them?')))
        table_body.append(
            TableRow(
                tr('If no, where can we obtain additional relief items from and how '
                   'will we transport them to here?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People need evacuation if flood levels exceed %(eps).1f m') % {
                'eps': thresholds[-1]
            },
            tr('Minimum needs are defined in BNPB regulation 7/2008'),
            tr('All values are rounded up to the nearest integer in order to '
               'avoid representing human lives as fractionals.')
        ])

        if len(counts) > 1:
            table_body.append(TableRow(tr('Detailed breakdown'), header=True))

            for i, val in enumerate(counts[:-1]):
                s = (tr('People in %(lo).1f m to %(hi).1f m of water: %(val)s')
                     % {
                         'lo': thresholds[i],
                         'hi': thresholds[i + 1],
                         'val': format_int(val)
                     })
                table_body.append(TableRow(s, header=False))

        # Result
        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # check for zero impact
        if numpy.nanmax(my_impact) == 0 == numpy.nanmin(my_impact):
            table_body = [
                question,
                TableRow([(tr('People in %.1f m of water') % thresholds[-1]),
                          '%s' % format_int(evacuated)],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        classes = create_classes(my_impact.flat[:], len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []

        for i in xrange(len(colours)):
            style_class = dict()
            if i == 1:
                label = create_label(interval_classes[i], 'Low')
            elif i == 4:
                label = create_label(interval_classes[i], 'Medium')
            elif i == 7:
                label = create_label(interval_classes[i], 'High')
            else:
                label = create_label(interval_classes[i])
            style_class['label'] = label
            style_class['quantity'] = classes[i]
            if i == 0:
                transparency = 100
            else:
                transparency = 0
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_classes.append(style_class)

        style_info = dict(target_field=None,
                          style_classes=style_classes,
                          style_type='rasterStyle')

        # For printing map purpose
        map_title = tr('People in need of evacuation')
        legend_notes = tr('Thousand separator is represented by %s' %
                          get_thousand_separator())
        legend_units = tr('(people per cell)')
        legend_title = tr('Population density')

        # Create raster object and return
        R = Raster(my_impact,
                   projection=my_hazard.get_projection(),
                   geotransform=my_hazard.get_geotransform(),
                   name=tr('Population which %s') %
                   (get_function_title(self).lower()),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return R


class VolcanoPolygonHazardPopulation(FunctionProvider):
    """Impact function for volcano hazard zones impact on population

    :author AIFDR
    :rating 4
    :param requires category=='hazard' and \
                    subcategory in ['volcano'] and \
                    layertype=='vector'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """

    title = tr('Need evacuation')
    target_field = 'population'
    defaults = get_defaults()
    # Function documentation
    synopsis = tr('To assess the impacts of volcano eruption on population.')
    actions = tr(
        'Provide details about how many people would likely be affected '
        'by each hazard zone.')
    hazard_input = tr(
        'A hazard vector layer can be polygon or point. If polygon, it must '
        'have "KRB" attribute and the valuefor it are "Kawasan Rawan '
        'Bencana I", "Kawasan Rawan Bencana II", or "Kawasan Rawan Bencana '
        'III."If you want to see the name of the volcano in the result, you '
        'need to add "NAME" attribute for point data or "GUNUNG" attribute '
        'for polygon data.')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr(
        'Vector layer contains population affected and the minimum needs '
        'based on the population affected.')

    parameters = OrderedDict([
        ('distance [km]', [3, 5, 10]),
        ('minimum needs', default_minimum_needs()),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elder_ratio', defaults['ELDER_RATIO'])])
             }), ('MinimumNeeds', {
                 'on': True
             })
         ]))
    ])

    def run(self, layers):
        """Risk plugin for volcano population evacuation

        :param layers: List of layers expected to contain where two layers
            should be present.

            * my_hazard: Vector polygon layer of volcano impact zones
            * my_exposure: Raster layer of population data on the same grid as
              my_hazard

        Counts number of people exposed to volcano event.

        :returns: Map of population exposed to the volcano hazard zone.
            The returned dict will include a table with number of people
            evacuated and supplies required.
        :rtype: dict
        """

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Volcano KRB
        my_exposure = get_exposure_layer(layers)

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Input checks
        if not my_hazard.is_vector:
            msg = ('Input hazard %s  was not a vector layer as expected ' %
                   my_hazard.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon or point layer. I got %s with '
               'layer type %s' %
               (my_hazard.get_name(), my_hazard.get_geometry_name()))
        if not (my_hazard.is_polygon_data or my_hazard.is_point_data):
            raise Exception(msg)

        if my_hazard.is_point_data:
            # Use concentric circles
            radii = self.parameters['distance [km]']

            centers = my_hazard.get_geometry()
            attributes = my_hazard.get_data()
            rad_m = [x * 1000 for x in radii]  # Convert to meters
            my_hazard = make_circular_polygon(centers,
                                              rad_m,
                                              attributes=attributes)

            category_title = 'Radius'
            category_header = tr('Distance [km]')
            category_names = radii

            name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
        else:
            # Use hazard map
            category_title = 'KRB'
            category_header = tr('Category')

            # FIXME (Ole): Change to English and use translation system
            category_names = [
                'Kawasan Rawan Bencana III', 'Kawasan Rawan Bencana II',
                'Kawasan Rawan Bencana I'
            ]

            name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map
            attributes = my_hazard.get_data()

        # Get names of volcanos considered
        if name_attribute in my_hazard.get_attribute_names():
            D = {}
            for att in my_hazard.get_data():
                # Run through all polygons and get unique names
                D[att[name_attribute]] = None

            volcano_names = ''
            for name in D:
                volcano_names += '%s, ' % name
            volcano_names = volcano_names[:-2]  # Strip trailing ', '
        else:
            volcano_names = tr('Not specified in data')

        if category_title not in my_hazard.get_attribute_names():
            msg = ('Hazard data %s did not contain expected '
                   'attribute %s ' % (my_hazard.get_name(), category_title))
            # noinspection PyExceptionInherit
            raise InaSAFEError(msg)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(my_hazard,
                                                  my_exposure,
                                                  attribute_name='population')

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a population count of zero
        new_attributes = my_hazard.get_data()

        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            cat = attr[category_title]
            categories[cat] = 0

        # Count affected population per polygon and total
        evacuated = 0
        for attr in P.get_data():
            # Get population at this location
            pop = float(attr['population'])

            # Update population count for associated polygon
            poly_id = attr['polygon_id']
            new_attributes[poly_id][self.target_field] += pop

            # Update population count for each category
            cat = new_attributes[poly_id][category_title]
            categories[cat] += pop

        # Count totals
        total = int(numpy.sum(my_exposure.get_data(nan=0)))

        # Don't show digits less than a 1000
        total = round_thousand(total)

        # Count number and cumulative for each zone
        cum = 0
        pops = {}
        cums = {}
        for name in category_names:
            if category_title == 'Radius':
                key = name * 1000  # Convert to meters
            else:
                key = name
            # prevent key error
            pop = int(categories.get(key, 0))

            pop = round_thousand(pop)

            cum += pop
            cum = round_thousand(cum)

            pops[name] = pop
            cums[name] = cum

        # Use final accumulation as total number needing evac
        evacuated = cum

        tot_needs = evacuated_population_weekly_needs(evacuated)

        # Generate impact report for the pdf map
        blank_cell = ''
        table_body = [
            question,
            TableRow(
                [tr('Volcanos considered'),
                 '%s' % volcano_names, blank_cell],
                header=True),
            TableRow([
                tr('People needing evacuation'),
                '%s' % format_int(evacuated), blank_cell
            ],
                     header=True),
            TableRow(
                [category_header,
                 tr('Total'), tr('Cumulative')], header=True)
        ]

        for name in category_names:
            table_body.append(
                TableRow(
                    [name,
                     format_int(pops[name]),
                     format_int(cums[name])]))

        table_body.extend([
            TableRow(
                tr('Map shows population affected in '
                   'each of the volcano hazard polygons.')),
            TableRow([tr('Needs per week'),
                      tr('Total'), blank_cell],
                     header=True),
            [tr('Rice [kg]'),
             format_int(tot_needs['rice']), blank_cell],
            [
                tr('Drinking Water [l]'),
                format_int(tot_needs['drinking_water']), blank_cell
            ],
            [
                tr('Clean Water [l]'),
                format_int(tot_needs['water']), blank_cell
            ],
            [
                tr('Family Kits'),
                format_int(tot_needs['family_kits']), blank_cell
            ], [tr('Toilets'),
                format_int(tot_needs['toilets']), blank_cell]
        ])
        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population %s in the exposure layer') %
            format_int(total),
            tr('People need evacuation if they are within the '
               'volcanic hazard zones.')
        ])

        population_counts = [x[self.target_field] for x in new_attributes]
        impact_summary = Table(table_body).toNewlineFreeString()

        # check for zero impact
        if numpy.nanmax(population_counts) == 0 == numpy.nanmin(
                population_counts):
            table_body = [
                question,
                TableRow([
                    tr('People needing evacuation'),
                    '%s' % format_int(evacuated), blank_cell
                ],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        classes = create_classes(population_counts, len(colours))
        interval_classes = humanize_class(classes)
        # Define style info for output polygons showing population counts
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 100
                style_class['min'] = 0
            else:
                transparency = 30
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('People affected by volcanic hazard zone')
        legend_notes = tr('Thousand separator is represented by %s' %
                          get_thousand_separator())
        legend_units = tr('(people)')
        legend_title = tr('Population count')

        # Create vector layer and return
        V = Vector(data=new_attributes,
                   projection=my_hazard.get_projection(),
                   geometry=my_hazard.get_geometry(as_geometry_objects=True),
                   name=tr('Population affected by volcanic hazard zone'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return V
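
# Illustrative sketch (not part of the impact function above): the counting
# loop assigns each interpolated point's population back to its source
# polygon via 'polygon_id' and accumulates per-category totals. The
# dictionaries and values below are invented purely for illustration; only
# the attribute names ('polygon_id', 'population') mirror the code above.
interpolated_points = [
    {'polygon_id': 0, 'population': 120.0},
    {'polygon_id': 0, 'population': 80.0},
    {'polygon_id': 1, 'population': 45.0},
]
# One attribute dict per hazard polygon; 'population' stands in for the
# function's self.target_field and starts at zero.
polygon_attributes = [
    {'KRB': 'Kawasan Rawan Bencana III', 'population': 0},
    {'KRB': 'Kawasan Rawan Bencana II', 'population': 0},
]
categories = {'Kawasan Rawan Bencana III': 0, 'Kawasan Rawan Bencana II': 0}

for point in interpolated_points:
    poly_id = point['polygon_id']
    pop = float(point['population'])
    polygon_attributes[poly_id]['population'] += pop
    categories[polygon_attributes[poly_id]['KRB']] += pop

print polygon_attributes  # per-polygon counts: 200.0 and 45.0
print categories          # per-KRB-category counts
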
Code example #31
class PAGFatalityFunction(ITBFatalityFunction):
    # noinspection PyUnresolvedReferences
    """Population Vulnerability Model Pager.

    Loss ratio(MMI) = standard normal distrib( 1 / BETA * ln(MMI/THETA)).
    Reference:
    Jaiswal, K. S., Wald, D. J., and Hearne, M. (2009a).
    Estimating casualties for large worldwide earthquakes using an empirical
    approach. U.S. Geological Survey Open-File Report 2009-1136.

    :author Helen Crowley
    :rating 3

    :param requires category=='hazard' and \
                    subcategory=='earthquake' and \
                    layertype=='raster' and \
                    unit=='MMI'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """
    class Metadata(ITBFatalityFunction.Metadata):
        """Metadata for PAG Fatality Function.

        .. versionadded:: 2.1

        We only need to re-implement get_metadata(), all other behaviours
        are inherited from the abstract base class.
        """
        @staticmethod
        def get_metadata():
            """Return metadata as a dictionary.

            This is a static method. You can use it to get the metadata in
            dictionary format for an impact function.

            :returns: A dictionary representing all the metadata for the
                concrete impact function.
            :rtype: dict
            """
            dict_meta = {
                'id':
                'PAGFatalityFunction',
                'name':
                tr('PAG Fatality Function'),
                'impact':
                tr('Die or be displaced according to the Pager model'),
                'author':
                'Helen Crowley',
                'date_implemented':
                'N/A',
                'overview':
                tr('To assess the impact of earthquake on population based '
                   'on Population Vulnerability Model Pager'),
                'categories': {
                    'hazard': {
                        'definition': hazard_definition,
                        'subcategory': hazard_earthquake,
                        'units': [unit_mmi],
                        'layer_constraints': [layer_raster_numeric]
                    },
                    'exposure': {
                        'definition': exposure_definition,
                        'subcategory': exposure_population,
                        'units': [unit_people_per_pixel],
                        'layer_constraints': [layer_raster_numeric]
                    }
                }
            }
            return dict_meta

    synopsis = tr('To assess the impact of earthquake on population based on '
                  'Population Vulnerability Model Pager')
    citations = tr(
        ' * Jaiswal, K. S., Wald, D. J., and Hearne, M. (2009a). '
        '   Estimating casualties for large worldwide earthquakes using '
        '   an empirical approach. U.S. Geological Survey Open-File '
        '   Report 2009-1136.')
    limitation = ''
    detailed_description = ''
    title = tr('Die or be displaced according to the Pager model')
    defaults = get_defaults()

    parameters = OrderedDict([
        ('Theta', 11.067),
        ('Beta', 0.106),  # Model coefficients
        # Rates of people displaced for each MMI level
        ('displacement_rate', {
            1: 0,
            1.5: 0,
            2: 0,
            2.5: 0,
            3: 0,
            3.5: 0,
            4: 0,
            4.5: 0,
            5: 0,
            5.5: 0,
            6: 1.0,
            6.5: 1.0,
            7: 1.0,
            7.5: 1.0,
            8: 1.0,
            8.5: 1.0,
            9: 1.0,
            9.5: 1.0,
            10: 1.0
        }),
        ('mmi_range', list(numpy.arange(2, 10, 0.5))),
        ('step', 0.25),
        # Threshold below which layer should be transparent
        ('tolerance', 0.01),
        ('calculate_displaced_people', True),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elderly_ratio', defaults['ELDERLY_RATIO'])])
             }), ('MinimumNeeds', {
                 'on': True
             })
         ])),
        ('minimum needs', default_minimum_needs()),
        ('provenance', default_provenance())
    ])

    # noinspection PyPep8Naming
    def fatality_rate(self, mmi):
        """Pager method to compute fatality rate.

        :param mmi: MMI

        :returns: Fatality rate
        """

        N = math.sqrt(2 * math.pi)
        THETA = self.parameters['Theta']
        BETA = self.parameters['Beta']

        x = math.log(mmi / THETA) / BETA
        return math.exp(-x * x / 2.0) / N
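
# Illustrative sketch (not part of the original class above): the rate
# computed by fatality_rate() is the standard normal density evaluated at
# ln(MMI / Theta) / Beta. The stand-alone mirror below uses the same default
# coefficients as the `parameters` OrderedDict; the sample MMI values are
# made up for demonstration.
import math


def pager_fatality_rate(mmi, theta=11.067, beta=0.106):
    """Stand-alone mirror of PAGFatalityFunction.fatality_rate above."""
    x = math.log(mmi / theta) / beta
    return math.exp(-x * x / 2.0) / math.sqrt(2 * math.pi)

for sample_mmi in [4.0, 6.0, 8.0, 10.0]:
    print sample_mmi, pager_fatality_rate(sample_mmi)
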
Code example #32
class TsunamiEvacuationFunction(FunctionProvider):
    # noinspection PyUnresolvedReferences
    """Impact function for tsunami evacuation.

    :author AIFDR
    :rating 4
    :param requires category=='hazard' and \
                    subcategory=='tsunami' and \
                    layertype=='raster' and \
                    unit=='m'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """
    class Metadata(ImpactFunctionMetadata):
        """Metadata for TsunamiEvacuationFunction.

        .. versionadded:: 2.1

        We only need to re-implement get_metadata(), all other behaviours
        are inherited from the abstract base class.
        """
        @staticmethod
        def get_metadata():
            """Return metadata as a dictionary.

            This is a static method. You can use it to get the metadata in
            dictionary format for an impact function.

            :returns: A dictionary representing all the metadata for the
                concrete impact function.
            :rtype: dict
            """
            dict_meta = {
                'id':
                'TsunamiEvacuationFunction',
                'name':
                tr('Tsunami Evacuation Function'),
                'impact':
                tr('Need evacuation'),
                'author':
                'AIFDR',
                'date_implemented':
                'N/A',
                'overview':
                tr('To assess the impacts of tsunami inundation '
                   'in raster format on population.'),
                'categories': {
                    'hazard': {
                        'definition': hazard_definition,
                        'subcategories': [hazard_tsunami],
                        'units': [unit_feet_depth, unit_metres_depth],
                        'layer_constraints': [layer_raster_continuous]
                    },
                    'exposure': {
                        'definition': exposure_definition,
                        'subcategories': [exposure_population],
                        'units': [unit_people_per_pixel],
                        'layer_constraints': [layer_raster_continuous]
                    }
                }
            }
            return dict_meta

    title = tr('Need evacuation')
    defaults = get_defaults()

    # Function documentation
    synopsis = tr('To assess the impacts of tsunami inundation in raster '
                  'format on population.')
    actions = tr(
        'Provide details about how many people would likely need to be '
        'evacuated, where they are located and what resources would be '
        'required to support them.')
    detailed_description = tr(
        'The population subject to inundation exceeding a threshold '
        '(default 0.7m) is calculated and returned as a raster layer. In '
        'addition the total number and the required needs in terms of the '
        'BNPB (Perka 7) are reported. The threshold can be changed and even '
        'contain multiple numbers in which case evacuation and needs are '
        'calculated using the largest number with population breakdowns '
        'provided for the smaller numbers. The population raster is resampled '
        'to the resolution of the hazard raster and is rescaled so that the '
        'resampled population counts reflect estimates of population count '
        'per resampled cell. The resulting impact layer has the same '
        'resolution and reflects population count per cell which are affected '
        'by inundation.')
    hazard_input = tr(
        'A hazard raster layer where each cell represents tsunami depth '
        '(in meters).')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr(
        'Raster layer contains population affected and the minimum needs '
        'based on the population affected.')
    limitation = tr(
        'The default threshold of 0.7 meter was selected based on consensus, '
        'not hard evidence.')

    # Configurable parameters
    # TODO: Share the minimum needs and make another default value
    parameters = OrderedDict([
        ('thresholds [m]', [0.7]),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elderly_ratio', defaults['ELDERLY_RATIO'])])
             }),
             ('MinimumNeeds', {
                 'on': True
             }),
         ])), ('minimum needs', default_minimum_needs()),
        ('provenance', default_provenance())
    ])
    parameters = add_needs_parameters(parameters)

    def run(self, layers):
        """Risk plugin for tsunami population evacuation.

        :param layers: List of layers expected to contain two layers:

            * hazard_layer: Raster layer of tsunami depth
            * exposure_layer: Raster layer of population data on the same
              grid as hazard_layer

        Counts the number of people exposed to tsunami levels exceeding the
        specified threshold.

        :returns: Raster layer of population exposed to tsunami levels
            exceeding the threshold. Its keywords include a table with the
            number of people evacuated and the supplies required.
        :rtype: Raster
        """

        # Identify hazard and exposure layers
        hazard_layer = get_hazard_layer(layers)  # Tsunami inundation [m]
        exposure_layer = get_exposure_layer(layers)

        question = get_question(hazard_layer.get_name(),
                                exposure_layer.get_name(), self)

        # Determine depths above which people are regarded affected [m]
        # Use thresholds from inundation layer if specified
        thresholds = self.parameters['thresholds [m]']

        verify(isinstance(thresholds, list),
               'Expected thresholds to be a list. Got %s' % str(thresholds))

        # Extract data as numeric arrays
        data = hazard_layer.get_data(nan=0.0)  # Depth

        # Calculate impact as population exposed to depths > max threshold
        population = exposure_layer.get_data(nan=0.0, scaling=True)

        # Calculate impact to intermediate thresholds
        counts = []
        # merely initialize
        impact = None
        for i, lo in enumerate(thresholds):
            if i == len(thresholds) - 1:
                # The last threshold
                impact = medium = numpy.where(data >= lo, population, 0)
            else:
                # Intermediate thresholds
                hi = thresholds[i + 1]
                medium = numpy.where((data >= lo) * (data < hi), population, 0)

            # Count
            val = int(numpy.sum(medium))

            # Sensible rounding
            val, rounding = population_rounding_full(val)
            counts.append([val, rounding])

        # Count totals
        evacuated, rounding = counts[-1]
        total = int(numpy.sum(population))
        # Don't show digits less than a 1000
        total = population_rounding(total)

        minimum_needs = [
            parameter.serialize()
            for parameter in self.parameters['minimum needs']
        ]

        # Generate impact report for the pdf map
        # noinspection PyListCreation
        table_body = [
            question,
            TableRow([(tr('People in %.1f m of water') % thresholds[-1]),
                      '%s*' % format_int(evacuated)],
                     header=True),
            TableRow(
                tr('* Number is rounded up to the nearest %s') % rounding),
            TableRow(tr('Map shows the numbers of people needing evacuation'))
        ]

        total_needs = evacuated_population_needs(evacuated, minimum_needs)
        for frequency, needs in total_needs.items():
            table_body.append(
                TableRow([
                    tr('Needs should be provided %s' % frequency),
                    tr('Total')
                ],
                         header=True))
            for resource in needs:
                table_body.append(
                    TableRow([
                        tr(resource['table name']),
                        format_int(resource['amount'])
                    ]))

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(TableRow(tr('How will warnings be disseminated?')))
        table_body.append(TableRow(tr('How will we reach stranded people?')))
        table_body.append(TableRow(tr('Do we have enough relief items?')))
        table_body.append(
            TableRow(
                tr('If yes, where are they located and how '
                   'will we distribute them?')))
        table_body.append(
            TableRow(
                tr('If no, where can we obtain additional relief items '
                   'and how will we transport them here?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People need evacuation if tsunami levels exceed %(eps).1f m') %
            {
                'eps': thresholds[-1]
            },
            tr('Minimum needs are defined in BNPB regulation 7/2008'),
            tr('All values are rounded up to the nearest integer in order to '
               'avoid representing human lives as fractions.')
        ])

        if len(counts) > 1:
            table_body.append(TableRow(tr('Detailed breakdown'), header=True))

            for i, val in enumerate(counts[:-1]):
                s = (tr('People in %(lo).1f m to %(hi).1f m of water: %(val)s')
                     % {
                         'lo': thresholds[i],
                         'hi': thresholds[i + 1],
                         'val': format_int(val[0])
                     })
                table_body.append(TableRow(s))

        # Result
        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # check for zero impact
        if numpy.nanmax(impact) == 0 == numpy.nanmin(impact):
            table_body = [
                question,
                TableRow([(tr('People in %.1f m of water') % thresholds[-1]),
                          '%s' % format_int(evacuated)],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        classes = create_classes(impact.flat[:], len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []

        for i in xrange(len(colours)):
            style_class = dict()
            if i == 1:
                label = create_label(interval_classes[i], 'Low')
            elif i == 4:
                label = create_label(interval_classes[i], 'Medium')
            elif i == 7:
                label = create_label(interval_classes[i], 'High')
            else:
                label = create_label(interval_classes[i])
            style_class['label'] = label
            style_class['quantity'] = classes[i]
            if i == 0:
                transparency = 100
            else:
                transparency = 0
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_classes.append(style_class)

        style_info = dict(target_field=None,
                          style_classes=style_classes,
                          style_type='rasterStyle')

        # For printing map purpose
        map_title = tr('People in need of evacuation')
        legend_notes = tr('Thousand separator is represented by %s' %
                          get_thousand_separator())
        legend_units = tr('(people per cell)')
        legend_title = tr('Population')

        # Create raster object and return
        raster = Raster(impact,
                        projection=hazard_layer.get_projection(),
                        geotransform=hazard_layer.get_geotransform(),
                        name=tr('Population which %s') %
                        (get_function_title(self).lower()),
                        keywords={
                            'impact_summary': impact_summary,
                            'impact_table': impact_table,
                            'map_title': map_title,
                            'legend_notes': legend_notes,
                            'legend_units': legend_units,
                            'legend_title': legend_title,
                            'evacuated': evacuated,
                            'total_needs': total_needs
                        },
                        style_info=style_info)
        return raster
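
# Illustrative sketch (not part of the impact function above): the threshold
# loop splits the exposed population into depth bands, with the final
# threshold catching everything at or above it. The synthetic arrays and the
# threshold list below are invented for demonstration only.
import numpy

depth = numpy.array([[0.2, 0.8], [1.6, 0.0]])       # tsunami depth [m]
population = numpy.array([[10., 20.], [30., 40.]])  # people per cell
thresholds = [0.7, 1.5]

band_counts = []
for i, lo in enumerate(thresholds):
    if i == len(thresholds) - 1:
        # Final threshold: everyone at or above it
        exposed = numpy.where(depth >= lo, population, 0)
    else:
        hi = thresholds[i + 1]
        exposed = numpy.where((depth >= lo) * (depth < hi), population, 0)
    band_counts.append(int(numpy.sum(exposed)))

print band_counts  # [20, 30]: 20 people in 0.7-1.5 m, 30 people in >= 1.5 m
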
Code example #33
class FloodEvacuationFunctionVectorHazard(FunctionProvider):
    """Impact function for vector flood evacuation

    :author AIFDR
    :rating 4

    :param requires category=='hazard' and \
                    subcategory in ['flood', 'tsunami'] and \
                    layertype=='vector'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """

    title = tr('Need evacuation')
    # Function documentation
    synopsis = tr(
        'To assess the impacts of (flood or tsunami) inundation in vector '
        'format on population.')
    actions = tr(
        'Provide details about how many people would likely need to be '
        'evacuated, where they are located and what resources would be '
        'required to support them.')

    detailed_description = tr(
        'The population subject to inundation is determined by checking '
        'whether it falls within an affected area or not. You can also set '
        'an evacuation percentage to calculate what percentage of the total '
        'affected population should be evacuated. This number is used to '
        'estimate needs based on the BNPB Perka 7/2008 minimum bantuan.')

    hazard_input = tr(
        'A hazard vector layer that has an "affected" attribute whose value '
        'is either 1 or 0.')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr(
        'Vector layer contains population affected and the minimum needs '
        'based on evacuation percentage.')

    target_field = 'population'
    defaults = get_defaults()

    # Configurable parameters
    # TODO: Share the minimum needs and make another default value
    parameters = OrderedDict([
        ('evacuation_percentage', 1),  # Percent of affected needing evacuation
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elderly_ratio', defaults['ELDERLY_RATIO'])])
             }),
             ('MinimumNeeds', {
                 'on': True
             }),
         ])),
        ('minimum needs', default_minimum_needs())
    ])

    def run(self, layers):
        """Risk plugin for flood population evacuation

        Input:
          layers: List of layers expected to contain

              my_hazard : Vector polygon layer of flood depth

              my_exposure : Raster layer of population data on the same
                grid as my_hazard

        Counts number of people exposed to areas identified as flood prone

        Return
          Map of population exposed to flooding

          Table with number of people evacuated and supplies required
        """
        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)  # Flood inundation
        my_exposure = get_exposure_layer(layers)

        question = get_question(my_hazard.get_name(), my_exposure.get_name(),
                                self)

        # Check that hazard is polygon type
        if not my_hazard.is_vector:
            msg = ('Input hazard %s was not a vector layer as expected ' %
                   my_hazard.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon layer. I got %s with layer '
               'type %s' %
               (my_hazard.get_name(), my_hazard.get_geometry_name()))
        if not my_hazard.is_polygon_data:
            raise Exception(msg)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(my_hazard,
                                                  my_exposure,
                                                  attribute_name='population')

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a population count of zero
        new_attributes = my_hazard.get_data()
        category_title = 'affected'  # FIXME: Should come from keywords
        deprecated_category_title = 'FLOODPRONE'
        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            try:
                cat = attr[category_title]
            except KeyError:
                cat = attr[deprecated_category_title]
            categories[cat] = 0

        # Count affected population per polygon, per category and total
        affected_population = 0
        for attr in P.get_data():

            affected = False
            if 'affected' in attr:
                res = attr['affected']
                if res is None:
                    x = False
                else:
                    x = bool(res)
                affected = x
            elif 'FLOODPRONE' in attr:
                # If there isn't an 'affected' attribute,
                res = attr['FLOODPRONE']
                if res is not None:
                    affected = res.lower() == 'yes'
            elif 'Affected' in attr:
                # Check the default attribute assigned for points
                # covered by a polygon
                res = attr['Affected']
                if res is None:
                    x = False
                else:
                    x = res
                affected = x
            else:
                # there is no flood related attribute
                msg = ('No flood related attribute found in %s. '
                       'I was looking for either "affected", "FLOODPRONE" '
                       'or "Affected". The latter should have been '
                       'automatically set by the call to '
                       'assign_hazard_values_to_exposure_data(). '
                       'Sorry I can\'t help more.' % my_hazard.get_name())
                raise Exception(msg)

            if affected:
                # Get population at this location
                pop = float(attr['population'])

                # Update population count for associated polygon
                poly_id = attr['polygon_id']
                new_attributes[poly_id][self.target_field] += pop

                # Update population count for each category
                try:
                    cat = new_attributes[poly_id][category_title]
                except KeyError:
                    cat = new_attributes[poly_id][deprecated_category_title]
                categories[cat] += pop

                # Update total
                affected_population += pop

        affected_population = round_thousand(affected_population)
        # Estimate number of people in need of evacuation
        evacuated = (affected_population *
                     self.parameters['evacuation_percentage'] / 100.0)

        total = int(numpy.sum(my_exposure.get_data(nan=0, scaling=False)))

        # Don't show digits less than a 1000
        total = round_thousand(total)
        evacuated = round_thousand(evacuated)

        # Calculate estimated minimum needs
        minimum_needs = self.parameters['minimum needs']
        tot_needs = evacuated_population_weekly_needs(evacuated, minimum_needs)

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([
                tr('People affected'),
                '%s%s' % (format_int(int(affected_population)),
                          ('*' if affected_population >= 1000 else ''))
            ],
                     header=True),
            TableRow([
                tr('People needing evacuation'),
                '%s%s' % (format_int(int(evacuated)),
                          ('*' if evacuated >= 1000 else ''))
            ],
                     header=True),
            TableRow([
                TableCell(tr('* Number is rounded to the nearest 1000'),
                          col_span=2)
            ],
                     header=False),
            TableRow([
                tr('Evacuation threshold'),
                '%s%%' % format_int(self.parameters['evacuation_percentage'])
            ],
                     header=True),
            TableRow(
                tr('Map shows population affected in each flood'
                   ' prone area')),
            TableRow(
                tr('Table below shows the weekly minimum needs '
                   'for all evacuated people')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), format_int(tot_needs['rice'])],
            [
                tr('Drinking Water [l]'),
                format_int(tot_needs['drinking_water'])
            ], [tr('Clean Water [l]'),
                format_int(tot_needs['water'])],
            [tr('Family Kits'),
             format_int(tot_needs['family_kits'])],
            [tr('Toilets'), format_int(tot_needs['toilets'])]
        ]
        impact_table = Table(table_body).toNewlineFreeString()

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(TableRow(tr('How will warnings be disseminated?')))
        table_body.append(TableRow(tr('How will we reach stranded people?')))
        table_body.append(TableRow(tr('Do we have enough relief items?')))
        table_body.append(
            TableRow(
                tr('If yes, where are they located and how '
                   'will we distribute them?')))
        table_body.append(
            TableRow(
                tr('If no, where can we obtain additional '
                   'relief items and how will we '
                   'transport them here?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People need evacuation if in area identified '
               'as "Flood Prone"'),
            tr('Minimum needs are defined in BNPB '
               'regulation 7/2008')
        ])
        impact_summary = Table(table_body).toNewlineFreeString()

        # Create style
        # Define classes for legend for flooded population counts
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        population_counts = [x['population'] for x in new_attributes]
        classes = create_classes(population_counts, len(colours))
        interval_classes = humanize_class(classes)

        # Define style info for output polygons showing population counts
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 100
                style_class['min'] = 0
            else:
                transparency = 0
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('People affected by flood prone areas')
        legend_notes = tr('Thousand separator is represented by \'.\'')
        legend_units = tr('(people per polygon)')
        legend_title = tr('Population Count')

        # Create vector layer and return
        V = Vector(data=new_attributes,
                   projection=my_hazard.get_projection(),
                   geometry=my_hazard.get_geometry(),
                   name=tr('Population affected by flood prone areas'),
                   keywords={
                       'impact_summary': impact_summary,
                       'impact_table': impact_table,
                       'target_field': self.target_field,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   style_info=style_info)
        return V
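
# Illustrative sketch (not part of the impact function above): the
# affected / FLOODPRONE / Affected fallback in the counting loop can be
# expressed as a small predicate. The helper below is hypothetical -- it
# does not exist in InaSAFE -- and only mirrors the precedence used above.
def is_affected(attributes):
    """Mirror the attribute precedence used in the counting loop above."""
    if 'affected' in attributes:
        return bool(attributes['affected'])
    if 'FLOODPRONE' in attributes:
        value = attributes['FLOODPRONE']
        return value is not None and value.lower() == 'yes'
    if 'Affected' in attributes:
        return bool(attributes['Affected'])
    raise Exception('No flood related attribute found')

print is_affected({'affected': 1})        # True
print is_affected({'FLOODPRONE': 'YES'})  # True
print is_affected({'FLOODPRONE': 'no'})   # False
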
Code example #34
    def test_on_dsb_female_ratio_default_value_changed(self):
        """Test hazard radio button toggle behaviour works"""
        layer = clone_shp_layer(
            name='district_osm_jakarta',
            include_keywords=True,
            source_directory=test_data_path('boundaries'))
        defaults = get_defaults()
        dialog = KeywordsDialog(PARENT, IFACE, layer=layer)
        button = dialog.radPostprocessing
        button.setChecked(False)
        button.click()
        female_ratio_box = dialog.cboFemaleRatioAttribute

        # set to Don't use
        index = female_ratio_box.findText(dialog.do_not_use_string)
        message = (dialog.do_not_use_string + ' not found')
        self.assertNotEqual(index, -1, message)
        female_ratio_box.setCurrentIndex(index)

        message = (
            'Toggling the female ratio attribute combo to'
            ' "Don\'t use" did not add it to the keywords list.')
        self.assertEqual(
            dialog.get_value_for_key(defaults['FEMALE_RATIO_ATTR_KEY']),
            dialog.do_not_use_string,
            message)

        message = (
            'Toggling the female ratio attribute combo to'
            ' "Don\'t use" did not disable dsbFemaleRatioDefault.')
        is_enabled = dialog.dsbFemaleRatioDefault.isEnabled()
        assert not is_enabled, message

        message = (
            'Toggling the female ratio attribute combo to'
            ' "Don\'t use" did not remove the keyword.')
        assert (dialog.get_value_for_key(defaults['FEMALE_RATIO']) is None), \
            message

        # set to PEREMPUAN attribute
        attribute = 'PEREMPUAN'
        index = female_ratio_box.findText(attribute)
        message = 'The attribute %s is not found in the layer' % attribute
        self.assertNotEqual(index, -1, message)

        female_ratio_box.setCurrentIndex(index)
        message = (
            'Toggling the female ratio attribute combo to %s'
            ' did not add it to the keywords list.') % attribute
        self.assertEqual(
            dialog.get_value_for_key(defaults['FEMALE_RATIO_ATTR_KEY']),
            attribute,
            message)

        message = (
            'Toggling the female ratio attribute combo to %s'
            ' did not disable dsbFemaleRatioDefault.') % attribute
        is_enabled = dialog.dsbFemaleRatioDefault.isEnabled()
        self.assertFalse(is_enabled, message)

        message = (
            'Toggling the female ratio attribute combo to %s'
            ' did not remove the keyword.') % attribute
        self.assertIsNone(
            dialog.get_value_for_key(defaults['FEMALE_RATIO']), message)
Code example #35
class ITBFatalityFunction(FunctionProvider):
    """Indonesian Earthquake Fatality Model

    This model was developed by Institut Teknologi Bandung (ITB) and
    implemented by Dr. Hadi Ghasemi, Geoscience Australia.


    Reference:

    Indonesian Earthquake Building-Damage and Fatality Models and
    Post Disaster Survey Guidelines Development,
    Bali, 27-28 February 2012, 54pp.


    Algorithm:

    In this study, the same functional form as Allen (2009) is adopted
    to express fatality rate as a function of intensity (see Eq. 10 in the
    report). The Matlab built-in function (fminsearch) for  Nelder-Mead
    algorithm was used to estimate the model parameters. The objective
    function (L2G norm) that is minimised during the optimisation is the
    same as the one used by Jaiswal et al. (2010).

    The coefficients used in the Indonesian model are
    x=0.62275231, y=8.03314466, zeta=2.15

    Allen, T. I., Wald, D. J., Earle, P. S., Marano, K. D., Hotovec, A. J.,
    Lin, K., and Hearne, M., 2009. An Atlas of ShakeMaps and population
    exposure catalog for earthquake loss modeling, Bull. Earthq. Eng. 7,
    701-718.

    Jaiswal, K., and Wald, D., 2010. An empirical model for global earthquake
    fatality estimation, Earthq. Spectra 26, 1017-1037.


    Caveats and limitations:

    The current model is the result of the above mentioned workshop and
    reflects the best available information. However, the current model
    has a number of issues listed below and is expected to evolve further
    over time.

    1 - The model is based on limited number of observed fatality
        rates during 4 past fatal events.
    2 - The model clearly over-predicts the fatality rates at
        intensities higher than VIII.
    3 - The model only estimates the expected fatality rate for a given
        intensity level; however the associated uncertainty for the proposed
        model is not addressed.
    4 - There are few known mistakes in developing the current model:
        - rounding MMI values to the nearest 0.5,
        - Implementing Finite-Fault models of candidate events, and
        - consistency between selected GMPEs with those in use by BMKG.
          These issues will be addressed by ITB team in the final report.

    Note: Because of these caveats, decisions should not be made solely on
    the information presented here and should always be verified by ground
    truthing and other reliable information sources.

    :author Hadi Ghasemi
    :rating 3

    :param requires category=='hazard' and \
                    subcategory=='earthquake' and \
                    layertype=='raster' and \
                    unit=='MMI'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'

    """

    title = tr('Die or be displaced')
    synopsis = tr(
        'To assess the impact of earthquake on population based on earthquake '
        'model developed by ITB')
    citations = tr(
        ' * Indonesian Earthquake Building-Damage and Fatality Models and '
        '   Post Disaster Survey Guidelines Development Bali, 27-28 '
        '   February 2012, 54pp.\n'
        ' * Allen, T. I., Wald, D. J., Earle, P. S., Marano, K. D., '
        '   Hotovec, A. J., Lin, K., and Hearne, M., 2009. An Atlas '
        '   of ShakeMaps and population exposure catalog for '
        '   earthquake loss modeling, Bull. Earthq. Eng. 7, 701-718.\n'
        ' * Jaiswal, K., and Wald, D., 2010. An empirical model for '
        '   global earthquake fatality estimation, Earthq. Spectra '
        '   26, 1017-1037.\n')
    limitation = tr(
        ' - The model is based on limited number of observed fatality '
        '   rates during 4 past fatal events. \n'
        ' - The model clearly over-predicts the fatality rates at '
        '   intensities higher than VIII.\n'
        ' - The model only estimates the expected fatality rate '
        '   for a given intensity level; however the associated '
        '   uncertainty for the proposed model is not addressed.\n'
        ' - There are few known mistakes in developing the current '
        '   model:\n\n'
        '   * rounding MMI values to the nearest 0.5,\n'
        '   * Implementing Finite-Fault models of candidate events, and\n'
        '   * consistency between selected GMPEs with those in use by '
        '     BMKG.\n')
    actions = tr(
        'Provide details about the population that will die or be displaced.')
    detailed_description = tr(
        'This model was developed by Institut Teknologi Bandung (ITB) '
        'and implemented by Dr. Hadi Ghasemi, Geoscience Australia\n'
        'Algorithm:\n'
        'In this study, the same functional form as Allen (2009) is '
        'adopted to express fatality rate as a function of intensity '
        '(see Eq. 10 in the report). The Matlab built-in function '
        '(fminsearch) for  Nelder-Mead algorithm was used to estimate '
        'the model parameters. The objective function (L2G norm) that '
        'is minimized during the optimisation is the same as the one '
        'used by Jaiswal et al. (2010).\n'
        'The coefficients used in the Indonesian model are x=0.62275231, '
        'y=8.03314466, zeta=2.15')
    defaults = get_defaults()

    parameters = OrderedDict([
        ('x', 0.62275231),
        ('y', 8.03314466),  # Model coefficients
        # Rates of people displaced for each MMI level
        ('displacement_rate', {
            1: 0,
            2: 0,
            3: 0,
            4: 0,
            5: 0,
            6: 1.0,
            7: 1.0,
            8: 1.0,
            9: 1.0,
            10: 1.0
        }),
        ('mmi_range', range(2, 10)),
        ('step', 0.5),
        # Threshold below which layer should be transparent
        ('tolerance', 0.01),
        ('calculate_displaced_people', True),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elderly_ratio', defaults['ELDERLY_RATIO'])])
             }), ('MinimumNeeds', {
                 'on': True
             })
         ])),
        ('minimum needs', default_minimum_needs())
    ])

    def fatality_rate(self, mmi):
        """
        ITB method to compute fatality rate
        :param mmi:
        """
        # As per email discussion with Ole, Trevor, Hadi, mmi < 4 will have
        # a fatality rate of 0 - Tim
        if mmi < 4:
            return 0

        x = self.parameters['x']
        y = self.parameters['y']
        return numpy.power(10.0, x * mmi - y)

    def run(self, layers):
        """Indonesian Earthquake Fatality Model

        Input:

        :param layers: List of layers expected to contain,

                my_hazard: Raster layer of MMI ground shaking

                my_exposure: Raster layer of population density
        """

        displacement_rate = self.parameters['displacement_rate']

        # Tolerance for transparency
        tolerance = self.parameters['tolerance']

        # Extract input layers
        intensity = get_hazard_layer(layers)
        population = get_exposure_layer(layers)

        question = get_question(intensity.get_name(), population.get_name(),
                                self)

        # Extract data grids
        my_hazard = intensity.get_data()  # Ground Shaking
        my_exposure = population.get_data(scaling=True)  # Population Density

        # Calculate population affected by each MMI level
        # FIXME (Ole): this range is 2-9. Should 10 be included?

        mmi_range = self.parameters['mmi_range']
        number_of_exposed = {}
        number_of_displaced = {}
        number_of_fatalities = {}

        # Calculate fatality rates for observed intensity values (my_hazard)
        # based on the ITB power model
        R = numpy.zeros(my_hazard.shape)
        for mmi in mmi_range:
            # Identify cells where MMI is in class i and
            # count population affected by this shake level
            I = numpy.where((my_hazard > mmi - self.parameters['step']) *
                            (my_hazard <= mmi + self.parameters['step']),
                            my_exposure, 0)

            # Calculate expected number of fatalities per level
            fatality_rate = self.fatality_rate(mmi)

            F = fatality_rate * I

            # Calculate expected number of displaced people per level
            try:
                D = displacement_rate[mmi] * I
            except KeyError, e:
                msg = 'mmi = %i, I = %s, Error msg: %s' % (mmi, str(I), str(e))
                # noinspection PyExceptionInherit
                raise InaSAFEError(msg)

            # Adjust displaced people to disregard fatalities.
            # Set to zero if there are more fatalities than displaced.
            D = numpy.where(D > F, D - F, 0)

            # Sum up numbers for map
            R += D  # Displaced

            # Generate text with result for this study
            # This is what is used in the real time system exposure table
            number_of_exposed[mmi] = numpy.nansum(I.flat)
            number_of_displaced[mmi] = numpy.nansum(D.flat)
            # noinspection PyUnresolvedReferences
            number_of_fatalities[mmi] = numpy.nansum(F.flat)

        # Set resulting layer to NaN when less than a threshold. This is to
        # achieve transparency (see issue #126).
        R[R < tolerance] = numpy.nan

        # Total statistics
        total = int(round(numpy.nansum(my_exposure.flat) / 1000) * 1000)

        # Compute number of fatalities
        fatalities = int(
            round(numpy.nansum(number_of_fatalities.values()) / 1000)) * 1000
        # As per email discussion with Ole, Trevor, Hadi, total fatalities < 50
        # will be rounded down to 0 - Tim
        if fatalities < 50:
            fatalities = 0

        # Compute number of people displaced due to building collapse
        displaced = int(
            round(numpy.nansum(number_of_displaced.values()) / 1000)) * 1000

        # Generate impact report
        table_body = [question]

        # Add total fatality estimate
        s = format_int(fatalities)
        table_body.append(
            TableRow([tr('Number of fatalities'), s], header=True))

        if self.parameters['calculate_displaced_people']:
            # Add total estimate of people displaced
            s = format_int(displaced)
            table_body.append(
                TableRow([tr('Number of people displaced'), s], header=True))
        else:
            displaced = 0

        # Add estimate of total population in area
        s = format_int(int(total))
        table_body.append(
            TableRow([tr('Total number of people'), s], header=True))

        # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
        # FIXME: Refactor and share
        minimum_needs = self.parameters['minimum needs']
        needs = evacuated_population_weekly_needs(displaced, minimum_needs)

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([tr('Fatalities'),
                      '%s' % format_int(fatalities)],
                     header=True),
            TableRow([tr('People displaced'),
                      '%s' % format_int(displaced)],
                     header=True),
            TableRow(
                tr('Map shows density estimate of '
                   'displaced population')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), format_int(needs['rice'])],
            [tr('Drinking Water [l]'),
             format_int(needs['drinking_water'])],
            [tr('Clean Water [l]'),
             format_int(needs['water'])],
            [tr('Family Kits'),
             format_int(needs['family_kits'])],
            TableRow(tr('Action Checklist:'), header=True)
        ]

        if fatalities > 0:
            table_body.append(
                tr('Are there enough victim identification '
                   'units available for %s people?') % format_int(fatalities))
        if displaced > 0:
            table_body.append(
                tr('Are there enough shelters and relief items '
                   'available for %s people?') % format_int(displaced))
            table_body.append(
                TableRow(
                    tr('If yes, where are they located and '
                       'how will we distribute them?')))
            table_body.append(
                TableRow(
                    tr('If no, where can we obtain '
                       'additional relief items from and '
                       'how will we transport them?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People are considered to be displaced if '
               'they experience and survive a shake level '
               'of more than 5 on the MMI scale'),
            tr('Minimum needs are defined in BNPB '
               'regulation 7/2008'),
            tr('The fatality calculation assumes that '
               'no fatalities occur for shake levels below 4 '
               'and fatality counts of less than 50 are '
               'disregarded.'),
            tr('All values are rounded up to the nearest '
               'integer in order to avoid representing human '
               'lives as fractions.')
        ])

        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(
            tr('Fatality model is from '
               'Institute of Teknologi Bandung 2012.'))
        table_body.append(tr('Population numbers rounded to nearest 1000.'))

        # Result
        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary

        # check for zero impact
        if numpy.nanmax(R) == 0 == numpy.nanmin(R):
            table_body = [
                question,
                TableRow([tr('Fatalities'),
                          '%s' % format_int(fatalities)],
                         header=True)
            ]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = ['#EEFFEE', '#FFFF7F', '#E15500', '#E4001B', '#730000']
        classes = create_classes(R.flat[:], len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            style_class['quantity'] = classes[i]
            if i == 0:
                transparency = 100
            else:
                transparency = 30
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_classes.append(style_class)

        style_info = dict(target_field=None,
                          style_classes=style_classes,
                          style_type='rasterStyle')

        # For printing map purpose
        map_title = tr('Earthquake impact to population')
        legend_notes = tr('Thousand separator is represented by %s' %
                          get_thousand_separator())
        legend_units = tr('(people per cell)')
        legend_title = tr('Population density')

        # Create raster object and return
        L = Raster(R,
                   projection=population.get_projection(),
                   geotransform=population.get_geotransform(),
                   keywords={
                       'impact_summary': impact_summary,
                       'total_population': total,
                       'total_fatalities': fatalities,
                       'fatalities_per_mmi': number_of_fatalities,
                       'exposed_per_mmi': number_of_exposed,
                       'displaced_per_mmi': number_of_displaced,
                       'impact_table': impact_table,
                       'map_title': map_title,
                       'legend_notes': legend_notes,
                       'legend_units': legend_units,
                       'legend_title': legend_title
                   },
                   name=tr('Estimated displaced population per cell'),
                   style_info=style_info)

        return L
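
# Illustrative sketch (not part of the impact function above): the per-MMI
# computation applies the power-model fatality rate to the exposed population
# in each intensity band and then subtracts expected fatalities from the
# displaced count. The coefficients below are the defaults from the
# `parameters` dictionary above; the exposure figure is invented.
import numpy


def itb_fatality_rate(mmi, x=0.62275231, y=8.03314466):
    """Stand-alone mirror of ITBFatalityFunction.fatality_rate above."""
    if mmi < 4:
        return 0.0
    return numpy.power(10.0, x * mmi - y)

displacement_rate = {5: 0.0, 6: 1.0, 7: 1.0, 8: 1.0}  # subset of defaults above
exposed = 10000.0  # hypothetical number of people exposed at each shake level
for mmi in sorted(displacement_rate):
    fatalities = itb_fatality_rate(mmi) * exposed
    displaced = max(displacement_rate[mmi] * exposed - fatalities, 0)
    print mmi, round(fatalities, 1), round(displaced)
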
Code example #36
class PAGFatalityFunction(ITBFatalityFunction):
    """
    Population Vulnerability Model Pager
    Loss ratio(MMI) = standard normal distrib( 1 / BETA * ln(MMI/THETA)).
    Reference:
    Jaiswal, K. S., Wald, D. J., and Hearne, M. (2009a).
    Estimating casualties for large worldwide earthquakes using an empirical
    approach. U.S. Geological Survey Open-File Report 2009-1136.

    :author Helen Crowley
    :rating 3

    :param requires category=='hazard' and \
                    subcategory=='earthquake' and \
                    layertype=='raster' and \
                    unit=='MMI'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """
    synopsis = tr('To assess the impact of earthquake on population based on '
                  'Population Vulnerability Model Pager')
    citations = \
        tr(' * Jaiswal, K. S., Wald, D. J., and Hearne, M. (2009a). '
           '   Estimating casualties for large worldwide earthquakes using '
           '   an empirical approach. U.S. Geological Survey Open-File '
           '   Report 2009-1136.')
    limitation = ''
    detailed_description = ''
    title = tr('Die or be displaced according to the Pager model')
    defaults = get_defaults()

    # see https://github.com/AIFDR/inasafe/issues/628
    default_needs = default_minimum_needs()
    default_needs[tr('Water')] = 67

    parameters = OrderedDict([
        ('Theta', 11.067),
        ('Beta', 0.106),  # Model coefficients
        # Rates of people displaced for each MMI level
        ('displacement_rate', {
        1: 0, 1.5: 0, 2: 0, 2.5: 0, 3: 0,
        3.5: 0, 4: 0, 4.5: 0, 5: 0, 5.5: 0,
        6: 1.0, 6.5: 1.0, 7: 1.0, 7.5: 1.0,
        8: 1.0, 8.5: 1.0, 9: 1.0, 9.5: 1.0,
        10: 1.0}),
        ('mmi_range', list(numpy.arange(2, 10, 0.5))),
        ('step', 0.25),
        # Threshold below which layer should be transparent
        ('tolerance', 0.01),
        ('calculate_displaced_people', True),
        ('postprocessors', OrderedDict([
            ('Gender', {'on': True}),
            ('Age', {
                'on': True,
                'params': OrderedDict([
                    ('youth_ratio', defaults['YOUTH_RATIO']),
                    ('adult_ratio', defaults['ADULT_RATIO']),
                    ('elderly_ratio', defaults['ELDERLY_RATIO'])])}),
            ('MinimumNeeds', {'on': True})])),
        ('minimum needs', default_needs)])

    def fatality_rate(self, mmi):
        """Pager method to compute the fatality rate.

        Evaluates the standard normal density at
        x = ln(mmi / Theta) / Beta.
        """
        N = math.sqrt(2 * math.pi)
        THETA = self.parameters['Theta']
        BETA = self.parameters['Beta']

        x = math.log(mmi / THETA) / BETA
        return math.exp(-x * x / 2.0) / N
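
The class docstring above states the Pager loss ratio as a standard normal distribution of ln(MMI/THETA)/BETA, while fatality_rate as written returns the standard normal density at that point. For reference, a cumulative form could be sketched with the error function as below; this is only an illustration of the docstring's formula (using the Theta and Beta defaults from the parameters above), not necessarily the behaviour the rest of the function expects.

import math

def pager_loss_ratio(mmi, theta=11.067, beta=0.106):
    """Standard normal CDF of ln(mmi / theta) / beta (illustrative only)."""
    x = math.log(mmi / theta) / beta
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))
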
コード例 #37
0
    def __init__(self, parent, iface, dock=None, layer=None):
        """Constructor for the dialog.

        .. note:: In QtDesigner the advanced editor's predefined keywords
           list should be shown in english always, so when adding entries to
           cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
           the :safe_qgis:`translatable` property.

        :param parent: Parent widget of this dialog.
        :type parent: QWidget

        :param iface: Quantum GIS QGisAppInterface instance.
        :type iface: QGisAppInterface

        :param dock: Dock widget instance that we can notify of changes to
            the keywords. Optional.
        :type dock: Dock
        """
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setWindowTitle(self.tr(
            'InaSAFE %s Keywords Editor' % get_version()))
        # Save reference to the QGIS interface and parent
        self.iface = iface
        self.parent = parent
        self.dock = dock
        self.defaults = None

        # string constants
        self.global_default_string = metadata.global_default_attribute['name']
        self.do_not_use_string = metadata.do_not_use_attribute['name']
        self.global_default_data = metadata.global_default_attribute['id']
        self.do_not_use_data = metadata.do_not_use_attribute['id']

        if layer is None:
            self.layer = self.iface.activeLayer()
        else:
            self.layer = layer

        self.keyword_io = KeywordIO()

        # note the keys should remain untranslated as we need to write
        # english to the keywords file. The keys will be written as user data
        # in the combo entries.
        # .. seealso:: http://www.voidspace.org.uk/python/odict.html
        self.standard_exposure_list = OrderedDict(
            [('population', self.tr('population')),
             ('structure', self.tr('structure')),
             ('road', self.tr('road')),
             ('Not Set', self.tr('Not Set'))])
        self.standard_hazard_list = OrderedDict(
            [('earthquake [MMI]', self.tr('earthquake [MMI]')),
             ('tsunami [m]', self.tr('tsunami [m]')),
             ('tsunami [wet/dry]', self.tr('tsunami [wet/dry]')),
             ('tsunami [feet]', self.tr('tsunami [feet]')),
             ('flood [m]', self.tr('flood [m]')),
             ('flood [wet/dry]', self.tr('flood [wet/dry]')),
             ('flood [feet]', self.tr('flood [feet]')),
             ('tephra [kg2/m2]', self.tr('tephra [kg2/m2]')),
             ('volcano', self.tr('volcano')),
             ('generic [categorised]', self.tr('generic [categorised]')),
             ('Not Set', self.tr('Not Set'))])

        # noinspection PyUnresolvedReferences
        self.lstKeywords.itemClicked.connect(self.edit_key_value_pair)

        # Set up help dialog showing logic.
        help_button = self.buttonBox.button(QtGui.QDialogButtonBox.Help)
        help_button.clicked.connect(self.show_help)

        if self.layer is not None and is_polygon_layer(self.layer):
            # set some initial ui state:
            self.defaults = get_defaults()
            self.radPredefined.setChecked(True)
            self.dsbFemaleRatioDefault.blockSignals(True)
            self.dsbFemaleRatioDefault.setValue(self.defaults['FEMALE_RATIO'])
            self.dsbFemaleRatioDefault.blockSignals(False)

            self.dsbYouthRatioDefault.blockSignals(True)
            self.dsbYouthRatioDefault.setValue(self.defaults['YOUTH_RATIO'])
            self.dsbYouthRatioDefault.blockSignals(False)

            self.dsbAdultRatioDefault.blockSignals(True)
            self.dsbAdultRatioDefault.setValue(self.defaults['ADULT_RATIO'])
            self.dsbAdultRatioDefault.blockSignals(False)

            self.dsbElderlyRatioDefault.blockSignals(True)
            self.dsbElderlyRatioDefault.setValue(
                self.defaults['ELDERLY_RATIO'])
            self.dsbElderlyRatioDefault.blockSignals(False)
        else:
            self.radPostprocessing.hide()
            self.tab_widget.removeTab(1)

        if self.layer:
            self.load_state_from_keywords()

        # add a reload from keywords button
        reload_button = self.buttonBox.addButton(
            self.tr('Reload'), QtGui.QDialogButtonBox.ActionRole)
        reload_button.clicked.connect(self.load_state_from_keywords)
        self.resize_dialog()
        self.tab_widget.setCurrentIndex(0)
        # TODO No we should not have test related stuff in prod code. TS
        self.test = False
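
The ratio spin boxes above are populated between blockSignals(True) and blockSignals(False) so that programmatically setting the default values does not fire valueChanged handlers. A small helper capturing that pattern might look like the sketch below (a hypothetical helper, assuming a PyQt4 QDoubleSpinBox-like widget):

def set_value_silently(spin_box, value):
    """Set a spin box value without emitting valueChanged.

    spin_box is assumed to expose the Qt blockSignals/setValue API,
    e.g. a QtGui.QDoubleSpinBox.
    """
    spin_box.blockSignals(True)
    try:
        spin_box.setValue(value)
    finally:
        spin_box.blockSignals(False)

# Usage (sketch): set_value_silently(self.dsbFemaleRatioDefault,
#                                    self.defaults['FEMALE_RATIO'])
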
コード例 #38
0
class ITBFatalityFunctionConfigurable(FunctionProvider):
    """Indonesian Earthquake Fatality Model

    This model was developed by Institut Teknologi Bandung (ITB) and
    implemented by Dr. Hadi Ghasemi, Geoscience Australia.


    Reference:

    Indonesian Earthquake Building-Damage and Fatality Models and
    Post Disaster Survey Guidelines Development,
    Bali, 27-28 February 2012, 54pp.


    Algorithm:

    In this study, the same functional form as Allen (2009) is adopted
    to express fatality rate as a function of intensity (see Eq. 10 in the
    report). The Matlab built-in function (fminsearch) for the Nelder-Mead
    algorithm is used to estimate the model parameters. The objective
    function (L2G norm) that is minimised during the optimisation is the
    same as the one used by Jaiswal et al. (2010).

    The coefficients used in the Indonesian model are
    x=0.62275231, y=8.03314466, zeta=2.15

    Allen, T. I., Wald, D. J., Earle, P. S., Marano, K. D., Hotovec, A. J.,
    Lin, K., and Hearne, M., 2009. An Atlas of ShakeMaps and population
    exposure catalog for earthquake loss modeling, Bull. Earthq. Eng. 7,
    701-718.

    Jaiswal, K., and Wald, D., 2010. An empirical model for global earthquake
    fatality estimation, Earthq. Spectra 26, 1017-1037.


    Caveats and limitations:

    The current model is the result of the above mentioned workshop and
    reflects the best available information. However, the current model
    has a number of issues listed below and is expected to evolve further
    over time.

    1 - The model is based on a limited number of observed fatality
        rates from 4 past fatal events.
    2 - The model clearly over-predicts the fatality rates at
        intensities higher than VIII.
    3 - The model only estimates the expected fatality rate for a given
        intensity level; however the associated uncertainty for the proposed
        model is not addressed.
    4 - There are a few known mistakes in developing the current model:
        - rounding MMI values to the nearest 0.5,
        - Implementing Finite-Fault models of candidate events, and
        - consistency between selected GMPEs with those in use by BMKG.
          These issues will be addressed by ITB team in the final report.


    :author Hadi Ghasemi
    :rating 3

    :param requires category=='hazard' and \
                    subcategory=='earthquake' and \
                    layertype=='raster' and \
                    unit=='MMI'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'

    """

    title = tr('Die or be displaced')
    defaults = get_defaults()
    parameters = OrderedDict([
        ('x', 0.62275231),
        ('y', 8.03314466),  # Model coefficients
        # Rates of people displaced for each MMI level
        ('displacement_rate', {
            1: 0,
            2: 0,
            3: 0,
            4: 0,
            5: 0,
            6: 1.0,
            7: 1.0,
            8: 1.0,
            9: 1.0,
            10: 1.0
        }),
        # Threshold below which layer should be transparent
        ('tolerance', 0.01),
        ('calculate_displaced_people', True),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elderly_ratio', defaults['ELDERLY_RATIO'])])
             }), ('MinimumNeeds', {
                 'on': True
             })
         ]))
    ])

    def run(self, layers):
        """Indonesian Earthquake Fatality Model

        Input
          layers: List of layers expected to contain
              H: Raster layer of MMI ground shaking
              P: Raster layer of population density

        """

        # Establish model coefficients
        x = self.parameters['x']
        y = self.parameters['y']

        # Define percentages of people being displaced at each mmi level
        displacement_rate = self.parameters['displacement_rate']

        # Tolerance for transparency
        tolerance = self.parameters['tolerance']

        # Extract input layers
        intensity = get_hazard_layer(layers)
        population = get_exposure_layer(layers)

        question = get_question(intensity.get_name(), population.get_name(),
                                self)

        # Extract data grids
        H = intensity.get_data()  # Ground Shaking
        P = population.get_data(scaling=True)  # Population Density

        # Calculate population affected by each MMI level
        # FIXME (Ole): this range is 2-9. Should 10 be included?
        mmi_range = range(2, 10)
        number_of_exposed = {}
        number_of_displaced = {}
        number_of_fatalities = {}

        # Calculate fatality rates for observed Intensity values (H)
        # based on ITB power model
        R = numpy.zeros(H.shape)
        for mmi in mmi_range:

            # Identify cells where MMI is in class i
            mask = (H > mmi - 0.5) * (H <= mmi + 0.5)

            # Count population affected by this shake level
            I = numpy.where(mask, P, 0)

            # Calculate expected number of fatalities per level
            fatality_rate = numpy.power(10.0, x * mmi - y)
            F = fatality_rate * I

            # Calculate expected number of displaced people per level
            try:
                D = displacement_rate[mmi] * I
            except KeyError, e:
                msg = 'mmi = %i, I = %s, Error msg: %s' % (mmi, str(I), str(e))
                raise InaSAFEError(msg)

            # Adjust displaced people to disregard fatalities.
            # Set to zero if there are more fatalities than displaced.
            D = numpy.where(D > F, D - F, 0)

            # Sum up numbers for map
            R += D  # Displaced

            # Generate text with result for this study
            # This is what is used in the real time system exposure table
            number_of_exposed[mmi] = numpy.nansum(I.flat)
            number_of_displaced[mmi] = numpy.nansum(D.flat)
            number_of_fatalities[mmi] = numpy.nansum(F.flat)

        # Set resulting layer to NaN when less than a threshold. This is to
        # achieve transparency (see issue #126).
        R[R < tolerance] = numpy.nan

        # Total statistics
        total = int(round(numpy.nansum(P.flat) / 1000) * 1000)

        # Compute number of fatalities
        fatalities = int(
            round(numpy.nansum(number_of_fatalities.values()) / 1000)) * 1000

        # Compute number of people displaced due to building collapse
        displaced = int(
            round(numpy.nansum(number_of_displaced.values()) / 1000)) * 1000

        # Generate impact report
        table_body = [question]

        # Add total fatality estimate
        s = str(int(fatalities)).rjust(10)
        table_body.append(
            TableRow([tr('Number of fatalities'), s], header=True))

        if self.parameters['calculate_displaced_people']:
            # Add total estimate of people displaced
            s = str(int(displaced)).rjust(10)
            table_body.append(
                TableRow([tr('Number of people displaced'), s], header=True))
        else:
            displaced = 0

        # Add estimate of total population in area
        s = str(int(total)).rjust(10)
        table_body.append(
            TableRow([tr('Total number of people'), s], header=True))

        # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
        rice = displaced * 2.8
        drinking_water = displaced * 17.5
        water = displaced * 67
        family_kits = displaced / 5
        toilets = displaced / 20

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([tr('Fatalities'), '%i' % fatalities], header=True),
            TableRow([tr('People displaced'),
                      '%i' % displaced], header=True),
            TableRow(
                tr('Map shows density estimate of '
                   'displaced population')),
            TableRow([tr('Needs per week'), tr('Total')], header=True),
            [tr('Rice [kg]'), int(rice)],
            [tr('Drinking Water [l]'),
             int(drinking_water)], [tr('Clean Water [l]'),
                                    int(water)],
            [tr('Family Kits'), int(family_kits)],
            [tr('Toilets'), int(toilets)]
        ]
        impact_table = Table(table_body).toNewlineFreeString()

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        if fatalities > 0:
            table_body.append(
                tr('Are there enough victim identification '
                   'units available for %i people?') % fatalities)
        if displaced > 0:
            table_body.append(
                tr('Are there enough shelters and relief items '
                   'available for %i people?') % displaced)
            table_body.append(
                TableRow(
                    tr('If yes, where are they located and '
                       'how will we distribute them?')))
            table_body.append(
                TableRow(
                    tr('If no, where can we obtain '
                       'additional relief items from and '
                       'how will we transport them?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %i') % total,
            tr('People are considered to be displaced if '
               'they experience and survive a shake level '
               'of more than 5 on the MMI scale'),
            tr('Minimum needs are defined in BNPB '
               'regulation 7/2008'),
            tr('Fatality model is from '
               'Institut Teknologi Bandung 2012.'),
            tr('Population numbers rounded to nearest 1000.')
        ])

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary
        map_title = tr('Earthquake impact to population')

        # Create style info dynamically
        classes = numpy.linspace(numpy.nanmin(R.flat[:]),
                                 numpy.nanmax(R.flat[:]), 5)

        style_classes = [
            dict(colour='#EEFFEE',
                 quantity=classes[0],
                 transparency=100,
                 label=tr('%.2f people/cell') % classes[0]),
            dict(colour='#FFFF7F', quantity=classes[1], transparency=30),
            dict(colour='#E15500',
                 quantity=classes[2],
                 transparency=30,
                 label=tr('%.2f people/cell') % classes[2]),
            dict(colour='#E4001B', quantity=classes[3], transparency=30),
            dict(colour='#730000',
                 quantity=classes[4],
                 transparency=30,
                 label=tr('%.2f people/cell') % classes[4])
        ]
        style_info = dict(target_field=None, style_classes=style_classes)

        # Create new layer and return
        L = Raster(R,
                   projection=population.get_projection(),
                   geotransform=population.get_geotransform(),
                   keywords={
                       'impact_summary': impact_summary,
                       'total_population': total,
                       'total_fatalities': fatalities,
                       'impact_table': impact_table,
                       'map_title': map_title
                   },
                   name=tr('Estimated displaced population'),
                   style_info=style_info)

        # Maybe return a shape file with contours instead
        return L
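
The run method above applies the ITB power model per MMI class, fatality_rate = 10 ** (x * mmi - y), with the x and y coefficients listed in the parameters. A short standalone sketch of that calculation, using the same coefficient values as the snippet and an illustrative exposure per MMI level:

import numpy

def itb_fatality_rate(mmi, x=0.62275231, y=8.03314466):
    """ITB power model fatality rate for a given MMI level."""
    return numpy.power(10.0, x * mmi - y)

# Expected fatalities for a made-up population exposed at each MMI level
population_per_mmi = {6: 10000, 7: 5000, 8: 1000}
fatalities = dict(
    (mmi, itb_fatality_rate(mmi) * exposed)
    for mmi, exposed in population_per_mmi.items())
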
コード例 #39
0
class ContinuousHazardPopulationImpactFunction(FunctionProvider):
    # noinspection PyUnresolvedReferences
    """Plugin for impact of population as derived by continuous hazard.

    :author AIFDR
    :rating 2
    :param requires category=='hazard' and \
                    layertype=='raster' and \
                    data_type=='continuous'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """
    class Metadata(ImpactFunctionMetadata):
        """Metadata for Continuous Hazard Population Impact Function.

        .. versionadded:: 2.1

        We only need to re-implement get_metadata(), all other behaviours
        are inherited from the abstract base class.
        """
        @staticmethod
        def get_metadata():
            """Return metadata as a dictionary.

            This is a static method. You can use it to get the metadata in
            dictionary format for an impact function.

            :returns: A dictionary representing all the metadata for the
                concrete impact function.
            :rtype: dict
            """
            dict_meta = {
                'id':
                'ContinuousHazardPopulationImpactFunction',
                'name':
                tr('Continuous Hazard Population Impact Function'),
                'impact':
                tr('Be impacted'),
                'author':
                'AIFDR',
                'date_implemented':
                'N/A',
                'overview':
                tr('To assess the impacts of continuous hazards in raster '
                   'format on population raster layer.'),
                'categories': {
                    'hazard': {
                        'definition': hazard_definition,
                        'subcategories': hazard_all,  # already a list
                        'units': [],
                        'layer_constraints': [layer_raster_continuous]
                    },
                    'exposure': {
                        'definition': exposure_definition,
                        'subcategories': [exposure_population],
                        'units': [unit_people_per_pixel],
                        'layer_constraints': [layer_raster_continuous]
                    }
                }
            }
            return dict_meta

    # Function documentation
    title = tr('Be impacted')
    synopsis = tr(
        'To assess the impacts of continuous hazards in raster format on '
        'population raster layer.')
    actions = tr(
        'Provide details about how many people would likely be '
        'impacted in each category.')
    hazard_input = tr(
        'A hazard raster layer where each cell represents the level of the '
        'hazard. The hazard has continuous value of hazard level.')
    exposure_input = tr(
        'An exposure raster layer where each cell represent population count.')
    output = tr(
        'Map of population exposed to high category and a table with number '
        'of people in each category')
    detailed_description = tr(
        'This function will categorise the continuous hazard level into 3 '
        'categories based on the thresholds given by the user. '
        'After that, this function will calculate how many people will be '
        'impacted per category for all categories in the hazard layer.')
    limitation = tr('The number of categories is three.')

    # Configurable parameters
    defaults = get_defaults()
    parameters = OrderedDict([
        ('Categorical thresholds', [0.34, 0.67, 1]),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elderly_ratio', defaults['ELDERLY_RATIO'])])
             }),
             ('MinimumNeeds', {
                 'on': True
             }),
         ])), ('minimum needs', default_minimum_needs()),
        ('provenance', default_provenance())
    ])
    parameters = add_needs_parameters(parameters)

    def run(self, layers):
        """Plugin for impact of population as derived by categorised hazard.

        :param layers: List of layers expected to contain

            * hazard_layer: Raster layer of categorised hazard
            * exposure_layer: Raster layer of population data

        Counts number of people exposed to each category of the hazard

        :returns:
          Map of population exposed to high category
          Table with number of people in each category
        """

        # The 3 category thresholds
        high_t = self.parameters['Categorical thresholds'][2]
        medium_t = self.parameters['Categorical thresholds'][1]
        low_t = self.parameters['Categorical thresholds'][0]

        # Identify hazard and exposure layers
        hazard_layer = get_hazard_layer(layers)  # Categorised Hazard
        exposure_layer = get_exposure_layer(layers)  # Population Raster

        question = get_question(hazard_layer.get_name(),
                                exposure_layer.get_name(), self)

        # Extract data as numeric arrays
        C = hazard_layer.get_data(nan=0.0)  # Category

        # Calculate impact as population exposed to each category
        P = exposure_layer.get_data(nan=0.0, scaling=True)
        H = numpy.where(C <= high_t, P, 0)
        M = numpy.where(C < medium_t, P, 0)
        L = numpy.where(C < low_t, P, 0)

        # Count totals
        total = int(numpy.sum(P))
        high = int(numpy.sum(H)) - int(numpy.sum(M))
        medium = int(numpy.sum(M)) - int(numpy.sum(L))
        low = int(numpy.sum(L))
        total_impact = high + medium + low

        # Don't show digits less than a 1000
        total = population_rounding(total)
        total_impact = population_rounding(total_impact)
        high = population_rounding(high)
        medium = population_rounding(medium)
        low = population_rounding(low)

        minimum_needs = [
            parameter.serialize()
            for parameter in self.parameters['minimum needs']
        ]

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([tr('People impacted '),
                      '%s' % format_int(total_impact)],
                     header=True),
            TableRow(
                [tr('People in high hazard area '),
                 '%s' % format_int(high)],
                header=True),
            TableRow([
                tr('People in medium hazard area '),
                '%s' % format_int(medium)
            ],
                     header=True),
            TableRow([tr('People in low hazard area'),
                      '%s' % format_int(low)],
                     header=True)
        ]

        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Map shows population count in high or medium hazard area'),
            tr('Total population: %s') % format_int(total),
            TableRow(
                tr('Table below shows the minimum needs for all '
                   'affected people'))
        ])

        total_needs = evacuated_population_needs(total_impact, minimum_needs)
        for frequency, needs in total_needs.items():
            table_body.append(
                TableRow([
                    tr('Needs should be provided %s' % frequency),
                    tr('Total')
                ],
                         header=True))
            for resource in needs:
                table_body.append(
                    TableRow([
                        tr(resource['table name']),
                        format_int(resource['amount'])
                    ]))

        impact_summary = Table(table_body).toNewlineFreeString()
        map_title = tr('People in high hazard areas')

        # Generate 8 equidistant classes across the range of flooded population
        # 8 is the number of classes in the predefined flood population style
        # as imported
        # noinspection PyTypeChecker
        classes = numpy.linspace(numpy.nanmin(M.flat[:]),
                                 numpy.nanmax(M.flat[:]), 8)

        # Modify labels in the predefined flood population style to show
        # quantities (style_info is imported with that style; the import is
        # not shown in this snippet)
        style_classes = style_info['style_classes']

        style_classes[1]['label'] = tr('Low [%i people/cell]') % classes[1]
        style_classes[4]['label'] = tr('Medium [%i people/cell]') % classes[4]
        style_classes[7]['label'] = tr('High [%i people/cell]') % classes[7]

        style_info['legend_title'] = tr('Population Count')

        # Create raster object and return
        raster_layer = Raster(M,
                              projection=hazard_layer.get_projection(),
                              geotransform=hazard_layer.get_geotransform(),
                              name=tr('Population which %s') %
                              (get_function_title(self).lower()),
                              keywords={
                                  'impact_summary': impact_summary,
                                  'impact_table': impact_table,
                                  'map_title': map_title,
                                  'total_needs': total_needs
                              },
                              style_info=style_info)
        return raster_layer
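
The per-class counts above are derived by building cumulative masks against the three thresholds and then differencing the totals (high = below the high threshold minus below the medium threshold, and so on). A minimal sketch of that counting step on plain arrays, with made-up threshold and grid values:

import numpy

def count_by_category(hazard, population, low_t, medium_t, high_t):
    """Return (high, medium, low) population counts via cumulative masks."""
    below_high = numpy.where(hazard <= high_t, population, 0)
    below_medium = numpy.where(hazard < medium_t, population, 0)
    below_low = numpy.where(hazard < low_t, population, 0)
    high = below_high.sum() - below_medium.sum()
    medium = below_medium.sum() - below_low.sum()
    low = below_low.sum()
    return int(high), int(medium), int(low)

# Example with an illustrative hazard grid normalised to [0, 1]
hazard = numpy.array([0.1, 0.4, 0.5, 0.7, 0.9, 1.0])
population = numpy.array([100, 200, 50, 80, 60, 10])
high, medium, low = count_by_category(hazard, population, 0.34, 0.67, 1.0)
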
コード例 #40
0
class CategorisedHazardPopulationImpactFunction(FunctionProvider):
    """Plugin for impact of population as derived by categorised hazard

    :author AIFDR
    :rating 2
    :param requires category=='hazard' and \
                    unit=='normalised' and \
                    layertype=='raster'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """
    # Function documentation
    title = tr('Be impacted')
    synopsis = tr('To assess the impacts of categorized hazards in raster '
                  'format on population raster layer.')
    actions = tr('Provide details about how many people would likely '
                 'be impacted in each category.')
    hazard_input = tr('A hazard raster layer where each cell represents '
                      'the category of the hazard. There should be 3 '
                      'categories: 1, 2, and 3.')
    exposure_input = tr('An exposure raster layer where each cell represent '
                        'population count.')
    output = tr('Map of population exposed to high category and a table with '
                'number of people in each category')
    detailed_description = \
        tr('This function will calculate how many people will be impacted '
           'in each category for all categories in the hazard layer. '
           'Currently there should be 3 categories in the hazard layer. After '
           'that it will show the result and the total number of people that '
           'will be impacted for the hazard given.')
    limitation = tr('The number of categories is three.')

    # Configurable parameters
    defaults = get_defaults()
    parameters = OrderedDict([
        ('postprocessors', OrderedDict([
            ('Gender', {'on': True}),
            ('Age', {
                'on': True,
                'params': OrderedDict([
                    ('youth_ratio', defaults['YOUTH_RATIO']),
                    ('adult_ratio', defaults['ADULT_RATIO']),
                    ('elderly_ratio', defaults['ELDERLY_RATIO'])])})]))])

    def run(self, layers):
        """Plugin for impact of population as derived by categorised hazard

        Input
          layers: List of layers expected to contain
              my_hazard: Raster layer of categorised hazard
              my_exposure: Raster layer of population data

        Counts number of people exposed to each category of the hazard

        Return
          Map of population exposed to high category
          Table with number of people in each category
        """

        # The 3 category thresholds
        high_t = 1
        medium_t = 0.66
        low_t = 0.34

        # Identify hazard and exposure layers
        my_hazard = get_hazard_layer(layers)    # Categorised Hazard
        my_exposure = get_exposure_layer(layers)  # Population Raster

        question = get_question(my_hazard.get_name(),
                                my_exposure.get_name(),
                                self)

        # Extract data as numeric arrays
        C = my_hazard.get_data(nan=0.0)  # Category

        # Calculate impact as population exposed to each category
        P = my_exposure.get_data(nan=0.0, scaling=True)
        H = numpy.where(C == high_t, P, 0)
        M = numpy.where(C > medium_t, P, 0)
        L = numpy.where(C < low_t, P, 0)

        # Count totals
        total = int(numpy.sum(P))
        high = int(numpy.sum(H))
        medium = int(numpy.sum(M)) - int(numpy.sum(H))
        low = int(numpy.sum(L)) - int(numpy.sum(M))
        total_impact = high + medium + low

        # Don't show digits less than a 1000
        total = round_thousand(total)
        total_impact = round_thousand(total_impact)
        high = round_thousand(high)
        medium = round_thousand(medium)
        low = round_thousand(low)

        # Generate impact report for the pdf map
        table_body = [question,
                      TableRow([tr('People impacted '),
                                '%s' % format_int(total_impact)],
                               header=True),
                      TableRow([tr('People in high hazard area '),
                                '%s' % format_int(high)],
                               header=True),
                      TableRow([tr('People in medium hazard area '),
                                '%s' % format_int(medium)],
                               header=True),
                      TableRow([tr('People in low hazard area'),
                                '%s' % format_int(low)],
                               header=True)]

        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend([TableRow(tr('Notes'), header=True),
                           tr('Map shows population density in high or medium '
                              'hazard area'),
                           tr('Total population: %s') % format_int(total)])
        impact_summary = Table(table_body).toNewlineFreeString()
        map_title = tr('People in high hazard areas')

        # Generate 8 equidistant classes across the range of flooded population
        # 8 is the number of classes in the predefined flood population style
        # as imported
        # noinspection PyTypeChecker
        classes = numpy.linspace(numpy.nanmin(M.flat[:]),
                                 numpy.nanmax(M.flat[:]), 8)

        # Modify labels in the predefined flood population style to show
        # quantities (style_info is imported with that style; the import is
        # not shown in this snippet)
        style_classes = style_info['style_classes']

        style_classes[1]['label'] = tr('Low [%i people/cell]') % classes[1]
        style_classes[4]['label'] = tr('Medium [%i people/cell]') % classes[4]
        style_classes[7]['label'] = tr('High [%i people/cell]') % classes[7]

        style_info['legend_title'] = tr('Population Density')

        # Create raster object and return
        R = Raster(M,
                   projection=my_hazard.get_projection(),
                   geotransform=my_hazard.get_geotransform(),
                   name=tr('Population which %s') % (
                       get_function_title(self).lower()),
                   keywords={'impact_summary': impact_summary,
                             'impact_table': impact_table,
                             'map_title': map_title},
                   style_info=style_info)
        return R
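
Both this function and the previous one relabel a predefined 8-class flood population style using equidistant breaks over the medium-hazard population grid. A small sketch of that relabelling step, assuming a style_classes list of dictionaries with a 'label' key like the one the predefined style provides:

import numpy

def relabel_style_classes(style_classes, values, class_count=8):
    """Rewrite 'label' entries on selected classes using equidistant breaks.

    style_classes is assumed to be a list of dicts with a 'label' key,
    as in the predefined flood population style.
    """
    breaks = numpy.linspace(numpy.nanmin(values), numpy.nanmax(values),
                            class_count)
    style_classes[1]['label'] = 'Low [%i people/cell]' % breaks[1]
    style_classes[4]['label'] = 'Medium [%i people/cell]' % breaks[4]
    style_classes[7]['label'] = 'High [%i people/cell]' % breaks[7]
    return style_classes
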
コード例 #41
0
ファイル: keywords_dialog.py プロジェクト: kant/inasafe
    def __init__(self, parent, iface, dock=None, layer=None):
        """Constructor for the dialog.

        .. note:: In QtDesigner the advanced editor's predefined keywords
           list should be shown in english always, so when adding entries to
           cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
           the :safe_qgis:`translatable` property.

        :param parent: Parent widget of this dialog.
        :type parent: QWidget

        :param iface: Quantum GIS QGisAppInterface instance.
        :type iface: QGisAppInterface

        :param dock: Dock widget instance that we can notify of changes to
            the keywords. Optional.
        :type dock: Dock
        """
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setWindowTitle(
            self.tr('InaSAFE %s Keywords Editor' % get_version()))
        # Save reference to the QGIS interface and parent
        self.iface = iface
        self.parent = parent
        self.dock = dock
        self.defaults = None

        # string constants
        self.global_default_string = definitions.global_default_attribute[
            'name']
        self.do_not_use_string = definitions.do_not_use_attribute['name']
        self.global_default_data = definitions.global_default_attribute['id']
        self.do_not_use_data = definitions.do_not_use_attribute['id']

        if layer is None:
            self.layer = self.iface.activeLayer()
        else:
            self.layer = layer

        self.keyword_io = KeywordIO()

        # note the keys should remain untranslated as we need to write
        # english to the keywords file. The keys will be written as user data
        # in the combo entries.
        # .. seealso:: http://www.voidspace.org.uk/python/odict.html
        self.standard_exposure_list = OrderedDict([
            ('population', self.tr('population')),
            ('structure', self.tr('structure')), ('road', self.tr('road')),
            ('Not Set', self.tr('Not Set'))
        ])
        self.standard_hazard_list = OrderedDict([
            ('earthquake [MMI]', self.tr('earthquake [MMI]')),
            ('tsunami [m]', self.tr('tsunami [m]')),
            ('tsunami [wet/dry]', self.tr('tsunami [wet/dry]')),
            ('tsunami [feet]', self.tr('tsunami [feet]')),
            ('flood [m]', self.tr('flood [m]')),
            ('flood [wet/dry]', self.tr('flood [wet/dry]')),
            ('flood [feet]', self.tr('flood [feet]')),
            ('tephra [kg2/m2]', self.tr('tephra [kg2/m2]')),
            ('volcano', self.tr('volcano')),
            ('generic [categorised]', self.tr('generic [categorised]')),
            ('Not Set', self.tr('Not Set'))
        ])

        # noinspection PyUnresolvedReferences
        self.lstKeywords.itemClicked.connect(self.edit_key_value_pair)

        # Set up help dialog showing logic.
        help_button = self.buttonBox.button(QtGui.QDialogButtonBox.Help)
        help_button.clicked.connect(self.show_help)

        if self.layer is not None and is_polygon_layer(self.layer):
            # set some initial ui state:
            self.defaults = get_defaults()
            self.radPredefined.setChecked(True)
            self.dsbFemaleRatioDefault.blockSignals(True)
            self.dsbFemaleRatioDefault.setValue(self.defaults['FEMALE_RATIO'])
            self.dsbFemaleRatioDefault.blockSignals(False)

            self.dsbYouthRatioDefault.blockSignals(True)
            self.dsbYouthRatioDefault.setValue(self.defaults['YOUTH_RATIO'])
            self.dsbYouthRatioDefault.blockSignals(False)

            self.dsbAdultRatioDefault.blockSignals(True)
            self.dsbAdultRatioDefault.setValue(self.defaults['ADULT_RATIO'])
            self.dsbAdultRatioDefault.blockSignals(False)

            self.dsbElderlyRatioDefault.blockSignals(True)
            self.dsbElderlyRatioDefault.setValue(
                self.defaults['ELDERLY_RATIO'])
            self.dsbElderlyRatioDefault.blockSignals(False)
        else:
            self.radPostprocessing.hide()
            self.tab_widget.removeTab(1)

        if self.layer:
            self.load_state_from_keywords()

        # add a reload from keywords button
        reload_button = self.buttonBox.addButton(
            self.tr('Reload'), QtGui.QDialogButtonBox.ActionRole)
        reload_button.clicked.connect(self.load_state_from_keywords)
        self.resize_dialog()
        self.tab_widget.setCurrentIndex(0)
        # TODO No we should not have test related stuff in prod code. TS
        self.test = False
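
Both keyword dialog constructors keep the OrderedDict keys untranslated (they are written to the keywords file) while the values are the translated display strings. When such a list is pushed into a combo box, the usual pattern is to store the untranslated key as item data, roughly as sketched below (assuming a QtGui.QComboBox; the widget name cboExposure is illustrative):

def populate_combo(combo, options):
    """Fill a combo box from an OrderedDict of untranslated key -> label.

    The translated label is shown to the user; the untranslated key is
    stored as item data so it can later be written to the keywords file.
    """
    combo.clear()
    for key, label in options.items():
        combo.addItem(label, key)

# Usage (sketch): populate_combo(self.cboExposure, self.standard_exposure_list)
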
コード例 #42
0
class AbstractPostprocessor():
    """
    Abstract postprocessor class, do not instantiate directly.
    but instantiate the PostprocessorFactory class which will take care of
    setting up many prostprocessors. Alternatively you can as well instantiate
    directly a sub class of AbstractPostprocessor.

    Each subclass has to overload the process method and call its parent
    like this: AbstractPostprocessor.process(self)
    if a postprocessor needs parmeters, then it should override the setup and
    clear methods as well and call respectively
    AbstractPostprocessor.setup(self) and AbstractPostprocessor.clear(self).

    for implementation examples see AgePostprocessor which uses mandatory and
    optional parameters
    """

    NO_DATA_TEXT = get_defaults('NO_DATA')

    def __init__(self):
        """
        Constructor for abstract postprocessor class, do not instantiate
        directly. It takes care of defining self._results
        Needs to be called from the concrete implementation with
        AbstractPostprocessor.__init__(self)
        """
        self._results = None

    def description(self):
        """
        Describe briefly what the post processor does.

        Args:
            None

        Returns:
            Str the translated description

        Raises:
            Errors are propagated
        """
        raise NotImplementedError('Please don\'t use this class directly')

    def setup(self, params):
        """
        Abstract method to be called from the concrete implementation
        with AbstractPostprocessor.setup(self, None) it takes care of results
        being initialized

        Args:
            params: dict of parameters to pass to the post processor
        Returns:
            None
        Raises:
            None
        """
        del params
        if self._results is not None:
            self._raise_error('clear needs to be called before setup')
        self._results = OrderedDict()

    def process(self):
        """
        Abstract method to be called from the concrete implementation
        with AbstractPostprocessor.process(self) it takes care of results
        being initialized

        Args:
            None
        Returns:
            None
        Raises:
            None
        """
        if self._results is None:
            self._raise_error('setup needs to be called before process')

    def clear(self):
        """
        Abstract method to be called from the concrete implementation
        with AbstractPostprocessor.process(self) it takes care of results
        being cleared

        Args:
            None
        Returns:
            None
        Raises:
            None
        """
        self._results = None

    def results(self):
        """Returns the postprocessors results

        Args:
            None
        Returns:
            Odict of results
        Raises:
            None
        """
        return self._results

    def _raise_error(self, message=None):
        """internal method to be used by the postprocessors to raise an error

        Args:
            None
        Returns:
            None
        Raises:
            PostProcessorError
        """
        if message is None:
            message = 'Postprocessor error'
        raise PostProcessorError(message)

    def _log_message(self, message):
        """internal method to be used by the postprocessors to log a message

        Args:
            None
        Returns:
            None
        Raises:
            None
        """
        LOGGER.debug(message)

    def _append_result(self, name, result, metadata=None):
        """add an indicator results to the postprocessors result.

        internal method to be used by the postprocessors to add an indicator
        results to the postprocessors result

        Args:
            * name: str the name of the indicator
            * result the value calculated by the indicator
            * metadata Dict of metadata
        Returns:
            None
        Raises:
            None
        """

        if metadata is None:
            metadata = dict()
        # LOGGER.debug('name : ' + str(name) + '\nresult : ' + str(result))
        if result is not None and result != self.NO_DATA_TEXT:
            try:
                result = format_int(result)
            except ValueError as e:
                LOGGER.debug(e)
                result = result
        self._results[name] = {'value': result, 'metadata': metadata}
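
The class docstring describes the expected subclass lifecycle: override setup/process/clear, call the parent implementations, and push indicator values through _append_result. A minimal hypothetical subclass following that contract might look like this (the TotalPostprocessor name and the 'impact_total' parameter are illustrative, not part of the snippet):

class TotalPostprocessor(AbstractPostprocessor):
    """Toy postprocessor that reports a single 'Total' indicator."""

    def __init__(self):
        AbstractPostprocessor.__init__(self)
        self.impact_total = None

    def setup(self, params):
        AbstractPostprocessor.setup(self, None)
        self.impact_total = params['impact_total']

    def process(self):
        AbstractPostprocessor.process(self)
        self._append_result('Total', self.impact_total)

    def clear(self):
        AbstractPostprocessor.clear(self)
        self.impact_total = None
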
コード例 #43
0
class VolcanoPolygonHazardPopulation(FunctionProvider):
    # noinspection PyUnresolvedReferences
    """Impact function for volcano hazard zones impact on population.

    :author AIFDR
    :rating 4
    :param requires category=='hazard' and \
                    subcategory in ['volcano'] and \
                    layertype=='vector'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """

    class Metadata(ImpactFunctionMetadata):
        """Metadata for Volcano Polygon Hazard Population.

        .. versionadded:: 2.1

        We only need to re-implement get_metadata(), all other behaviours
        are inherited from the abstract base class.
        """

        @staticmethod
        def get_metadata():
            """Return metadata as a dictionary.

            This is a static method. You can use it to get the metadata in
            dictionary format for an impact function.

            :returns: A dictionary representing all the metadata for the
                concrete impact function.
            :rtype: dict
            """
            dict_meta = {
                'id': 'VolcanoPolygonHazardPopulation',
                'name': tr('Volcano Polygon Hazard Population'),
                'impact': tr('Need evacuation'),
                'author': 'AIFDR',
                'date_implemented': 'N/A',
                'overview': tr('To assess the impacts of volcano eruption '
                               'on population.'),
                'categories': {
                    'hazard': {
                        'definition': hazard_definition,
                        'subcategories': [hazard_volcano],
                        'units': [unit_volcano_categorical],
                        'layer_constraints': [
                            layer_vector_polygon,
                            layer_vector_point
                        ]
                    },
                    'exposure': {
                        'definition': exposure_definition,
                        'subcategories': [exposure_population],
                        'units': [unit_people_per_pixel],
                        'layer_constraints': [layer_raster_continuous]
                    }
                }
            }
            return dict_meta

    title = tr('Need evacuation')
    target_field = 'population'
    defaults = get_defaults()
    # Function documentation
    synopsis = tr('To assess the impacts of volcano eruption on population.')
    actions = tr(
        'Provide details about how many people would likely be affected '
        'by each hazard zone.')
    hazard_input = tr(
        'A hazard vector layer can be polygon or point. If polygon, it must '
        'have a "KRB" attribute whose values are "Kawasan Rawan '
        'Bencana I", "Kawasan Rawan Bencana II", or "Kawasan Rawan Bencana '
        'III". If you want to see the name of the volcano in the result, you '
        'need to add a "NAME" attribute for point data or a "GUNUNG" '
        'attribute for polygon data.')
    exposure_input = tr(
        'An exposure raster layer where each cell represent population count.')
    output = tr(
        'Vector layer contains people affected and the minimum needs '
        'based on the number of people affected.')

    parameters = OrderedDict([
        ('distance [km]', [3, 5, 10]),
        ('postprocessors', OrderedDict([
            ('Gender', {'on': True}),
            ('Age', {
                'on': True,
                'params': OrderedDict([
                    ('youth_ratio', defaults['YOUTH_RATIO']),
                    ('adult_ratio', defaults['ADULT_RATIO']),
                    ('elderly_ratio', defaults['ELDERLY_RATIO'])])}),
            ('MinimumNeeds', {'on': True})
        ])),
        ('minimum needs', default_minimum_needs()),
        ('provenance', default_provenance())
    ])
    parameters = add_needs_parameters(parameters)

    def run(self, layers):
        """Risk plugin for volcano population evacuation.

        :param layers: List of layers expected to contain where two layers
            should be present.

            * hazard_layer: Vector polygon layer of volcano impact zones
            * exposure_layer: Raster layer of population data on the same grid
                as hazard_layer

        Counts number of people exposed to volcano event.

        :returns: Map of population exposed to the volcano hazard zone.
            The returned dict will include a table with number of people
            evacuated and supplies required.
        :rtype: dict

        :raises:
            * Exception - When hazard layer is not vector layer
            * RadiiException - When radii are not valid (they need to be
                monotonically increasing)
        """

        # Identify hazard and exposure layers
        hazard_layer = get_hazard_layer(layers)  # Volcano KRB
        exposure_layer = get_exposure_layer(layers)

        question = get_question(
            hazard_layer.get_name(), exposure_layer.get_name(), self)

        # Input checks
        if not hazard_layer.is_vector:
            msg = ('Input hazard %s  was not a vector layer as expected '
                   % hazard_layer.get_name())
            raise Exception(msg)

        msg = ('Input hazard must be a polygon or point layer. I got %s with '
               'layer type %s' % (hazard_layer.get_name(),
                                  hazard_layer.get_geometry_name()))
        if not (hazard_layer.is_polygon_data or hazard_layer.is_point_data):
            raise Exception(msg)

        data_table = hazard_layer.get_data()
        if hazard_layer.is_point_data:
            # Use concentric circles
            radii = self.parameters['distance [km]']
            category_title = 'Radius'
            category_header = tr('Distance [km]')
            category_names = radii

            name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset

            centers = hazard_layer.get_geometry()
            rad_m = [x * 1000 for x in radii]  # Convert to meters
            hazard_layer = buffer_points(
                centers, rad_m, category_title, data_table=data_table)
        else:
            # Use hazard map
            category_title = 'KRB'
            category_header = tr('Category')

            # FIXME (Ole): Change to English and use translation system
            category_names = ['Kawasan Rawan Bencana III',
                              'Kawasan Rawan Bencana II',
                              'Kawasan Rawan Bencana I']

            name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map

        # Get names of volcanoes considered
        if name_attribute in hazard_layer.get_attribute_names():
            volcano_name_list = []
            # Run through all polygons and get unique names
            for row in data_table:
                volcano_name_list.append(row[name_attribute])

            volcano_names = ''
            for name in volcano_name_list:
                volcano_names += '%s, ' % name
            volcano_names = volcano_names[:-2]  # Strip trailing ', '
        else:
            volcano_names = tr('Not specified in data')

        # Check if category_title exists in hazard_layer
        if category_title not in hazard_layer.get_attribute_names():
            msg = ('Hazard data %s did not contain expected '
                   'attribute %s ' % (hazard_layer.get_name(), category_title))
            # noinspection PyExceptionInherit
            raise InaSAFEError(msg)

        # Find the target field name that has no conflict with default target
        attribute_names = hazard_layer.get_attribute_names()
        new_target_field = get_non_conflicting_attribute_name(
            self.target_field, attribute_names)
        self.target_field = new_target_field

        # Run interpolation function for polygon2raster
        interpolated_layer = assign_hazard_values_to_exposure_data(
            hazard_layer, exposure_layer, attribute_name=self.target_field)

        # Initialise data_table of output dataset with all data_table
        # from input polygon and a population count of zero
        new_data_table = hazard_layer.get_data()
        categories = {}
        for row in new_data_table:
            row[self.target_field] = 0
            category = row[category_title]
            categories[category] = 0

        # Count affected population per polygon and total
        for row in interpolated_layer.get_data():
            # Get population at this location
            population = float(row[self.target_field])

            # Update population count for associated polygon
            poly_id = row['polygon_id']
            new_data_table[poly_id][self.target_field] += population

            # Update population count for each category
            category = new_data_table[poly_id][category_title]
            categories[category] += population

        # Count totals
        total_population = population_rounding(
            int(numpy.sum(exposure_layer.get_data(nan=0))))

        # Count number and cumulative for each zone
        cumulative = 0
        all_categories_population = {}
        all_categories_cumulative = {}
        for name in category_names:
            if category_title == 'Radius':
                key = name * 1000  # Convert to meters
            else:
                key = name
            # prevent key error
            population = int(categories.get(key, 0))

            cumulative += population

            # I'm not sure whether this is the best place to apply rounding?
            all_categories_population[name] = population_rounding(population)
            all_categories_cumulative[name] = population_rounding(cumulative)

        # Use final accumulation as total number needing evacuation
        evacuated = population_rounding(cumulative)

        minimum_needs = [
            parameter.serialize() for parameter in
            self.parameters['minimum needs']
        ]

        # Generate impact report for the pdf map
        blank_cell = ''
        table_body = [question,
                      TableRow([tr('Volcanoes considered'),
                                '%s' % volcano_names, blank_cell],
                               header=True),
                      TableRow([tr('People needing evacuation'),
                                '%s' % format_int(evacuated),
                                blank_cell],
                               header=True),
                      TableRow([category_header,
                                tr('Total'), tr('Cumulative')],
                               header=True)]

        for name in category_names:
            table_body.append(
                TableRow([name,
                          format_int(all_categories_population[name]),
                          format_int(all_categories_cumulative[name])]))

        table_body.extend([
            TableRow(tr(
                'Map shows the number of people affected in each of volcano '
                'hazard polygons.'))])

        total_needs = evacuated_population_needs(
            evacuated, minimum_needs)
        for frequency, needs in total_needs.items():
            table_body.append(TableRow(
                [
                    tr('Needs should be provided %s' % frequency),
                    tr('Total')
                ],
                header=True))
            for resource in needs:
                table_body.append(TableRow([
                    tr(resource['table name']),
                    format_int(resource['amount'])]))
        impact_table = Table(table_body).toNewlineFreeString()

        # Extend impact report for on-screen display
        table_body.extend(
            [TableRow(tr('Notes'), header=True),
             tr('Total population in the exposure layer: %s') % format_int(
                 total_population),
             tr('People need evacuation if they are within the '
                'volcanic hazard zones.')])

        population_counts = [x[self.target_field] for x in new_data_table]
        impact_summary = Table(table_body).toNewlineFreeString()

        # check for zero impact
        if numpy.nanmax(population_counts) == 0 == numpy.nanmin(
                population_counts):
            table_body = [
                question,
                TableRow([tr('People needing evacuation'),
                          '%s' % format_int(evacuated),
                          blank_cell], header=True)]
            my_message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(my_message)

        # Create style
        colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
                   '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
        classes = create_classes(population_counts, len(colours))
        interval_classes = humanize_class(classes)
        # Define style info for output polygons showing population counts
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                transparency = 100
                style_class['min'] = 0
            else:
                transparency = 30
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('People affected by volcanic hazard zone')
        legend_notes = tr('Thousand separator is represented by %s') % (
            get_thousand_separator())
        legend_units = tr('(people per cell)')
        legend_title = tr('Population')

        # Create vector layer and return
        impact_layer = Vector(
            data=new_data_table,
            projection=hazard_layer.get_projection(),
            geometry=hazard_layer.get_geometry(as_geometry_objects=True),
            name=tr('People affected by volcanic hazard zone'),
            keywords={'impact_summary': impact_summary,
                      'impact_table': impact_table,
                      'target_field': self.target_field,
                      'map_title': map_title,
                      'legend_notes': legend_notes,
                      'legend_units': legend_units,
                      'legend_title': legend_title,
                      'total_needs': total_needs},
            style_info=style_info)
        return impact_layer
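
The per-zone tally above rolls the category counts into a running cumulative total and rounds each figure for display. The snippet below is a minimal standalone sketch of that pattern, not InaSAFE code: round_up_population is a hypothetical stand-in for the population_rounding helper used above, and the radii and counts are invented sample values.

import math


def round_up_population(value, base=1000):
    # Hypothetical stand-in for population_rounding(): round up to `base`.
    if value == 0:
        return 0
    return int(math.ceil(float(value) / base) * base)


def tally_categories(category_names, counts, radius_in_km=False):
    # Mirror the loop above: per-category and cumulative totals, rounded.
    per_category = {}
    cumulative_per_category = {}
    cumulative = 0
    for name in category_names:
        key = name * 1000 if radius_in_km else name  # radii keyed in metres
        population = int(counts.get(key, 0))  # missing category -> 0 people
        cumulative += population
        per_category[name] = round_up_population(population)
        cumulative_per_category[name] = round_up_population(cumulative)
    return per_category, cumulative_per_category


# Example with three invented buffer radii (km) and counts keyed in metres:
names = [3, 5, 10]
counts = {3000: 1200, 5000: 3400, 10000: 8900}
print(tally_categories(names, counts, radius_in_km=True))
# per-category {3: 2000, 5: 4000, 10: 9000};
# cumulative   {3: 2000, 5: 5000, 10: 14000}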
Code example #44
0
from safe.common.version import get_version
from safe.common.exceptions import InaSAFEError
from safe.defaults import get_defaults
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.help import show_context_help
from safe.utilities.utilities import get_error_message
from safe.utilities.gis import layer_attribute_names, is_polygon_layer
from safe.utilities.resources import get_ui_class
from safe.common.exceptions import (
    InvalidParameterError,
    HashNotFoundError,
    NoKeywordsFoundError)
from safe import metadata

# Aggregations' keywords
DEFAULTS = get_defaults()
female_ratio_attribute_key = DEFAULTS['FEMALE_RATIO_ATTR_KEY']
female_ratio_default_key = DEFAULTS['FEMALE_RATIO_KEY']
youth_ratio_attribute_key = DEFAULTS['YOUTH_RATIO_ATTR_KEY']
youth_ratio_default_key = DEFAULTS['YOUTH_RATIO_KEY']
adult_ratio_attribute_key = DEFAULTS['ADULT_RATIO_ATTR_KEY']
adult_ratio_default_key = DEFAULTS['ADULT_RATIO_KEY']
elderly_ratio_attribute_key = DEFAULTS['ELDERLY_RATIO_ATTR_KEY']
elderly_ratio_default_key = DEFAULTS['ELDERLY_RATIO_KEY']

LOGGER = logging.getLogger('InaSAFE')

FORM_CLASS = get_ui_class('keywords_dialog_base.ui')


class KeywordsDialog(QtGui.QDialog, FORM_CLASS):
Code example #45
0
class FloodEvacuationFunctionVectorHazard(FunctionProvider):
    # noinspection PyUnresolvedReferences
    """Impact function for vector flood evacuation.

    :author AIFDR
    :rating 4

    :param requires category=='hazard' and \
                    subcategory=='flood' and \
                    layertype=='vector'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """
    class Metadata(ImpactFunctionMetadata):
        """Metadata for FloodEvacuationFunctionVectorHazard.

        .. versionadded:: 2.1

        We only need to re-implement get_metadata(); all other behaviours
        are inherited from the abstract base class.
        """
        @staticmethod
        def get_metadata():
            """Return metadata as a dictionary.

            This is a static method. You can use it to get the metadata in
            dictionary format for an impact function.

            :returns: A dictionary representing all the metadata for the
                concrete impact function.
            :rtype: dict
            """
            dict_meta = {
                'id':
                'FloodEvacuationFunctionVectorHazard',
                'name':
                tr('Flood Evacuation Function Vector Hazard'),
                'impact':
                tr('Need evacuation'),
                'author':
                'AIFDR',
                'date_implemented':
                'N/A',
                'overview':
                tr('To assess the impacts of flood inundation '
                   'in vector format on population.'),
                'categories': {
                    'hazard': {
                        'definition': hazard_definition,
                        'subcategory': [hazard_flood],
                        'units': unit_wetdry,
                        'layer_constraints': [layer_vector_polygon]
                    },
                    'exposure': {
                        'definition': exposure_definition,
                        'subcategory': exposure_population,
                        'units': [unit_people_per_pixel],
                        'layer_constraints': [layer_raster_numeric]
                    }
                }
            }
            return dict_meta

    title = tr('Need evacuation')
    # Function documentation
    synopsis = tr('To assess the impacts of flood inundation in vector '
                  'format on population.')
    actions = tr(
        'Provide details about how many people would likely need to be '
        'evacuated, where they are located and what resources would be '
        'required to support them.')

    detailed_description = tr(
        'The population subject to inundation is determined by whether each '
        'area is marked as affected or not. You can also set an evacuation '
        'percentage to calculate what proportion of the affected population '
        'should be evacuated. This number is used to estimate needs based '
        'on the BNPB Perka 7/2008 minimum assistance guidelines.')

    hazard_input = tr(
        'A hazard vector layer with an "affected" attribute whose value is '
        'either 1 or 0.')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr('A vector layer containing the number of people affected and '
                'the minimum needs based on the evacuation percentage.')

    target_field = 'population'
    defaults = get_defaults()

    # Configurable parameters
    # TODO: Share the minimum needs and make another default value
    parameters = OrderedDict([
        ('evacuation_percentage', 1),  # Percent of affected needing evacuation
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elderly_ratio', defaults['ELDERLY_RATIO'])])
             }),
             ('MinimumNeeds', {
                 'on': True
             }),
         ])),
        ('minimum needs', default_minimum_needs()),
        ('provenance', default_provenance())
    ])

    def run(self, layers):
        """Risk plugin for flood population evacuation.

        :param layers: List of layers expected to contain

            * hazard_layer : Vector polygon layer of flood depth
            * exposure_layer : Raster layer of population data on the same grid
                as hazard_layer

        Counts number of people exposed to areas identified as flood prone

        :returns: Map of population exposed to flooding and a table with the
            number of people evacuated and supplies required.
        :rtype: tuple
        """
        # Identify hazard and exposure layers
        hazard_layer = get_hazard_layer(layers)  # Flood inundation
        exposure_layer = get_exposure_layer(layers)

        question = get_question(hazard_layer.get_name(),
                                exposure_layer.get_name(), self)

        # Check that hazard is polygon type
        if not hazard_layer.is_vector:
            message = ('Input hazard %s was not a vector layer as expected' %
                       hazard_layer.get_name())
            raise Exception(message)

        message = (
            'Input hazard must be a polygon layer. I got %s with layer type '
            '%s' % (hazard_layer.get_name(), hazard_layer.get_geometry_name()))
        if not hazard_layer.is_polygon_data:
            raise Exception(message)

        # Run interpolation function for polygon2raster
        P = assign_hazard_values_to_exposure_data(hazard_layer,
                                                  exposure_layer,
                                                  attribute_name='population')

        # Initialise attributes of output dataset with all attributes
        # from input polygon and a population count of zero
        new_attributes = hazard_layer.get_data()
        category_title = 'affected'  # FIXME: Should come from keywords
        deprecated_category_title = 'FLOODPRONE'
        categories = {}
        for attr in new_attributes:
            attr[self.target_field] = 0
            try:
                cat = attr[category_title]
            except KeyError:
                try:
                    cat = attr['FLOODPRONE']
                    categories[cat] = 0
                except KeyError:
                    pass

        # Count affected population per polygon, per category and total
        affected_population = 0
        for attr in P.get_data():

            affected = False
            if 'affected' in attr:
                # None is treated as not affected
                affected = bool(attr['affected'])
            elif 'FLOODPRONE' in attr:
                # Fall back to the legacy 'FLOODPRONE' attribute when there
                # is no 'affected' attribute
                res = attr['FLOODPRONE']
                if res is not None:
                    affected = res.lower() == 'yes'
            elif 'Affected' in attr:
                # Check the default attribute assigned for points
                # covered by a polygon; None is treated as not affected
                affected = bool(attr['Affected'])
            else:
                # assume that every polygon is affected (see #816)
                affected = True
                # there is no flood related attribute
                # message = ('No flood related attribute found in %s. '
                #       'I was looking for either "Flooded", "FLOODPRONE" '
                #       'or "Affected". The latter should have been '
                #       'automatically set by call to '
                #       'assign_hazard_values_to_exposure_data(). '
                #       'Sorry I can\'t help more.')
                # raise Exception(message)

            if affected:
                # Get population at this location
                pop = float(attr['population'])

                # Update population count for associated polygon
                poly_id = attr['polygon_id']
                new_attributes[poly_id][self.target_field] += pop

                # Update population count for each category
                if len(categories) > 0:
                    try:
                        cat = new_attributes[poly_id][category_title]
                    except KeyError:
                        cat = new_attributes[poly_id][
                            deprecated_category_title]
                    categories[cat] += pop

                # Update total
                affected_population += pop

        # Estimate number of people in need of evacuation
        evacuated = (affected_population *
                     self.parameters['evacuation_percentage'] / 100.0)

        affected_population, rounding = population_rounding_full(
            affected_population)

        total = int(numpy.sum(exposure_layer.get_data(nan=0, scaling=False)))

        # Don't show digits less than a thousand
        total = population_rounding(total)
        evacuated, rounding_evacuated = population_rounding_full(evacuated)

        minimum_needs = [
            parameter.serialize()
            for parameter in self.parameters['minimum needs']
        ]

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([
                tr('People affected'),
                '%s*' % (format_int(int(affected_population)))
            ],
                     header=True),
            TableRow([
                TableCell(tr('* Number is rounded up to the nearest %s') %
                          (rounding),
                          col_span=2)
            ]),
            TableRow([
                tr('People needing evacuation'),
                '%s*' % (format_int(int(evacuated)))
            ],
                     header=True),
            TableRow([
                TableCell(tr('* Number is rounded up to the nearest %s') %
                          (rounding_evacuated),
                          col_span=2)
            ]),
            TableRow([
                tr('Evacuation threshold'),
                '%s%%' % format_int(self.parameters['evacuation_percentage'])
            ],
                     header=True),
            TableRow(
                tr('Map shows the number of people affected in each flood prone '
                   'area')),
            TableRow(
                tr('Table below shows the weekly minimum needs for all '
                   'evacuated people'))
        ]
        total_needs = evacuated_population_needs(evacuated, minimum_needs)
        for frequency, needs in total_needs.items():
            table_body.append(
                TableRow([
                    tr('Needs should be provided %s') % frequency,
                    tr('Total')
                ],
                         header=True))
            for resource in needs:
                table_body.append(
                    TableRow([
                        tr(resource['table name']),
                        format_int(resource['amount'])
                    ]))

        impact_table = Table(table_body).toNewlineFreeString()

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(TableRow(tr('How will warnings be disseminated?')))
        table_body.append(TableRow(tr('How will we reach stranded people?')))
        table_body.append(TableRow(tr('Do we have enough relief items?')))
        table_body.append(
            TableRow(
                tr('If yes, where are they located and how will we distribute '
                   'them?')))
        table_body.append(
            TableRow(
                tr('If no, where can we obtain additional relief items from '
                   'and how will we transport them here?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Total population: %s') % format_int(total),
            tr('People need evacuation if in the area identified as '
               '"Flood Prone"'),
            tr('Minimum needs are defined in BNPB regulation 7/2008')
        ])
        impact_summary = Table(table_body).toNewlineFreeString()

        # Create style
        # Define classes for legend for flooded population counts
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]

        population_counts = [x['population'] for x in new_attributes]
        classes = create_classes(population_counts, len(colours))
        interval_classes = humanize_class(classes)

        # Define style info for output polygons showing population counts
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            style_class['label'] = create_label(interval_classes[i])
            if i == 0:
                style_class['min'] = 0
            else:
                style_class['min'] = classes[i - 1]
            style_class['transparency'] = 0
            style_class['colour'] = colours[i]
            style_class['max'] = classes[i]
            style_classes.append(style_class)

        # Override style info with new classes and name
        style_info = dict(target_field=self.target_field,
                          style_classes=style_classes,
                          style_type='graduatedSymbol')

        # For printing map purpose
        map_title = tr('People affected by flood prone areas')
        legend_notes = tr('Thousand separator is represented by \'.\'')
        legend_units = tr('(people per polygon)')
        legend_title = tr('Population Count')

        # Create vector layer and return
        vector_layer = Vector(data=new_attributes,
                              projection=hazard_layer.get_projection(),
                              geometry=hazard_layer.get_geometry(),
                              name=tr('People affected by flood prone areas'),
                              keywords={
                                  'impact_summary': impact_summary,
                                  'impact_table': impact_table,
                                  'target_field': self.target_field,
                                  'map_title': map_title,
                                  'legend_notes': legend_notes,
                                  'legend_units': legend_units,
                                  'legend_title': legend_title,
                                  'affected_population': affected_population,
                                  'total_population': total,
                                  'total_needs': total_needs
                              },
                              style_info=style_info)
        return vector_layer
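
The evacuation estimate above is simple arithmetic: the affected head count is scaled by the configured evacuation percentage, rounded, and then used to size the minimum-needs table. The sketch below replays that arithmetic in isolation; estimate_evacuation and weekly_needs are hypothetical helpers written for this example, and the per-person quantities are illustrative rather than the regulated BNPB values.

def estimate_evacuation(affected_population, evacuation_percentage):
    # People needing evacuation as a share of the affected population.
    return affected_population * evacuation_percentage / 100.0


def weekly_needs(evacuated, per_person_per_week):
    # Scale per-person weekly resources by the evacuated head count.
    return dict(
        (name, int(round(evacuated * amount)))
        for name, amount in per_person_per_week.items())


evacuated = estimate_evacuation(affected_population=25000,
                                evacuation_percentage=1)
# With a 1% evacuation percentage, 25000 affected people -> 250 evacuees.
print(weekly_needs(evacuated, {'Rice [kg]': 2.8, 'Drinking Water [l]': 17.5}))
# -> {'Rice [kg]': 700, 'Drinking Water [l]': 4375}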
Code example #46
0
class ClassifiedHazardPopulationImpactFunction(FunctionProvider):
    # noinspection PyUnresolvedReferences
    """Plugin for impact of population as derived by classified hazard.

        :author ESSC
        :rating 3
        :param requires category=='hazard' and \
                        layertype=='raster' and \
                        data_type=='classified' and \
                        unit=='classes'

        :param requires category=='exposure' and \
                        subcategory=='population' and \
                        layertype=='raster'
        """
    class Metadata(ImpactFunctionMetadata):
        """Metadata for Classified Hazard Population Impact Function.

        .. versionadded:: 2.1

        We only need to re-implement get_metadata(); all other behaviours
        are inherited from the abstract base class.
        """
        @staticmethod
        def get_metadata():
            """Return metadata as a dictionary.

            This is a static method. You can use it to get the metadata in
            dictionary format for an impact function.

            :returns: A dictionary representing all the metadata for the
                concrete impact function.
            :rtype: dict
            """
            dict_meta = {
                'id':
                'ClassifiedHazardPopulationImpactFunction',
                'name':
                tr('Classified Hazard Population Impact Function'),
                'impact':
                tr('Be impacted by each class'),
                'author':
                'Dianne Bencito',
                'date_implemented':
                'N/A',
                'overview':
                tr('To assess the impacts of classified hazards in raster '
                   'format on a population raster layer.'),
                'categories': {
                    'hazard': {
                        'definition': hazard_definition,
                        'subcategories': hazard_all,
                        'units': [unit_classified],
                        'layer_constraints': [layer_raster_classified]
                    },
                    'exposure': {
                        'definition': exposure_definition,
                        'subcategories': [exposure_population],
                        'units': [unit_people_per_pixel],
                        'layer_constraints': [layer_raster_continuous]
                    }
                }
            }
            return dict_meta

    # Function documentation
    title = tr('Be affected by each hazard class')
    synopsis = tr(
        'To assess the impacts of classified hazards in raster format on '
        'a population raster layer.')
    actions = tr(
        'Provide details about how many people would likely be affected for '
        'each hazard class.')
    hazard_input = tr(
        'A hazard raster layer where each cell represents the class of the '
        'hazard. There should be 3 classes: e.g. 1, 2, and 3.')
    exposure_input = tr(
        'An exposure raster layer where each cell represents a population '
        'count.')
    output = tr(
        'Map of population exposed to high class and a table with number '
        'of people in each class')
    detailed_description = tr(
        'This function uses the hazard classes that the user has identified '
        'as low, medium or high via the input parameters. It then calculates '
        'the number of people affected in each class of the hazard layer and '
        'reports the per-class counts together with the total number of '
        'people affected by the given hazard.')
    limitation = tr('The number of classes is three.')

    # Configurable parameters
    defaults = get_defaults()
    parameters = OrderedDict([
        ('low_hazard_class', 1.0), ('medium_hazard_class', 2.0),
        ('high_hazard_class', 3.0),
        ('postprocessors',
         OrderedDict([
             ('Gender', {
                 'on': True
             }),
             ('Age', {
                 'on':
                 True,
                 'params':
                 OrderedDict([('youth_ratio', defaults['YOUTH_RATIO']),
                              ('adult_ratio', defaults['ADULT_RATIO']),
                              ('elderly_ratio', defaults['ELDERLY_RATIO'])])
             }),
             ('MinimumNeeds', {
                 'on': True
             }),
         ])), ('minimum needs', default_minimum_needs()),
        ('provenance', default_provenance())
    ])
    parameters = add_needs_parameters(parameters)

    def run(self, layers):
        """Plugin for impact of population as derived by classified hazard.

        :param layers: List of layers expected to contain

              * hazard_layer: Raster layer of classified hazard
              * exposure_layer: Raster layer of population data

        Counts number of people exposed to each class of the hazard

        :returns: Map of population exposed to the high hazard class and a
            table with the number of people in each class.
        """

        # The 3 classes
        low_t = self.parameters['low_hazard_class']
        medium_t = self.parameters['medium_hazard_class']
        high_t = self.parameters['high_hazard_class']

        # Identify hazard and exposure layers
        hazard_layer = get_hazard_layer(layers)  # Classified Hazard
        exposure_layer = get_exposure_layer(layers)  # Population Raster

        question = get_question(hazard_layer.get_name(),
                                exposure_layer.get_name(), self)

        # Extract data as numeric arrays
        data = hazard_layer.get_data(nan=0.0)  # Class

        # Calculate impact as population exposed to each class
        population = exposure_layer.get_data(nan=0.0, scaling=True)
        # A hazard class value of 0 means that class is unused, so its
        # population count is kept at zero.
        if high_t == 0:
            hi = numpy.zeros_like(population)
        else:
            hi = numpy.where(data == high_t, population, 0)
        if medium_t == 0:
            med = numpy.zeros_like(population)
        else:
            med = numpy.where(data == medium_t, population, 0)
        if low_t == 0:
            lo = numpy.zeros_like(population)
        else:
            lo = numpy.where(data == low_t, population, 0)
        if high_t == 0:
            impact = numpy.where((data == low_t) + (data == medium_t),
                                 population, 0)
        elif medium_t == 0:
            impact = numpy.where((data == low_t) + (data == high_t),
                                 population, 0)
        elif low_t == 0:
            impact = numpy.where((data == medium_t) + (data == high_t),
                                 population, 0)
        else:
            impact = numpy.where(
                (data == low_t) + (data == medium_t) + (data == high_t),
                population, 0)

        # Count totals
        total = int(numpy.sum(population))
        high = int(numpy.sum(hi))
        medium = int(numpy.sum(med))
        low = int(numpy.sum(lo))
        total_impact = int(numpy.sum(impact))

        # Perform population rounding based on number of people
        no_impact = population_rounding(total - total_impact)
        total = population_rounding(total)
        total_impact = population_rounding(total_impact)
        high = population_rounding(high)
        medium = population_rounding(medium)
        low = population_rounding(low)

        minimum_needs = [
            parameter.serialize()
            for parameter in self.parameters['minimum needs']
        ]

        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([
                tr('Total Population Affected '),
                '%s' % format_int(total_impact)
            ],
                     header=True),
            TableRow([
                tr('Population in High hazard class areas '),
                '%s' % format_int(high)
            ]),
            TableRow([
                tr('Population in Medium hazard class areas '),
                '%s' % format_int(medium)
            ]),
            TableRow([
                tr('Population in Low hazard class areas '),
                '%s' % format_int(low)
            ]),
            TableRow(
                [tr('Population Not Affected'),
                 '%s' % format_int(no_impact)]),
            TableRow(
                tr('Table below shows the minimum needs for all '
                   'evacuated people'))
        ]

        total_needs = evacuated_population_needs(total_impact, minimum_needs)
        for frequency, needs in total_needs.items():
            table_body.append(
                TableRow([
                    tr('Needs should be provided %s') % frequency,
                    tr('Total')
                ],
                         header=True))
            for resource in needs:
                table_body.append(
                    TableRow([
                        tr(resource['table name']),
                        format_int(resource['amount'])
                    ]))

        impact_table = Table(table_body).toNewlineFreeString()

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        table_body.append(TableRow(tr('How will warnings be disseminated?')))
        table_body.append(TableRow(tr('How will we reach stranded people?')))
        table_body.append(TableRow(tr('Do we have enough relief items?')))
        table_body.append(
            TableRow(
                tr('If yes, where are they located and how will we distribute '
                   'them?')))
        table_body.append(
            TableRow(
                tr('If no, where can we obtain additional relief items from '
                   'and how will we transport them here?')))

        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Map shows the numbers of people in high, medium, and low '
               'hazard class areas'),
            tr('Total population: %s') % format_int(total)
        ])
        impact_summary = Table(table_body).toNewlineFreeString()

        # Create style
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
            '#FF0000', '#7A0000'
        ]
        classes = create_classes(impact.flat[:], len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []

        for i in xrange(len(colours)):
            style_class = dict()
            if i == 1:
                label = create_label(interval_classes[i], 'Low')
            elif i == 4:
                label = create_label(interval_classes[i], 'Medium')
            elif i == 7:
                label = create_label(interval_classes[i], 'High')
            else:
                label = create_label(interval_classes[i])
            style_class['label'] = label
            style_class['quantity'] = classes[i]
            style_class['transparency'] = 30
            style_class['colour'] = colours[i]
            style_classes.append(style_class)

        style_info = dict(target_field=None,
                          style_classes=style_classes,
                          style_type='rasterStyle')

        # For printing map purpose
        map_title = tr('Population affected by each class')
        legend_notes = tr('Thousand separator is represented by %s') % (
            get_thousand_separator())
        legend_units = tr('(people per cell)')
        legend_title = tr('Number of People')

        # Create raster object and return
        raster_layer = Raster(impact,
                              projection=hazard_layer.get_projection(),
                              geotransform=hazard_layer.get_geotransform(),
                              name=tr('Population which %s') %
                              (get_function_title(self).lower()),
                              keywords={
                                  'impact_summary': impact_summary,
                                  'impact_table': impact_table,
                                  'map_title': map_title,
                                  'legend_notes': legend_notes,
                                  'legend_units': legend_units,
                                  'legend_title': legend_title,
                                  'total_needs': total_needs
                              },
                              style_info=style_info)
        return raster_layer
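
The per-class counting above relies on numpy.where() to mask the population raster by hazard class. The block below is a self-contained sketch of that idiom using invented toy arrays; people_in_class is a hypothetical helper, and its zero guard mirrors the behaviour above where a class value of 0 leaves that class unused.

import numpy

hazard_classes = numpy.array([[1, 2, 3],
                              [3, 2, 0],
                              [1, 1, 2]])
population = numpy.array([[10., 20., 30.],
                          [40., 50., 60.],
                          [5., 15., 25.]])


def people_in_class(hazard, exposure, class_value):
    # Sum the exposure cells whose hazard cell equals class_value.
    if class_value == 0:  # a class value of 0 means the class is not used
        return 0.0
    return float(numpy.sum(numpy.where(hazard == class_value, exposure, 0)))


high = people_in_class(hazard_classes, population, 3)
medium = people_in_class(hazard_classes, population, 2)
low = people_in_class(hazard_classes, population, 1)
print('high=%s medium=%s low=%s' % (high, medium, low))
# -> high=70.0 medium=95.0 low=30.0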