    def set_layer(self, layer=None):
        """Set layer and update UI accordingly.

        :param layer: A QgsVectorLayer.
        :type layer: QgsVectorLayer
        """
        if layer:
            self.layer = layer
        else:
            self.layer = self.input_layer_combo_box.currentLayer()
        if not self.layer:
            return

        try:
            keywords = self.keyword_io.read_keywords(self.layer)
            self.show_current_metadata()
        except NoKeywordsFoundError:
            # FIXME (elpaso)
            keywords = {}

        # TODO(IS): Show only possible exposure target
        if keywords.get('layer_purpose', False) == layer_purpose_hazard['key']:
            self.target_exposure_label.setEnabled(True)
            self.target_exposure_combo_box.setEnabled(True)
            self.target_exposure_combo_box.clear()
            for exposure in exposure_all:
                # Only show exposures that have an active classification
                if active_classification(keywords, exposure['key']):
                    self.target_exposure_combo_box.addItem(
                        exposure['name'], exposure['key'])
        else:
            self.target_exposure_label.setEnabled(False)
            self.target_exposure_combo_box.setEnabled(False)
            self.target_exposure_combo_box.clear()
            self.target_exposure_combo_box.addItem(tr("Not Applicable"))
Example 2
    def set_layer(self, layer=None):
        """Set layer and update UI accordingly.

        :param layer: A QgsVectorLayer.
        :type layer: QgsVectorLayer
        """
        if layer:
            self.layer = layer
        else:
            self.layer = self.input_layer_combo_box.currentLayer()
        if not self.layer:
            return

        try:
            keywords = self.keyword_io.read_keywords(self.layer)
            self.show_current_metadata()
        except NoKeywordsFoundError:
            # FIXME (elpaso)
            keywords = {}

        # TODO(IS): Show only possible exposure target
        if keywords.get('layer_purpose', False) == layer_purpose_hazard['key']:
            self.target_exposure_label.setEnabled(True)
            self.target_exposure_combo_box.setEnabled(True)
            self.target_exposure_combo_box.clear()
            for exposure in exposure_all:
                # Only show exposures that have an active classification
                if active_classification(keywords, exposure['key']):
                    self.target_exposure_combo_box.addItem(
                        exposure['name'], exposure['key'])
        else:
            self.target_exposure_label.setEnabled(False)
            self.target_exposure_combo_box.setEnabled(False)
            self.target_exposure_combo_box.clear()
            self.target_exposure_combo_box.addItem(tr("Not Applicable"))
Example 3
def _check_value_mapping(layer, exposure_key=None):
    """Loop over the exposure type field and check if the value map is correct.

    :param layer: The layer
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str
    """
    index = layer.fieldNameIndex(exposure_type_field['field_name'])
    unique_exposure = layer.uniqueValues(index)
    if layer.keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not exposure_key:
            message = tr('Hazard value mapping missing exposure key.')
            raise InvalidKeywordsForProcessingAlgorithm(message)
        value_map = active_thresholds_value_maps(layer.keywords, exposure_key)
    else:
        value_map = layer.keywords.get('value_map')

    if not value_map:
        # The exposure does not have a value_map, so we can skip the layer.
        return layer

    if layer.keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not exposure_key:
            message = tr('Hazard classification is missing exposure key.')
            raise InvalidKeywordsForProcessingAlgorithm(message)
        classification = active_classification(layer.keywords, exposure_key)
    else:
        classification = layer.keywords['classification']

    exposure_classification = definition(classification)

    other = None
    if exposure_classification['key'] != 'data_driven_classes':
        other = exposure_classification['classes'][-1]['key']

    exposure_mapped = []
    for group in value_map.itervalues():
        exposure_mapped.extend(group)

    diff = list(set(unique_exposure) - set(exposure_mapped))

    if other in value_map.keys():
        value_map[other].extend(diff)
    else:
        value_map[other] = diff

    layer.keywords['value_map'] = value_map
    layer.keywords['classification'] = classification
    return layer
Example 4
def _check_value_mapping(layer, exposure_key=None):
    """Loop over the exposure type field and check if the value map is correct.

    :param layer: The layer
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str
    """
    index = layer.fields().lookupField(exposure_type_field['field_name'])
    unique_exposure = layer.uniqueValues(index)
    if layer.keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not exposure_key:
            message = tr('Hazard value mapping missing exposure key.')
            raise InvalidKeywordsForProcessingAlgorithm(message)
        value_map = active_thresholds_value_maps(layer.keywords, exposure_key)
    else:
        value_map = layer.keywords.get('value_map')

    if not value_map:
        # The exposure does not have a value_map, so we can skip the layer.
        return layer

    if layer.keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not exposure_key:
            message = tr('Hazard classification is missing exposure key.')
            raise InvalidKeywordsForProcessingAlgorithm(message)
        classification = active_classification(layer.keywords, exposure_key)
    else:
        classification = layer.keywords['classification']

    exposure_classification = definition(classification)

    other = None
    if exposure_classification['key'] != data_driven_classes['key']:
        other = exposure_classification['classes'][-1]['key']

    exposure_mapped = []
    for group in list(value_map.values()):
        exposure_mapped.extend(group)

    diff = list(unique_exposure - set(exposure_mapped))

    if other in list(value_map.keys()):
        value_map[other].extend(diff)
    else:
        value_map[other] = diff

    layer.keywords['value_map'] = value_map
    layer.keywords['classification'] = classification
    return layer
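
To see what the tail of _check_value_mapping does, here is a tiny standalone illustration (hypothetical values, not from the InaSAFE definitions): unique values found in the exposure type field that are missing from the value map get pushed into the last ("other") class.

# Hypothetical data illustrating the "unmapped values go to the last class" step.
unique_exposure = {'school', 'house', 'warehouse', 'mall'}
value_map = {
    'residential': ['house'],
    'education': ['school'],
    'other': ['shed'],
}
other = 'other'  # key of the last class in the classification

exposure_mapped = []
for group in value_map.values():
    exposure_mapped.extend(group)

diff = list(unique_exposure - set(exposure_mapped))  # e.g. ['warehouse', 'mall']

if other in value_map:
    value_map[other].extend(diff)
else:
    value_map[other] = diff

# value_map['other'] now also contains 'warehouse' and 'mall'.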
Example 5
def add_debug_layers_to_canvas(impact_function):
    """Helper method to add debug layers to QGIS from impact function.

    :param impact_function: The impact function used.
    :type impact_function: ImpactFunction
    """
    name = 'DEBUG %s' % impact_function.name
    root = QgsProject.instance().layerTreeRoot()
    group_debug = root.insertGroup(0, name)
    group_debug.setItemVisibilityChecked(False)
    group_debug.setExpanded(False)
    hazard_keywords = impact_function.provenance['hazard_keywords']
    exposure_keywords = impact_function.provenance['exposure_keywords']

    # Let's style the hazard class in each layer.
    # noinspection PyBroadException
    try:
        classification = active_classification(hazard_keywords,
                                               exposure_keywords['exposure'])
        classification = definition(classification)

        classes = OrderedDict()
        for f in reversed(classification['classes']):
            classes[f['key']] = (f['color'], f['name'])
        hazard_class = hazard_class_field['key']
    except BaseException:
        # We might not have a classification. But this is the debug group so
        # let's not raise a new exception.
        classification = None

    datastore = impact_function.datastore
    for layer in datastore.layers():
        qgis_layer = datastore.layer(layer)
        if not isinstance(qgis_layer, QgsMapLayer):
            continue
        QgsProject.instance().addMapLayer(qgis_layer, False)
        layer_node = group_debug.insertLayer(0, qgis_layer)
        layer_node.setItemVisibilityChecked(False)
        layer_node.setExpanded(False)

        # Let's style layers which have a geometry and have
        # hazard_class
        not_allowed_geom = [
            QgsWkbTypes.NullGeometry, QgsWkbTypes.UnknownGeometry
        ]
        if qgis_layer.type() == QgsMapLayer.VectorLayer:
            if qgis_layer.geometryType() not in not_allowed_geom \
                    and classification: # noqa
                if qgis_layer.keywords['inasafe_fields'].get(hazard_class):
                    hazard_class_style(qgis_layer, classes, True)
Example 6
def add_debug_layers_to_canvas(impact_function):
    """Helper method to add debug layers to QGIS from impact function.

    :param impact_function: The impact function used.
    :type impact_function: ImpactFunction
    """
    name = 'DEBUG %s' % impact_function.name
    root = QgsProject.instance().layerTreeRoot()
    group_debug = root.insertGroup(0, name)
    group_debug.setItemVisibilityChecked(False)
    group_debug.setExpanded(False)
    hazard_keywords = impact_function.provenance['hazard_keywords']
    exposure_keywords = impact_function.provenance['exposure_keywords']

    # Let's style the hazard class in each layer.
    # noinspection PyBroadException
    try:
        classification = active_classification(
            hazard_keywords, exposure_keywords['exposure'])
        classification = definition(classification)

        classes = OrderedDict()
        for f in reversed(classification['classes']):
            classes[f['key']] = (f['color'], f['name'])
        hazard_class = hazard_class_field['key']
    except BaseException:
        # We might not have a classification. But this is the debug group so
        # let's not raise a new exception.
        classification = None

    datastore = impact_function.datastore
    for layer in datastore.layers():
        qgis_layer = datastore.layer(layer)
        if not isinstance(qgis_layer, QgsMapLayer):
            continue
        QgsProject.instance().addMapLayer(
            qgis_layer, False)
        layer_node = group_debug.insertLayer(0, qgis_layer)
        layer_node.setItemVisibilityChecked(False)
        layer_node.setExpanded(False)

        # Let's style layers which have a geometry and have
        # hazard_class
        if qgis_layer.type() == QgsMapLayer.VectorLayer:
            if qgis_layer.geometryType() not in [
                        QgsWkbTypes.NullGeometry,
                        QgsWkbTypes.UnknownGeometry
                    ] and classification: # noqa
                if qgis_layer.keywords['inasafe_fields'].get(hazard_class):
                    hazard_class_style(qgis_layer, classes, True)
Example 7
def notes_assumptions_extractor(impact_report, component_metadata):
    """Extracting notes and assumptions of the exposure layer

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']
    exposure_type = definition(exposure_keywords['exposure'])

    analysis_note_dict = resolve_from_dictionary(extra_args, 'analysis_notes')
    context['items'] = [analysis_note_dict]

    context['component_key'] = component_metadata.key
    context['header'] = resolve_from_dictionary(extra_args, 'header')
    context['items'] += provenance['notes']

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # Check hazard affected class
    affected_classes = []
    for hazard_class in hazard_classification['classes']:
        if exposure_keywords['exposure'] == exposure_population['key']:
            # Taking from profile
            is_affected_class = is_affected(
                hazard=hazard_keywords['hazard'],
                classification=hazard_classification['key'],
                hazard_class=hazard_class['key'],
            )
            if is_affected_class:
                affected_classes.append(hazard_class)
        else:
            if hazard_class.get('affected', False):
                affected_classes.append(hazard_class)

    if affected_classes:
        affected_note_dict = resolve_from_dictionary(
            extra_args, 'affected_note_format')

        # generate hazard classes
        hazard_classes = ', '.join([
            c['name'] for c in affected_classes
        ])

        for index, affected_note in enumerate(affected_note_dict['item_list']):
            affected_note_dict['item_list'][index] = (
                affected_note.format(hazard_classes=hazard_classes)
            )

        context['items'].append(affected_note_dict)

    # Check if the hazard has a displacement rate
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('displacement_rate', 0) > 0:
            have_displacement_rate = True
            break
    else:
        have_displacement_rate = False

    # Only show the displacement note for population exposure analyses
    if have_displacement_rate and exposure_type == exposure_population:
        # add notes for displacement rate used
        displacement_note_dict = resolve_from_dictionary(
            extra_args, 'displacement_rates_note_format')

        # generate rate description
        displacement_rates_note_format = resolve_from_dictionary(
            extra_args, 'hazard_displacement_rates_note_format')
        displacement_rates_note = []
        for hazard_class in hazard_classification['classes']:
            the_hazard_class = deepcopy(hazard_class)
            the_hazard_class['displacement_rate'] = get_displacement_rate(
                hazard=hazard_keywords['hazard'],
                classification=hazard_classification['key'],
                hazard_class=the_hazard_class['key']
            )
            displacement_rates_note.append(
                displacement_rates_note_format.format(**the_hazard_class))

        rate_description = ', '.join(displacement_rates_note)

        for index, displacement_note in enumerate(
                displacement_note_dict['item_list']):
            displacement_note_dict['item_list'][index] = (
                displacement_note.format(rate_description=rate_description)
            )

        context['items'].append(displacement_note_dict)

    # Check if the hazard has a fatality rate
    have_fatality_rate = False
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('fatality_rate', None) is not None and \
                hazard_class.get('fatality_rate', 0) > 0:
            have_fatality_rate = True
            break

    if have_fatality_rate and exposure_type == exposure_population:
        # add notes for fatality rate used
        fatality_note_dict = resolve_from_dictionary(
            extra_args, 'fatality_rates_note_format')

        # generate rate description
        fatality_rates_note_format = resolve_from_dictionary(
            extra_args, 'hazard_fatality_rates_note_format')
        fatality_rates_note = []
        for hazard_class in hazard_classification['classes']:
            # we make a copy here because we don't want to
            # change the real value.
            copy_of_hazard_class = dict(hazard_class)
            if copy_of_hazard_class['fatality_rate'] is None or \
                    copy_of_hazard_class['fatality_rate'] <= 0:
                copy_of_hazard_class['fatality_rate'] = 0
            else:
                # we want to show the rate as a scientific notation
                copy_of_hazard_class['fatality_rate'] = (
                    html_scientific_notation_rate(
                        copy_of_hazard_class['fatality_rate']))

            fatality_rates_note.append(
                fatality_rates_note_format.format(**copy_of_hazard_class))

        rate_description = ', '.join(fatality_rates_note)

        for index, fatality_note in enumerate(fatality_note_dict['item_list']):
            fatality_note_dict['item_list'][index] = (
                fatality_note.format(rate_description=rate_description)
            )

        context['items'].append(fatality_note_dict)

    return context
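
The for ... else block above that sets have_displacement_rate is equivalent to a single any() call over the classes; a sketch of the same check:

# Same check as the for/else block: True if any class has a positive rate.
have_displacement_rate = any(
    hazard_class.get('displacement_rate', 0) > 0
    for hazard_class in hazard_classification['classes'])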
Example 8
    def test_active_classification_thresholds_value_maps(self):
        """Test for active_classification and thresholds value maps method."""
        keywords = {
            'layer_mode': 'continuous',
            'thresholds': {
                'structure': {
                    'ina_structure_flood_hazard_classification': {
                        'classes': {
                            'low': [1, 2],
                            'medium': [3, 4],
                            'high': [5, 6]
                        },
                        'active': False
                    },
                    'ina_structure_flood_hazard_4_class_classification': {
                        'classes': {
                            'low': [1, 2],
                            'medium': [3, 4],
                            'high': [5, 6],
                            'very_high': [7, 8]
                        },
                        'active': False
                    }
                },
                'population': {
                    'ina_population_flood_hazard_classification': {
                        'classes': {
                            'low': [1, 2.5],
                            'medium': [2.5, 4.5],
                            'high': [4.5, 6]
                        },
                        'active': False
                    },
                    'ina_population_flood_hazard_4_class_classification': {
                        'classes': {
                            'low': [1, 2.5],
                            'medium': [2.5, 4],
                            'high': [4, 6],
                            'very_high': [6, 8]
                        },
                        'active': True
                    }
                }
            }
        }
        classification = active_classification(keywords, 'population')
        self.assertEqual(classification,
                         'ina_population_flood_hazard_4_class_classification')

        classification = active_classification(keywords, 'road')
        self.assertIsNone(classification)

        classification = active_classification(keywords, 'structure')
        self.assertIsNone(classification)

        thresholds = active_thresholds_value_maps(keywords, 'population')
        expected_thresholds = {
            'low': [1, 2.5],
            'medium': [2.5, 4],
            'high': [4, 6],
            'very_high': [6, 8]
        }
        self.assertDictEqual(thresholds, expected_thresholds)

        classification = active_thresholds_value_maps(keywords, 'road')
        self.assertIsNone(classification)

        classification = active_thresholds_value_maps(keywords, 'structure')
        self.assertIsNone(classification)
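
The keyword structure exercised above is what active_classification() and active_thresholds_value_maps() consume for continuous layers: keywords['thresholds'][exposure_key] maps classification keys to a dict holding 'classes' and an 'active' flag. A minimal sketch of both lookups for that continuous case only, based on this test rather than the actual InaSAFE implementation:

def active_classification_sketch(keywords, exposure_key):
    """Return the key of the active classification for the exposure, or None."""
    classifications = keywords.get('thresholds', {}).get(exposure_key, {})
    for classification_key, detail in classifications.items():
        if detail.get('active'):
            return classification_key
    return None


def active_thresholds_value_maps_sketch(keywords, exposure_key):
    """Return the 'classes' mapping of the active classification, or None."""
    classifications = keywords.get('thresholds', {}).get(exposure_key, {})
    for detail in classifications.values():
        if detail.get('active'):
            return detail['classes']
    return None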
Example 9
def population_chart_extractor(impact_report, component_metadata):
    """Creating population donut chart.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    provenance = impact_report.impact_function.provenance
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    """Generate Donut chart for affected population."""

    # create context for the donut chart

    # retrieve hazard classification from hazard layer
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    if not hazard_classification:
        return context

    data = []
    labels = []
    colors = []

    for hazard_class in hazard_classification['classes']:

        # Skip if it is not affected hazard class
        if not hazard_class['affected']:
            continue

        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (
            hazard_class['key'],)

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            # Hazard label taken from translated hazard count field
            # label, string-formatted with translated hazard class label
            hazard_value = value_from_field_name(field_name, analysis_layer)
            hazard_value = round_affected_number(
                hazard_value,
                use_rounding=True,
                use_population_rounding=True)
        except KeyError:
            # in case the field was not found
            continue

        data.append(hazard_value)
        labels.append(hazard_class['name'])
        colors.append(hazard_class['color'].name())

    # add total not affected
    try:
        field_name = analysis_layer_fields[total_not_affected_field['key']]
        hazard_value = value_from_field_name(field_name, analysis_layer)
        hazard_value = round_affected_number(
            hazard_value,
            use_rounding=True,
            use_population_rounding=True)

        data.append(hazard_value)
        labels.append(total_not_affected_field['name'])
        colors.append(green.name())
    except KeyError:
        # in case the field is not there
        pass

    # add number for total not affected
    chart_title = resolve_from_dictionary(extra_args, 'chart_title')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    donut_context = DonutChartContext(
        data=data,
        labels=labels,
        colors=colors,
        inner_radius_ratio=0.5,
        stroke_color='#fff',
        title=chart_title,
        total_header=total_header,
        as_file=True)

    context['context'] = donut_context

    return context
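
The hazard_count_field['key'] % (hazard_class['key'],) lookup works because the field key is a format template that is resolved against the layer's inasafe_fields keywords. A tiny illustration with a hypothetical template and field names (the real ones come from the InaSAFE definitions):

# Hypothetical template and keywords, only to show how the lookup resolves.
hazard_count_field_key = 'hazard_count_field_%s'      # assumed template shape
analysis_layer_fields = {'hazard_count_field_high': 'hazard_count_high'}

field_key_name = hazard_count_field_key % ('high',)   # 'hazard_count_field_high'
field_name = analysis_layer_fields[field_key_name]    # 'hazard_count_high'
# A class with no count field raises KeyError, which the extractor catches
# and skips.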
Example 10
def reclassify(layer, exposure_key=None):
    """Reclassify a continuous vector layer.

    This function will modify the input.

    For instance if you want to reclassify like this table :
            Original Value     |   Class
            - ∞ < val <= 0     |     1
            0   < val <= 0.5   |     2
            0.5 < val <= 5     |     3
            5   < val <  + ∞   |     6

    You need a dictionary :
        ranges = OrderedDict()
        ranges[1] = [None, 0]
        ranges[2] = [0.0, 0.5]
        ranges[3] = [0.5, 5]
        ranges[6] = [5, None]

    :param layer: The vector layer.
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :return: The classified vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = reclassify_vector_steps['output_layer_name']
    output_layer_name = output_layer_name % layer.keywords['title']

    # This layer should have this keyword, or it's a mistake from the dev.
    inasafe_fields = layer.keywords['inasafe_fields']
    continuous_column = inasafe_fields[hazard_value_field['key']]

    if exposure_key:
        classification_key = active_classification(
            layer.keywords, exposure_key)
        thresholds = active_thresholds_value_maps(layer.keywords, exposure_key)
        layer.keywords['thresholds'] = thresholds
        layer.keywords['classification'] = classification_key
    else:
        classification_key = layer.keywords.get('classification')
        thresholds = layer.keywords.get('thresholds')

    if not thresholds:
        raise InvalidKeywordsForProcessingAlgorithm(
            'thresholds are missing from the layer %s'
            % layer.keywords['layer_purpose'])

    continuous_index = layer.fields().lookupField(continuous_column)

    classified_field = QgsField()
    classified_field.setType(hazard_class_field['type'])
    classified_field.setName(hazard_class_field['field_name'])
    classified_field.setLength(hazard_class_field['length'])
    classified_field.setPrecision(hazard_class_field['precision'])

    layer.startEditing()
    layer.addAttribute(classified_field)

    classified_field_index = layer.fields(). \
        lookupField(classified_field.name())

    for feature in layer.getFeatures():
        attributes = feature.attributes()
        source_value = attributes[continuous_index]
        classified_value = reclassify_value(source_value, thresholds)
        if (classified_value is None
                or (hasattr(classified_value, 'isNull')
                    and classified_value.isNull())):
            layer.deleteFeature(feature.id())
        else:
            layer.changeAttributeValue(
                feature.id(), classified_field_index, classified_value)

    layer.commitChanges()
    layer.updateFields()

    # We transfer keywords to the output.
    inasafe_fields[hazard_class_field['key']] = (
        hazard_class_field['field_name'])

    value_map = {}

    hazard_classes = definition(classification_key)['classes']
    for hazard_class in reversed(hazard_classes):
        value_map[hazard_class['key']] = [hazard_class['value']]

    layer.keywords['value_map'] = value_map
    layer.keywords['title'] = output_layer_name

    check_layer(layer)
    return layer
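
reclassify_value() itself is not shown in this snippet; below is a minimal sketch consistent with the interval table in the docstring (min < value <= max, with None standing in for an unbounded side). It is an assumption for illustration, not the actual InaSAFE helper:

def reclassify_value_sketch(value, thresholds):
    """Map a continuous value to the key of its class, or None if unmatched.

    `thresholds` maps class keys to [min, max] pairs with the semantics of
    the docstring table: min < value <= max, None meaning -inf or +inf.
    """
    if value is None:
        return None
    for class_key, (v_min, v_max) in thresholds.items():
        if v_min is None and value <= v_max:
            return class_key
        if v_max is None and value > v_min:
            return class_key
        if v_min is not None and v_max is not None and v_min < value <= v_max:
            return class_key
    return None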
Example 11
def population_chart_extractor(impact_report, component_metadata):
    """Creating population donut chart.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    provenance = impact_report.impact_function.provenance
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']
    """Generate Donut chart for affected population."""

    # create context for the donut chart

    # retrieve hazard classification from hazard layer
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    if not hazard_classification:
        return context

    data = []
    labels = []
    colors = []

    for hazard_class in hazard_classification['classes']:

        # Skip if it is not affected hazard class
        if not hazard_class['affected']:
            continue

        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (hazard_class['key'], )

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            # Hazard label taken from translated hazard count field
            # label, string-formatted with translated hazard class label
            hazard_value = value_from_field_name(field_name, analysis_layer)
            hazard_value = round_affected_number(hazard_value,
                                                 use_rounding=True,
                                                 use_population_rounding=True)
        except KeyError:
            # in case the field was not found
            continue

        data.append(hazard_value)
        labels.append(hazard_class['name'])
        colors.append(hazard_class['color'].name())

    # add total not affected
    try:
        field_name = analysis_layer_fields[total_not_affected_field['key']]
        hazard_value = value_from_field_name(field_name, analysis_layer)
        hazard_value = round_affected_number(hazard_value,
                                             use_rounding=True,
                                             use_population_rounding=True)

        data.append(hazard_value)
        labels.append(total_not_affected_field['name'])
        colors.append(green.name())
    except KeyError:
        # in case the field is not there
        pass

    # add number for total not affected
    chart_title = resolve_from_dictionary(extra_args, 'chart_title')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    donut_context = DonutChartContext(data=data,
                                      labels=labels,
                                      colors=colors,
                                      inner_radius_ratio=0.5,
                                      stroke_color='#fff',
                                      title=chart_title,
                                      total_header=total_header,
                                      as_file=True)

    context['context'] = donut_context

    return context
Example 12
def reclassify(layer, exposure_key=None, callback=None):
    """Reclassify a continuous vector layer.

    This function will modify the input.

    For instance if you want to reclassify like this table :
            Original Value     |   Class
            - ∞ < val <= 0     |     1
            0   < val <= 0.5   |     2
            0.5 < val <= 5     |     3
            5   < val <  + ∞   |     6

    You need a dictionary :
        ranges = OrderedDict()
        ranges[1] = [None, 0]
        ranges[2] = [0.0, 0.5]
        ranges[3] = [0.5, 5]
        ranges[6] = [5, None]

    :param layer: The vector layer.
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :param callback: A function to call to indicate progress. The function
        should accept params 'current' (int), 'maximum' (int) and 'step' (str).
        Defaults to None.
    :type callback: function

    :return: The classified vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = reclassify_vector_steps['output_layer_name']
    output_layer_name = output_layer_name % layer.keywords['title']
    processing_step = reclassify_vector_steps['step_name']

    # This layer should have this keyword, or it's a mistake from the dev.
    inasafe_fields = layer.keywords['inasafe_fields']
    continuous_column = inasafe_fields[hazard_value_field['key']]

    if exposure_key:
        classification_key = active_classification(layer.keywords,
                                                   exposure_key)
        thresholds = active_thresholds_value_maps(layer.keywords, exposure_key)
        layer.keywords['thresholds'] = thresholds
        layer.keywords['classification'] = classification_key
    else:
        classification_key = layer.keywords.get('classification')
        thresholds = layer.keywords.get('thresholds')

    if not thresholds:
        raise InvalidKeywordsForProcessingAlgorithm(
            'thresholds are missing from the layer %s' %
            layer.keywords['layer_purpose'])

    continuous_index = layer.fieldNameIndex(continuous_column)

    classified_field = QgsField()
    classified_field.setType(hazard_class_field['type'])
    classified_field.setName(hazard_class_field['field_name'])
    classified_field.setLength(hazard_class_field['length'])
    classified_field.setPrecision(hazard_class_field['precision'])

    layer.startEditing()
    layer.addAttribute(classified_field)

    classified_field_index = layer.fieldNameIndex(classified_field.name())

    for feature in layer.getFeatures():
        attributes = feature.attributes()
        source_value = attributes[continuous_index]
        classified_value = _classified_value(source_value, thresholds)
        if not classified_value:
            layer.deleteFeature(feature.id())
        else:
            layer.changeAttributeValue(feature.id(), classified_field_index,
                                       classified_value)

    layer.commitChanges()
    layer.updateFields()

    # We transfer keywords to the output.
    inasafe_fields[hazard_class_field['key']] = (
        hazard_class_field['field_name'])

    value_map = {}

    hazard_classes = definition(classification_key)['classes']
    for hazard_class in reversed(hazard_classes):
        value_map[hazard_class['key']] = [hazard_class['value']]

    layer.keywords['value_map'] = value_map
    layer.keywords['title'] = output_layer_name

    check_layer(layer)
    return layer
Example 13
def general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    multi_exposure = impact_report.multi_exposure_impact_function
    if multi_exposure:
        return multi_exposure_general_report_extractor(
            impact_report, component_metadata)

    context = {}
    extra_args = component_metadata.extra_args

    # figure out analysis report type
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    use_rounding = impact_report.impact_function.use_rounding
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    exposure_type = definition(exposure_keywords['exposure'])
    # Only round the number when it is population exposure and we use rounding
    is_population = exposure_type is exposure_population

    # find hazard class
    summary = []

    analysis_feature = next(analysis_layer.getFeatures())
    analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

    exposure_unit = exposure_type['units'][0]
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')
    if exposure_unit['abbreviation']:
        value_header = '{measure} ({abbreviation})'.format(**exposure_unit)
    else:
        value_header = '{name}'.format(**exposure_unit)

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # in case there is a classification
    if hazard_classification:

        # classified hazard must have hazard count in analysis layer
        hazard_stats = []
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )
            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = analysis_inasafe_fields[field_key_name]
                field_index = analysis_layer.fields().lookupField(field_name)
                # Hazard label taken from translated hazard count field
                # label, string-formatted with translated hazard class label
                hazard_label = hazard_class['name']

                hazard_value = format_number(
                    analysis_feature[field_index],
                    use_rounding=use_rounding,
                    is_population=is_population)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'numbers': [hazard_value]
                }
            except KeyError:
                # in case the field was not found
                hazard_label = hazard_class['name']
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'numbers': ['0'],
                }

            hazard_stats.append(stats)

        # find total field
        try:
            field_name = analysis_inasafe_fields[total_exposed_field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            total = format_number(
                total, use_rounding=use_rounding, is_population=is_population)
            stats = {
                'key': total_exposed_field['key'],
                'name': total_exposed_field['name'],
                'as_header': True,
                'numbers': [total]
            }
            hazard_stats.append(stats)
        except KeyError:
            pass

        summary.append({
            'header_label': hazard_header,
            'value_labels': [value_header],
            'rows': hazard_stats
        })

    # retrieve affected column
    report_stats = []

    reported_fields = resolve_from_dictionary(
        extra_args, 'reported_fields')
    for item in reported_fields:
        header = item['header']
        field = item['field']
        if field['key'] in analysis_inasafe_fields:
            field_index = analysis_layer.fields().lookupField(
                field['field_name'])
            if field == fatalities_field:
                # For the fatalities field, we show a range of numbers
                # instead
                row_value = fatalities_range(analysis_feature[field_index])
            else:
                row_value = format_number(
                    analysis_feature[field_index],
                    use_rounding=use_rounding,
                    is_population=is_population)
            row_stats = {
                'key': field['key'],
                'name': header,
                'numbers': [row_value]
            }
            report_stats.append(row_stats)

    # Give report section
    header_label = exposure_type['name']
    summary.append({
        'header_label': header_label,
        # This should depend on exposure unit
        # TODO: Change this so it can take the unit dynamically
        'value_labels': [value_header],
        'rows': report_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])
    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')
    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=hazard_classification['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(
        extra_args, ['concept_notes', 'note_format'])

    if is_population:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])
    else:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'general_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
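
resolve_from_dictionary() is called above with both a single key ('header') and a list of keys (['concept_notes', 'note_format']), so it behaves like a nested-dictionary path lookup; a minimal sketch of that behaviour (an assumption, not the actual helper):

def resolve_from_dictionary_sketch(dictionary, path):
    """Walk a nested dictionary by a single key or a list of keys."""
    keys = path if isinstance(path, (list, tuple)) else [path]
    value = dictionary
    for key in keys:
        value = value[key]
    return value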
Example 14
def reclassify(layer, exposure_key=None, overwrite_input=False, callback=None):
    """Reclassify a continuous raster layer.

    Issue https://github.com/inasafe/inasafe/issues/3182


    This function is a wrapper for the code from
    https://github.com/chiatt/gdal_reclassify

    For instance if you want to reclassify like this table :
            Original Value     |   Class
            - ∞ < val <= 0     |     1
            0   < val <= 0.5   |     2
            0.5 < val <= 5     |     3
            5   < val <  + ∞   |     6

    You need a dictionary :
        ranges = OrderedDict()
        ranges[1] = [None, 0]
        ranges[2] = [0.0, 0.5]
        ranges[3] = [0.5, 5]
        ranges[6] = [5, None]

    :param layer: The raster layer.
    :type layer: QgsRasterLayer

    :param overwrite_input: Option for the output layer. True will overwrite
        the input layer. False will create a temporary layer.
    :type overwrite_input: bool

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :param callback: A function to call to indicate progress. The function
        should accept params 'current' (int), 'maximum' (int) and 'step' (str).
        Defaults to None.
    :type callback: function

    :return: The classified raster layer.
    :rtype: QgsRasterLayer

    .. versionadded:: 4.0
    """
    output_layer_name = reclassify_raster_steps['output_layer_name']
    processing_step = reclassify_raster_steps['step_name']
    output_layer_name = output_layer_name % layer.keywords['layer_purpose']

    if exposure_key:
        classification_key = active_classification(layer.keywords,
                                                   exposure_key)
        thresholds = active_thresholds_value_maps(layer.keywords, exposure_key)
        layer.keywords['thresholds'] = thresholds
        layer.keywords['classification'] = classification_key
    else:
        classification_key = layer.keywords.get('classification')
        thresholds = layer.keywords.get('thresholds')
    if not thresholds:
        raise InvalidKeywordsForProcessingAlgorithm(
            'thresholds are missing from the layer %s' %
            layer.keywords['layer_purpose'])

    if not classification_key:
        raise InvalidKeywordsForProcessingAlgorithm(
            'classification is missing from the layer %s' %
            layer.keywords['layer_purpose'])

    ranges = {}
    value_map = {}
    hazard_classes = definition(classification_key)['classes']
    for hazard_class in hazard_classes:
        ranges[hazard_class['value']] = thresholds[hazard_class['key']]
        value_map[hazard_class['key']] = [hazard_class['value']]

    if overwrite_input:
        output_raster = layer.source()
    else:
        output_raster = unique_filename(suffix='.tiff', dir=temp_dir())

    driver = gdal.GetDriverByName('GTiff')

    raster_file = gdal.Open(layer.source())
    band = raster_file.GetRasterBand(1)
    no_data = band.GetNoDataValue()
    source = band.ReadAsArray()
    destination = source.copy()

    for value, interval in ranges.iteritems():
        v_min = interval[0]
        v_max = interval[1]

        if v_min is None:
            destination[np.where(source <= v_max)] = value

        if v_max is None:
            destination[np.where(source > v_min)] = value

        if v_min < v_max:
            destination[np.where((v_min < source) & (source <= v_max))] = value

    # Tag no data cells
    destination[np.where(source == no_data)] = no_data_value

    # Create the new file.
    output_file = driver.Create(output_raster, raster_file.RasterXSize,
                                raster_file.RasterYSize, 1)
    output_file.GetRasterBand(1).WriteArray(destination)
    output_file.GetRasterBand(1).SetNoDataValue(no_data_value)

    # CRS
    output_file.SetProjection(raster_file.GetProjection())
    output_file.SetGeoTransform(raster_file.GetGeoTransform())
    output_file.FlushCache()

    del output_file

    if not isfile(output_raster):
        raise FileNotFoundError

    reclassified = QgsRasterLayer(output_raster, output_layer_name)

    # We transfer keywords to the output.
    reclassified.keywords = layer.keywords.copy()
    reclassified.keywords['layer_mode'] = 'classified'

    value_map = {}

    hazard_classes = definition(classification_key)['classes']
    for hazard_class in reversed(hazard_classes):
        value_map[hazard_class['key']] = [hazard_class['value']]

    reclassified.keywords['value_map'] = value_map
    reclassified.keywords['title'] = output_layer_name

    check_layer(reclassified)
    return reclassified
Example 15
def reclassify(layer, exposure_key=None, overwrite_input=False, callback=None):
    """Reclassify a continuous raster layer.

    Issue https://github.com/inasafe/inasafe/issues/3182


    This function is a wrapper for the code from
    https://github.com/chiatt/gdal_reclassify

    For instance if you want to reclassify like this table :
            Original Value     |   Class
            - ∞ < val <= 0     |     1
            0   < val <= 0.5   |     2
            0.5 < val <= 5     |     3
            5   < val <  + ∞   |     6

    You need a dictionary :
        ranges = OrderedDict()
        ranges[1] = [None, 0]
        ranges[2] = [0.0, 0.5]
        ranges[3] = [0.5, 5]
        ranges[6] = [5, None]

    :param layer: The raster layer.
    :type layer: QgsRasterLayer

    :param overwrite_input: Option for the output layer. True will overwrite
        the input layer. False will create a temporary layer.
    :type overwrite_input: bool

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :param callback: A function to call to indicate progress. The function
        should accept params 'current' (int), 'maximum' (int) and 'step' (str).
        Defaults to None.
    :type callback: function

    :return: The classified raster layer.
    :rtype: QgsRasterLayer

    .. versionadded:: 4.0
    """
    output_layer_name = reclassify_raster_steps['output_layer_name']
    processing_step = reclassify_raster_steps['step_name']
    output_layer_name = output_layer_name % layer.keywords['layer_purpose']

    if exposure_key:
        classification_key = active_classification(
            layer.keywords, exposure_key)
        thresholds = active_thresholds_value_maps(layer.keywords, exposure_key)
        layer.keywords['thresholds'] = thresholds
        layer.keywords['classification'] = classification_key
    else:
        classification_key = layer.keywords.get('classification')
        thresholds = layer.keywords.get('thresholds')
    if not thresholds:
        raise InvalidKeywordsForProcessingAlgorithm(
            'thresholds are missing from the layer %s'
            % layer.keywords['layer_purpose'])

    if not classification_key:
        raise InvalidKeywordsForProcessingAlgorithm(
            'classification is missing from the layer %s'
            % layer.keywords['layer_purpose'])

    ranges = {}
    value_map = {}
    hazard_classes = definition(classification_key)['classes']
    for hazard_class in hazard_classes:
        ranges[hazard_class['value']] = thresholds[hazard_class['key']]
        value_map[hazard_class['key']] = [hazard_class['value']]

    if overwrite_input:
        output_raster = layer.source()
    else:
        output_raster = unique_filename(suffix='.tiff', dir=temp_dir())

    driver = gdal.GetDriverByName('GTiff')

    raster_file = gdal.Open(layer.source())
    band = raster_file.GetRasterBand(1)
    no_data = band.GetNoDataValue()
    source = band.ReadAsArray()
    destination = source.copy()

    for value, interval in ranges.iteritems():
        v_min = interval[0]
        v_max = interval[1]

        if v_min is None:
            destination[np.where(source <= v_max)] = value

        if v_max is None:
            destination[np.where(source > v_min)] = value

        if v_min < v_max:
            destination[np.where((v_min < source) & (source <= v_max))] = value

    # Tag no data cells
    destination[np.where(source == no_data)] = no_data_value

    # Create the new file.
    output_file = driver.Create(
        output_raster, raster_file.RasterXSize, raster_file.RasterYSize, 1)
    output_file.GetRasterBand(1).WriteArray(destination)
    output_file.GetRasterBand(1).SetNoDataValue(no_data_value)

    # CRS
    output_file.SetProjection(raster_file.GetProjection())
    output_file.SetGeoTransform(raster_file.GetGeoTransform())
    output_file.FlushCache()

    del output_file

    if not isfile(output_raster):
        raise FileNotFoundError

    reclassified = QgsRasterLayer(output_raster, output_layer_name)

    # We transfer keywords to the output.
    reclassified.keywords = layer.keywords.copy()
    reclassified.keywords['layer_mode'] = 'classified'

    value_map = {}

    hazard_classes = definition(classification_key)['classes']
    for hazard_class in reversed(hazard_classes):
        value_map[hazard_class['key']] = [hazard_class['value']]

    reclassified.keywords['value_map'] = value_map
    reclassified.keywords['title'] = output_layer_name

    check_layer(reclassified)
    return reclassified
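
The interval loop above can be sanity-checked on a tiny array; a worked example using the ranges from the docstring (with elif so an open-ended interval never falls through to the numeric comparison):

import numpy as np

source = np.array([[-1.0, 0.2], [3.0, 9.0]])
destination = source.copy()
ranges = {1: [None, 0], 2: [0.0, 0.5], 3: [0.5, 5], 6: [5, None]}

for value, (v_min, v_max) in ranges.items():
    if v_min is None:
        destination[np.where(source <= v_max)] = value
    elif v_max is None:
        destination[np.where(source > v_min)] = value
    elif v_min < v_max:
        destination[np.where((v_min < source) & (source <= v_max))] = value

# destination is now [[1., 2.], [3., 6.]]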
Example 16
    def test_active_classification_thresholds_value_maps(self):
        """Test for active_classification and thresholds value maps method."""
        keywords = {
            'layer_mode': 'continuous',
            'thresholds': {
                'structure': {
                    'ina_structure_flood_hazard_classification': {
                        'classes': {
                            'low': [1, 2],
                            'medium': [3, 4],
                            'high': [5, 6]
                        },
                        'active': False
                    },
                    'ina_structure_flood_hazard_4_class_classification': {
                        'classes': {
                            'low': [1, 2],
                            'medium': [3, 4],
                            'high': [5, 6],
                            'very_high': [7, 8]
                        },
                        'active': False

                    }
                },
                'population': {
                    'ina_population_flood_hazard_classification': {
                        'classes': {
                            'low': [1, 2.5],
                            'medium': [2.5, 4.5],
                            'high': [4.5, 6]
                        },
                        'active': False
                    },
                    'ina_population_flood_hazard_4_class_classification': {
                        'classes': {
                            'low': [1, 2.5],
                            'medium': [2.5, 4],
                            'high': [4, 6],
                            'very_high': [6, 8]
                        },
                        'active': True
                    }
                }
            }
        }
        classification = active_classification(keywords, 'population')
        self.assertEqual(
            classification,
            'ina_population_flood_hazard_4_class_classification')

        classification = active_classification(keywords, 'road')
        self.assertIsNone(classification)

        classification = active_classification(keywords, 'structure')
        self.assertIsNone(classification)

        thresholds = active_thresholds_value_maps(keywords, 'population')
        expected_thresholds = {
            'low': [1, 2.5],
            'medium': [2.5, 4],
            'high': [4, 6],
            'very_high': [6, 8]}
        self.assertDictEqual(thresholds, expected_thresholds)

        classification = active_thresholds_value_maps(keywords, 'road')
        self.assertIsNone(classification)

        classification = active_thresholds_value_maps(keywords, 'structure')
        self.assertIsNone(classification)
Example 17
def infographic_people_section_notes_extractor(impact_report,
                                               component_metadata):
    """Extracting notes for people section in the infographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.2
    """
    extra_args = component_metadata.extra_args
    provenance = impact_report.impact_function.provenance
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    context = {}
    context['notes'] = []

    note = {
        'title': None,
        'description': resolve_from_dictionary(extra_args, 'extra_note'),
        'citations': None
    }
    context['notes'].append(note)

    concept_keys = ['affected_people', 'displaced_people']
    for key in concept_keys:
        note = {
            'title': concepts[key].get('name'),
            'description': concepts[key].get('description'),
            'citations': concepts[key].get('citations')[0]['text']
        }
        context['notes'].append(note)

    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # generate rate description
    displacement_rates_note_format = resolve_from_dictionary(
        extra_args, 'hazard_displacement_rates_note_format')
    displacement_rates_note = []
    for hazard_class in hazard_classification['classes']:
        hazard_class['classification_unit'] = (
            hazard_classification['classification_unit'])
        displacement_rates_note.append(
            displacement_rates_note_format.format(**hazard_class))

    rate_description = ', '.join(displacement_rates_note)

    note = {
        'title': concepts['displacement_rate'].get('name'),
        'description': rate_description,
        'citations': concepts['displacement_rate'].get('citations')[0]['text']
    }

    context['notes'].append(note)

    return context
Example 18
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: The impact report that acts as a proxy to fetch
        all the data that the extractor needs.
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: The component metadata. Used to obtain
        information about the component we want to render.
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: Context for rendering phase.
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = next(analysis_layer.getFeatures())
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    use_rounding = impact_report.impact_function.use_rounding
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    """Initializations."""

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # Get exposure type definition
    exposure_type = definition(exposure_keywords['exposure'])
    # Only round the number when it is population exposure and we use rounding
    is_population = exposure_type is exposure_population

    # action for places with population exposure
    is_place_with_population = False
    if exposure_type is exposure_place:
        exposure_fields = exposure_keywords['inasafe_fields']
        if exposure_fields.get(population_count_field['key']):
            is_place_with_population = True

    # Analysis detail only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what the difference is
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [
        exposure_type_field,
        exposure_class_field
    ]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break

    """Create detail header."""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(
        breakdown_header_template.format(exposure=exposure_header))

    # this is a mapping for customizing the double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args,
        'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': [],
            'total': []
        },
        'not_affected': {
            'hazards': [],
            'total': []
        }
    }
    for key, group in list(header_hazard_group.items()):
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    affected_header_index = None
    for index, hazard_class in enumerate(hazard_classification['classes']):
        # the tuple format would be:
        # (class name, is it affected, header background color)

        hazard_class_name = hazard_class['name']
        affected = hazard_class.get('affected')

        if not affected and not affected_header_index:
            affected_header_index = index + 1
            affected_status = 'not_affected'
        elif affected:
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    if affected_header_index:
        not_affected_header_index = len(hazard_classification['classes']) + 2
    else:
        affected_header_index = len(hazard_classification['classes']) + 1
        not_affected_header_index = affected_header_index + 2

    report_fields = []

    headers.insert(affected_header_index, total_affected_field['name'])
    header_hazard_group['affected']['total'].append(
        total_affected_field['name'])
    report_fields.append(total_affected_field)

    headers.insert(not_affected_header_index, total_not_affected_field['name'])
    header_hazard_group['not_affected']['total'].append(
        total_not_affected_field['name'])
    report_fields.append(total_not_affected_field)

    # affected, not affected, population (if applicable), not exposed,
    # total header
    report_fields += [total_not_exposed_field, total_field]

    place_pop_name = resolve_from_dictionary(
        extra_args, ['place_with_population', 'header'])
    if is_place_with_population:
        # we want to change header name for population
        duplicated_population_count_field = deepcopy(
            exposed_population_count_field)
        duplicated_population_count_field['name'] = place_pop_name
        report_fields.append(duplicated_population_count_field)

    report_fields_index = -2 + -(int(is_place_with_population))
    for report_field in report_fields[report_fields_index:]:
        headers.append(report_field['name'])

    """Create detail rows."""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fields().lookupField(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in list(header_hazard_group.items()):
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fields() \
                    .lookupField(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(
                    count_value,
                    use_rounding=use_rounding,
                    is_population=is_population)
                row.append({
                    'value': count_value,
                    'header_group': group_key
                })
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({
                    'value': 0,
                    'header_group': group_key
                })

        skip_row = False

        for field in report_fields:
            group_key = None
            for key, group in list(header_hazard_group.items()):
                if field['name'] in group['total']:
                    group_key = key
                    break

            field_index = exposure_summary_table.fields().lookupField(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(
                total_count,
                use_rounding=use_rounding,
                is_population=is_population)

            # the code below is commented out because we now want to show
            # all rows; uncomment it to hide rows with a zero total

            # if total_count == '0' and field == total_affected_field:
            #     skip_row = True
            #     break

            if group_key:
                if field == total_affected_field:
                    row.insert(
                        affected_header_index,
                        {
                            'value': total_count,
                            'header_group': group_key
                        })
                elif field == total_not_affected_field:
                    row.insert(
                        not_affected_header_index,
                        {
                            'value': total_count,
                            'header_group': group_key
                        })
                else:
                    row.append({
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_keywords)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # class key is first column
        _class_key = _row[0]
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _class_key == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name

    """create total footers."""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    save_total_affected_field = False
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (
            hazard_class['key'],)
        if not hazard_class.get('affected'):
            save_total_affected_field = True

        group_key = None
        for key, group in list(header_hazard_group.items()):
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fields().lookupField(field_name)
            count_value = format_number(
                analysis_feature[field_index],
                use_rounding=use_rounding,
                is_population=is_population)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers) + int(save_total_affected_field)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            # reduce total affected and not affected column index by 1
            # since we are removing a column
            if group_key == affected_field['field_name']:
                affected_header_index -= 1
            else:
                not_affected_header_index -= 1
            continue
        footers.append({
            'value': count_value,
            'header_group': group_key
        })

    # for footers
    for field in report_fields:

        total_count = value_from_field_name(
            field['field_name'], analysis_layer)

        if not total_count and field['name'] == place_pop_name:
            field = population_count_field
            field['name'] = place_pop_name
            total_count = value_from_field_name(
                field['field_name'], analysis_layer)

        group_key = None
        for key, group in list(header_hazard_group.items()):
            if field['name'] in group['total']:
                group_key = key
                break

        total_count = format_number(
            total_count,
            use_rounding=use_rounding,
            is_population=is_population)
        if group_key:
            if field == total_affected_field:
                footers.insert(
                    affected_header_index,
                    {
                        'value': total_count,
                        'header_group': group_key
                    })
            elif field == total_not_affected_field:
                footers.insert(
                    not_affected_header_index,
                    {
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                footers.append({
                    'value': total_count,
                    'header_group': group_key
                })
        else:
            footers.append(total_count)

    header = resolve_from_dictionary(
        extra_args, 'header')
    notes = resolve_from_dictionary(
        extra_args, 'notes')

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0

    # we want to include total affected and not affected as a group
    # to its class so len(report_fields) - 2
    total_header_index = len(headers) - (len(report_fields) - 2)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in list(header_hazard_group.items()):
            if hazard_class_name in group['hazards'] or (
                    hazard_class_name in group['total']):
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    context['extra_table'] = {}

    # extra table for specific exposure, if it exists
    extra_fields = resolve_from_dictionary(extra_args, 'exposure_extra_fields')
    if exposure_type['key'] in list(extra_fields.keys()):

        # create header for the extra table
        extra_table_header_format = resolve_from_dictionary(
            extra_args, 'extra_table_header_format')
        extra_table_header = extra_table_header_format.format(
            exposure=exposure_header)

        # headers
        headers = []
        headers.append(
            breakdown_header_template.format(exposure=exposure_header))

        current_unit = None
        currency_unit = setting('currency', expected_type=str)
        for field in extra_fields[exposure_type['key']]:
            field_index = exposure_summary_table.fields().lookupField(
                field['field_name'])
            if field_index < 0:
                LOGGER.debug(
                    'Field name not found: %s, field index: %s' % (
                        field['field_name'], field_index))
                continue

            units = field.get('units')
            if units:
                for unit in units:
                    if currency_unit == unit['key']:
                        current_unit = unit['name']
                        break
                if not current_unit:
                    current_unit = units[0]['name']

            header_format = '{header} ({unit})'
            headers.append(header_format.format(
                header=field['header_name'], unit=current_unit))

        # rows
        details = []
        for feat in exposure_summary_table.getFeatures():
            row = []

            # Get breakdown name
            exposure_summary_table_field_name = breakdown_field['field_name']
            field_index = exposure_summary_table.fields().lookupField(
                exposure_summary_table_field_name)
            class_key = feat[field_index]

            row.append(class_key)

            for field in extra_fields[exposure_type['key']]:
                field_index = exposure_summary_table.fields().lookupField(
                    field['field_name'])
                # noinspection PyBroadException
                try:
                    total_count = int(float(feat[field_index]))
                except BaseException:
                    LOGGER.debug(
                        'Field name not found: %s, field index: %s' % (
                            field['field_name'], field_index))
                    continue
                total_count = format_number(
                    total_count,
                    use_rounding=use_rounding,
                    is_population=is_population)
                row.append(total_count)

            details.append(row)

        details = sorted(details, key=sort_classes)

        context['extra_table'] = {
            'table_header': extra_table_header,
            'headers': headers,
            'details': details,
        }

    return context
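
One step of the extractor above that is easy to miss is the "double header"
pass: the first flat header that belongs to an affected/not-affected group is
turned into a dict with start=True and a growing colspan, and later headers of
the same group are marked start=False. A self-contained sketch of that pass
follows; the class names and groups are invented, only the grouping pattern
mirrors the extractor.

# Made-up headers and groups; the shape of the output matches the extractor's.
header_hazard_group = {
    'affected': {'hazards': ['High', 'Medium'], 'total': ['Total affected']},
    'not_affected': {'hazards': ['Low'], 'total': ['Total not affected']},
}
headers = ['High', 'Medium', 'Total affected', 'Low', 'Total not affected']

seen_groups = []
last_group = 0
for i, name in enumerate(headers):
    group_key = None
    for key, group in header_hazard_group.items():
        if name in group['hazards'] or name in group['total']:
            group_key = key
            break
    if group_key and group_key not in seen_groups:
        # first column of a group: it carries the colspan
        seen_groups.append(group_key)
        headers[i] = {'name': name, 'start': True,
                      'header_group': group_key, 'colspan': 1}
        last_group = i
    elif group_key:
        # later column of the same group: widen the starting cell
        headers[last_group]['colspan'] += 1
        headers[i] = {'name': name, 'start': False, 'header_group': group_key}

# 'High' now opens the affected group with colspan 3,
# 'Low' opens the not_affected group with colspan 2.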
Example #19
def update_value_map(layer, exposure_key=None, callback=None):
    """Assign inasafe values according to definitions for a vector layer.

    :param layer: The vector layer.
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :param callback: A function to call to indicate progress. The function
        should accept params 'current' (int), 'maximum' (int) and 'step' (str).
        Defaults to None.
    :type callback: function

    :return: The classified vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = assign_inasafe_values_steps['output_layer_name']
    processing_step = assign_inasafe_values_steps['step_name']
    output_layer_name = output_layer_name % layer.keywords['layer_purpose']

    keywords = layer.keywords
    inasafe_fields = keywords['inasafe_fields']

    classification = None
    if keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not inasafe_fields.get(hazard_value_field['key']):
            raise InvalidKeywordsForProcessingAlgorithm
        old_field = hazard_value_field
        new_field = hazard_class_field
        classification = active_classification(layer.keywords, exposure_key)

    elif keywords['layer_purpose'] == layer_purpose_exposure['key']:
        if not inasafe_fields.get(exposure_type_field['key']):
            raise InvalidKeywordsForProcessingAlgorithm
        old_field = exposure_type_field
        new_field = exposure_class_field
    else:
        raise InvalidKeywordsForProcessingAlgorithm

    # It's a hazard layer
    if exposure_key:
        if not active_thresholds_value_maps(keywords, exposure_key):
            raise InvalidKeywordsForProcessingAlgorithm
        value_map = active_thresholds_value_maps(keywords, exposure_key)
    # It's an exposure layer
    else:
        if not keywords.get('value_map'):
            raise InvalidKeywordsForProcessingAlgorithm
        value_map = keywords.get('value_map')

    unclassified_column = inasafe_fields[old_field['key']]
    unclassified_index = layer.fieldNameIndex(unclassified_column)

    reversed_value_map = {}
    for inasafe_class, values in value_map.iteritems():
        for val in values:
            reversed_value_map[val] = inasafe_class

    classified_field = QgsField()
    classified_field.setType(new_field['type'])
    classified_field.setName(new_field['field_name'])
    classified_field.setLength(new_field['length'])
    classified_field.setPrecision(new_field['precision'])

    layer.startEditing()
    layer.addAttribute(classified_field)

    classified_field_index = layer.fieldNameIndex(classified_field.name())

    for feature in layer.getFeatures():
        attributes = feature.attributes()
        source_value = attributes[unclassified_index]
        classified_value = reversed_value_map.get(source_value)

        if not classified_value:
            classified_value = ''

        layer.changeAttributeValue(
            feature.id(), classified_field_index, classified_value)

    layer.commitChanges()

    remove_fields(layer, [unclassified_column])

    # We transfer keywords to the output.
    # We add new class field
    inasafe_fields[new_field['key']] = new_field['field_name']

    # and we remove hazard value field
    inasafe_fields.pop(old_field['key'])

    layer.keywords = keywords
    layer.keywords['inasafe_fields'] = inasafe_fields
    if exposure_key:
        value_map_key = 'value_maps'
    else:
        value_map_key = 'value_map'
    if value_map_key in layer.keywords.keys():
        layer.keywords.pop(value_map_key)
    layer.keywords['title'] = output_layer_name
    if classification:
        layer.keywords['classification'] = classification

    check_layer(layer)
    return layer
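
The heart of update_value_map is the inversion of the value map: the keywords
map each InaSAFE class to a list of raw attribute values, and the function
flips that into a raw-value to class lookup before walking the features. A
minimal sketch of the same inversion follows, written in Python 3 (the example
above is Python 2 era, hence dict.iteritems()); the value map itself is
invented.

# Invented value map; in the function it comes from the layer keywords
# (active_thresholds_value_maps for hazard, 'value_map' for exposure).
value_map = {
    'residential': ['Residential', 'housing'],
    'commercial': ['Commercial', 'shop', 'market'],
}

reversed_value_map = {}
for inasafe_class, values in value_map.items():
    for value in values:
        reversed_value_map[value] = inasafe_class

# Classify some raw attribute values; unknown values fall back to ''.
raw_values = ['shop', 'housing', 'warehouse']
classified = [reversed_value_map.get(value, '') for value in raw_values]
print(classified)  # ['commercial', 'residential', '']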
Example #20
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: The impact report that acts as a proxy to fetch
        all the data that the extractor needs.
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: The component metadata. Used to obtain
        information about the component we want to render.
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: Context for rendering phase.
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = analysis_layer.getFeatures().next()
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    use_rounding = impact_report.impact_function.use_rounding
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']
    """Initializations."""

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # Get exposure type definition
    exposure_type = definition(exposure_keywords['exposure'])
    # Only round the number when it is population exposure and we use rounding
    is_population = exposure_type is exposure_population

    # action for places with population exposure
    is_place_with_population = False
    if exposure_type is exposure_place:
        exposure_fields = exposure_keywords['inasafe_fields']
        if exposure_fields.get(population_count_field['key']):
            is_place_with_population = True

    # Analysis detail only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what the difference is
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    """Create detail header."""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(breakdown_header_template.format(exposure=exposure_header))

    # this is a mapping for customizing the double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args, 'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': [],
            'total': []
        },
        'not_affected': {
            'hazards': [],
            'total': []
        }
    }
    for key, group in header_hazard_group.iteritems():
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    affected_header_index = None
    for index, hazard_class in enumerate(hazard_classification['classes']):
        # the tuple format would be:
        # (class name, is it affected, header background color)

        hazard_class_name = hazard_class['name']
        affected = hazard_class.get('affected')

        if not affected and not affected_header_index:
            affected_header_index = index + 1
            affected_status = 'not_affected'
        elif affected:
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    if affected_header_index:
        not_affected_header_index = len(hazard_classification['classes']) + 2
    else:
        affected_header_index = len(hazard_classification['classes']) + 1
        not_affected_header_index = affected_header_index + 2

    report_fields = []

    headers.insert(affected_header_index, total_affected_field['name'])
    header_hazard_group['affected']['total'].append(
        total_affected_field['name'])
    report_fields.append(total_affected_field)

    headers.insert(not_affected_header_index, total_not_affected_field['name'])
    header_hazard_group['not_affected']['total'].append(
        total_not_affected_field['name'])
    report_fields.append(total_not_affected_field)

    # affected, not affected, population (if applicable), not exposed,
    # total header
    report_fields += [total_not_exposed_field, total_field]

    place_pop_name = resolve_from_dictionary(
        extra_args, ['place_with_population', 'header'])
    if is_place_with_population:
        # we want to change header name for population
        duplicated_population_count_field = deepcopy(
            exposed_population_count_field)
        duplicated_population_count_field['name'] = place_pop_name
        report_fields.append(duplicated_population_count_field)

    report_fields_index = -2 + -(int(is_place_with_population))
    for report_field in report_fields[report_fields_index:]:
        headers.append(report_field['name'])
    """Create detail rows."""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fieldNameIndex(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in header_hazard_group.iteritems():
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fieldNameIndex(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(count_value,
                                            use_rounding=use_rounding,
                                            is_population=is_population)
                row.append({'value': count_value, 'header_group': group_key})
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({'value': 0, 'header_group': group_key})

        skip_row = False

        for field in report_fields:
            group_key = None
            for key, group in header_hazard_group.iteritems():
                if field['name'] in group['total']:
                    group_key = key
                    break

            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(total_count,
                                        use_rounding=use_rounding,
                                        is_population=is_population)

            # the code below is commented out because we now want to show
            # all rows; uncomment it to hide rows with a zero total

            # if total_count == '0' and field == total_affected_field:
            #     skip_row = True
            #     break

            if group_key:
                if field == total_affected_field:
                    row.insert(affected_header_index, {
                        'value': total_count,
                        'header_group': group_key
                    })
                elif field == total_not_affected_field:
                    row.insert(not_affected_header_index, {
                        'value': total_count,
                        'header_group': group_key
                    })
                else:
                    row.append({
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_keywords)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # class key is first column
        _class_key = _row[0]
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _class_key == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name
    """create total footers."""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    save_total_affected_field = False
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (hazard_class['key'], )
        if not hazard_class.get('affected'):
            save_total_affected_field = True

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fieldNameIndex(field_name)
            count_value = format_number(analysis_feature[field_index],
                                        use_rounding=use_rounding,
                                        is_population=is_population)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers) + int(save_total_affected_field)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            # reduce total affected and not affected column index by 1
            # since we are removing a column
            if group_key == affected_field['field_name']:
                affected_header_index -= 1
            else:
                not_affected_header_index -= 1
            continue
        footers.append({'value': count_value, 'header_group': group_key})

    # for footers
    for field in report_fields:

        total_count = value_from_field_name(field['field_name'],
                                            analysis_layer)

        if not total_count and field['name'] == place_pop_name:
            field = population_count_field
            field['name'] = place_pop_name
            total_count = value_from_field_name(field['field_name'],
                                                analysis_layer)

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if field['name'] in group['total']:
                group_key = key
                break

        total_count = format_number(total_count,
                                    use_rounding=use_rounding,
                                    is_population=is_population)
        if group_key:
            if field == total_affected_field:
                footers.insert(affected_header_index, {
                    'value': total_count,
                    'header_group': group_key
                })
            elif field == total_not_affected_field:
                footers.insert(not_affected_header_index, {
                    'value': total_count,
                    'header_group': group_key
                })
            else:
                footers.append({
                    'value': total_count,
                    'header_group': group_key
                })
        else:
            footers.append(total_count)

    header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')

    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0

    # we want to include total affected and not affected as a group
    # to its class so len(report_fields) - 2
    total_header_index = len(headers) - (len(report_fields) - 2)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class_name in group['hazards'] or (hazard_class_name
                                                         in group['total']):
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    context['extra_table'] = {}

    # extra table for specific exposure, if it exists
    extra_fields = resolve_from_dictionary(extra_args, 'exposure_extra_fields')
    if exposure_type['key'] in extra_fields.keys():

        # create header for the extra table
        extra_table_header_format = resolve_from_dictionary(
            extra_args, 'extra_table_header_format')
        extra_table_header = extra_table_header_format.format(
            exposure=exposure_header)

        # headers
        headers = []
        headers.append(
            breakdown_header_template.format(exposure=exposure_header))

        current_unit = None
        currency_unit = setting('currency', expected_type=str)
        for field in extra_fields[exposure_type['key']]:
            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            if field_index < 0:
                LOGGER.debug('Field name not found: %s, field index: %s' %
                             (field['field_name'], field_index))
                continue

            units = field.get('units')
            if units:
                for unit in units:
                    if currency_unit == unit['key']:
                        current_unit = unit['name']
                        break
                if not current_unit:
                    current_unit = units[0]['name']

            header_format = '{header} ({unit})'
            headers.append(
                header_format.format(header=field['header_name'],
                                     unit=current_unit))

        # rows
        details = []
        for feat in exposure_summary_table.getFeatures():
            row = []

            # Get breakdown name
            exposure_summary_table_field_name = breakdown_field['field_name']
            field_index = exposure_summary_table.fieldNameIndex(
                exposure_summary_table_field_name)
            class_key = feat[field_index]

            row.append(class_key)

            for field in extra_fields[exposure_type['key']]:
                field_index = exposure_summary_table.fieldNameIndex(
                    field['field_name'])
                # noinspection PyBroadException
                try:
                    total_count = int(float(feat[field_index]))
                except:
                    LOGGER.debug('Field name not found: %s, field index: %s' %
                                 (field['field_name'], field_index))
                    continue
                total_count = format_number(total_count,
                                            use_rounding=use_rounding,
                                            is_population=is_population)
                row.append(total_count)

            details.append(row)

        details = sorted(details, key=sort_classes)

        context['extra_table'] = {
            'table_header': extra_table_header,
            'headers': headers,
            'details': details,
        }

    return context
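
Both versions of the detail extractor drop an entire column when the analysis
total for a hazard class is zero, by re-slicing the header list and every
detail row around the column index. The slicing trick in isolation, with an
invented table:

# Made-up table; only the column-removal slicing mirrors the extractor.
headers = ['Type', 'High', 'Medium', 'Low', 'Total affected']
details = [
    ['residential', 10, 5, 0, 15],
    ['commercial', 3, 2, 0, 5],
]

column_index = 3  # the 'Low' column whose footer total turned out to be zero
headers = headers[:column_index] + headers[column_index + 1:]
for row_index in range(len(details)):
    row = details[row_index]
    details[row_index] = row[:column_index] + row[column_index + 1:]

print(headers)   # ['Type', 'High', 'Medium', 'Total affected']
print(details)   # [['residential', 10, 5, 15], ['commercial', 3, 2, 5]]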
Example #21
def general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    multi_exposure = impact_report.multi_exposure_impact_function
    if multi_exposure:
        return multi_exposure_general_report_extractor(impact_report,
                                                       component_metadata)

    context = {}
    extra_args = component_metadata.extra_args

    # figure out analysis report type
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    use_rounding = impact_report.impact_function.use_rounding
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    exposure_type = definition(exposure_keywords['exposure'])
    # Only round the number when it is population exposure and we use rounding
    is_population = exposure_type is exposure_population

    # find hazard class
    summary = []

    analysis_feature = next(analysis_layer.getFeatures())
    analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

    exposure_unit = exposure_type['units'][0]
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')
    if exposure_unit['abbreviation']:
        value_header = '{measure} ({abbreviation})'.format(**exposure_unit)
    else:
        value_header = '{name}'.format(**exposure_unit)

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # in case there is a classification
    if hazard_classification:

        # classified hazard must have hazard count in analysis layer
        hazard_stats = []
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )
            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = analysis_inasafe_fields[field_key_name]
                field_index = analysis_layer.fields().lookupField(field_name)
                # Hazard label taken from translated hazard count field
                # label, string-formatted with translated hazard class label
                hazard_label = hazard_class['name']

                hazard_value = format_number(analysis_feature[field_index],
                                             use_rounding=use_rounding,
                                             is_population=is_population)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'numbers': [hazard_value]
                }
            except KeyError:
                # in case the field was not found
                hazard_label = hazard_class['name']
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'numbers': ['0'],
                }

            hazard_stats.append(stats)

        # find total field
        try:
            field_name = analysis_inasafe_fields[total_exposed_field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            total = format_number(total,
                                  use_rounding=use_rounding,
                                  is_population=is_population)
            stats = {
                'key': total_exposed_field['key'],
                'name': total_exposed_field['name'],
                'as_header': True,
                'numbers': [total]
            }
            hazard_stats.append(stats)
        except KeyError:
            pass

        summary.append({
            'header_label': hazard_header,
            'value_labels': [value_header],
            'rows': hazard_stats
        })

    # retrieve affected column
    report_stats = []

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')
    for item in reported_fields:
        header = item['header']
        field = item['field']
        if field['key'] in analysis_inasafe_fields:
            field_index = analysis_layer.fields().lookupField(
                field['field_name'])
            if field == fatalities_field:
                # For fatalities field, we show a range of number
                # instead
                row_value = fatalities_range(analysis_feature[field_index])
            else:
                row_value = format_number(analysis_feature[field_index],
                                          use_rounding=use_rounding,
                                          is_population=is_population)
            row_stats = {
                'key': field['key'],
                'name': header,
                'numbers': [row_value]
            }
            report_stats.append(row_stats)

    # Give report section
    header_label = exposure_type['name']
    summary.append({
        'header_label': header_label,
        # This should depend on exposure unit
        # TODO: Change this so it can take the unit dynamically
        'value_labels': [value_header],
        'rows': report_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])
    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')
    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=hazard_classification['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(extra_args,
                                          ['concept_notes', 'note_format'])

    if is_population:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])
    else:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'general_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
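
What general_report_extractor hands to the rendering phase is essentially a
list of small table sections: each has a header_label, one value_labels entry
per column, and rows whose 'numbers' line up with those labels. A hedged
illustration of that shape follows; the labels and figures are made up and do
not come from a real analysis.

# Shape only; all values are invented.
summary = [
    {
        'header_label': 'Flood hazard zone',
        'value_labels': ['Count'],
        'rows': [
            {'key': 'wet', 'name': 'Wet', 'numbers': ['1,200']},
            {'key': 'dry', 'name': 'Dry', 'numbers': ['8,800']},
            {'key': 'total_exposed', 'name': 'Total exposed',
             'as_header': True, 'numbers': ['10,000']},
        ],
    },
]

for section in summary:
    print(section['header_label'], '|', ', '.join(section['value_labels']))
    for row in section['rows']:
        print(' ', row['name'], *row['numbers'])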
Example #22
def multi_exposure_general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.3
    """
    context = {}
    extra_args = component_metadata.extra_args

    multi_exposure = impact_report.multi_exposure_impact_function
    analysis_layer = impact_report.analysis
    provenances = [
        impact_function.provenance
        for impact_function in (multi_exposure.impact_functions)
    ]
    debug_mode = multi_exposure.debug
    population_exist = False

    hazard_keywords = provenances[0]['hazard_keywords']
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Summarize every value needed for each exposure and extract later to the
    # context.
    summary = []
    map_legend_titles = []
    hazard_classifications = {}
    exposures_stats = []
    for provenance in provenances:
        map_legend_title = provenance['map_legend_title']
        map_legend_titles.append(map_legend_title)
        exposure_stats = {}
        exposure_keywords = provenance['exposure_keywords']
        exposure_type = definition(exposure_keywords['exposure'])
        exposure_stats['exposure'] = exposure_type
        # Only round the number when it is population exposure and it is not
        # in debug mode
        is_rounded = not debug_mode
        is_population = exposure_type is exposure_population
        if is_population:
            population_exist = True

        analysis_feature = next(analysis_layer.getFeatures())
        analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

        exposure_unit = exposure_type['units'][0]
        if exposure_unit['abbreviation']:
            value_header = '{measure} ({abbreviation})'.format(
                measure=map_legend_title,
                abbreviation=exposure_unit['abbreviation'])
        else:
            value_header = '{name}'.format(name=map_legend_title)

        exposure_stats['value_header'] = value_header

        # Get hazard classification
        hazard_classification = definition(
            active_classification(hazard_keywords,
                                  exposure_keywords['exposure']))
        hazard_classifications[exposure_type['key']] = hazard_classification

        # in case there is a classification
        if hazard_classification:
            classification_result = {}
            reported_fields_result = {}
            for hazard_class in hazard_classification['classes']:
                # hazard_count_field is a dynamic field with hazard class
                # as parameter
                field_key_name = exposure_hazard_count_field['key'] % (
                    exposure_type['key'], hazard_class['key'])
                try:
                    # retrieve dynamic field name from analysis_fields keywords
                    # will cause key error if no hazard count for that
                    # particular class
                    field_name = analysis_inasafe_fields[field_key_name]
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    hazard_value = format_number(analysis_feature[field_index],
                                                 use_rounding=is_rounded,
                                                 is_population=is_population)
                except KeyError:
                    # in case the field was not found
                    hazard_value = 0

                classification_result[hazard_class['key']] = hazard_value

            # find total field
            try:
                field_key_name = exposure_total_exposed_field['key'] % (
                    exposure_type['key'])
                field_name = analysis_inasafe_fields[field_key_name]
                total = value_from_field_name(field_name, analysis_layer)
                total = format_number(total,
                                      use_rounding=is_rounded,
                                      is_population=is_population)
                classification_result[total_exposed_field['key']] = total
            except KeyError:
                pass

            exposure_stats['classification_result'] = classification_result

        for item in reported_fields:
            field = item.get('field')
            multi_exposure_field = item.get('multi_exposure_field')
            row_value = '-'
            if multi_exposure_field:
                field_key = (multi_exposure_field['key'] %
                             (exposure_type['key']))
                field_name = (multi_exposure_field['field_name'] %
                              (exposure_type['key']))
                if field_key in analysis_inasafe_fields:
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    row_value = format_number(analysis_feature[field_index],
                                              use_rounding=is_rounded,
                                              is_population=is_population)

            elif field in [displaced_field, fatalities_field]:
                if field['key'] in analysis_inasafe_fields and is_population:
                    field_index = analysis_layer.fields().lookupField(
                        field['name'])
                    if field == fatalities_field:
                        # For fatalities field, we show a range of number
                        # instead
                        row_value = fatalities_range(
                            analysis_feature[field_index])
                    else:
                        row_value = format_number(
                            analysis_feature[field_index],
                            use_rounding=is_rounded,
                            is_population=is_population)

            reported_fields_result[field['key']] = row_value

        exposure_stats['reported_fields_result'] = reported_fields_result
        exposures_stats.append(exposure_stats)

    # After summarizing the values, proceed to context extraction.

    # find total value and labels for each exposure
    value_headers = []
    total_values = []
    for exposure_stats in exposures_stats:
        # label
        value_header = exposure_stats['value_header']
        value_headers.append(value_header)

        # total value
        classification_result = exposure_stats['classification_result']
        total_value = classification_result[total_exposed_field['key']]
        total_values.append(total_value)

    classifications = list(hazard_classifications.values())
    is_item_identical = (classifications.count(
        classifications[0]) == len(classifications))
    if classifications and is_item_identical:
        hazard_stats = []
        for hazard_class in classifications[0]['classes']:
            values = []
            for exposure_stats in exposures_stats:
                classification_result = exposure_stats['classification_result']
                value = classification_result[hazard_class['key']]
                values.append(value)
            stats = {
                'key': hazard_class['key'],
                'name': hazard_class['name'],
                'numbers': values
            }
            hazard_stats.append(stats)

        total_stats = {
            'key': total_exposed_field['key'],
            'name': total_exposed_field['name'],
            'as_header': True,
            'numbers': total_values
        }
        hazard_stats.append(total_stats)

        summary.append({
            'header_label': hazard_header,
            'value_labels': value_headers,
            'rows': hazard_stats
        })
    # if there are different hazard classifications used in the analysis,
    # we will create a separate table for each hazard classification
    else:
        hazard_classification_groups = {}
        for exposure_key, hazard_classification in (iter(
                list(hazard_classifications.items()))):
            exposure_type = definition(exposure_key)
            if hazard_classification['key'] not in (
                    hazard_classification_groups):
                hazard_classification_groups[hazard_classification['key']] = [
                    exposure_type
                ]
            else:
                hazard_classification_groups[
                    hazard_classification['key']].append(exposure_type)

        for hazard_classification_key, exposures in (iter(
                list(hazard_classification_groups.items()))):
            custom_headers = []
            custom_total_values = []
            # find total value and labels for each exposure
            for exposure_stats in exposures_stats:
                if exposure_stats['exposure'] not in exposures:
                    continue
                # label
                value_header = exposure_stats['value_header']
                custom_headers.append(value_header)

                # total value
                classification_result = exposure_stats['classification_result']
                total_value = classification_result[total_exposed_field['key']]
                custom_total_values.append(total_value)

            hazard_stats = []
            hazard_classification = definition(hazard_classification_key)
            for hazard_class in hazard_classification['classes']:
                values = []
                for exposure_stats in exposures_stats:
                    if exposure_stats['exposure'] not in exposures:
                        continue
                    classification_result = exposure_stats[
                        'classification_result']
                    value = classification_result[hazard_class['key']]
                    values.append(value)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_class['name'],
                    'numbers': values
                }
                hazard_stats.append(stats)

            total_stats = {
                'key': total_exposed_field['key'],
                'name': total_exposed_field['name'],
                'as_header': True,
                'numbers': custom_total_values
            }
            hazard_stats.append(total_stats)

            summary.append({
                'header_label': hazard_header,
                'value_labels': custom_headers,
                'rows': hazard_stats
            })

    reported_fields_stats = []
    for item in reported_fields:
        field = item.get('field')
        values = []
        for exposure_stats in exposures_stats:
            reported_fields_result = exposure_stats['reported_fields_result']
            value = reported_fields_result[field['key']]
            values.append(value)
        stats = {
            'key': field['key'],
            'name': item['header'],
            'numbers': values
        }
        reported_fields_stats.append(stats)

    header_label = resolve_from_dictionary(extra_args,
                                           ['reported_fields_header'])
    summary.append({
        'header_label': header_label,
        'value_labels': value_headers,
        'rows': reported_fields_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])

    combined_map_legend_title = ''
    for index, map_legend_title in enumerate(map_legend_titles):
        combined_map_legend_title += map_legend_title
        if not (index + 1) == len(map_legend_titles):
            combined_map_legend_title += ', '

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')
    table_header = table_header_format.format(
        title=combined_map_legend_title,
        unit=classifications[0]['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(extra_args,
                                          ['concept_notes', 'note_format'])

    concepts = resolve_from_dictionary(extra_args,
                                       ['concept_notes', 'general_concepts'])

    if population_exist:
        concepts += resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
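
A note on the structure built above: each entry of summary is a plain dict with a header_label, one label per exposure in value_labels, and rows whose numbers line up with those labels (rows flagged as_header are totals). The renderer that consumes this context is not part of this snippet; the code below is only a hypothetical, self-contained sketch of how such a structure could be flattened into text lines, and both render_summary and the sample data are made up for illustration.

def render_summary(summary):
    """Flatten a list of summary tables into plain text lines."""
    lines = []
    for table in summary:
        # Header row: the table label followed by one label per exposure.
        lines.append('{header} | {labels}'.format(
            header=table['header_label'],
            labels=' | '.join(table['value_labels'])))
        for row in table['rows']:
            # Total rows are flagged with 'as_header' by the extractor above.
            prefix = '* ' if row.get('as_header') else '  '
            lines.append('{prefix}{name}: {numbers}'.format(
                prefix=prefix,
                name=row['name'],
                numbers=', '.join(str(n) for n in row['numbers'])))
    return '\n'.join(lines)


if __name__ == '__main__':
    sample_summary = [{
        'header_label': 'Hazard Zone',
        'value_labels': ['People', 'Roads (m)'],
        'rows': [
            {'key': 'high', 'name': 'High', 'numbers': ['1,200', '300']},
            {'key': 'total_exposed', 'name': 'Total Exposed',
             'as_header': True, 'numbers': ['4,000', '1,100']},
        ],
    }]
    print(render_summary(sample_summary))
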
Example No. 23
def update_value_map(layer, exposure_key=None):
    """Assign inasafe values according to definitions for a vector layer.

    :param layer: The vector layer.
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :return: The classified vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = assign_inasafe_values_steps['output_layer_name']
    output_layer_name = output_layer_name % layer.keywords['layer_purpose']

    keywords = layer.keywords
    inasafe_fields = keywords['inasafe_fields']

    classification = None
    if keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not inasafe_fields.get(hazard_value_field['key']):
            raise InvalidKeywordsForProcessingAlgorithm
        old_field = hazard_value_field
        new_field = hazard_class_field
        classification = active_classification(layer.keywords, exposure_key)

    elif keywords['layer_purpose'] == layer_purpose_exposure['key']:
        if not inasafe_fields.get(exposure_type_field['key']):
            raise InvalidKeywordsForProcessingAlgorithm
        old_field = exposure_type_field
        new_field = exposure_class_field
    else:
        raise InvalidKeywordsForProcessingAlgorithm

    # It's a hazard layer
    if exposure_key:
        if not active_thresholds_value_maps(keywords, exposure_key):
            raise InvalidKeywordsForProcessingAlgorithm
        value_map = active_thresholds_value_maps(keywords, exposure_key)
    # It's an exposure layer
    else:
        if not keywords.get('value_map'):
            raise InvalidKeywordsForProcessingAlgorithm
        value_map = keywords.get('value_map')

    unclassified_column = inasafe_fields[old_field['key']]
    unclassified_index = layer.fields().lookupField(unclassified_column)

    reversed_value_map = {}
    for inasafe_class, values in list(value_map.items()):
        for val in values:
            reversed_value_map[val] = inasafe_class

    classified_field = QgsField()
    classified_field.setType(new_field['type'])
    classified_field.setName(new_field['field_name'])
    classified_field.setLength(new_field['length'])
    classified_field.setPrecision(new_field['precision'])

    layer.startEditing()
    layer.addAttribute(classified_field)

    classified_field_index = layer.fields().lookupField(
        classified_field.name())

    for feature in layer.getFeatures():
        attributes = feature.attributes()
        source_value = attributes[unclassified_index]
        classified_value = reversed_value_map.get(source_value)

        if not classified_value:
            classified_value = ''

        layer.changeAttributeValue(feature.id(), classified_field_index,
                                   classified_value)

    layer.commitChanges()

    remove_fields(layer, [unclassified_column])

    # We transfer keywords to the output.
    # We add the new class field
    inasafe_fields[new_field['key']] = new_field['field_name']

    # and we remove the old, unclassified value field
    inasafe_fields.pop(old_field['key'])

    layer.keywords = keywords
    layer.keywords['inasafe_fields'] = inasafe_fields
    if exposure_key:
        value_map_key = 'value_maps'
    else:
        value_map_key = 'value_map'
    if value_map_key in list(layer.keywords.keys()):
        layer.keywords.pop(value_map_key)
    layer.keywords['title'] = output_layer_name
    if classification:
        layer.keywords['classification'] = classification

    check_layer(layer)
    return layer
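
For reference, the heart of update_value_map above is the inversion of the keyword value map: every raw attribute value points back to the InaSAFE class that lists it, and unmatched values fall back to an empty string. The standalone sketch below reproduces just that inversion with a made-up value_map; invert_value_map is a hypothetical helper for illustration, not part of InaSAFE.

def invert_value_map(value_map):
    """Map each raw attribute value to the InaSAFE class that lists it."""
    reversed_value_map = {}
    for inasafe_class, values in value_map.items():
        for val in values:
            reversed_value_map[val] = inasafe_class
    return reversed_value_map


if __name__ == '__main__':
    # Hypothetical exposure value map, in the same shape as layer keywords.
    value_map = {
        'residential': ['house', 'apartment'],
        'commercial': ['shop', 'office'],
    }
    lookup = invert_value_map(value_map)
    print(lookup.get('shop'))  # commercial
    # Unknown source values fall back to '' in update_value_map above.
    print(lookup.get('barn') or '')
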
Example No. 24
def multi_exposure_general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.3
    """
    context = {}
    extra_args = component_metadata.extra_args

    multi_exposure = impact_report.multi_exposure_impact_function
    analysis_layer = impact_report.analysis
    provenances = [
        impact_function.provenance for impact_function in (
            multi_exposure.impact_functions)]
    debug_mode = multi_exposure.debug
    population_exist = False

    hazard_keywords = provenances[0]['hazard_keywords']
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Summarize every value needed for each exposure and extract later to the
    # context.
    summary = []
    map_legend_titles = []
    hazard_classifications = {}
    exposures_stats = []
    for provenance in provenances:
        map_legend_title = provenance['map_legend_title']
        map_legend_titles.append(map_legend_title)
        exposure_stats = {}
        exposure_keywords = provenance['exposure_keywords']
        exposure_type = definition(exposure_keywords['exposure'])
        exposure_stats['exposure'] = exposure_type
        # Round the numbers unless we are in debug mode; flag population
        # exposure so the numbers get population-specific formatting.
        is_rounded = not debug_mode
        is_population = exposure_type is exposure_population
        if is_population:
            population_exist = True

        analysis_feature = next(analysis_layer.getFeatures())
        analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

        exposure_unit = exposure_type['units'][0]
        if exposure_unit['abbreviation']:
            value_header = '{measure} ({abbreviation})'.format(
                measure=map_legend_title,
                abbreviation=exposure_unit['abbreviation'])
        else:
            value_header = '{name}'.format(name=map_legend_title)

        exposure_stats['value_header'] = value_header

        # Get hazard classification
        hazard_classification = definition(
            active_classification(hazard_keywords,
                                  exposure_keywords['exposure']))
        hazard_classifications[exposure_type['key']] = hazard_classification

        # in case there is a classification
        if hazard_classification:
            classification_result = {}
            reported_fields_result = {}
            for hazard_class in hazard_classification['classes']:
                # exposure_hazard_count_field is a dynamic field keyed by the
                # exposure and the hazard class
                field_key_name = exposure_hazard_count_field['key'] % (
                    exposure_type['key'], hazard_class['key'])
                try:
                    # Retrieve the dynamic field name from the analysis
                    # inasafe_fields keywords; this raises a KeyError if there
                    # is no hazard count for this particular class.
                    field_name = analysis_inasafe_fields[field_key_name]
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    hazard_value = format_number(
                        analysis_feature[field_index],
                        use_rounding=is_rounded,
                        is_population=is_population)
                except KeyError:
                    # in case the field was not found
                    hazard_value = 0

                classification_result[hazard_class['key']] = hazard_value

            # find total field
            try:
                field_key_name = exposure_total_exposed_field['key'] % (
                    exposure_type['key'])
                field_name = analysis_inasafe_fields[field_key_name]
                total = value_from_field_name(field_name, analysis_layer)
                total = format_number(
                    total, use_rounding=is_rounded,
                    is_population=is_population)
                classification_result[total_exposed_field['key']] = total
            except KeyError:
                pass

            exposure_stats['classification_result'] = classification_result

        for item in reported_fields:
            field = item.get('field')
            multi_exposure_field = item.get('multi_exposure_field')
            row_value = '-'
            if multi_exposure_field:
                field_key = (
                    multi_exposure_field['key'] % (exposure_type['key']))
                field_name = (
                    multi_exposure_field['field_name'] % (
                        exposure_type['key']))
                if field_key in analysis_inasafe_fields:
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    row_value = format_number(
                        analysis_feature[field_index],
                        use_rounding=is_rounded,
                        is_population=is_population)

            elif field in [displaced_field, fatalities_field]:
                if field['key'] in analysis_inasafe_fields and is_population:
                    field_index = analysis_layer.fields().lookupField(
                        field['name'])
                    if field == fatalities_field:
                        # For the fatalities field, we show a range of
                        # numbers instead.
                        row_value = fatalities_range(
                            analysis_feature[field_index])
                    else:
                        row_value = format_number(
                            analysis_feature[field_index],
                            use_rounding=is_rounded,
                            is_population=is_population)

            reported_fields_result[field['key']] = row_value

        exposure_stats['reported_fields_result'] = reported_fields_result
        exposures_stats.append(exposure_stats)

    # After summarizing the values, proceed to context extraction.

    # find total value and labels for each exposure
    value_headers = []
    total_values = []
    for exposure_stats in exposures_stats:
        # label
        value_header = exposure_stats['value_header']
        value_headers.append(value_header)

        # total value
        classification_result = exposure_stats['classification_result']
        total_value = classification_result[total_exposed_field['key']]
        total_values.append(total_value)

    classifications = list(hazard_classifications.values())
    is_item_identical = (
        classifications.count(
            classifications[0]) == len(classifications))
    if classifications and is_item_identical:
        hazard_stats = []
        for hazard_class in classifications[0]['classes']:
            values = []
            for exposure_stats in exposures_stats:
                classification_result = exposure_stats['classification_result']
                value = classification_result[hazard_class['key']]
                values.append(value)
            stats = {
                'key': hazard_class['key'],
                'name': hazard_class['name'],
                'numbers': values
            }
            hazard_stats.append(stats)

        total_stats = {
            'key': total_exposed_field['key'],
            'name': total_exposed_field['name'],
            'as_header': True,
            'numbers': total_values
        }
        hazard_stats.append(total_stats)

        summary.append({
            'header_label': hazard_header,
            'value_labels': value_headers,
            'rows': hazard_stats
        })
    # if there are different hazard classifications used in the analysis,
    # we will create a separate table for each hazard classification
    else:
        hazard_classification_groups = {}
        for exposure_key, hazard_classification in (
                iter(list(hazard_classifications.items()))):
            exposure_type = definition(exposure_key)
            if hazard_classification['key'] not in (
                    hazard_classification_groups):
                hazard_classification_groups[hazard_classification['key']] = [
                    exposure_type]
            else:
                hazard_classification_groups[
                    hazard_classification['key']].append(exposure_type)

        for hazard_classification_key, exposures in (
                iter(list(hazard_classification_groups.items()))):
            custom_headers = []
            custom_total_values = []
            # find total value and labels for each exposure
            for exposure_stats in exposures_stats:
                if exposure_stats['exposure'] not in exposures:
                    continue
                # label
                value_header = exposure_stats['value_header']
                custom_headers.append(value_header)

                # total value
                classification_result = exposure_stats['classification_result']
                total_value = classification_result[total_exposed_field['key']]
                custom_total_values.append(total_value)

            hazard_stats = []
            hazard_classification = definition(hazard_classification_key)
            for hazard_class in hazard_classification['classes']:
                values = []
                for exposure_stats in exposures_stats:
                    if exposure_stats['exposure'] not in exposures:
                        continue
                    classification_result = exposure_stats[
                        'classification_result']
                    value = classification_result[hazard_class['key']]
                    values.append(value)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_class['name'],
                    'numbers': values
                }
                hazard_stats.append(stats)

            total_stats = {
                'key': total_exposed_field['key'],
                'name': total_exposed_field['name'],
                'as_header': True,
                'numbers': custom_total_values
            }
            hazard_stats.append(total_stats)

            summary.append({
                'header_label': hazard_header,
                'value_labels': custom_headers,
                'rows': hazard_stats
            })

    reported_fields_stats = []
    for item in reported_fields:
        field = item.get('field')
        values = []
        for exposure_stats in exposures_stats:
            reported_fields_result = exposure_stats['reported_fields_result']
            value = reported_fields_result[field['key']]
            values.append(value)
        stats = {
            'key': field['key'],
            'name': item['header'],
            'numbers': values
        }
        reported_fields_stats.append(stats)

    header_label = resolve_from_dictionary(
        extra_args, ['reported_fields_header'])
    summary.append({
        'header_label': header_label,
        'value_labels': value_headers,
        'rows': reported_fields_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])

    combined_map_legend_title = ''
    for index, map_legend_title in enumerate(map_legend_titles):
        combined_map_legend_title += map_legend_title
        if not (index + 1) == len(map_legend_titles):
            combined_map_legend_title += ', '

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')
    table_header = table_header_format.format(
        title=combined_map_legend_title,
        unit=classifications[0]['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(
        extra_args, ['concept_notes', 'note_format'])

    concepts = resolve_from_dictionary(
        extra_args, ['concept_notes', 'general_concepts'])

    if population_exist:
        concepts += resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
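
One last note: the legend-title loop near the end of both extractors is simply a manual comma join. The short, standalone sketch below makes that equivalence explicit; the titles used here are made up for illustration.

map_legend_titles = ['Number of people', 'Number of buildings']

combined_map_legend_title = ''
for index, map_legend_title in enumerate(map_legend_titles):
    combined_map_legend_title += map_legend_title
    if not (index + 1) == len(map_legend_titles):
        combined_map_legend_title += ', '

# The loop above produces the same result as a plain join.
assert combined_map_legend_title == ', '.join(map_legend_titles)
print(combined_map_legend_title)  # Number of people, Number of buildings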