Example #1
0
def generate_impact_map_report(impact_function, iface):
    """Generate impact map pdf from impact function.

    :param impact_function: The impact function used.
    :type impact_function: ImpactFunction

    :param iface: QGIS QGisAppInterface instance.
    :type iface: QGisAppInterface
    """
    # get the extra layers that we need
    extra_layers = []
    print_atlas = setting('print_atlas_report', False, bool)
    if print_atlas:
        extra_layers.append(impact_function.aggregation_summary)

    # get the hazard and exposure type
    hazard_layer = impact_function.hazard
    exposure_layer = impact_function.exposure

    hazard_type = layer_definition_type(hazard_layer)
    exposure_type = layer_definition_type(exposure_layer)

    # create impact report instance
    report_metadata = ReportMetadata(metadata_dict=update_template_component(
        component=map_report, hazard=hazard_type, exposure=exposure_type))
    impact_report = ImpactReport(iface,
                                 report_metadata,
                                 impact_function=impact_function,
                                 extra_layers=extra_layers)

    # Get other settings
    logo_path = setting('organisation_logo_path', None, str)
    impact_report.inasafe_context.organisation_logo = logo_path

    disclaimer_text = setting('reportDisclaimer', None, str)
    impact_report.inasafe_context.disclaimer = disclaimer_text

    north_arrow_path = setting('north_arrow_path', None, str)
    impact_report.inasafe_context.north_arrow = north_arrow_path

    # get the extent of impact layer
    impact_report.qgis_composition_context.extent = \
        impact_function.impact.extent()

    # generate report folder

    # no other option for now
    # TODO: retrieve the information from data store
    if isinstance(impact_function.datastore.uri, QDir):
        layer_dir = impact_function.datastore.uri.absolutePath()
    else:
        # No other way for now
        return

    # TODO: generate the report on the fly without storing it, once the
    # datastore supports that
    impact_report.output_folder = os.path.join(layer_dir, 'output')
    return impact_report.process_components()
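

# --- Hypothetical usage sketch (not part of the original example) ------------
# Assumes a running QGIS session in which ``iface`` and a completed
# ``impact_function`` are already available; the logger and the wrapper
# below are illustrative assumptions, not InaSAFE API.
import logging

SKETCH_LOGGER = logging.getLogger(__name__)


def run_map_report_sketch(impact_function, iface):
    """Call generate_impact_map_report and log the outcome (sketch only)."""
    result = generate_impact_map_report(impact_function, iface)
    if result is None:
        # the generator returns early when the datastore URI is not a QDir
        SKETCH_LOGGER.warning(
            'Map report skipped: datastore is not file based.')
    else:
        SKETCH_LOGGER.info('Map report components processed: %s', result)
    return result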
Example #2
0
    def test_layer_definition_type(self):
        """Test layer_definition_type method.

        .. versionadded:: 4.0
        """
        layer_paths = self.layer_paths_list
        expected_definitions = [
            hazard_generic,
            hazard_earthquake,
            hazard_tsunami,
            hazard_cyclone,
            exposure_structure,
            exposure_structure,
            exposure_population,
            exposure_road,
        ]
        for layer_path, expected_definition in zip(
                layer_paths, expected_definitions):
            path = standard_data_path(*layer_path)
            layer, _ = load_layer(path)
            actual_definition = layer_definition_type(layer)
            try:
                self.assertEqual(expected_definition, actual_definition)
            except Exception:
                LOGGER.error('Layer path: {path}'.format(
                    path=path))
                LOGGER.error('Expected {name}'.format(
                    **expected_definition))
                LOGGER.error('Actual {name}'.format(
                    **actual_definition))
                raise
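
    # --- Illustrative fixture sketch (assumption, not real test data) -------
    # The loop above assumes ``self.layer_paths_list`` holds path tuples that
    # line up positionally with ``expected_definitions``; a minimal fixture
    # could look like this (file names below are placeholders):
    layer_paths_list = [
        ('hazard', 'generic_hazard.shp'),
        ('hazard', 'earthquake.tif'),
        ('hazard', 'tsunami.tif'),
        ('hazard', 'cyclone.shp'),
        ('exposure', 'buildings.shp'),
        ('exposure', 'building_points.shp'),
        ('exposure', 'population.tif'),
        ('exposure', 'roads.shp'),
    ]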
Example #3
0
    def test_layer_definition_type(self):
        """Test layer_definition_type method.

        .. versionadded:: 4.0
        """
        layer_paths = self.layer_paths_list
        expected_definitions = [
            hazard_generic,
            hazard_earthquake,
            hazard_tsunami,
            hazard_cyclone,
            exposure_structure,
            exposure_structure,
            exposure_population,
            exposure_road,
        ]
        for layer_path, expected_definition in zip(layer_paths,
                                                   expected_definitions):
            path = standard_data_path(*layer_path)
            layer, _ = load_layer(path)
            actual_definition = layer_definition_type(layer)
            try:
                self.assertEqual(expected_definition, actual_definition)
            except Exception:
                LOGGER.error('Layer path: {path}'.format(path=path))
                LOGGER.error('Expected {name}'.format(**expected_definition))
                LOGGER.error('Actual {name}'.format(**actual_definition))
                raise
Example #4
0
def general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    # figure out analysis report type
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode

    exposure_type = layer_definition_type(exposure_layer)
    # Only round the numbers when the exposure is population and we are
    # not in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # find hazard class
    summary = []

    analysis_feature = analysis_layer.getFeatures().next()
    analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

    exposure_unit = exposure_type['units'][0]
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')
    if exposure_unit['abbreviation']:
        value_header = u'{measure} ({abbreviation})'.format(**exposure_unit)
    else:
        value_header = u'{name}'.format(**exposure_unit)

    # in case there is a classification
    if 'classification' in hazard_layer.keywords:

        # retrieve hazard classification from hazard layer
        hazard_classification = layer_hazard_classification(hazard_layer)

        # classified hazard must have hazard count in analysis layer
        hazard_stats = []
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )
            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = analysis_inasafe_fields[field_key_name]
                field_index = analysis_layer.fieldNameIndex(field_name)
                # Hazard label taken from translated hazard count field
                # label, string-formatted with translated hazard class label
                hazard_label = hazard_class['name']

                hazard_value = format_number(analysis_feature[field_index],
                                             enable_rounding=is_rounded,
                                             is_population=is_population)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': hazard_value
                }
            except KeyError:
                # in case the field was not found
                hazard_label = hazard_class['name']
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': 0,
                }

            hazard_stats.append(stats)

        # find total field
        try:
            field_name = analysis_inasafe_fields[total_field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            total = format_number(total,
                                  enable_rounding=is_rounded,
                                  is_population=is_population)
            stats = {
                'key': total_field['key'],
                'name': total_field['name'],
                'as_header': True,
                'value': total
            }
            hazard_stats.append(stats)
        except KeyError:
            pass

        summary.append({
            'header_label': hazard_header,
            'value_label': value_header,
            'rows': hazard_stats
        })

    # retrieve affected column
    report_stats = []

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')
    for item in reported_fields:
        header = item['header']
        field = item['field']
        if field['key'] in analysis_inasafe_fields:
            field_index = analysis_layer.fieldNameIndex(field['field_name'])
            if field == fatalities_field:
                # For fatalities field, we show a range of number
                # instead
                row_value = fatalities_range(analysis_feature[field_index])
            else:
                row_value = format_number(analysis_feature[field_index],
                                          enable_rounding=is_rounded,
                                          is_population=is_population)
            row_stats = {
                'key': field['key'],
                'name': header,
                'value': row_value
            }
            report_stats.append(row_stats)

    # Give report section
    exposure_type = layer_definition_type(exposure_layer)
    header_label = exposure_type['name']
    summary.append({
        'header_label': header_label,
        # This should depend on exposure unit
        # TODO: Change this so it can take the unit dynamically
        'value_label': value_header,
        'rows': report_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])
    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')
    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=hazard_classification['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(extra_args,
                                          ['concept_notes', 'note_format'])

    if is_population:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])
    else:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'general_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
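

# --- Illustrative context shape (assumption based on the code above) ---------
# The extractor returns a plain dict consumed by the rendering phase; the
# values below are invented placeholders that only show the structure built
# above (a summary group per hazard breakdown plus the reported fields).
EXAMPLE_GENERAL_REPORT_CONTEXT = {
    'header': 'General Report',
    'summary': [
        {
            'header_label': 'Hazard Zone',
            'value_label': 'Count',
            'rows': [
                {'key': 'high', 'name': 'High', 'value': '1,200'},
                {'key': 'total', 'name': 'Total', 'as_header': True,
                 'value': '4,000'},
            ],
        },
        {
            'header_label': 'Population',
            'value_label': 'Count',
            'rows': [
                {'key': 'total_affected_field', 'name': 'Total affected',
                 'value': '1,200'},
            ],
        },
    ],
    'table_header': 'Estimated number of people affected per hazard zone',
    'notes': ['Numbers are rounded to the nearest 1,000.'],
}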
Example #5
0
def aggregation_result_extractor(impact_report, component_metadata):
    """Extracting aggregation result of breakdown from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    aggregation_summary = impact_report.aggregation_summary
    aggregation_summary_fields = aggregation_summary.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    """Filtering report sections"""

    # Only process for applicable exposure types
    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the numbers when the exposure is population and we are
    # not in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # For now the aggregation report is only applicable to breakable
    # exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context
    """Generating type name for columns"""

    type_fields = read_dynamic_inasafe_field(aggregation_summary_fields,
                                             affected_exposure_count_field)
    # do not include total, to preserve ordering and proper reference
    type_fields.remove('total')

    # we need to sort the column
    # get the classes lists
    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort columns based on class order
    # create function to sort
    def sort_classes(_type_field):
        """Sort method to retrieve exposure class key index."""
        # class key is the type field name
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _type_field == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    type_fields = sorted(type_fields, key=sort_classes)

    # generate type_header_labels for column header
    type_header_labels = []
    for type_name in type_fields:
        type_label = tr(type_name.capitalize())
        type_header_labels.append(type_label)
    """Generating values for rows"""

    # generate rows of values for values of each column
    rows = []
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    total_field_index = aggregation_summary.fieldNameIndex(
        total_affected_field['field_name'])

    type_field_index = []
    for type_name in type_fields:
        field_name = affected_exposure_count_field['field_name'] % type_name
        type_index = aggregation_summary.fieldNameIndex(field_name)
        type_field_index.append(type_index)

    for feat in aggregation_summary.getFeatures():
        total_affected_value = format_number(feat[total_field_index],
                                             enable_rounding=is_rounded,
                                             is_population=is_population)
        if total_affected_value == '0':
            # skip aggregation type if the total affected is zero
            continue
        item = {
            # Name is the header for each row
            'name': feat[aggregation_name_index],
            # Total is the total for each row
            'total': total_affected_value
        }
        # Type values is the values for each column in each row
        type_values = []
        for idx in type_field_index:
            affected_value = format_number(feat[idx],
                                           enable_rounding=is_rounded)
            type_values.append(affected_value)
        item['type_values'] = type_values
        rows.append(item)
    """Generate total for footers"""

    # calculate total values for each type. Taken from exposure summary table
    type_total_values = []
    # Get affected field index
    affected_field_index = exposure_summary_table.fieldNameIndex(
        total_affected_field['field_name'])

    # Get breakdown field
    breakdown_field = None
    # It is possible to have either exposure_type_field or
    # exposure_class_field at the moment; the difference between the two
    # is not clear
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    breakdown_field_name = breakdown_field['field_name']
    breakdown_field_index = exposure_summary_table.fieldNameIndex(
        breakdown_field_name)

    # Fetch total affected for each breakdown name
    value_dict = {}
    for feat in exposure_summary_table.getFeatures():
        # exposure summary table is in csv format, so the field returned is
        # always in text format
        affected_value = int(float(feat[affected_field_index]))
        affected_value = format_number(affected_value,
                                       enable_rounding=is_rounded,
                                       is_population=is_population)
        value_dict[feat[breakdown_field_index]] = affected_value

    if value_dict:
        for type_name in type_fields:
            affected_value_string_formatted = value_dict[type_name]
            if affected_value_string_formatted == '0':
                # if total affected for breakdown type is zero
                # current column index
                column_index = len(type_total_values)
                # cut column header
                type_header_labels = (type_header_labels[:column_index] +
                                      type_header_labels[column_index + 1:])
                # cut all row values for the column
                for item in rows:
                    type_values = item['type_values']
                    item['type_values'] = (type_values[:column_index] +
                                           type_values[column_index + 1:])
                continue
            type_total_values.append(affected_value_string_formatted)
    """Get the super total affected"""

    # total for affected (super total)
    analysis_feature = analysis_layer.getFeatures().next()
    field_index = analysis_layer.fieldNameIndex(
        total_affected_field['field_name'])
    total_all = format_number(analysis_feature[field_index],
                              enable_rounding=is_rounded)
    """Generate and format the context"""
    aggregation_area_default_header = resolve_from_dictionary(
        extra_args, 'aggregation_area_default_header')
    header_label = (aggregation_summary.title()
                    or aggregation_area_default_header)

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'], unit=unit_string)
    table_header = ' '.join(table_header.split())

    section_header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    total_in_aggregation_header = resolve_from_dictionary(
        extra_args, 'total_in_aggregation_header')
    context['header'] = section_header
    context['notes'] = notes
    context['aggregation_result'] = {
        'table_header': table_header,
        'header_label': header_label,
        'type_header_labels': type_header_labels,
        'total_label': total_header,
        'total_in_aggregation_area_label': total_in_aggregation_header,
        'rows': rows,
        'type_total_values': type_total_values,
        'total_all': total_all,
    }
    return context
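

# --- Illustrative aggregation_result shape (assumption) ----------------------
# Placeholder values showing the structure assembled above: one row per
# aggregation area, one column per exposure class, plus per-column totals.
EXAMPLE_AGGREGATION_RESULT_CONTEXT = {
    'header': 'Aggregation Result',
    'notes': ['Columns with zero affected are removed.'],
    'aggregation_result': {
        'table_header': 'Affected buildings by aggregation area',
        'header_label': 'Aggregation area',
        'type_header_labels': ['Residential', 'Commercial'],
        'total_label': 'Total',
        'total_in_aggregation_area_label': 'Total in aggregation areas',
        'rows': [
            {'name': 'District A', 'total': '300',
             'type_values': ['200', '100']},
        ],
        'type_total_values': ['200', '100'],
        'total_all': '300',
    },
}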
Example #6
0
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: The impact report that acts as a proxy to fetch
        all the data that the extractor needs.
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: The component metadata. Used to obtain
        information about the component we want to render.
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: Context for rendering phase.
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = analysis_layer.getFeatures().next()
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode
    """Initializations"""

    # Get hazard classification
    hazard_classification = layer_hazard_classification(hazard_layer)

    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the numbers when the exposure is population and we are
    # not in debug mode
    is_rounding = not debug_mode
    is_population = exposure_type is exposure_population

    # Analysis detail is only applicable to breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # It is possible to have either exposure_type_field or
    # exposure_class_field at the moment; the difference between the two
    # is not clear
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    """Create detail header"""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(breakdown_header_template.format(exposure=exposure_header))

    # this is a mapping for customizing the double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args, 'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': [],
            'total': []
        },
        'not_affected': {
            'hazards': [],
            'total': []
        }
    }
    for key, group in header_hazard_group.iteritems():
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    affected_header_index = None
    for index, hazard_class in enumerate(hazard_classification['classes']):
        # the tuple format would be:
        # (class name, is it affected, header background color)

        hazard_class_name = hazard_class['name']
        affected = hazard_class.get('affected')

        if not affected and not affected_header_index:
            affected_header_index = index + 1
            affected_status = 'not_affected'
        elif affected:
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    if affected_header_index:
        not_affected_header_index = len(hazard_classification['classes']) + 2
    else:
        affected_header_index = len(hazard_classification['classes']) + 1
        not_affected_header_index = affected_header_index + 2

    headers.insert(affected_header_index, total_affected_field['name'])
    headers.insert(not_affected_header_index, total_not_affected_field['name'])
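
    # Illustrative resulting order, assuming the classification lists its
    # affected classes before the not affected ones:
    #   [breakdown header, <affected classes...>, total affected,
    #    <not affected classes...>, total not affected]
    # (total not exposed and total are appended further below)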

    header_hazard_group['affected']['total'].append(
        total_affected_field['name'])
    header_hazard_group['not_affected']['total'].append(
        total_not_affected_field['name'])

    # affected, not affected, not exposed, total header
    report_fields = [
        total_affected_field, total_not_affected_field,
        total_not_exposed_field, total_field
    ]
    for report_field in report_fields[2:]:
        headers.append(report_field['name'])
    """Create detail rows"""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fieldNameIndex(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in header_hazard_group.iteritems():
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fieldNameIndex(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(count_value,
                                            enable_rounding=is_rounding,
                                            is_population=is_population)
                row.append({'value': count_value, 'header_group': group_key})
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({'value': 0, 'header_group': group_key})

        skip_row = False

        for field in report_fields:
            group_key = None
            for key, group in header_hazard_group.iteritems():
                if field['name'] in group['total']:
                    group_key = key
                    break

            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(total_count,
                                        enable_rounding=is_rounding,
                                        is_population=is_population)

            # the code below is commented out because we now want to show
            # all rows; uncomment it to drop rows whose total affected is
            # zero

            # if total_count == '0' and field == total_affected_field:
            #     skip_row = True
            #     break

            if group_key:
                if field == total_affected_field:
                    row.insert(affected_header_index, {
                        'value': total_count,
                        'header_group': group_key
                    })
                elif field == total_not_affected_field:
                    row.insert(not_affected_header_index, {
                        'value': total_count,
                        'header_group': group_key
                    })
                else:
                    row.append({
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # class key is first column
        _class_key = _row[0]
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _class_key == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name
    """create total footers"""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    save_total_affected_field = False
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (hazard_class['key'], )
        if not hazard_class.get('affected'):
            save_total_affected_field = True

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fieldNameIndex(field_name)
            count_value = format_number(analysis_feature[field_index],
                                        enable_rounding=is_rounding,
                                        is_population=is_population)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers) + int(save_total_affected_field)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            # reduce total affected and not affected column index by 1
            # since we are removing a column
            if group_key == affected_field['field_name']:
                affected_header_index -= 1
            else:
                not_affected_header_index -= 1
            continue
        footers.append({'value': count_value, 'header_group': group_key})

    # for footers
    for field in report_fields:

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if field['name'] in group['total']:
                group_key = key
                break

        total_count = value_from_field_name(field['field_name'],
                                            analysis_layer)
        total_count = format_number(total_count,
                                    enable_rounding=is_rounding,
                                    is_population=is_population)
        if group_key:
            if field == total_affected_field:
                footers.insert(affected_header_index, {
                    'value': total_count,
                    'header_group': group_key
                })
            elif field == total_not_affected_field:
                footers.insert(not_affected_header_index, {
                    'value': total_count,
                    'header_group': group_key
                })
            else:
                footers.append({
                    'value': total_count,
                    'header_group': group_key
                })
        else:
            footers.append(total_count)

    header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')

    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0

    # we want total affected and total not affected to be grouped with
    # their hazard classes, hence len(report_fields) - 2
    total_header_index = len(headers) - (len(report_fields) - 2)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class_name in group['hazards'] or (hazard_class_name
                                                         in group['total']):
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    context['extra_table'] = {}

    # extra table for specific exposure types, if any exist
    extra_fields = resolve_from_dictionary(extra_args, 'exposure_extra_fields')
    if exposure_type['key'] in extra_fields.keys():

        # create header for the extra table
        extra_table_header_format = resolve_from_dictionary(
            extra_args, 'extra_table_header_format')
        extra_table_header = extra_table_header_format.format(
            exposure=exposure_header)

        # headers
        headers = []
        headers.append(
            breakdown_header_template.format(exposure=exposure_header))

        current_unit = None
        currency_unit = setting('currency', expected_type=str)
        for field in extra_fields[exposure_type['key']]:
            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            if field_index < 0:
                LOGGER.debug('Field name not found: %s, field index: %s' %
                             (field['field_name'], field_index))
                continue

            units = field.get('units')
            if units:
                for unit in units:
                    if currency_unit == unit['key']:
                        current_unit = unit['name']
                        break
                if not current_unit:
                    current_unit = units[0]['name']

            header_format = '{header} ({unit})'
            headers.append(
                header_format.format(header=field['header_name'],
                                     unit=current_unit))

        # rows
        details = []
        for feat in exposure_summary_table.getFeatures():
            row = []

            # Get breakdown name
            exposure_summary_table_field_name = breakdown_field['field_name']
            field_index = exposure_summary_table.fieldNameIndex(
                exposure_summary_table_field_name)
            class_key = feat[field_index]

            row.append(class_key)

            for field in extra_fields[exposure_type['key']]:
                field_index = exposure_summary_table.fieldNameIndex(
                    field['field_name'])
                # noinspection PyBroadException
                try:
                    total_count = int(float(feat[field_index]))
                except:
                    LOGGER.debug('Field name not found: %s, field index: %s' %
                                 (field['field_name'], field_index))
                    continue
                total_count = format_number(total_count,
                                            enable_rounding=is_rounding,
                                            is_population=is_population)
                row.append(total_count)

            details.append(row)

        details = sorted(details, key=sort_classes)

        context['extra_table'] = {
            'table_header': extra_table_header,
            'headers': headers,
            'details': details,
        }

    return context
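

# --- Illustrative detail_table header shape (assumption) ---------------------
# After the double-header pass above, entries between breakdown_header_index
# and total_header_index become dicts carrying the header group; the values
# below are invented placeholders for a two-class classification.
EXAMPLE_DETAIL_HEADERS = [
    'Structures',
    {'name': 'High', 'start': True, 'header_group': 'affected',
     'colspan': 2},
    {'name': 'Total Affected', 'start': False, 'header_group': 'affected'},
    {'name': 'Low', 'start': True, 'header_group': 'not_affected',
     'colspan': 2},
    {'name': 'Total Not Affected', 'start': False,
     'header_group': 'not_affected'},
    'Total Not Exposed',
    'Total',
]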
Example #7
0
def aggregation_postprocessors_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {
        'sections': OrderedDict()
    }

    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    aggregation_summary = impact_report.aggregation_summary
    analysis_layer = impact_report.analysis
    analysis_layer_fields = impact_report.analysis.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    use_aggregation = bool(impact_report.impact_function.provenance[
        'aggregation_layer'])

    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)

    # this entire section is only for population exposure type
    if not exposure_type == exposure_population:
        return context

    # check zero displaced (there will be no output to display)
    try:
        displaced_field_name = analysis_layer_fields[displaced_field['key']]
        total_displaced = value_from_field_name(
            displaced_field_name, analysis_layer)

        zero_displaced = False
        if total_displaced == 0:
            zero_displaced = True
    except KeyError:
        # in case there is no displaced field,
        # let each section handle it itself
        zero_displaced = False

    context['use_aggregation'] = use_aggregation
    if not use_aggregation:
        context['header'] = resolve_from_dictionary(
            extra_args, 'header')

    group_header_format = resolve_from_dictionary(
        extra_args, ['defaults', 'group_header_format'])
    section_header_format = resolve_from_dictionary(
        extra_args, ['defaults', 'section_header_format'])

    """Age Groups"""
    age_items = {
        'group': age_displaced_count_group,
        'group_header': group_header_format.format(
            header_name=age_displaced_count_group['header_name']),
        'fields': [postprocessor_output_field(p) for p in age_postprocessors]
    }

    # check age_fields exists
    for field in age_items['fields']:
        if field['key'] in analysis_layer_fields:
            no_age_field = False
            break
    else:
        no_age_field = True

    context['sections']['age'] = []
    age_section_header = section_header_format.format(
        header_name=age_displaced_count_group['header_name'])
    if zero_displaced:
        context['sections']['age'].append(
            {
                'header': age_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    elif no_age_field:
        context['sections']['age'].append(
            {
                'header': age_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'no_age_rate_message'])
            }
        )
    else:
        context['sections']['age'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                age_items,
                age_section_header,
                use_aggregation=use_aggregation,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )

    """Gender Groups"""
    gender_items = {
        'group': gender_displaced_count_group,
        'group_header': group_header_format.format(
            header_name=gender_displaced_count_group['header_name']),
        'fields': [
            postprocessor_output_field(p) for p in gender_postprocessors]
    }

    # check gender_fields exists
    for field in gender_items['fields']:
        if field['key'] in analysis_layer_fields:
            no_gender_field = False
            break
    else:
        no_gender_field = True

    context['sections']['gender'] = []
    gender_section_header = section_header_format.format(
        header_name=gender_displaced_count_group['header_name'])
    if zero_displaced:
        context['sections']['gender'].append(
            {
                'header': gender_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    elif no_gender_field:
        context['sections']['gender'].append(
            {
                'header': gender_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'no_gender_rate_message'])
            }
        )
    else:
        context['sections']['gender'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                gender_items,
                gender_section_header,
                use_aggregation=use_aggregation,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )

    """Vulnerability Groups"""
    context['sections']['vulnerability'] = []
    for vulnerability_group in vulnerability_displaced_count_groups:
        vulnerability_items = {
            'group': vulnerability_group,
            'group_header': group_header_format.format(
                header_name=vulnerability_group['header_name']),
            'fields': [field for field in vulnerability_group['fields']]
        }

        # check vulnerability_fields exists
        for field in vulnerability_items['fields']:
            if field['key'] in analysis_layer_fields:
                no_vulnerability_field = False
                break
        else:
            no_vulnerability_field = True

        vulnerability_section_header = section_header_format.format(
            header_name=vulnerability_group['header_name'])
        if zero_displaced:
            context['sections']['vulnerability'].append(
                {
                    'header': vulnerability_section_header,
                    'empty': True,
                    'message': resolve_from_dictionary(
                        extra_args, ['defaults', 'zero_displaced_message'])
                }
            )
        elif no_vulnerability_field:
            context['sections']['vulnerability'].append(
                {
                    'header': vulnerability_section_header,
                    'empty': True,
                    'message': resolve_from_dictionary(
                        extra_args,
                        ['defaults', 'no_vulnerability_rate_message'])
                }
            )
        else:
            context['sections']['vulnerability'].append(
                create_section(
                    aggregation_summary,
                    analysis_layer,
                    vulnerability_items,
                    vulnerability_section_header,
                    use_aggregation=use_aggregation,
                    debug_mode=debug_mode,
                    extra_component_args=extra_args)
            )

    """Minimum Needs"""
    context['sections']['minimum_needs'] = []
    minimum_needs_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'header'])
    # Don't show minimum needs if there are no displaced people
    if zero_displaced:
        context['sections']['minimum_needs'].append(
            {
                'header': minimum_needs_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    # Only provide a minimum needs breakdown if there is an aggregation layer
    elif use_aggregation:
        # minimum needs should provide unit for column headers
        units_label = []
        minimum_needs_items = {
            'group_header': u'Minimum needs breakdown',
            'fields': minimum_needs_fields + additional_minimum_needs
        }

        for field in minimum_needs_items['fields']:
            unit = None
            if field.get('need_parameter'):
                need = field['need_parameter']
                if isinstance(need, ResourceParameter):
                    unit_abbreviation = need.unit.abbreviation
            elif field.get('unit'):
                need_unit = field.get('unit')
                unit_abbreviation = need_unit.get('abbreviation')

            if unit_abbreviation:
                unit_format = '{unit}'
                unit = unit_format.format(
                    unit=unit_abbreviation)
            units_label.append(unit)

        context['sections']['minimum_needs'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                minimum_needs_items,
                minimum_needs_section_header,
                units_label=units_label,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )
    else:
        sections_not_empty = True
        for _, values in context['sections'].iteritems():
            for value in values:
                if value.get('rows'):
                    break
                else:
                    sections_not_empty = False

        context['sections_not_empty'] = sections_not_empty

    return context
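

# --- Illustrative sections shape (assumption based on the code above) --------
# ``context['sections']`` is an OrderedDict keyed by demographic group; each
# value is a list holding either an "empty" stub (as sketched here) or a
# section built by create_section(...). Values are invented placeholders and
# the OrderedDict comes from the same import the extractor itself uses.
EXAMPLE_POSTPROCESSOR_CONTEXT = {
    'use_aggregation': True,
    'sections': OrderedDict([
        ('age', [{'header': 'Detailed Age Report', 'empty': True,
                  'message': 'No displaced population.'}]),
        ('gender', []),          # filled by create_section in a real run
        ('vulnerability', []),
        ('minimum_needs', []),
    ]),
}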
Example #8
0
def aggregation_result_extractor(impact_report, component_metadata):
    """Extracting aggregation result of breakdown from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}

    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    aggregation_summary = impact_report.aggregation_summary
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode

    """Filtering report sections"""

    # Only process for applicable exposure types
    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the numbers when the exposure is population and we are
    # not in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # For now the aggregation report is only applicable to breakable
    # exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')]
    if exposure_type not in itemizable_exposures_all:
        return context

    """Generating type name for columns"""

    type_fields = read_dynamic_inasafe_field(
        aggregation_summary_fields, affected_exposure_count_field)
    # do not include total, to preserve ordering and proper reference
    type_fields.remove('total')

    # we need to sort the column
    # get the classes lists
    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort columns based on class order
    # create function to sort
    def sort_classes(_type_field):
        """Sort method to retrieve exposure class key index."""
        # class key is the type field name
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _type_field == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    type_fields = sorted(type_fields, key=sort_classes)

    # generate type_header_labels for column header
    type_header_labels = []
    for type_name in type_fields:
        type_label = tr(type_name.capitalize())
        type_header_labels.append(type_label)

    """Generating values for rows"""

    # generate rows of values for values of each column
    rows = []
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    total_field_index = aggregation_summary.fieldNameIndex(
        total_affected_field['field_name'])

    type_field_index = []
    for type_name in type_fields:
        field_name = affected_exposure_count_field['field_name'] % type_name
        type_index = aggregation_summary.fieldNameIndex(field_name)
        type_field_index.append(type_index)

    for feat in aggregation_summary.getFeatures():
        total_affected_value = format_number(
            feat[total_field_index],
            enable_rounding=is_rounded,
            is_population=is_population)
        if total_affected_value == '0':
            # skip aggregation type if the total affected is zero
            continue
        item = {
            # Name is the header for each row
            'name': feat[aggregation_name_index],
            # Total is the total for each row
            'total': total_affected_value
        }
        # Type values is the values for each column in each row
        type_values = []
        for idx in type_field_index:
            affected_value = format_number(
                feat[idx],
                enable_rounding=is_rounded)
            type_values.append(affected_value)
        item['type_values'] = type_values
        rows.append(item)

    """Generate total for footers"""

    # calculate total values for each type. Taken from exposure summary table
    type_total_values = []
    # Get affected field index
    affected_field_index = exposure_summary_table.fieldNameIndex(
        total_affected_field['field_name'])

    # Get breakdown field
    breakdown_field = None
    # It is possible to have either exposure_type_field or
    # exposure_class_field at the moment; the difference between the two
    # is not clear
    breakdown_fields = [
        exposure_type_field,
        exposure_class_field
    ]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    breakdown_field_name = breakdown_field['field_name']
    breakdown_field_index = exposure_summary_table.fieldNameIndex(
        breakdown_field_name)

    # Fetch total affected for each breakdown name
    value_dict = {}
    for feat in exposure_summary_table.getFeatures():
        # exposure summary table is in csv format, so the field returned is
        # always in text format
        affected_value = int(float(feat[affected_field_index]))
        affected_value = format_number(
            affected_value,
            enable_rounding=is_rounded,
            is_population=is_population)
        value_dict[feat[breakdown_field_index]] = affected_value

    if value_dict:
        for type_name in type_fields:
            affected_value_string_formatted = value_dict[type_name]
            if affected_value_string_formatted == '0':
                # if total affected for breakdown type is zero
                # current column index
                column_index = len(type_total_values)
                # cut column header
                type_header_labels = (
                    type_header_labels[:column_index] +
                    type_header_labels[column_index + 1:])
                # cut all row values for the column
                for item in rows:
                    type_values = item['type_values']
                    item['type_values'] = (
                        type_values[:column_index] +
                        type_values[column_index + 1:])
                continue
            type_total_values.append(affected_value_string_formatted)

    """Get the super total affected"""

    # total for affected (super total)
    analysis_feature = analysis_layer.getFeatures().next()
    field_index = analysis_layer.fieldNameIndex(
        total_affected_field['field_name'])
    total_all = format_number(
        analysis_feature[field_index],
        enable_rounding=is_rounded)

    """Generate and format the context"""
    aggregation_area_default_header = resolve_from_dictionary(
        extra_args, 'aggregation_area_default_header')
    header_label = (
        aggregation_summary.title() or aggregation_area_default_header)

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string)
    table_header = ' '.join(table_header.split())

    section_header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    total_in_aggregation_header = resolve_from_dictionary(
        extra_args, 'total_in_aggregation_header')
    context['header'] = section_header
    context['notes'] = notes
    context['aggregation_result'] = {
        'table_header': table_header,
        'header_label': header_label,
        'type_header_labels': type_header_labels,
        'total_label': total_header,
        'total_in_aggregation_area_label': total_in_aggregation_header,
        'rows': rows,
        'type_total_values': type_total_values,
        'total_all': total_all,
    }
    return context
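
The zero-total column pruning above slices the header list and every row at
the same column index. A minimal standalone sketch of the same idea follows;
the helper name and the data in the comment are made up for illustration and
are not part of InaSAFE.

def prune_zero_columns(headers, rows, totals):
    """Drop every column whose formatted total is the string '0'."""
    kept_indices = [i for i, total in enumerate(totals) if total != '0']
    kept_headers = [headers[i] for i in kept_indices]
    kept_totals = [totals[i] for i in kept_indices]
    kept_rows = [[row[i] for i in kept_indices] for row in rows]
    return kept_headers, kept_rows, kept_totals

# Hypothetical values:
# prune_zero_columns(['Flooded', 'Dry'], [['10', '0'], ['3', '0']], ['13', '0'])
# returns (['Flooded'], [['10'], ['3']], ['13'])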
Example #9
def generate_impact_map_report(impact_function, iface):
    """Generate impact map pdf from impact function.

    :param impact_function: The impact function used.
    :type impact_function: ImpactFunction

    :param iface: QGIS QGisAppInterface instance.
    :type iface: QGisAppInterface
    """
    # get the extra layers that we need
    extra_layers = []
    print_atlas = setting('print_atlas_report', False, bool)
    if print_atlas:
        extra_layers.append(impact_function.aggregation_summary)

    # get the hazard and exposure type
    hazard_layer = impact_function.hazard
    exposure_layer = impact_function.exposure

    hazard_type = layer_definition_type(hazard_layer)
    exposure_type = layer_definition_type(exposure_layer)

    # create impact report instance
    report_metadata = ReportMetadata(
        metadata_dict=update_template_component(
            component=map_report,
            hazard=hazard_type,
            exposure=exposure_type))
    impact_report = ImpactReport(
        iface,
        report_metadata,
        impact_function=impact_function,
        extra_layers=extra_layers)

    # Get other settings
    logo_path = setting('organisation_logo_path', None, str)
    impact_report.inasafe_context.organisation_logo = logo_path

    disclaimer_text = setting('reportDisclaimer', None, str)
    impact_report.inasafe_context.disclaimer = disclaimer_text

    north_arrow_path = setting('north_arrow_path', None, str)
    impact_report.inasafe_context.north_arrow = north_arrow_path

    # get the extent of impact layer
    impact_report.qgis_composition_context.extent = \
        impact_function.impact.extent()

    # generate report folder

    # For now the datastore URI is the only way to locate the output folder.
    # TODO: retrieve the information from the data store
    if isinstance(impact_function.datastore.uri, QDir):
        layer_dir = impact_function.datastore.uri.absolutePath()
    else:
        # No other way for now
        return

    # We will generate it on the fly without storing it, once the datastore
    # supports that.
    impact_report.output_folder = os.path.join(layer_dir, 'output')
    return impact_report.process_components()
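
A hedged usage sketch, not taken from the source: it assumes impact_function
is an ImpactFunction whose analysis has already been run and iface is the
running QGIS interface, matching the parameters documented above. The wrapper
name print_impact_map is made up.

def print_impact_map(impact_function, iface):
    """Generate the map PDF and report whether anything was written."""
    # generate_impact_map_report() returns None when the datastore URI is
    # not a QDir (see the early return above); otherwise it returns the
    # result of impact_report.process_components().
    result = generate_impact_map_report(impact_function, iface)
    return result is not None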
Example #10
def notes_assumptions_extractor(impact_report, component_metadata):
    """Extracting notes and assumptions of the exposure layer

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args
    exposure_type = layer_definition_type(exposure_layer)

    analysis_note_dict = resolve_from_dictionary(extra_args, 'analysis_notes')
    context['items'] = [analysis_note_dict]

    context['header'] = resolve_from_dictionary(extra_args, 'header')
    context['items'] += provenance['notes']

    # Get hazard classification
    hazard_classification = layer_hazard_classification(hazard_layer)

    # Check hazard affected class
    affected_classes = []
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('affected', False):
            affected_classes.append(hazard_class)

    if affected_classes:
        affected_note_dict = resolve_from_dictionary(extra_args,
                                                     'affected_note_format')

        # generate hazard classes
        hazard_classes = ', '.join([c['name'] for c in affected_classes])

        for index, affected_note in enumerate(affected_note_dict['item_list']):
            affected_note_dict['item_list'][index] = (affected_note.format(
                hazard_classes=hazard_classes))

        context['items'].append(affected_note_dict)

    # Check whether the hazard has a displacement rate
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('displacement_rate', 0) > 0:
            have_displacement_rate = True
            break
    else:
        have_displacement_rate = False

    # Only show the displacement note if the analysis is about population
    # exposure
    if have_displacement_rate and exposure_type == exposure_population:
        # add notes for displacement rate used
        displacement_note_dict = resolve_from_dictionary(
            extra_args, 'displacement_rates_note_format')

        # generate rate description
        displacement_rates_note_format = resolve_from_dictionary(
            extra_args, 'hazard_displacement_rates_note_format')
        displacement_rates_note = []
        for hazard_class in hazard_classification['classes']:
            displacement_rates_note.append(
                displacement_rates_note_format.format(**hazard_class))

        rate_description = ', '.join(displacement_rates_note)

        for index, displacement_note in enumerate(
                displacement_note_dict['item_list']):
            displacement_note_dict['item_list'][index] = (
                displacement_note.format(rate_description=rate_description))

        context['items'].append(displacement_note_dict)

    # Check whether the hazard has a fatality rate
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('fatality_rate', 0) > 0:
            have_fatality_rate = True
            break
    else:
        have_fatality_rate = False

    if have_fatality_rate and exposure_type == exposure_population:
        # add notes for fatality rate used
        fatality_note_dict = resolve_from_dictionary(
            extra_args, 'fatality_rates_note_format')

        # generate rate description
        fatality_rates_note_format = resolve_from_dictionary(
            extra_args, 'hazard_fatality_rates_note_format')
        fatality_rates_note = []
        for hazard_class in hazard_classification['classes']:
            # we make a copy here because we don't want to
            # change the real value.
            copy_of_hazard_class = dict(hazard_class)
            if not copy_of_hazard_class['fatality_rate'] > 0:
                copy_of_hazard_class['fatality_rate'] = 0
            else:
                # we want to show the rate as a scientific notation
                copy_of_hazard_class['fatality_rate'] = (
                    html_scientific_notation_rate(
                        copy_of_hazard_class['fatality_rate']))

            fatality_rates_note.append(
                fatality_rates_note_format.format(**copy_of_hazard_class))

        rate_description = ', '.join(fatality_rates_note)

        for index, fatality_note in enumerate(fatality_note_dict['item_list']):
            fatality_note_dict['item_list'][index] = (fatality_note.format(
                rate_description=rate_description))

        context['items'].append(fatality_note_dict)

    return context
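
The fatality-rate note above copies each hazard class dictionary before
replacing the numeric rate with a display string, so the shared class
definitions are never mutated. A standalone sketch of that copy-before-format
pattern; the class data and the formatter are made up.

def format_rates_for_display(hazard_classes, formatter):
    """Build display strings without touching the original definitions."""
    notes = []
    for hazard_class in hazard_classes:
        # shallow copy so the original definition is left untouched
        display_class = dict(hazard_class)
        rate = display_class.get('fatality_rate', 0)
        display_class['fatality_rate'] = formatter(rate) if rate > 0 else 0
        notes.append('{name}: {fatality_rate}'.format(**display_class))
    return notes

# Hypothetical usage:
# classes = [{'name': 'high', 'fatality_rate': 0.0001}, {'name': 'low'}]
# format_rates_for_display(classes, lambda r: '%.0e' % r)
# returns ['high: 1e-04', 'low: 0'] and leaves classes unchanged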
Example #11
def mmi_detail_extractor(impact_report, component_metadata):
    """Extracting MMI-related analysis result.

    This extractor should only be used for EQ Raster with Population.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    exposure_layer = impact_report.exposure
    hazard_layer = impact_report.hazard
    analysis_layer = impact_report.analysis
    analysis_layer_keywords = analysis_layer.keywords
    hazard_keywords = hazard_layer.keywords
    extra_args = component_metadata.extra_args
    enable_rounding = not impact_report.impact_function.debug_mode

    # check if this is EQ raster with population
    hazard_type = layer_definition_type(hazard_layer)
    if hazard_type != hazard_earthquake:
        return context

    hazard_geometry = hazard_keywords[layer_geometry['key']]
    if hazard_geometry != layer_geometry_raster['key']:
        return context

    exposure_type = layer_definition_type(exposure_layer)
    if exposure_type != exposure_population:
        return context

    header = resolve_from_dictionary(extra_args, 'header')

    context['header'] = header

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    """Generate headers"""
    table_header = [
        resolve_from_dictionary(extra_args, 'mmi_header')
    ] + [v['header'] for v in reported_fields]

    """Extract MMI-related data"""
    # MMI ranges from 1 to 10, so iterate over the half-open range [1, 11)
    mmi_range = range(1, 11)
    rows = []
    roman_numeral = [
        'I',
        'II',
        'III',
        'IV',
        'V',
        'VI',
        'VII',
        'VIII',
        'IX',
        'X'
    ]
    for i in mmi_range:
        columns = [roman_numeral[i - 1]]
        for value in reported_fields:
            field = value['field']
            try:
                key_name = field['key'] % (i, )
                field_name = analysis_layer_keywords[key_name]
                # check field exists
                count = value_from_field_name(field_name, analysis_layer)
                if not count:
                    count = 0
            except KeyError:
                count = 0
            count = format_number(
                count,
                enable_rounding=enable_rounding,
                is_population=True)
            columns.append(count)

        rows.append(columns)

    """Extract total"""
    total_footer = [
        resolve_from_dictionary(extra_args, 'total_header')
    ]

    total_fields = resolve_from_dictionary(extra_args, 'total_fields')
    for field in total_fields:
        try:
            field_name = analysis_layer_keywords[field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            if not total:
                total = 0
        except KeyError:
            total = 0
        total = format_number(
            total,
            enable_rounding=enable_rounding,
            is_population=True)
        total_footer.append(total)

    context['mmi'] = {
        'header': table_header,
        'rows': rows,
        'footer': total_footer
    }

    return context
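
Each MMI row above resolves a dynamic keyword (the field key is interpolated
with the MMI level) and quietly falls back to zero when the keyword or the
value is missing. A standalone sketch of that lookup pattern; the key
template and the data are made up.

def count_for_level(keywords, values, key_template, level):
    """Resolve a dynamic field name and fall back to 0 when it is absent."""
    try:
        field_name = keywords[key_template % (level,)]
        return values.get(field_name) or 0
    except KeyError:
        return 0

# Hypothetical usage:
# keywords = {'population_mmi_5_count': 'mmi_5'}
# values = {'mmi_5': 1200}
# count_for_level(keywords, values, 'population_mmi_%s_count', 5)  # 1200
# count_for_level(keywords, values, 'population_mmi_%s_count', 6)  # 0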
Example #12
def general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    # figure out analysis report type
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode

    exposure_type = layer_definition_type(exposure_layer)
    # Round the numbers unless we are in debug mode; population exposure
    # additionally uses population-style formatting
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # find hazard class
    summary = []

    analysis_feature = analysis_layer.getFeatures().next()
    analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

    exposure_unit = exposure_type['units'][0]
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')
    if exposure_unit['abbreviation']:
        value_header = u'{measure} ({abbreviation})'.format(**exposure_unit)
    else:
        value_header = u'{name}'.format(**exposure_unit)

    # in case there is a classification
    if 'classification' in hazard_layer.keywords:

        # retrieve hazard classification from hazard layer
        hazard_classification = layer_hazard_classification(hazard_layer)

        # classified hazard must have hazard count in analysis layer
        hazard_stats = []
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )
            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = analysis_inasafe_fields[field_key_name]
                field_index = analysis_layer.fieldNameIndex(field_name)
                # The hazard label is the translated hazard class name
                hazard_label = hazard_class['name']

                hazard_value = format_number(
                    analysis_feature[field_index],
                    enable_rounding=is_rounded,
                    is_population=is_population)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': hazard_value
                }
            except KeyError:
                # in case the field was not found
                hazard_label = hazard_class['name']
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': 0,
                }

            hazard_stats.append(stats)

        # find total field
        try:
            field_name = analysis_inasafe_fields[total_field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            total = format_number(
                total, enable_rounding=is_rounded, is_population=is_population)
            stats = {
                'key': total_field['key'],
                'name': total_field['name'],
                'as_header': True,
                'value': total
            }
            hazard_stats.append(stats)
        except KeyError:
            pass

        summary.append({
            'header_label': hazard_header,
            'value_label': value_header,
            'rows': hazard_stats
        })

    # retrieve affected column
    report_stats = []

    reported_fields = resolve_from_dictionary(
        extra_args, 'reported_fields')
    for item in reported_fields:
        header = item['header']
        field = item['field']
        if field['key'] in analysis_inasafe_fields:
            field_index = analysis_layer.fieldNameIndex(
                field['field_name'])
            if field == fatalities_field:
                # For fatalities field, we show a range of number
                # instead
                row_value = fatalities_range(analysis_feature[field_index])
            else:
                row_value = format_number(
                    analysis_feature[field_index],
                    enable_rounding=is_rounded,
                    is_population=is_population)
            row_stats = {
                'key': field['key'],
                'name': header,
                'value': row_value
            }
            report_stats.append(row_stats)

    # Build the exposure report section
    exposure_type = layer_definition_type(exposure_layer)
    header_label = exposure_type['name']
    summary.append({
        'header_label': header_label,
        # This should depend on exposure unit
        # TODO: Change this so it can take the unit dynamically
        'value_label': value_header,
        'rows': report_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])
    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')
    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=hazard_classification['classification_unit'])

    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header

    return context
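
The value header above is built from the first unit of the exposure
definition: the unit's measure plus its abbreviation when one exists,
otherwise just the unit name. A standalone sketch of that choice; the unit
dictionaries in the comments are made up.

def build_value_header(exposure_unit):
    """Combine measure and abbreviation into a column header."""
    if exposure_unit.get('abbreviation'):
        return u'{measure} ({abbreviation})'.format(**exposure_unit)
    return u'{name}'.format(**exposure_unit)

# Hypothetical unit definitions:
# build_value_header(
#     {'measure': 'Count', 'abbreviation': 'p', 'name': 'people'})
# returns u'Count (p)'
# build_value_header(
#     {'measure': 'Area', 'abbreviation': '', 'name': 'hectares'})
# returns u'hectares'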
Example #13
def aggregation_postprocessors_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {
        'sections': OrderedDict()
    }

    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    aggregation_summary = impact_report.aggregation_summary
    analysis_layer = impact_report.analysis
    analysis_layer_fields = impact_report.analysis.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    use_aggregation = bool(impact_report.impact_function.provenance[
        'aggregation_layer'])

    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)

    # this entire section is only for population exposure type
    if exposure_type != exposure_population:
        return context

    # check zero displaced (there will be no output to display)
    try:
        displaced_field_name = analysis_layer_fields[displaced_field['key']]
        total_displaced = value_from_field_name(
            displaced_field_name, analysis_layer)

        zero_displaced = False
        if total_displaced == 0:
            zero_displaced = True
    except KeyError:
        # in case there is no displaced field,
        # let each section handle it itself
        zero_displaced = False

    context['use_aggregation'] = use_aggregation
    if not use_aggregation:
        context['header'] = resolve_from_dictionary(
            extra_args, 'header')

    group_header_format = resolve_from_dictionary(
        extra_args, ['defaults', 'group_header_format'])
    section_header_format = resolve_from_dictionary(
        extra_args, ['defaults', 'section_header_format'])

    """Age Groups"""
    age_items = {
        'group': age_displaced_count_group,
        'group_header': group_header_format.format(
            header_name=age_displaced_count_group['header_name']),
        'fields': [postprocessor_output_field(p) for p in age_postprocessors]
    }

    # check whether any age field exists in the analysis layer
    for field in age_items['fields']:
        if field['key'] in analysis_layer_fields:
            no_age_field = False
            break
    else:
        no_age_field = True

    context['sections']['age'] = []
    age_section_header = section_header_format.format(
        header_name=age_displaced_count_group['header_name'])
    if zero_displaced:
        context['sections']['age'].append(
            {
                'header': age_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    elif no_age_field:
        context['sections']['age'].append(
            {
                'header': age_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'no_age_rate_message'])
            }
        )
    else:
        context['sections']['age'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                age_items,
                age_section_header,
                use_aggregation=use_aggregation,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )

    """Gender Groups"""
    gender_items = {
        'group': gender_displaced_count_group,
        'group_header': group_header_format.format(
            header_name=gender_displaced_count_group['header_name']),
        'fields': [
            postprocessor_output_field(p) for p in gender_postprocessors]
    }

    # check whether any gender field exists in the analysis layer
    for field in gender_items['fields']:
        if field['key'] in analysis_layer_fields:
            no_gender_field = False
            break
    else:
        no_gender_field = True

    context['sections']['gender'] = []
    gender_section_header = section_header_format.format(
        header_name=gender_displaced_count_group['header_name'])
    if zero_displaced:
        context['sections']['gender'].append(
            {
                'header': gender_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    elif no_gender_field:
        context['sections']['gender'].append(
            {
                'header': gender_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'no_gender_rate_message'])
            }
        )
    else:
        context['sections']['gender'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                gender_items,
                gender_section_header,
                use_aggregation=use_aggregation,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )

    """Vulnerability Groups"""
    context['sections']['vulnerability'] = []
    for vulnerability_group in vulnerability_displaced_count_groups:
        vulnerability_items = {
            'group': vulnerability_group,
            'group_header': group_header_format.format(
                header_name=vulnerability_group['header_name']),
            'fields': [field for field in vulnerability_group['fields']]
        }

        # check whether any vulnerability field exists in the analysis layer
        for field in vulnerability_items['fields']:
            if field['key'] in analysis_layer_fields:
                no_vulnerability_field = False
                break
        else:
            no_vulnerability_field = True

        vulnerability_section_header = section_header_format.format(
            header_name=vulnerability_group['header_name'])
        if zero_displaced:
            context['sections']['vulnerability'].append(
                {
                    'header': vulnerability_section_header,
                    'empty': True,
                    'message': resolve_from_dictionary(
                        extra_args, ['defaults', 'zero_displaced_message'])
                }
            )
        elif no_vulnerability_field:
            context['sections']['vulnerability'].append(
                {
                    'header': vulnerability_section_header,
                    'empty': True,
                    'message': resolve_from_dictionary(
                        extra_args,
                        ['defaults', 'no_vulnerability_rate_message'])
                }
            )
        else:
            context['sections']['vulnerability'].append(
                create_section(
                    aggregation_summary,
                    analysis_layer,
                    vulnerability_items,
                    vulnerability_section_header,
                    use_aggregation=use_aggregation,
                    debug_mode=debug_mode,
                    extra_component_args=extra_args)
            )

    """Minimum Needs"""
    context['sections']['minimum_needs'] = []
    minimum_needs_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'header'])
    # Don't show minimum needs if nobody is displaced
    if zero_displaced:
        context['sections']['minimum_needs'].append(
            {
                'header': minimum_needs_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    # Only provide the minimum needs breakdown if there is an aggregation
    # layer
    elif use_aggregation:
        # minimum needs should provide unit for column headers
        units_label = []
        minimum_needs_items = {
            'group_header': u'Minimum needs breakdown',
            'fields': minimum_needs_fields
        }

        for field in minimum_needs_items['fields']:
            need = field['need_parameter']
            if isinstance(need, ResourceParameter):
                unit = None
                unit_abbreviation = need.unit.abbreviation
                if unit_abbreviation:
                    unit_format = '{unit}'
                    unit = unit_format.format(
                        unit=unit_abbreviation)
                units_label.append(unit)

        context['sections']['minimum_needs'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                minimum_needs_items,
                minimum_needs_section_header,
                units_label=units_label,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )
    else:
        sections_not_empty = True
        for _, values in context['sections'].iteritems():
            for value in values:
                if value.get('rows'):
                    break
                else:
                    sections_not_empty = False

        context['sections_not_empty'] = sections_not_empty

    return context
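
The age, gender and vulnerability sections above all rely on Python's
for/else clause: the else branch only runs when the loop finishes without a
break, which is how "none of these fields exist" is detected. A standalone
sketch of that check; the field keys in the comments are made up.

def check_fields_missing(fields, analysis_layer_fields):
    """Mirror the for/else checks above: True when none of the keys exist."""
    for field in fields:
        if field['key'] in analysis_layer_fields:
            no_field = False
            break
    else:
        # the else of a for loop runs only when no break occurred
        no_field = True
    return no_field

# Hypothetical usage:
# check_fields_missing(
#     [{'key': 'age_under_5'}], {'age_under_5': 'u5_count'})  # False
# check_fields_missing(
#     [{'key': 'age_over_60'}], {'age_under_5': 'u5_count'})  # True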
Example #14
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = analysis_layer.getFeatures().next()
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode

    """Initializations"""

    # Get hazard classification
    hazard_classification = layer_hazard_classification(hazard_layer)

    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Round the numbers unless we are in debug mode; population exposure
    # additionally uses population-style formatting
    is_rounding = not debug_mode
    is_population = exposure_type is exposure_population

    # The analysis detail only applies to exposure types that can be
    # broken down, i.e. those that have classifications:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # The table may carry either exposure_type_field or exposure_class_field
    # at the moment, so pick whichever one is present.
    breakdown_fields = [
        exposure_type_field,
        exposure_class_field
    ]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break

    """Create detail header"""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(
        breakdown_header_template.format(exposure=exposure_header))

    # this is a mapping for customizing the double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args,
        'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': [],
            'total': []
        },
        'not_affected': {
            'hazards': [],
            'total': []
        }
    }
    for key, group in header_hazard_group.iteritems():
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    affected_header_index = None
    for index, hazard_class in enumerate(hazard_classification['classes']):
        # the tuple format would be:
        # (class name, is it affected, header background color)

        hazard_class_name = hazard_class['name']
        affected = hazard_class.get('affected')

        if not affected and not affected_header_index:
            affected_header_index = index + 1
            affected_status = 'not_affected'
        elif affected:
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    if affected_header_index:
        not_affected_header_index = len(hazard_classification['classes']) + 2
    else:
        affected_header_index = len(hazard_classification['classes']) + 1
        not_affected_header_index = affected_header_index + 2

    headers.insert(affected_header_index, total_affected_field['name'])
    headers.insert(not_affected_header_index, total_not_affected_field['name'])

    header_hazard_group['affected']['total'].append(
        total_affected_field['name'])
    header_hazard_group['not_affected']['total'].append(
        total_not_affected_field['name'])

    # affected, not affected, not exposed, total header
    report_fields = [
        total_affected_field,
        total_not_affected_field,
        total_not_exposed_field,
        total_field
    ]
    # total affected and total not affected headers were already inserted
    # above, so only append the not exposed and total headers here
    for report_field in report_fields[2:]:
        headers.append(report_field['name'])

    """Create detail rows"""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fieldNameIndex(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in header_hazard_group.iteritems():
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fieldNameIndex(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(
                    count_value,
                    enable_rounding=is_rounding,
                    is_population=is_population)
                row.append({
                    'value': count_value,
                    'header_group': group_key
                })
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({
                    'value': 0,
                    'header_group': group_key
                })

        skip_row = False

        for field in report_fields:
            group_key = None
            for key, group in header_hazard_group.iteritems():
                if field['name'] in group['total']:
                    group_key = key
                    break

            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(
                total_count,
                enable_rounding=is_rounding,
                is_population=is_population)

            # The code below is commented out because we now want to show
            # all rows; uncomment it to drop rows whose total affected is
            # zero.

            # if total_count == '0' and field == total_affected_field:
            #     skip_row = True
            #     break

            if group_key:
                if field == total_affected_field:
                    row.insert(
                        affected_header_index,
                        {
                            'value': total_count,
                            'header_group': group_key
                        })
                elif field == total_not_affected_field:
                    row.insert(
                        not_affected_header_index,
                        {
                            'value': total_count,
                            'header_group': group_key
                        })
                else:
                    row.append({
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # class key is first column
        _class_key = _row[0]
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _class_key == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name

    """create total footers"""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    save_total_affected_field = False
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (
            hazard_class['key'],)
        if not hazard_class.get('affected'):
            save_total_affected_field = True

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fieldNameIndex(field_name)
            count_value = format_number(
                analysis_feature[field_index],
                enable_rounding=is_rounding,
                is_population=is_population)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers) + int(save_total_affected_field)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            continue
        footers.append({
            'value': count_value,
            'header_group': group_key
        })

    # for footers
    for field in report_fields:

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if field['name'] in group['total']:
                group_key = key
                break

        total_count = value_from_field_name(
            field['field_name'], analysis_layer)
        total_count = format_number(
            total_count,
            enable_rounding=is_rounding,
            is_population=is_population)
        if group_key:
            if field == total_affected_field:
                footers.insert(
                    affected_header_index,
                    {
                        'value': total_count,
                        'header_group': group_key
                    })
            elif field == total_not_affected_field:
                footers.insert(
                    not_affected_header_index,
                    {
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                footers.append({
                    'value': total_count,
                    'header_group': group_key
                })
        else:
            footers.append(total_count)

    header = resolve_from_dictionary(
        extra_args, 'header')
    notes = resolve_from_dictionary(
        extra_args, 'notes')

    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0

    # total affected and total not affected are grouped with their hazard
    # classes, so exclude them here: len(report_fields) - 2
    total_header_index = len(headers) - (len(report_fields) - 2)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class_name in group['hazards'] or (
                        hazard_class_name in group['total']):
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    return context
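
The detail rows above are sorted with a key function that looks each row's
class key up in the exposure class list and sends unknown keys to the front
with -1. A standalone sketch of that ordering; the class keys and row values
are made up.

def sort_rows_by_class_order(details, class_keys):
    """Order rows by the position of their first column in class_keys."""
    def class_index(row):
        try:
            return class_keys.index(row[0])
        except ValueError:
            # unknown keys sort first, matching the -1 used above
            return -1
    return sorted(details, key=class_index)

# Hypothetical usage:
# rows = [['residential', 3], ['unknown', 1], ['commercial', 2]]
# sort_rows_by_class_order(rows, ['commercial', 'residential'])
# returns [['unknown', 1], ['commercial', 2], ['residential', 3]]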
Example #15
def notes_assumptions_extractor(impact_report, component_metadata):
    """Extracting notes and assumptions of the exposure layer

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args
    exposure_type = layer_definition_type(exposure_layer)

    context['header'] = resolve_from_dictionary(extra_args, 'header')
    context['items'] = provenance['notes']

    # Get hazard classification
    hazard_classification = layer_hazard_classification(hazard_layer)

    # Check hazard affected class
    affected_classes = []
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('affected', False):
            affected_classes.append(hazard_class)

    if affected_classes:
        affected_note_format = resolve_from_dictionary(extra_args,
                                                       'affected_note_format')

        # generate hazard classes
        hazard_classes = ', '.join([c['name'] for c in affected_classes])

        context['items'].append(
            affected_note_format.format(hazard_classes=hazard_classes))

    # Check whether the hazard has a displacement rate
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('displacement_rate', 0) > 0:
            have_displacement_rate = True
            break
    else:
        have_displacement_rate = False

    # Only show the displacement note if the analysis is about population
    # exposure
    if have_displacement_rate and exposure_type == exposure_population:
        # add notes for displacement rate used
        displacement_note_format = resolve_from_dictionary(
            extra_args, 'displacement_rates_note_format')

        # generate rate description
        hazard_note_format = resolve_from_dictionary(
            extra_args, 'hazard_displacement_rates_note_format')
        hazard_note = []
        for hazard_class in hazard_classification['classes']:
            hazard_note.append(hazard_note_format.format(**hazard_class))

        rate_description = ', '.join(hazard_note)
        context['items'].append(
            displacement_note_format.format(rate_description=rate_description))

    return context
Example #16
def notes_assumptions_extractor(impact_report, component_metadata):
    """Extracting notes and assumptions of the exposure layer

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args
    exposure_type = layer_definition_type(exposure_layer)

    analysis_note_dict = resolve_from_dictionary(extra_args, 'analysis_notes')
    context['items'] = [analysis_note_dict]

    context['header'] = resolve_from_dictionary(extra_args, 'header')
    context['items'] += provenance['notes']

    # Get hazard classification
    hazard_classification = layer_hazard_classification(hazard_layer)

    # Check hazard affected class
    affected_classes = []
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('affected', False):
            affected_classes.append(hazard_class)

    if affected_classes:
        affected_note_dict = resolve_from_dictionary(
            extra_args, 'affected_note_format')

        # generate hazard classes
        hazard_classes = ', '.join([
            c['name'] for c in affected_classes
        ])

        for index, affected_note in enumerate(affected_note_dict['item_list']):
            affected_note_dict['item_list'][index] = (
                affected_note.format(hazard_classes=hazard_classes)
            )

        context['items'].append(affected_note_dict)

    # Check whether the hazard has a displacement rate
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('displacement_rate', 0) > 0:
            have_displacement_rate = True
            break
    else:
        have_displacement_rate = False

    # Only show the displacement note if the analysis is about population
    # exposure
    if have_displacement_rate and exposure_type == exposure_population:
        # add notes for displacement rate used
        displacement_note_dict = resolve_from_dictionary(
            extra_args, 'displacement_rates_note_format')

        # generate rate description
        displacement_rates_note_format = resolve_from_dictionary(
            extra_args, 'hazard_displacement_rates_note_format')
        displacement_rates_note = []
        for hazard_class in hazard_classification['classes']:
            displacement_rates_note.append(
                displacement_rates_note_format.format(**hazard_class))

        rate_description = ', '.join(displacement_rates_note)

        for index, displacement_note in enumerate(
                displacement_note_dict['item_list']):
            displacement_note_dict['item_list'][index] = (
                displacement_note.format(rate_description=rate_description)
            )

        context['items'].append(displacement_note_dict)

    # Check whether the hazard has a fatality rate
    for hazard_class in hazard_classification['classes']:
        if hazard_class.get('fatality_rate', 0) > 0:
            have_fatality_rate = True
            break
    else:
        have_fatality_rate = False

    if have_fatality_rate and exposure_type == exposure_population:
        # add notes for fatality rate used
        fatality_note_dict = resolve_from_dictionary(
            extra_args, 'fatality_rates_note_format')

        # generate rate description
        fatality_rates_note_format = resolve_from_dictionary(
            extra_args, 'hazard_fatality_rates_note_format')
        fatality_rates_note = []
        for hazard_class in hazard_classification['classes']:
            # we make a copy here because we don't want to
            # change the real value.
            copy_of_hazard_class = dict(hazard_class)
            if not copy_of_hazard_class['fatality_rate'] > 0:
                copy_of_hazard_class['fatality_rate'] = 0
            else:
                # we want to show the rate as a scientific notation
                copy_of_hazard_class['fatality_rate'] = (
                    html_scientific_notation_rate(
                        copy_of_hazard_class['fatality_rate']))

            fatality_rates_note.append(
                fatality_rates_note_format.format(**copy_of_hazard_class))

        rate_description = ', '.join(fatality_rates_note)

        for index, fatality_note in enumerate(fatality_note_dict['item_list']):
            fatality_note_dict['item_list'][index] = (
                fatality_note.format(rate_description=rate_description)
            )

        context['items'].append(fatality_note_dict)

    return context