Exemplo n.º 1
0
def _add_not_exposed(analysis_row, enable_rounding, exposure_unit,
                     coefficient):
    """Build the legend entry for the `not exposed` class.

    :param analysis_row: The analysis row as a list.
    :type analysis_row: list

    :param enable_rounding: If we need to do a rounding.
    :type enable_rounding: bool

    :param exposure_unit: The exposure unit.
    :type exposure_unit: safe.definitions.units

    :param coefficient: Divide the result after the rounding.
    :type coefficient: float

    :return: A tuple with the color and the formatted label.
    :rtype: tuple
    """
    # Dynamic field name produced from the `not exposed` class key.
    field_name = hazard_count_field['field_name'] % not_exposed_class['key']
    try:
        count = analysis_row[field_name]
    except KeyError:
        # The field is absent when no feature ended up `not exposed`.
        count = 0
    formatted = format_number(count, enable_rounding, coefficient)
    label = _format_label(
        hazard_class=not_exposed_class['name'],
        value=formatted,
        exposure_unit=exposure_unit)
    return not_exposed_class['color'], label
Exemplo n.º 2
0
 def number(self):
     """Return the number to be displayed for the element."""
     # Always rounded as a population figure for display purposes.
     return format_number(
         self._number, enable_rounding=True, is_population=True)
Exemplo n.º 3
0
def _add_not_exposed(
        analysis_row,
        enable_rounding,
        is_population,
        exposure_unit,
        coefficient):
    """Build the legend entry for the `not exposed` class.

    :param analysis_row: The analysis row as a list.
    :type analysis_row: list

    :param enable_rounding: If we need to do a rounding.
    :type enable_rounding: bool

    :param is_population: Flag if the number is population. It needs to be
        used with enable_rounding.
    :type is_population: bool

    :param exposure_unit: The exposure unit.
    :type exposure_unit: safe.definitions.units

    :param coefficient: Divide the result after the rounding.
    :type coefficient: float

    :return: A tuple with the color and the formatted label.
    :rtype: tuple
    """
    # Dynamic field name produced from the `not exposed` class key.
    field_name = hazard_count_field['field_name'] % not_exposed_class['key']
    try:
        count = analysis_row[field_name]
    except KeyError:
        # The field is absent when no feature ended up `not exposed`.
        count = 0
    formatted = format_number(
        count, enable_rounding, is_population, coefficient)
    label = _format_label(
        hazard_class=not_exposed_class['name'],
        value=formatted,
        exposure_unit=exposure_unit)
    return not_exposed_class['color'], label
Exemplo n.º 4
0
def minimum_needs_extractor(impact_report, component_metadata):
    """Extracting minimum needs of the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    analysis_layer = impact_report.analysis
    analysis_keywords = analysis_layer.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    # In debug mode we show the raw, un-rounded numbers.
    is_rounding = not debug_mode

    header = resolve_from_dictionary(extra_args, 'header')
    context['header'] = header

    # Short-circuit: if nobody is displaced there are no needs to report.
    try:
        displaced_field_name = analysis_keywords[displaced_field['key']]
        total_displaced = value_from_field_name(
            displaced_field_name, analysis_layer)
        if total_displaced == 0:
            zero_displaced_message = resolve_from_dictionary(
                extra_args, 'zero_displaced_message')
            context['zero_displaced'] = {
                'status': True,
                'message': zero_displaced_message
            }
            return context
    except KeyError:
        # in case no displaced field
        pass

    # minimum needs calculation only affect population type exposure
    # check if analysis keyword have minimum_needs keywords
    have_minimum_needs_field = False
    for field_key in analysis_keywords:
        if field_key.startswith(minimum_needs_namespace):
            have_minimum_needs_field = True
            break

    if not have_minimum_needs_field:
        return context

    frequencies = {}
    # map each needs to its frequency groups
    for field in (minimum_needs_fields + additional_minimum_needs):
        need_parameter = field.get('need_parameter')
        if isinstance(need_parameter, ResourceParameter):
            frequency = need_parameter.frequency
        else:
            frequency = field.get('frequency')

        if frequency:
            frequencies.setdefault(frequency, []).append(field)

    needs = []
    analysis_feature = analysis_layer.getFeatures().next()
    header_frequency_format = resolve_from_dictionary(
        extra_args, 'header_frequency_format')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    need_header_format = resolve_from_dictionary(
        extra_args, 'need_header_format')
    # group the needs by frequency
    for key, frequency in frequencies.iteritems():
        group = {
            'header': header_frequency_format.format(frequency=tr(key)),
            'total_header': total_header,
            'needs': []
        }
        for field in frequency:
            # check value exists in the field
            field_idx = analysis_layer.fieldNameIndex(field['field_name'])
            if field_idx == -1:
                # skip if field doesn't exists
                continue
            value = format_number(
                analysis_feature[field_idx],
                enable_rounding=is_rounding,
                is_population=True)

            # FIX: reset per field. Previously a field without its own
            # unit either inherited the abbreviation of the preceding
            # field, or raised a NameError when the first field had none.
            unit_abbreviation = None
            if field.get('need_parameter'):
                # :type: ResourceParameter
                need_parameter = field['need_parameter']
                name = tr(need_parameter.name)
                unit_abbreviation = need_parameter.unit.abbreviation
            else:
                if field.get('header_name'):
                    name = field.get('header_name')
                else:
                    name = field.get('name')

                need_unit = field.get('unit')
                if need_unit:
                    unit_abbreviation = need_unit.get('abbreviation')

            # Only append the unit when one is available.
            if unit_abbreviation:
                header = need_header_format.format(
                    name=name,
                    unit_abbreviation=unit_abbreviation)
            else:
                header = name

            group['needs'].append({
                'header': header,
                'value': value
            })
        needs.append(group)

    context['needs'] = needs

    return context
Exemplo n.º 5
0
def mmi_detail_extractor(impact_report, component_metadata):
    """Extracting MMI-related analysis result.

    This extractor should only be used for EQ Raster with Population.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    analysis_layer = impact_report.analysis
    analysis_layer_keywords = analysis_layer.keywords
    extra_args = component_metadata.extra_args
    use_rounding = impact_report.impact_function.use_rounding
    provenance = impact_report.impact_function.provenance
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    # Guard clauses: only applicable to EQ raster with population.
    if definition(hazard_keywords['hazard']) != hazard_earthquake:
        return context
    if hazard_keywords[layer_geometry['key']] != layer_geometry_raster['key']:
        return context
    if definition(exposure_keywords['exposure']) != exposure_population:
        return context

    context['header'] = resolve_from_dictionary(extra_args, 'header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Table header: the MMI column first, then one column per reported field.
    table_header = [resolve_from_dictionary(extra_args, 'mmi_header')]
    table_header += [item['header'] for item in reported_fields]

    # mmi is ranged from 1 to 10, which means: [1, 11)
    roman_numerals = [
        'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X'
    ]
    rows = []
    for mmi in list(range(1, 11)):
        row = [roman_numerals[mmi - 1]]
        for item in reported_fields:
            try:
                # The field key is a dynamic pattern taking the MMI level.
                key_name = item['field']['key'] % (mmi, )
                field_name = analysis_layer_keywords[key_name]
                # check field exists
                count = value_from_field_name(field_name, analysis_layer)
                if not count:
                    count = 0
            except KeyError:
                count = 0
            row.append(format_number(
                count,
                use_rounding=use_rounding,
                is_population=True))
        rows.append(row)

    # Build the totals footer.
    total_footer = [resolve_from_dictionary(extra_args, 'total_header')]
    for field in resolve_from_dictionary(extra_args, 'total_fields'):
        try:
            field_name = analysis_layer_keywords[field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            if not total:
                total = 0
        except KeyError:
            total = 0
        total_footer.append(format_number(
            total,
            use_rounding=use_rounding,
            is_population=True))

    context['component_key'] = component_metadata.key
    context['mmi'] = {
        'header': table_header,
        'rows': rows,
        'footer': total_footer
    }

    return context
Exemplo n.º 6
0
def general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    # figure out analysis report type
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode

    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # find hazard class
    summary = []

    # Python 2 iterator protocol: grab the single analysis feature.
    analysis_feature = analysis_layer.getFeatures().next()
    analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

    exposure_unit = exposure_type['units'][0]
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')
    # Column header includes the unit abbreviation when one exists.
    if exposure_unit['abbreviation']:
        value_header = u'{measure} ({abbreviation})'.format(**exposure_unit)
    else:
        value_header = u'{name}'.format(**exposure_unit)

    # in case there is a classification
    if 'classification' in hazard_layer.keywords:

        # retrieve hazard classification from hazard layer
        hazard_classification = layer_hazard_classification(hazard_layer)

        # classified hazard must have hazard count in analysis layer
        hazard_stats = []
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )
            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = analysis_inasafe_fields[field_key_name]
                field_index = analysis_layer.fieldNameIndex(field_name)
                # Hazard label taken from translated hazard count field
                # label, string-formatted with translated hazard class label
                hazard_label = hazard_class['name']

                hazard_value = format_number(analysis_feature[field_index],
                                             enable_rounding=is_rounded,
                                             is_population=is_population)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': hazard_value
                }
            except KeyError:
                # in case the field was not found
                hazard_label = hazard_class['name']
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': 0,
                }

            hazard_stats.append(stats)

        # find total field
        try:
            field_name = analysis_inasafe_fields[total_field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            total = format_number(total,
                                  enable_rounding=is_rounded,
                                  is_population=is_population)
            stats = {
                'key': total_field['key'],
                'name': total_field['name'],
                'as_header': True,
                'value': total
            }
            hazard_stats.append(stats)
        except KeyError:
            # No total field registered for this analysis; skip the row.
            pass

        summary.append({
            'header_label': hazard_header,
            'value_label': value_header,
            'rows': hazard_stats
        })

    # retrieve affected column
    report_stats = []

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')
    for item in reported_fields:
        header = item['header']
        field = item['field']
        if field['key'] in analysis_inasafe_fields:
            field_index = analysis_layer.fieldNameIndex(field['field_name'])
            if field == fatalities_field:
                # For fatalities field, we show a range of number
                # instead
                row_value = fatalities_range(analysis_feature[field_index])
            else:
                row_value = format_number(analysis_feature[field_index],
                                          enable_rounding=is_rounded,
                                          is_population=is_population)
            row_stats = {
                'key': field['key'],
                'name': header,
                'value': row_value
            }
            report_stats.append(row_stats)

    # Give report section
    exposure_type = layer_definition_type(exposure_layer)
    header_label = exposure_type['name']
    summary.append({
        'header_label': header_label,
        # This should depend on exposure unit
        # TODO: Change this so it can take the unit dynamically
        'value_label': value_header,
        'rows': report_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])
    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')
    # NOTE(review): `hazard_classification` is only bound inside the
    # `if 'classification' in hazard_layer.keywords` branch above, yet it
    # is used unconditionally here. If the hazard layer has no
    # classification keyword this line raises NameError — presumably this
    # extractor is only invoked for classified hazards; confirm upstream.
    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=hazard_classification['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(extra_args,
                                          ['concept_notes', 'note_format'])

    # Population exposure gets its own set of concept notes.
    if is_population:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])
    else:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'general_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
Exemplo n.º 7
0
def generate_classified_legend(
        analysis,
        exposure,
        hazard,
        debug_mode):
    """Generate an ordered python structure with the classified symbology.

    :param analysis: The analysis layer.
    :type analysis: QgsVectorLayer

    :param exposure: The exposure layer.
    :type exposure: QgsVectorLayer

    :param hazard: The hazard layer.
    :type hazard: QgsVectorLayer

    :param debug_mode: Boolean if run in debug mode.
    :type debug_mode: bool

    :return: The ordered dictionary to use to build the classified style.
    :rtype: OrderedDict
    """
    # We need to read the analysis layer to get the number of features.
    analysis_row = analysis.getFeatures().next()

    # Let's style the hazard class in each layers.
    hazard_classification = hazard.keywords['classification']
    hazard_classification = definition(hazard_classification)

    # Let's check if there is some thresholds:
    thresholds = hazard.keywords.get('thresholds')
    if thresholds:
        hazard_unit = hazard.keywords.get('continuous_hazard_unit')
        hazard_unit = definition(hazard_unit)['abbreviation']
    else:
        hazard_unit = None

    exposure = exposure.keywords['exposure']
    exposure_definitions = definition(exposure)
    exposure_units = exposure_definitions['units']
    exposure_unit = exposure_units[0]
    coefficient = 1
    # We check if can use a greater unit, such as kilometre for instance.
    if len(exposure_units) > 1:
        # We use only two units for now.
        delta = coefficient_between_units(
            exposure_units[1], exposure_units[0])

        all_values_are_greater = True

        # We check if all values are greater than the coefficient
        for hazard_class in hazard_classification['classes']:
            field_name = hazard_count_field['field_name'] % hazard_class['key']
            try:
                value = analysis_row[field_name]
            except KeyError:
                value = 0

            if 0 < value < delta:
                # 0 is fine, we can still keep the second unit.
                all_values_are_greater = False
                # One value below the coefficient is enough to decide,
                # no need to look at the remaining classes.
                break

        if all_values_are_greater:
            # If yes, we can use this unit.
            exposure_unit = exposure_units[1]
            coefficient = delta

    classes = OrderedDict()

    # In debug mode we don't round number.
    enable_rounding = not debug_mode

    for i, hazard_class in enumerate(hazard_classification['classes']):
        # Get the hazard class name.
        field_name = hazard_count_field['field_name'] % hazard_class['key']

        # Get the number of affected feature by this hazard class.
        try:
            value = analysis_row[field_name]
        except KeyError:
            # The field might not exist if no feature impacted in this hazard
            # zone.
            value = 0
        value = format_number(
            value,
            enable_rounding,
            exposure_definitions['use_population_rounding'],
            coefficient)

        minimum = None
        maximum = None

        # Check if we need to add thresholds.
        if thresholds:
            if i == 0:
                # First class: open-ended at the top, only a lower bound.
                minimum = thresholds[hazard_class['key']][0]
            elif i == len(hazard_classification['classes']) - 1:
                # Last class: open-ended at the bottom, only an upper bound.
                maximum = thresholds[hazard_class['key']][1]
            else:
                minimum = thresholds[hazard_class['key']][0]
                maximum = thresholds[hazard_class['key']][1]

        label = _format_label(
            hazard_class=hazard_class['name'],
            value=value,
            exposure_unit=exposure_unit['abbreviation'],
            minimum=minimum,
            maximum=maximum,
            hazard_unit=hazard_unit)

        classes[hazard_class['key']] = (hazard_class['color'], label)

    if exposure_definitions['display_not_exposed'] or debug_mode:
        classes[not_exposed_class['key']] = _add_not_exposed(
            analysis_row,
            enable_rounding,
            exposure_definitions['use_population_rounding'],
            exposure_unit['abbreviation'],
            coefficient)

    return classes
Exemplo n.º 8
0
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: The impact report that acts as a proxy to fetch
        all the data that extractor needed.
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: The component metadata. Used to obtain
        information about the component we want to render.
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: Context for rendering phase.
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = analysis_layer.getFeatures().next()
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    use_rounding = impact_report.impact_function.use_rounding
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']
    """Initializations."""

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # Get exposure type definition
    exposure_type = definition(exposure_keywords['exposure'])
    # Only round the number when it is population exposure and we use rounding
    is_population = exposure_type is exposure_population

    # action for places with poopulation exposure
    is_place_with_population = False
    if exposure_type is exposure_place:
        exposure_fields = exposure_keywords['inasafe_fields']
        if exposure_fields.get(population_count_field['key']):
            is_place_with_population = True

    # Analysis detail only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    """Create detail header."""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(breakdown_header_template.format(exposure=exposure_header))

    # this is mapping for customizing double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args, 'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': [],
            'total': []
        },
        'not_affected': {
            'hazards': [],
            'total': []
        }
    }
    for key, group in header_hazard_group.iteritems():
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    affected_header_index = None
    for index, hazard_class in enumerate(hazard_classification['classes']):
        # the tuple format would be:
        # (class name, is it affected, header background color

        hazard_class_name = hazard_class['name']
        affected = hazard_class.get('affected')

        if not affected and not affected_header_index:
            affected_header_index = index + 1
            affected_status = 'not_affected'
        elif affected:
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    if affected_header_index:
        not_affected_header_index = len(hazard_classification['classes']) + 2
    else:
        affected_header_index = len(hazard_classification['classes']) + 1
        not_affected_header_index = affected_header_index + 2

    report_fields = []

    headers.insert(affected_header_index, total_affected_field['name'])
    header_hazard_group['affected']['total'].append(
        total_affected_field['name'])
    report_fields.append(total_affected_field)

    headers.insert(not_affected_header_index, total_not_affected_field['name'])
    header_hazard_group['not_affected']['total'].append(
        total_not_affected_field['name'])
    report_fields.append(total_not_affected_field)

    # affected, not affected, population (if applicable), not exposed,
    # total header
    report_fields += [total_not_exposed_field, total_field]

    place_pop_name = resolve_from_dictionary(
        extra_args, ['place_with_population', 'header'])
    if is_place_with_population:
        # we want to change header name for population
        duplicated_population_count_field = deepcopy(
            exposed_population_count_field)
        duplicated_population_count_field['name'] = place_pop_name
        report_fields.append(duplicated_population_count_field)

    report_fields_index = -2 + -(int(is_place_with_population))
    for report_field in report_fields[report_fields_index:]:
        headers.append(report_field['name'])
    """Create detail rows."""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fieldNameIndex(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in header_hazard_group.iteritems():
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fieldNameIndex(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(count_value,
                                            use_rounding=use_rounding,
                                            is_population=is_population)
                row.append({'value': count_value, 'header_group': group_key})
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({'value': 0, 'header_group': group_key})

        skip_row = False

        for field in report_fields:
            group_key = None
            for key, group in header_hazard_group.iteritems():
                if field['name'] in group['total']:
                    group_key = key
                    break

            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(total_count,
                                        use_rounding=use_rounding,
                                        is_population=is_population)

            # we comment below code because now we want to show all rows,
            # we can uncomment if we want to remove the rows with zero total

            # if total_count == '0' and field == total_affected_field:
            #     skip_row = True
            #     break

            if group_key:
                if field == total_affected_field:
                    row.insert(affected_header_index, {
                        'value': total_count,
                        'header_group': group_key
                    })
                elif field == total_not_affected_field:
                    row.insert(not_affected_header_index, {
                        'value': total_count,
                        'header_group': group_key
                    })
                else:
                    row.append({
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_keywords)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # The exposure class key sits in the first column of the row.
        _class_key = _row[0]
        # Walk the classes list and return the position of the matching
        # key; unknown keys map to -1 so they sort before known classes.
        for position, _exposure_class in enumerate(exposure_classes_lists):
            if _exposure_class['key'] == _class_key:
                return position
        return -1

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name
    """create total footers."""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    save_total_affected_field = False
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (hazard_class['key'], )
        if not hazard_class.get('affected'):
            save_total_affected_field = True

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fieldNameIndex(field_name)
            count_value = format_number(analysis_feature[field_index],
                                        use_rounding=use_rounding,
                                        is_population=is_population)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers) + int(save_total_affected_field)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            # reduce total affected and not affected column index by 1
            # since we are removing a column
            if group_key == affected_field['field_name']:
                affected_header_index -= 1
            else:
                not_affected_header_index -= 1
            continue
        footers.append({'value': count_value, 'header_group': group_key})

    # for footers
    for field in report_fields:

        total_count = value_from_field_name(field['field_name'],
                                            analysis_layer)

        if not total_count and field['name'] == place_pop_name:
            field = population_count_field
            field['name'] = place_pop_name
            total_count = value_from_field_name(field['field_name'],
                                                analysis_layer)

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if field['name'] in group['total']:
                group_key = key
                break

        total_count = format_number(total_count,
                                    use_rounding=use_rounding,
                                    is_population=is_population)
        if group_key:
            if field == total_affected_field:
                footers.insert(affected_header_index, {
                    'value': total_count,
                    'header_group': group_key
                })
            elif field == total_not_affected_field:
                footers.insert(not_affected_header_index, {
                    'value': total_count,
                    'header_group': group_key
                })
            else:
                footers.append({
                    'value': total_count,
                    'header_group': group_key
                })
        else:
            footers.append(total_count)

    header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')

    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0

    # we want to include total affected and not affected as a group
    # to its class so len(report_fields) - 2
    total_header_index = len(headers) - (len(report_fields) - 2)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class_name in group['hazards'] or (hazard_class_name
                                                         in group['total']):
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    context['extra_table'] = {}

    # extra table for specific exposure if exist
    extra_fields = resolve_from_dictionary(extra_args, 'exposure_extra_fields')
    if exposure_type['key'] in extra_fields.keys():

        # create header for the extra table
        extra_table_header_format = resolve_from_dictionary(
            extra_args, 'extra_table_header_format')
        extra_table_header = extra_table_header_format.format(
            exposure=exposure_header)

        # headers
        headers = []
        headers.append(
            breakdown_header_template.format(exposure=exposure_header))

        current_unit = None
        currency_unit = setting('currency', expected_type=str)
        for field in extra_fields[exposure_type['key']]:
            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            if field_index < 0:
                LOGGER.debug('Field name not found: %s, field index: %s' %
                             (field['field_name'], field_index))
                continue

            units = field.get('units')
            if units:
                for unit in units:
                    if currency_unit == unit['key']:
                        current_unit = unit['name']
                        break
                if not current_unit:
                    current_unit = units[0]['name']

            header_format = '{header} ({unit})'
            headers.append(
                header_format.format(header=field['header_name'],
                                     unit=current_unit))

        # rows
        details = []
        for feat in exposure_summary_table.getFeatures():
            row = []

            # Get breakdown name
            exposure_summary_table_field_name = breakdown_field['field_name']
            field_index = exposure_summary_table.fieldNameIndex(
                exposure_summary_table_field_name)
            class_key = feat[field_index]

            row.append(class_key)

            for field in extra_fields[exposure_type['key']]:
                field_index = exposure_summary_table.fieldNameIndex(
                    field['field_name'])
                # noinspection PyBroadException
                try:
                    total_count = int(float(feat[field_index]))
                except:
                    LOGGER.debug('Field name not found: %s, field index: %s' %
                                 (field['field_name'], field_index))
                    continue
                total_count = format_number(total_count,
                                            use_rounding=use_rounding,
                                            is_population=is_population)
                row.append(total_count)

            details.append(row)

        details = sorted(details, key=sort_classes)

        context['extra_table'] = {
            'table_header': extra_table_header,
            'headers': headers,
            'details': details,
        }

    return context
# --- Example #9 (score: 0) ---
def population_infographic_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    """Initializations"""
    hazard_layer = impact_report.hazard
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    # Fall back to an empty mapping when no icons were configured so the
    # `icons.get(...)` lookups below return None instead of raising
    # AttributeError.
    icons = component_metadata.extra_args.get('icons') or {}

    # this report sections only applies if it is a population report.
    population_fields = [
        population_count_field['key'],
        exposure_count_field['key'] % (exposure_population['key'], ),
    ] + [f['key'] for f in minimum_needs_fields]

    for item in population_fields:
        if item in analysis_layer_fields:
            break
    else:
        # No population-related field present: not a population report,
        # return an empty context so this component is skipped.
        return context

    # We try to get total affected field
    # if it didn't exists, check other fields to show
    total_affected_fields = [
        total_affected_field['key'],
        # We might want to check other fields, but turn it off until further
        # discussion
        population_count_field['key'],
        exposure_count_field['key'] % (exposure_population['key'], ),
    ]

    for item in total_affected_fields:
        if item in analysis_layer_fields:
            total_affected = value_from_field_name(analysis_layer_fields[item],
                                                   analysis_layer)
            total_affected_field_used = item
            break
    else:
        # None of the affected-count candidates exist: nothing to report.
        return context

    if displaced_field['key'] in analysis_layer_fields:
        total_displaced = value_from_field_name(
            analysis_layer_fields[displaced_field['key']], analysis_layer)
    else:
        return context

    sections = OrderedDict()
    """People Section"""

    # Take default value from definitions
    people_header = resolve_from_dictionary(extra_args,
                                            ['sections', 'people', 'header'])
    people_items = resolve_from_dictionary(extra_args,
                                           ['sections', 'people', 'items'])

    # create context for affected infographic
    sub_header = resolve_from_dictionary(people_items[0], 'sub_header')

    # retrieve relevant header based on the fields we showed.
    sub_header = sub_header[total_affected_field_used]

    affected_infographic = PeopleInfographicElement(
        header=sub_header,
        icon=icons.get(total_affected_field['key']),
        number=total_affected)

    # create context for displaced infographic
    sub_header = resolve_from_dictionary(people_items[1], 'sub_header')
    sub_header_note_format = resolve_from_dictionary(people_items[1],
                                                     'sub_header_note_format')
    rate_description_format = resolve_from_dictionary(
        people_items[1], 'rate_description_format')
    rate_description = []

    hazard_classification = layer_hazard_classification(hazard_layer)
    for hazard_class in hazard_classification['classes']:
        displacement_rate = hazard_class.get('displacement_rate', 0)
        if displacement_rate:
            rate_description.append(
                rate_description_format.format(**hazard_class))

    rate_description_string = ', '.join(rate_description)

    sub_header_note = sub_header_note_format.format(
        rate_description=rate_description_string)

    displaced_infographic = PeopleInfographicElement(
        header=sub_header,
        header_note=sub_header_note,
        icon=icons.get(displaced_field['key']),
        number=total_displaced)

    sections['people'] = {
        'header': people_header,
        'items': [affected_infographic, displaced_infographic]
    }
    """Vulnerability Section"""

    # Take default value from definitions
    vulnerability_items = resolve_from_dictionary(
        extra_args, ['sections', 'vulnerability', 'items'])

    vulnerability_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'vulnerability', 'header'])

    vulnerability_section_sub_header_format = resolve_from_dictionary(
        extra_args, ['sections', 'vulnerability', 'sub_header_format'])

    infographic_elements = []
    for group in vulnerability_items:
        fields = group['fields']
        group_header = group['sub_group_header']
        bootstrap_column = group['bootstrap_column']
        element_column = group['element_column']
        headers = group['headers']
        elements = []
        for field, header in zip(fields, headers):
            field_key = field['key']
            try:
                field_name = analysis_layer_fields[field_key]
                value = value_from_field_name(field_name, analysis_layer)
            except KeyError:
                # It means the field is not there
                continue

            # Guard against a zero/None total_displaced: a non-zero
            # vulnerability count with nobody displaced would otherwise
            # raise ZeroDivisionError (or TypeError for None).
            if value and total_displaced:
                value_percentage = value * 100.0 / total_displaced
            else:
                value_percentage = 0

            infographic_element = PeopleVulnerabilityInfographicElement(
                header=header,
                icon=icons.get(field_key),
                number=value,
                percentage=value_percentage)
            elements.append(infographic_element)
        if elements:
            infographic_elements.append({
                'group_header': group_header,
                'bootstrap_column': bootstrap_column,
                'element_column': element_column,
                'items': elements
            })

    total_displaced_rounded = format_number(total_displaced,
                                            enable_rounding=True,
                                            is_population=True)

    sections['vulnerability'] = {
        'header':
        vulnerability_section_header,
        'small_header':
        vulnerability_section_sub_header_format.format(
            number_displaced=total_displaced_rounded),
        'items':
        infographic_elements
    }
    """Minimum Needs Section"""

    minimum_needs_header = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'header'])
    empty_unit_string = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'empty_unit_string'])

    items = []

    for item in minimum_needs_fields:
        need = item['need_parameter']
        if isinstance(need, ResourceParameter):

            needs_count = value_from_field_name(item['field_name'],
                                                analysis_layer)

            if need.unit.abbreviation:
                unit_string = '{unit}/{frequency}'.format(
                    unit=need.unit.abbreviation, frequency=need.frequency)
            else:
                unit_string = empty_unit_string

            item = PeopleMinimumNeedsInfographicElement(header=item['name'],
                                                        icon=icons.get(
                                                            item['key']),
                                                        number=needs_count,
                                                        unit=unit_string)
            items.append(item)

    # TODO: get from impact function provenance later
    needs_profile = NeedsProfile()

    sections['minimum_needs'] = {
        'header': minimum_needs_header,
        'small_header': needs_profile.provenance,
        'items': items,
    }
    """Population Charts"""

    population_donut_path = impact_report.component_absolute_output_path(
        'population-chart-png')

    css_label_classes = []
    try:
        population_chart_context = impact_report.metadata.component_by_key(
            'population-chart').context['context']
        """
        :type: safe.report.extractors.infographic_elements.svg_charts.
            DonutChartContext
        """
        for pie_slice in population_chart_context.slices:
            label = pie_slice['label']
            if not label:
                continue
            # CSS class names cannot contain spaces; normalise the label.
            css_class = label.replace(' ', '').lower()
            css_label_classes.append(css_class)
    except KeyError:
        # No population chart component was generated for this analysis.
        population_chart_context = None

    sections['population_chart'] = {
        'img_path': resource_url(population_donut_path),
        'context': population_chart_context,
        'css_label_classes': css_label_classes
    }

    context['brand_logo'] = resource_url(
        resources_path('img', 'logos', 'inasafe-logo-white.png'))
    context['sections'] = sections
    context['title'] = analysis_layer.title() or value_from_field_name(
        analysis_name_field['field_name'], analysis_layer)

    return context
# --- Example #10 (score: 0) ---
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: The impact report that acts as a proxy to fetch
        all the data that extractor needed.
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: The component metadata. Used to obtain
        information about the component we want to render.
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: Context for rendering phase.
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = next(analysis_layer.getFeatures())
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    use_rounding = impact_report.impact_function.use_rounding
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    """Initializations."""

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # Get exposure type definition
    exposure_type = definition(exposure_keywords['exposure'])
    # Only round the number when it is population exposure and we use rounding
    is_population = exposure_type is exposure_population

    # action for places with poopulation exposure
    is_place_with_population = False
    if exposure_type is exposure_place:
        exposure_fields = exposure_keywords['inasafe_fields']
        if exposure_fields.get(population_count_field['key']):
            is_place_with_population = True

    # Analysis detail only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [
        exposure_type_field,
        exposure_class_field
    ]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break

    """Create detail header."""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(
        breakdown_header_template.format(exposure=exposure_header))

    # this is mapping for customizing double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args,
        'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': [],
            'total': []
        },
        'not_affected': {
            'hazards': [],
            'total': []
        }
    }
    for key, group in list(header_hazard_group.items()):
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    affected_header_index = None
    for index, hazard_class in enumerate(hazard_classification['classes']):
        # the tuple format would be:
        # (class name, is it affected, header background color

        hazard_class_name = hazard_class['name']
        affected = hazard_class.get('affected')

        if not affected and not affected_header_index:
            affected_header_index = index + 1
            affected_status = 'not_affected'
        elif affected:
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    if affected_header_index:
        not_affected_header_index = len(hazard_classification['classes']) + 2
    else:
        affected_header_index = len(hazard_classification['classes']) + 1
        not_affected_header_index = affected_header_index + 2

    report_fields = []

    headers.insert(affected_header_index, total_affected_field['name'])
    header_hazard_group['affected']['total'].append(
        total_affected_field['name'])
    report_fields.append(total_affected_field)

    headers.insert(not_affected_header_index, total_not_affected_field['name'])
    header_hazard_group['not_affected']['total'].append(
        total_not_affected_field['name'])
    report_fields.append(total_not_affected_field)

    # affected, not affected, population (if applicable), not exposed,
    # total header
    report_fields += [total_not_exposed_field, total_field]

    place_pop_name = resolve_from_dictionary(
        extra_args, ['place_with_population', 'header'])
    if is_place_with_population:
        # we want to change header name for population
        duplicated_population_count_field = deepcopy(
            exposed_population_count_field)
        duplicated_population_count_field['name'] = place_pop_name
        report_fields.append(duplicated_population_count_field)

    report_fields_index = -2 + -(int(is_place_with_population))
    for report_field in report_fields[report_fields_index:]:
        headers.append(report_field['name'])

    """Create detail rows."""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fields().lookupField(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in list(header_hazard_group.items()):
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fields() \
                    .lookupField(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(
                    count_value,
                    use_rounding=use_rounding,
                    is_population=is_population)
                row.append({
                    'value': count_value,
                    'header_group': group_key
                })
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({
                    'value': 0,
                    'header_group': group_key
                })

        skip_row = False

        for field in report_fields:
            group_key = None
            for key, group in list(header_hazard_group.items()):
                if field['name'] in group['total']:
                    group_key = key
                    break

            field_index = exposure_summary_table.fields().lookupField(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(
                total_count,
                use_rounding=use_rounding,
                is_population=is_population)

            # we comment below code because now we want to show all rows,
            # we can uncomment if we want to remove the rows with zero total

            # if total_count == '0' and field == total_affected_field:
            #     skip_row = True
            #     break

            if group_key:
                if field == total_affected_field:
                    row.insert(
                        affected_header_index,
                        {
                            'value': total_count,
                            'header_group': group_key
                        })
                elif field == total_not_affected_field:
                    row.insert(
                        not_affected_header_index,
                        {
                            'value': total_count,
                            'header_group': group_key
                        })
                else:
                    row.append({
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_keywords)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # The exposure class key is stored in the first column.
        _class_key = _row[0]
        # Index of the key within the classes list; unknown keys map to
        # -1 so they sort ahead of every known class.
        return next(
            (i for i, _exposure_class in enumerate(exposure_classes_lists)
             if _exposure_class['key'] == _class_key),
            -1)

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name

    """create total footers."""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    save_total_affected_field = False
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (
            hazard_class['key'],)
        if not hazard_class.get('affected'):
            save_total_affected_field = True

        group_key = None
        for key, group in list(header_hazard_group.items()):
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fields().lookupField(field_name)
            count_value = format_number(
                analysis_feature[field_index],
                use_rounding=use_rounding,
                is_population=is_population)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers) + int(save_total_affected_field)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            # reduce total affected and not affected column index by 1
            # since we are removing a column
            if group_key == affected_field['field_name']:
                affected_header_index -= 1
            else:
                not_affected_header_index -= 1
            continue
        footers.append({
            'value': count_value,
            'header_group': group_key
        })

    # for footers
    for field in report_fields:

        total_count = value_from_field_name(
            field['field_name'], analysis_layer)

        if not total_count and field['name'] == place_pop_name:
            field = population_count_field
            field['name'] = place_pop_name
            total_count = value_from_field_name(
                field['field_name'], analysis_layer)

        group_key = None
        for key, group in list(header_hazard_group.items()):
            if field['name'] in group['total']:
                group_key = key
                break

        total_count = format_number(
            total_count,
            use_rounding=use_rounding,
            is_population=is_population)
        if group_key:
            if field == total_affected_field:
                footers.insert(
                    affected_header_index,
                    {
                        'value': total_count,
                        'header_group': group_key
                    })
            elif field == total_not_affected_field:
                footers.insert(
                    not_affected_header_index,
                    {
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                footers.append({
                    'value': total_count,
                    'header_group': group_key
                })
        else:
            footers.append(total_count)

    header = resolve_from_dictionary(
        extra_args, 'header')
    notes = resolve_from_dictionary(
        extra_args, 'notes')

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0

    # we want to include total affected and not affected as a group
    # to its class so len(report_fields) - 2
    total_header_index = len(headers) - (len(report_fields) - 2)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in list(header_hazard_group.items()):
            if hazard_class_name in group['hazards'] or (
                    hazard_class_name in group['total']):
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    context['extra_table'] = {}

    # extra table for specific exposure if exist
    extra_fields = resolve_from_dictionary(extra_args, 'exposure_extra_fields')
    if exposure_type['key'] in list(extra_fields.keys()):

        # create header for the extra table
        extra_table_header_format = resolve_from_dictionary(
            extra_args, 'extra_table_header_format')
        extra_table_header = extra_table_header_format.format(
            exposure=exposure_header)

        # headers
        headers = []
        headers.append(
            breakdown_header_template.format(exposure=exposure_header))

        current_unit = None
        currency_unit = setting('currency', expected_type=str)
        for field in extra_fields[exposure_type['key']]:
            field_index = exposure_summary_table.fields().lookupField(
                field['field_name'])
            if field_index < 0:
                LOGGER.debug(
                    'Field name not found: %s, field index: %s' % (
                        field['field_name'], field_index))
                continue

            units = field.get('units')
            if units:
                for unit in units:
                    if currency_unit == unit['key']:
                        current_unit = unit['name']
                        break
                if not current_unit:
                    current_unit = units[0]['name']

            header_format = '{header} ({unit})'
            headers.append(header_format.format(
                header=field['header_name'], unit=current_unit))

        # rows
        details = []
        for feat in exposure_summary_table.getFeatures():
            row = []

            # Get breakdown name
            exposure_summary_table_field_name = breakdown_field['field_name']
            field_index = exposure_summary_table.fields().lookupField(
                exposure_summary_table_field_name)
            class_key = feat[field_index]

            row.append(class_key)

            for field in extra_fields[exposure_type['key']]:
                field_index = exposure_summary_table.fields().lookupField(
                    field['field_name'])
                # noinspection PyBroadException
                try:
                    total_count = int(float(feat[field_index]))
                except BaseException:
                    LOGGER.debug(
                        'Field name not found: %s, field index: %s' % (
                            field['field_name'], field_index))
                    continue
                total_count = format_number(
                    total_count,
                    use_rounding=use_rounding,
                    is_population=is_population)
                row.append(total_count)

            details.append(row)

        details = sorted(details, key=sort_classes)

        context['extra_table'] = {
            'table_header': extra_table_header,
            'headers': headers,
            'details': details,
        }

    return context
Exemplo n.º 11
0
def create_section_without_aggregation(
        aggregation_summary, analysis_layer, postprocessor_fields,
        section_header,
        units_label=None,
        debug_mode=False,
        extra_component_args=None):
    """Create demographic section context without aggregation.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorLayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract. Expected
        to be a dict with a 'fields' key holding the field definitions.
    :type postprocessor_fields: dict

    :param section_header: Section header text
    :type section_header: basestring

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param debug_mode: flag for debug_mode, affect number representations
    :type debug_mode: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for gender section
    :rtype: dict
    """
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords[
        'inasafe_fields']
    # Use exact numbers in debug mode, rounded numbers otherwise.
    enable_rounding = not debug_mode

    # Keep only the postprocessor fields actually present in the
    # aggregation summary.
    postprocessors_fields_found = [
        output_field for output_field in postprocessor_fields['fields']
        if output_field['key'] in aggregation_summary_fields]

    if not postprocessors_fields_found:
        return {}

    # The displaced field must exist in both layers, otherwise there is
    # no result to show.
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}

    """Generating header name for columns"""

    # First column header is the section title, second is the total
    # population header.
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        section_header,
        total_population_header,
    ]

    """Generating values for rows"""
    row_values = []

    for idx, output_field in enumerate(postprocessors_fields_found):

        # Prefer the explicit header name, fall back to the field name.
        name = output_field.get('header_name')
        if not name:
            name = output_field.get('name')

        row = []
        if units_label or output_field.get('unit'):
            unit = None
            if units_label:
                unit = units_label[idx]
            elif output_field.get('unit'):
                unit = output_field.get('unit').get('abbreviation')

            if unit:
                header_format = u'{name} [{unit}]'
            else:
                header_format = u'{name}'

            header = header_format.format(
                name=name,
                unit=unit)
        else:
            header_format = u'{name}'
            header = header_format.format(name=name)

        row.append(header)

        # If there is no aggregation layer, the aggregation summary only
        # contains one feature, so take the total from the analysis layer.
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(
            field_name,
            analysis_layer)
        value = format_number(
            value,
            enable_rounding=enable_rounding,
            is_population=True)
        row.append(value)

        row_values.append(row)

    return {
        'columns': columns,
        'rows': row_values,
    }
Exemplo n.º 12
0
def create_section_with_aggregation(
        aggregation_summary, analysis_layer, postprocessor_fields,
        section_header,
        units_label=None,
        debug_mode=False,
        extra_component_args=None):
    """Create demographic section context with aggregation breakdown.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorLayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract. Either a
        dict with a 'fields' key (and optional 'group_header' / 'group'
        grouping metadata), or a plain list of field definitions.
    :type postprocessor_fields: dict | list[dict]

    :param section_header: Section header text
    :type section_header: basestring

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param debug_mode: flag for debug_mode, affect number representations
    :type debug_mode: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for gender section
    :rtype: dict

    .. versionadded:: 4.0
    """
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords[
        'inasafe_fields']
    # Use exact numbers in debug mode, rounded numbers otherwise.
    enable_rounding = not debug_mode

    # postprocessor_fields can be a dict wrapping the field list, or the
    # field list itself.
    if isinstance(postprocessor_fields, dict):
        output_fields = postprocessor_fields['fields']
    else:
        output_fields = postprocessor_fields

    # Keep only the postprocessor fields actually present in the
    # aggregation summary.
    postprocessors_fields_found = [
        output_field for output_field in output_fields
        if output_field['key'] in aggregation_summary_fields]

    if not postprocessors_fields_found:
        return {}

    # The displaced field must exist in both layers, otherwise there is
    # no result to show.
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}

    """Generating header name for columns"""

    # First column header is aggregation title
    default_aggregation_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'aggregation_header'])
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        aggregation_summary.title() or default_aggregation_header,
        total_population_header,
    ]
    row_values = []

    group_fields_found = []
    start_group_header = True
    # FIX: initialize up-front so that a plain-list postprocessor_fields or
    # a dict without a 'group_header' key cannot raise NameError when the
    # header dict is built below.
    group_header = None
    for idx, output_field in enumerate(postprocessors_fields_found):

        # Prefer the explicit header name, fall back to the field name.
        name = output_field.get('header_name')
        if not name:
            name = output_field.get('name')

        if units_label or output_field.get('unit'):
            unit = None
            if units_label:
                unit = units_label[idx]
            elif output_field.get('unit'):
                unit = output_field.get('unit').get('abbreviation')

            if unit:
                header_format = u'{name} [{unit}]'
            else:
                header_format = u'{name}'

            header = header_format.format(
                name=name,
                unit=unit)
        else:
            header_format = u'{name}'
            header = header_format.format(name=name)

        if isinstance(postprocessor_fields, dict):
            try:
                group_header = postprocessor_fields['group_header']
                group_fields = postprocessor_fields['group']['fields']
                if output_field in group_fields:
                    group_fields_found.append(output_field)
                else:
                    # Not part of the group: plain string header.
                    columns.append(header)
                    continue
            except KeyError:
                # No grouping metadata: treat every field as grouped.
                group_fields_found.append(output_field)
        else:
            # Plain field list: no grouping metadata available at all, so
            # emit a plain string header.
            columns.append(header)
            continue

        header_dict = {
            'name': header,
            'group_header': group_header,
            'start_group_header': start_group_header
        }

        # Only the first grouped column carries the group header start flag.
        start_group_header = False
        columns.append(header_dict)

    """Generating values for rows"""

    # These lookups do not depend on the feature, hoist them out of the
    # loop.
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    displaced_field_name = aggregation_summary_fields[
        displaced_field['key']]
    displaced_field_index = aggregation_summary.fieldNameIndex(
        displaced_field_name)

    for feature in aggregation_summary.getFeatures():

        aggregation_name = feature[aggregation_name_index]
        total_displaced = feature[displaced_field_index]

        if not total_displaced or isinstance(total_displaced, QPyNullVariant):
            # skip if total displaced null
            continue

        total_displaced = format_number(
            feature[displaced_field_index],
            enable_rounding=enable_rounding,
            is_population=True)

        row = [
            aggregation_name,
            total_displaced,
        ]

        if total_displaced == '0' and not debug_mode:
            # Rounded down to zero: not worth a row outside debug mode.
            continue

        for output_field in postprocessors_fields_found:
            field_name = aggregation_summary_fields[output_field['key']]
            field_index = aggregation_summary.fieldNameIndex(field_name)
            value = feature[field_index]

            value = format_number(
                value,
                enable_rounding=enable_rounding,
                is_population=True)
            row.append(value)

        row_values.append(row)

    """Generating total rows """

    total_displaced_field_name = analysis_layer_fields[
        displaced_field['key']]
    value = value_from_field_name(
        total_displaced_field_name, analysis_layer)
    value = format_number(
        value,
        enable_rounding=enable_rounding,
        is_population=True)
    total_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_header'])
    totals = [
        total_header,
        value
    ]
    for output_field in postprocessors_fields_found:
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(field_name, analysis_layer)
        value = format_number(
            value,
            enable_rounding=enable_rounding,
            is_population=True)
        totals.append(value)

    default_notes = resolve_from_dictionary(
        extra_component_args, ['defaults', 'notes'])

    if not isinstance(default_notes, list):
        default_notes = [default_notes]

    try:
        # Append group-specific notes when grouping metadata provides them.
        notes = default_notes + postprocessor_fields['group']['notes']
    except (TypeError, KeyError):
        # Plain list input (TypeError) or no group notes (KeyError).
        notes = default_notes

    return {
        'notes': notes,
        'header': section_header,
        'columns': columns,
        'rows': row_values,
        'totals': totals,
        'group_header_colspan': len(group_fields_found)
    }
def create_section_with_aggregation(
        aggregation_summary, analysis_layer, postprocessor_fields,
        section_header,
        units_label=None,
        debug_mode=False,
        extra_component_args=None):
    """Create demographic section context with aggregation breakdown.

    NOTE(review): this redefines the function of the same name above and
    calls ``format_number`` without ``is_population`` — presumably an older
    revision kept alongside the newer one; confirm which definition callers
    should get.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorlayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract
    :type postprocessor_fields: list[dict]

    :param section_header: Section header text
    :type section_header: qgis.core.QgsVectorLayer

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param debug_mode: flag for debug_mode, affect number representations
    :type debug_mode: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for gender section
    :rtype: dict

    .. versionadded:: 4.0
    """
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords[
        'inasafe_fields']
    # Exact numbers in debug mode, rounded numbers otherwise.
    enable_rounding = not debug_mode

    # retrieving postprocessor
    # Keep only the postprocessor fields present in the aggregation summary.
    postprocessors_fields_found = []
    for output_field in postprocessor_fields:
        if output_field['key'] in aggregation_summary_fields:
            postprocessors_fields_found.append(output_field)

    if not postprocessors_fields_found:
        return {}

    # figuring out displaced field
    # no displaced field, can't show result
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}

    """Generating header name for columns"""

    # First column header is aggregation title
    default_aggregation_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'aggregation_header'])
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        aggregation_summary.title() or default_aggregation_header,
        total_population_header,
    ]
    row_values = []

    # Build one column header per postprocessor field, with a unit suffix
    # when a unit label or abbreviation is available.
    for idx, output_field in enumerate(postprocessors_fields_found):
        name = output_field['name']
        if units_label or output_field.get('unit'):
            unit = None
            if units_label:
                # Explicit per-column labels take precedence.
                unit = units_label[idx]
            elif output_field.get('unit'):
                unit = output_field.get('unit').get('abbreviation')

            if unit:
                header_format = u'{name} [{unit}]'
            else:
                header_format = u'{name}'

            header = header_format.format(
                name=name,
                unit=unit)
        else:
            header_format = u'{name}'
            header = header_format.format(name=name)

        columns.append(header)

    """Generating values for rows"""

    # One row per aggregation area feature.
    for feature in aggregation_summary.getFeatures():

        aggregation_name_index = aggregation_summary.fieldNameIndex(
            aggregation_name_field['field_name'])
        displaced_field_name = aggregation_summary_fields[
            displaced_field['key']]
        displaced_field_index = aggregation_summary.fieldNameIndex(
            displaced_field_name)

        aggregation_name = feature[aggregation_name_index]
        total_displaced = feature[displaced_field_index]

        if not total_displaced or isinstance(total_displaced, QPyNullVariant):
            # skip if total displaced null
            continue

        total_displaced = format_number(
            feature[displaced_field_index],
            enable_rounding=enable_rounding)

        row = [
            aggregation_name,
            total_displaced,
        ]

        # Formatted value rounded down to '0': hide the row outside debug
        # mode.
        if total_displaced == '0' and not debug_mode:
            continue

        for output_field in postprocessors_fields_found:
            field_name = aggregation_summary_fields[output_field['key']]
            field_index = aggregation_summary.fieldNameIndex(field_name)
            value = feature[field_index]

            value = format_number(
                value,
                enable_rounding=enable_rounding)
            row.append(value)

        row_values.append(row)

    """Generating total rows """

    # Totals come from the analysis layer, not from summing the rows.
    total_displaced_field_name = analysis_layer_fields[
        displaced_field['key']]
    value = value_from_field_name(
        total_displaced_field_name, analysis_layer)
    value = format_number(
        value,
        enable_rounding=enable_rounding)
    total_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_header'])
    totals = [
        total_header,
        value
    ]
    for output_field in postprocessors_fields_found:
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(field_name, analysis_layer)
        value = format_number(
            value,
            enable_rounding=enable_rounding)
        totals.append(value)

    notes = resolve_from_dictionary(
        extra_component_args, ['defaults', 'notes'])
    return {
        'notes': notes,
        'header': section_header,
        'columns': columns,
        'rows': row_values,
        'totals': totals,
    }
Exemplo n.º 14
0
def population_infographic_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    """Initializations"""
    hazard_layer = impact_report.hazard
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    icons = component_metadata.extra_args.get('icons')

    # This report section only applies if it is a population report.
    population_fields = [
        population_count_field['key'],
        exposure_count_field['key'] % (exposure_population['key'], ),
    ] + [f['key'] for f in minimum_needs_fields]

    for item in population_fields:
        if item in analysis_layer_fields:
            break
    else:
        # No population-related field at all: nothing to render.
        return context

    # We try to get total affected field.
    # If it didn't exist, check other fields to show.
    total_affected_fields = [
        total_affected_field['key'],
        # We might want to check other fields, but turn it off until further
        # discussion
        population_count_field['key'],
        exposure_count_field['key'] % (exposure_population['key'], ),
    ]

    for item in total_affected_fields:
        if item in analysis_layer_fields:
            total_affected = value_from_field_name(
                analysis_layer_fields[item],
                analysis_layer)
            # Remember which field we used, to pick the matching header.
            total_affected_field_used = item
            break
    else:
        return context

    if displaced_field['key'] in analysis_layer_fields:
        total_displaced = value_from_field_name(
            analysis_layer_fields[displaced_field['key']],
            analysis_layer)
    else:
        return context

    sections = OrderedDict()

    """People Section"""

    # Take default value from definitions
    people_header = resolve_from_dictionary(
        extra_args, ['sections', 'people', 'header'])
    people_items = resolve_from_dictionary(
        extra_args, ['sections', 'people', 'items'])

    # create context for affected infographic
    sub_header = resolve_from_dictionary(
        people_items[0], 'sub_header')

    # retrieve relevant header based on the fields we showed.
    sub_header = sub_header[total_affected_field_used]

    affected_infographic = PeopleInfographicElement(
        header=sub_header,
        icon=icons.get(
            total_affected_field['key']),
        number=total_affected)

    # create context for displaced infographic
    sub_header = resolve_from_dictionary(
        people_items[1], 'sub_header')
    sub_header_note_format = resolve_from_dictionary(
        people_items[1], 'sub_header_note_format')
    rate_description_format = resolve_from_dictionary(
        people_items[1], 'rate_description_format')
    rate_description = []

    # Describe only the hazard classes that have a non-zero
    # displacement rate.
    hazard_classification = layer_hazard_classification(hazard_layer)
    for hazard_class in hazard_classification['classes']:
        displacement_rate = hazard_class.get('displacement_rate', 0)
        if displacement_rate:
            rate_description.append(
                rate_description_format.format(**hazard_class))

    rate_description_string = ', '.join(rate_description)

    sub_header_note = sub_header_note_format.format(
        rate_description=rate_description_string)

    displaced_infographic = PeopleInfographicElement(
        header=sub_header,
        header_note=sub_header_note,
        icon=icons.get(
            displaced_field['key']),
        number=total_displaced)

    sections['people'] = {
        'header': people_header,
        'items': [
            affected_infographic,
            displaced_infographic
        ]
    }

    """Vulnerability Section"""

    # Take default value from definitions
    vulnerability_items = resolve_from_dictionary(
        extra_args,
        ['sections', 'vulnerability', 'items'])

    vulnerability_section_header = resolve_from_dictionary(
        extra_args,
        ['sections', 'vulnerability', 'header'])

    vulnerability_section_sub_header_format = resolve_from_dictionary(
        extra_args,
        ['sections', 'vulnerability', 'sub_header_format'])

    infographic_elements = []
    for group in vulnerability_items:
        fields = group['fields']
        group_header = group['sub_group_header']
        bootstrap_column = group['bootstrap_column']
        element_column = group['element_column']
        headers = group['headers']
        elements = []
        for field, header in zip(fields, headers):
            field_key = field['key']
            try:
                field_name = analysis_layer_fields[field_key]
                value = value_from_field_name(
                    field_name, analysis_layer)
            except KeyError:
                # It means the field is not there
                continue

            # FIX: also require a non-zero total_displaced so an analysis
            # with zero displaced people cannot raise ZeroDivisionError.
            if value and total_displaced:
                value_percentage = value * 100.0 / total_displaced
            else:
                value_percentage = 0

            infographic_element = PeopleVulnerabilityInfographicElement(
                header=header,
                icon=icons.get(field_key),
                number=value,
                percentage=value_percentage
            )
            elements.append(infographic_element)
        if elements:
            infographic_elements.append({
                'group_header': group_header,
                'bootstrap_column': bootstrap_column,
                'element_column': element_column,
                'items': elements
            })

    total_displaced_rounded = format_number(
        total_displaced,
        enable_rounding=True,
        is_population=True)

    sections['vulnerability'] = {
        'header': vulnerability_section_header,
        'small_header': vulnerability_section_sub_header_format.format(
            number_displaced=total_displaced_rounded),
        'items': infographic_elements
    }

    """Minimum Needs Section"""

    minimum_needs_header = resolve_from_dictionary(
        extra_args,
        ['sections', 'minimum_needs', 'header'])
    empty_unit_string = resolve_from_dictionary(
        extra_args,
        ['sections', 'minimum_needs', 'empty_unit_string'])

    items = []

    for item in minimum_needs_fields:
        need = item['need_parameter']
        if isinstance(need, ResourceParameter):

            needs_count = value_from_field_name(
                item['field_name'], analysis_layer)

            if need.unit.abbreviation:
                unit_string = '{unit}/{frequency}'.format(
                    unit=need.unit.abbreviation,
                    frequency=need.frequency)
            else:
                unit_string = empty_unit_string

            # Use a distinct name so we don't shadow the loop variable.
            element = PeopleMinimumNeedsInfographicElement(
                header=item['name'],
                icon=icons.get(
                    item['key']),
                number=needs_count,
                unit=unit_string)
            items.append(element)

    # TODO: get from impact function provenance later
    needs_profile = NeedsProfile()

    sections['minimum_needs'] = {
        'header': minimum_needs_header,
        'small_header': needs_profile.provenance,
        'items': items,
    }

    """Population Charts"""

    population_donut_path = impact_report.component_absolute_output_path(
        'population-chart-png')

    css_label_classes = []
    try:
        population_chart_context = impact_report.metadata.component_by_key(
            'population-chart').context['context']
        """
        :type: safe.report.extractors.infographic_elements.svg_charts.
            DonutChartContext
        """
        for pie_slice in population_chart_context.slices:
            label = pie_slice['label']
            if not label:
                continue
            # Derive a CSS class name from the label (lowercase, no spaces).
            css_class = label.replace(' ', '').lower()
            css_label_classes.append(css_class)
    except KeyError:
        # No population chart component was rendered.
        population_chart_context = None

    sections['population_chart'] = {
        'img_path': resource_url(population_donut_path),
        'context': population_chart_context,
        'css_label_classes': css_label_classes
    }

    context['brand_logo'] = resource_url(
        resources_path('img', 'logos', 'inasafe-logo-white.png'))
    context['sections'] = sections
    context['title'] = analysis_layer.title() or value_from_field_name(
        analysis_name_field['field_name'], analysis_layer)

    return context
Exemplo n.º 15
0
def multi_exposure_general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.3
    """
    context = {}
    extra_args = component_metadata.extra_args

    multi_exposure = impact_report.multi_exposure_impact_function
    analysis_layer = impact_report.analysis
    provenances = [
        impact_function.provenance
        for impact_function in (multi_exposure.impact_functions)
    ]
    debug_mode = multi_exposure.debug
    population_exist = False

    hazard_keywords = provenances[0]['hazard_keywords']
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Summarize every value needed for each exposure and extract later to the
    # context.
    summary = []
    map_legend_titles = []
    hazard_classifications = {}
    exposures_stats = []
    for provenance in provenances:
        map_legend_title = provenance['map_legend_title']
        map_legend_titles.append(map_legend_title)
        exposure_stats = {}
        exposure_keywords = provenance['exposure_keywords']
        exposure_type = definition(exposure_keywords['exposure'])
        exposure_stats['exposure'] = exposure_type
        # Only round the number when it is population exposure and it is not
        # in debug mode
        is_rounded = not debug_mode
        is_population = exposure_type is exposure_population
        if is_population:
            population_exist = True

        analysis_feature = next(analysis_layer.getFeatures())
        analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

        exposure_unit = exposure_type['units'][0]
        if exposure_unit['abbreviation']:
            value_header = '{measure} ({abbreviation})'.format(
                measure=map_legend_title,
                abbreviation=exposure_unit['abbreviation'])
        else:
            value_header = '{name}'.format(name=map_legend_title)

        exposure_stats['value_header'] = value_header

        # Get hazard classification
        hazard_classification = definition(
            active_classification(hazard_keywords,
                                  exposure_keywords['exposure']))
        hazard_classifications[exposure_type['key']] = hazard_classification

        # FIX: initialize here (not inside the classification branch below)
        # so the reported_fields loop further down cannot raise a NameError
        # when this exposure has no hazard classification.
        reported_fields_result = {}

        # in case there is a classification
        if hazard_classification:
            classification_result = {}
            for hazard_class in hazard_classification['classes']:
                # hazard_count_field is a dynamic field with hazard class
                # as parameter
                field_key_name = exposure_hazard_count_field['key'] % (
                    exposure_type['key'], hazard_class['key'])
                try:
                    # retrieve dynamic field name from analysis_fields keywords
                    # will cause key error if no hazard count for that
                    # particular class
                    field_name = analysis_inasafe_fields[field_key_name]
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    hazard_value = format_number(analysis_feature[field_index],
                                                 use_rounding=is_rounded,
                                                 is_population=is_population)
                except KeyError:
                    # in case the field was not found
                    hazard_value = 0

                classification_result[hazard_class['key']] = hazard_value

            # find total field
            try:
                field_key_name = exposure_total_exposed_field['key'] % (
                    exposure_type['key'])
                field_name = analysis_inasafe_fields[field_key_name]
                total = value_from_field_name(field_name, analysis_layer)
                total = format_number(total,
                                      use_rounding=is_rounded,
                                      is_population=is_population)
                classification_result[total_exposed_field['key']] = total
            except KeyError:
                pass

            exposure_stats['classification_result'] = classification_result

        for item in reported_fields:
            field = item.get('field')
            multi_exposure_field = item.get('multi_exposure_field')
            row_value = '-'
            if multi_exposure_field:
                field_key = (multi_exposure_field['key'] %
                             (exposure_type['key']))
                field_name = (multi_exposure_field['field_name'] %
                              (exposure_type['key']))
                if field_key in analysis_inasafe_fields:
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    row_value = format_number(analysis_feature[field_index],
                                              use_rounding=is_rounded,
                                              is_population=is_population)

            elif field in [displaced_field, fatalities_field]:
                if field['key'] in analysis_inasafe_fields and is_population:
                    field_index = analysis_layer.fields().lookupField(
                        field['name'])
                    if field == fatalities_field:
                        # For fatalities field, we show a range of number
                        # instead
                        row_value = fatalities_range(
                            analysis_feature[field_index])
                    else:
                        row_value = format_number(
                            analysis_feature[field_index],
                            use_rounding=is_rounded,
                            is_population=is_population)

            reported_fields_result[field['key']] = row_value

        exposure_stats['reported_fields_result'] = reported_fields_result
        exposures_stats.append(exposure_stats)

    # After finish summarizing value, then proceed to context extraction.

    # find total value and labels for each exposure
    value_headers = []
    total_values = []
    for exposure_stats in exposures_stats:
        # label
        value_header = exposure_stats['value_header']
        value_headers.append(value_header)

        # total value
        classification_result = exposure_stats['classification_result']
        total_value = classification_result[total_exposed_field['key']]
        total_values.append(total_value)

    classifications = list(hazard_classifications.values())
    # FIX: check for emptiness before indexing classifications[0];
    # previously the count() call would raise IndexError on an empty list.
    is_item_identical = bool(classifications) and (
        classifications.count(classifications[0]) == len(classifications))
    if classifications and is_item_identical:
        hazard_stats = []
        for hazard_class in classifications[0]['classes']:
            values = []
            for exposure_stats in exposures_stats:
                classification_result = exposure_stats['classification_result']
                value = classification_result[hazard_class['key']]
                values.append(value)
            stats = {
                'key': hazard_class['key'],
                'name': hazard_class['name'],
                'numbers': values
            }
            hazard_stats.append(stats)

        total_stats = {
            'key': total_exposed_field['key'],
            'name': total_exposed_field['name'],
            'as_header': True,
            'numbers': total_values
        }
        hazard_stats.append(total_stats)

        summary.append({
            'header_label': hazard_header,
            'value_labels': value_headers,
            'rows': hazard_stats
        })
    # if there are different hazard classifications used in the analysis,
    # we will create a separate table for each hazard classification
    else:
        hazard_classification_groups = {}
        for exposure_key, hazard_classification in (iter(
                list(hazard_classifications.items()))):
            exposure_type = definition(exposure_key)
            if hazard_classification['key'] not in (
                    hazard_classification_groups):
                hazard_classification_groups[hazard_classification['key']] = [
                    exposure_type
                ]
            else:
                hazard_classification_groups[
                    hazard_classification['key']].append(exposure_type)

        for hazard_classification_key, exposures in (iter(
                list(hazard_classification_groups.items()))):
            custom_headers = []
            custom_total_values = []
            # find total value and labels for each exposure
            for exposure_stats in exposures_stats:
                if exposure_stats['exposure'] not in exposures:
                    continue
                # label
                value_header = exposure_stats['value_header']
                custom_headers.append(value_header)

                # total value
                classification_result = exposure_stats['classification_result']
                total_value = classification_result[total_exposed_field['key']]
                custom_total_values.append(total_value)

            hazard_stats = []
            hazard_classification = definition(hazard_classification_key)
            for hazard_class in hazard_classification['classes']:
                values = []
                for exposure_stats in exposures_stats:
                    if exposure_stats['exposure'] not in exposures:
                        continue
                    classification_result = exposure_stats[
                        'classification_result']
                    value = classification_result[hazard_class['key']]
                    values.append(value)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_class['name'],
                    'numbers': values
                }
                hazard_stats.append(stats)

            total_stats = {
                'key': total_exposed_field['key'],
                'name': total_exposed_field['name'],
                'as_header': True,
                'numbers': custom_total_values
            }
            hazard_stats.append(total_stats)

            summary.append({
                'header_label': hazard_header,
                'value_labels': custom_headers,
                'rows': hazard_stats
            })

    reported_fields_stats = []
    for item in reported_fields:
        field = item.get('field')
        values = []
        for exposure_stats in exposures_stats:
            reported_fields_result = exposure_stats['reported_fields_result']
            value = reported_fields_result[field['key']]
            values.append(value)
        stats = {
            'key': field['key'],
            'name': item['header'],
            'numbers': values
        }
        reported_fields_stats.append(stats)

    header_label = resolve_from_dictionary(extra_args,
                                           ['reported_fields_header'])
    summary.append({
        'header_label': header_label,
        'value_labels': value_headers,
        'rows': reported_fields_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])

    combined_map_legend_title = ''
    for index, map_legend_title in enumerate(map_legend_titles):
        combined_map_legend_title += map_legend_title
        if not (index + 1) == len(map_legend_titles):
            combined_map_legend_title += ', '

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')
    table_header = table_header_format.format(
        title=combined_map_legend_title,
        unit=classifications[0]['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(extra_args,
                                          ['concept_notes', 'note_format'])

    concepts = resolve_from_dictionary(extra_args,
                                       ['concept_notes', 'general_concepts'])

    if population_exist:
        concepts += resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
Exemplo n.º 16
0
def create_section_with_aggregation(aggregation_summary,
                                    analysis_layer,
                                    postprocessor_fields,
                                    section_header,
                                    units_label=None,
                                    use_rounding=True,
                                    extra_component_args=None):
    """Create demographic section context with aggregation breakdown.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorlayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract
    :type postprocessor_fields: list[dict]

    :param section_header: Section header text
    :type section_header: qgis.core.QgsVectorLayer

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param use_rounding: flag for rounding, affect number representations
    :type use_rounding: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for gender section
    :rtype: dict

    .. versionadded:: 4.0
    """
    aggregation_summary_fields = aggregation_summary.keywords['inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']

    # retrieving postprocessor
    postprocessors_fields_found = []

    # postprocessor_fields may be either a dict (with 'fields' and optional
    # 'group'/'group_header' keys) or a plain list of output fields.
    if isinstance(postprocessor_fields, dict):
        output_fields = postprocessor_fields['fields']
    else:
        output_fields = postprocessor_fields

    for output_field in output_fields:
        if output_field['key'] in aggregation_summary_fields:
            postprocessors_fields_found.append(output_field)

    if not postprocessors_fields_found:
        return {}

    # figuring out displaced field
    # no displaced field, can't show result
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}
    """Generating header name for columns."""

    # First column header is aggregation title
    default_aggregation_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'aggregation_header'])
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        aggregation_summary.title() or default_aggregation_header,
        total_population_header,
    ]
    row_values = []

    group_fields_found = []
    start_group_header = True
    # FIX: default value so a missing 'group_header' key in the dict form
    # cannot leave group_header unassigned (NameError) below.
    group_header = None
    for idx, output_field in enumerate(postprocessors_fields_found):

        name = output_field.get('header_name')
        if not name:
            name = output_field.get('name')

        if units_label or output_field.get('unit'):
            unit = None
            if units_label:
                unit = units_label[idx]
            elif output_field.get('unit'):
                unit = output_field.get('unit').get('abbreviation')

            if unit:
                header_format = '{name} [{unit}]'
            else:
                header_format = '{name}'

            header = header_format.format(name=name, unit=unit)
        else:
            header_format = '{name}'
            header = header_format.format(name=name)

        if isinstance(postprocessor_fields, dict):
            try:
                group_header = postprocessor_fields['group_header']
                group_fields = postprocessor_fields['group']['fields']
                if output_field in group_fields:
                    group_fields_found.append(output_field)
                else:
                    columns.append(header)
                    continue
            except KeyError:
                group_fields_found.append(output_field)

            header_dict = {
                'name': header,
                'group_header': group_header,
                'start_group_header': start_group_header
            }

            start_group_header = False
            columns.append(header_dict)
        else:
            # FIX: a plain list of postprocessor fields carries no group
            # header; previously this path crashed with a NameError because
            # header_dict referenced an unassigned group_header.
            columns.append(header)
    """Generating values for rows."""

    for feature in aggregation_summary.getFeatures():

        aggregation_name_index = aggregation_summary.fields().lookupField(
            aggregation_name_field['field_name'])
        displaced_field_name = aggregation_summary_fields[
            displaced_field['key']]
        displaced_field_index = aggregation_summary.fields().lookupField(
            displaced_field_name)

        aggregation_name = feature[aggregation_name_index]
        total_displaced = feature[displaced_field_index]

        if not total_displaced:
            # skip if total displaced null (`not x` already covers None)
            continue

        total_displaced = format_number(feature[displaced_field_index],
                                        use_rounding=use_rounding,
                                        is_population=True)

        row = [
            aggregation_name,
            total_displaced,
        ]

        if total_displaced == '0' and not use_rounding:
            continue

        for output_field in postprocessors_fields_found:
            field_name = aggregation_summary_fields[output_field['key']]
            field_index = aggregation_summary.fields().lookupField(field_name)
            value = feature[field_index]

            value = format_number(value,
                                  use_rounding=use_rounding,
                                  is_population=True)
            row.append(value)

        row_values.append(row)
    """Generating total rows."""

    total_displaced_field_name = analysis_layer_fields[displaced_field['key']]
    value = value_from_field_name(total_displaced_field_name, analysis_layer)
    value = format_number(value, use_rounding=use_rounding, is_population=True)
    total_header = resolve_from_dictionary(extra_component_args,
                                           ['defaults', 'total_header'])
    totals = [total_header, value]
    for output_field in postprocessors_fields_found:
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(field_name, analysis_layer)
        value = format_number(value,
                              use_rounding=use_rounding,
                              is_population=True)
        totals.append(value)

    default_notes = resolve_from_dictionary(extra_component_args,
                                            ['defaults', 'notes'])

    if not isinstance(default_notes, list):
        default_notes = [default_notes]

    # TypeError covers the list form of postprocessor_fields; KeyError a
    # dict without group notes.
    try:
        notes = default_notes + postprocessor_fields['group']['notes']
    except (TypeError, KeyError):
        notes = default_notes

    return {
        'notes': notes,
        'header': section_header,
        'columns': columns,
        'rows': row_values,
        'totals': totals,
        'group_header_colspan': len(group_fields_found)
    }
Exemplo n.º 17
0
def create_section_without_aggregation(aggregation_summary,
                                       analysis_layer,
                                       postprocessor_fields,
                                       section_header,
                                       units_label=None,
                                       use_rounding=True,
                                       extra_component_args=None):
    """Create demographic section context without aggregation.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorlayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract
    :type postprocessor_fields: list[dict]

    :param section_header: Section header text
    :type section_header: qgis.core.QgsVectorLayer

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param use_rounding: flag for rounding, affect number representations
    :type use_rounding: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for gender section
    :rtype: dict
    """
    aggregation_summary_fields = aggregation_summary.keywords['inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']

    # Keep only the postprocessor outputs actually present in the summary.
    postprocessors_fields_found = [
        output_field for output_field in postprocessor_fields['fields']
        if output_field['key'] in aggregation_summary_fields
    ]
    if not postprocessors_fields_found:
        return {}

    # Without a displaced field in both layers there is nothing to report.
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}

    # First column header is the section title itself.
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        section_header,
        total_population_header,
    ]

    # One row per postprocessor field: [header, formatted total value].
    row_values = []
    for position, output_field in enumerate(postprocessors_fields_found):

        label = output_field.get('header_name') or output_field.get('name')

        # Work out the unit suffix: an explicit label wins over the
        # field's own unit abbreviation.
        unit = None
        if units_label:
            unit = units_label[position]
        elif output_field.get('unit'):
            unit = output_field.get('unit').get('abbreviation')

        if unit:
            header = '{name} [{unit}]'.format(name=label, unit=unit)
        else:
            header = '{name}'.format(name=label)

        # if no aggregation layer, then aggregation summary only contain one
        # feature, so the analysis-level total is the value we display.
        field_name = analysis_layer_fields[output_field['key']]
        total = value_from_field_name(field_name, analysis_layer)
        total = format_number(total,
                              use_rounding=use_rounding,
                              is_population=True)

        row_values.append([header, total])

    return {
        'columns': columns,
        'rows': row_values,
    }
Exemplo n.º 18
0
def general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    # figure out analysis report type
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode

    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # find hazard class
    summary = []

    # FIX: use the builtin next() instead of the Python-2-only
    # iterator.next() method (consistent with the other extractors).
    analysis_feature = next(analysis_layer.getFeatures())
    analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

    exposure_unit = exposure_type['units'][0]
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')
    if exposure_unit['abbreviation']:
        value_header = u'{measure} ({abbreviation})'.format(**exposure_unit)
    else:
        value_header = u'{name}'.format(**exposure_unit)

    # FIX: default so the table_header construction below cannot raise a
    # NameError when the hazard layer has no classification keyword.
    hazard_classification = None

    # in case there is a classification
    if 'classification' in hazard_layer.keywords:

        # retrieve hazard classification from hazard layer
        hazard_classification = layer_hazard_classification(hazard_layer)

        # classified hazard must have hazard count in analysis layer
        hazard_stats = []
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )
            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = analysis_inasafe_fields[field_key_name]
                field_index = analysis_layer.fieldNameIndex(field_name)
                # Hazard label taken from translated hazard count field
                # label, string-formatted with translated hazard class label
                hazard_label = hazard_class['name']

                hazard_value = format_number(
                    analysis_feature[field_index],
                    enable_rounding=is_rounded,
                    is_population=is_population)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': hazard_value
                }
            except KeyError:
                # in case the field was not found
                hazard_label = hazard_class['name']
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': 0,
                }

            hazard_stats.append(stats)

        # find total field
        try:
            field_name = analysis_inasafe_fields[total_field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            total = format_number(
                total, enable_rounding=is_rounded, is_population=is_population)
            stats = {
                'key': total_field['key'],
                'name': total_field['name'],
                'as_header': True,
                'value': total
            }
            hazard_stats.append(stats)
        except KeyError:
            pass

        summary.append({
            'header_label': hazard_header,
            'value_label': value_header,
            'rows': hazard_stats
        })

    # retrieve affected column
    report_stats = []

    reported_fields = resolve_from_dictionary(
        extra_args, 'reported_fields')
    for item in reported_fields:
        header = item['header']
        field = item['field']
        if field['key'] in analysis_inasafe_fields:
            field_index = analysis_layer.fieldNameIndex(
                field['field_name'])
            if field == fatalities_field:
                # For fatalities field, we show a range of number
                # instead
                row_value = fatalities_range(analysis_feature[field_index])
            else:
                row_value = format_number(
                    analysis_feature[field_index],
                    enable_rounding=is_rounded,
                    is_population=is_population)
            row_stats = {
                'key': field['key'],
                'name': header,
                'value': row_value
            }
            report_stats.append(row_stats)

    # Give report section
    exposure_type = layer_definition_type(exposure_layer)
    header_label = exposure_type['name']
    summary.append({
        'header_label': header_label,
        # This should depend on exposure unit
        # TODO: Change this so it can take the unit dynamically
        'value_label': value_header,
        'rows': report_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])
    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')
    # FIX: fall back to an empty unit for unclassified hazards; previously
    # hazard_classification was unassigned here and raised a NameError.
    classification_unit = (
        hazard_classification['classification_unit']
        if hazard_classification else '')
    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=classification_unit)

    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header

    return context
Exemplo n.º 19
0
 def number(self):
     """Return the element's number, formatted for display."""
     # Always round and treat the value as a population count.
     return format_number(
         self._number,
         enable_rounding=True,
         is_population=True)
Exemplo n.º 20
0
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = analysis_layer.getFeatures().next()
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode
    """Initializations"""

    # Get hazard classification
    hazard_classification = layer_hazard_classification(hazard_layer)

    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounding = not debug_mode

    # Analysis detail only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    """Create detail header"""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(breakdown_header_template.format(exposure=exposure_header))

    # this is mapping for customizing double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args, 'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': []
        },
        'not_affected': {
            'hazards': []
        }
    }
    for key, group in header_hazard_group.iteritems():
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    for hazard_class in hazard_classification['classes']:
        # the tuple format would be:
        # (class name, is it affected, header background color
        hazard_class_name = hazard_class['name']
        if hazard_class.get('affected'):
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    # affected, not affected, not exposed, total header
    report_fields = [
        total_affected_field, total_not_affected_field,
        total_not_exposed_field, total_field
    ]
    for report_field in report_fields:
        headers.append(report_field['name'])
    """Create detail rows"""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fieldNameIndex(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in header_hazard_group.iteritems():
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fieldNameIndex(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(count_value,
                                            enable_rounding=is_rounding)
                row.append({'value': count_value, 'header_group': group_key})
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({'value': 0, 'header_group': group_key})

        skip_row = False

        for field in report_fields:
            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(total_count,
                                        enable_rounding=is_rounding)
            if total_count == '0' and field == total_affected_field:
                skip_row = True
                break

            row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # class key is first column
        _class_key = _row[0]
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _class_key == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name
    """create total footers"""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (hazard_class['key'], )

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fieldNameIndex(field_name)
            count_value = format_number(analysis_feature[field_index],
                                        enable_rounding=is_rounding)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            continue
        footers.append({'value': count_value, 'header_group': group_key})

    # for footers
    for field in report_fields:
        total_count = value_from_field_name(field['field_name'],
                                            analysis_layer)
        total_count = format_number(total_count, enable_rounding=is_rounding)
        footers.append(total_count)

    header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')

    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0
    total_header_index = len(headers) - len(report_fields)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class_name in group['hazards']:
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    return context
Exemplo n.º 21
0
def mmi_detail_extractor(impact_report, component_metadata):
    """Extracting MMI-related analysis result.

    This extractor should only be used for EQ Raster with Population.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    analysis_layer = impact_report.analysis
    analysis_layer_keywords = analysis_layer.keywords
    extra_args = component_metadata.extra_args
    use_rounding = impact_report.impact_function.use_rounding
    provenance = impact_report.impact_function.provenance
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    # Guard clauses: only an earthquake raster hazard over a population
    # exposure is handled; anything else gets an empty context back.
    if definition(hazard_keywords['hazard']) != hazard_earthquake:
        return context
    if hazard_keywords[layer_geometry['key']] != layer_geometry_raster['key']:
        return context
    if definition(exposure_keywords['exposure']) != exposure_population:
        return context

    context['header'] = resolve_from_dictionary(extra_args, 'header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # First column holds the MMI level, then one column per reported field.
    table_header = [resolve_from_dictionary(extra_args, 'mmi_header')]
    table_header += [item['header'] for item in reported_fields]

    # MMI levels run from 1 to 10, rendered as roman numerals I..X.
    roman_numeral = (
        'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X')
    rows = []
    for mmi_level in range(1, 11):
        row_cells = [roman_numeral[mmi_level - 1]]
        for item in reported_fields:
            field = item['field']
            try:
                # The field key is dynamic, parametrised by the MMI level;
                # missing keys mean no data for that level.
                field_name = analysis_layer_keywords[
                    field['key'] % (mmi_level, )]
                count = value_from_field_name(field_name, analysis_layer)
                if not count:
                    count = 0
            except KeyError:
                count = 0
            row_cells.append(format_number(
                count,
                use_rounding=use_rounding,
                is_population=True))
        rows.append(row_cells)

    # Footer row carrying the grand totals.
    total_footer = [resolve_from_dictionary(extra_args, 'total_header')]
    total_fields = resolve_from_dictionary(extra_args, 'total_fields')
    for field in total_fields:
        try:
            field_name = analysis_layer_keywords[field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            if not total:
                total = 0
        except KeyError:
            total = 0
        total_footer.append(format_number(
            total,
            use_rounding=use_rounding,
            is_population=True))

    context['component_key'] = component_metadata.key
    context['mmi'] = {
        'header': table_header,
        'rows': rows,
        'footer': total_footer
    }

    return context
Exemplo n.º 22
0
def generate_classified_legend(analysis, exposure, hazard, debug_mode):
    """Generate an ordered python structure with the classified symbology.

    :param analysis: The analysis layer.
    :type analysis: QgsVectorLayer

    :param exposure: The exposure layer.
    :type exposure: QgsVectorLayer

    :param hazard: The hazard layer.
    :type hazard: QgsVectorLayer

    :param debug_mode: Boolean if run in debug mode.
    :type debug_mode: bool

    :return: The ordered dictionary to use to build the classified style.
    :rtype: OrderedDict
    """
    # The analysis layer carries a single aggregate feature holding the
    # per-class counts (Python 2 iterator protocol).
    analysis_row = analysis.getFeatures().next()

    # Resolve the hazard classification definition used for styling.
    hazard_classification = definition(hazard.keywords['classification'])
    hazard_classes = hazard_classification['classes']

    # Thresholds only exist for continuous hazards; in that case the labels
    # also need the hazard unit abbreviation.
    thresholds = hazard.keywords.get('thresholds')
    if thresholds:
        hazard_unit = definition(
            hazard.keywords.get('continuous_hazard_unit'))['abbreviation']
    else:
        hazard_unit = None

    exposure_definitions = definition(exposure.keywords['exposure'])
    exposure_units = exposure_definitions['units']

    def read_count(field_name):
        """Fetch a count from the analysis row, defaulting to 0.

        The field is absent when no feature falls in that hazard class.
        """
        try:
            return analysis_row[field_name]
        except KeyError:
            return 0

    # Default: first (smallest) unit, no conversion.
    exposure_unit = exposure_units[0]
    coefficient = 1
    if len(exposure_units) > 1:
        # We use only two units for now. Switch to the bigger unit when
        # every non-zero count is at least one whole big unit.
        delta = coefficient_between_units(
            exposure_units[1], exposure_units[0])
        all_values_are_greater = True
        for hazard_class in hazard_classes:
            count = read_count(
                hazard_count_field['field_name'] % hazard_class['key'])
            if 0 < count < delta:
                # 0 is fine, we can still keep the second unit.
                all_values_are_greater = False
        if all_values_are_greater:
            exposure_unit = exposure_units[1]
            coefficient = delta

    # In debug mode we don't round number.
    enable_rounding = not debug_mode

    classes = OrderedDict()
    last_index = len(hazard_classes) - 1
    for i, hazard_class in enumerate(hazard_classes):
        value = format_number(
            read_count(hazard_count_field['field_name'] % hazard_class['key']),
            enable_rounding,
            exposure_definitions['use_population_rounding'],
            coefficient)

        # For continuous hazards, attach the class bounds to the label:
        # the first class shows only a minimum, the last only a maximum.
        minimum = None
        maximum = None
        if thresholds:
            bounds = thresholds[hazard_class['key']]
            if i == 0:
                minimum = bounds[0]
            elif i == last_index:
                maximum = bounds[1]
            else:
                minimum = bounds[0]
                maximum = bounds[1]

        label = _format_label(
            hazard_class=hazard_class['name'],
            value=value,
            exposure_unit=exposure_unit['abbreviation'],
            minimum=minimum,
            maximum=maximum,
            hazard_unit=hazard_unit)

        classes[hazard_class['key']] = (hazard_class['color'], label)

    # The "not exposed" class goes last; debug mode always shows it.
    if exposure_definitions['display_not_exposed'] or debug_mode:
        classes[not_exposed_class['key']] = _add_not_exposed(
            analysis_row,
            enable_rounding,
            exposure_definitions['use_population_rounding'],
            exposure_unit['abbreviation'],
            coefficient)

    return classes
Exemplo n.º 23
0
def aggregation_result_extractor(impact_report, component_metadata):
    """Extracting aggregation result of breakdown from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    # NOTE(review): exposure_summary_table_fields stays unbound when the
    # table is falsy, yet it is used unconditionally below -- confirm the
    # table is always present for itemizable exposure types.
    aggregation_summary = impact_report.aggregation_summary
    aggregation_summary_fields = aggregation_summary.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    """Filtering report sections"""

    # Only process for applicable exposure types
    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Rounding is disabled in debug mode; population-style rounding is only
    # applied for population exposure.
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # For now aggregation report only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context
    """Generating type name for columns"""

    type_fields = read_dynamic_inasafe_field(aggregation_summary_fields,
                                             affected_exposure_count_field)
    # do not include total, to preserve ordering and proper reference
    type_fields.remove('total')

    # we need to sort the column
    # get the classes lists
    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort columns based on class order
    # create function to sort
    def sort_classes(_type_field):
        """Sort method to retrieve exposure class key index."""
        # class key is the type field name
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _type_field == _exposure_class['key']:
                index = i
                break
        else:
            # type fields not found in the definitions sort first
            index = -1

        return index

    # sort
    type_fields = sorted(type_fields, key=sort_classes)

    # generate type_header_labels for column header
    type_header_labels = []
    for type_name in type_fields:
        type_label = tr(type_name.capitalize())
        type_header_labels.append(type_label)
    """Generating values for rows"""

    # generate rows of values for values of each column
    rows = []
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    total_field_index = aggregation_summary.fieldNameIndex(
        total_affected_field['field_name'])

    type_field_index = []
    for type_name in type_fields:
        field_name = affected_exposure_count_field['field_name'] % type_name
        type_index = aggregation_summary.fieldNameIndex(field_name)
        type_field_index.append(type_index)

    for feat in aggregation_summary.getFeatures():
        total_affected_value = format_number(feat[total_field_index],
                                             enable_rounding=is_rounded,
                                             is_population=is_population)
        if total_affected_value == '0':
            # skip aggregation type if the total affected is zero
            continue
        item = {
            # Name is the header for each row
            'name': feat[aggregation_name_index],
            # Total is the total for each row
            'total': total_affected_value
        }
        # Type values is the values for each column in each row
        type_values = []
        for idx in type_field_index:
            affected_value = format_number(feat[idx],
                                           enable_rounding=is_rounded)
            type_values.append(affected_value)
        item['type_values'] = type_values
        rows.append(item)
    """Generate total for footers"""

    # calculate total values for each type. Taken from exposure summary table
    type_total_values = []
    # Get affected field index
    affected_field_index = exposure_summary_table.fieldNameIndex(
        total_affected_field['field_name'])

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    breakdown_field_name = breakdown_field['field_name']
    breakdown_field_index = exposure_summary_table.fieldNameIndex(
        breakdown_field_name)

    # Fetch total affected for each breakdown name
    value_dict = {}
    for feat in exposure_summary_table.getFeatures():
        # exposure summary table is in csv format, so the field returned is
        # always in text format
        affected_value = int(float(feat[affected_field_index]))
        affected_value = format_number(affected_value,
                                       enable_rounding=is_rounded,
                                       is_population=is_population)
        value_dict[feat[breakdown_field_index]] = affected_value

    if value_dict:
        for type_name in type_fields:
            affected_value_string_formatted = value_dict[type_name]
            if affected_value_string_formatted == '0':
                # if total affected for breakdown type is zero
                # current column index
                # NOTE: column_index tracks how many columns were kept so
                # far; the header/row pruning below relies on this order.
                column_index = len(type_total_values)
                # cut column header
                type_header_labels = (type_header_labels[:column_index] +
                                      type_header_labels[column_index + 1:])
                # cut all row values for the column
                for item in rows:
                    type_values = item['type_values']
                    item['type_values'] = (type_values[:column_index] +
                                           type_values[column_index + 1:])
                continue
            type_total_values.append(affected_value_string_formatted)
    """Get the super total affected"""

    # total for affected (super total)
    # Python 2 iterator protocol; the analysis layer has a single feature.
    analysis_feature = analysis_layer.getFeatures().next()
    field_index = analysis_layer.fieldNameIndex(
        total_affected_field['field_name'])
    total_all = format_number(analysis_feature[field_index],
                              enable_rounding=is_rounded)
    """Generate and format the context"""
    aggregation_area_default_header = resolve_from_dictionary(
        extra_args, 'aggregation_area_default_header')
    header_label = (aggregation_summary.title()
                    or aggregation_area_default_header)

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'], unit=unit_string)
    # collapse any repeated whitespace left by an empty unit placeholder
    table_header = ' '.join(table_header.split())

    section_header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    total_in_aggregation_header = resolve_from_dictionary(
        extra_args, 'total_in_aggregation_header')
    context['header'] = section_header
    context['notes'] = notes
    context['aggregation_result'] = {
        'table_header': table_header,
        'header_label': header_label,
        'type_header_labels': type_header_labels,
        'total_label': total_header,
        'total_in_aggregation_area_label': total_in_aggregation_header,
        'rows': rows,
        'type_total_values': type_total_values,
        'total_all': total_all,
    }
    return context
Exemplo n.º 24
0
def aggregation_result_extractor(impact_report, component_metadata):
    """Extracting aggregation result of breakdown from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}

    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    aggregation_summary = impact_report.aggregation_summary
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode

    """Filtering report sections"""

    # Only process for applicable exposure types
    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # For now aggregation report only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')]
    if exposure_type not in itemizable_exposures_all:
        return context

    """Generating type name for columns"""

    type_fields = read_dynamic_inasafe_field(
        aggregation_summary_fields, affected_exposure_count_field)
    # do not include total, to preserve ordering and proper reference
    type_fields.remove('total')

    # we need to sort the column
    # get the classes lists
    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort columns based on class order
    # create function to sort
    def sort_classes(_type_field):
        """Sort method to retrieve exposure class key index."""
        # class key is the type field name
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _type_field == _exposure_class['key']:
                index = i
                break
        else:
            # unknown class keys sort before known ones
            index = -1

        return index

    # sort
    type_fields = sorted(type_fields, key=sort_classes)

    # generate type_header_labels for column header
    type_header_labels = []
    for type_name in type_fields:
        type_label = tr(type_name.capitalize())
        type_header_labels.append(type_label)

    """Generating values for rows"""

    # generate rows of values for values of each column
    rows = []
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    total_field_index = aggregation_summary.fieldNameIndex(
        total_affected_field['field_name'])

    type_field_index = []
    for type_name in type_fields:
        field_name = affected_exposure_count_field['field_name'] % type_name
        type_index = aggregation_summary.fieldNameIndex(field_name)
        type_field_index.append(type_index)

    for feat in aggregation_summary.getFeatures():
        total_affected_value = format_number(
            feat[total_field_index],
            enable_rounding=is_rounded,
            is_population=is_population)
        if total_affected_value == '0':
            # skip aggregation type if the total affected is zero
            continue
        item = {
            # Name is the header for each row
            'name': feat[aggregation_name_index],
            # Total is the total for each row
            'total': total_affected_value
        }
        # Type values is the values for each column in each row
        type_values = []
        for idx in type_field_index:
            # Pass is_population so breakdown values use the same
            # population rounding as the row totals above.
            affected_value = format_number(
                feat[idx],
                enable_rounding=is_rounded,
                is_population=is_population)
            type_values.append(affected_value)
        item['type_values'] = type_values
        rows.append(item)

    """Generate total for footers"""

    # calculate total values for each type. Taken from exposure summary table
    type_total_values = []
    # Get affected field index
    affected_field_index = exposure_summary_table.fieldNameIndex(
        total_affected_field['field_name'])

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [
        exposure_type_field,
        exposure_class_field
    ]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    breakdown_field_name = breakdown_field['field_name']
    breakdown_field_index = exposure_summary_table.fieldNameIndex(
        breakdown_field_name)

    # Fetch total affected for each breakdown name
    value_dict = {}
    for feat in exposure_summary_table.getFeatures():
        # exposure summary table is in csv format, so the field returned is
        # always in text format
        affected_value = int(float(feat[affected_field_index]))
        affected_value = format_number(
            affected_value,
            enable_rounding=is_rounded,
            is_population=is_population)
        value_dict[feat[breakdown_field_index]] = affected_value

    if value_dict:
        for type_name in type_fields:
            affected_value_string_formatted = value_dict[type_name]
            if affected_value_string_formatted == '0':
                # if total affected for breakdown type is zero
                # current column index
                column_index = len(type_total_values)
                # cut column header
                type_header_labels = (
                    type_header_labels[:column_index] +
                    type_header_labels[column_index + 1:])
                # cut all row values for the column
                for item in rows:
                    type_values = item['type_values']
                    item['type_values'] = (
                        type_values[:column_index] +
                        type_values[column_index + 1:])
                continue
            type_total_values.append(affected_value_string_formatted)

    """Get the super total affected"""

    # total for affected (super total); the analysis layer holds a single
    # summary feature.  Use builtin next() so this works on Python 2 and 3.
    analysis_feature = next(analysis_layer.getFeatures())
    field_index = analysis_layer.fieldNameIndex(
        total_affected_field['field_name'])
    # Pass is_population here too, for consistency with the per-row and
    # footer totals above.
    total_all = format_number(
        analysis_feature[field_index],
        enable_rounding=is_rounded,
        is_population=is_population)

    """Generate and format the context"""
    aggregation_area_default_header = resolve_from_dictionary(
        extra_args, 'aggregation_area_default_header')
    header_label = (
        aggregation_summary.title() or aggregation_area_default_header)

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string)
    # collapse repeated whitespace left by an empty unit placeholder
    table_header = ' '.join(table_header.split())

    section_header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    total_in_aggregation_header = resolve_from_dictionary(
        extra_args, 'total_in_aggregation_header')
    context['header'] = section_header
    context['notes'] = notes
    context['aggregation_result'] = {
        'table_header': table_header,
        'header_label': header_label,
        'type_header_labels': type_header_labels,
        'total_label': total_header,
        'total_in_aggregation_area_label': total_in_aggregation_header,
        'rows': rows,
        'type_total_values': type_total_values,
        'total_all': total_all,
    }
    return context
Exemplo n.º 25
0
def minimum_needs_extractor(impact_report, component_metadata):
    """Extracting minimum needs of the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    analysis_layer = impact_report.analysis
    analysis_keywords = analysis_layer.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    is_rounding = not debug_mode

    header = resolve_from_dictionary(extra_args, 'header')
    context['header'] = header

    # check if displaced is not zero
    try:
        displaced_field_name = analysis_keywords[displaced_field['key']]
        total_displaced = value_from_field_name(
            displaced_field_name, analysis_layer)
        if total_displaced == 0:
            zero_displaced_message = resolve_from_dictionary(
                extra_args, 'zero_displaced_message')
            context['zero_displaced'] = {
                'status': True,
                'message': zero_displaced_message
            }
            return context
    except KeyError:
        # in case no displaced field
        pass

    # minimum needs calculation only affect population type exposure
    # check if analysis keyword have minimum_needs keywords
    have_minimum_needs_field = False
    for field_key in analysis_keywords:
        if field_key.startswith(minimum_needs_namespace):
            have_minimum_needs_field = True
            break

    if not have_minimum_needs_field:
        return context

    frequencies = {}
    # map each needs to its frequency groups
    for field in (minimum_needs_fields + additional_minimum_needs):
        need_parameter = field.get('need_parameter')
        if isinstance(need_parameter, ResourceParameter):
            frequency = need_parameter.frequency
        else:
            frequency = field.get('frequency')

        if frequency:
            if frequency not in frequencies:
                frequencies[frequency] = [field]
            else:
                frequencies[frequency].append(field)

    needs = []
    # the analysis layer holds a single summary feature; builtin next()
    # works on both Python 2 and 3 iterators
    analysis_feature = next(analysis_layer.getFeatures())
    header_frequency_format = resolve_from_dictionary(
        extra_args, 'header_frequency_format')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    need_header_format = resolve_from_dictionary(
        extra_args, 'need_header_format')
    # group the needs by frequency.  items() instead of iteritems() so
    # this also runs under Python 3.
    for key, frequency in frequencies.items():
        group = {
            'header': header_frequency_format.format(frequency=tr(key)),
            'total_header': total_header,
            'needs': []
        }
        for field in frequency:
            # check value exists in the field
            field_idx = analysis_layer.fieldNameIndex(field['field_name'])
            if field_idx == -1:
                # skip if field doesn't exists
                continue
            value = format_number(
                analysis_feature[field_idx],
                enable_rounding=is_rounding,
                is_population=True)

            if field.get('need_parameter'):
                need_parameter = field['need_parameter']
                """:type: ResourceParameter"""
                name = tr(need_parameter.name)
                unit_abbreviation = need_parameter.unit.abbreviation

            else:
                if field.get('header_name'):
                    name = field.get('header_name')
                else:
                    name = field.get('name')

                # Reset for every field: previously a field without a
                # 'unit' entry reused the abbreviation from the previous
                # iteration (or raised NameError on the first one).
                unit_abbreviation = None
                need_unit = field.get('unit')
                if need_unit:
                    unit_abbreviation = need_unit.get('abbreviation')

            if unit_abbreviation:
                header = need_header_format.format(
                    name=name, unit_abbreviation=unit_abbreviation)
            else:
                header = name

            item = {'header': header, 'value': value}
            group['needs'].append(item)
        needs.append(group)

    context['needs'] = needs

    return context
Exemplo n.º 26
0
def multi_exposure_general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.3
    """
    context = {}
    extra_args = component_metadata.extra_args

    multi_exposure = impact_report.multi_exposure_impact_function
    analysis_layer = impact_report.analysis
    provenances = [
        impact_function.provenance for impact_function in (
            multi_exposure.impact_functions)]
    debug_mode = multi_exposure.debug
    population_exist = False

    hazard_keywords = provenances[0]['hazard_keywords']
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Summarize every value needed for each exposure and extract later to the
    # context.
    summary = []
    map_legend_titles = []
    hazard_classifications = {}
    exposures_stats = []
    for provenance in provenances:
        map_legend_title = provenance['map_legend_title']
        map_legend_titles.append(map_legend_title)
        exposure_stats = {}
        exposure_keywords = provenance['exposure_keywords']
        exposure_type = definition(exposure_keywords['exposure'])
        exposure_stats['exposure'] = exposure_type
        # Only round the number when it is population exposure and it is not
        # in debug mode
        is_rounded = not debug_mode
        is_population = exposure_type is exposure_population
        if is_population:
            population_exist = True

        analysis_feature = next(analysis_layer.getFeatures())
        analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

        exposure_unit = exposure_type['units'][0]
        if exposure_unit['abbreviation']:
            value_header = '{measure} ({abbreviation})'.format(
                measure=map_legend_title,
                abbreviation=exposure_unit['abbreviation'])
        else:
            value_header = '{name}'.format(name=map_legend_title)

        exposure_stats['value_header'] = value_header

        # Get hazard classification
        hazard_classification = definition(
            active_classification(hazard_keywords,
                                  exposure_keywords['exposure']))
        hazard_classifications[exposure_type['key']] = hazard_classification

        # Initialise both result mappings up front so the reported fields
        # loop below never hits a NameError when this exposure has no
        # hazard classification.
        classification_result = {}
        reported_fields_result = {}
        # in case there is a classification
        if hazard_classification:
            for hazard_class in hazard_classification['classes']:
                # hazard_count_field is a dynamic field with hazard class
                # as parameter
                field_key_name = exposure_hazard_count_field['key'] % (
                    exposure_type['key'], hazard_class['key'])
                try:
                    # retrieve dynamic field name from analysis_fields keywords
                    # will cause key error if no hazard count for that
                    # particular class
                    field_name = analysis_inasafe_fields[field_key_name]
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    hazard_value = format_number(
                        analysis_feature[field_index],
                        use_rounding=is_rounded,
                        is_population=is_population)
                except KeyError:
                    # in case the field was not found
                    hazard_value = 0

                classification_result[hazard_class['key']] = hazard_value

            # find total field
            try:
                field_key_name = exposure_total_exposed_field['key'] % (
                    exposure_type['key'])
                field_name = analysis_inasafe_fields[field_key_name]
                total = value_from_field_name(field_name, analysis_layer)
                total = format_number(
                    total, use_rounding=is_rounded,
                    is_population=is_population)
                classification_result[total_exposed_field['key']] = total
            except KeyError:
                pass

            exposure_stats['classification_result'] = classification_result

        for item in reported_fields:
            field = item.get('field')
            multi_exposure_field = item.get('multi_exposure_field')
            row_value = '-'
            if multi_exposure_field:
                field_key = (
                    multi_exposure_field['key'] % (exposure_type['key']))
                field_name = (
                    multi_exposure_field['field_name'] % (
                        exposure_type['key']))
                if field_key in analysis_inasafe_fields:
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    row_value = format_number(
                        analysis_feature[field_index],
                        use_rounding=is_rounded,
                        is_population=is_population)

            elif field in [displaced_field, fatalities_field]:
                if field['key'] in analysis_inasafe_fields and is_population:
                    field_index = analysis_layer.fields(
                    ).lookupField(field['name'])
                    if field == fatalities_field:
                        # For fatalities field, we show a range of number
                        # instead
                        row_value = fatalities_range(
                            analysis_feature[field_index])
                    else:
                        row_value = format_number(
                            analysis_feature[field_index],
                            use_rounding=is_rounded,
                            is_population=is_population)

            reported_fields_result[field['key']] = row_value

        exposure_stats['reported_fields_result'] = reported_fields_result
        exposures_stats.append(exposure_stats)

    # After finish summarizing value, then proceed to context extraction.

    # find total value and labels for each exposure
    value_headers = []
    total_values = []
    for exposure_stats in exposures_stats:
        # label
        value_header = exposure_stats['value_header']
        value_headers.append(value_header)

        # total value
        classification_result = exposure_stats['classification_result']
        total_value = classification_result[total_exposed_field['key']]
        total_values.append(total_value)

    classifications = list(hazard_classifications.values())
    # Guard the indexing so an empty classification list cannot raise
    # IndexError before the branch below checks it.
    is_item_identical = bool(classifications) and (
        classifications.count(classifications[0]) == len(classifications))
    if classifications and is_item_identical:
        hazard_stats = []
        for hazard_class in classifications[0]['classes']:
            values = []
            for exposure_stats in exposures_stats:
                classification_result = exposure_stats['classification_result']
                value = classification_result[hazard_class['key']]
                values.append(value)
            stats = {
                'key': hazard_class['key'],
                'name': hazard_class['name'],
                'numbers': values
            }
            hazard_stats.append(stats)

        total_stats = {
            'key': total_exposed_field['key'],
            'name': total_exposed_field['name'],
            'as_header': True,
            'numbers': total_values
        }
        hazard_stats.append(total_stats)

        summary.append({
            'header_label': hazard_header,
            'value_labels': value_headers,
            'rows': hazard_stats
        })
    # if there are different hazard classifications used in the analysis,
    # we will create a separate table for each hazard classification
    else:
        hazard_classification_groups = {}
        for exposure_key, hazard_classification in (
                iter(list(hazard_classifications.items()))):
            exposure_type = definition(exposure_key)
            if hazard_classification['key'] not in (
                    hazard_classification_groups):
                hazard_classification_groups[hazard_classification['key']] = [
                    exposure_type]
            else:
                hazard_classification_groups[
                    hazard_classification['key']].append(exposure_type)

        for hazard_classification_key, exposures in (
                iter(list(hazard_classification_groups.items()))):
            custom_headers = []
            custom_total_values = []
            # find total value and labels for each exposure
            for exposure_stats in exposures_stats:
                if exposure_stats['exposure'] not in exposures:
                    continue
                # label
                value_header = exposure_stats['value_header']
                custom_headers.append(value_header)

                # total value
                classification_result = exposure_stats['classification_result']
                total_value = classification_result[total_exposed_field['key']]
                custom_total_values.append(total_value)

            hazard_stats = []
            hazard_classification = definition(hazard_classification_key)
            for hazard_class in hazard_classification['classes']:
                values = []
                for exposure_stats in exposures_stats:
                    if exposure_stats['exposure'] not in exposures:
                        continue
                    classification_result = exposure_stats[
                        'classification_result']
                    value = classification_result[hazard_class['key']]
                    values.append(value)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_class['name'],
                    'numbers': values
                }
                hazard_stats.append(stats)

            total_stats = {
                'key': total_exposed_field['key'],
                'name': total_exposed_field['name'],
                'as_header': True,
                'numbers': custom_total_values
            }
            hazard_stats.append(total_stats)

            summary.append({
                'header_label': hazard_header,
                'value_labels': custom_headers,
                'rows': hazard_stats
            })

    reported_fields_stats = []
    for item in reported_fields:
        field = item.get('field')
        values = []
        for exposure_stats in exposures_stats:
            reported_fields_result = exposure_stats['reported_fields_result']
            value = reported_fields_result[field['key']]
            values.append(value)
        stats = {
            'key': field['key'],
            'name': item['header'],
            'numbers': values
        }
        reported_fields_stats.append(stats)

    header_label = resolve_from_dictionary(
        extra_args, ['reported_fields_header'])
    summary.append({
        'header_label': header_label,
        'value_labels': value_headers,
        'rows': reported_fields_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])

    # Equivalent to the original index loop: comma-separate the titles.
    combined_map_legend_title = ', '.join(map_legend_titles)

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')
    table_header = table_header_format.format(
        title=combined_map_legend_title,
        unit=classifications[0]['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(
        extra_args, ['concept_notes', 'note_format'])

    concepts = resolve_from_dictionary(
        extra_args, ['concept_notes', 'general_concepts'])

    if population_exist:
        # Concatenate into a NEW list: the previous in-place += mutated the
        # list stored inside extra_args, so population concepts accumulated
        # across repeated calls.
        concepts = concepts + resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context