Example 1
def action_checklist_report_extractor(impact_report, component_metadata):
    """Extract the impact layer's action checklist into a standalone report.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.1
    """
    extra_args = component_metadata.extra_args
    sub_components = resolve_from_dictionary(extra_args, 'components_list')

    context = {
        'brand_logo': resource_url(
            resources_path('img', 'logos', 'inasafe-logo-white.png')),
    }

    # render every sub component and store its output under its own key
    for context_key, sub_component in list(sub_components.items()):
        context[context_key] = jinja2_output_as_string(
            impact_report, sub_component['key'])

    context['inasafe_resources_base_dir'] = resources_path()

    return context
Example 2
def action_checklist_extractor(impact_report, component_metadata):
    """Extract the action checklist of the exposure layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args

    # the checklist items themselves come straight from provenance
    return {
        'component_key': component_metadata.key,
        'header': resolve_from_dictionary(extra_args, 'header'),
        'items': provenance['action_checklist'],
    }
Example 3
def multi_exposure_analysis_question_extractor(
        impact_report, component_metadata):
    """Extract the analysis question from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.3
    """
    extra_args = component_metadata.extra_args
    provenance = impact_report.multi_exposure_impact_function.provenance

    # the template expects a list of questions, even though a
    # multi-exposure analysis yields a single combined question
    return {
        'component_key': component_metadata.key,
        'header': resolve_from_dictionary(extra_args, 'header'),
        'analysis_questions': [provenance['analysis_question']],
    }
Example 4
def infographic_layout_extractor(impact_report, component_metadata):
    """Extract the infographic result and wrap it in a layout.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    provenance = impact_report.impact_function.provenance

    component_keys = resolve_from_dictionary(extra_args, ['infographics'])

    # concatenate the rendered output of every infographic component,
    # skipping components that produced nothing
    rendered = [
        jinja2_output_as_string(impact_report, component_key)
        for component_key in component_keys]
    infographic_content = ''.join(part for part in rendered if part)

    # nothing rendered: return an empty context so no layout is produced
    if not infographic_content:
        return context

    context['inasafe_resources_base_dir'] = safe_dir(sub_dir='../resources')
    context['infographic_content'] = infographic_content

    analysis_start = provenance['start_datetime']
    footer_format = resolve_from_dictionary(extra_args, 'footer_format')
    context['footer'] = footer_format.format(
        version=provenance['inasafe_version'],
        analysis_date=analysis_start.strftime('%Y-%m-%d'),
        analysis_time=analysis_start.strftime('%H:%M'))
    return context
Example 5
    def test_resolve_from_dictionary(self):
        """Test resolve_from_dictionary method.

        .. versionadded:: 4.0
        """
        test_dict = {
            'foo': {'bar': {'bin': {'baz': 1}}},
            'foobar': 10,
        }

        # deeply nested path resolves through every level
        self.assertEqual(
            1,
            resolve_from_dictionary(
                test_dict, ['foo', 'bar', 'bin', 'baz']))

        # a single key can be wrapped in a one-element list
        self.assertEqual(
            10, resolve_from_dictionary(test_dict, ['foobar']))

        # ... or passed directly as shorthand
        self.assertEqual(
            10, resolve_from_dictionary(test_dict, 'foobar'))
Example 6
def impact_table_pdf_extractor(impact_report, component_metadata):
    """Extract the impact summary of the impact layer, for PDF generation.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    # QGIS Composer needed certain context to generate the output
    # - Map Settings
    # - Substitution maps
    # - Element settings, such as icon for picture file or image source

    context = QGISComposerContext()
    extra_args = component_metadata.extra_args

    html_component_key = resolve_from_dictionary(
        extra_args, ['html_report_component_key'])

    rendered_html = jinja2_output_as_string(
        impact_report, html_component_key)

    # this composition only needs a single html frame element
    context.html_frame_elements = [
        {
            'id': 'impact-report',
            'mode': 'text',
            'text': rendered_html,
            'margin_left': 10,
            'margin_top': 10,
        }
    ]
    return context
Example 7
def impact_table_extractor(impact_report, component_metadata):
    """Extracting impact summary of the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    debug_mode = impact_report.impact_function.debug_mode

    # shallow copy so the pop() below does not mutate the dictionary
    # shared through the component metadata
    components_list = dict(resolve_from_dictionary(
        extra_args, 'components_list'))

    # TODO: Decide either to use it or not
    if not debug_mode:
        # only show experimental MMI Detail when in debug mode
        components_list.pop('mmi_detail', None)

    context['brand_logo'] = resource_url(
        resources_path('img', 'logos', 'inasafe-logo-white.png'))
    # dict.iteritems() is Python 2 only and was removed in Python 3;
    # use items() (other extractors in this file already do)
    for key, component in components_list.items():
        context[key] = jinja2_output_as_string(
            impact_report, component['key'])

    context['inasafe_resources_base_dir'] = resources_path()

    return context
Example 8
def impact_table_extractor(impact_report, component_metadata):
    """Extracting impact summary of the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    debug_mode = impact_report.impact_function.debug_mode

    # shallow copy so the pop() below does not mutate the dictionary
    # shared through the component metadata
    components_list = dict(
        resolve_from_dictionary(extra_args, 'components_list'))

    # TODO: Decide either to use it or not
    if not debug_mode:
        # only show experimental MMI Detail when in debug mode
        components_list.pop('mmi_detail', None)

    context['brand_logo'] = resource_url(
        resources_path('img', 'logos', 'inasafe-logo-white.png'))
    # dict.iteritems() is Python 2 only and was removed in Python 3;
    # use items() instead
    for key, component in components_list.items():
        context[key] = jinja2_output_as_string(impact_report, component['key'])

    resources_dir = safe_dir(sub_dir='../resources')
    context['inasafe_resources_base_dir'] = resources_dir

    return context
Example 9
def qgis_composer_extractor(impact_report, component_metadata):
    """Extract composer context.

    This method extract necessary context for a given impact report and
    component metadata and save the context so it can be used in composer
    rendering phase

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    # QGIS Composer needed certain context to generate the output
    # - Map Settings
    # - Substitution maps
    # - Element settings, such as icon for picture file or image source

    # Generate map settings
    qgis_context = impact_report.qgis_composition_context
    inasafe_context = impact_report.inasafe_context
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args

    context = QGISComposerContext()

    # Set default image elements to replace
    image_elements = [
        {
            'id': 'safe-logo',
            'path': inasafe_context.inasafe_logo
        },
        {
            'id': 'black-inasafe-logo',
            'path': inasafe_context.black_inasafe_logo
        },
        {
            'id': 'white-inasafe-logo',
            'path': inasafe_context.white_inasafe_logo
        },
        {
            'id': 'north-arrow',
            'path': inasafe_context.north_arrow
        },
        {
            'id': 'organisation-logo',
            'path': inasafe_context.organisation_logo
        },
        {
            'id': 'supporters_logo',
            'path': inasafe_context.supporters_logo
        }
    ]
    context.image_elements = image_elements

    # Set default HTML Frame elements to replace
    html_frame_elements = [
        {
            'id': 'impact-report',
            'mode': 'text',  # another mode is url
            'text': '',  # TODO: get impact summary table
        }
    ]
    context.html_frame_elements = html_frame_elements

    """Define the layers for the impact map."""

    project = QgsProject.instance()
    layers = []

    # collect the impact layer of every sub-analysis; used later when
    # building the legend for a multi-exposure analysis
    exposure_summary_layers = []
    if impact_report.multi_exposure_impact_function:
        for impact_function in (
                impact_report.multi_exposure_impact_function.impact_functions):
            impact_layer = impact_function.exposure_summary or (
                impact_function.aggregate_hazard_impacted)
            exposure_summary_layers.append(impact_layer)

    # use custom ordered layer if any
    if impact_report.ordered_layers:
        for layer in impact_report.ordered_layers:
            layers.append(layer)

        # We are keeping this if we want to enable below behaviour again.
        # Currently realtime might have layer order without impact layer in it.

        # # make sure at least there is an impact layer
        # if impact_report.multi_exposure_impact_function:
        #     additional_layers = []  # for exposure summary layers
        #     impact_layer_found = False
        #     impact_functions = (
        #        impact_report.multi_exposure_impact_function.impact_functions)
        #     # check for impact layer occurrences
        #     for analysis in impact_functions:
        #         impact_layer = analysis.exposure_summary or (
        #             analysis.aggregate_hazard_impacted)
        #         for index, layer in enumerate(layers):
        #             if impact_layer.source() == layer.source():
        #                 add_impact_layers_to_canvas(analysis)
        #                 layers[index] = impact_layer
        #                 impact_layer_found = True
        #     if not impact_layer_found:
        #         for analysis in impact_functions:
        #             add_impact_layers_to_canvas(analysis)
        #             impact_layer = analysis.exposure_summary or (
        #                 analysis.aggregate_hazard_impacted)
        #             layer_uri = full_layer_uri(impact_layer)
        #             layer = load_layer_from_registry(layer_uri)
        #             additional_layers.append(layer)
        #     layers = additional_layers + layers
        # else:
        #     impact_layer = (
        #         impact_report.impact_function.exposure_summary or (
        #             impact_report.impact_function.aggregate_hazard_impacted))
        #     if impact_layer not in layers:
        #         layers.insert(0, impact_layer)

    # use default layer order if no custom ordered layer found
    else:
        if not impact_report.multi_exposure_impact_function:  # single IF
            layers = [impact_report.impact] + impact_report.extra_layers
        else:  # multi-exposure IF
            layers = [] + impact_report.extra_layers

        # hazard/aggregation/exposure layers are only appended when the
        # multi-exposure IF did not already provide an explicit ordering
        add_supplementary_layers = (
            not impact_report.multi_exposure_impact_function or not (
                impact_report.multi_exposure_impact_function.
                output_layers_ordered)
        )
        if add_supplementary_layers:
            # Check show only impact.
            show_only_impact = setting(
                'set_show_only_impact_on_report', expected_type=bool)
            if not show_only_impact:
                hazard_layer = project.mapLayers().get(
                    provenance['hazard_layer_id'], None)

                aggregation_layer_id = provenance['aggregation_layer_id']
                if aggregation_layer_id:
                    aggregation_layer = project.mapLayers().get(
                        aggregation_layer_id, None)
                    layers.append(aggregation_layer)

                layers.append(hazard_layer)

            # check hide exposure settings
            hide_exposure_flag = setting(
                'setHideExposureFlag', expected_type=bool)
            if not hide_exposure_flag:
                # single-exposure provenance stores one layer id,
                # multi-exposure provenance stores a list of ids
                exposure_layers_id = []
                if provenance.get(
                        provenance_exposure_layer_id['provenance_key']):
                    exposure_layers_id.append(
                        provenance.get(
                            provenance_exposure_layer_id['provenance_key']))
                elif provenance.get(
                        provenance_multi_exposure_layers_id['provenance_key']):
                    exposure_layers_id = provenance.get(
                        provenance_multi_exposure_layers_id['provenance_key'])

                # place exposure at the bottom
                for layer_id in exposure_layers_id:
                    exposure_layer = project.mapLayers().get(layer_id)
                    layers.append(exposure_layer)

    # default extent is analysis extent
    if not qgis_context.extent:
        qgis_context.extent = impact_report.impact_function.analysis_extent

    map_elements = [
        {
            'id': 'impact-map',
            'extent': qgis_context.extent,
            'grid_split_count': 5,
            'layers': layers,
        }
    ]
    context.map_elements = map_elements

    # calculate map_legends, only show the legend for impact layer
    if impact_report.legend_layers:  # use requested legend if any
        layers = impact_report.legend_layers
    elif impact_report.multi_exposure_impact_function:  # multi-exposure IF
        layers = exposure_summary_layers
    else:  # single IF
        layers = [impact_report.impact]
    symbol_count = 0
    # probe the two legend APIs a layer object may expose; if neither is
    # available, count the layer as a single symbol
    for l in layers:
        layer = l
        """:type: qgis.core.QgsMapLayer"""
        try:
            symbol_count += len(layer.legendSymbologyItems())
            continue
        except Exception:  # pylint: disable=broad-except
            pass
        try:
            symbol_count += len(layer.renderer().legendSymbolItems())
            continue
        except Exception:  # pylint: disable=broad-except
            pass
        symbol_count += 1

    legend_title = provenance.get('map_legend_title') or ''

    map_legends = [
        {
            'id': 'impact-legend',
            'title': legend_title,
            'layers': layers,
            'symbol_count': symbol_count,
            # 'column_count': 2,  # the number of column in legend display
        }
    ]
    context.map_legends = map_legends

    # process substitution map
    start_datetime = provenance['start_datetime']
    """:type: datetime.datetime"""
    date_format = resolve_from_dictionary(extra_args, 'date-format')
    time_format = resolve_from_dictionary(extra_args, 'time-format')
    # fall back to empty strings when provenance holds a non-datetime value
    if isinstance(start_datetime, datetime.datetime):
        date = start_datetime.strftime(date_format)
        time = start_datetime.strftime(time_format)
    else:
        date = ''
        time = ''
    # shorten the full version string to its first three components (x.y.z)
    long_version = get_version()
    tokens = long_version.split('.')
    version = '%s.%s.%s' % (tokens[0], tokens[1], tokens[2])
    # Get title of the layer
    title = provenance.get('map_title') or ''

    # Set source
    unknown_source_text = resolve_from_dictionary(
        extra_args, ['defaults', 'unknown_source'])
    aggregation_not_used = resolve_from_dictionary(
        extra_args, ['defaults', 'aggregation_not_used'])

    hazard_source = (
        provenance.get(
            'hazard_keywords', {}).get('source') or unknown_source_text)
    exposure_source = (
        provenance.get(
            'exposure_keywords', {}).get('source') or unknown_source_text)
    if provenance['aggregation_layer']:
        aggregation_source = (
            provenance['aggregation_keywords'].get('source')
            or unknown_source_text)
    else:
        aggregation_source = aggregation_not_used

    spatial_reference_format = resolve_from_dictionary(
        extra_args, 'spatial-reference-format')
    reference_name = spatial_reference_format.format(
        crs=impact_report.impact_function.crs.authid())

    analysis_layer = impact_report.analysis
    analysis_name = value_from_field_name(
        analysis_name_field['field_name'], analysis_layer)

    # Prepare the substitution map
    version_title = resolve_from_dictionary(extra_args, 'version-title')
    disclaimer_title = resolve_from_dictionary(extra_args, 'disclaimer-title')
    date_title = resolve_from_dictionary(extra_args, 'date-title')
    time_title = resolve_from_dictionary(extra_args, 'time-title')
    caution_title = resolve_from_dictionary(extra_args, 'caution-title')
    caution_text = resolve_from_dictionary(extra_args, 'caution-text')
    version_text = resolve_from_dictionary(extra_args, 'version-text')
    legend_section_title = resolve_from_dictionary(
        extra_args, 'legend-title')
    information_title = resolve_from_dictionary(
        extra_args, 'information-title')
    supporters_title = resolve_from_dictionary(
        extra_args, 'supporters-title')
    source_title = resolve_from_dictionary(extra_args, 'source-title')
    analysis_title = resolve_from_dictionary(extra_args, 'analysis-title')
    reference_title = resolve_from_dictionary(
        extra_args, 'spatial-reference-title')
    substitution_map = {
        'impact-title': title,
        'date': date,
        'time': time,
        'safe-version': version,  # deprecated
        'disclaimer': inasafe_context.disclaimer,
        # These added in 3.2
        'version-title': version_title,
        'inasafe-version': version,
        'disclaimer-title': disclaimer_title,
        'date-title': date_title,
        'time-title': time_title,
        'caution-title': caution_title,
        'caution-text': caution_text,
        'version-text': version_text.format(version=version),
        'legend-title': legend_section_title,
        'information-title': information_title,
        'supporters-title': supporters_title,
        'source-title': source_title,
        'analysis-title': analysis_title,
        'analysis-name': analysis_name,
        'reference-title': reference_title,
        'reference-name': reference_name,
        'hazard-source': hazard_source,
        'exposure-source': exposure_source,
        'aggregation-source': aggregation_source,
    }
    context.substitution_map = substitution_map
    return context
Example 10
def infographic_people_section_notes_extractor(
        impact_report, component_metadata):
    """Extract the notes for the people section in the infographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.2
    """
    extra_args = component_metadata.extra_args
    hazard_layer = impact_report.hazard

    # first note: the free-form extra note configured on the component
    notes = [{
        'title': None,
        'description': resolve_from_dictionary(extra_args, 'extra_note'),
        'citations': None,
    }]

    # concept-based notes for affected and displaced people
    for concept_key in ['affected_people', 'displaced_people']:
        concept = concepts[concept_key]
        notes.append({
            'title': concept.get('name'),
            'description': concept.get('description'),
            'citations': concept.get('citations')[0]['text'],
        })

    hazard_classification = layer_hazard_classification(hazard_layer)

    # generate rate description
    rate_note_format = resolve_from_dictionary(
        extra_args, 'hazard_displacement_rates_note_format')
    rate_notes = []
    for hazard_class in hazard_classification['classes']:
        hazard_class['classification_unit'] = (
            hazard_classification['classification_unit'])
        rate_notes.append(rate_note_format.format(**hazard_class))

    displacement_concept = concepts['displacement_rate']
    notes.append({
        'title': displacement_concept.get('name'),
        'description': ', '.join(rate_notes),
        'citations': displacement_concept.get('citations')[0]['text'],
    })

    return {'notes': notes}
Example 11
def qgis_composer_extractor(impact_report, component_metadata):
    """Extract composer context.

    This method extract necessary context for a given impact report and
    component metadata and save the context so it can be used in composer
    rendering phase

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    # QGIS Composer needed certain context to generate the output
    # - Map Settings
    # - Substitution maps
    # - Element settings, such as icon for picture file or image source

    # Generate map settings
    qgis_context = impact_report.qgis_composition_context
    inasafe_context = impact_report.inasafe_context
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args

    context = QGISComposerContext()

    # Set default image elements to replace
    image_elements = [{
        'id': 'safe-logo',
        'path': inasafe_context.inasafe_logo
    }, {
        'id': 'black-inasafe-logo',
        'path': inasafe_context.black_inasafe_logo
    }, {
        'id': 'white-inasafe-logo',
        'path': inasafe_context.white_inasafe_logo
    }, {
        'id': 'north-arrow',
        'path': inasafe_context.north_arrow
    }, {
        'id': 'organisation-logo',
        'path': inasafe_context.organisation_logo
    }, {
        'id': 'supporters_logo',
        'path': inasafe_context.supporters_logo
    }]
    context.image_elements = image_elements

    # Set default HTML Frame elements to replace
    html_frame_elements = [{
        'id': 'impact-report',
        'mode': 'text',  # another mode is url
        'text': '',  # TODO: get impact summary table
    }]
    context.html_frame_elements = html_frame_elements

    # Set default map to resize

    # check show only impact
    show_only_impact = setting('set_show_only_impact_on_report', False, bool)
    layers = [impact_report.impact_function.impact]
    # NOTE(review): QgsMapLayerRegistry and rendererV2 below are QGIS 2-era
    # APIs — confirm the QGIS version this variant targets
    layer_registry = QgsMapLayerRegistry.instance()
    if not show_only_impact:
        hazard_layer = layer_registry.mapLayers().get(
            provenance['hazard_layer_id'], None)

        aggregation_layer_id = provenance['aggregation_layer_id']
        if aggregation_layer_id:
            aggregation_layer = layer_registry.mapLayers().get(
                aggregation_layer_id, None)
            layers.insert(0, aggregation_layer)

        layers.append(hazard_layer)

    # check hide exposure settings
    hide_exposure_flag = setting('setHideExposureFlag', False, bool)
    if not hide_exposure_flag:
        # place exposure at the bottom
        exposure_layer = layer_registry.mapLayers().get(
            provenance['exposure_layer_id'])
        layers.append(exposure_layer)

    # default extent is analysis extent
    if not qgis_context.extent:
        qgis_context.extent = impact_report.impact_function.analysis_extent

    map_elements = [{
        'id': 'impact-map',
        'extent': qgis_context.extent,
        'grid_split_count': 5,
        'layers': layers,
    }]
    context.map_elements = map_elements

    # calculate map_legends
    layers = [impact_report.impact] + impact_report.extra_layers
    symbol_count = 0
    # probe the two legend APIs a layer object may expose; if neither is
    # available, count the layer as a single symbol
    for l in layers:
        layer = l
        """:type: qgis.core.QgsMapLayer"""
        try:
            symbol_count += len(layer.legendSymbologyItems())
            continue
        except Exception:  # pylint: disable=broad-except
            pass
        try:
            symbol_count += len(layer.rendererV2().legendSymbolItemsV2())
            continue
        except Exception:  # pylint: disable=broad-except
            pass
        symbol_count += 1

    legend_title = provenance['map_legend_title'] or ''

    map_legends = [{
        'id': 'impact-legend',
        'title': legend_title,
        'layers': layers,
        'symbol_count': symbol_count,
        # 'column_count': 2,  # the number of column in legend display
    }]
    context.map_legends = map_legends

    # process substitution map
    date_time = provenance['datetime']
    """:type: datetime.datetime"""
    date_format = resolve_from_dictionary(extra_args, 'date-format')
    time_format = resolve_from_dictionary(extra_args, 'time-format')
    # fall back to empty strings when provenance holds a non-datetime value
    if isinstance(date_time, datetime.datetime):
        date = date_time.strftime(date_format)
        time = date_time.strftime(time_format)
    else:
        date = ''
        time = ''
    # shorten the full version string to its first three components (x.y.z)
    long_version = get_version()
    tokens = long_version.split('.')
    version = '%s.%s.%s' % (tokens[0], tokens[1], tokens[2])
    # Get title of the layer
    title = provenance['map_title']

    # Set source
    unknown_source_text = resolve_from_dictionary(
        extra_args, ['defaults', 'unknown_source'])
    aggregation_not_used = resolve_from_dictionary(
        extra_args, ['defaults', 'aggregation_not_used'])

    hazard_source = (provenance['hazard_keywords'].get('source')
                     or unknown_source_text)
    exposure_source = (provenance['exposure_keywords'].get('source')
                       or unknown_source_text)
    if provenance['aggregation_layer']:
        aggregation_source = (provenance['aggregation_keywords'].get('source')
                              or unknown_source_text)
    else:
        aggregation_source = aggregation_not_used

    spatial_reference_format = resolve_from_dictionary(
        extra_args, 'spatial-reference-format')
    reference_name = spatial_reference_format.format(
        crs=impact_report.impact_function.impact.crs().authid())

    analysis_layer = impact_report.analysis
    analysis_name = value_from_field_name(analysis_name_field['field_name'],
                                          analysis_layer)

    # Prepare the substitution map
    version_title = resolve_from_dictionary(extra_args, 'version-title')
    disclaimer_title = resolve_from_dictionary(extra_args, 'disclaimer-title')
    date_title = resolve_from_dictionary(extra_args, 'date-title')
    time_title = resolve_from_dictionary(extra_args, 'time-title')
    caution_title = resolve_from_dictionary(extra_args, 'caution-title')
    caution_text = resolve_from_dictionary(extra_args, 'caution-text')
    version_text = resolve_from_dictionary(extra_args, 'version-text')
    legend_section_title = resolve_from_dictionary(extra_args, 'legend-title')
    information_title = resolve_from_dictionary(extra_args,
                                                'information-title')
    supporters_title = resolve_from_dictionary(extra_args, 'supporters-title')
    source_title = resolve_from_dictionary(extra_args, 'source-title')
    analysis_title = resolve_from_dictionary(extra_args, 'analysis-title')
    reference_title = resolve_from_dictionary(extra_args,
                                              'spatial-reference-title')
    substitution_map = {
        'impact-title': title,
        'date': date,
        'time': time,
        'safe-version': version,  # deprecated
        'disclaimer': inasafe_context.disclaimer,
        # These added in 3.2
        'version-title': version_title,
        'inasafe-version': version,
        'disclaimer-title': disclaimer_title,
        'date-title': date_title,
        'time-title': time_title,
        'caution-title': caution_title,
        'caution-text': caution_text,
        'version-text': version_text.format(version=version),
        'legend-title': legend_section_title,
        'information-title': information_title,
        'supporters-title': supporters_title,
        'source-title': source_title,
        'analysis-title': analysis_title,
        'analysis-name': analysis_name,
        'reference-title': reference_title,
        'reference-name': reference_name,
        'hazard-source': hazard_source,
        'exposure-source': exposure_source,
        'aggregation-source': aggregation_source,
    }
    context.substitution_map = substitution_map
    return context
Example 12
def create_section_without_aggregation(aggregation_summary,
                                       analysis_layer,
                                       postprocessor_fields,
                                       section_header,
                                       units_label=None,
                                       use_rounding=True,
                                       extra_component_args=None):
    """Create demographic section context without aggregation.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorLayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract
    :type postprocessor_fields: list[dict]

    :param section_header: Section header text
    :type section_header: str

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param use_rounding: flag for rounding, affect number representations
    :type use_rounding: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for the demographic section
    :rtype: dict
    """
    aggregation_summary_fields = aggregation_summary.keywords['inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']

    # Keep only the postprocessor output fields that are actually present
    # in the aggregation summary.
    postprocessors_fields_found = [
        output_field for output_field in postprocessor_fields['fields']
        if output_field['key'] in aggregation_summary_fields]

    if not postprocessors_fields_found:
        return {}

    # Both layers must provide the displaced field, otherwise there is
    # no result to show.
    try:
        analysis_layer_fields[displaced_field['key']]
        aggregation_summary_fields[displaced_field['key']]
    except KeyError:
        # no displaced field, can't show result
        return {}

    # Generate header names for the columns. The first column header is
    # the section title, the second is the total population header.
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        section_header,
        total_population_header,
    ]

    # Generate values for rows.
    row_values = []
    for idx, output_field in enumerate(postprocessors_fields_found):

        name = output_field.get('header_name') or output_field.get('name')

        # Resolve the unit for this column: an explicit units_label entry
        # takes precedence over the field's own unit definition.
        unit = None
        if units_label:
            unit = units_label[idx]
        elif output_field.get('unit'):
            unit = output_field.get('unit').get('abbreviation')

        if unit:
            header = '{name} [{unit}]'.format(name=name, unit=unit)
        else:
            header = '{name}'.format(name=name)

        row = [header]

        # With no aggregation layer the aggregation summary only contains
        # one feature, so the value is read from the analysis layer.
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(field_name, analysis_layer)
        value = format_number(value,
                              use_rounding=use_rounding,
                              is_population=True)
        row.append(value)

        row_values.append(row)

    return {
        'columns': columns,
        'rows': row_values,
    }
Esempio n. 13
0
def aggregation_postprocessors_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {'sections': OrderedDict()}

    # Initializations.
    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    aggregation_summary = impact_report.aggregation_summary
    analysis_layer = impact_report.analysis
    analysis_layer_fields = impact_report.analysis.keywords['inasafe_fields']
    use_rounding = impact_report.impact_function.use_rounding
    use_aggregation = bool(
        impact_report.impact_function.provenance['aggregation_layer'])
    provenance = impact_report.impact_function.provenance
    exposure_keywords = provenance['exposure_keywords']

    # Get exposure type definition
    exposure_type = definition(exposure_keywords['exposure'])

    # this entire section is only for population exposure type
    if not exposure_type == exposure_population:
        return context

    # check zero displaced (there will be no output to display)
    try:
        displaced_field_name = analysis_layer_fields[displaced_field['key']]
        total_displaced = value_from_field_name(displaced_field_name,
                                                analysis_layer)

        zero_displaced = False
        if total_displaced == 0:
            zero_displaced = True
    except KeyError:
        # in case no displaced field
        # let each section handled itself
        zero_displaced = False

    context['component_key'] = component_metadata.key
    context['use_aggregation'] = use_aggregation
    context['header'] = resolve_from_dictionary(extra_args, 'header')

    group_header_format = resolve_from_dictionary(
        extra_args, ['defaults', 'group_header_format'])

    section_header_format = resolve_from_dictionary(
        extra_args, ['defaults', 'section_header_format'])
    if not use_aggregation:
        section_header_format = resolve_from_dictionary(
            extra_args, ['defaults', 'section_header_format_no_aggregation'])

    # Age Groups.
    age_items = {
        'group':
        age_displaced_count_group,
        'group_header':
        group_header_format.format(
            header_name=age_displaced_count_group['header_name']),
        'fields': [postprocessor_output_field(p) for p in age_postprocessors]
    }

    # check age_fields exists
    for field in age_items['fields']:
        if field['key'] in analysis_layer_fields:
            no_age_field = False
            break
    else:
        no_age_field = True

    context['sections']['age'] = []
    age_section_header = section_header_format.format(
        header_name=age_displaced_count_group['header_name'])
    if zero_displaced:
        context['sections']['age'].append({
            'header':
            age_section_header,
            'empty':
            True,
            'message':
            resolve_from_dictionary(extra_args,
                                    ['defaults', 'zero_displaced_message'])
        })
    elif no_age_field:
        context['sections']['age'].append({
            'header':
            age_section_header,
            'empty':
            True,
            'message':
            resolve_from_dictionary(extra_args,
                                    ['defaults', 'no_age_rate_message'])
        })
    else:
        context['sections']['age'].append(
            create_section(aggregation_summary,
                           analysis_layer,
                           age_items,
                           age_section_header,
                           use_aggregation=use_aggregation,
                           use_rounding=use_rounding,
                           extra_component_args=extra_args))

    # Gender Groups.
    gender_items = {
        'group':
        gender_displaced_count_group,
        'group_header':
        group_header_format.format(
            header_name=gender_displaced_count_group['header_name']),
        'fields':
        [postprocessor_output_field(p) for p in gender_postprocessors]
    }

    # check gender_fields exists
    for field in gender_items['fields']:
        if field['key'] in analysis_layer_fields:
            no_gender_field = False
            break
    else:
        no_gender_field = True

    context['sections']['gender'] = []
    gender_section_header = section_header_format.format(
        header_name=gender_displaced_count_group['header_name'])
    if zero_displaced:
        context['sections']['gender'].append({
            'header':
            gender_section_header,
            'empty':
            True,
            'message':
            resolve_from_dictionary(extra_args,
                                    ['defaults', 'zero_displaced_message'])
        })
    elif no_gender_field:
        context['sections']['gender'].append({
            'header':
            gender_section_header,
            'empty':
            True,
            'message':
            resolve_from_dictionary(extra_args,
                                    ['defaults', 'no_gender_rate_message'])
        })
    else:
        context['sections']['gender'].append(
            create_section(aggregation_summary,
                           analysis_layer,
                           gender_items,
                           gender_section_header,
                           use_aggregation=use_aggregation,
                           use_rounding=use_rounding,
                           extra_component_args=extra_args))

    # Vulnerability Groups.
    context['sections']['vulnerability'] = []
    for vulnerability_group in vulnerability_displaced_count_groups:
        vulnerability_items = {
            'group':
            vulnerability_group,
            'group_header':
            group_header_format.format(
                header_name=vulnerability_group['header_name']),
            'fields': [field for field in vulnerability_group['fields']]
        }

        # check vulnerability_fields exists
        for field in vulnerability_items['fields']:
            if field['key'] in analysis_layer_fields:
                no_vulnerability_field = False
                break
        else:
            no_vulnerability_field = True

        vulnerability_section_header = section_header_format.format(
            header_name=vulnerability_group['header_name'])
        if zero_displaced:
            context['sections']['vulnerability'].append({
                'header':
                vulnerability_section_header,
                'empty':
                True,
                'message':
                resolve_from_dictionary(extra_args,
                                        ['defaults', 'zero_displaced_message'])
            })
        elif no_vulnerability_field:
            context['sections']['vulnerability'].append({
                'header':
                vulnerability_section_header,
                'empty':
                True,
                'message':
                resolve_from_dictionary(
                    extra_args, ['defaults', 'no_vulnerability_rate_message'])
            })
        else:
            context['sections']['vulnerability'].append(
                create_section(aggregation_summary,
                               analysis_layer,
                               vulnerability_items,
                               vulnerability_section_header,
                               use_aggregation=use_aggregation,
                               use_rounding=use_rounding,
                               extra_component_args=extra_args))

    # Minimum Needs.
    context['sections']['minimum_needs'] = []
    minimum_needs_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'header'])
    # Don't show minimum needs if there is no displaced
    if zero_displaced:
        context['sections']['minimum_needs'].append({
            'header':
            minimum_needs_section_header,
            'empty':
            True,
            'message':
            resolve_from_dictionary(extra_args,
                                    ['defaults', 'zero_displaced_message'])
        })
    # Only provides minimum needs breakdown if there is aggregation layer
    elif use_aggregation:
        # minimum needs should provide unit for column headers
        units_label = []
        minimum_needs_items = {
            'group_header': 'Minimum needs breakdown',
            'fields': minimum_needs_fields + additional_minimum_needs
        }

        for field in minimum_needs_items['fields']:
            unit = None
            # Reset per field: without this, a field with neither a
            # need_parameter nor a unit silently inherited the
            # abbreviation of the preceding field (and the very first
            # iteration could hit an unbound name).
            unit_abbreviation = None
            if field.get('need_parameter'):
                need = field['need_parameter']
                if isinstance(need, ResourceParameter):
                    unit_abbreviation = need.unit.abbreviation
            elif field.get('unit'):
                need_unit = field.get('unit')
                unit_abbreviation = need_unit.get('abbreviation')

            if unit_abbreviation:
                unit_format = '{unit}'
                unit = unit_format.format(unit=unit_abbreviation)
            units_label.append(unit)

        context['sections']['minimum_needs'].append(
            create_section(aggregation_summary,
                           analysis_layer,
                           minimum_needs_items,
                           minimum_needs_section_header,
                           units_label=units_label,
                           use_rounding=use_rounding,
                           extra_component_args=extra_args))
    else:
        # NOTE(review): this flag ends up reflecting only the last value
        # examined per section (the break only exits the inner loop) —
        # confirm the intended "any section has rows" semantics.
        sections_not_empty = True
        for _, values in list(context['sections'].items()):
            for value in values:
                if value.get('rows'):
                    break
                else:
                    sections_not_empty = False

        context['sections_not_empty'] = sections_not_empty

    return context
Esempio n. 14
0
def minimum_needs_extractor(impact_report, component_metadata):
    """Extracting minimum needs of the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    analysis_layer = impact_report.analysis
    analysis_keywords = analysis_layer.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    is_rounding = not debug_mode

    header = resolve_from_dictionary(extra_args, 'header')
    context['header'] = header

    # check if displaced is not zero
    try:
        displaced_field_name = analysis_keywords[displaced_field['key']]
        total_displaced = value_from_field_name(
            displaced_field_name, analysis_layer)
        if total_displaced == 0:
            zero_displaced_message = resolve_from_dictionary(
                extra_args, 'zero_displaced_message')
            context['zero_displaced'] = {
                'status': True,
                'message': zero_displaced_message
            }
            return context
    except KeyError:
        # in case no displaced field
        pass

    # minimum needs calculation only affect population type exposure
    # check if analysis keyword have minimum_needs keywords
    have_minimum_needs_field = False
    for field_key in analysis_keywords:
        if field_key.startswith(minimum_needs_namespace):
            have_minimum_needs_field = True
            break

    if not have_minimum_needs_field:
        return context

    frequencies = {}
    # map each needs to its frequency groups
    for field in (minimum_needs_fields + additional_minimum_needs):
        need_parameter = field.get('need_parameter')
        if isinstance(need_parameter, ResourceParameter):
            frequency = need_parameter.frequency
        else:
            frequency = field.get('frequency')

        if frequency:
            if frequency not in frequencies:
                frequencies[frequency] = [field]
            else:
                frequencies[frequency].append(field)

    needs = []
    # Python 3: iterators have no .next() method, use the builtin next().
    analysis_feature = next(analysis_layer.getFeatures())
    header_frequency_format = resolve_from_dictionary(
        extra_args, 'header_frequency_format')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    need_header_format = resolve_from_dictionary(
        extra_args, 'need_header_format')
    # group the needs by frequency (iteritems() was removed in Python 3)
    for key, frequency in frequencies.items():
        group = {
            'header': header_frequency_format.format(frequency=tr(key)),
            'total_header': total_header,
            'needs': []
        }
        for field in frequency:
            # check value exists in the field
            # QGIS 3 API: fields().lookupField() replaces the removed
            # fieldNameIndex() (consistent with the other extractors).
            field_idx = analysis_layer.fields().lookupField(
                field['field_name'])
            if field_idx == -1:
                # skip if field doesn't exists
                continue
            value = format_number(
                analysis_feature[field_idx],
                use_rounding=is_rounding,
                is_population=True)

            # Reset per field so a need without its own unit does not
            # inherit the abbreviation of the previous field.
            unit_abbreviation = None
            if field.get('need_parameter'):
                need_parameter = field['need_parameter']
                # :type: ResourceParameter
                name = tr(need_parameter.name)
                unit_abbreviation = need_parameter.unit.abbreviation

            else:
                if field.get('header_name'):
                    name = field.get('header_name')
                else:
                    name = field.get('name')

                need_unit = field.get('unit')
                if need_unit:
                    unit_abbreviation = need_unit.get('abbreviation')

            if unit_abbreviation:
                header = need_header_format.format(
                    name=name,
                    unit_abbreviation=unit_abbreviation)
            else:
                header = name

            item = {
                'header': header,
                'value': value
            }
            group['needs'].append(item)
        needs.append(group)

    context['needs'] = needs

    return context
Esempio n. 15
0
def population_chart_extractor(impact_report, component_metadata):
    """Creating population donut chart.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    provenance = impact_report.impact_function.provenance
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    # Build the donut chart context for the affected population.

    # Retrieve the hazard classification in effect for this
    # hazard/exposure combination.
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))
    if not hazard_classification:
        return context

    data = []
    labels = []
    colors = []

    # One chart slice per affected hazard class.
    affected_classes = (
        hazard_class for hazard_class in hazard_classification['classes']
        if hazard_class['affected'])
    for hazard_class in affected_classes:
        # hazard_count_field is a dynamic field parameterised by the
        # hazard class key.
        dynamic_key = hazard_count_field['key'] % (hazard_class['key'], )

        try:
            # The keyword lookup raises KeyError when there is no hazard
            # count stored for this particular class.
            field_name = analysis_layer_fields[dynamic_key]
            slice_value = value_from_field_name(field_name, analysis_layer)
            slice_value = round_affected_number(
                slice_value,
                use_rounding=True,
                use_population_rounding=True)
        except KeyError:
            # in case the field was not found
            continue

        data.append(slice_value)
        labels.append(hazard_class['name'])
        colors.append(hazard_class['color'].name())

    # Append the total-not-affected slice when the field is available.
    try:
        field_name = analysis_layer_fields[total_not_affected_field['key']]
    except KeyError:
        # in case the field is not there
        pass
    else:
        slice_value = value_from_field_name(field_name, analysis_layer)
        slice_value = round_affected_number(
            slice_value,
            use_rounding=True,
            use_population_rounding=True)
        data.append(slice_value)
        labels.append(total_not_affected_field['name'])
        colors.append(green.name())

    # Assemble the chart context for the renderer.
    context['context'] = DonutChartContext(
        data=data,
        labels=labels,
        colors=colors,
        inner_radius_ratio=0.5,
        stroke_color='#fff',
        title=resolve_from_dictionary(extra_args, 'chart_title'),
        total_header=resolve_from_dictionary(extra_args, 'total_header'),
        as_file=True)

    return context
Esempio n. 16
0
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: The impact report that acts as a proxy to fetch
        all the data that extractor needed.
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: The component metadata. Used to obtain
        information about the component we want to render.
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: Context for rendering phase.
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = next(analysis_layer.getFeatures())
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    use_rounding = impact_report.impact_function.use_rounding
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    """Initializations."""

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # Get exposure type definition
    exposure_type = definition(exposure_keywords['exposure'])
    # Only round the number when it is population exposure and we use rounding
    is_population = exposure_type is exposure_population

    # action for places with poopulation exposure
    is_place_with_population = False
    if exposure_type is exposure_place:
        exposure_fields = exposure_keywords['inasafe_fields']
        if exposure_fields.get(population_count_field['key']):
            is_place_with_population = True

    # Analysis detail only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [
        exposure_type_field,
        exposure_class_field
    ]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break

    """Create detail header."""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(
        breakdown_header_template.format(exposure=exposure_header))

    # this is mapping for customizing double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args,
        'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': [],
            'total': []
        },
        'not_affected': {
            'hazards': [],
            'total': []
        }
    }
    for key, group in list(header_hazard_group.items()):
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    affected_header_index = None
    for index, hazard_class in enumerate(hazard_classification['classes']):
        # the tuple format would be:
        # (class name, is it affected, header background color

        hazard_class_name = hazard_class['name']
        affected = hazard_class.get('affected')

        if not affected and not affected_header_index:
            affected_header_index = index + 1
            affected_status = 'not_affected'
        elif affected:
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    if affected_header_index:
        not_affected_header_index = len(hazard_classification['classes']) + 2
    else:
        affected_header_index = len(hazard_classification['classes']) + 1
        not_affected_header_index = affected_header_index + 2

    report_fields = []

    headers.insert(affected_header_index, total_affected_field['name'])
    header_hazard_group['affected']['total'].append(
        total_affected_field['name'])
    report_fields.append(total_affected_field)

    headers.insert(not_affected_header_index, total_not_affected_field['name'])
    header_hazard_group['not_affected']['total'].append(
        total_not_affected_field['name'])
    report_fields.append(total_not_affected_field)

    # affected, not affected, population (if applicable), not exposed,
    # total header
    report_fields += [total_not_exposed_field, total_field]

    place_pop_name = resolve_from_dictionary(
        extra_args, ['place_with_population', 'header'])
    if is_place_with_population:
        # we want to change header name for population
        duplicated_population_count_field = deepcopy(
            exposed_population_count_field)
        duplicated_population_count_field['name'] = place_pop_name
        report_fields.append(duplicated_population_count_field)

    report_fields_index = -2 + -(int(is_place_with_population))
    for report_field in report_fields[report_fields_index:]:
        headers.append(report_field['name'])

    """Create detail rows."""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fields().lookupField(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in list(header_hazard_group.items()):
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fields() \
                    .lookupField(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(
                    count_value,
                    use_rounding=use_rounding,
                    is_population=is_population)
                row.append({
                    'value': count_value,
                    'header_group': group_key
                })
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({
                    'value': 0,
                    'header_group': group_key
                })

        skip_row = False

        for field in report_fields:
            group_key = None
            for key, group in list(header_hazard_group.items()):
                if field['name'] in group['total']:
                    group_key = key
                    break

            field_index = exposure_summary_table.fields().lookupField(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(
                total_count,
                use_rounding=use_rounding,
                is_population=is_population)

            # we comment below code because now we want to show all rows,
            # we can uncomment if we want to remove the rows with zero total

            # if total_count == '0' and field == total_affected_field:
            #     skip_row = True
            #     break

            if group_key:
                if field == total_affected_field:
                    row.insert(
                        affected_header_index,
                        {
                            'value': total_count,
                            'header_group': group_key
                        })
                elif field == total_not_affected_field:
                    row.insert(
                        not_affected_header_index,
                        {
                            'value': total_count,
                            'header_group': group_key
                        })
                else:
                    row.append({
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_keywords)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # class key is first column
        _class_key = _row[0]
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _class_key == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name

    """create total footers."""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    save_total_affected_field = False
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (
            hazard_class['key'],)
        if not hazard_class.get('affected'):
            save_total_affected_field = True

        group_key = None
        for key, group in list(header_hazard_group.items()):
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fields().lookupField(field_name)
            count_value = format_number(
                analysis_feature[field_index],
                use_rounding=use_rounding,
                is_population=is_population)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers) + int(save_total_affected_field)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            # reduce total affected and not affected column index by 1
            # since we are removing a column
            if group_key == affected_field['field_name']:
                affected_header_index -= 1
            else:
                not_affected_header_index -= 1
            continue
        footers.append({
            'value': count_value,
            'header_group': group_key
        })

    # for footers
    for field in report_fields:

        total_count = value_from_field_name(
            field['field_name'], analysis_layer)

        if not total_count and field['name'] == place_pop_name:
            field = population_count_field
            field['name'] = place_pop_name
            total_count = value_from_field_name(
                field['field_name'], analysis_layer)

        group_key = None
        for key, group in list(header_hazard_group.items()):
            if field['name'] in group['total']:
                group_key = key
                break

        total_count = format_number(
            total_count,
            use_rounding=use_rounding,
            is_population=is_population)
        if group_key:
            if field == total_affected_field:
                footers.insert(
                    affected_header_index,
                    {
                        'value': total_count,
                        'header_group': group_key
                    })
            elif field == total_not_affected_field:
                footers.insert(
                    not_affected_header_index,
                    {
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                footers.append({
                    'value': total_count,
                    'header_group': group_key
                })
        else:
            footers.append(total_count)

    header = resolve_from_dictionary(
        extra_args, 'header')
    notes = resolve_from_dictionary(
        extra_args, 'notes')

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0

    # we want to include total affected and not affected as a group
    # to its class so len(report_fields) - 2
    total_header_index = len(headers) - (len(report_fields) - 2)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in list(header_hazard_group.items()):
            if hazard_class_name in group['hazards'] or (
                    hazard_class_name in group['total']):
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    context['extra_table'] = {}

    # extra table for specific exposure if exist
    extra_fields = resolve_from_dictionary(extra_args, 'exposure_extra_fields')
    if exposure_type['key'] in list(extra_fields.keys()):

        # create header for the extra table
        extra_table_header_format = resolve_from_dictionary(
            extra_args, 'extra_table_header_format')
        extra_table_header = extra_table_header_format.format(
            exposure=exposure_header)

        # headers
        headers = []
        headers.append(
            breakdown_header_template.format(exposure=exposure_header))

        current_unit = None
        currency_unit = setting('currency', expected_type=str)
        for field in extra_fields[exposure_type['key']]:
            field_index = exposure_summary_table.fields().lookupField(
                field['field_name'])
            if field_index < 0:
                LOGGER.debug(
                    'Field name not found: %s, field index: %s' % (
                        field['field_name'], field_index))
                continue

            units = field.get('units')
            if units:
                for unit in units:
                    if currency_unit == unit['key']:
                        current_unit = unit['name']
                        break
                if not current_unit:
                    current_unit = units[0]['name']

            header_format = '{header} ({unit})'
            headers.append(header_format.format(
                header=field['header_name'], unit=current_unit))

        # rows
        details = []
        for feat in exposure_summary_table.getFeatures():
            row = []

            # Get breakdown name
            exposure_summary_table_field_name = breakdown_field['field_name']
            field_index = exposure_summary_table.fields().lookupField(
                exposure_summary_table_field_name)
            class_key = feat[field_index]

            row.append(class_key)

            for field in extra_fields[exposure_type['key']]:
                field_index = exposure_summary_table.fields().lookupField(
                    field['field_name'])
                # noinspection PyBroadException
                try:
                    total_count = int(float(feat[field_index]))
                except BaseException:
                    LOGGER.debug(
                        'Field name not found: %s, field index: %s' % (
                            field['field_name'], field_index))
                    continue
                total_count = format_number(
                    total_count,
                    use_rounding=use_rounding,
                    is_population=is_population)
                row.append(total_count)

            details.append(row)

        details = sorted(details, key=sort_classes)

        context['extra_table'] = {
            'table_header': extra_table_header,
            'headers': headers,
            'details': details,
        }

    return context
def create_section_without_aggregation(
        aggregation_summary, analysis_layer, postprocessor_fields,
        section_header,
        units_label=None,
        debug_mode=False,
        extra_component_args=None):
    """Create demographic section context without aggregation.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorLayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract. A dict
        with a 'fields' key holding the field definitions.
    :type postprocessor_fields: dict

    :param section_header: Section header text
    :type section_header: str

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param debug_mode: flag for debug_mode, affect number representations
    :type debug_mode: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for gender section, empty dict when nothing to show
    :rtype: dict
    """
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords[
        'inasafe_fields']
    # debug mode shows unrounded numbers
    enable_rounding = not debug_mode

    # retrieving postprocessor: keep only output fields actually present
    # in the aggregation summary keywords
    postprocessors_fields_found = [
        output_field for output_field in postprocessor_fields['fields']
        if output_field['key'] in aggregation_summary_fields]

    if not postprocessors_fields_found:
        return {}

    # figuring out displaced field: it must exist in both the analysis
    # layer and the aggregation summary, otherwise we can't show any
    # result (same guard as create_section_with_aggregation)
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}

    """Generating header name for columns"""

    # First column header is aggregation title
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        section_header,
        total_population_header,
    ]

    """Generating values for rows"""
    row_values = []

    for idx, output_field in enumerate(postprocessors_fields_found):

        # prefer the long header name when available
        name = output_field.get('header_name')
        if not name:
            name = output_field.get('name')

        row = []
        if units_label or output_field.get('unit'):
            unit = None
            if units_label:
                unit = units_label[idx]
            elif output_field.get('unit'):
                unit = output_field.get('unit').get('abbreviation')

            if unit:
                header_format = u'{name} [{unit}]'
            else:
                header_format = u'{name}'

            header = header_format.format(
                name=name,
                unit=unit)
        else:
            header_format = u'{name}'
            header = header_format.format(name=name)

        row.append(header)

        # if no aggregation layer, then aggregation summary only contain one
        # feature, so the total comes straight from the analysis layer
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(
            field_name,
            analysis_layer)
        value = format_number(
            value,
            enable_rounding=enable_rounding,
            is_population=True)
        row.append(value)

        row_values.append(row)

    return {
        'columns': columns,
        'rows': row_values,
    }
def create_section_with_aggregation(
        aggregation_summary, analysis_layer, postprocessor_fields,
        section_header,
        units_label=None,
        debug_mode=False,
        extra_component_args=None):
    """Create demographic section context with aggregation breakdown.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorLayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract. Either a
        dict with a 'fields' key (and optionally 'group' and
        'group_header') or a plain list of field definitions.
    :type postprocessor_fields: dict or list[dict]

    :param section_header: Section header text
    :type section_header: str

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param debug_mode: flag for debug_mode, affect number representations
    :type debug_mode: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for gender section, empty dict when nothing to show
    :rtype: dict

    .. versionadded:: 4.0
    """
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords[
        'inasafe_fields']
    # debug mode shows unrounded numbers
    enable_rounding = not debug_mode

    # retrieving postprocessor
    postprocessors_fields_found = []

    if type(postprocessor_fields) is dict:
        output_fields = postprocessor_fields['fields']
    else:
        output_fields = postprocessor_fields

    for output_field in output_fields:
        if output_field['key'] in aggregation_summary_fields:
            postprocessors_fields_found.append(output_field)

    if not postprocessors_fields_found:
        return {}

    # figuring out displaced field
    # no displaced field, can't show result
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}

    """Generating header name for columns"""

    # First column header is aggregation title
    default_aggregation_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'aggregation_header'])
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        aggregation_summary.title() or default_aggregation_header,
        total_population_header,
    ]
    row_values = []

    group_fields_found = []
    start_group_header = True
    # Initialize group_header so it is bound even when postprocessor_fields
    # is a plain list (previously unbound in that branch, which would raise
    # a NameError when building header_dict below).
    group_header = None
    for idx, output_field in enumerate(postprocessors_fields_found):

        # prefer the long header name when available
        name = output_field.get('header_name')
        if not name:
            name = output_field.get('name')

        if units_label or output_field.get('unit'):
            unit = None
            if units_label:
                unit = units_label[idx]
            elif output_field.get('unit'):
                unit = output_field.get('unit').get('abbreviation')

            if unit:
                header_format = u'{name} [{unit}]'
            else:
                header_format = u'{name}'

            header = header_format.format(
                name=name,
                unit=unit)
        else:
            header_format = u'{name}'
            header = header_format.format(name=name)

        if type(postprocessor_fields) is dict:
            try:
                group_header = postprocessor_fields['group_header']
                group_fields = postprocessor_fields['group']['fields']
                if output_field in group_fields:
                    group_fields_found.append(output_field)
                else:
                    # not part of the group: plain column header
                    columns.append(header)
                    continue
            except KeyError:
                group_fields_found.append(output_field)

        header_dict = {
            'name': header,
            'group_header': group_header,
            'start_group_header': start_group_header
        }

        # only the first grouped column flags the start of the group header
        start_group_header = False
        columns.append(header_dict)

    """Generating values for rows"""

    for feature in aggregation_summary.getFeatures():

        aggregation_name_index = aggregation_summary.fieldNameIndex(
            aggregation_name_field['field_name'])
        displaced_field_name = aggregation_summary_fields[
            displaced_field['key']]
        displaced_field_index = aggregation_summary.fieldNameIndex(
            displaced_field_name)

        aggregation_name = feature[aggregation_name_index]
        total_displaced = feature[displaced_field_index]

        if not total_displaced or isinstance(total_displaced, QPyNullVariant):
            # skip if total displaced null
            continue

        total_displaced = format_number(
            feature[displaced_field_index],
            enable_rounding=enable_rounding,
            is_population=True)

        row = [
            aggregation_name,
            total_displaced,
        ]

        # hide all-zero rows unless we are debugging
        if total_displaced == '0' and not debug_mode:
            continue

        for output_field in postprocessors_fields_found:
            field_name = aggregation_summary_fields[output_field['key']]
            field_index = aggregation_summary.fieldNameIndex(field_name)
            value = feature[field_index]

            value = format_number(
                value,
                enable_rounding=enable_rounding,
                is_population=True)
            row.append(value)

        row_values.append(row)

    """Generating total rows """

    total_displaced_field_name = analysis_layer_fields[
        displaced_field['key']]
    value = value_from_field_name(
        total_displaced_field_name, analysis_layer)
    value = format_number(
        value,
        enable_rounding=enable_rounding,
        is_population=True)
    total_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_header'])
    totals = [
        total_header,
        value
    ]
    for output_field in postprocessors_fields_found:
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(field_name, analysis_layer)
        value = format_number(
            value,
            enable_rounding=enable_rounding,
            is_population=True)
        totals.append(value)

    default_notes = resolve_from_dictionary(
        extra_component_args, ['defaults', 'notes'])

    if type(default_notes) is not list:
        default_notes = [default_notes]

    # TypeError when postprocessor_fields is a list, KeyError when the
    # dict has no group notes; fall back to the defaults in both cases
    try:
        notes = default_notes + postprocessor_fields['group']['notes']
    except (TypeError, KeyError):
        notes = default_notes

    return {
        'notes': notes,
        'header': section_header,
        'columns': columns,
        'rows': row_values,
        'totals': totals,
        'group_header_colspan': len(group_fields_found)
    }
def aggregation_postprocessors_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {
        'sections': OrderedDict()
    }

    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    aggregation_summary = impact_report.aggregation_summary
    analysis_layer = impact_report.analysis
    analysis_layer_fields = impact_report.analysis.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    use_aggregation = bool(impact_report.impact_function.provenance[
        'aggregation_layer'])

    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)

    # this entire section is only for population exposure type
    if not exposure_type == exposure_population:
        return context

    # check zero displaced (there will be no output to display)
    try:
        displaced_field_name = analysis_layer_fields[displaced_field['key']]
        total_displaced = value_from_field_name(
            displaced_field_name, analysis_layer)

        zero_displaced = False
        if total_displaced == 0:
            zero_displaced = True
    except KeyError:
        # in case no displaced field
        # let each section handled itself
        zero_displaced = False

    context['use_aggregation'] = use_aggregation
    if not use_aggregation:
        context['header'] = resolve_from_dictionary(
            extra_args, 'header')

    group_header_format = resolve_from_dictionary(
        extra_args, ['defaults', 'group_header_format'])
    section_header_format = resolve_from_dictionary(
        extra_args, ['defaults', 'section_header_format'])

    """Age Groups"""
    age_items = {
        'group': age_displaced_count_group,
        'group_header': group_header_format.format(
            header_name=age_displaced_count_group['header_name']),
        'fields': [postprocessor_output_field(p) for p in age_postprocessors]
    }

    # check age_fields exists (for-else: no_age_field stays True only when
    # no field key was found in the analysis layer)
    for field in age_items['fields']:
        if field['key'] in analysis_layer_fields:
            no_age_field = False
            break
    else:
        no_age_field = True

    context['sections']['age'] = []
    age_section_header = section_header_format.format(
        header_name=age_displaced_count_group['header_name'])
    if zero_displaced:
        context['sections']['age'].append(
            {
                'header': age_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    elif no_age_field:
        context['sections']['age'].append(
            {
                'header': age_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'no_age_rate_message'])
            }
        )
    else:
        context['sections']['age'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                age_items,
                age_section_header,
                use_aggregation=use_aggregation,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )

    """Gender Groups"""
    gender_items = {
        'group': gender_displaced_count_group,
        'group_header': group_header_format.format(
            header_name=gender_displaced_count_group['header_name']),
        'fields': [
            postprocessor_output_field(p) for p in gender_postprocessors]
    }

    # check gender_fields exists
    for field in gender_items['fields']:
        if field['key'] in analysis_layer_fields:
            no_gender_field = False
            break
    else:
        no_gender_field = True

    context['sections']['gender'] = []
    gender_section_header = section_header_format.format(
        header_name=gender_displaced_count_group['header_name'])
    if zero_displaced:
        context['sections']['gender'].append(
            {
                'header': gender_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    elif no_gender_field:
        context['sections']['gender'].append(
            {
                'header': gender_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'no_gender_rate_message'])
            }
        )
    else:
        context['sections']['gender'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                gender_items,
                gender_section_header,
                use_aggregation=use_aggregation,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )

    """Vulnerability Groups"""
    context['sections']['vulnerability'] = []
    for vulnerability_group in vulnerability_displaced_count_groups:
        vulnerability_items = {
            'group': vulnerability_group,
            'group_header': group_header_format.format(
                header_name=vulnerability_group['header_name']),
            'fields': [field for field in vulnerability_group['fields']]
        }

        # check vulnerability_fields exists
        for field in vulnerability_items['fields']:
            if field['key'] in analysis_layer_fields:
                no_vulnerability_field = False
                break
        else:
            no_vulnerability_field = True

        vulnerability_section_header = section_header_format.format(
            header_name=vulnerability_group['header_name'])
        if zero_displaced:
            context['sections']['vulnerability'].append(
                {
                    'header': vulnerability_section_header,
                    'empty': True,
                    'message': resolve_from_dictionary(
                        extra_args, ['defaults', 'zero_displaced_message'])
                }
            )
        elif no_vulnerability_field:
            context['sections']['vulnerability'].append(
                {
                    'header': vulnerability_section_header,
                    'empty': True,
                    'message': resolve_from_dictionary(
                        extra_args,
                        ['defaults', 'no_vulnerability_rate_message'])
                }
            )
        else:
            context['sections']['vulnerability'].append(
                create_section(
                    aggregation_summary,
                    analysis_layer,
                    vulnerability_items,
                    vulnerability_section_header,
                    use_aggregation=use_aggregation,
                    debug_mode=debug_mode,
                    extra_component_args=extra_args)
            )

    """Minimum Needs"""
    context['sections']['minimum_needs'] = []
    minimum_needs_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'header'])
    # Don't show minimum needs if there is no displaced
    if zero_displaced:
        context['sections']['minimum_needs'].append(
            {
                'header': minimum_needs_section_header,
                'empty': True,
                'message': resolve_from_dictionary(
                    extra_args, ['defaults', 'zero_displaced_message'])
            }
        )
    # Only provides minimum needs breakdown if there is aggregation layer
    elif use_aggregation:
        # minimum needs should provide unit for column headers
        units_label = []
        minimum_needs_items = {
            'group_header': u'Minimum needs breakdown',
            'fields': minimum_needs_fields
        }

        for field in minimum_needs_items['fields']:
            need = field['need_parameter']
            if isinstance(need, ResourceParameter):
                unit = None
                unit_abbreviation = need.unit.abbreviation
                if unit_abbreviation:
                    unit_format = '{unit}'
                    unit = unit_format.format(
                        unit=unit_abbreviation)
                units_label.append(unit)

        context['sections']['minimum_needs'].append(
            create_section(
                aggregation_summary,
                analysis_layer,
                minimum_needs_items,
                minimum_needs_section_header,
                units_label=units_label,
                debug_mode=debug_mode,
                extra_component_args=extra_args)
        )
    else:
        sections_not_empty = True
        # fix: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3; use items() like the rest of this module
        for _, values in context['sections'].items():
            for value in values:
                if value.get('rows'):
                    break
                else:
                    sections_not_empty = False

        context['sections_not_empty'] = sections_not_empty

    return context
def aggregation_postprocessors_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {
        'sections': OrderedDict()
    }

    # Initializations
    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    aggregation_summary = impact_report.aggregation_summary
    analysis_layer = impact_report.analysis
    analysis_layer_fields = impact_report.analysis.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    use_aggregation = bool(impact_report.impact_function.provenance[
        'aggregation_layer'])

    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)

    # this entire section is only for population exposure type
    if not exposure_type == exposure_population:
        return context

    # check zero displaced (there will be no output to display)
    try:
        displaced_field_name = analysis_layer_fields[displaced_field['key']]
        total_displaced = value_from_field_name(
            displaced_field_name, analysis_layer)

        zero_displaced = total_displaced == 0
    except KeyError:
        # in case no displaced field
        # let each section handled itself
        zero_displaced = False

    context['use_aggregation'] = use_aggregation
    if not use_aggregation:
        context['header'] = resolve_from_dictionary(
            extra_args, 'header')

    age_fields = [postprocessor_output_field(p) for p in age_postprocessors]
    gender_fields = [male_displaced_count_field] + [
        postprocessor_output_field(p) for p in female_postprocessors]

    # check whether any age/gender postprocessor output exists in the
    # analysis layer; absence means the rate keywords were not provided
    no_age_field = not any(
        field['key'] in analysis_layer_fields for field in age_fields)
    no_gender_field = not any(
        field['key'] in analysis_layer_fields for field in gender_fields)

    age_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'age', 'header'])
    if zero_displaced:
        context['sections']['age'] = {
            'header': age_section_header,
            'empty': True,
            'message': resolve_from_dictionary(
                extra_args, ['defaults', 'zero_displaced_message'])
        }
    elif no_age_field:
        context['sections']['age'] = {
            'header': age_section_header,
            'empty': True,
            'message': resolve_from_dictionary(
                extra_args, ['defaults', 'no_age_rate_message'])
        }
    else:
        context['sections']['age'] = create_section(
            aggregation_summary,
            analysis_layer,
            age_fields,
            age_section_header,
            use_aggregation=use_aggregation,
            debug_mode=debug_mode,
            extra_component_args=extra_args)

    gender_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'gender', 'header'])
    if zero_displaced:
        context['sections']['gender'] = {
            'header': gender_section_header,
            'empty': True,
            'message': resolve_from_dictionary(
                extra_args, ['defaults', 'zero_displaced_message'])
        }
    elif no_gender_field:
        context['sections']['gender'] = {
            'header': gender_section_header,
            'empty': True,
            'message': resolve_from_dictionary(
                extra_args, ['defaults', 'no_gender_rate_message'])
        }
    else:
        context['sections']['gender'] = create_section(
            aggregation_summary,
            analysis_layer,
            gender_fields,
            gender_section_header,
            use_aggregation=use_aggregation,
            debug_mode=debug_mode,
            extra_component_args=extra_args)

    minimum_needs_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'header'])
    # Don't show minimum needs if there is no displaced
    if zero_displaced:
        context['sections']['minimum_needs'] = {
            'header': minimum_needs_section_header,
            'empty': True,
            'message': resolve_from_dictionary(
                extra_args, ['defaults', 'zero_displaced_message'])
        }
    # Only provides minimum needs breakdown if there is aggregation layer
    elif use_aggregation:
        # minimum needs should provide unit for column headers
        units_label = []

        for field in minimum_needs_fields:
            need = field['need_parameter']
            if isinstance(need, ResourceParameter):
                # keep None when there is no abbreviation so the column
                # header is rendered without a unit suffix
                unit = None
                unit_abbreviation = need.unit.abbreviation
                if unit_abbreviation:
                    unit = '{unit}'.format(unit=unit_abbreviation)
                units_label.append(unit)

        context['sections']['minimum_needs'] = create_section(
            aggregation_summary,
            analysis_layer,
            minimum_needs_fields,
            minimum_needs_section_header,
            units_label=units_label,
            debug_mode=debug_mode,
            extra_component_args=extra_args)
    else:
        # No aggregation layer: flag the whole report as empty unless at
        # least one section actually produced rows.
        # NOTE: .values() instead of Py2-only .iteritems() (the key was
        # unused anyway), portable across Python 2 and 3.
        sections_not_empty = True
        for value in context['sections'].values():
            if value.get('rows'):
                break
        else:
            sections_not_empty = False

        context['sections_not_empty'] = sections_not_empty

    return context
def create_section_with_aggregation(
        aggregation_summary, analysis_layer, postprocessor_fields,
        section_header,
        units_label=None,
        debug_mode=False,
        extra_component_args=None):
    """Create demographic section context with aggregation breakdown.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorLayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract
    :type postprocessor_fields: list[dict]

    :param section_header: Section header text
    :type section_header: str

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param debug_mode: flag for debug_mode, affect number representations
    :type debug_mode: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for the section; empty dict when the postprocessor or
        displaced fields are not present in the layers
    :rtype: dict

    .. versionadded:: 4.0
    """
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords[
        'inasafe_fields']
    enable_rounding = not debug_mode

    # keep only the postprocessor fields actually present in the summary
    postprocessors_fields_found = [
        output_field for output_field in postprocessor_fields
        if output_field['key'] in aggregation_summary_fields]

    if not postprocessors_fields_found:
        return {}

    # figuring out displaced field
    # no displaced field, can't show result
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}

    # Generating header name for columns.
    # First column header is aggregation title
    default_aggregation_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'aggregation_header'])
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        aggregation_summary.title() or default_aggregation_header,
        total_population_header,
    ]
    row_values = []

    # NOTE(review): units_label is indexed by position within the *found*
    # fields; if a caller built it from the full field list and some fields
    # were filtered out above, labels could misalign — confirm with callers.
    for idx, output_field in enumerate(postprocessors_fields_found):
        name = output_field['name']
        if units_label or output_field.get('unit'):
            unit = None
            if units_label:
                unit = units_label[idx]
            elif output_field.get('unit'):
                unit = output_field.get('unit').get('abbreviation')

            if unit:
                header_format = u'{name} [{unit}]'
            else:
                header_format = u'{name}'

            header = header_format.format(
                name=name,
                unit=unit)
        else:
            header_format = u'{name}'
            header = header_format.format(name=name)

        columns.append(header)

    # Generating values for rows.
    # Field index lookups do not depend on the feature, so hoist them out
    # of the feature loop instead of recomputing them per feature.
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    displaced_field_name = aggregation_summary_fields[
        displaced_field['key']]
    displaced_field_index = aggregation_summary.fieldNameIndex(
        displaced_field_name)
    found_field_indices = [
        aggregation_summary.fieldNameIndex(
            aggregation_summary_fields[output_field['key']])
        for output_field in postprocessors_fields_found]

    for feature in aggregation_summary.getFeatures():
        aggregation_name = feature[aggregation_name_index]
        total_displaced = feature[displaced_field_index]

        if not total_displaced or isinstance(total_displaced, QPyNullVariant):
            # skip if total displaced null
            continue

        total_displaced = format_number(
            total_displaced,
            enable_rounding=enable_rounding)

        row = [
            aggregation_name,
            total_displaced,
        ]

        # zero rows are only shown in debug mode
        if total_displaced == '0' and not debug_mode:
            continue

        for field_index in found_field_indices:
            value = format_number(
                feature[field_index],
                enable_rounding=enable_rounding)
            row.append(value)

        row_values.append(row)

    # Generating total row from the analysis (whole-area) layer.
    total_displaced_field_name = analysis_layer_fields[
        displaced_field['key']]
    value = value_from_field_name(
        total_displaced_field_name, analysis_layer)
    value = format_number(
        value,
        enable_rounding=enable_rounding)
    total_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_header'])
    totals = [
        total_header,
        value
    ]
    for output_field in postprocessors_fields_found:
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(field_name, analysis_layer)
        value = format_number(
            value,
            enable_rounding=enable_rounding)
        totals.append(value)

    notes = resolve_from_dictionary(
        extra_component_args, ['defaults', 'notes'])
    return {
        'notes': notes,
        'header': section_header,
        'columns': columns,
        'rows': row_values,
        'totals': totals,
    }
def analysis_provenance_details_extractor(impact_report, component_metadata):
    """Extracting provenance details of layers.

    This extractor would be the main provenance details extractor which produce
    tree view provenance details.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.1
    """
    context = {}
    extra_args = component_metadata.extra_args

    provenance_format_args = resolve_from_dictionary(
        extra_args, 'provenance_format')

    # preferred display order for provenance keywords
    keywords_order = [
        'title',
        'source',
        'layer_purpose',
        'layer_geometry',
        'hazard',
        'exposure',
        'hazard_category',
        'exposure_unit',
        'value_map',
        'value_maps',
        'inasafe_fields',
        'inasafe_default_values',
        'layer_mode',
        'hazard_layer',
        'exposure_layer',
        'aggregation_layer',
        'keywords_version']

    debug_mode = impact_report.impact_function.debug_mode
    provenance = impact_report.impact_function.provenance

    # we define dict here to create a different object of keyword
    hazard_keywords = dict(provenance['hazard_keywords'])

    # hazard_keywords doesn't have hazard_layer path information
    hazard_keywords['hazard_layer'] = provenance.get('hazard_layer')

    # keep only value maps / thresholds relevant to the IF exposure.
    # Rebuild the nested dict rather than deleting keys in place: the
    # dict() copy above is shallow, so the nested dicts are still shared
    # with the impact function's provenance and in-place deletion would
    # mutate the provenance itself.
    exposure_key = provenance['exposure_keywords']['exposure']
    for keyword in ['value_maps', 'thresholds']:
        if hazard_keywords.get(keyword):
            hazard_keywords[keyword] = dict(
                (key, value)
                for key, value in hazard_keywords[keyword].items()
                if key in exposure_key)

    header = resolve_from_dictionary(
        provenance_format_args, 'hazard_header')
    hazard_provenance = {
        'header': header.title(),
        'provenances': headerize(
            sorted_keywords_by_order(hazard_keywords, keywords_order))
    }

    # convert value if there is dict_keywords
    hazard_provenance['provenances'] = resolve_dict_keywords(
        hazard_provenance['provenances'])

    # we define dict here to create a different object of keyword
    exposure_keywords = dict(provenance['exposure_keywords'])

    # exposure_keywords doesn't have exposure_layer path information
    exposure_keywords['exposure_layer'] = provenance.get('exposure_layer')

    header = resolve_from_dictionary(
        provenance_format_args, 'exposure_header')
    exposure_provenance = {
        'header': header.title(),
        'provenances': headerize(
            sorted_keywords_by_order(exposure_keywords, keywords_order))
    }

    # convert value if there is dict_keywords
    exposure_provenance['provenances'] = resolve_dict_keywords(
        exposure_provenance['provenances'])

    # aggregation keywords could be None so we don't define dict here
    aggregation_keywords = provenance['aggregation_keywords']

    header = resolve_from_dictionary(
        provenance_format_args, 'aggregation_header')

    aggregation_provenance = {
        'header': header.title(),
        'provenances': None
    }

    # only if aggregation layer used
    if aggregation_keywords:
        # we define dict here to create a different object of keyword
        aggregation_keywords = dict(aggregation_keywords)

        # aggregation_keywords doesn't have aggregation_layer path information
        aggregation_keywords['aggregation_layer'] = provenance.get(
            'aggregation_layer')

        # convert value if there is dict_keywords
        aggregation_provenance['provenances'] = resolve_dict_keywords(
            headerize(
                sorted_keywords_by_order(
                    aggregation_keywords, keywords_order)))
    else:
        aggregation_provenance['provenances'] = resolve_from_dictionary(
            extra_args, ['defaults', 'aggregation_not_used'])

    all_provenance_keywords = dict(provenance)

    # we add debug mode information to the provenance
    all_provenance_keywords['debug_mode'] = 'On' if debug_mode else 'Off'

    header = resolve_from_dictionary(
        provenance_format_args, 'analysis_environment_header')
    analysis_environment_provenance_keys = [
        'os',
        'inasafe_version',
        'debug_mode',
        'qgis_version',
        'qt_version',
        'gdal_version',
        'pyqt_version']
    analysis_environment_provenance_items = OrderedDict(
        (item, all_provenance_keywords[item])
        for item in analysis_environment_provenance_keys)

    analysis_environment_provenance = {
        'header': header.title(),
        'provenances': headerize(analysis_environment_provenance_items)
    }

    header = resolve_from_dictionary(
        provenance_format_args, 'impact_function_header')
    impact_function_provenance = {
        'header': header.title(),
        'provenances': impact_report.impact_function.name
    }

    provenance_detail = OrderedDict()
    provenance_detail['impact_function'] = impact_function_provenance
    provenance_detail['hazard'] = hazard_provenance
    provenance_detail['exposure'] = exposure_provenance
    provenance_detail['aggregation'] = aggregation_provenance
    provenance_detail['analysis_environment'] = analysis_environment_provenance

    analysis_details_header = resolve_from_dictionary(
        extra_args, ['header', 'analysis_detail'])

    context.update({
        'header': analysis_details_header,
        'details': provenance_detail
    })

    return context
# Esempio n. 23
# 0
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    :param impact_report: The impact report that acts as a proxy to fetch
        all the data that extractor needed.
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: The component metadata. Used to obtain
        information about the component we want to render.
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: Context for rendering phase.
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    analysis_feature = analysis_layer.getFeatures().next()
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    use_rounding = impact_report.impact_function.use_rounding
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']
    """Initializations."""

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # Get exposure type definition
    exposure_type = definition(exposure_keywords['exposure'])
    # Only round the number when it is population exposure and we use rounding
    is_population = exposure_type is exposure_population

    # action for places with poopulation exposure
    is_place_with_population = False
    if exposure_type is exposure_place:
        exposure_fields = exposure_keywords['inasafe_fields']
        if exposure_fields.get(population_count_field['key']):
            is_place_with_population = True

    # Analysis detail only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    """Create detail header."""
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(breakdown_header_template.format(exposure=exposure_header))

    # this is mapping for customizing double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args, 'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': [],
            'total': []
        },
        'not_affected': {
            'hazards': [],
            'total': []
        }
    }
    for key, group in header_hazard_group.iteritems():
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    affected_header_index = None
    for index, hazard_class in enumerate(hazard_classification['classes']):
        # the tuple format would be:
        # (class name, is it affected, header background color

        hazard_class_name = hazard_class['name']
        affected = hazard_class.get('affected')

        if not affected and not affected_header_index:
            affected_header_index = index + 1
            affected_status = 'not_affected'
        elif affected:
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    if affected_header_index:
        not_affected_header_index = len(hazard_classification['classes']) + 2
    else:
        affected_header_index = len(hazard_classification['classes']) + 1
        not_affected_header_index = affected_header_index + 2

    report_fields = []

    headers.insert(affected_header_index, total_affected_field['name'])
    header_hazard_group['affected']['total'].append(
        total_affected_field['name'])
    report_fields.append(total_affected_field)

    headers.insert(not_affected_header_index, total_not_affected_field['name'])
    header_hazard_group['not_affected']['total'].append(
        total_not_affected_field['name'])
    report_fields.append(total_not_affected_field)

    # affected, not affected, population (if applicable), not exposed,
    # total header
    report_fields += [total_not_exposed_field, total_field]

    place_pop_name = resolve_from_dictionary(
        extra_args, ['place_with_population', 'header'])
    if is_place_with_population:
        # we want to change header name for population
        duplicated_population_count_field = deepcopy(
            exposed_population_count_field)
        duplicated_population_count_field['name'] = place_pop_name
        report_fields.append(duplicated_population_count_field)

    report_fields_index = -2 + -(int(is_place_with_population))
    for report_field in report_fields[report_fields_index:]:
        headers.append(report_field['name'])
    """Create detail rows."""
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fieldNameIndex(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in header_hazard_group.iteritems():
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fieldNameIndex(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(count_value,
                                            use_rounding=use_rounding,
                                            is_population=is_population)
                row.append({'value': count_value, 'header_group': group_key})
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({'value': 0, 'header_group': group_key})

        skip_row = False

        for field in report_fields:
            group_key = None
            for key, group in header_hazard_group.iteritems():
                if field['name'] in group['total']:
                    group_key = key
                    break

            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(total_count,
                                        use_rounding=use_rounding,
                                        is_population=is_population)

            # we comment below code because now we want to show all rows,
            # we can uncomment if we want to remove the rows with zero total

            # if total_count == '0' and field == total_affected_field:
            #     skip_row = True
            #     break

            if group_key:
                if field == total_affected_field:
                    row.insert(affected_header_index, {
                        'value': total_count,
                        'header_group': group_key
                    })
                elif field == total_not_affected_field:
                    row.insert(not_affected_header_index, {
                        'value': total_count,
                        'header_group': group_key
                    })
                else:
                    row.append({
                        'value': total_count,
                        'header_group': group_key
                    })
            else:
                row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_keywords)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index."""
        # class key is first column
        _class_key = _row[0]
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _class_key == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name
    """create total footers."""
    # create total header
    footers = [total_field['name']]
    # total for hazard
    save_total_affected_field = False
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (hazard_class['key'], )
        if not hazard_class.get('affected'):
            save_total_affected_field = True

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fieldNameIndex(field_name)
            count_value = format_number(analysis_feature[field_index],
                                        use_rounding=use_rounding,
                                        is_population=is_population)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers) + int(save_total_affected_field)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            # reduce total affected and not affected column index by 1
            # since we are removing a column
            if group_key == affected_field['field_name']:
                affected_header_index -= 1
            else:
                not_affected_header_index -= 1
            continue
        footers.append({'value': count_value, 'header_group': group_key})

    # for footers
    for field in report_fields:

        total_count = value_from_field_name(field['field_name'],
                                            analysis_layer)

        if not total_count and field['name'] == place_pop_name:
            field = population_count_field
            field['name'] = place_pop_name
            total_count = value_from_field_name(field['field_name'],
                                                analysis_layer)

        group_key = None
        for key, group in header_hazard_group.iteritems():
            if field['name'] in group['total']:
                group_key = key
                break

        total_count = format_number(total_count,
                                    use_rounding=use_rounding,
                                    is_population=is_population)
        if group_key:
            if field == total_affected_field:
                footers.insert(affected_header_index, {
                    'value': total_count,
                    'header_group': group_key
                })
            elif field == total_not_affected_field:
                footers.insert(not_affected_header_index, {
                    'value': total_count,
                    'header_group': group_key
                })
            else:
                footers.append({
                    'value': total_count,
                    'header_group': group_key
                })
        else:
            footers.append(total_count)

    header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')

    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0

    # we want to include total affected and not affected as a group
    # to its class so len(report_fields) - 2
    total_header_index = len(headers) - (len(report_fields) - 2)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in header_hazard_group.iteritems():
            if hazard_class_name in group['hazards'] or (hazard_class_name
                                                         in group['total']):
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    context['extra_table'] = {}

    # extra table for specific exposure if exist
    extra_fields = resolve_from_dictionary(extra_args, 'exposure_extra_fields')
    if exposure_type['key'] in extra_fields.keys():

        # create header for the extra table
        extra_table_header_format = resolve_from_dictionary(
            extra_args, 'extra_table_header_format')
        extra_table_header = extra_table_header_format.format(
            exposure=exposure_header)

        # headers
        headers = []
        headers.append(
            breakdown_header_template.format(exposure=exposure_header))

        current_unit = None
        currency_unit = setting('currency', expected_type=str)
        for field in extra_fields[exposure_type['key']]:
            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            if field_index < 0:
                LOGGER.debug('Field name not found: %s, field index: %s' %
                             (field['field_name'], field_index))
                continue

            units = field.get('units')
            if units:
                for unit in units:
                    if currency_unit == unit['key']:
                        current_unit = unit['name']
                        break
                if not current_unit:
                    current_unit = units[0]['name']

            header_format = '{header} ({unit})'
            headers.append(
                header_format.format(header=field['header_name'],
                                     unit=current_unit))

        # rows
        details = []
        for feat in exposure_summary_table.getFeatures():
            row = []

            # Get breakdown name
            exposure_summary_table_field_name = breakdown_field['field_name']
            field_index = exposure_summary_table.fieldNameIndex(
                exposure_summary_table_field_name)
            class_key = feat[field_index]

            row.append(class_key)

            for field in extra_fields[exposure_type['key']]:
                field_index = exposure_summary_table.fieldNameIndex(
                    field['field_name'])
                # noinspection PyBroadException
                try:
                    total_count = int(float(feat[field_index]))
                except:
                    LOGGER.debug('Field name not found: %s, field index: %s' %
                                 (field['field_name'], field_index))
                    continue
                total_count = format_number(total_count,
                                            use_rounding=use_rounding,
                                            is_population=is_population)
                row.append(total_count)

            details.append(row)

        details = sorted(details, key=sort_classes)

        context['extra_table'] = {
            'table_header': extra_table_header,
            'headers': headers,
            'details': details,
        }

    return context
def analysis_provenance_details_simplified_extractor(
        impact_report, component_metadata):
    """Extracting simplified version of provenance details of layers.

    This extractor will produce provenance details which will be displayed in
    the main report.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    extra_args = component_metadata.extra_args
    provenance = impact_report.impact_function.provenance

    default_source = resolve_from_dictionary(
        extra_args, ['defaults', 'source'])
    default_reference = resolve_from_dictionary(
        extra_args, ['defaults', 'reference'])
    format_args = resolve_from_dictionary(
        extra_args, 'provenance_format')

    def layer_entry(keywords, header_key, format_key):
        # Build one {header, provenance} pair for an input layer, falling
        # back to the default source text when the layer has no source.
        entry_format = resolve_from_dictionary(format_args, format_key)
        return {
            'header': resolve_from_dictionary(format_args, header_key),
            'provenance': entry_format.format(
                layer_name=keywords.get('title'),
                source=keywords.get('source') or default_source)
        }

    # Keep insertion order: hazard, exposure, aggregation, impact function.
    provenance_detail = OrderedDict()
    provenance_detail['hazard'] = layer_entry(
        provenance['hazard_keywords'], 'hazard_header', 'hazard_format')
    provenance_detail['exposure'] = layer_entry(
        provenance['exposure_keywords'], 'exposure_header', 'exposure_format')

    # Aggregation entry only describes a layer when one was actually used;
    # otherwise it carries the "not used" default text.
    aggregation_keywords = provenance['aggregation_keywords']
    if aggregation_keywords:
        provenance_detail['aggregation'] = layer_entry(
            aggregation_keywords, 'aggregation_header', 'aggregation_format')
    else:
        provenance_detail['aggregation'] = {
            'header': resolve_from_dictionary(
                format_args, 'aggregation_header'),
            'provenance': resolve_from_dictionary(
                extra_args, ['defaults', 'aggregation_not_used'])
        }

    # Impact function entry uses the function name plus a fixed reference.
    impact_function_format = resolve_from_dictionary(
        format_args, 'impact_function_format')
    provenance_detail['impact_function'] = {
        'header': resolve_from_dictionary(
            format_args, 'impact_function_header'),
        'provenance': impact_function_format.format(
            impact_function_name=impact_report.impact_function.name,
            reference=default_reference)
    }

    return {
        'header': resolve_from_dictionary(
            extra_args, ['header', 'analysis_detail']),
        'details': provenance_detail
    }
Esempio n. 25
0
def general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    # figure out analysis report type
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode

    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # find hazard class
    summary = []

    # NOTE(review): iterator .next() is Python 2 only; presumably this
    # module targets QGIS 2 / Python 2 (see fieldNameIndex below) -- confirm.
    # The analysis layer is assumed to hold exactly one summary feature.
    analysis_feature = analysis_layer.getFeatures().next()
    analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

    # Show the unit abbreviation in the value header when one is defined.
    exposure_unit = exposure_type['units'][0]
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')
    if exposure_unit['abbreviation']:
        value_header = u'{measure} ({abbreviation})'.format(**exposure_unit)
    else:
        value_header = u'{name}'.format(**exposure_unit)

    # in case there is a classification
    if 'classification' in hazard_layer.keywords:

        # retrieve hazard classification from hazard layer
        hazard_classification = layer_hazard_classification(hazard_layer)

        # classified hazard must have hazard count in analysis layer
        hazard_stats = []
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )
            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = analysis_inasafe_fields[field_key_name]
                field_index = analysis_layer.fieldNameIndex(field_name)
                # Hazard label taken from translated hazard count field
                # label, string-formatted with translated hazard class label
                hazard_label = hazard_class['name']

                # NOTE(review): other extractors in this file pass
                # 'use_rounding' to format_number; confirm which keyword the
                # current safe.utilities rounding API expects.
                hazard_value = format_number(
                    analysis_feature[field_index],
                    enable_rounding=is_rounded,
                    is_population=is_population)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': hazard_value
                }
            except KeyError:
                # in case the field was not found
                hazard_label = hazard_class['name']
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': 0,
                }

            hazard_stats.append(stats)

        # find total field
        try:
            field_name = analysis_inasafe_fields[total_field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            total = format_number(
                total, enable_rounding=is_rounded, is_population=is_population)
            stats = {
                'key': total_field['key'],
                'name': total_field['name'],
                'as_header': True,
                'value': total
            }
            hazard_stats.append(stats)
        except KeyError:
            # total field absent from the analysis keywords: skip the row
            pass

        summary.append({
            'header_label': hazard_header,
            'value_label': value_header,
            'rows': hazard_stats
        })

    # retrieve affected column
    report_stats = []

    reported_fields = resolve_from_dictionary(
        extra_args, 'reported_fields')
    for item in reported_fields:
        header = item['header']
        field = item['field']
        if field['key'] in analysis_inasafe_fields:
            field_index = analysis_layer.fieldNameIndex(
                field['field_name'])
            if field == fatalities_field:
                # For fatalities field, we show a range of number
                # instead
                row_value = fatalities_range(analysis_feature[field_index])
            else:
                row_value = format_number(
                    analysis_feature[field_index],
                    enable_rounding=is_rounded,
                    is_population=is_population)
            row_stats = {
                'key': field['key'],
                'name': header,
                'value': row_value
            }
            report_stats.append(row_stats)

    # Give report section
    exposure_type = layer_definition_type(exposure_layer)
    header_label = exposure_type['name']
    summary.append({
        'header_label': header_label,
        # This should depend on exposure unit
        # TODO: Change this so it can take the unit dynamically
        'value_label': value_header,
        'rows': report_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])
    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')
    # NOTE(review): hazard_classification is only bound inside the
    # 'classification' branch above; this line raises NameError for an
    # unclassified hazard layer. Presumably this extractor is only used
    # with classified hazards -- confirm.
    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=hazard_classification['classification_unit'])

    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header

    return context
Esempio n. 26
0
def mmi_detail_extractor(impact_report, component_metadata):
    """Extracting MMI-related analysis result.

    This extractor should only be used for EQ Raster with Population.
    For any other hazard/exposure combination an empty context is returned.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    analysis_layer = impact_report.analysis
    analysis_keywords = analysis_layer.keywords
    extra_args = component_metadata.extra_args
    use_rounding = impact_report.impact_function.use_rounding
    provenance = impact_report.impact_function.provenance
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    # Guard clauses: bail out unless this is EQ raster with population.
    if definition(hazard_keywords['hazard']) != hazard_earthquake:
        return context
    if hazard_keywords[layer_geometry['key']] != layer_geometry_raster['key']:
        return context
    if definition(exposure_keywords['exposure']) != exposure_population:
        return context

    context['header'] = resolve_from_dictionary(extra_args, 'header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Table header: the MMI column followed by one column per reported field.
    table_header = [resolve_from_dictionary(extra_args, 'mmi_header')]
    table_header += [item['header'] for item in reported_fields]

    # MMI is ranged from 1 to 10 inclusive; each row is labelled with the
    # matching roman numeral.
    roman_numerals = [
        'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X'
    ]
    rows = []
    for mmi_level, numeral in enumerate(roman_numerals, start=1):
        row = [numeral]
        for item in reported_fields:
            try:
                # The field key is a template parameterised by MMI level;
                # missing keys simply report zero.
                field_name = analysis_keywords[
                    item['field']['key'] % (mmi_level, )]
                count = value_from_field_name(
                    field_name, analysis_layer) or 0
            except KeyError:
                count = 0
            row.append(format_number(
                count, use_rounding=use_rounding, is_population=True))
        rows.append(row)

    # Totals footer, one entry per configured total field.
    total_footer = [resolve_from_dictionary(extra_args, 'total_header')]
    for field in resolve_from_dictionary(extra_args, 'total_fields'):
        try:
            field_name = analysis_keywords[field['key']]
            total = value_from_field_name(field_name, analysis_layer) or 0
        except KeyError:
            total = 0
        total_footer.append(format_number(
            total, use_rounding=use_rounding, is_population=True))

    context['component_key'] = component_metadata.key
    context['mmi'] = {
        'header': table_header,
        'rows': rows,
        'footer': total_footer
    }

    return context
Esempio n. 27
0
def qgis_composer_extractor(impact_report, component_metadata):
    """Extract composer context.

    This method extract necessary context for a given impact report and
    component metadata and save the context so it can be used in composer
    rendering phase

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    # QGIS Composer needed certain context to generate the output
    # - Map Settings
    # - Substitution maps
    # - Element settings, such as icon for picture file or image source

    # Generate map settings
    qgis_context = impact_report.qgis_composition_context
    inasafe_context = impact_report.inasafe_context
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args

    context = QGISComposerContext()

    # Set default image elements to replace
    # Each entry maps a composer item id in the template to the image path
    # that should be substituted in.
    image_elements = [
        {
            'id': 'safe-logo',
            'path': inasafe_context.inasafe_logo
        },
        {
            'id': 'black-inasafe-logo',
            'path': inasafe_context.black_inasafe_logo
        },
        {
            'id': 'white-inasafe-logo',
            'path': inasafe_context.white_inasafe_logo
        },
        {
            'id': 'north-arrow',
            'path': inasafe_context.north_arrow
        },
        {
            'id': 'organisation-logo',
            'path': inasafe_context.organisation_logo
        },
        # NOTE(review): this id uses an underscore while the others use
        # dashes -- presumably it matches the template item id; confirm.
        {
            'id': 'supporters_logo',
            'path': inasafe_context.supporters_logo
        }
    ]
    context.image_elements = image_elements

    # Set default HTML Frame elements to replace
    html_frame_elements = [
        {
            'id': 'impact-report',
            'mode': 'text',  # another mode is url
            'text': '',  # TODO: get impact summary table
        }
    ]
    context.html_frame_elements = html_frame_elements

    # Set default map to resize

    # check show only impact
    show_only_impact = setting('set_show_only_impact_on_report', False, bool)
    layers = [impact_report.impact] + impact_report.extra_layers
    layer_registry = QgsMapLayerRegistry.instance()
    if not show_only_impact:
        # NOTE(review): mapLayers().get() returns None when the layer id is
        # no longer registered, so layers may contain None entries here.
        # Presumably the composer tolerates that -- confirm.
        hazard_layer = layer_registry.mapLayers().get(
            provenance['hazard_layer_id'], None)

        aggregation_layer_id = provenance['aggregation_layer_id']
        if aggregation_layer_id:
            aggregation_layer = layer_registry.mapLayers().get(
                aggregation_layer_id, None)
            layers.insert(0, aggregation_layer)

        layers.append(hazard_layer)

    # check hide exposure settings
    # NOTE(review): this settings key is camelCase while the one above is
    # snake_case -- kept as-is; renaming it would orphan stored settings.
    hide_exposure_flag = setting('setHideExposureFlag', False, bool)
    if not hide_exposure_flag:
        # place exposure at the bottom
        exposure_layer = layer_registry.mapLayers().get(
            provenance['exposure_layer_id'])
        layers.append(exposure_layer)

    # default extent is analysis extent
    if not qgis_context.extent:
        qgis_context.extent = impact_report.impact_function.analysis_extent

    map_elements = [
        {
            'id': 'impact-map',
            'extent': qgis_context.extent,
            'grid_split_count': 5,
            'layers': layers,
        }
    ]
    context.map_elements = map_elements

    # calculate map_legends, only show the legend for impact layer
    layers = [impact_report.impact]
    symbol_count = 0
    # Count legend symbols, trying the layer APIs in turn: raster layers
    # expose legendSymbologyItems(), vector layers rendererV2(); fall back
    # to counting a single symbol when neither call succeeds.
    for l in layers:
        layer = l
        """:type: qgis.core.QgsMapLayer"""
        try:
            symbol_count += len(layer.legendSymbologyItems())
            continue
        except Exception:  # pylint: disable=broad-except
            pass
        try:
            symbol_count += len(layer.rendererV2().legendSymbolItemsV2())
            continue
        except Exception:  # pylint: disable=broad-except
            pass
        symbol_count += 1

    legend_title = provenance['map_legend_title'] or ''

    map_legends = [
        {
            'id': 'impact-legend',
            'title': legend_title,
            'layers': layers,
            'symbol_count': symbol_count,
            # 'column_count': 2,  # the number of column in legend display
        }
    ]
    context.map_legends = map_legends

    # process substitution map
    start_datetime = provenance['start_datetime']
    """:type: datetime.datetime"""
    date_format = resolve_from_dictionary(extra_args, 'date-format')
    time_format = resolve_from_dictionary(extra_args, 'time-format')
    if isinstance(start_datetime, datetime.datetime):
        date = start_datetime.strftime(date_format)
        time = start_datetime.strftime(time_format)
    else:
        # non-datetime provenance value: leave both fields blank
        date = ''
        time = ''
    # NOTE(review): assumes the version string has at least three
    # dot-separated tokens (e.g. '4.2.0'); IndexError otherwise -- confirm
    # get_version() always returns a semver-like string.
    long_version = get_version()
    tokens = long_version.split('.')
    version = '%s.%s.%s' % (tokens[0], tokens[1], tokens[2])
    # Get title of the layer
    title = provenance['map_title']

    # Set source
    unknown_source_text = resolve_from_dictionary(
        extra_args, ['defaults', 'unknown_source'])
    aggregation_not_used = resolve_from_dictionary(
        extra_args, ['defaults', 'aggregation_not_used'])

    hazard_source = (
        provenance['hazard_keywords'].get('source') or unknown_source_text)
    exposure_source = (
        provenance['exposure_keywords'].get('source') or unknown_source_text)
    if provenance['aggregation_layer']:
        aggregation_source = (
            provenance['aggregation_keywords'].get('source') or
            unknown_source_text)
    else:
        aggregation_source = aggregation_not_used

    spatial_reference_format = resolve_from_dictionary(
        extra_args, 'spatial-reference-format')
    reference_name = spatial_reference_format.format(
        crs=impact_report.impact_function.impact.crs().authid())

    analysis_layer = impact_report.analysis
    analysis_name = value_from_field_name(
        analysis_name_field['field_name'], analysis_layer)

    # Prepare the substitution map
    version_title = resolve_from_dictionary(extra_args, 'version-title')
    disclaimer_title = resolve_from_dictionary(extra_args, 'disclaimer-title')
    date_title = resolve_from_dictionary(extra_args, 'date-title')
    time_title = resolve_from_dictionary(extra_args, 'time-title')
    caution_title = resolve_from_dictionary(extra_args, 'caution-title')
    caution_text = resolve_from_dictionary(extra_args, 'caution-text')
    version_text = resolve_from_dictionary(extra_args, 'version-text')
    legend_section_title = resolve_from_dictionary(
        extra_args, 'legend-title')
    information_title = resolve_from_dictionary(
        extra_args, 'information-title')
    supporters_title = resolve_from_dictionary(
        extra_args, 'supporters-title')
    source_title = resolve_from_dictionary(extra_args, 'source-title')
    analysis_title = resolve_from_dictionary(extra_args, 'analysis-title')
    reference_title = resolve_from_dictionary(
        extra_args, 'spatial-reference-title')
    substitution_map = {
        'impact-title': title,
        'date': date,
        'time': time,
        'safe-version': version,  # deprecated
        'disclaimer': inasafe_context.disclaimer,
        # These added in 3.2
        'version-title': version_title,
        'inasafe-version': version,
        'disclaimer-title': disclaimer_title,
        'date-title': date_title,
        'time-title': time_title,
        'caution-title': caution_title,
        'caution-text': caution_text,
        'version-text': version_text.format(version=version),
        'legend-title': legend_section_title,
        'information-title': information_title,
        'supporters-title': supporters_title,
        'source-title': source_title,
        'analysis-title': analysis_title,
        'analysis-name': analysis_name,
        'reference-title': reference_title,
        'reference-name': reference_name,
        'hazard-source': hazard_source,
        'exposure-source': exposure_source,
        'aggregation-source': aggregation_source,
    }
    context.substitution_map = substitution_map
    return context
Esempio n. 28
0
def qgis_composer_infographic_extractor(impact_report, component_metadata):
    """Extract composer context specific for infographic template.

    This method extract necessary context for a given impact report and
    component metadata and save the context so it can be used in composer
    rendering phase

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.2
    """
    qgis_context = impact_report.qgis_composition_context
    extra_args = component_metadata.extra_args

    context = QGISComposerContext()

    # -- Image elements --

    # get all image elements with their respective source path; deep copy so
    # the shared module-level templates are never mutated
    image_elements = deepcopy(image_item_elements)

    # remove inasafe_logo_white because we use expression for the image source
    image_elements.remove(inasafe_logo_white)
    # remove population_chart because we still don't have the source path
    image_elements.remove(population_chart)
    context.image_elements = image_elements

    # Fix: set the population chart path on a *copy* of the element.
    # Previously the shared module-level ``population_chart`` dict was
    # mutated in place, leaking the path across report runs -- every other
    # element above is deep-copied precisely to avoid such mutation.
    population_chart_element = deepcopy(population_chart)
    population_chart_element['path'] = (
        impact_report.component_absolute_output_path('population-chart-png'))
    context.image_elements.append(population_chart_element)

    # -- HTML elements --

    components = resolve_from_dictionary(extra_args, 'components')
    html_elements = deepcopy(html_frame_elements)

    # get the html content from component that has been proceed
    for element in html_elements:
        component = components.get(element['component'])
        if component:
            element['text'] = jinja2_output_as_string(
                impact_report, component['key'])

    context.html_frame_elements = html_elements

    # -- Map elements --

    # look up the dedicated map overview layer by name; when several layers
    # share the name the last registered match wins (original behaviour)
    map_overview_layer = None
    layer_registry = QgsMapLayerRegistry.instance()
    for layer in layer_registry.mapLayers().values():
        if layer.name() == map_overview['id']:
            map_overview_layer = layer

    layers = [impact_report.impact_function.analysis_impacted]

    if map_overview_layer:
        layers.append(map_overview_layer)

    # default extent is analysis extent
    if not qgis_context.extent:
        qgis_context.extent = impact_report.impact_function.analysis_extent

    map_elements = [{
        'id': 'map-overview',
        'extent': qgis_context.extent,
        'grid_split_count': 5,
        'layers': layers,
    }]

    context.map_elements = map_elements

    return context
Esempio n. 29
0
def qgis_composer_infographic_extractor(impact_report, component_metadata):
    """Extract composer context specific for infographic template.

    This method extract necessary context for a given impact report and
    component metadata and save the context so it can be used in composer
    rendering phase

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.2
    """
    qgis_context = impact_report.qgis_composition_context
    extra_args = component_metadata.extra_args

    context = QGISComposerContext()

    """Image Elements"""

    # Start from a private copy of all image elements so the module-level
    # definitions are not mutated by the removals below.
    images = deepcopy(image_item_elements)
    # inasafe_logo_white is sourced through a composer expression,
    # so it must not carry an explicit path here.
    images.remove(inasafe_logo_white)
    # population_chart gets its source path resolved just below.
    images.remove(population_chart)
    context.image_elements = images

    # Resolve the rendered donut chart location for the population chart.
    # NOTE: this intentionally mutates the shared population_chart mapping,
    # matching the behaviour of the other composer extractors in this file.
    population_chart['path'] = impact_report.component_absolute_output_path(
        'population-chart-png')
    context.image_elements.append(population_chart)

    """HTML Elements"""

    components = resolve_from_dictionary(extra_args, 'components')
    frames = deepcopy(html_frame_elements)

    # Fill each HTML frame with the output of its already-rendered component.
    for frame in frames:
        matched_component = components.get(frame['component'])
        if matched_component:
            frame['text'] = jinja2_output_as_string(
                impact_report, matched_component['key'])

    context.html_frame_elements = frames

    """Map Elements"""

    # Look up the overview layer by name in the global registry, if loaded.
    overview_layer = None
    registry = QgsMapLayerRegistry.instance()
    for candidate in registry.mapLayers().values():
        if candidate.name() == map_overview['id']:
            overview_layer = candidate

    layers = [impact_report.impact_function.analysis_impacted]
    if overview_layer:
        layers.append(overview_layer)

    # Fall back to the analysis extent when no extent was set explicitly.
    if not qgis_context.extent:
        qgis_context.extent = impact_report.impact_function.analysis_extent

    context.map_elements = [
        {
            'id': 'map-overview',
            'extent': qgis_context.extent,
            'grid_split_count': 5,
            'layers': layers,
        }
    ]

    return context
Esempio n. 30
0
def create_section_with_aggregation(aggregation_summary,
                                    analysis_layer,
                                    postprocessor_fields,
                                    section_header,
                                    units_label=None,
                                    use_rounding=True,
                                    extra_component_args=None):
    """Create demographic section context with aggregation breakdown.

    :param aggregation_summary: Aggregation summary
    :type aggregation_summary: qgis.core.QgsVectorLayer

    :param analysis_layer: Analysis layer
    :type analysis_layer: qgis.core.QgsVectorLayer

    :param postprocessor_fields: Postprocessor fields to extract. Either a
        plain list of field definitions, or a dict with a 'fields' key and
        optional 'group_header' / 'group' keys for grouped columns.
    :type postprocessor_fields: list[dict] | dict

    :param section_header: Section header text
    :type section_header: str

    :param units_label: Unit label for each column
    :type units_label: list[str]

    :param use_rounding: flag for rounding, affect number representations
    :type use_rounding: bool

    :param extra_component_args: extra_args passed from report component
        metadata
    :type extra_component_args: dict

    :return: context for the demographic section; empty dict when the
        required postprocessor/displaced fields are missing
    :rtype: dict

    .. versionadded:: 4.0
    """
    aggregation_summary_fields = aggregation_summary.keywords['inasafe_fields']
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']

    # retrieving postprocessor field definitions
    if isinstance(postprocessor_fields, dict):
        output_fields = postprocessor_fields['fields']
    else:
        output_fields = postprocessor_fields

    # keep only the postprocessor fields actually present in the summary
    postprocessors_fields_found = [
        output_field for output_field in output_fields
        if output_field['key'] in aggregation_summary_fields]

    if not postprocessors_fields_found:
        return {}

    # figuring out displaced field
    # no displaced field in either layer, can't show result
    if displaced_field['key'] not in analysis_layer_fields:
        return {}
    if displaced_field['key'] not in aggregation_summary_fields:
        return {}

    """Generating header name for columns."""

    # First column header is aggregation title
    default_aggregation_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'aggregation_header'])
    total_population_header = resolve_from_dictionary(
        extra_component_args, ['defaults', 'total_population_header'])
    columns = [
        aggregation_summary.title() or default_aggregation_header,
        total_population_header,
    ]
    row_values = []

    group_fields_found = []
    start_group_header = True
    # FIX: group_header used to be assigned only inside the
    # isinstance(postprocessor_fields, dict) branch, so a plain-list
    # postprocessor_fields (or a dict without 'group_header') raised
    # NameError below. Default it explicitly instead.
    group_header = None
    for idx, output_field in enumerate(postprocessors_fields_found):

        # prefer the explicit header name, fall back to the field name
        name = output_field.get('header_name') or output_field.get('name')

        if units_label or output_field.get('unit'):
            unit = None
            if units_label:
                unit = units_label[idx]
            elif output_field.get('unit'):
                unit = output_field.get('unit').get('abbreviation')

            if unit:
                header = '{name} [{unit}]'.format(name=name, unit=unit)
            else:
                header = '{name}'.format(name=name)
        else:
            header = '{name}'.format(name=name)

        if isinstance(postprocessor_fields, dict):
            try:
                group_header = postprocessor_fields['group_header']
                group_fields = postprocessor_fields['group']['fields']
                if output_field in group_fields:
                    group_fields_found.append(output_field)
                else:
                    # field outside the group keeps a plain string header
                    columns.append(header)
                    continue
            except KeyError:
                # no grouping information: treat the field as grouped
                group_fields_found.append(output_field)

        header_dict = {
            'name': header,
            'group_header': group_header,
            'start_group_header': start_group_header
        }

        # only the first grouped column flags the start of the group header
        start_group_header = False
        columns.append(header_dict)

    """Generating values for rows."""

    for feature in aggregation_summary.getFeatures():

        aggregation_name_index = aggregation_summary.fields().lookupField(
            aggregation_name_field['field_name'])
        displaced_field_name = aggregation_summary_fields[
            displaced_field['key']]
        displaced_field_index = aggregation_summary.fields().lookupField(
            displaced_field_name)

        aggregation_name = feature[aggregation_name_index]
        total_displaced = feature[displaced_field_index]

        # skip if total displaced is null or zero (``is None`` was redundant
        # with the falsiness test)
        if not total_displaced:
            continue

        total_displaced = format_number(feature[displaced_field_index],
                                        use_rounding=use_rounding,
                                        is_population=True)

        row = [
            aggregation_name,
            total_displaced,
        ]

        # exact zero with rounding disabled carries no information
        if total_displaced == '0' and not use_rounding:
            continue

        for output_field in postprocessors_fields_found:
            field_name = aggregation_summary_fields[output_field['key']]
            field_index = aggregation_summary.fields().lookupField(field_name)
            value = feature[field_index]

            value = format_number(value,
                                  use_rounding=use_rounding,
                                  is_population=True)
            row.append(value)

        row_values.append(row)

    """Generating total rows."""

    total_displaced_field_name = analysis_layer_fields[displaced_field['key']]
    value = value_from_field_name(total_displaced_field_name, analysis_layer)
    value = format_number(value, use_rounding=use_rounding, is_population=True)
    total_header = resolve_from_dictionary(extra_component_args,
                                           ['defaults', 'total_header'])
    totals = [total_header, value]
    for output_field in postprocessors_fields_found:
        field_name = analysis_layer_fields[output_field['key']]
        value = value_from_field_name(field_name, analysis_layer)
        value = format_number(value,
                              use_rounding=use_rounding,
                              is_population=True)
        totals.append(value)

    default_notes = resolve_from_dictionary(extra_component_args,
                                            ['defaults', 'notes'])

    if not isinstance(default_notes, list):
        default_notes = [default_notes]

    try:
        notes = default_notes + postprocessor_fields['group']['notes']
    except (TypeError, KeyError):
        # list-style postprocessor_fields or no group notes provided
        notes = default_notes

    return {
        'notes': notes,
        'header': section_header,
        'columns': columns,
        'rows': row_values,
        'totals': totals,
        'group_header_colspan': len(group_fields_found)
    }
Esempio n. 31
0
def infographic_people_section_notes_extractor(
        impact_report, component_metadata):
    """Extracting notes for people section in the infographic.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.2
    """
    hazard_layer = impact_report.hazard
    extra_args = component_metadata.extra_args

    # first note: the extra note configured on the component metadata
    notes = [{
        'title': None,
        'description': resolve_from_dictionary(extra_args, 'extra_note'),
        'citations': None
    }]

    # notes for the affected / displaced people concepts, with the first
    # citation text of each
    for concept_key in ['affected_people', 'displaced_people']:
        concept = concepts[concept_key]
        notes.append({
            'title': concept.get('name'),
            'description': concept.get('description'),
            'citations': concept.get('citations')[0]['text']
        })

    hazard_classification = layer_hazard_classification(hazard_layer)

    # build the displacement-rate description: one formatted fragment per
    # hazard class, joined with commas
    rate_note_format = resolve_from_dictionary(
        extra_args, 'hazard_displacement_rates_note_format')
    rate_fragments = []
    for hazard_class in hazard_classification['classes']:
        hazard_class['classification_unit'] = (
            hazard_classification['classification_unit'])
        rate_fragments.append(rate_note_format.format(**hazard_class))

    displacement_concept = concepts['displacement_rate']
    notes.append({
        'title': displacement_concept.get('name'),
        'description': ', '.join(rate_fragments),
        'citations': displacement_concept.get('citations')[0]['text']
    })

    return {'notes': notes}
Esempio n. 32
0
def population_infographic_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    Builds the full infographic context: people counts, vulnerability
    breakdown, minimum needs, and the population donut chart reference.
    Returns an empty dict when the analysis is not a population report or
    required fields are missing.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    """Initializations"""
    hazard_layer = impact_report.hazard
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    icons = component_metadata.extra_args.get('icons')

    # this report sections only applies if it is a population report.
    population_fields = [
        population_count_field['key'],
        exposure_count_field['key'] % (exposure_population['key'], ),
    ] + [f['key'] for f in minimum_needs_fields]

    # for/else: the else runs only when no population field was found,
    # in which case we bail out with an empty context.
    for item in population_fields:
        if item in analysis_layer_fields:
            break
    else:
        return context

    # We try to get total affected field
    # if it didn't exists, check other fields to show
    total_affected_fields = [
        total_affected_field['key'],
        # We might want to check other fields, but turn it off until further
        # discussion
        population_count_field['key'],
        exposure_count_field['key'] % (exposure_population['key'], ),
    ]

    # first matching field wins; remember which one so the matching
    # sub-header can be picked below.
    for item in total_affected_fields:
        if item in analysis_layer_fields:
            total_affected = value_from_field_name(analysis_layer_fields[item],
                                                   analysis_layer)
            total_affected_field_used = item
            break
    else:
        return context

    # displaced count is mandatory for this infographic
    if displaced_field['key'] in analysis_layer_fields:
        total_displaced = value_from_field_name(
            analysis_layer_fields[displaced_field['key']], analysis_layer)
    else:
        return context

    sections = OrderedDict()
    """People Section"""

    # Take default value from definitions
    people_header = resolve_from_dictionary(extra_args,
                                            ['sections', 'people', 'header'])
    people_items = resolve_from_dictionary(extra_args,
                                           ['sections', 'people', 'items'])

    # create context for affected infographic
    sub_header = resolve_from_dictionary(people_items[0], 'sub_header')

    # retrieve relevant header based on the fields we showed.
    sub_header = sub_header[total_affected_field_used]

    affected_infographic = PeopleInfographicElement(
        header=sub_header,
        icon=icons.get(total_affected_field['key']),
        number=total_affected)

    # create context for displaced infographic
    sub_header = resolve_from_dictionary(people_items[1], 'sub_header')
    sub_header_note_format = resolve_from_dictionary(people_items[1],
                                                     'sub_header_note_format')
    rate_description_format = resolve_from_dictionary(
        people_items[1], 'rate_description_format')
    rate_description = []

    # one formatted fragment per hazard class that has a displacement rate
    hazard_classification = layer_hazard_classification(hazard_layer)
    for hazard_class in hazard_classification['classes']:
        displacement_rate = hazard_class.get('displacement_rate', 0)
        if displacement_rate:
            rate_description.append(
                rate_description_format.format(**hazard_class))

    rate_description_string = ', '.join(rate_description)

    sub_header_note = sub_header_note_format.format(
        rate_description=rate_description_string)

    displaced_infographic = PeopleInfographicElement(
        header=sub_header,
        header_note=sub_header_note,
        icon=icons.get(displaced_field['key']),
        number=total_displaced)

    sections['people'] = {
        'header': people_header,
        'items': [affected_infographic, displaced_infographic]
    }
    """Vulnerability Section"""

    # Take default value from definitions
    vulnerability_items = resolve_from_dictionary(
        extra_args, ['sections', 'vulnerability', 'items'])

    vulnerability_section_header = resolve_from_dictionary(
        extra_args, ['sections', 'vulnerability', 'header'])

    vulnerability_section_sub_header_format = resolve_from_dictionary(
        extra_args, ['sections', 'vulnerability', 'sub_header_format'])

    infographic_elements = []
    for group in vulnerability_items:
        fields = group['fields']
        group_header = group['sub_group_header']
        bootstrap_column = group['bootstrap_column']
        element_column = group['element_column']
        headers = group['headers']
        elements = []
        for field, header in zip(fields, headers):
            field_key = field['key']
            try:
                field_name = analysis_layer_fields[field_key]
                value = value_from_field_name(field_name, analysis_layer)
            except KeyError:
                # It means the field is not there
                continue

            # NOTE(review): divides by total_displaced — assumes it is
            # non-zero when value is truthy; TODO confirm upstream guarantee.
            if value:
                value_percentage = value * 100.0 / total_displaced
            else:
                value_percentage = 0

            infographic_element = PeopleVulnerabilityInfographicElement(
                header=header,
                icon=icons.get(field_key),
                number=value,
                percentage=value_percentage)
            elements.append(infographic_element)
        # skip the whole group if none of its fields were present
        if elements:
            infographic_elements.append({
                'group_header': group_header,
                'bootstrap_column': bootstrap_column,
                'element_column': element_column,
                'items': elements
            })

    # NOTE(review): uses enable_rounding= here while other sections use
    # use_rounding= — presumably two format_number signatures; verify.
    total_displaced_rounded = format_number(total_displaced,
                                            enable_rounding=True,
                                            is_population=True)

    sections['vulnerability'] = {
        'header':
        vulnerability_section_header,
        'small_header':
        vulnerability_section_sub_header_format.format(
            number_displaced=total_displaced_rounded),
        'items':
        infographic_elements
    }
    """Minimum Needs Section"""

    minimum_needs_header = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'header'])
    empty_unit_string = resolve_from_dictionary(
        extra_args, ['sections', 'minimum_needs', 'empty_unit_string'])

    items = []

    # note: the loop variable `item` is rebound from the field definition
    # to the infographic element inside the loop body.
    for item in minimum_needs_fields:
        need = item['need_parameter']
        if isinstance(need, ResourceParameter):

            needs_count = value_from_field_name(item['field_name'],
                                                analysis_layer)

            if need.unit.abbreviation:
                unit_string = '{unit}/{frequency}'.format(
                    unit=need.unit.abbreviation, frequency=need.frequency)
            else:
                unit_string = empty_unit_string

            item = PeopleMinimumNeedsInfographicElement(header=item['name'],
                                                        icon=icons.get(
                                                            item['key']),
                                                        number=needs_count,
                                                        unit=unit_string)
            items.append(item)

    # TODO: get from impact function provenance later
    needs_profile = NeedsProfile()

    sections['minimum_needs'] = {
        'header': minimum_needs_header,
        'small_header': needs_profile.provenance,
        'items': items,
    }
    """Population Charts"""

    population_donut_path = impact_report.component_absolute_output_path(
        'population-chart-png')

    # derive one CSS class per chart slice label (spaces stripped,
    # lower-cased) so the template can colour the legend entries.
    css_label_classes = []
    try:
        population_chart_context = impact_report.metadata.component_by_key(
            'population-chart').context['context']
        """
        :type: safe.report.extractors.infographic_elements.svg_charts.
            DonutChartContext
        """
        for pie_slice in population_chart_context.slices:
            label = pie_slice['label']
            if not label:
                continue
            css_class = label.replace(' ', '').lower()
            css_label_classes.append(css_class)
    except KeyError:
        # chart component missing: render the section without chart context
        population_chart_context = None

    sections['population_chart'] = {
        'img_path': resource_url(population_donut_path),
        'context': population_chart_context,
        'css_label_classes': css_label_classes
    }

    context['brand_logo'] = resource_url(
        resources_path('img', 'logos', 'inasafe-logo-white.png'))
    context['sections'] = sections
    context['title'] = analysis_layer.title() or value_from_field_name(
        analysis_name_field['field_name'], analysis_layer)

    return context
Esempio n. 33
0
def aggregation_result_extractor(impact_report, component_metadata):
    """Extracting aggregation result of breakdown from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase; empty dict for exposure types
        without classifications
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    exposure_summary_table = impact_report.exposure_summary_table
    # NOTE(review): when exposure_summary_table is falsy,
    # exposure_summary_table_fields stays unbound and later use would fail —
    # presumably the table is always present for itemizable exposures; verify.
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    aggregation_summary = impact_report.aggregation_summary
    aggregation_summary_fields = aggregation_summary.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    """Filtering report sections"""

    # Only process for applicable exposure types
    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # For now aggregation report only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context
    """Generating type name for columns"""

    type_fields = read_dynamic_inasafe_field(aggregation_summary_fields,
                                             affected_exposure_count_field)
    # do not include total, to preserve ordering and proper reference
    type_fields.remove('total')

    # we need to sort the column
    # get the classes lists
    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort columns based on class order
    # create function to sort
    def sort_classes(_type_field):
        """Sort method to retrieve exposure class key index."""
        # class key is the type field name
        # find index in class list; unknown keys sort first (-1)
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _type_field == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    type_fields = sorted(type_fields, key=sort_classes)

    # generate type_header_labels for column header
    type_header_labels = []
    for type_name in type_fields:
        type_label = tr(type_name.capitalize())
        type_header_labels.append(type_label)
    """Generating values for rows"""

    # generate rows of values for values of each column
    # NOTE(review): fieldNameIndex is the QGIS 2 API; sibling extractors in
    # this file use fields().lookupField — confirm the targeted QGIS version.
    rows = []
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    total_field_index = aggregation_summary.fieldNameIndex(
        total_affected_field['field_name'])

    type_field_index = []
    for type_name in type_fields:
        field_name = affected_exposure_count_field['field_name'] % type_name
        type_index = aggregation_summary.fieldNameIndex(field_name)
        type_field_index.append(type_index)

    for feat in aggregation_summary.getFeatures():
        total_affected_value = format_number(feat[total_field_index],
                                             enable_rounding=is_rounded,
                                             is_population=is_population)
        if total_affected_value == '0':
            # skip aggregation type if the total affected is zero
            continue
        item = {
            # Name is the header for each row
            'name': feat[aggregation_name_index],
            # Total is the total for each row
            'total': total_affected_value
        }
        # Type values is the values for each column in each row
        type_values = []
        for idx in type_field_index:
            affected_value = format_number(feat[idx],
                                           enable_rounding=is_rounded)
            type_values.append(affected_value)
        item['type_values'] = type_values
        rows.append(item)
    """Generate total for footers"""

    # calculate total values for each type. Taken from exposure summary table
    type_total_values = []
    # Get affected field index
    affected_field_index = exposure_summary_table.fieldNameIndex(
        total_affected_field['field_name'])

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    breakdown_field_name = breakdown_field['field_name']
    breakdown_field_index = exposure_summary_table.fieldNameIndex(
        breakdown_field_name)

    # Fetch total affected for each breakdown name
    value_dict = {}
    for feat in exposure_summary_table.getFeatures():
        # exposure summary table is in csv format, so the field returned is
        # always in text format
        affected_value = int(float(feat[affected_field_index]))
        affected_value = format_number(affected_value,
                                       enable_rounding=is_rounded,
                                       is_population=is_population)
        value_dict[feat[breakdown_field_index]] = affected_value

    if value_dict:
        for type_name in type_fields:
            affected_value_string_formatted = value_dict[type_name]
            if affected_value_string_formatted == '0':
                # if total affected for breakdown type is zero
                # drop the column entirely: cut its header and every row value
                # current column index
                column_index = len(type_total_values)
                # cut column header
                type_header_labels = (type_header_labels[:column_index] +
                                      type_header_labels[column_index + 1:])
                # cut all row values for the column
                for item in rows:
                    type_values = item['type_values']
                    item['type_values'] = (type_values[:column_index] +
                                           type_values[column_index + 1:])
                continue
            type_total_values.append(affected_value_string_formatted)
    """Get the super total affected"""

    # total for affected (super total)
    # FIX: ``.next()`` is Python 2 only; use the builtin ``next()`` so this
    # works on Python 3 (consistent with the rest of the converted file).
    analysis_feature = next(analysis_layer.getFeatures())
    field_index = analysis_layer.fieldNameIndex(
        total_affected_field['field_name'])
    total_all = format_number(analysis_feature[field_index],
                              enable_rounding=is_rounded)
    """Generate and format the context"""
    aggregation_area_default_header = resolve_from_dictionary(
        extra_args, 'aggregation_area_default_header')
    header_label = (aggregation_summary.title()
                    or aggregation_area_default_header)

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    # collapse repeated whitespace left by an empty unit placeholder
    table_header = table_header_format.format(
        title=provenance['map_legend_title'], unit=unit_string)
    table_header = ' '.join(table_header.split())

    section_header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    total_in_aggregation_header = resolve_from_dictionary(
        extra_args, 'total_in_aggregation_header')
    context['header'] = section_header
    context['notes'] = notes
    context['aggregation_result'] = {
        'table_header': table_header,
        'header_label': header_label,
        'type_header_labels': type_header_labels,
        'total_label': total_header,
        'total_in_aggregation_area_label': total_in_aggregation_header,
        'rows': rows,
        'type_total_values': type_total_values,
        'total_all': total_all,
    }
    return context
Esempio n. 34
0
def population_chart_extractor(impact_report, component_metadata):
    """Creating population donut chart.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    hazard_layer = impact_report.hazard
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']

    """Generate Donut chart for affected population"""

    # without a hazard classification there is nothing to chart
    hazard_classification = layer_hazard_classification(hazard_layer)
    if not hazard_classification:
        return context

    data, labels, colors = [], [], []

    # one slice per affected hazard class
    for hazard_class in hazard_classification['classes']:

        if not hazard_class['affected']:
            # unaffected classes do not get their own slice
            continue

        # hazard_count_field is a dynamic field keyed by hazard class
        field_key_name = hazard_count_field['key'] % (
            hazard_class['key'],)

        try:
            # resolve the dynamic field name from the analysis keywords;
            # a missing key means no count exists for this class
            field_name = analysis_layer_fields[field_key_name]
            hazard_value = round_affected_number(
                value_from_field_name(field_name, analysis_layer),
                enable_rounding=True,
                use_population_rounding=True)
        except KeyError:
            continue

        data.append(hazard_value)
        labels.append(hazard_class['name'])
        colors.append(hazard_class['color'].name())

    # append the total-not-affected slice, coloured green
    try:
        field_name = analysis_layer_fields[total_not_affected_field['key']]
        hazard_value = round_affected_number(
            value_from_field_name(field_name, analysis_layer),
            enable_rounding=True,
            use_population_rounding=True)

        data.append(hazard_value)
        labels.append(total_not_affected_field['name'])
        colors.append(green.name())
    except KeyError:
        # field absent: chart simply omits the not-affected slice
        pass

    # assemble the donut chart context
    chart_title = resolve_from_dictionary(extra_args, 'chart_title')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    context['context'] = DonutChartContext(
        data=data,
        labels=labels,
        colors=colors,
        inner_radius_ratio=0.5,
        stroke_color='#fff',
        title=chart_title,
        total_header=total_header,
        as_file=True)

    return context
Esempio n. 35
0
def _simplified_layer_provenance(keywords, provenance_format, default_source):
    """Return a one-line provenance string for a single layer.

    :param keywords: layer keywords; ``title`` and ``source`` are read.
    :type keywords: dict

    :param provenance_format: format string with ``layer_name`` and
        ``source`` placeholders.
    :type provenance_format: str

    :param default_source: fallback text used when the layer source
        cannot be decoded.
    :type default_source: str

    :return: the formatted provenance string, with any password stripped
        from the layer source URI.
    :rtype: str

    .. versionadded:: 4.0
    """
    return provenance_format.format(
        layer_name=keywords.get('title'),
        source=QgsDataSourceUri.removePassword(
            decode_full_layer_uri(keywords.get('source'))[0]
            or default_source))


def analysis_provenance_details_simplified_extractor(impact_report,
                                                     component_metadata):
    """Extracting simplified version of provenance details of layers.

    This extractor will produce provenance details which will be displayed in
    the main report.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    default_source = resolve_from_dictionary(extra_args,
                                             ['defaults', 'source'])
    default_reference = resolve_from_dictionary(extra_args,
                                                ['defaults', 'reference'])
    provenance_format_args = resolve_from_dictionary(extra_args,
                                                     'provenance_format')

    # Hazard layer provenance.
    hazard_keywords = impact_report.impact_function.provenance[
        'hazard_keywords']
    hazard_provenance = {
        'header': resolve_from_dictionary(
            provenance_format_args, 'hazard_header'),
        'provenance': _simplified_layer_provenance(
            hazard_keywords,
            resolve_from_dictionary(provenance_format_args, 'hazard_format'),
            default_source)
    }

    # Exposure layer provenance.
    exposure_keywords = impact_report.impact_function.provenance[
        'exposure_keywords']
    exposure_provenance = {
        'header': resolve_from_dictionary(
            provenance_format_args, 'exposure_header'),
        'provenance': _simplified_layer_provenance(
            exposure_keywords,
            resolve_from_dictionary(provenance_format_args, 'exposure_format'),
            default_source)
    }

    # Aggregation layer provenance — only formatted if an aggregation
    # layer was actually used in the analysis.
    aggregation_keywords = impact_report.impact_function.provenance[
        'aggregation_keywords']
    if aggregation_keywords:
        provenance_string = _simplified_layer_provenance(
            aggregation_keywords,
            resolve_from_dictionary(
                provenance_format_args, 'aggregation_format'),
            default_source)
    else:
        provenance_string = resolve_from_dictionary(
            extra_args, ['defaults', 'aggregation_not_used'])

    aggregation_provenance = {
        'header': resolve_from_dictionary(
            provenance_format_args, 'aggregation_header'),
        'provenance': provenance_string
    }

    # Impact function provenance (name + default reference text).
    impact_function_format = resolve_from_dictionary(
        provenance_format_args, 'impact_function_format')
    impact_function_provenance = {
        'header': resolve_from_dictionary(
            provenance_format_args, 'impact_function_header'),
        'provenance': impact_function_format.format(
            impact_function_name=impact_report.impact_function.name,
            reference=default_reference)
    }

    # Keep insertion order so the report always lists hazard, exposure,
    # aggregation, then impact function.
    provenance_detail = OrderedDict()
    provenance_detail['hazard'] = hazard_provenance
    provenance_detail['exposure'] = exposure_provenance
    provenance_detail['aggregation'] = aggregation_provenance
    provenance_detail['impact_function'] = impact_function_provenance

    analysis_details_header = resolve_from_dictionary(
        extra_args, ['header', 'analysis_detail'])

    context['component_key'] = component_metadata.key
    context.update({
        'header': analysis_details_header,
        'details': provenance_detail
    })

    return context
Example no. 36
0
def multi_exposure_general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.3
    """
    context = {}
    extra_args = component_metadata.extra_args

    multi_exposure = impact_report.multi_exposure_impact_function
    analysis_layer = impact_report.analysis
    provenances = [
        impact_function.provenance
        for impact_function in (multi_exposure.impact_functions)
    ]
    debug_mode = multi_exposure.debug
    population_exist = False

    # All sub-analyses share the same hazard, so the first provenance is
    # representative.
    hazard_keywords = provenances[0]['hazard_keywords']
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Summarize every value needed for each exposure and extract later to the
    # context.
    summary = []
    map_legend_titles = []
    hazard_classifications = {}
    exposures_stats = []
    for provenance in provenances:
        map_legend_title = provenance['map_legend_title']
        map_legend_titles.append(map_legend_title)
        exposure_stats = {}
        exposure_keywords = provenance['exposure_keywords']
        exposure_type = definition(exposure_keywords['exposure'])
        exposure_stats['exposure'] = exposure_type
        # Only round the number when it is population exposure and it is not
        # in debug mode
        is_rounded = not debug_mode
        is_population = exposure_type is exposure_population
        if is_population:
            population_exist = True

        analysis_feature = next(analysis_layer.getFeatures())
        analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

        exposure_unit = exposure_type['units'][0]
        if exposure_unit['abbreviation']:
            value_header = '{measure} ({abbreviation})'.format(
                measure=map_legend_title,
                abbreviation=exposure_unit['abbreviation'])
        else:
            value_header = '{name}'.format(name=map_legend_title)

        exposure_stats['value_header'] = value_header

        # Get hazard classification
        hazard_classification = definition(
            active_classification(hazard_keywords,
                                  exposure_keywords['exposure']))
        hazard_classifications[exposure_type['key']] = hazard_classification

        # Fix: always initialize this dict. It was previously created only
        # inside the classification branch below, so the reported-fields loop
        # would raise NameError when no hazard classification was found.
        reported_fields_result = {}

        # in case there is a classification
        if hazard_classification:
            classification_result = {}
            for hazard_class in hazard_classification['classes']:
                # hazard_count_field is a dynamic field with hazard class
                # as parameter
                field_key_name = exposure_hazard_count_field['key'] % (
                    exposure_type['key'], hazard_class['key'])
                try:
                    # retrieve dynamic field name from analysis_fields keywords
                    # will cause key error if no hazard count for that
                    # particular class
                    field_name = analysis_inasafe_fields[field_key_name]
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    hazard_value = format_number(analysis_feature[field_index],
                                                 use_rounding=is_rounded,
                                                 is_population=is_population)
                except KeyError:
                    # in case the field was not found
                    hazard_value = 0

                classification_result[hazard_class['key']] = hazard_value

            # find total field
            try:
                field_key_name = exposure_total_exposed_field['key'] % (
                    exposure_type['key'])
                field_name = analysis_inasafe_fields[field_key_name]
                total = value_from_field_name(field_name, analysis_layer)
                total = format_number(total,
                                      use_rounding=is_rounded,
                                      is_population=is_population)
                classification_result[total_exposed_field['key']] = total
            except KeyError:
                pass

            exposure_stats['classification_result'] = classification_result

        for item in reported_fields:
            field = item.get('field')
            multi_exposure_field = item.get('multi_exposure_field')
            row_value = '-'
            if multi_exposure_field:
                field_key = (multi_exposure_field['key'] %
                             (exposure_type['key']))
                field_name = (multi_exposure_field['field_name'] %
                              (exposure_type['key']))
                if field_key in analysis_inasafe_fields:
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    row_value = format_number(analysis_feature[field_index],
                                              use_rounding=is_rounded,
                                              is_population=is_population)

            elif field in [displaced_field, fatalities_field]:
                if field['key'] in analysis_inasafe_fields and is_population:
                    field_index = analysis_layer.fields().lookupField(
                        field['name'])
                    if field == fatalities_field:
                        # For fatalities field, we show a range of number
                        # instead
                        row_value = fatalities_range(
                            analysis_feature[field_index])
                    else:
                        row_value = format_number(
                            analysis_feature[field_index],
                            use_rounding=is_rounded,
                            is_population=is_population)

            reported_fields_result[field['key']] = row_value

        exposure_stats['reported_fields_result'] = reported_fields_result
        exposures_stats.append(exposure_stats)

    # After finish summarizing value, then proceed to context extraction.

    # find total value and labels for each exposure
    value_headers = []
    total_values = []
    for exposure_stats in exposures_stats:
        # label
        value_header = exposure_stats['value_header']
        value_headers.append(value_header)

        # total value
        # NOTE(review): assumes every exposure produced a classification
        # result with a total — confirm against the impact function output.
        classification_result = exposure_stats['classification_result']
        total_value = classification_result[total_exposed_field['key']]
        total_values.append(total_value)

    classifications = list(hazard_classifications.values())
    is_item_identical = (classifications.count(
        classifications[0]) == len(classifications))
    if classifications and is_item_identical:
        # All exposures use the same hazard classification: build one
        # combined table with one column per exposure.
        hazard_stats = []
        for hazard_class in classifications[0]['classes']:
            values = []
            for exposure_stats in exposures_stats:
                classification_result = exposure_stats['classification_result']
                value = classification_result[hazard_class['key']]
                values.append(value)
            stats = {
                'key': hazard_class['key'],
                'name': hazard_class['name'],
                'numbers': values
            }
            hazard_stats.append(stats)

        total_stats = {
            'key': total_exposed_field['key'],
            'name': total_exposed_field['name'],
            'as_header': True,
            'numbers': total_values
        }
        hazard_stats.append(total_stats)

        summary.append({
            'header_label': hazard_header,
            'value_labels': value_headers,
            'rows': hazard_stats
        })
    # if there are different hazard classifications used in the analysis,
    # we will create a separate table for each hazard classification
    else:
        hazard_classification_groups = {}
        for exposure_key, hazard_classification in (iter(
                list(hazard_classifications.items()))):
            exposure_type = definition(exposure_key)
            if hazard_classification['key'] not in (
                    hazard_classification_groups):
                hazard_classification_groups[hazard_classification['key']] = [
                    exposure_type
                ]
            else:
                hazard_classification_groups[
                    hazard_classification['key']].append(exposure_type)

        for hazard_classification_key, exposures in (iter(
                list(hazard_classification_groups.items()))):
            custom_headers = []
            custom_total_values = []
            # find total value and labels for each exposure
            for exposure_stats in exposures_stats:
                if exposure_stats['exposure'] not in exposures:
                    continue
                # label
                value_header = exposure_stats['value_header']
                custom_headers.append(value_header)

                # total value
                classification_result = exposure_stats['classification_result']
                total_value = classification_result[total_exposed_field['key']]
                custom_total_values.append(total_value)

            hazard_stats = []
            hazard_classification = definition(hazard_classification_key)
            for hazard_class in hazard_classification['classes']:
                values = []
                for exposure_stats in exposures_stats:
                    if exposure_stats['exposure'] not in exposures:
                        continue
                    classification_result = exposure_stats[
                        'classification_result']
                    value = classification_result[hazard_class['key']]
                    values.append(value)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_class['name'],
                    'numbers': values
                }
                hazard_stats.append(stats)

            total_stats = {
                'key': total_exposed_field['key'],
                'name': total_exposed_field['name'],
                'as_header': True,
                'numbers': custom_total_values
            }
            hazard_stats.append(total_stats)

            summary.append({
                'header_label': hazard_header,
                'value_labels': custom_headers,
                'rows': hazard_stats
            })

    reported_fields_stats = []
    for item in reported_fields:
        field = item.get('field')
        values = []
        for exposure_stats in exposures_stats:
            reported_fields_result = exposure_stats['reported_fields_result']
            value = reported_fields_result[field['key']]
            values.append(value)
        stats = {
            'key': field['key'],
            'name': item['header'],
            'numbers': values
        }
        reported_fields_stats.append(stats)

    header_label = resolve_from_dictionary(extra_args,
                                           ['reported_fields_header'])
    summary.append({
        'header_label': header_label,
        'value_labels': value_headers,
        'rows': reported_fields_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])

    combined_map_legend_title = ''
    for index, map_legend_title in enumerate(map_legend_titles):
        combined_map_legend_title += map_legend_title
        if not (index + 1) == len(map_legend_titles):
            combined_map_legend_title += ', '

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')
    table_header = table_header_format.format(
        title=combined_map_legend_title,
        unit=classifications[0]['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(extra_args,
                                          ['concept_notes', 'note_format'])

    concepts = resolve_from_dictionary(extra_args,
                                       ['concept_notes', 'general_concepts'])

    if population_exist:
        # Fix: build a new list instead of += so the general_concepts list
        # stored inside extra_args is never mutated in place — the old code
        # made population concepts accumulate across report generations.
        concepts = concepts + resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
Example no. 37
0
def notes_assumptions_extractor(impact_report, component_metadata):
    """Extracting notes and assumptions of the exposure layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args
    exposure_type = layer_definition_type(exposure_layer)

    context['header'] = resolve_from_dictionary(extra_args, 'header')
    # Fix: copy the list so the appends below do not mutate
    # provenance['notes'] in place (the old code duplicated the extra
    # notes on every report generation).
    context['items'] = list(provenance['notes'])

    # Get hazard classification
    hazard_classification = layer_hazard_classification(hazard_layer)

    # Check hazard affected class
    affected_classes = [
        hazard_class
        for hazard_class in hazard_classification['classes']
        if hazard_class.get('affected', False)
    ]

    if affected_classes:
        affected_note_format = resolve_from_dictionary(extra_args,
                                                       'affected_note_format')

        # generate hazard classes
        hazard_classes = ', '.join([c['name'] for c in affected_classes])

        context['items'].append(
            affected_note_format.format(hazard_classes=hazard_classes))

    # Check hazard have displacement rate
    have_displacement_rate = any(
        hazard_class.get('displacement_rate', 0) > 0
        for hazard_class in hazard_classification['classes'])

    # Only show displacement note if analysis about population exposure
    if have_displacement_rate and exposure_type == exposure_population:
        # add notes for displacement rate used
        displacement_note_format = resolve_from_dictionary(
            extra_args, 'displacement_rates_note_format')

        # generate rate description
        hazard_note_format = resolve_from_dictionary(
            extra_args, 'hazard_displacement_rates_note_format')
        hazard_note = []
        for hazard_class in hazard_classification['classes']:
            hazard_note.append(hazard_note_format.format(**hazard_class))

        rate_description = ', '.join(hazard_note)
        context['items'].append(
            displacement_note_format.format(rate_description=rate_description))

    return context
Example no. 38
0
def notes_assumptions_extractor(impact_report, component_metadata):
    """Extracting notes and assumptions of the exposure layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']
    exposure_type = definition(exposure_keywords['exposure'])

    analysis_note_dict = resolve_from_dictionary(extra_args, 'analysis_notes')
    context['items'] = [analysis_note_dict]

    context['component_key'] = component_metadata.key
    context['header'] = resolve_from_dictionary(extra_args, 'header')
    context['items'] += provenance['notes']

    # Get hazard classification
    hazard_classification = definition(
        active_classification(hazard_keywords, exposure_keywords['exposure']))

    # Check hazard affected class
    affected_classes = []
    for hazard_class in hazard_classification['classes']:
        if exposure_keywords['exposure'] == exposure_population['key']:
            # Taking from profile
            is_affected_class = is_affected(
                hazard=hazard_keywords['hazard'],
                classification=hazard_classification['key'],
                hazard_class=hazard_class['key'],
            )
            if is_affected_class:
                affected_classes.append(hazard_class)
        else:
            if hazard_class.get('affected', False):
                affected_classes.append(hazard_class)

    if affected_classes:
        # Fix: deepcopy before formatting item_list in place, otherwise the
        # first report generation permanently overwrites the format templates
        # stored inside extra_args (losing the {hazard_classes} placeholder).
        affected_note_dict = deepcopy(resolve_from_dictionary(
            extra_args, 'affected_note_format'))

        # generate hazard classes
        hazard_classes = ', '.join([
            c['name'] for c in affected_classes
        ])

        for index, affected_note in enumerate(affected_note_dict['item_list']):
            affected_note_dict['item_list'][index] = (
                affected_note.format(hazard_classes=hazard_classes)
            )

        context['items'].append(affected_note_dict)

    # Check hazard have displacement rate
    have_displacement_rate = any(
        hazard_class.get('displacement_rate', 0) > 0
        for hazard_class in hazard_classification['classes'])

    # Only show displacement note if analysis about population exposure
    if have_displacement_rate and exposure_type == exposure_population:
        # add notes for displacement rate used
        # Fix: deepcopy so formatting does not destroy the template (see
        # affected_note_dict above).
        displacement_note_dict = deepcopy(resolve_from_dictionary(
            extra_args, 'displacement_rates_note_format'))

        # generate rate description
        displacement_rates_note_format = resolve_from_dictionary(
            extra_args, 'hazard_displacement_rates_note_format')
        displacement_rates_note = []
        for hazard_class in hazard_classification['classes']:
            the_hazard_class = deepcopy(hazard_class)
            the_hazard_class['displacement_rate'] = get_displacement_rate(
                hazard=hazard_keywords['hazard'],
                classification=hazard_classification['key'],
                hazard_class=the_hazard_class['key']
            )
            displacement_rates_note.append(
                displacement_rates_note_format.format(**the_hazard_class))

        rate_description = ', '.join(displacement_rates_note)

        for index, displacement_note in enumerate(
                displacement_note_dict['item_list']):
            displacement_note_dict['item_list'][index] = (
                displacement_note.format(rate_description=rate_description)
            )

        context['items'].append(displacement_note_dict)

    # Check hazard have fatality rate
    have_fatality_rate = any(
        hazard_class.get('fatality_rate', None) is not None
        and hazard_class.get('fatality_rate', 0) > 0
        for hazard_class in hazard_classification['classes'])

    if have_fatality_rate and exposure_type == exposure_population:
        # add notes for fatality rate used
        # Fix: deepcopy so formatting does not destroy the template (see
        # affected_note_dict above).
        fatality_note_dict = deepcopy(resolve_from_dictionary(
            extra_args, 'fatality_rates_note_format'))

        # generate rate description
        fatality_rates_note_format = resolve_from_dictionary(
            extra_args, 'hazard_fatality_rates_note_format')
        fatality_rates_note = []
        for hazard_class in hazard_classification['classes']:
            # we make a copy here because we don't want to
            # change the real value.
            copy_of_hazard_class = dict(hazard_class)
            if copy_of_hazard_class['fatality_rate'] is None or \
                    copy_of_hazard_class['fatality_rate'] <= 0:
                copy_of_hazard_class['fatality_rate'] = 0
            else:
                # we want to show the rate as a scientific notation
                copy_of_hazard_class['fatality_rate'] = (
                    html_scientific_notation_rate(
                        copy_of_hazard_class['fatality_rate']))

            fatality_rates_note.append(
                fatality_rates_note_format.format(**copy_of_hazard_class))

        rate_description = ', '.join(fatality_rates_note)

        for index, fatality_note in enumerate(fatality_note_dict['item_list']):
            fatality_note_dict['item_list'][index] = (
                fatality_note.format(rate_description=rate_description)
            )

        context['items'].append(fatality_note_dict)

    return context
Example no. 39
0
def qgis_composer_extractor(impact_report, component_metadata):
    """Extract composer context.

    This method extract necessary context for a given impact report and
    component metadata and save the context so it can be used in composer
    rendering phase

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    # QGIS Composer needed certain context to generate the output
    # - Map Settings
    # - Substitution maps
    # - Element settings, such as icon for picture file or image source

    # Generate map settings
    qgis_context = impact_report.qgis_composition_context
    inasafe_context = impact_report.inasafe_context
    provenance = impact_report.impact_function.provenance
    extra_args = component_metadata.extra_args

    context = QGISComposerContext()

    # Set default image elements to replace
    # Each entry maps a composer item id to the image path that will be
    # substituted into that item at render time.
    image_elements = [{
        'id': 'safe-logo',
        'path': inasafe_context.inasafe_logo
    }, {
        'id': 'black-inasafe-logo',
        'path': inasafe_context.black_inasafe_logo
    }, {
        'id': 'white-inasafe-logo',
        'path': inasafe_context.white_inasafe_logo
    }, {
        'id': 'north-arrow',
        'path': inasafe_context.north_arrow
    }, {
        'id': 'organisation-logo',
        'path': inasafe_context.organisation_logo
    }, {
        'id': 'supporters_logo',
        'path': inasafe_context.supporters_logo
    }]
    context.image_elements = image_elements

    # Set default HTML Frame elements to replace
    html_frame_elements = [{
        'id': 'impact-report',
        'mode': 'text',  # another mode is url
        'text': '',  # TODO: get impact summary table
    }]
    context.html_frame_elements = html_frame_elements
    """Define the layers for the impact map."""

    project = QgsProject.instance()
    layers = []

    # For a multi-exposure analysis, collect each sub-analysis impact layer
    # (exposure summary, falling back to aggregate hazard impacted). These
    # are used later when computing the legend layers.
    exposure_summary_layers = []
    if impact_report.multi_exposure_impact_function:
        for impact_function in (
                impact_report.multi_exposure_impact_function.impact_functions):
            impact_layer = impact_function.exposure_summary or (
                impact_function.aggregate_hazard_impacted)
            exposure_summary_layers.append(impact_layer)

    # use custom ordered layer if any
    if impact_report.ordered_layers:
        for layer in impact_report.ordered_layers:
            layers.append(layer)

        # We are keeping this if we want to enable below behaviour again.
        # Currently realtime might have layer order without impact layer in it.

        # # make sure at least there is an impact layer
        # if impact_report.multi_exposure_impact_function:
        #     additional_layers = []  # for exposure summary layers
        #     impact_layer_found = False
        #     impact_functions = (
        #        impact_report.multi_exposure_impact_function.impact_functions)
        #     # check for impact layer occurrences
        #     for analysis in impact_functions:
        #         impact_layer = analysis.exposure_summary or (
        #             analysis.aggregate_hazard_impacted)
        #         for index, layer in enumerate(layers):
        #             if impact_layer.source() == layer.source():
        #                 add_impact_layers_to_canvas(analysis)
        #                 layers[index] = impact_layer
        #                 impact_layer_found = True
        #     if not impact_layer_found:
        #         for analysis in impact_functions:
        #             add_impact_layers_to_canvas(analysis)
        #             impact_layer = analysis.exposure_summary or (
        #                 analysis.aggregate_hazard_impacted)
        #             layer_uri = full_layer_uri(impact_layer)
        #             layer = load_layer_from_registry(layer_uri)
        #             additional_layers.append(layer)
        #     layers = additional_layers + layers
        # else:
        #     impact_layer = (
        #         impact_report.impact_function.exposure_summary or (
        #             impact_report.impact_function.aggregate_hazard_impacted))
        #     if impact_layer not in layers:
        #         layers.insert(0, impact_layer)

    # use default layer order if no custom ordered layer found
    else:
        if not impact_report.multi_exposure_impact_function:  # single IF
            layers = [impact_report.impact] + impact_report.extra_layers
        else:  # multi-exposure IF
            layers = [] + impact_report.extra_layers

        # Supplementary layers (hazard, aggregation, exposure) are only
        # appended for a single-exposure analysis, or for a multi-exposure
        # analysis that did not define its own output layer order.
        add_supplementary_layers = (
            not impact_report.multi_exposure_impact_function
            or not (impact_report.multi_exposure_impact_function.
                    output_layers_ordered))
        if add_supplementary_layers:
            # Check show only impact.
            show_only_impact = setting('set_show_only_impact_on_report',
                                       expected_type=bool)
            if not show_only_impact:
                hazard_layer = project.mapLayers().get(
                    provenance['hazard_layer_id'], None)

                aggregation_layer_id = provenance['aggregation_layer_id']
                if aggregation_layer_id:
                    aggregation_layer = project.mapLayers().get(
                        aggregation_layer_id, None)
                    layers.append(aggregation_layer)

                layers.append(hazard_layer)

            # check hide exposure settings
            hide_exposure_flag = setting('setHideExposureFlag',
                                         expected_type=bool)
            if not hide_exposure_flag:
                # Single-exposure provenance stores one exposure layer id;
                # multi-exposure provenance stores a list of them.
                exposure_layers_id = []
                if provenance.get(
                        provenance_exposure_layer_id['provenance_key']):
                    exposure_layers_id.append(
                        provenance.get(
                            provenance_exposure_layer_id['provenance_key']))
                elif provenance.get(
                        provenance_multi_exposure_layers_id['provenance_key']):
                    exposure_layers_id = provenance.get(
                        provenance_multi_exposure_layers_id['provenance_key'])

                # place exposure at the bottom
                for layer_id in exposure_layers_id:
                    exposure_layer = project.mapLayers().get(layer_id)
                    layers.append(exposure_layer)

    # default extent is analysis extent
    if not qgis_context.extent:
        qgis_context.extent = impact_report.impact_function.analysis_extent

    map_elements = [{
        'id': 'impact-map',
        'extent': qgis_context.extent,
        'grid_split_count': 5,
        'layers': layers,
    }]
    context.map_elements = map_elements

    # calculate map_legends, only show the legend for impact layer
    if impact_report.legend_layers:  # use requested legend if any
        layers = impact_report.legend_layers
    elif impact_report.multi_exposure_impact_function:  # multi-exposure IF
        layers = exposure_summary_layers
    else:  # single IF
        layers = [impact_report.impact]
    # Count legend symbols by duck-typing each layer: try
    # legendSymbologyItems() first, then renderer().legendSymbolItems(),
    # and fall back to counting the layer as a single symbol if neither
    # API is available on that layer type.
    symbol_count = 0
    for l in layers:
        layer = l
        """:type: qgis.core.QgsMapLayer"""
        try:
            symbol_count += len(layer.legendSymbologyItems())
            continue
        except Exception:  # pylint: disable=broad-except
            pass
        try:
            symbol_count += len(layer.renderer().legendSymbolItems())
            continue
        except Exception:  # pylint: disable=broad-except
            pass
        symbol_count += 1

    legend_title = provenance.get('map_legend_title') or ''

    map_legends = [{
        'id': 'impact-legend',
        'title': legend_title,
        'layers': layers,
        'symbol_count': symbol_count,
        # 'column_count': 2,  # the number of column in legend display
    }]
    context.map_legends = map_legends

    # process substitution map
    start_datetime = provenance['start_datetime']
    """:type: datetime.datetime"""
    date_format = resolve_from_dictionary(extra_args, 'date-format')
    time_format = resolve_from_dictionary(extra_args, 'time-format')
    if isinstance(start_datetime, datetime.datetime):
        date = start_datetime.strftime(date_format)
        time = start_datetime.strftime(time_format)
    else:
        date = ''
        time = ''
    long_version = get_version()
    tokens = long_version.split('.')
    # NOTE(review): assumes get_version() always yields at least three
    # dot-separated tokens (major.minor.patch) -- confirm its format.
    version = '%s.%s.%s' % (tokens[0], tokens[1], tokens[2])
    # Get title of the layer
    title = provenance.get('map_title') or ''

    # Set source
    unknown_source_text = resolve_from_dictionary(
        extra_args, ['defaults', 'unknown_source'])
    aggregation_not_used = resolve_from_dictionary(
        extra_args, ['defaults', 'aggregation_not_used'])

    hazard_source = (provenance.get('hazard_keywords', {}).get('source')
                     or unknown_source_text)
    exposure_source = (provenance.get('exposure_keywords', {}).get('source')
                       or unknown_source_text)
    if provenance['aggregation_layer']:
        aggregation_source = (provenance['aggregation_keywords'].get('source')
                              or unknown_source_text)
    else:
        aggregation_source = aggregation_not_used

    spatial_reference_format = resolve_from_dictionary(
        extra_args, 'spatial-reference-format')
    reference_name = spatial_reference_format.format(
        crs=impact_report.impact_function.crs.authid())

    analysis_layer = impact_report.analysis
    analysis_name = value_from_field_name(analysis_name_field['field_name'],
                                          analysis_layer)

    # Prepare the substitution map
    version_title = resolve_from_dictionary(extra_args, 'version-title')
    disclaimer_title = resolve_from_dictionary(extra_args, 'disclaimer-title')
    date_title = resolve_from_dictionary(extra_args, 'date-title')
    time_title = resolve_from_dictionary(extra_args, 'time-title')
    caution_title = resolve_from_dictionary(extra_args, 'caution-title')
    caution_text = resolve_from_dictionary(extra_args, 'caution-text')
    version_text = resolve_from_dictionary(extra_args, 'version-text')
    legend_section_title = resolve_from_dictionary(extra_args, 'legend-title')
    information_title = resolve_from_dictionary(extra_args,
                                                'information-title')
    supporters_title = resolve_from_dictionary(extra_args, 'supporters-title')
    source_title = resolve_from_dictionary(extra_args, 'source-title')
    analysis_title = resolve_from_dictionary(extra_args, 'analysis-title')
    reference_title = resolve_from_dictionary(extra_args,
                                              'spatial-reference-title')
    # Keys here are the composer placeholder names; values are the text
    # substituted into the composition.
    substitution_map = {
        'impact-title': title,
        'date': date,
        'time': time,
        'safe-version': version,  # deprecated
        'disclaimer': inasafe_context.disclaimer,
        # These added in 3.2
        'version-title': version_title,
        'inasafe-version': version,
        'disclaimer-title': disclaimer_title,
        'date-title': date_title,
        'time-title': time_title,
        'caution-title': caution_title,
        'caution-text': caution_text,
        'version-text': version_text.format(version=version),
        'legend-title': legend_section_title,
        'information-title': information_title,
        'supporters-title': supporters_title,
        'source-title': source_title,
        'analysis-title': analysis_title,
        'analysis-name': analysis_name,
        'reference-title': reference_title,
        'reference-name': reference_name,
        'hazard-source': hazard_source,
        'exposure-source': exposure_source,
        'aggregation-source': aggregation_source,
    }
    context.substitution_map = substitution_map
    return context
Esempio n. 40
0
def analysis_detail_extractor(impact_report, component_metadata):
    """Extracting analysis result from the impact layer.

    Builds a breakdown table (headers, per-class detail rows, total
    footers) of the exposure summary, grouped by affected / not affected
    hazard classes.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    # Python 3 fix: QGIS feature iterators have no .next() method;
    # use the next() builtin instead of .getFeatures().next().
    analysis_feature = next(analysis_layer.getFeatures())
    exposure_summary_table = impact_report.exposure_summary_table
    # NOTE(review): assumes the exposure summary table exists whenever the
    # exposure is itemizable (see the early return below); otherwise
    # exposure_summary_table_fields would be unbound -- confirm upstream.
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode

    # Initializations

    # Get hazard classification
    hazard_classification = layer_hazard_classification(hazard_layer)

    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounding = not debug_mode

    # Analysis detail only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    if exposure_type not in itemizable_exposures_all:
        return context

    # Get breakdown field
    breakdown_field = None
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment; pick whichever one the summary table actually has.
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break

    # Create detail header
    headers = []

    # breakdown header
    breakdown_header_template = ''
    if breakdown_field == exposure_type_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_type_format')
    elif breakdown_field == exposure_class_field:
        breakdown_header_template = resolve_from_dictionary(
            extra_args, 'breakdown_header_class_format')

    # check if there is header type associations
    type_header_mapping = resolve_from_dictionary(
        extra_args, 'exposure_type_header_mapping')

    if exposure_type['key'] in type_header_mapping:
        exposure_header = type_header_mapping[exposure_type['key']]
    else:
        exposure_header = exposure_type['name']

    headers.append(breakdown_header_template.format(exposure=exposure_header))

    # this is mapping for customizing double header for
    # affected/not affected hazard classes
    hazard_class_header_mapping = resolve_from_dictionary(
        extra_args, 'hazard_class_header_mapping')
    # hazard header
    # TODO: we need to get affected and not_affected key from
    # definitions concept
    header_hazard_group = {
        'affected': {
            'hazards': []
        },
        'not_affected': {
            'hazards': []
        }
    }
    # Python 3 fix: dict.iteritems() was removed; use dict.items().
    for key, group in header_hazard_group.items():
        if key in hazard_class_header_mapping:
            header_hazard_group[key].update(hazard_class_header_mapping[key])

    # Bucket every hazard class name into the affected / not_affected
    # group so the double header can span the right columns later.
    for hazard_class in hazard_classification['classes']:
        hazard_class_name = hazard_class['name']
        if hazard_class.get('affected'):
            affected_status = 'affected'
        else:
            affected_status = 'not_affected'

        header_hazard_group[affected_status]['hazards'].append(
            hazard_class_name)
        headers.append(hazard_class_name)

    # affected, not affected, not exposed, total header
    report_fields = [
        total_affected_field, total_not_affected_field,
        total_not_exposed_field, total_field
    ]
    for report_field in report_fields:
        headers.append(report_field['name'])

    # Create detail rows
    details = []
    for feat in exposure_summary_table.getFeatures():
        row = []

        # Get breakdown name
        exposure_summary_table_field_name = breakdown_field['field_name']
        field_index = exposure_summary_table.fieldNameIndex(
            exposure_summary_table_field_name)
        class_key = feat[field_index]

        row.append(class_key)

        # Get hazard count
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )

            group_key = None
            for key, group in header_hazard_group.items():
                if hazard_class['name'] in group['hazards']:
                    group_key = key
                    break

            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = exposure_summary_table_fields[field_key_name]
                field_index = exposure_summary_table.fieldNameIndex(field_name)
                # exposure summary table is in csv format, so the field
                # returned is always in text format
                count_value = int(float(feat[field_index]))
                count_value = format_number(count_value,
                                            enable_rounding=is_rounding)
                row.append({'value': count_value, 'header_group': group_key})
            except KeyError:
                # in case the field was not found
                # assume value 0
                row.append({'value': 0, 'header_group': group_key})

        skip_row = False

        for field in report_fields:
            field_index = exposure_summary_table.fieldNameIndex(
                field['field_name'])
            total_count = int(float(feat[field_index]))
            total_count = format_number(total_count,
                                        enable_rounding=is_rounding)
            # Rows with zero affected are dropped from the detail table.
            if total_count == '0' and field == total_affected_field:
                skip_row = True
                break

            row.append(total_count)

        if skip_row:
            continue

        details.append(row)

    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort detail rows based on class order
    # create function to sort
    def sort_classes(_row):
        """Sort method to retrieve exposure class key index.

        Unknown classes sort first (index -1).
        """
        # class key is first column
        _class_key = _row[0]
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _class_key == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    details = sorted(details, key=sort_classes)

    # retrieve breakdown name from classes list
    for row in details:
        class_key = row[0]
        for exposure_class in exposure_classes_lists:
            if class_key == exposure_class['key']:
                breakdown_name = exposure_class['name']
                break
        else:
            # attempt for dynamic translations
            breakdown_name = tr(class_key.capitalize())

        # replace class_key with the class name
        row[0] = breakdown_name

    # create total footers
    # create total header
    footers = [total_field['name']]
    # total for hazard
    for hazard_class in hazard_classification['classes']:
        # hazard_count_field is a dynamic field with hazard class
        # as parameter
        field_key_name = hazard_count_field['key'] % (hazard_class['key'], )

        group_key = None
        for key, group in header_hazard_group.items():
            if hazard_class['name'] in group['hazards']:
                group_key = key
                break

        try:
            # retrieve dynamic field name from analysis_fields keywords
            # will cause key error if no hazard count for that particular
            # class
            field_name = analysis_layer_fields[field_key_name]
            field_index = analysis_layer.fieldNameIndex(field_name)
            count_value = format_number(analysis_feature[field_index],
                                        enable_rounding=is_rounding)
        except KeyError:
            # in case the field was not found
            # assume value 0
            count_value = '0'

        if count_value == '0':
            # if total affected for hazard class is zero, delete entire
            # column
            column_index = len(footers)
            # delete header column
            headers = headers[:column_index] + headers[column_index + 1:]
            for row_idx in range(0, len(details)):
                row = details[row_idx]
                row = row[:column_index] + row[column_index + 1:]
                details[row_idx] = row
            continue
        footers.append({'value': count_value, 'header_group': group_key})

    # for footers
    for field in report_fields:
        total_count = value_from_field_name(field['field_name'],
                                            analysis_layer)
        total_count = format_number(total_count, enable_rounding=is_rounding)
        footers.append(total_count)

    header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')

    context['header'] = header
    context['group_border_color'] = resolve_from_dictionary(
        extra_args, 'group_border_color')
    context['notes'] = notes

    breakdown_header_index = 0
    total_header_index = len(headers) - len(report_fields)
    context['detail_header'] = {
        'header_hazard_group': header_hazard_group,
        'breakdown_header_index': breakdown_header_index,
        'total_header_index': total_header_index
    }

    # modify headers to include double header
    affected_headers = []
    last_group = 0
    for i in range(breakdown_header_index, total_header_index):
        hazard_class_name = headers[i]
        group_key = None
        for key, group in header_hazard_group.items():
            if hazard_class_name in group['hazards']:
                group_key = key
                break

        if group_key and group_key not in affected_headers:
            # first column of a group: starts the spanning header cell
            affected_headers.append(group_key)
            headers[i] = {
                'name': hazard_class_name,
                'start': True,
                'header_group': group_key,
                'colspan': 1
            }
            last_group = i
            header_hazard_group[group_key]['start_index'] = i
        elif group_key:
            # subsequent column of the same group: widen the group's span
            colspan = headers[last_group]['colspan']
            headers[last_group]['colspan'] = colspan + 1
            headers[i] = {
                'name': hazard_class_name,
                'start': False,
                'header_group': group_key
            }

    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string,
        exposure=exposure_header)
    # collapse repeated whitespace in the rendered header
    table_header = ' '.join(table_header.split())

    context['detail_table'] = {
        'table_header': table_header,
        'headers': headers,
        'details': details,
        'footers': footers,
    }

    return context
Esempio n. 41
0
def population_infographic_extractor(impact_report, component_metadata):
    """Extracting aggregate result of demographic.

    Builds the people / vulnerability / minimum-needs / population-chart
    sections for the population infographic. Returns an empty context when
    the analysis is not a population report.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    # Initializations
    hazard_layer = impact_report.hazard
    analysis_layer = impact_report.analysis
    analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
    icons = component_metadata.extra_args.get('icons')

    # this report sections only applies if it is a population report.
    population_fields = [
        population_count_field['key'],
        exposure_count_field['key'] % (exposure_population['key'], ),
    ] + [f['key'] for f in minimum_needs_fields]

    for item in population_fields:
        if item in analysis_layer_fields:
            break
    else:
        # no population-related field at all: not a population report
        return context

    # We try to get total affected field
    # if it didn't exists, check other fields to show
    total_affected_fields = [
        total_affected_field['key'],
        # We might want to check other fields, but turn it off until further
        # discussion
        population_count_field['key'],
        exposure_count_field['key'] % (exposure_population['key'], ),
    ]

    for item in total_affected_fields:
        if item in analysis_layer_fields:
            total_affected = value_from_field_name(
                analysis_layer_fields[item],
                analysis_layer)
            # remember which field we used so we can pick its sub header
            total_affected_field_used = item
            break
    else:
        return context

    if displaced_field['key'] in analysis_layer_fields:
        total_displaced = value_from_field_name(
            analysis_layer_fields[displaced_field['key']],
            analysis_layer)
    else:
        return context

    sections = OrderedDict()

    # People Section

    # Take default value from definitions
    people_header = resolve_from_dictionary(
        extra_args, ['sections', 'people', 'header'])
    people_items = resolve_from_dictionary(
        extra_args, ['sections', 'people', 'items'])

    # create context for affected infographic
    sub_header = resolve_from_dictionary(
        people_items[0], 'sub_header')

    # retrieve relevant header based on the fields we showed.
    sub_header = sub_header[total_affected_field_used]

    affected_infographic = PeopleInfographicElement(
        header=sub_header,
        icon=icons.get(
            total_affected_field['key']),
        number=total_affected)

    # create context for displaced infographic
    sub_header = resolve_from_dictionary(
        people_items[1], 'sub_header')
    sub_header_note_format = resolve_from_dictionary(
        people_items[1], 'sub_header_note_format')
    rate_description_format = resolve_from_dictionary(
        people_items[1], 'rate_description_format')
    rate_description = []

    # describe each hazard class that has a non-zero displacement rate
    hazard_classification = layer_hazard_classification(hazard_layer)
    for hazard_class in hazard_classification['classes']:
        displacement_rate = hazard_class.get('displacement_rate', 0)
        if displacement_rate:
            rate_description.append(
                rate_description_format.format(**hazard_class))

    rate_description_string = ', '.join(rate_description)

    sub_header_note = sub_header_note_format.format(
        rate_description=rate_description_string)

    displaced_infographic = PeopleInfographicElement(
        header=sub_header,
        header_note=sub_header_note,
        icon=icons.get(
            displaced_field['key']),
        number=total_displaced)

    sections['people'] = {
        'header': people_header,
        'items': [
            affected_infographic,
            displaced_infographic
        ]
    }

    # Vulnerability Section

    # Take default value from definitions
    vulnerability_items = resolve_from_dictionary(
        extra_args,
        ['sections', 'vulnerability', 'items'])

    vulnerability_section_header = resolve_from_dictionary(
        extra_args,
        ['sections', 'vulnerability', 'header'])

    vulnerability_section_sub_header_format = resolve_from_dictionary(
        extra_args,
        ['sections', 'vulnerability', 'sub_header_format'])

    infographic_elements = []
    for group in vulnerability_items:
        fields = group['fields']
        group_header = group['sub_group_header']
        bootstrap_column = group['bootstrap_column']
        element_column = group['element_column']
        headers = group['headers']
        elements = []
        for field, header in zip(fields, headers):
            field_key = field['key']
            try:
                field_name = analysis_layer_fields[field_key]
                value = value_from_field_name(
                    field_name, analysis_layer)
            except KeyError:
                # It means the field is not there
                continue

            # Guard against ZeroDivisionError: total_displaced can
            # legitimately be 0 when nobody is displaced.
            if value and total_displaced:
                value_percentage = value * 100.0 / total_displaced
            else:
                value_percentage = 0

            infographic_element = PeopleVulnerabilityInfographicElement(
                header=header,
                icon=icons.get(field_key),
                number=value,
                percentage=value_percentage
            )
            elements.append(infographic_element)
        if elements:
            infographic_elements.append({
                'group_header': group_header,
                'bootstrap_column': bootstrap_column,
                'element_column': element_column,
                'items': elements
            })

    total_displaced_rounded = format_number(
        total_displaced,
        enable_rounding=True,
        is_population=True)

    sections['vulnerability'] = {
        'header': vulnerability_section_header,
        'small_header': vulnerability_section_sub_header_format.format(
            number_displaced=total_displaced_rounded),
        'items': infographic_elements
    }

    # Minimum Needs Section

    minimum_needs_header = resolve_from_dictionary(
        extra_args,
        ['sections', 'minimum_needs', 'header'])
    empty_unit_string = resolve_from_dictionary(
        extra_args,
        ['sections', 'minimum_needs', 'empty_unit_string'])

    items = []

    for item in minimum_needs_fields:
        need = item['need_parameter']
        if isinstance(need, ResourceParameter):

            needs_count = value_from_field_name(
                item['field_name'], analysis_layer)

            if need.unit.abbreviation:
                unit_string = '{unit}/{frequency}'.format(
                    unit=need.unit.abbreviation,
                    frequency=need.frequency)
            else:
                unit_string = empty_unit_string

            item = PeopleMinimumNeedsInfographicElement(
                header=item['name'],
                icon=icons.get(
                    item['key']),
                number=needs_count,
                unit=unit_string)
            items.append(item)

    # TODO: get from impact function provenance later
    needs_profile = NeedsProfile()

    sections['minimum_needs'] = {
        'header': minimum_needs_header,
        'small_header': needs_profile.provenance,
        'items': items,
    }

    # Population Charts

    population_donut_path = impact_report.component_absolute_output_path(
        'population-chart-png')

    # build css classes from slice labels so the chart legend can be styled
    css_label_classes = []
    try:
        population_chart_context = impact_report.metadata.component_by_key(
            'population-chart').context['context']
        """
        :type: safe.report.extractors.infographic_elements.svg_charts.
            DonutChartContext
        """
        for pie_slice in population_chart_context.slices:
            label = pie_slice['label']
            if not label:
                continue
            css_class = label.replace(' ', '').lower()
            css_label_classes.append(css_class)
    except KeyError:
        # chart component is absent; render the section without context
        population_chart_context = None

    sections['population_chart'] = {
        'img_path': resource_url(population_donut_path),
        'context': population_chart_context,
        'css_label_classes': css_label_classes
    }

    context['brand_logo'] = resource_url(
        resources_path('img', 'logos', 'inasafe-logo-white.png'))
    context['sections'] = sections
    context['title'] = analysis_layer.title() or value_from_field_name(
        analysis_name_field['field_name'], analysis_layer)

    return context
Esempio n. 42
0
def general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args

    # figure out analysis report type
    hazard_layer = impact_report.hazard
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    debug_mode = impact_report.impact_function.debug_mode

    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # find hazard class
    summary = []

    # The analysis layer holds exactly one summary feature.
    # Use the ``next()`` builtin instead of the Python-2-only
    # ``iterator.next()`` method, consistent with the rest of the file.
    analysis_feature = next(analysis_layer.getFeatures())
    analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

    exposure_unit = exposure_type['units'][0]
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')
    if exposure_unit['abbreviation']:
        value_header = u'{measure} ({abbreviation})'.format(**exposure_unit)
    else:
        value_header = u'{name}'.format(**exposure_unit)

    # Pre-initialize so the table header below cannot raise NameError when
    # the hazard layer is continuous (no 'classification' keyword).
    hazard_classification = None

    # in case there is a classification
    if 'classification' in hazard_layer.keywords:

        # retrieve hazard classification from hazard layer
        hazard_classification = layer_hazard_classification(hazard_layer)

        # classified hazard must have hazard count in analysis layer
        hazard_stats = []
        for hazard_class in hazard_classification['classes']:
            # hazard_count_field is a dynamic field with hazard class
            # as parameter
            field_key_name = hazard_count_field['key'] % (
                hazard_class['key'], )
            try:
                # retrieve dynamic field name from analysis_fields keywords
                # will cause key error if no hazard count for that particular
                # class
                field_name = analysis_inasafe_fields[field_key_name]
                field_index = analysis_layer.fieldNameIndex(field_name)
                # Hazard label taken from translated hazard count field
                # label, string-formatted with translated hazard class label
                hazard_label = hazard_class['name']

                hazard_value = format_number(analysis_feature[field_index],
                                             enable_rounding=is_rounded,
                                             is_population=is_population)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': hazard_value
                }
            except KeyError:
                # in case the field was not found, report the class with a
                # zero count rather than dropping the row
                hazard_label = hazard_class['name']
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_label,
                    'value': 0,
                }

            hazard_stats.append(stats)

        # find total field
        try:
            field_name = analysis_inasafe_fields[total_field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            total = format_number(total,
                                  enable_rounding=is_rounded,
                                  is_population=is_population)
            stats = {
                'key': total_field['key'],
                'name': total_field['name'],
                # rendered as a header row at the bottom of the table
                'as_header': True,
                'value': total
            }
            hazard_stats.append(stats)
        except KeyError:
            # no total field in the analysis layer; omit the total row
            pass

        summary.append({
            'header_label': hazard_header,
            'value_label': value_header,
            'rows': hazard_stats
        })

    # retrieve affected column
    report_stats = []

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')
    for item in reported_fields:
        header = item['header']
        field = item['field']
        if field['key'] in analysis_inasafe_fields:
            field_index = analysis_layer.fieldNameIndex(field['field_name'])
            if field == fatalities_field:
                # For fatalities field, we show a range of number
                # instead
                row_value = fatalities_range(analysis_feature[field_index])
            else:
                row_value = format_number(analysis_feature[field_index],
                                          enable_rounding=is_rounded,
                                          is_population=is_population)
            row_stats = {
                'key': field['key'],
                'name': header,
                'value': row_value
            }
            report_stats.append(row_stats)

    # Give report section
    exposure_type = layer_definition_type(exposure_layer)
    header_label = exposure_type['name']
    summary.append({
        'header_label': header_label,
        # This should depend on exposure unit
        # TODO: Change this so it can take the unit dynamically
        'value_label': value_header,
        'rows': report_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])
    table_header_format = resolve_from_dictionary(extra_args,
                                                  'table_header_format')
    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        # Continuous hazards carry no classification, hence no unit label.
        # Previously this line raised NameError in that case.
        unit=hazard_classification['classification_unit']
        if hazard_classification else '')

    # Section notes
    note_format = resolve_from_dictionary(extra_args,
                                          ['concept_notes', 'note_format'])

    if is_population:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])
    else:
        concepts = resolve_from_dictionary(
            extra_args, ['concept_notes', 'general_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context
Esempio n. 43
0
def mmi_detail_extractor(impact_report, component_metadata):
    """Extract MMI-related analysis results.

    This extractor should only be used for EQ Raster with Population.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    analysis_layer = impact_report.analysis
    analysis_layer_keywords = analysis_layer.keywords
    extra_args = component_metadata.extra_args
    use_rounding = impact_report.impact_function.use_rounding
    provenance = impact_report.impact_function.provenance
    hazard_keywords = provenance['hazard_keywords']
    exposure_keywords = provenance['exposure_keywords']

    # Guard clauses: bail out with an empty context unless the analysis is
    # an earthquake raster hazard combined with a population exposure.
    if definition(hazard_keywords['hazard']) != hazard_earthquake:
        return context
    if hazard_keywords[layer_geometry['key']] != layer_geometry_raster['key']:
        return context
    if definition(exposure_keywords['exposure']) != exposure_population:
        return context

    context['header'] = resolve_from_dictionary(extra_args, 'header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Table header: MMI column label followed by one label per reported
    # field.
    table_header = [resolve_from_dictionary(extra_args, 'mmi_header')]
    table_header += [entry['header'] for entry in reported_fields]

    # The MMI scale spans levels 1..10, shown as roman numerals I..X.
    romans = (
        'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X')
    rows = []
    for level, numeral in enumerate(romans, start=1):
        columns = [numeral]
        for entry in reported_fields:
            field = entry['field']
            try:
                # dynamic field keyed by MMI level; a missing keyword or a
                # falsy value is reported as zero
                key_name = field['key'] % (level, )
                field_name = analysis_layer_keywords[key_name]
                count = value_from_field_name(field_name, analysis_layer)
                if not count:
                    count = 0
            except KeyError:
                count = 0
            columns.append(format_number(
                count,
                use_rounding=use_rounding,
                is_population=True))
        rows.append(columns)

    # Footer row holding the grand totals.
    total_footer = [resolve_from_dictionary(extra_args, 'total_header')]
    total_fields = resolve_from_dictionary(extra_args, 'total_fields')
    for field in total_fields:
        try:
            field_name = analysis_layer_keywords[field['key']]
            total = value_from_field_name(field_name, analysis_layer)
            if not total:
                total = 0
        except KeyError:
            total = 0
        total_footer.append(format_number(
            total,
            use_rounding=use_rounding,
            is_population=True))

    context['component_key'] = component_metadata.key
    context['mmi'] = {
        'header': table_header,
        'rows': rows,
        'footer': total_footer
    }

    return context
Esempio n. 44
0
def analysis_provenance_details_extractor(impact_report, component_metadata):
    """Extracting provenance details of layers.

    This extractor would be the main provenance details extractor which produce
    tree view provenance details.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.1
    """
    context = {}
    extra_args = component_metadata.extra_args

    provenance_format_args = resolve_from_dictionary(extra_args,
                                                     'provenance_format')

    # Preferred display order of keyword entries in the tree view; entries
    # not listed here are handled by sorted_keywords_by_order downstream.
    keywords_order = [
        'title',
        'source',
        'layer_purpose',
        'layer_geometry',
        'hazard',
        'exposure',
        'hazard_category',
        'exposure_unit',
        'value_map',
        'value_maps',
        'inasafe_fields',
        'inasafe_default_values',
        'layer_mode',
        'hazard_layer',
        'exposure_layer',
        'aggregation_layer',
        'keyword_version',
        'classification',
    ]

    use_rounding = impact_report.impact_function.use_rounding
    debug_mode = impact_report.impact_function.debug_mode

    # we define dict here to create a different object of keyword
    hazard_keywords = dict(
        impact_report.impact_function.provenance['hazard_keywords'])

    # hazard_keywords doesn't have hazard_layer path information
    # Strip any credentials embedded in the layer URI before reporting it.
    hazard_layer = QgsDataSourceUri.removePassword(
        decode_full_layer_uri(
            impact_report.impact_function.provenance.get(
                provenance_hazard_layer['provenance_key']))[0])
    hazard_keywords['hazard_layer'] = hazard_layer

    # keep only value maps with IF exposure
    # Iterate over a shallow copy (temp_keyword) so entries can be deleted
    # from the nested dict while looping.
    # NOTE(review): the ``in`` test below is a substring check against the
    # exposure keyword string — presumably value-map keys are exposure
    # names; confirm this is intended rather than an equality test.
    for keyword in ['value_maps', 'thresholds']:
        if hazard_keywords.get(keyword):
            temp_keyword = dict(hazard_keywords[keyword])
            for key in temp_keyword:
                if key not in impact_report.impact_function.provenance[
                        'exposure_keywords']['exposure']:
                    del hazard_keywords[keyword][key]

    header = resolve_from_dictionary(provenance_format_args, 'hazard_header')
    hazard_provenance = {
        'header':
        header.title(),
        'provenances':
        headerize(sorted_keywords_by_order(hazard_keywords, keywords_order))
    }

    # convert value if there is dict_keywords
    provenances = hazard_provenance['provenances']
    hazard_provenance['provenances'] = resolve_dict_keywords(provenances)

    # we define dict here to create a different object of keyword
    exposure_keywords = dict(
        impact_report.impact_function.provenance['exposure_keywords'])

    # exposure_keywords doesn't have exposure_layer path information
    # Same password-stripped URI treatment as the hazard layer above.
    exposure_layer = QgsDataSourceUri.removePassword(
        decode_full_layer_uri(
            impact_report.impact_function.provenance.get(
                provenance_exposure_layer['provenance_key']))[0])
    exposure_keywords['exposure_layer'] = exposure_layer

    header = resolve_from_dictionary(provenance_format_args, 'exposure_header')
    exposure_provenance = {
        'header':
        header.title(),
        'provenances':
        headerize(sorted_keywords_by_order(exposure_keywords, keywords_order))
    }

    # convert value if there is dict_keywords
    provenances = exposure_provenance['provenances']
    exposure_provenance['provenances'] = resolve_dict_keywords(provenances)

    # aggregation keywords could be None so we don't define dict here
    aggregation_keywords = impact_report.impact_function.provenance[
        'aggregation_keywords']

    header = resolve_from_dictionary(provenance_format_args,
                                     'aggregation_header')

    aggregation_provenance = {'header': header.title(), 'provenances': None}

    # only if aggregation layer used
    if aggregation_keywords:
        # we define dict here to create a different object of keyword
        aggregation_keywords = dict(aggregation_keywords)

        # aggregation_keywords doesn't have aggregation_layer path information
        aggregation_layer = QgsDataSourceUri.removePassword(
            decode_full_layer_uri(
                impact_report.impact_function.provenance.get(
                    provenance_aggregation_layer['provenance_key']))[0])
        aggregation_keywords['aggregation_layer'] = aggregation_layer

        aggregation_provenance['provenances'] = headerize(
            sorted_keywords_by_order(aggregation_keywords, keywords_order))

        # convert value if there is dict_keywords
        provenances = aggregation_provenance['provenances']
        aggregation_provenance['provenances'] = resolve_dict_keywords(
            provenances)

    else:
        # no aggregation layer: show the "not used" default message instead
        aggregation_not_used = resolve_from_dictionary(
            extra_args, ['defaults', 'aggregation_not_used'])
        aggregation_provenance['provenances'] = aggregation_not_used

    all_provenance_keywords = dict(impact_report.impact_function.provenance)

    # we add debug mode information to the provenance
    all_provenance_keywords[provenance_use_rounding['provenance_key']] = (
        'On' if use_rounding else 'Off')
    all_provenance_keywords['debug_mode'] = 'On' if debug_mode else 'Off'

    header = resolve_from_dictionary(provenance_format_args,
                                     'analysis_environment_header')
    # OrderedDict preserves the display order of environment entries.
    analysis_environment_provenance_items = OrderedDict()
    analysis_environment_provenance_keys = [
        'os', 'inasafe_version', provenance_use_rounding['provenance_key'],
        'debug_mode', 'qgis_version', 'qt_version', 'gdal_version',
        'pyqt_version'
    ]

    for item in analysis_environment_provenance_keys:
        analysis_environment_provenance_items[item] = (
            all_provenance_keywords[item])

    analysis_environment_provenance = {
        'header': header.title(),
        'provenances': headerize(analysis_environment_provenance_items)
    }

    impact_function_name = impact_report.impact_function.name
    header = resolve_from_dictionary(provenance_format_args,
                                     'impact_function_header')
    impact_function_provenance = {
        'header': header.title(),
        'provenances': impact_function_name
    }

    # Assemble the tree in fixed section order for the rendered report.
    provenance_detail = OrderedDict()
    provenance_detail['impact_function'] = impact_function_provenance
    provenance_detail['hazard'] = hazard_provenance
    provenance_detail['exposure'] = exposure_provenance
    provenance_detail['aggregation'] = aggregation_provenance
    provenance_detail['analysis_environment'] = analysis_environment_provenance

    analysis_details_header = resolve_from_dictionary(
        extra_args, ['header', 'analysis_detail'])

    context['component_key'] = component_metadata.key
    context.update({
        'header': analysis_details_header,
        'details': provenance_detail
    })

    return context
Esempio n. 45
0
def aggregation_result_extractor(impact_report, component_metadata):
    """Extracting aggregation result of breakdown from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}

    """Initializations"""

    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    exposure_summary_table = impact_report.exposure_summary_table
    # NOTE(review): exposure_summary_table_fields is only bound when
    # exposure_summary_table is truthy, yet it is used unconditionally
    # further down — presumably the table is always present once the
    # exposure-type filter below passes; verify against callers.
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    aggregation_summary = impact_report.aggregation_summary
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode

    """Filtering report sections"""

    # Only process for applicable exposure types
    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # For now aggregation report only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')]
    if exposure_type not in itemizable_exposures_all:
        return context

    """Generating type name for columns"""

    type_fields = read_dynamic_inasafe_field(
        aggregation_summary_fields, affected_exposure_count_field)
    # do not include total, to preserve ordering and proper reference
    type_fields.remove('total')

    # we need to sort the column
    # get the classes lists
    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort columns based on class order
    # create function to sort
    def sort_classes(_type_field):
        """Sort method to retrieve exposure class key index."""
        # class key is the type field name
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _type_field == _exposure_class['key']:
                index = i
                break
        else:
            # unknown class keys sort before known ones
            index = -1

        return index

    # sort
    type_fields = sorted(type_fields, key=sort_classes)

    # generate type_header_labels for column header
    type_header_labels = []
    for type_name in type_fields:
        type_label = tr(type_name.capitalize())
        type_header_labels.append(type_label)

    """Generating values for rows"""

    # generate rows of values for values of each column
    rows = []
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    total_field_index = aggregation_summary.fieldNameIndex(
        total_affected_field['field_name'])

    type_field_index = []
    for type_name in type_fields:
        field_name = affected_exposure_count_field['field_name'] % type_name
        type_index = aggregation_summary.fieldNameIndex(field_name)
        type_field_index.append(type_index)

    for feat in aggregation_summary.getFeatures():
        total_affected_value = format_number(
            feat[total_field_index],
            enable_rounding=is_rounded,
            is_population=is_population)
        if total_affected_value == '0':
            # skip aggregation type if the total affected is zero
            continue
        item = {
            # Name is the header for each row
            'name': feat[aggregation_name_index],
            # Total is the total for each row
            'total': total_affected_value
        }
        # Type values is the values for each column in each row
        type_values = []
        for idx in type_field_index:
            affected_value = format_number(
                feat[idx],
                enable_rounding=is_rounded)
            type_values.append(affected_value)
        item['type_values'] = type_values
        rows.append(item)

    """Generate total for footers"""

    # calculate total values for each type. Taken from exposure summary table
    type_total_values = []
    # Get affected field index
    affected_field_index = exposure_summary_table.fieldNameIndex(
        total_affected_field['field_name'])

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [
        exposure_type_field,
        exposure_class_field
    ]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    breakdown_field_name = breakdown_field['field_name']
    breakdown_field_index = exposure_summary_table.fieldNameIndex(
        breakdown_field_name)

    # Fetch total affected for each breakdown name
    value_dict = {}
    for feat in exposure_summary_table.getFeatures():
        # exposure summary table is in csv format, so the field returned is
        # always in text format
        affected_value = int(float(feat[affected_field_index]))
        affected_value = format_number(
            affected_value,
            enable_rounding=is_rounded,
            is_population=is_population)
        value_dict[feat[breakdown_field_index]] = affected_value

    if value_dict:
        for type_name in type_fields:
            # NOTE(review): assumes every type field has an entry in the
            # exposure summary table; a missing key would raise KeyError.
            affected_value_string_formatted = value_dict[type_name]
            if affected_value_string_formatted == '0':
                # if total affected for breakdown type is zero
                # current column index
                # type_total_values only grows for kept columns, so its
                # length is the current position in the truncated lists
                column_index = len(type_total_values)
                # cut column header
                type_header_labels = (
                    type_header_labels[:column_index] +
                    type_header_labels[column_index + 1:])
                # cut all row values for the column
                for item in rows:
                    type_values = item['type_values']
                    item['type_values'] = (
                        type_values[:column_index] +
                        type_values[column_index + 1:])
                continue
            type_total_values.append(affected_value_string_formatted)

    """Get the super total affected"""

    # total for affected (super total)
    # Use the ``next()`` builtin instead of the Python-2-only
    # ``iterator.next()`` method.
    analysis_feature = next(analysis_layer.getFeatures())
    field_index = analysis_layer.fieldNameIndex(
        total_affected_field['field_name'])
    total_all = format_number(
        analysis_feature[field_index],
        enable_rounding=is_rounded)

    """Generate and format the context"""
    aggregation_area_default_header = resolve_from_dictionary(
        extra_args, 'aggregation_area_default_header')
    header_label = (
        aggregation_summary.title() or aggregation_area_default_header)

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string)
    # collapse repeated whitespace left by an empty unit placeholder
    table_header = ' '.join(table_header.split())

    section_header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    total_in_aggregation_header = resolve_from_dictionary(
        extra_args, 'total_in_aggregation_header')
    context['header'] = section_header
    context['notes'] = notes
    context['aggregation_result'] = {
        'table_header': table_header,
        'header_label': header_label,
        'type_header_labels': type_header_labels,
        'total_label': total_header,
        'total_in_aggregation_area_label': total_in_aggregation_header,
        'rows': rows,
        'type_total_values': type_total_values,
        'total_all': total_all,
    }
    return context
Esempio n. 46
0
def minimum_needs_extractor(impact_report, component_metadata):
    """Extracting minimum needs of the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    analysis_layer = impact_report.analysis
    analysis_keywords = analysis_layer.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode
    # rounding is disabled in debug mode so raw values stay visible
    is_rounding = not debug_mode

    header = resolve_from_dictionary(extra_args, 'header')
    context['header'] = header

    # check if displaced is not zero
    try:
        displaced_field_name = analysis_keywords[displaced_field['key']]
        total_displaced = value_from_field_name(displaced_field_name,
                                                analysis_layer)
        if total_displaced == 0:
            # nobody displaced: short-circuit with an explanatory message
            zero_displaced_message = resolve_from_dictionary(
                extra_args, 'zero_displaced_message')
            context['zero_displaced'] = {
                'status': True,
                'message': zero_displaced_message
            }
            return context
    except KeyError:
        # in case no displaced field
        pass

    # minimum needs calculation only affect population type exposure
    # check if analysis keyword have minimum_needs keywords
    have_minimum_needs_field = False
    for field_key in analysis_keywords:
        if field_key.startswith(minimum_needs_namespace):
            have_minimum_needs_field = True
            break

    if not have_minimum_needs_field:
        return context

    frequencies = {}
    # map each needs to its frequency groups
    for field in (minimum_needs_fields + additional_minimum_needs):
        need_parameter = field.get('need_parameter')
        if isinstance(need_parameter, ResourceParameter):
            frequency = need_parameter.frequency
        else:
            frequency = field.get('frequency')

        if frequency:
            # group fields that share the same frequency
            frequencies.setdefault(frequency, []).append(field)

    needs = []
    # The analysis layer holds exactly one summary feature.  Use the
    # ``next()`` builtin instead of the Python-2-only ``iterator.next()``.
    analysis_feature = next(analysis_layer.getFeatures())
    header_frequency_format = resolve_from_dictionary(
        extra_args, 'header_frequency_format')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    need_header_format = resolve_from_dictionary(extra_args,
                                                 'need_header_format')
    # group the needs by frequency
    # (``dict.iteritems`` is Python 2 only; use ``items`` to match the
    # Python 3 idioms used elsewhere in this module)
    for key, frequency in frequencies.items():
        group = {
            'header': header_frequency_format.format(frequency=tr(key)),
            'total_header': total_header,
            'needs': []
        }
        for field in frequency:
            # check value exists in the field
            field_idx = analysis_layer.fieldNameIndex(field['field_name'])
            if field_idx == -1:
                # skip if field doesn't exists
                continue
            value = format_number(analysis_feature[field_idx],
                                  enable_rounding=is_rounding,
                                  is_population=True)

            # Reset per field: previously a field without a unit could
            # inherit the abbreviation from the previous loop iteration
            # (or raise NameError on the first one).
            unit_abbreviation = None
            if field.get('need_parameter'):
                need_parameter = field['need_parameter']
                """:type: ResourceParameter"""
                name = tr(need_parameter.name)
                unit_abbreviation = need_parameter.unit.abbreviation

            else:
                if field.get('header_name'):
                    name = field.get('header_name')
                else:
                    name = field.get('name')

                need_unit = field.get('unit')
                if need_unit:
                    unit_abbreviation = need_unit.get('abbreviation')

            if unit_abbreviation:
                header = need_header_format.format(
                    name=name, unit_abbreviation=unit_abbreviation)
            else:
                header = name

            item = {'header': header, 'value': value}
            group['needs'].append(item)
        needs.append(group)

    context['needs'] = needs

    return context
Esempio n. 47
0
def multi_exposure_general_report_extractor(impact_report, component_metadata):
    """Extracting general analysis result from the impact layer.

    Builds, for a multi-exposure analysis, the summary tables (per hazard
    class and per reported field) shown in the general report section.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.3
    """
    context = {}
    extra_args = component_metadata.extra_args

    multi_exposure = impact_report.multi_exposure_impact_function
    analysis_layer = impact_report.analysis
    provenances = [
        impact_function.provenance for impact_function in (
            multi_exposure.impact_functions)]
    debug_mode = multi_exposure.debug
    population_exist = False

    # All impact functions in a multi-exposure analysis share one hazard,
    # so the first provenance is sufficient for the hazard keywords.
    hazard_keywords = provenances[0]['hazard_keywords']
    hazard_header = resolve_from_dictionary(extra_args, 'hazard_header')

    reported_fields = resolve_from_dictionary(extra_args, 'reported_fields')

    # Summarize every value needed for each exposure and extract later to the
    # context.
    summary = []
    map_legend_titles = []
    hazard_classifications = {}
    exposures_stats = []
    for provenance in provenances:
        map_legend_title = provenance['map_legend_title']
        map_legend_titles.append(map_legend_title)
        exposure_stats = {}
        exposure_keywords = provenance['exposure_keywords']
        exposure_type = definition(exposure_keywords['exposure'])
        exposure_stats['exposure'] = exposure_type
        # Only round the number when it is population exposure and it is not
        # in debug mode
        is_rounded = not debug_mode
        is_population = exposure_type is exposure_population
        if is_population:
            population_exist = True

        analysis_feature = next(analysis_layer.getFeatures())
        analysis_inasafe_fields = analysis_layer.keywords['inasafe_fields']

        exposure_unit = exposure_type['units'][0]
        if exposure_unit['abbreviation']:
            value_header = '{measure} ({abbreviation})'.format(
                measure=map_legend_title,
                abbreviation=exposure_unit['abbreviation'])
        else:
            value_header = '{name}'.format(name=map_legend_title)

        exposure_stats['value_header'] = value_header

        # Get hazard classification
        hazard_classification = definition(
            active_classification(hazard_keywords,
                                  exposure_keywords['exposure']))
        hazard_classifications[exposure_type['key']] = hazard_classification

        # Start each exposure with fresh result dicts. reported_fields_result
        # used to be created only inside the classification branch below,
        # which raised NameError (or leaked the previous exposure's values)
        # whenever a provenance had no hazard classification.
        classification_result = {}
        reported_fields_result = {}

        # in case there is a classification
        if hazard_classification:
            for hazard_class in hazard_classification['classes']:
                # hazard_count_field is a dynamic field with hazard class
                # as parameter
                field_key_name = exposure_hazard_count_field['key'] % (
                    exposure_type['key'], hazard_class['key'])
                try:
                    # retrieve dynamic field name from analysis_fields keywords
                    # will cause key error if no hazard count for that
                    # particular class
                    field_name = analysis_inasafe_fields[field_key_name]
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    hazard_value = format_number(
                        analysis_feature[field_index],
                        use_rounding=is_rounded,
                        is_population=is_population)
                except KeyError:
                    # in case the field was not found
                    hazard_value = 0

                classification_result[hazard_class['key']] = hazard_value

            # find total field
            try:
                field_key_name = exposure_total_exposed_field['key'] % (
                    exposure_type['key'])
                field_name = analysis_inasafe_fields[field_key_name]
                total = value_from_field_name(field_name, analysis_layer)
                total = format_number(
                    total, use_rounding=is_rounded,
                    is_population=is_population)
                classification_result[total_exposed_field['key']] = total
            except KeyError:
                # no total exposed field for this exposure; leave it out
                pass

            exposure_stats['classification_result'] = classification_result

        for item in reported_fields:
            field = item.get('field')
            multi_exposure_field = item.get('multi_exposure_field')
            row_value = '-'
            if multi_exposure_field:
                field_key = (
                    multi_exposure_field['key'] % (exposure_type['key']))
                field_name = (
                    multi_exposure_field['field_name'] % (
                        exposure_type['key']))
                if field_key in analysis_inasafe_fields:
                    field_index = analysis_layer.fields() \
                        .lookupField(field_name)
                    row_value = format_number(
                        analysis_feature[field_index],
                        use_rounding=is_rounded,
                        is_population=is_population)

            elif field in [displaced_field, fatalities_field]:
                # displaced/fatalities numbers are only meaningful for
                # population exposures
                if field['key'] in analysis_inasafe_fields and is_population:
                    field_index = analysis_layer.fields(
                    ).lookupField(field['name'])
                    if field == fatalities_field:
                        # For fatalities field, we show a range of number
                        # instead
                        row_value = fatalities_range(
                            analysis_feature[field_index])
                    else:
                        row_value = format_number(
                            analysis_feature[field_index],
                            use_rounding=is_rounded,
                            is_population=is_population)

            reported_fields_result[field['key']] = row_value

        exposure_stats['reported_fields_result'] = reported_fields_result
        exposures_stats.append(exposure_stats)

    # After finish summarizing value, then proceed to context extraction.

    # find total value and labels for each exposure
    value_headers = []
    total_values = []
    for exposure_stats in exposures_stats:
        # label
        value_header = exposure_stats['value_header']
        value_headers.append(value_header)

        # total value
        classification_result = exposure_stats['classification_result']
        total_value = classification_result[total_exposed_field['key']]
        total_values.append(total_value)

    classifications = list(hazard_classifications.values())
    is_item_identical = (
        classifications.count(
            classifications[0]) == len(classifications))
    if classifications and is_item_identical:
        # every exposure uses the same classification: one combined table
        hazard_stats = []
        for hazard_class in classifications[0]['classes']:
            values = []
            for exposure_stats in exposures_stats:
                classification_result = exposure_stats['classification_result']
                value = classification_result[hazard_class['key']]
                values.append(value)
            stats = {
                'key': hazard_class['key'],
                'name': hazard_class['name'],
                'numbers': values
            }
            hazard_stats.append(stats)

        total_stats = {
            'key': total_exposed_field['key'],
            'name': total_exposed_field['name'],
            'as_header': True,
            'numbers': total_values
        }
        hazard_stats.append(total_stats)

        summary.append({
            'header_label': hazard_header,
            'value_labels': value_headers,
            'rows': hazard_stats
        })
    # if there are different hazard classifications used in the analysis,
    # we will create a separate table for each hazard classification
    else:
        # group exposures by the classification key they use
        hazard_classification_groups = {}
        for exposure_key, hazard_classification in (
                iter(list(hazard_classifications.items()))):
            exposure_type = definition(exposure_key)
            if hazard_classification['key'] not in (
                    hazard_classification_groups):
                hazard_classification_groups[hazard_classification['key']] = [
                    exposure_type]
            else:
                hazard_classification_groups[
                    hazard_classification['key']].append(exposure_type)

        for hazard_classification_key, exposures in (
                iter(list(hazard_classification_groups.items()))):
            custom_headers = []
            custom_total_values = []
            # find total value and labels for each exposure
            for exposure_stats in exposures_stats:
                if exposure_stats['exposure'] not in exposures:
                    continue
                # label
                value_header = exposure_stats['value_header']
                custom_headers.append(value_header)

                # total value
                classification_result = exposure_stats['classification_result']
                total_value = classification_result[total_exposed_field['key']]
                custom_total_values.append(total_value)

            hazard_stats = []
            hazard_classification = definition(hazard_classification_key)
            for hazard_class in hazard_classification['classes']:
                values = []
                for exposure_stats in exposures_stats:
                    if exposure_stats['exposure'] not in exposures:
                        continue
                    classification_result = exposure_stats[
                        'classification_result']
                    value = classification_result[hazard_class['key']]
                    values.append(value)
                stats = {
                    'key': hazard_class['key'],
                    'name': hazard_class['name'],
                    'numbers': values
                }
                hazard_stats.append(stats)

            total_stats = {
                'key': total_exposed_field['key'],
                'name': total_exposed_field['name'],
                'as_header': True,
                'numbers': custom_total_values
            }
            hazard_stats.append(total_stats)

            summary.append({
                'header_label': hazard_header,
                'value_labels': custom_headers,
                'rows': hazard_stats
            })

    # one row per reported field, one column per exposure
    reported_fields_stats = []
    for item in reported_fields:
        field = item.get('field')
        values = []
        for exposure_stats in exposures_stats:
            reported_fields_result = exposure_stats['reported_fields_result']
            value = reported_fields_result[field['key']]
            values.append(value)
        stats = {
            'key': field['key'],
            'name': item['header'],
            'numbers': values
        }
        reported_fields_stats.append(stats)

    header_label = resolve_from_dictionary(
        extra_args, ['reported_fields_header'])
    summary.append({
        'header_label': header_label,
        'value_labels': value_headers,
        'rows': reported_fields_stats
    })

    header = resolve_from_dictionary(extra_args, ['header'])

    # comma-separated list of all map legend titles for the table header
    combined_map_legend_title = ''
    for index, map_legend_title in enumerate(map_legend_titles):
        combined_map_legend_title += map_legend_title
        if not (index + 1) == len(map_legend_titles):
            combined_map_legend_title += ', '

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')
    table_header = table_header_format.format(
        title=combined_map_legend_title,
        unit=classifications[0]['classification_unit'])

    # Section notes
    note_format = resolve_from_dictionary(
        extra_args, ['concept_notes', 'note_format'])

    concepts = resolve_from_dictionary(
        extra_args, ['concept_notes', 'general_concepts'])

    if population_exist:
        concepts += resolve_from_dictionary(
            extra_args, ['concept_notes', 'population_concepts'])

    notes = []
    for concept in concepts:
        note = note_format.format(**concept)
        notes.append(note)

    context['component_key'] = component_metadata.key
    context['header'] = header
    context['summary'] = summary
    context['table_header'] = table_header
    context['notes'] = notes

    return context