def test_exposure_summary_table(self):
    """Test we can produce the breakdown for the exposure type."""
    aggregate_hazard = load_test_vector_layer(
        'gisv4',
        'intermediate',
        'aggregate_classified_hazard_summary.geojson')
    aggregate_hazard.keywords['hazard_keywords'] = {
        'classification': 'generic_hazard_classes'
    }

    inasafe_fields = aggregate_hazard.keywords['inasafe_fields']

    # Collect the unique exposure types stored as dynamic fields.
    unique_exposure = read_dynamic_inasafe_field(
        inasafe_fields, exposure_count_field)

    # Collect the unique hazard classes from the hazard class column.
    hazard_class_name = inasafe_fields[hazard_class_field['key']]
    class_index = aggregate_hazard.fieldNameIndex(hazard_class_name)
    unique_hazard = aggregate_hazard.uniqueValues(class_index)

    layer = exposure_summary_table(aggregate_hazard, None)
    check_inasafe_fields(layer)

    # The table has one row per exposure type.
    self.assertEqual(len(unique_exposure), layer.featureCount())

    # The table has one column per hazard class plus five extras:
    # exposure name, total affected, total not affected,
    # total not exposed and total.
    self.assertEqual(layer.fields().count(), len(unique_hazard) + 5)
def test_exposure_summary_table_productivity(self):
    """Test we can produce the breakdown for the exposure type."""
    aggregate_hazard = load_test_vector_layer(
        'gisv4',
        'intermediate',
        'summaries',
        'land_cover_aggregate_hazard_impacted.geojson')
    aggregate_hazard.keywords['hazard_keywords'] = {
        'hazard': 'generic',
        'classification': 'generic_hazard_classes'
    }
    aggregate_hazard.keywords['exposure_keywords'] = {
        'exposure': 'land_cover'
    }
    exposure_summary = load_test_vector_layer(
        'gisv4',
        'intermediate',
        'summaries',
        'land_cover_exposure_summary.geojson'
    )

    inasafe_fields = aggregate_hazard.keywords['inasafe_fields']

    # Collect the unique exposure types stored as dynamic fields.
    unique_exposure = read_dynamic_inasafe_field(
        inasafe_fields, exposure_count_field)

    # Collect the unique hazard classes from the hazard class column.
    hazard_class_name = inasafe_fields[hazard_class_field['key']]
    class_index = aggregate_hazard.fields().lookupField(hazard_class_name)
    unique_hazard = aggregate_hazard.uniqueValues(class_index)

    layer = exposure_summary_table(aggregate_hazard, exposure_summary)
    check_inasafe_fields(layer)

    # The table has one row per exposure type.
    self.assertEqual(len(unique_exposure), layer.featureCount())

    # The table has one column per hazard class plus eight extras:
    # 1. exposure name
    # 2. total affected
    # 3. total not affected
    # 4. total not exposed
    # 5. total
    # 6. affected productivity
    # 7. affected production cost
    # 8. affected production value
    self.assertEqual(layer.fields().count(), len(unique_hazard) + 8)
def test_exposure_summary_table_productivity(self):
    """Test we can produce the breakdown for the exposure type.

    This variant runs :func:`exposure_summary_table` with an exposure
    summary layer, expecting the three extra productivity columns.
    """
    # NOTE(review): a method with this exact name also appears elsewhere in
    # this file; if both live in the same test class, only the last
    # definition is collected and the other never runs — confirm and rename.
    aggregate_hazard = load_test_vector_layer(
        'gisv4', 'intermediate', 'summaries',
        'land_cover_aggregate_hazard_impacted.geojson')
    # NOTE(review): this replaces 'hazard_keywords' without a 'hazard' key,
    # while exposure_summary_table() reads hazard_keywords['hazard'] and
    # keywords['exposure_keywords'] — presumably those are supplied by the
    # layer's stored keywords or a different code version; verify.
    aggregate_hazard.keywords['hazard_keywords'] = {
        'classification': 'generic_hazard_classes'
    }
    exposure_summary = load_test_vector_layer(
        'gisv4', 'intermediate', 'summaries',
        'land_cover_exposure_summary.geojson'
    )
    # I need the number of unique exposure
    unique_exposure = read_dynamic_inasafe_field(
        aggregate_hazard.keywords['inasafe_fields'],
        exposure_count_field)
    # I need the number of unique hazard
    fields = aggregate_hazard.keywords['inasafe_fields']
    hazard_class = fields[hazard_class_field['key']]
    hazard_class_index = aggregate_hazard.fieldNameIndex(hazard_class)
    unique_hazard = aggregate_hazard.uniqueValues(hazard_class_index)

    layer = exposure_summary_table(aggregate_hazard, exposure_summary)
    check_inasafe_fields(layer)

    # One row per exposure type.
    self.assertEqual(len(unique_exposure), layer.featureCount())

    # We should have
    # one column per hazard
    # one for the exposure
    # one for total affected
    # one for total not affected
    # one for total not exposed
    # one for total
    # one for affected productivity
    # one for affected production cost
    # one for affected production value
    self.assertEqual(layer.fields().count(), len(unique_hazard) + 8)
def aggregation_result_extractor(impact_report, component_metadata):
    """Extracting aggregation result of breakdown from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}

    """Initializations"""
    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    exposure_summary_table = impact_report.exposure_summary_table
    # NOTE(review): exposure_summary_table is only guarded here; it is later
    # dereferenced unconditionally (fieldNameIndex/getFeatures). If it can
    # really be None/falsy, the footer section below would raise — confirm.
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    aggregation_summary = impact_report.aggregation_summary
    aggregation_summary_fields = aggregation_summary.keywords['inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode

    """Filtering report sections"""
    # Only process for applicable exposure types
    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # For now aggregation report only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')
    ]
    # Bail out early with an empty context for non-itemizable exposures.
    if exposure_type not in itemizable_exposures_all:
        return context

    """Generating type name for columns"""
    type_fields = read_dynamic_inasafe_field(
        aggregation_summary_fields, affected_exposure_count_field)
    # do not include total, to preserve ordering and proper reference
    type_fields.remove('total')
    # we need to sort the column
    # get the classes lists
    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)
    # sort columns based on class order
    # create function to sort
    def sort_classes(_type_field):
        """Sort method to retrieve exposure class key index."""
        # class key is the type field name
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _type_field == _exposure_class['key']:
                index = i
                break
        else:
            # Unknown classes sort before known ones (index -1).
            index = -1
        return index
    # sort
    type_fields = sorted(type_fields, key=sort_classes)

    # generate type_header_labels for column header
    type_header_labels = []
    for type_name in type_fields:
        type_label = tr(type_name.capitalize())
        type_header_labels.append(type_label)

    """Generating values for rows"""
    # generate rows of values for values of each column
    rows = []
    aggregation_name_index = aggregation_summary.fieldNameIndex(
        aggregation_name_field['field_name'])
    total_field_index = aggregation_summary.fieldNameIndex(
        total_affected_field['field_name'])
    # Resolve every dynamic per-class column index once, before the feature
    # loop, in the same order as type_fields / type_header_labels.
    type_field_index = []
    for type_name in type_fields:
        field_name = affected_exposure_count_field['field_name'] % type_name
        type_index = aggregation_summary.fieldNameIndex(field_name)
        type_field_index.append(type_index)
    for feat in aggregation_summary.getFeatures():
        total_affected_value = format_number(
            feat[total_field_index],
            enable_rounding=is_rounded,
            is_population=is_population)
        if total_affected_value == '0':
            # skip aggregation type if the total affected is zero
            continue
        item = {
            # Name is the header for each row
            'name': feat[aggregation_name_index],
            # Total is the total for each row
            'total': total_affected_value
        }
        # Type values is the values for each column in each row
        type_values = []
        for idx in type_field_index:
            affected_value = format_number(
                feat[idx], enable_rounding=is_rounded)
            type_values.append(affected_value)
        item['type_values'] = type_values
        rows.append(item)

    """Generate total for footers"""
    # calculate total values for each type. Taken from exposure summary table
    type_total_values = []
    # Get affected field index
    affected_field_index = exposure_summary_table.fieldNameIndex(
        total_affected_field['field_name'])
    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [exposure_type_field, exposure_class_field]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break
    breakdown_field_name = breakdown_field['field_name']
    breakdown_field_index = exposure_summary_table.fieldNameIndex(
        breakdown_field_name)

    # Fetch total affected for each breakdown name
    value_dict = {}
    for feat in exposure_summary_table.getFeatures():
        # exposure summary table is in csv format, so the field returned is
        # always in text format
        affected_value = int(float(feat[affected_field_index]))
        affected_value = format_number(
            affected_value,
            enable_rounding=is_rounded,
            is_population=is_population)
        value_dict[feat[breakdown_field_index]] = affected_value

    if value_dict:
        for type_name in type_fields:
            # NOTE(review): raises KeyError if a type field of the
            # aggregation summary has no row in the exposure summary table —
            # presumably both layers always carry the same classes; verify.
            affected_value_string_formatted = value_dict[type_name]
            if affected_value_string_formatted == '0':
                # if total affected for breakdown type is zero
                # current column index
                # (len(type_total_values) is the index in the already
                # compacted header/row lists, since zero columns are not
                # appended to type_total_values)
                column_index = len(type_total_values)
                # cut column header
                type_header_labels = (
                    type_header_labels[:column_index] +
                    type_header_labels[column_index + 1:])
                # cut all row values for the column
                for item in rows:
                    type_values = item['type_values']
                    item['type_values'] = (
                        type_values[:column_index] +
                        type_values[column_index + 1:])
                continue
            type_total_values.append(affected_value_string_formatted)

    """Get the super total affected"""
    # total for affected (super total)
    # NOTE(review): iterator.next() is Python 2 only; Python 3 requires
    # next(iterator) — confirm which interpreter this version targets.
    analysis_feature = analysis_layer.getFeatures().next()
    field_index = analysis_layer.fieldNameIndex(
        total_affected_field['field_name'])
    total_all = format_number(
        analysis_feature[field_index], enable_rounding=is_rounded)

    """Generate and format the context"""
    aggregation_area_default_header = resolve_from_dictionary(
        extra_args, 'aggregation_area_default_header')
    header_label = (
        aggregation_summary.title() or aggregation_area_default_header)
    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')
    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''
    table_header = table_header_format.format(
        title=provenance['map_legend_title'], unit=unit_string)
    # Collapse any repeated whitespace left by an empty unit placeholder.
    table_header = ' '.join(table_header.split())

    section_header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    total_in_aggregation_header = resolve_from_dictionary(
        extra_args, 'total_in_aggregation_header')
    context['header'] = section_header
    context['notes'] = notes
    context['aggregation_result'] = {
        'table_header': table_header,
        'header_label': header_label,
        'type_header_labels': type_header_labels,
        'total_label': total_header,
        'total_in_aggregation_area_label': total_in_aggregation_header,
        'rows': rows,
        'type_total_values': type_total_values,
        'total_all': total_all,
    }
    return context
def aggregation_summary(aggregate_hazard, aggregation, callback=None):
    """Compute the summary from the aggregate hazard to the analysis layer.

    Source layer :
    | haz_id | haz_class | aggr_id | aggr_name | total_feature |

    Target layer :
    | aggr_id | aggr_name |

    Output layer :
    | aggr_id | aggr_name | count of affected features per exposure type

    :param aggregate_hazard: The layer to aggregate vector layer.
    :type aggregate_hazard: QgsVectorLayer

    :param aggregation: The aggregation vector layer where to write
        statistics.
    :type aggregation: QgsVectorLayer

    :param callback: A function to all to indicate progress.
        The function should accept params 'current' (int), 'maximum' (int)
        and 'step' (str). Defaults to None.
    :type callback: function

    :return: The new aggregation layer with summary.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = summary_2_aggregation_steps['output_layer_name']
    processing_step = summary_2_aggregation_steps['step_name']
    source_fields = aggregate_hazard.keywords['inasafe_fields']
    target_fields = aggregation.keywords['inasafe_fields']

    target_compulsory_fields = [
        aggregation_id_field,
        aggregation_name_field,
    ]
    check_inputs(target_compulsory_fields, target_fields)

    # Missing exposure_count_field
    source_compulsory_fields = [
        aggregation_id_field,
        aggregation_name_field,
        hazard_id_field,
        hazard_class_field,
        affected_field,
    ]
    check_inputs(source_compulsory_fields, source_fields)

    # The dynamic exposure count fields all share this key suffix pattern.
    pattern = exposure_count_field['key']
    pattern = pattern.replace('%s', '')
    unique_exposure = read_dynamic_inasafe_field(
        source_fields, exposure_count_field)

    absolute_values = create_absolute_values_structure(
        aggregate_hazard, ['aggregation_id'])

    flat_table = FlatTable('aggregation_id', 'exposure_class')

    aggregation_index = source_fields[aggregation_id_field['key']]

    # We want to loop over affected features only.
    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    expression = '\"%s\" = \'%s\'' % (
        affected_field['field_name'], tr('True'))
    request.setFilterExpression(expression)
    for area in aggregate_hazard.getFeatures(request):
        # Accumulate per-exposure counts from the dynamic fields.
        # items() instead of the Python-2-only iteritems(), consistent with
        # the other summary functions in this module.
        for key, name_field in source_fields.items():
            if key.endswith(pattern):
                aggregation_id = area[aggregation_index]
                exposure_class = key.replace(pattern, '')
                value = area[name_field]
                flat_table.add_value(
                    value,
                    aggregation_id=aggregation_id,
                    exposure_class=exposure_class)

        # We summarize every absolute values.
        for field, field_definition in absolute_values.items():
            value = area[field]
            if not value or isinstance(value, QPyNullVariant):
                value = 0
            field_definition[0].add_value(
                value,
                aggregation_id=area[aggregation_index],
            )

    # New columns are appended after the existing ones; remember the offset.
    shift = aggregation.fields().count()

    aggregation.startEditing()

    add_fields(
        aggregation,
        absolute_values,
        [total_affected_field],
        unique_exposure,
        affected_exposure_count_field)

    aggregation_index = target_fields[aggregation_id_field['key']]

    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    for area in aggregation.getFeatures(request):
        aggregation_value = area[aggregation_index]
        total = 0
        # One column per exposure class, in unique_exposure order.
        # 'count' renamed from 'sum' to avoid shadowing the builtin.
        for i, val in enumerate(unique_exposure):
            count = flat_table.get_value(
                aggregation_id=aggregation_value,
                exposure_class=val)
            total += count
            aggregation.changeAttributeValue(area.id(), shift + i, count)

        # Grand total column comes right after the per-exposure columns.
        aggregation.changeAttributeValue(
            area.id(), shift + len(unique_exposure), total)

        # Absolute value columns follow the total column.
        for i, field in enumerate(absolute_values.values()):
            value = field[0].get_value(
                aggregation_id=aggregation_value,
            )
            target_index = shift + len(unique_exposure) + 1 + i
            aggregation.changeAttributeValue(area.id(), target_index, value)

    aggregation.commitChanges()

    aggregation.keywords['title'] = layer_purpose_aggregation_summary['name']
    if qgis_version() >= 21800:
        aggregation.setName(aggregation.keywords['title'])
    else:
        aggregation.setLayerName(aggregation.keywords['title'])
    aggregation.keywords['layer_purpose'] = (
        layer_purpose_aggregation_summary['key'])

    check_layer(aggregation)
    return aggregation
def exposure_summary_table(
        aggregate_hazard, exposure_summary=None, callback=None):
    """Compute the summary from the aggregate hazard to analysis.

    Source layer :
    | haz_id | haz_class | aggr_id | aggr_name | exposure_count |

    Output layer :
    | exp_type | count_hazard_class | total |

    :param aggregate_hazard: The layer to aggregate vector layer.
    :type aggregate_hazard: QgsVectorLayer

    :param exposure_summary: The layer impact layer.
    :type exposure_summary: QgsVectorLayer

    :param callback: A function to all to indicate progress.
        The function should accept params 'current' (int), 'maximum' (int)
        and 'step' (str). Defaults to None.
    :type callback: function

    :return: The new tabular table, without geometry.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """

    def _null_to_key(hazard_class):
        """Normalize empty / None / QVariant-null hazard classes to 'NULL'.

        The same normalization must be applied when populating the flat
        table, when creating the per-class columns and when reading values
        back, otherwise a null class misses its bucket or raises KeyError.
        """
        if (hazard_class == ''
                or hazard_class is None
                or (hasattr(hazard_class, 'isNull')
                    and hazard_class.isNull())):
            return 'NULL'
        return hazard_class

    output_layer_name = summary_4_exposure_summary_table_steps[
        'output_layer_name']

    source_fields = aggregate_hazard.keywords['inasafe_fields']

    source_compulsory_fields = [
        aggregation_id_field,
        aggregation_name_field,
        hazard_id_field,
        hazard_class_field,
        affected_field,
        total_field
    ]
    check_inputs(source_compulsory_fields, source_fields)

    absolute_values = create_absolute_values_structure(
        aggregate_hazard, ['all'])

    hazard_class = source_fields[hazard_class_field['key']]
    hazard_class_index = aggregate_hazard.fields().lookupField(hazard_class)
    unique_hazard = aggregate_hazard.uniqueValues(hazard_class_index)

    unique_exposure = read_dynamic_inasafe_field(
        source_fields, exposure_count_field)

    flat_table = FlatTable('hazard_class', 'exposure_class')

    # Attributes only are needed; skip fetching geometries.
    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    # FIX: the request was previously created but never passed to
    # getFeatures().
    for area in aggregate_hazard.getFeatures(request):
        # FIX: normalize the hazard class here too, so null/empty classes
        # land in the same 'NULL' bucket that is created and read below.
        hazard_value = _null_to_key(area[hazard_class_index])
        for exposure in unique_exposure:
            key_name = exposure_count_field['key'] % exposure
            field_name = source_fields[key_name]
            exposure_count = area[field_name]
            if not exposure_count:
                exposure_count = 0

            flat_table.add_value(
                exposure_count,
                hazard_class=hazard_value,
                exposure_class=exposure)

        # We summarize every absolute values.
        for field, field_definition in list(absolute_values.items()):
            value = area[field]
            if not value:
                value = 0
            field_definition[0].add_value(value, all='all')

    tabular = create_memory_layer(output_layer_name, QgsWkbTypes.NullGeometry)
    tabular.startEditing()

    # First column: the exposure type.
    field = create_field_from_definition(exposure_type_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][exposure_type_field['key']] = (
        exposure_type_field['field_name'])

    hazard_keywords = aggregate_hazard.keywords['hazard_keywords']
    hazard = hazard_keywords['hazard']
    classification = hazard_keywords['classification']

    exposure_keywords = aggregate_hazard.keywords['exposure_keywords']
    exposure = exposure_keywords['exposure']

    # One column per hazard class, and remember each class's
    # affected/not-affected/not-exposed status for the totals below.
    hazard_affected = {}
    for hazard_class in unique_hazard:
        hazard_class = _null_to_key(hazard_class)

        field = create_field_from_definition(hazard_count_field, hazard_class)
        tabular.addAttribute(field)
        key = hazard_count_field['key'] % hazard_class
        value = hazard_count_field['field_name'] % hazard_class
        tabular.keywords['inasafe_fields'][key] = value

        hazard_affected[hazard_class] = post_processor_affected_function(
            exposure=exposure,
            hazard=hazard,
            classification=classification,
            hazard_class=hazard_class)

    field = create_field_from_definition(total_affected_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][total_affected_field['key']] = (
        total_affected_field['field_name'])

    # essentially have the same value as NULL_hazard_count
    # but with this, make sure that it exists in layer so it can be used for
    # reporting, and can be referenced to fields.py to take the label.
    field = create_field_from_definition(total_not_affected_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][total_not_affected_field['key']] = (
        total_not_affected_field['field_name'])

    field = create_field_from_definition(total_not_exposed_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][total_not_exposed_field['key']] = (
        total_not_exposed_field['field_name'])

    field = create_field_from_definition(total_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][total_field['key']] = (
        total_field['field_name'])

    # Optional productivity summary columns from the exposure summary layer.
    summarization_dicts = {}
    if exposure_summary:
        summarization_dicts = summarize_result(exposure_summary)
    sorted_keys = sorted(summarization_dicts.keys())
    for key in sorted_keys:
        summary_field = summary_rules[key]['summary_field']
        field = create_field_from_definition(summary_field)
        tabular.addAttribute(field)
        tabular.keywords['inasafe_fields'][summary_field['key']] = (
            summary_field['field_name'])

    # Only add absolute value if there is no summarization / no exposure
    # classification
    if not summarization_dicts:
        # For each absolute values
        for absolute_field in list(absolute_values.keys()):
            field_definition = definition(absolute_values[absolute_field][1])
            field = create_field_from_definition(field_definition)
            tabular.addAttribute(field)
            key = field_definition['key']
            value = field_definition['field_name']
            tabular.keywords['inasafe_fields'][key] = value

    # One row per exposure type.
    for exposure_type in unique_exposure:
        feature = QgsFeature()
        attributes = [exposure_type]
        total_affected = 0
        total_not_affected = 0
        total_not_exposed = 0
        total = 0
        for hazard_class in unique_hazard:
            # FIX: previously only '' and None were normalized here, so a
            # QVariant-null class raised KeyError on hazard_affected below.
            hazard_class = _null_to_key(hazard_class)

            value = flat_table.get_value(
                hazard_class=hazard_class,
                exposure_class=exposure_type)
            attributes.append(value)

            if hazard_affected[hazard_class] == not_exposed_class['key']:
                total_not_exposed += value
            elif hazard_affected[hazard_class]:
                total_affected += value
            else:
                total_not_affected += value

            total += value

        attributes.append(total_affected)
        attributes.append(total_not_affected)
        attributes.append(total_not_exposed)
        attributes.append(total)

        if summarization_dicts:
            for key in sorted_keys:
                attributes.append(summarization_dicts[key].get(
                    exposure_type, 0))
        else:
            for i, field in enumerate(absolute_values.values()):
                value = field[0].get_value(all='all')
                attributes.append(value)

        feature.setAttributes(attributes)
        tabular.addFeature(feature)

        # Sanity check ± 1 to the result. Disabled for now as it seems ± 1 is
        # not enough. ET 13/02/17
        # total_computed = (
        #     total_affected + total_not_affected + total_not_exposed)
        # if not -1 < (total_computed - total) < 1:
        #     raise ComputationError

    tabular.commitChanges()

    tabular.keywords['title'] = layer_purpose_exposure_summary_table['name']
    if qgis_version() >= 21800:
        tabular.setName(tabular.keywords['title'])
    else:
        tabular.setLayerName(tabular.keywords['title'])
    tabular.keywords['layer_purpose'] = layer_purpose_exposure_summary_table[
        'key']

    check_layer(tabular, has_geometry=False)
    return tabular
def multi_exposure_analysis_summary(analysis, intermediate_analysis):
    """Merge intermediate analysis into one analysis summary.

    List of analysis layers like:
    | analysis_id | count_hazard_class | affected_count | total |

    Target layer :
    | analysis_id |

    Output layer :
    | analysis_id | count_hazard_class | affected_count | total |

    :param analysis: The target vector layer where to write statistics.
    :type analysis: QgsVectorLayer

    :param intermediate_analysis: List of analysis layer for a single
        exposure.
    :type intermediate_analysis: list

    :return: The new target layer with summary.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.3
    """
    analysis.startEditing()

    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    # The analysis layer has a single feature; it receives all new columns.
    target_id = next(analysis.getFeatures(request)).id()

    def _copy_total(analysis_result, feature, exposure,
                    source_definition, target_definition):
        """Copy one total column from an intermediate analysis to the target.

        Creates the per-exposure target field, writes the source value into
        the target feature and registers the new field in the keywords.
        """
        source_index = analysis_result.fields().lookupField(
            source_definition['field_name'])
        field = create_field_from_definition(target_definition, exposure)
        analysis.addAttribute(field)
        index = analysis.fields().lookupField(field.name())
        analysis.changeAttributeValue(
            target_id, index, feature[source_index])
        # keywords
        key = target_definition['key'] % exposure
        value = target_definition['field_name'] % exposure
        analysis.keywords['inasafe_fields'][key] = value

    # (source field on the single-exposure analysis, per-exposure target
    # field) pairs, copied verbatim in the original column order.
    total_field_pairs = [
        (total_affected_field, exposure_total_affected_field),
        (total_not_affected_field, exposure_total_not_affected_field),
        (total_exposed_field, exposure_total_exposed_field),
        (total_not_exposed_field, exposure_total_not_exposed_field),
        (total_field, exposure_total_field),
    ]

    for analysis_result in intermediate_analysis:
        exposure = analysis_result.keywords['exposure_keywords']['exposure']
        iterator = analysis_result.getFeatures(request)
        feature = next(iterator)
        source_fields = analysis_result.keywords['inasafe_fields']

        # Dynamic fields : one hazard count column per hazard zone.
        hazards = read_dynamic_inasafe_field(
            source_fields, hazard_count_field)
        for hazard_zone in hazards:
            field = create_field_from_definition(
                exposure_hazard_count_field, exposure, hazard_zone)
            analysis.addAttribute(field)
            index = analysis.fields().lookupField(field.name())
            value = feature[analysis_result.fields().lookupField(
                hazard_count_field['field_name'] % hazard_zone)]
            analysis.changeAttributeValue(target_id, index, value)

            # keywords
            key = exposure_hazard_count_field['key'] % (
                exposure, hazard_zone)
            value = exposure_hazard_count_field['field_name'] % (
                exposure, hazard_zone)
            analysis.keywords['inasafe_fields'][key] = value

        # Static total fields (affected, not affected, exposed, not exposed,
        # total) copied one by one.
        for source_definition, target_definition in total_field_pairs:
            _copy_total(
                analysis_result,
                feature,
                exposure,
                source_definition,
                target_definition)

    analysis.commitChanges()

    analysis.keywords['title'] = (
        layer_purpose_analysis_impacted['multi_exposure_name'])
    analysis.keywords['layer_purpose'] = layer_purpose_analysis_impacted['key']

    # Set up the extra keywords so everyone knows it's a
    # multi exposure analysis result.
    extra_keywords = {
        extra_keyword_analysis_type['key']: MULTI_EXPOSURE_ANALYSIS_FLAG
    }
    analysis.keywords['extra_keywords'] = extra_keywords

    if qgis_version() >= 21600:
        analysis.setName(analysis.keywords['title'])
    else:
        analysis.setLayerName(analysis.keywords['title'])
    return analysis
def multi_exposure_aggregation_summary(aggregation, intermediate_layers):
    """Merge intermediate aggregations into one aggregation summary.

    Source layer :
    | aggr_id | aggr_name | count of affected features per exposure type

    Target layer :
    | aggregation_id | aggregation_name |

    Output layer :
    | aggr_id | aggr_name | count of affected per exposure type for each

    :param aggregation: The target vector layer where to write statistics.
    :type aggregation: QgsVectorLayer

    :param intermediate_layers: List of aggregation layer for a single
        exposure
    :type intermediate_layers: list

    :return: The new target layer with summary.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.3
    """
    target_index_field_name = (
        aggregation.keywords['inasafe_fields'][aggregation_id_field['key']])

    aggregation.startEditing()

    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)

    for layer in intermediate_layers:
        source_fields = layer.keywords['inasafe_fields']
        exposure = layer.keywords['exposure_keywords']['exposure']
        unique_exposure = read_dynamic_inasafe_field(
            source_fields,
            affected_exposure_count_field,
            [total_affected_field])

        # Maps a source field index on `layer` to the matching target field
        # index on `aggregation`.
        field_map = {}

        # One affected-count column per exposure class.
        for exposure_class in unique_exposure:
            field = create_field_from_definition(
                exposure_affected_exposure_type_count_field,
                name=exposure,
                sub_name=exposure_class)
            aggregation.addAttribute(field)
            source_field_index = layer.fields().lookupField(
                affected_exposure_count_field['field_name'] % exposure_class)
            target_field_index = aggregation.fields().lookupField(
                field.name())
            field_map[source_field_index] = target_field_index

        # Total affected field
        # FIX: the target field was previously created from
        # exposure_total_not_affected_field, so the affected totals were
        # written into a column named "not affected".
        field = create_field_from_definition(
            exposure_total_affected_field, exposure)
        aggregation.addAttribute(field)
        source_field_index = layer.fields().lookupField(
            total_affected_field['field_name'])
        target_field_index = aggregation.fields().lookupField(field.name())
        field_map[source_field_index] = target_field_index

        # Get Aggregation ID from original feature
        index = (layer.fields().lookupField(
            source_fields[aggregation_id_field['key']]))

        for source_feature in layer.getFeatures(request):
            # Join each source row to the target row sharing its
            # aggregation ID.
            target_expression = QgsFeatureRequest()
            target_expression.setFlags(QgsFeatureRequest.NoGeometry)
            expression = '\"{field_name}\" = {id_value}'.format(
                field_name=target_index_field_name,
                id_value=source_feature[index])
            target_expression.setFilterExpression(expression)

            iterator = aggregation.getFeatures(target_expression)
            target_feature = next(iterator)  # It must return only 1 feature.
            for source_field, target_field in list(field_map.items()):
                aggregation.changeAttributeValue(
                    target_feature.id(),
                    target_field,
                    source_feature[source_field])
            try:
                next(iterator)
            except StopIteration:
                # Everything is fine, it's normal.
                pass
            else:
                # This should never happen ! IDs are duplicated in the
                # aggregation layer.
                raise Exception(
                    'Aggregation IDs are duplicated in the aggregation '
                    'layer. We can\'t make any joins.')

    aggregation.commitChanges()

    aggregation.keywords['title'] = (
        layer_purpose_aggregation_summary['multi_exposure_name'])
    aggregation.keywords['layer_purpose'] = (
        layer_purpose_aggregation_summary['key'])

    # Set up the extra keywords so everyone knows it's a
    # multi exposure analysis result.
    extra_keywords = {
        extra_keyword_analysis_type['key']: MULTI_EXPOSURE_ANALYSIS_FLAG
    }
    aggregation.keywords['extra_keywords'] = extra_keywords

    if qgis_version() >= 21600:
        aggregation.setName(aggregation.keywords['title'])
    else:
        aggregation.setLayerName(aggregation.keywords['title'])
    return aggregation
def aggregation_result_extractor(impact_report, component_metadata):
    """Extracting aggregation result of breakdown from the impact layer.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}

    """Initializations"""
    extra_args = component_metadata.extra_args
    # Find out aggregation report type
    exposure_layer = impact_report.exposure
    analysis_layer = impact_report.analysis
    provenance = impact_report.impact_function.provenance
    exposure_summary_table = impact_report.exposure_summary_table
    if exposure_summary_table:
        exposure_summary_table_fields = exposure_summary_table.keywords[
            'inasafe_fields']
    aggregation_summary = impact_report.aggregation_summary
    aggregation_summary_fields = aggregation_summary.keywords[
        'inasafe_fields']
    debug_mode = impact_report.impact_function.debug_mode

    """Filtering report sections"""

    # Only process for applicable exposure types
    # Get exposure type definition
    exposure_type = layer_definition_type(exposure_layer)
    # Only round the number when it is population exposure and it is not
    # in debug mode
    is_rounded = not debug_mode
    is_population = exposure_type is exposure_population

    # For now aggregation report only applicable for breakable exposure types:
    itemizable_exposures_all = [
        exposure for exposure in exposure_all
        if exposure.get('classifications')]
    if exposure_type not in itemizable_exposures_all:
        return context

    """Generating type name for columns"""

    type_fields = read_dynamic_inasafe_field(
        aggregation_summary_fields, affected_exposure_count_field)
    # do not include total, to preserve ordering and proper reference
    type_fields.remove('total')

    # we need to sort the column
    # get the classes lists
    # retrieve classes definitions
    exposure_classes_lists = retrieve_exposure_classes_lists(exposure_layer)

    # sort columns based on class order
    # create function to sort
    def sort_classes(_type_field):
        """Sort method to retrieve exposure class key index."""
        # class key is the type field name
        # find index in class list
        for i, _exposure_class in enumerate(exposure_classes_lists):
            if _type_field == _exposure_class['key']:
                index = i
                break
        else:
            index = -1

        return index

    # sort
    type_fields = sorted(type_fields, key=sort_classes)

    # generate type_header_labels for column header
    type_header_labels = []
    for type_name in type_fields:
        type_label = tr(type_name.capitalize())
        type_header_labels.append(type_label)

    """Generating values for rows"""

    # generate rows of values for values of each column
    rows = []
    # QGIS 3 API: resolve indices via fields().lookupField(); the QGIS 2
    # fieldNameIndex() method no longer exists.
    aggregation_name_index = aggregation_summary.fields().lookupField(
        aggregation_name_field['field_name'])
    total_field_index = aggregation_summary.fields().lookupField(
        total_affected_field['field_name'])

    type_field_index = []
    for type_name in type_fields:
        field_name = affected_exposure_count_field['field_name'] % type_name
        type_index = aggregation_summary.fields().lookupField(field_name)
        type_field_index.append(type_index)

    for feat in aggregation_summary.getFeatures():
        total_affected_value = format_number(
            feat[total_field_index],
            enable_rounding=is_rounded,
            is_population=is_population)
        if total_affected_value == '0':
            # skip aggregation type if the total affected is zero
            continue
        item = {
            # Name is the header for each row
            'name': feat[aggregation_name_index],
            # Total is the total for each row
            'total': total_affected_value
        }
        # Type values is the values for each column in each row
        type_values = []
        for idx in type_field_index:
            affected_value = format_number(
                feat[idx],
                enable_rounding=is_rounded)
            type_values.append(affected_value)
        item['type_values'] = type_values
        rows.append(item)

    """Generate total for footers"""

    # calculate total values for each type. Taken from exposure summary table
    type_total_values = []
    # Get affected field index
    affected_field_index = exposure_summary_table.fields().lookupField(
        total_affected_field['field_name'])

    # Get breakdown field
    breakdown_field = None
    # I'm not sure what's the difference
    # It is possible to have exposure_type_field or exposure_class_field
    # at the moment
    breakdown_fields = [
        exposure_type_field,
        exposure_class_field
    ]
    for field in breakdown_fields:
        if field['key'] in exposure_summary_table_fields:
            breakdown_field = field
            break

    breakdown_field_name = breakdown_field['field_name']
    breakdown_field_index = exposure_summary_table.fields().lookupField(
        breakdown_field_name)

    # Fetch total affected for each breakdown name
    value_dict = {}
    for feat in exposure_summary_table.getFeatures():
        # exposure summary table is in csv format, so the field returned is
        # always in text format
        affected_value = int(float(feat[affected_field_index]))
        affected_value = format_number(
            affected_value,
            enable_rounding=is_rounded,
            is_population=is_population)
        value_dict[feat[breakdown_field_index]] = affected_value

    if value_dict:
        for type_name in type_fields:
            affected_value_string_formatted = value_dict[type_name]
            if affected_value_string_formatted == '0':
                # if total affected for breakdown type is zero
                # current column index
                column_index = len(type_total_values)
                # cut column header
                type_header_labels = (
                    type_header_labels[:column_index] +
                    type_header_labels[column_index + 1:])
                # cut all row values for the column
                for item in rows:
                    type_values = item['type_values']
                    item['type_values'] = (
                        type_values[:column_index] +
                        type_values[column_index + 1:])
                continue
            type_total_values.append(affected_value_string_formatted)

    """Get the super total affected"""

    # total for affected (super total)
    # Python 3: iterators do not have a .next() method, use next() builtin.
    analysis_feature = next(analysis_layer.getFeatures())
    field_index = analysis_layer.fields().lookupField(
        total_affected_field['field_name'])
    total_all = format_number(
        analysis_feature[field_index],
        enable_rounding=is_rounded)

    """Generate and format the context"""
    aggregation_area_default_header = resolve_from_dictionary(
        extra_args, 'aggregation_area_default_header')
    header_label = (
        aggregation_summary.title() or aggregation_area_default_header)

    table_header_format = resolve_from_dictionary(
        extra_args, 'table_header_format')

    # check unit
    units = exposure_type['units']
    if units:
        unit = units[0]
        abbreviation = unit['abbreviation']
        if abbreviation:
            unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
        else:
            unit_string = ''
    else:
        unit_string = ''

    table_header = table_header_format.format(
        title=provenance['map_legend_title'],
        unit=unit_string)
    # collapse any duplicated whitespace introduced by an empty unit string
    table_header = ' '.join(table_header.split())

    section_header = resolve_from_dictionary(extra_args, 'header')
    notes = resolve_from_dictionary(extra_args, 'notes')
    total_header = resolve_from_dictionary(extra_args, 'total_header')
    total_in_aggregation_header = resolve_from_dictionary(
        extra_args, 'total_in_aggregation_header')
    context['header'] = section_header
    context['notes'] = notes
    context['aggregation_result'] = {
        'table_header': table_header,
        'header_label': header_label,
        'type_header_labels': type_header_labels,
        'total_label': total_header,
        'total_in_aggregation_area_label': total_in_aggregation_header,
        'rows': rows,
        'type_total_values': type_total_values,
        'total_all': total_all,
    }
    return context
def aggregation_summary(aggregate_hazard, aggregation, callback=None):
    """Compute the summary from the aggregate hazard to the analysis layer.

    Source layer :
    | haz_id | haz_class | aggr_id | aggr_name | total_feature |

    Target layer :
    | aggr_id | aggr_name |

    Output layer :
    | aggr_id | aggr_name | count of affected features per exposure type

    :param aggregate_hazard: The layer to aggregate vector layer.
    :type aggregate_hazard: QgsVectorLayer

    :param aggregation: The aggregation vector layer where to write
        statistics.
    :type aggregation: QgsVectorLayer

    :param callback: A function to all to indicate progress. The function
        should accept params 'current' (int), 'maximum' (int) and 'step'
        (str). Defaults to None.
    :type callback: function

    :return: The new aggregation layer with summary.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = summary_2_aggregation_steps['output_layer_name']
    processing_step = summary_2_aggregation_steps['step_name']

    source_fields = aggregate_hazard.keywords['inasafe_fields']
    target_fields = aggregation.keywords['inasafe_fields']

    target_compulsory_fields = [
        aggregation_id_field,
        aggregation_name_field,
    ]
    check_inputs(target_compulsory_fields, target_fields)

    # Missing exposure_count_field
    source_compulsory_fields = [
        aggregation_id_field,
        aggregation_name_field,
        hazard_id_field,
        hazard_class_field,
        affected_field,
    ]
    check_inputs(source_compulsory_fields, source_fields)

    # Suffix used to recognise the dynamic per-exposure count fields.
    pattern = exposure_count_field['key']
    pattern = pattern.replace('%s', '')
    unique_exposure = read_dynamic_inasafe_field(
        source_fields, exposure_count_field)

    absolute_values = create_absolute_values_structure(
        aggregate_hazard, ['aggregation_id'])

    flat_table = FlatTable('aggregation_id', 'exposure_class')

    aggregation_index = source_fields[aggregation_id_field['key']]

    # We want to loop over affected features only.
    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    expression = '\"%s\" = \'%s\'' % (
        affected_field['field_name'], tr('True'))
    request.setFilterExpression(expression)
    for area in aggregate_hazard.getFeatures(request):
        # Python 3: dict.iteritems() does not exist, use items().
        for key, name_field in source_fields.items():
            if key.endswith(pattern):
                aggregation_id = area[aggregation_index]
                exposure_class = key.replace(pattern, '')
                value = area[name_field]
                flat_table.add_value(
                    value,
                    aggregation_id=aggregation_id,
                    exposure_class=exposure_class
                )

        # We summarize every absolute values.
        for field, field_definition in absolute_values.items():
            value = area[field]
            # NULL attributes under QGIS 3 come back as a null variant
            # (QPyNullVariant no longer exists in PyQt5); detect them the
            # same way exposure_summary_table in this module does.
            if not value or (hasattr(value, 'isNull') and value.isNull()):
                value = 0
            field_definition[0].add_value(
                value,
                aggregation_id=area[aggregation_index],
            )

    # New statistic columns are appended after the existing ones.
    shift = aggregation.fields().count()

    aggregation.startEditing()

    add_fields(
        aggregation,
        absolute_values,
        [total_affected_field],
        unique_exposure,
        affected_exposure_count_field)

    aggregation_index = target_fields[aggregation_id_field['key']]

    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    for area in aggregation.getFeatures(request):
        aggregation_value = area[aggregation_index]
        total = 0
        for i, val in enumerate(unique_exposure):
            # 'count' rather than shadowing the builtin 'sum'.
            count = flat_table.get_value(
                aggregation_id=aggregation_value,
                exposure_class=val
            )
            total += count
            aggregation.changeAttributeValue(area.id(), shift + i, count)
        aggregation.changeAttributeValue(
            area.id(), shift + len(unique_exposure), total)

        # Python 3: dict.itervalues() does not exist, use values().
        for i, field in enumerate(absolute_values.values()):
            value = field[0].get_value(
                aggregation_id=aggregation_value,
            )
            # Absolute value columns come right after the total column.
            target_index = shift + len(unique_exposure) + 1 + i
            aggregation.changeAttributeValue(
                area.id(), target_index, value)

    aggregation.commitChanges()

    aggregation.keywords['title'] = layer_purpose_aggregation_summary['name']
    # setName replaced setLayerName in newer QGIS versions.
    if qgis_version() >= 21800:
        aggregation.setName(aggregation.keywords['title'])
    else:
        aggregation.setLayerName(aggregation.keywords['title'])
    aggregation.keywords['layer_purpose'] = (
        layer_purpose_aggregation_summary['key'])

    check_layer(aggregation)
    return aggregation
def exposure_summary_table(
        aggregate_hazard, exposure_summary=None, callback=None):
    """Compute the summary from the aggregate hazard to analysis.

    Source layer :
    | haz_id | haz_class | aggr_id | aggr_name | exposure_count |

    Output layer :
    | exp_type | count_hazard_class | total |

    :param aggregate_hazard: The layer to aggregate vector layer.
    :type aggregate_hazard: QgsVectorLayer

    :param exposure_summary: The layer impact layer.
    :type exposure_summary: QgsVectorLayer

    :param callback: A function to all to indicate progress. The function
        should accept params 'current' (int), 'maximum' (int) and 'step'
        (str). Defaults to None.
    :type callback: function

    :return: The new tabular table, without geometry.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = summary_4_exposure_summary_table_steps[
        'output_layer_name']

    source_fields = aggregate_hazard.keywords['inasafe_fields']

    source_compulsory_fields = [
        aggregation_id_field,
        aggregation_name_field,
        hazard_id_field,
        hazard_class_field,
        affected_field,
        total_field
    ]
    check_inputs(source_compulsory_fields, source_fields)

    absolute_values = create_absolute_values_structure(
        aggregate_hazard, ['all'])

    hazard_class = source_fields[hazard_class_field['key']]
    hazard_class_index = aggregate_hazard.fields().lookupField(hazard_class)
    unique_hazard = aggregate_hazard.uniqueValues(hazard_class_index)

    unique_exposure = read_dynamic_inasafe_field(
        source_fields, exposure_count_field)

    flat_table = FlatTable('hazard_class', 'exposure_class')

    # Only attributes are needed in this loop, so skip loading geometries
    # (the request was previously built but never passed to getFeatures).
    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    for area in aggregate_hazard.getFeatures(request):
        hazard_value = area[hazard_class_index]
        for exposure in unique_exposure:
            key_name = exposure_count_field['key'] % exposure
            field_name = source_fields[key_name]
            exposure_count = area[field_name]
            if not exposure_count:
                exposure_count = 0

            flat_table.add_value(
                exposure_count,
                hazard_class=hazard_value,
                exposure_class=exposure
            )

        # We summarize every absolute values.
        for field, field_definition in list(absolute_values.items()):
            value = area[field]
            if not value:
                value = 0
            field_definition[0].add_value(
                value,
                all='all'
            )

    tabular = create_memory_layer(output_layer_name, QgsWkbTypes.NullGeometry)
    tabular.startEditing()

    field = create_field_from_definition(exposure_type_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][exposure_type_field['key']] = (
        exposure_type_field['field_name'])

    hazard_keywords = aggregate_hazard.keywords['hazard_keywords']
    hazard = hazard_keywords['hazard']
    classification = hazard_keywords['classification']

    exposure_keywords = aggregate_hazard.keywords['exposure_keywords']
    exposure = exposure_keywords['exposure']

    hazard_affected = {}
    for hazard_class in unique_hazard:
        # Normalise empty / null hazard classes to the literal 'NULL' key.
        if (hazard_class == ''
                or hazard_class is None
                or (hasattr(hazard_class, 'isNull')
                    and hazard_class.isNull())):
            hazard_class = 'NULL'
        field = create_field_from_definition(hazard_count_field, hazard_class)
        tabular.addAttribute(field)
        key = hazard_count_field['key'] % hazard_class
        value = hazard_count_field['field_name'] % hazard_class
        tabular.keywords['inasafe_fields'][key] = value

        hazard_affected[hazard_class] = post_processor_affected_function(
            exposure=exposure,
            hazard=hazard,
            classification=classification,
            hazard_class=hazard_class
        )

    field = create_field_from_definition(total_affected_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][total_affected_field['key']] = (
        total_affected_field['field_name'])

    # essentially have the same value as NULL_hazard_count
    # but with this, make sure that it exists in layer so it can be used for
    # reporting, and can be referenced to fields.py to take the label.
    field = create_field_from_definition(total_not_affected_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][total_not_affected_field['key']] = (
        total_not_affected_field['field_name'])

    field = create_field_from_definition(total_not_exposed_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][total_not_exposed_field['key']] = (
        total_not_exposed_field['field_name'])

    field = create_field_from_definition(total_field)
    tabular.addAttribute(field)
    tabular.keywords['inasafe_fields'][total_field['key']] = (
        total_field['field_name'])

    summarization_dicts = {}
    if exposure_summary:
        summarization_dicts = summarize_result(exposure_summary)

    sorted_keys = sorted(summarization_dicts.keys())

    for key in sorted_keys:
        summary_field = summary_rules[key]['summary_field']
        field = create_field_from_definition(summary_field)
        tabular.addAttribute(field)
        tabular.keywords['inasafe_fields'][summary_field['key']] = (
            summary_field['field_name'])

    # Only add absolute value if there is no summarization / no exposure
    # classification
    if not summarization_dicts:
        # For each absolute values
        for absolute_field in list(absolute_values.keys()):
            field_definition = definition(absolute_values[absolute_field][1])
            field = create_field_from_definition(field_definition)
            tabular.addAttribute(field)
            key = field_definition['key']
            value = field_definition['field_name']
            tabular.keywords['inasafe_fields'][key] = value

    for exposure_type in unique_exposure:
        feature = QgsFeature()
        attributes = [exposure_type]
        total_affected = 0
        total_not_affected = 0
        total_not_exposed = 0
        total = 0
        for hazard_class in unique_hazard:
            # Use the same NULL normalisation as the field-creation loop
            # above; otherwise a null hazard class would miss the 'NULL'
            # key in hazard_affected and raise a KeyError.
            if (hazard_class == ''
                    or hazard_class is None
                    or (hasattr(hazard_class, 'isNull')
                        and hazard_class.isNull())):
                hazard_class = 'NULL'
            value = flat_table.get_value(
                hazard_class=hazard_class,
                exposure_class=exposure_type
            )
            attributes.append(value)
            if hazard_affected[hazard_class] == not_exposed_class['key']:
                total_not_exposed += value
            elif hazard_affected[hazard_class]:
                total_affected += value
            else:
                total_not_affected += value

            total += value

        attributes.append(total_affected)
        attributes.append(total_not_affected)
        attributes.append(total_not_exposed)
        attributes.append(total)

        if summarization_dicts:
            for key in sorted_keys:
                attributes.append(summarization_dicts[key].get(
                    exposure_type, 0))
        else:
            for i, field in enumerate(absolute_values.values()):
                value = field[0].get_value(
                    all='all'
                )
                attributes.append(value)

        feature.setAttributes(attributes)
        tabular.addFeature(feature)

        # Sanity check ± 1 to the result. Disabled for now as it seems ± 1 is
        # not enough. ET 13/02/17
        # total_computed = (
        #     total_affected + total_not_affected + total_not_exposed)
        # if not -1 < (total_computed - total) < 1:
        #     raise ComputationError

    tabular.commitChanges()

    tabular.keywords['title'] = layer_purpose_exposure_summary_table['name']
    # setName replaced setLayerName in newer QGIS versions.
    if qgis_version() >= 21800:
        tabular.setName(tabular.keywords['title'])
    else:
        tabular.setLayerName(tabular.keywords['title'])
    tabular.keywords['layer_purpose'] = layer_purpose_exposure_summary_table[
        'key']

    check_layer(tabular, has_geometry=False)
    return tabular