def test_generate_report_dictionary_from_dom(self):
    """Test generate_report_dictionary_from_dom function."""
    self.mock_the_dialog(test_entire_mode=False)
    dialog = self.impact_merge_dialog
    dialog.prepare_input()
    dialog.validate_all_layers()

    # Wrap each postprocessing report in a single root element so that
    # minidom accepts it.
    first_html = (
        '<body>'
        + dialog.first_impact['postprocessing_report']
        + '</body>')
    second_html = (
        '<body>'
        + dialog.second_impact['postprocessing_report']
        + '</body>')

    # Parse both fragments and collect every <table> element from each.
    tables = minidom.parseString(
        get_string(first_html)).getElementsByTagName('table')
    tables += minidom.parseString(
        get_string(second_html)).getElementsByTagName('table')

    report_dict = dialog.generate_report_dictionary_from_dom(tables)

    # There should be 4 keys in that dict
    # (3 for each aggregation unit and 1 for total in aggregation unit)
    expected_number_of_keys = 4
    self.assertEqual(len(report_dict), expected_number_of_keys)
def create_keyword_file(self, algorithm):
    """Create keyword file for the raster file created.

    Basically copy a template from keyword file in converter data
    and add extra keyword (usually a title)

    :param algorithm: Which re-sampling algorithm to use.
        valid options are 'nearest' (for nearest neighbour), 'invdist'
        (for inverse distance), 'average' (for moving average). Defaults
        to 'nearest' if not specified. Note that passing re-sampling alg
        parameters is currently not supported. If None is passed it will
        be replaced with 'nearest'.
    :type algorithm: str
    """
    # Include the algorithm in the file name only when one is configured.
    if self.algorithm_name:
        file_name = '%s-%s.keywords' % (self.output_basename, algorithm)
    else:
        file_name = '%s.keywords' % self.output_basename
    keyword_path = os.path.join(self.output_dir, file_name)

    # Start from the mmi keywords template shipped with converter data.
    mmi_keywords = os.path.join(data_dir(), 'mmi.keywords')
    shutil.copyfile(mmi_keywords, keyword_path)

    # append title and source to the keywords file
    keyword_title = (
        self.output_basename if not self.title.strip() else self.title)
    with open(keyword_path, 'a') as keyword_file:
        keyword_file.write(get_string('title: %s \n' % keyword_title))
        keyword_file.write(get_string('source: %s ' % self.source))
def create_keyword_file(self, algorithm):
    """Create keyword file for the raster file created.

    Basically copy a template from keyword file in converter data
    and add extra keyword (usually a title)

    :param algorithm: Which re-sampling algorithm to use.
        valid options are 'nearest' (for nearest neighbour), 'invdist'
        (for inverse distance), 'average' (for moving average). Defaults
        to 'nearest' if not specified. Note that passing re-sampling alg
        parameters is currently not supported. If None is passed it will
        be replaced with 'nearest'.
    :type algorithm: str
    """
    # Build the destination path; the algorithm only appears in the
    # file name when one was configured on the instance.
    if self.algorithm_name:
        base = "%s-%s.keywords" % (self.output_basename, algorithm)
    else:
        base = "%s.keywords" % self.output_basename
    keyword_path = os.path.join(self.output_dir, base)

    # Copy the template keywords shipped with the converter data.
    template_path = os.path.join(data_dir(), "mmi.keywords")
    shutil.copyfile(template_path, keyword_path)

    # append title and source to the keywords file
    keyword_title = self.title
    if not keyword_title.strip():
        keyword_title = self.output_basename
    with open(keyword_path, "a") as keyword_file:
        keyword_file.write(get_string("title: %s \n" % keyword_title))
        keyword_file.write(get_string("source: %s " % self.source))
def test_get_string(self):
    """Test get_string function.

    Checks that a unicode string is converted to its UTF-8 encoded
    byte representation.
    """
    unicode_text = u'Test \xe1, \xe9, \xed, \xf3, \xfa, \xfc, \xf1, \xbf'
    string_repr = 'Test \xc3\xa1, \xc3\xa9, \xc3\xad, \xc3\xb3, ' \
        '\xc3\xba, \xc3\xbc, \xc3\xb1, \xc2\xbf'
    # Fix: the failure message previously passed the ACTUAL value as the
    # "should return" argument. Expected comes first, actual second
    # (consistent with the sibling test).
    message = 'It should return %s, but it returned %s' % (
        string_repr, get_string(unicode_text))
    self.assertEqual(get_string(unicode_text), string_repr, message)
def test_get_string(self):
    """Test get_string function."""
    text = 'Test \xe1, \xe9, \xed, \xf3, \xfa, \xfc, \xf1, \xbf'
    string_repr = (
        b'Test \xc3\xa1, \xc3\xa9, \xc3\xad, \xc3\xb3, \xc3\xba, '
        b'\xc3\xbc, \xc3\xb1, \xc2\xbf')
    message = 'It should return %s, but it returned %s' % (
        string_repr, get_string(text))
    # Unicode text must come back UTF-8 encoded.
    self.assertEqual(get_string(text), string_repr, message)
    # Bytes input must pass through unchanged.
    self.assertEqual(get_string(string_repr), string_repr)
def test_generate_reports(self):
    """Test generate_reports function."""
    self.mock_the_dialog(test_entire_mode=False)
    dialog = self.impact_merge_dialog
    dialog.prepare_input()
    dialog.validate_all_layers()

    # Each postprocessing report needs a single root element before
    # minidom will parse it.
    first_report = (
        '<body>'
        + dialog.first_impact['postprocessing_report']
        + '</body>')
    second_report = (
        '<body>'
        + dialog.second_impact['postprocessing_report']
        + '</body>')

    first_document = minidom.parseString(get_string(first_report))
    second_document = minidom.parseString(get_string(second_report))

    first_report_dict = dialog.generate_report_dictionary_from_dom(
        first_document.getElementsByTagName('table'))
    second_report_dict = dialog.generate_report_dictionary_from_dom(
        second_document.getElementsByTagName('table'))

    dialog.generate_report_summary(first_report_dict, second_report_dict)
    dialog.generate_html_reports(first_report_dict, second_report_dict)

    # Generate PDF Reports
    dialog.generate_reports()

    # There should be 3 pdf files in self.impact_merge_dialog.out_dir
    report_list = glob(os.path.join(dialog.out_dir, '*.pdf'))
    expected_reports_number = 3
    self.assertEqual(len(report_list), expected_reports_number)
def __init__(self, message=None):
    """General constructor.

    :param message: The optional error message.
    :type message: str, unicode, MessageElement
    """
    # Fix: the docstring delimiters were malformed (four opening and
    # five closing quotes), which is a syntax error.
    #
    # Normalise the message so the base exception gets a native string
    # while self.message is always stored as unicode.
    if isinstance(message, unicode):
        super(InaSAFEError, self).__init__(get_string(message))
        self.message = message
    elif isinstance(message, str):
        super(InaSAFEError, self).__init__(message)
        self.message = get_unicode(message)
    elif isinstance(message, MessageElement):
        super(InaSAFEError, self).__init__(message.to_text())
        self.message = get_unicode(message.to_text())
    elif message is None:
        # No message supplied; leave self.message unset.
        pass
    elif isinstance(message, BaseException):
        super(InaSAFEError, self).__init__(unicode(message))
        self.message = unicode(message)
    # This shouldn't happen...
    else:
        raise TypeError
def test_str_unicode_str(self):
    """Test if str(unicode(str)) works correctly."""
    text = 'Test á, é, í, ó, ú, ü, ñ, ¿'
    # A round trip through unicode and back must reproduce the input.
    str_repr = get_string(get_unicode(text))
    message = 'It should return %s, but it returned %s' % (text, str_repr)
    self.assertEqual(text, str_repr, message)
def test_str_unicode_str(self):
    """Test if str(unicode(str)) works correctly."""
    text = 'Test á, é, í, ó, ú, ü, ñ, ¿'.encode('utf-8')
    # Decoding to unicode and re-encoding must be lossless.
    str_repr = get_string(get_unicode(text))
    message = 'It should return %s, but it returned %s' % (text, str_repr)
    self.assertEqual(text, str_repr, message)
def on_pbnNext_released(self):
    """Handle the Next button release.

    .. note:: This is an automatic Qt slot
        executed when the Next button is released.
    """
    current_step = self.get_current_step()

    # Validate the field mapping before leaving that page; stay on the
    # current step when validation fails.
    if current_step == self.step_kw_fields_mapping:
        try:
            self.step_kw_fields_mapping.get_field_mapping()
        except InvalidValidationException as e:
            display_warning_message_box(
                self, tr('Invalid Field Mapping'), get_string(e.message))
            return

    # Record the visited step on the matching history stack (impact
    # function wizard vs keyword wizard) so Back can retrace it.
    if current_step.step_type == STEP_FC:
        self.impact_function_steps.append(current_step)
    elif current_step.step_type == STEP_KW:
        self.keyword_steps.append(current_step)
    else:
        LOGGER.debug(current_step.step_type)
        raise InvalidWizardStep

    # Save keywords if it's the end of the keyword creation mode
    if current_step == self.step_kw_summary:
        self.save_current_keywords()

    # After any step involving Browser, add selected layer to map canvas
    if current_step in [
            self.step_fc_hazlayer_from_browser,
            self.step_fc_explayer_from_browser,
            self.step_fc_agglayer_from_browser]:
        # Only add the layer if it is not already registered by name.
        if not QgsMapLayerRegistry.instance().mapLayersByName(
                self.layer.name()):
            QgsMapLayerRegistry.instance().addMapLayers([self.layer])

            # Make the layer visible. Might be hidden by default. See #2925
            legend = self.iface.legendInterface()
            legend.setLayerVisible(self.layer, True)

    # After the extent selection, save the extent and disconnect signals
    if current_step == self.step_fc_extent:
        self.step_fc_extent.write_extent()

    # Determine the new step to be switched
    new_step = current_step.get_next_step()

    if new_step is not None:
        # Prepare the next tab
        new_step.set_widgets()
    else:
        # Wizard complete
        self.accept()
        return

    self.go_to_step(new_step)
def accept(self):
    """Method invoked when OK button is clicked."""
    # Persist the mapping; keep the dialog open when validation fails.
    try:
        self.save_metadata()
    except InvalidValidationException as e:
        display_warning_message_box(
            self,
            tr('Invalid Field Mapping'),
            get_string(e.message))
    else:
        super(FieldMappingDialog, self).accept()
def __str__(self):
    """return the HTML code for the table cell as a string

    .. note:: Since we are using the bootstrap framework we set
        alignment using inlined css as bootstrap will override the
        alignment given by align and valign html attributes.

    :returns: The cell rendered as a <th> (header) or <td> element.
    :rtype: str
    """
    attribs_str = ''
    if self.bgcolor:
        self.attribs['bgcolor'] = self.bgcolor
    if self.width:
        self.attribs['width'] = self.width
    if self.align:
        self.attribs['align'] = self.align
        self.style += 'text-align: ' + self.align + ';'
    if self.char:
        self.attribs['char'] = self.char
    if self.charoff:
        self.attribs['charoff'] = self.charoff
    if self.valign:
        self.attribs['valign'] = self.valign
        # Fix: valign maps to the CSS 'vertical-align' property; the
        # previous 'text-align' was a copy-paste bug that also clobbered
        # the horizontal alignment set above.
        self.style += 'vertical-align: ' + self.valign + ';'
    if self.style:
        self.attribs['style'] = self.style
    if self.cell_class:
        self.attribs['class'] = self.cell_class
    if self.row_span:
        self.attribs['rowspan'] = self.row_span
    if self.col_span:
        self.attribs['colspan'] = self.col_span
    # Serialise all collected attributes as ' key="value"' pairs.
    for attr in self.attribs:
        attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
    if self.text:
        text = self.text
    else:
        # An empty cell should at least contain a non-breaking space
        text = ' '
    attribs_str = get_string(attribs_str)
    text = get_string(text)
    if self.header:
        return ' <th%s>%s</th>\n' % (attribs_str, text)
    else:
        return ' <td%s>%s</td>\n' % (attribs_str, text)
def on_pbnNext_released(self):
    """Handle the Next button release.

    .. note:: This is an automatic Qt slot
        executed when the Next button is released.
    """
    current_step = self.get_current_step()

    # The fields-mapping page must validate before we may leave it.
    if current_step == self.step_kw_fields_mapping:
        try:
            self.step_kw_fields_mapping.get_field_mapping()
        except InvalidValidationException as e:
            display_warning_message_box(
                self, tr('Invalid Field Mapping'), get_string(e.message))
            return

    # Push the step onto the appropriate history stack so that the Back
    # button can navigate to it later.
    if current_step.step_type == STEP_FC:
        self.impact_function_steps.append(current_step)
    elif current_step.step_type == STEP_KW:
        self.keyword_steps.append(current_step)
    else:
        LOGGER.debug(current_step.step_type)
        raise InvalidWizardStep

    # Save keywords if it's the end of the keyword creation mode
    if current_step == self.step_kw_summary:
        self.save_current_keywords()

    # After any step involving Browser, add selected layer to map canvas
    if current_step in [self.step_fc_hazlayer_from_browser,
                        self.step_fc_explayer_from_browser,
                        self.step_fc_agglayer_from_browser]:
        # Avoid registering the same layer twice.
        if not QgsMapLayerRegistry.instance().mapLayersByName(
                self.layer.name()):
            QgsMapLayerRegistry.instance().addMapLayers([self.layer])

            # Make the layer visible. Might be hidden by default. See #2925
            legend = self.iface.legendInterface()
            legend.setLayerVisible(self.layer, True)

    # After the extent selection, save the extent and disconnect signals
    if current_step == self.step_fc_extent:
        self.step_fc_extent.write_extent()

    # Determine the new step to be switched
    new_step = current_step.get_next_step()

    if new_step is not None:
        # Prepare the next tab
        new_step.set_widgets()
    else:
        # Wizard complete
        self.accept()
        return

    self.go_to_step(new_step)
def test_generate_html_reports(self):
    """Test generate_html_reports function."""
    self.mock_the_dialog(test_entire_mode=False)
    dialog = self.impact_merge_dialog
    dialog.prepare_input()
    dialog.validate_all_layers()

    # Give each report fragment a single root element for minidom.
    first_report = (
        '<body>'
        + dialog.first_impact['postprocessing_report']
        + '</body>')
    second_report = (
        '<body>'
        + dialog.second_impact['postprocessing_report']
        + '</body>')

    first_document = minidom.parseString(get_string(first_report))
    second_document = minidom.parseString(get_string(second_report))

    first_report_dict = dialog.generate_report_dictionary_from_dom(
        first_document.getElementsByTagName('table'))
    second_report_dict = dialog.generate_report_dictionary_from_dom(
        second_document.getElementsByTagName('table'))

    dialog.generate_html_reports(first_report_dict, second_report_dict)

    # There should be 4 HTML files generated
    html_list = glob(
        os.path.join(
            temp_dir(dialog.__class__.__name__),
            '*.html'))
    expected_html_number = 4
    self.assertEqual(len(html_list), expected_html_number)
def merge(self): """Merge the postprocessing_report from each impact.""" # Ensure there is always only a single root element or minidom moans first_postprocessing_report = \ self.first_impact['postprocessing_report'] second_postprocessing_report = \ self.second_impact['postprocessing_report'] # noinspection PyTypeChecker first_report = '<body>' + first_postprocessing_report + '</body>' # noinspection PyTypeChecker second_report = '<body>' + second_postprocessing_report + '</body>' # Now create a dom document for each first_document = minidom.parseString(get_string(first_report)) second_document = minidom.parseString(get_string(second_report)) first_impact_tables = first_document.getElementsByTagName('table') second_impact_tables = second_document.getElementsByTagName('table') # Now create dictionary report from DOM first_report_dict = self.generate_report_dictionary_from_dom( first_impact_tables) second_report_dict = self.generate_report_dictionary_from_dom( second_impact_tables) # Generate report summary for all aggregation unit self.generate_report_summary(first_report_dict, second_report_dict) # Generate html reports file from merged dictionary self.generate_html_reports(first_report_dict, second_report_dict) # Generate PDF Reports using composer and/or atlas generation: self.generate_reports() # Delete html report files: for area in self.html_reports: report_path = self.html_reports[area] if os.path.exists(report_path): os.remove(report_path)
def write_to_file(self, filename): """Save raster data to file Args: * filename: filename with extension .tif Gdal documentation at: http://www.gdal.org/classGDALRasterBand.html """ # Check file format basename, extension = os.path.splitext(filename) msg = ('Invalid file type for file %s. Only extension ' 'tif allowed.' % filename) verify(extension in ['.tif'], msg) file_format = DRIVER_MAP[extension] # Get raster data A = self.get_data() # Get Dimensions. Note numpy and Gdal swap order N, M = A.shape # Create empty file. # FIXME (Ole): It appears that this is created as single # precision even though Float64 is specified # - see issue #17 driver = gdal.GetDriverByName(file_format) fid = driver.Create(get_string(filename), M, N, 1, gdal.GDT_Float64) if fid is None: msg = ('Gdal could not create filename %s using ' 'format %s' % (filename, file_format)) raise WriteLayerError(msg) self.filename = filename # Write metadata fid.SetProjection(str(self.projection)) fid.SetGeoTransform(self.geotransform) # Write data fid.GetRasterBand(1).WriteArray(A) fid.GetRasterBand(1).SetNoDataValue(self.get_nodata_value()) # noinspection PyUnusedLocal fid = None # Close # Write keywords if any write_iso19115_metadata(filename, self.keywords)
def prettify_xml(xml_str):
    """Return prettified XML without blank lines.

    based on http://stackoverflow.com/questions/14479656/

    :param xml_str: the XML to be prettified
    :type xml_str: str

    :return: the prettified XML
    :rtype: str
    """
    document = parseString(get_string(xml_str))
    raw_lines = document.toprettyxml(
        indent=' ' * 2, encoding='UTF-8').split(b'\n')
    # toprettyxml emits spurious blank lines; drop them while joining.
    pretty_xml = b'\n'.join(line for line in raw_lines if line.strip())
    # Guarantee exactly one trailing newline.
    if not pretty_xml.endswith(b'\n'):
        pretty_xml += b'\n'
    return pretty_xml.decode('utf-8')
def inasafe_exposure_summary_field_values(field, feature, parent):
    """Retrieve all values from a field in the exposure summary layer.
    """
    _ = feature, parent  # NOQA
    layer = exposure_summary_layer()
    if not layer:
        return None

    index = layer.fieldNameIndex(field)
    if index < 0:
        return None

    # Collect the requested field value from every feature, coerced to
    # a string representation.
    values = [get_string(feat[index]) for feat in layer.getFeatures()]
    return str(values)
def write_keywords(keywords, filename, sublayer=None):
    """Write keywords dictonary to file

    :param keywords: Dictionary of keyword, value pairs
    :type keywords: dict

    :param filename: Name of keywords file. Extension expected to be
        .keywords
    :type filename: str

    :param sublayer: Optional sublayer applicable only to multilayer
        formats such as sqlite or netcdf which can potentially hold more
        than one layer. The string should map to the layer group as per
        the example below. **If the keywords file contains sublayer
        definitions but no sublayer was defined, keywords file content
        will be removed and replaced with only the keywords provided
        here.**
    :type sublayer: str

    A keyword file with sublayers may look like this:

        [osm_buildings]
        datatype: osm
        category: exposure
        subcategory: building
        purpose: dki
        title: buildings_osm_4326

        [osm_flood]
        datatype: flood
        category: hazard
        subcategory: building
        title: flood_osm_4326

    Keys must be strings not containing the ":" character
    Values can be anything that can be converted to a string (using
    Python's str function)

    Surrounding whitespace is removed from values, but keys are
    unmodified. The reason being that keys must always be valid for the
    dictionary they came from. For values we have decided to be flexible
    and treat entries like 'unit:m' the same as 'unit: m', or indeed
    'unit: m '. Otherwise, unintentional whitespace in values would lead
    to surprising errors in the application.
    """
    # Input checks
    basename, ext = os.path.splitext(filename)

    msg = ('Unknown extension for file %s. '
           'Expected %s.keywords' % (filename, basename))
    verify(ext == '.keywords', msg)

    # First read any keywords out of the file so that we can retain
    # keywords for other sublayers
    existing_keywords = read_keywords(filename, all_blocks=True)

    first_value = None
    if len(existing_keywords) > 0:
        first_value = existing_keywords[existing_keywords.keys()[0]]
    # A dict-of-dicts means the existing file holds sublayer blocks.
    multilayer_flag = isinstance(first_value, dict)

    # Fix: use a context manager instead of file() + manual close() so
    # the handle is released even when a write raises.
    with open(filename, 'w') as handle:
        if multilayer_flag:
            if sublayer is not None and sublayer != '':
                # replace existing keywords / add new for this layer
                existing_keywords[sublayer] = keywords
                for key, value in existing_keywords.iteritems():
                    handle.write(_keywords_to_string(value, sublayer=key))
                    handle.write('\n')
            else:
                # It is currently a multilayer but we will replace it with
                # a single keyword block since the user passed no sublayer
                handle.write(_keywords_to_string(keywords))
        else:
            # currently a simple layer so replace it with our content
            keywords = _keywords_to_string(keywords, sublayer=sublayer)
            handle.write(get_string(keywords))
def add_layers_to_canvas_with_custom_orders(
        order, impact_function, iface=None):
    """Helper to add layers to the map canvas following a specific order.

    From top to bottom in the legend:
        [
            ('FromCanvas', layer name, full layer URI, QML),
            ('FromAnalysis', layer purpose, layer group, None),
            ...
        ]

    The full layer URI is coming from our helper.

    :param order: Special structure the list of layers to add.
    :type order: list

    :param impact_function: The multi exposure impact function used.
    :type impact_function: MultiExposureImpactFunction

    :param iface: QGIS QGisAppInterface instance.
    :type iface: QGisAppInterface
    """
    root = QgsProject.instance().layerTreeRoot()
    root.setVisible(False)  # Make all layers hidden.
    # All analysis layers live in a dedicated, checked group at the top
    # of the legend, flagged so it can be recognised later.
    group_analysis = root.insertGroup(0, impact_function.name)
    group_analysis.setVisible(Qt.Checked)
    group_analysis.setCustomProperty(MULTI_EXPOSURE_ANALYSIS_FLAG, True)

    # Insert layers in the good order in the group.
    for layer_definition in order:
        if layer_definition[0] == FROM_CANVAS['key']:
            # A pre-existing canvas layer: reload it from its URI and
            # restore the saved QML style document.
            style = QDomDocument()
            style.setContent(get_string(layer_definition[3]))
            layer = load_layer(layer_definition[2], layer_definition[1])[0]
            layer.importNamedStyle(style)
            QgsMapLayerRegistry.instance().addMapLayer(layer, False)
            layer_node = group_analysis.addLayer(layer)
            layer_node.setVisible(Qt.Checked)
        else:
            if layer_definition[2] == impact_function.name:
                # Output of the multi exposure impact function itself:
                # match by layer purpose.
                for layer in impact_function.outputs:
                    if layer.keywords['layer_purpose'] == \
                            layer_definition[1]:
                        QgsMapLayerRegistry.instance().addMapLayer(
                            layer, False)
                        layer_node = group_analysis.addLayer(layer)
                        layer_node.setVisible(Qt.Checked)
                        try:
                            title = layer.keywords['title']
                            # setLayerName was renamed in QGIS 2.18.
                            if qgis_version() >= 21800:
                                layer.setName(title)
                            else:
                                layer.setLayerName(title)
                        except KeyError:
                            pass
                        break
            else:
                for sub_impact_function in \
                        impact_function.impact_functions:
                    # Iterate over each sub impact function used in the
                    # multi exposure analysis.
                    if sub_impact_function.name == layer_definition[2]:
                        for layer in sub_impact_function.outputs:
                            purpose = layer_definition[1]
                            if layer.keywords['layer_purpose'] == purpose:
                                QgsMapLayerRegistry.instance().addMapLayer(
                                    layer, False)
                                layer_node = group_analysis.addLayer(layer)
                                layer_node.setVisible(Qt.Checked)
                                try:
                                    title = layer.keywords['title']
                                    if qgis_version() >= 21800:
                                        layer.setName(title)
                                    else:
                                        layer.setLayerName(title)
                                except KeyError:
                                    pass
                                break
    if iface:
        iface.setActiveLayer(impact_function.analysis_impacted)
def run(self):
    """Risk plugin for volcano hazard on building/structure.

    Counts number of building exposed to each volcano hazard zones.

    :returns: Map of building exposed to volcanic hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    self.validate()
    self.prepare()

    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    # Get parameters from layer's keywords
    self.hazard_class_attribute = self.hazard.keyword('field')
    self.name_attribute = self.hazard.keyword('volcano_name_field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')
    # Try to get the value from keyword, if not exist, it will not fail,
    # but use the old get_osm_building_usage
    try:
        self.exposure_class_attribute = self.exposure.keyword(
            'structure_class_field')
    except KeywordNotFoundError:
        self.exposure_class_attribute = None

    # Input checks
    if not self.hazard.layer.is_polygon_data:
        message = (
            'Input hazard must be a polygon. I got %s with '
            'layer type %s' % (
                self.hazard.name,
                self.hazard.layer.get_geometry_name()))
        raise Exception(message)

    # Check if hazard_zone_attribute exists in hazard_layer
    if (self.hazard_class_attribute not in
            self.hazard.layer.get_attribute_names()):
        message = (
            'Hazard data %s did not contain expected attribute %s ' % (
                self.hazard.name, self.hazard_class_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Get names of volcanoes considered
    if self.name_attribute in self.hazard.layer.get_attribute_names():
        volcano_name_list = set()
        for row in self.hazard.layer.get_data():
            # Run through all polygons and get unique names
            volcano_name_list.add(row[self.name_attribute])
        self.volcano_names = ', '.join(volcano_name_list)
    else:
        self.volcano_names = tr('Not specified in data')

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        'vector_hazard_classification')
    # Get the dictionary that contains the definition of the
    # classification
    vector_hazard_classification = definition(
        vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification['classes']

    # Initialize OrderedDict of affected buildings
    self.affected_buildings = OrderedDict()
    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class['key'] in \
                self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class['name']] = \
                self.hazard_class_mapping.pop(vector_hazard_class['key'])
            # Adding the class name as a key in affected_building
            self.affected_buildings[vector_hazard_class['name']] = {}

    # Run interpolation function for polygon2raster
    interpolated_layer = assign_hazard_values_to_exposure_data(
        self.hazard.layer, self.exposure.layer)

    # Extract relevant exposure data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()

    self.buildings = {}

    for i in range(len(features)):
        # Get the hazard value based on the value mapping in keyword
        hazard_value = get_key_for_value(
            features[i][self.hazard_class_attribute],
            self.hazard_class_mapping)
        if not hazard_value:
            hazard_value = self._not_affected_value
        features[i][self.target_field] = get_string(hazard_value)

        # Prefer the explicit structure class field when available,
        # otherwise fall back to the legacy OSM usage lookup.
        if (self.exposure_class_attribute and
                self.exposure_class_attribute in attribute_names):
            usage = features[i][self.exposure_class_attribute]
        else:
            usage = get_osm_building_usage(attribute_names, features[i])

        if usage in [None, 'NULL', 'null', 'Null', 0]:
            usage = tr('Unknown')

        if usage not in self.buildings:
            # First time this usage is seen: create its counters for
            # every hazard category.
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict([
                    (tr('Buildings Affected'), 0)])

        self.buildings[usage] += 1

        if hazard_value in self.affected_buildings.keys():
            self.affected_buildings[hazard_value][usage][
                tr('Buildings Affected')] += 1

    # Lump small entries and 'unknown' into 'other' category
    # Building threshold #2468
    postprocessors = self.parameters['postprocessors']
    building_postprocessors = postprocessors['BuildingType'][0]
    self.building_report_threshold = \
        building_postprocessors.value[0].value
    self._consolidate_to_other()

    # Generate simple impact report
    impact_summary = impact_table = self.html_report()

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    colours = colours[::-1]  # flip
    # Only keep as many colours as there are hazard categories.
    colours = colours[:len(self.affected_buildings.keys())]
    style_classes = []
    i = 0
    for category_name in self.affected_buildings.keys():
        style_class = dict()
        style_class['label'] = tr(category_name)
        style_class['transparency'] = 0
        style_class['value'] = category_name
        style_class['size'] = 1
        # Clamp the colour index so extra categories reuse the last one.
        if i >= len(self.affected_buildings.keys()):
            i = len(self.affected_buildings.keys()) - 1
        style_class['colour'] = colours[i]
        i += 1
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(
        target_field=self.target_field,
        style_classes=style_classes,
        style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Buildings affected by volcanic hazard zone')
    legend_title = tr('Building count')
    legend_units = tr('(building)')
    legend_notes = tr(
        'Thousand separator is represented by %s' %
        get_thousand_separator())

    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.target_field,
        'map_title': map_title,
        'legend_notes': legend_notes,
        'legend_units': legend_units,
        'legend_title': legend_title
    }

    self.set_if_provenance()

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    impact_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=tr('Buildings affected by volcanic hazard zone'),
        keywords=impact_layer_keywords,
        style_info=style_info)
    self._impact = impact_layer
    return impact_layer
def _keyword_to_row(self, keyword, value):
    """Helper to make a message row from a keyword.

    .. versionadded:: 3.2

    Use this when constructing a table from keywords to display as
    part of a message object.

    :param keyword: The keyword to be rendered.
    :type keyword: str

    :param value: Value of the keyword to be rendered.
    :type value: basestring

    :returns: A row to be added to a messaging table.
    :rtype: safe.messaging.items.row.Row
    """
    row = m.Row()
    # Translate titles explicitly if possible
    if keyword == 'title':
        value = self.tr(value)
    # we want to show the user the concept name rather than its key
    # if possible. TS
    definition = self.definition(keyword)
    if definition is None:
        definition = self.tr(keyword.capitalize().replace('_', ' '))
    else:
        definition = definition['name']
    # We deal with some special cases first:

    # In this case the value contains a DICT that we want to present
    # nicely
    if keyword == 'value_map':
        value = self._dict_to_row(value)
    # In these KEYWORD cases we show the DESCRIPTION for
    # the VALUE definition
    elif keyword in [
            'vector_hazard_classification',
            'raster_hazard_classification']:
        # get the definition for this class from definitions.py
        value = self.definition(value)
        value = value['description']
    # In these VALUE cases we show the DESCRIPTION for
    # the VALUE definition
    elif value in []:
        # get the definition for this class from definitions.py
        value = self.definition(value)
        value = value['description']
    # In these VALUE cases we show the NAME for the VALUE definition
    elif value in [
            'multiple_event',
            'single_event',
            'point',
            'line',
            # Fix: a missing comma previously concatenated the next two
            # literals into the bogus entry 'polygonfield', so neither
            # 'polygon' nor 'field' was ever matched.
            'polygon',
            'field']:
        # get the name for this class from definitions.py
        value = self.definition(value)
        value = value['name']
    # otherwise just treat the keyword as literal text
    else:
        # Otherwise just directly read the value
        value = get_string(value)
    key = m.ImportantText(definition)
    row.add(m.Cell(key))
    row.add(m.Cell(value))
    return row
def _keyword_to_row(self, keyword, value):
    """Helper to make a message row from a keyword.

    .. versionadded:: 3.2

    Use this when constructing a table from keywords to display as
    part of a message object.

    :param keyword: The keyword to be rendered.
    :type keyword: str

    :param value: Value of the keyword to be rendered.
    :type value: basestring

    :returns: A row to be added to a messaging table.
    :rtype: safe.messaging.items.row.Row
    """
    row = m.Row()
    # Translate titles explicitly if possible
    if keyword == 'title':
        value = self.tr(value)
    # we want to show the user the concept name rather than its key
    # if possible. TS
    definition = self.definition(keyword)
    if definition is None:
        definition = self.tr(keyword.capitalize().replace('_', ' '))
    else:
        definition = definition['name']
    # We deal with some special cases first:

    # In this case the value contains a DICT that we want to present
    # nicely
    if keyword == 'value_map':
        value = self._dict_to_row(value)
    # In these KEYWORD cases we show the DESCRIPTION for
    # the VALUE definition
    elif keyword in [
            'vector_hazard_classification',
            'raster_hazard_classification']:
        # get the definition for this class from definitions.py
        value = self.definition(value)
        value = value['description']
    # In these VALUE cases we show the DESCRIPTION for
    # the VALUE definition
    elif value in []:
        # get the definition for this class from definitions.py
        value = self.definition(value)
        value = value['description']
    # In these VALUE cases we show the NAME for the VALUE definition
    elif value in [
            'multiple_event',
            'single_event',
            'point',
            'line',
            # Fix: the missing comma here used to fuse these two strings
            # into 'polygonfield', which never matched anything.
            'polygon',
            'field']:
        # get the name for this class from definitions.py
        value = self.definition(value)
        value = value['name']
    # otherwise just treat the keyword as literal text
    else:
        # Otherwise just directly read the value
        value = get_string(value)
    key = m.ImportantText(definition)
    row.add(m.Cell(key))
    row.add(m.Cell(value))
    return row
if not os.path.isfile(keyword_file_path): message = tr('No keywords file found for %s' % keyword_file_path) raise NoKeywordsFoundError(message) # now get the requested keyword using the inasafe library try: dictionary = read_keywords(keyword_file_path) except Exception, e: message = tr( 'Keyword retrieval failed for %s (%s) \n %s' % ( keyword_file_path, keyword, str(e))) raise KeywordNotFoundError(message) # if no keyword was supplied, just return the dict if keyword is None: if 'keyword_version' in dictionary.keys(): dictionary['keyword_version'] = get_string( dictionary['keyword_version']) return dictionary if keyword not in dictionary: message = tr('No value was found in file %s for keyword %s' % ( keyword_file_path, keyword)) raise KeywordNotFoundError(message) try: value = dictionary[keyword] except: raise if 'keyword_version' == keyword: value = get_string(value) return value
def merge(self):
    """Merge the postprocessing_report from each impact.

    Parses both impacts' postprocessing reports into DOM tables,
    converts them to report dictionaries keyed by aggregation area,
    checks aggregation consistency, then generates the summary, the
    HTML reports and the PDF reports, cleaning up the HTML files last.

    :raises: InvalidLayerError when, in aggregation mode, either impact
        lacks an aggregated postprocessor, or the report areas are not a
        subset of the chosen aggregation layer's areas.
    """
    # Ensure there is always only a single root element or minidom moans
    first_postprocessing_report = \
        self.first_impact['postprocessing_report']
    second_postprocessing_report = \
        self.second_impact['postprocessing_report']
    # noinspection PyTypeChecker
    first_report = '<body>' + first_postprocessing_report + '</body>'
    # noinspection PyTypeChecker
    second_report = '<body>' + second_postprocessing_report + '</body>'

    # Now create a dom document for each
    first_document = minidom.parseString(get_string(first_report))
    second_document = minidom.parseString(get_string(second_report))
    first_impact_tables = first_document.getElementsByTagName('table')
    second_impact_tables = second_document.getElementsByTagName('table')

    # Now create dictionary report from DOM
    first_report_dict = self.generate_report_dictionary_from_dom(
        first_impact_tables)
    second_report_dict = self.generate_report_dictionary_from_dom(
        second_impact_tables)

    # Rizky: Consistency checks with aggregation
    # Make sure the aggregation layer both presents in both layers
    # We shouldn't have problems with Entire Area mode. It just means
    # the impact layer and summary is merged into single report.
    # We can have 3 cases:
    # 1. If both of them were not aggregated, we can just merge the map
    #    only
    # 2. If both of them were aggregated, we can just merge the map, and
    #    merge postprocessor report using 'Total aggregation in areas' key
    # 3. If only one of them were aggregated, we can just merge the map,
    #    and uses postprocessor report from the one who has.
    if self.entire_area_mode:
        # We won't be bothered with the map, it will be merged anyway in
        # all 3 cases. We should bother with the postprocessor report.
        # If one of them has the report, it means it will contain more
        # than one report keys. We can just swap the first report if they
        # have one key, and the second have more than one
        if (len(first_report_dict.keys()) == 1 and
                len(second_report_dict.keys()) > 1):
            swap_var = first_report_dict
            first_report_dict = second_report_dict
            second_report_dict = swap_var
    # This condition will covers aggregated mode
    # For this case, we should make sure both layers are aggregated with
    # the same aggregation layer of the chosen aggregation layer
    else:
        # check that both layers must have aggregated postprocessor.
        # aggregated postprocessor means the report_dict must have minimum
        # 2 keys
        if not (len(first_report_dict.keys()) > 1 and
                len(second_report_dict.keys()) > 1):
            raise InvalidLayerError(
                self.tr(
                    'Please choose impact layers with aggregated '
                    'postprocessor if you want to use aggregation layer.'))
        # collect all report keys (will contain aggregation areas in the
        # report)
        report_keys = first_report_dict.keys()
        # Discard the last keys. It will always contains total area, not
        # aggregated area
        if len(report_keys) > 0:
            del report_keys[-1]
        sec_report = second_report_dict.keys()
        if len(sec_report) > 0:
            del sec_report[-1]
        # Union of the two impacts' area keys
        for k in sec_report:
            if k not in report_keys:
                report_keys.append(k)
        # collect all aggregation areas in aggregation layer
        layer = self.aggregation['layer']
        aggregation_attr = self.aggregation['aggregation_attribute']
        aggregation_attr_index = layer.fieldNameIndex(aggregation_attr)
        aggregation_keys = []
        for f in layer.getFeatures():
            area = f[aggregation_attr_index]
            if area not in aggregation_keys:
                aggregation_keys.append(area)
        # Every report area must belong to the aggregation layer
        is_subset = True
        for k in report_keys:
            if k not in aggregation_keys:
                is_subset = False
        if not is_subset:
            # This means report keys contains area keys that is not in
            # aggregation layer. Which means possibly it is using the
            # wrong aggregation layer.
            raise InvalidLayerError(
                self.tr('First and Second layer does not use chosen '
                        'Aggregation layer'))

    # Generate report summary for all aggregation unit
    self.generate_report_summary(first_report_dict, second_report_dict)

    # Generate html reports file from merged dictionary
    self.generate_html_reports(first_report_dict, second_report_dict)

    # Generate PDF Reports using composer and/or atlas generation:
    self.generate_reports()

    # Delete html report files:
    for area in self.html_reports:
        report_path = self.html_reports[area]
        # Rizky : Fix possible bugs in Windows related to issue:
        # https://github.com/AIFDR/inasafe/issues/1862
        try:
            # avoid race condition using with statement
            with open(report_path, 'w') as report_file:
                report_file.close()
            os.remove(report_path)
        except OSError:
            pass
def write_to_file(self, filename, sublayer=None):
    """Save vector data to file

    :param filename: filename with extension .shp or .gml
    :type filename: str

    :param sublayer: Optional parameter for writing a sublayer. Ignored
        unless we are writing to an sqlite file.
    :type sublayer: str

    :raises: WriteLayerError

    Note: Shp limitation, if attribute names are longer than 10
    characters they will be truncated. This is due to limitations in
    the shp file driver and has to be done here since gdal v1.7 onwards
    has changed its handling of this issue:
    http://www.gdal.org/ogr/drv_shapefile.html

    **For this reason we recommend writing to spatialite.**
    """
    # Check file format
    base_name, extension = os.path.splitext(filename)

    msg = ('Invalid file type for file %s. Only extensions '
           'sqlite, shp or gml allowed.' % filename)
    verify(extension in ['.sqlite', '.shp', '.gml'], msg)
    driver = DRIVER_MAP[extension]

    # FIXME (Ole): Tempory flagging of GML issue (ticket #18)
    if extension == '.gml':
        msg = ('OGR GML driver does not store geospatial reference.'
               'This format is disabled for the time being. See '
               'https://github.com/AIFDR/riab/issues/18')
        raise WriteLayerError(msg)

    # Derive layer_name from filename (excluding preceding dirs)
    if sublayer is None or extension == '.shp':
        layer_name = os.path.split(base_name)[-1]
    else:
        layer_name = sublayer

    # Get vector data
    if self.is_polygon_data:
        geometry = self.get_geometry(as_geometry_objects=True)
    else:
        geometry = self.get_geometry()
    data = self.get_data()

    N = len(geometry)

    # Clear any previous file of this name (ogr does not overwrite)
    try:
        os.remove(filename)
    except OSError:
        pass

    # Create new file with one layer
    drv = ogr.GetDriverByName(driver)
    if drv is None:
        msg = 'OGR driver %s not available' % driver
        raise WriteLayerError(msg)

    ds = drv.CreateDataSource(get_string(filename))
    if ds is None:
        msg = 'Creation of output file %s failed' % filename
        raise WriteLayerError(msg)

    lyr = ds.CreateLayer(get_string(layer_name),
                         self.projection.spatial_reference,
                         self.geometry_type)
    if lyr is None:
        msg = 'Could not create layer %s' % layer_name
        raise WriteLayerError(msg)

    # Define attributes if any
    store_attributes = False
    fields = []
    if data is not None:
        if len(data) > 0:
            try:
                fields = data[0].keys()
            except:
                msg = ('Input parameter "attributes" was specified '
                       'but it does not contain list of dictionaries '
                       'with field information as expected. The first '
                       'element is %s' % data[0])
                raise WriteLayerError(msg)
            else:
                # Establish OGR types for each element
                ogr_types = {}
                for name in fields:
                    att = data[0][name]
                    py_type = type(att)
                    msg = ('Unknown type for storing vector '
                           'data: %s, %s' % (name, str(py_type)[1:-1]))
                    verify(py_type in TYPE_MAP, msg)
                    ogr_types[name] = TYPE_MAP[py_type]
        else:
            # msg = ('Input parameter "data" was specified '
            #        'but appears to be empty')
            # raise InaSAFEError(msg)
            pass

        # Create attribute fields in layer
        store_attributes = True
        for name in fields:
            # Rizky : OGR can't handle unicode field name, thus we
            # convert it to ASCII
            fd = ogr.FieldDefn(str(name), ogr_types[name])
            # FIXME (Ole): Trying to address issue #16
            #              But it doesn't work and
            #              somehow changes the values of MMI in test
            # width = max(128, len(name))
            # print name, width
            # fd.SetWidth(width)

            # Silent handling of warnings like
            # Warning 6: Normalized/laundered field name:
            # 'CONTENTS_LOSS_AUD' to 'CONTENTS_L'
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            if lyr.CreateField(fd) != 0:
                msg = 'Could not create field %s' % name
                raise WriteLayerError(msg)

            # Restore error handler
            gdal.PopErrorHandler()

    # Store geometry
    geom = ogr.Geometry(self.geometry_type)
    layer_def = lyr.GetLayerDefn()
    for i in range(N):
        # Create new feature instance
        feature = ogr.Feature(layer_def)

        # Store geometry and check
        if self.is_point_data:
            x = float(geometry[i][0])
            y = float(geometry[i][1])
            geom.SetPoint_2D(0, x, y)
        elif self.is_line_data:
            geom = array_to_line(geometry[i],
                                 geometry_type=ogr.wkbLineString)
        elif self.is_polygon_data:
            # Create polygon geometry
            geom = ogr.Geometry(ogr.wkbPolygon)

            # Add outer ring
            linear_ring = array_to_line(geometry[i].outer_ring,
                                        geometry_type=ogr.wkbLinearRing)
            geom.AddGeometry(linear_ring)

            # Add inner rings if any
            for A in geometry[i].inner_rings:
                geom.AddGeometry(
                    array_to_line(A, geometry_type=ogr.wkbLinearRing))
        else:
            msg = 'Geometry type %s not implemented' % self.geometry_type
            raise WriteLayerError(msg)

        feature.SetGeometry(geom)

        G = feature.GetGeometryRef()
        if G is None:
            msg = 'Could not create GeometryRef for file %s' % filename
            raise WriteLayerError(msg)

        # Store attributes
        if store_attributes:
            for j, name in enumerate(fields):
                actual_field_name = layer_def.GetFieldDefn(j).GetNameRef()

                val = data[i][name]

                if isinstance(val, numpy.ndarray):
                    # A singleton of type <type 'numpy.ndarray'> works
                    # for gdal version 1.6 but fails for version 1.8
                    # in SetField with error: NotImplementedError:
                    # Wrong number of arguments for overloaded function
                    val = float(val)
                elif val is None:
                    val = ''

                # We do this because there is NaN problem on windows
                # NaN value must be converted to _pseudo_inf to solve the
                # problem. But, when InaSAFE read the file, it'll be
                # converted back to NaN value, so that NaN in InaSAFE is a
                # numpy.nan
                # please check https://github.com/AIFDR/inasafe/issues/269
                # for more information
                # NaN is the only value that is not equal to itself.
                if val != val:
                    val = _pseudo_inf

                feature.SetField(actual_field_name, val)

        # Save this feature
        if lyr.CreateFeature(feature) != 0:
            msg = 'Failed to create feature %i in file %s' % (i, filename)
            raise WriteLayerError(msg)

        feature.Destroy()

    # Write keywords if any
    # write_keywords(self.keywords, base_name + '.keywords')
    write_iso19115_metadata(filename, self.keywords)
    self.keywords = read_iso19115_metadata(filename)
def run(self):
    """Risk plugin for volcano hazard on building/structure.

    Counts number of building exposed to each volcano hazard zones.

    :returns: Map of building exposed to volcanic hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    self.validate()
    self.prepare()

    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    # Get parameters from layer's keywords
    self.hazard_class_attribute = self.hazard.keyword('field')
    self.name_attribute = self.hazard.keyword('volcano_name_field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')
    # Try to get the value from keyword, if not exist, it will not fail,
    # but use the old get_osm_building_usage
    try:
        self.exposure_class_attribute = self.exposure.keyword(
            'structure_class_field')
    except KeywordNotFoundError:
        self.exposure_class_attribute = None

    # Input checks
    if not self.hazard.layer.is_polygon_data:
        message = (
            'Input hazard must be a polygon. I got %s with '
            'layer type %s' % (
                self.hazard.name,
                self.hazard.layer.get_geometry_name()))
        raise Exception(message)

    # Check if hazard_zone_attribute exists in hazard_layer
    if (self.hazard_class_attribute not in
            self.hazard.layer.get_attribute_names()):
        message = (
            'Hazard data %s did not contain expected attribute %s ' % (
                self.hazard.name, self.hazard_class_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Get names of volcanoes considered
    if self.name_attribute in self.hazard.layer.get_attribute_names():
        volcano_name_list = set()
        # Run through all polygons and get unique names
        for row in self.hazard.layer.get_data():
            volcano_name_list.add(row[self.name_attribute])
        self.volcano_names = ', '.join(volcano_name_list)
    else:
        self.volcano_names = tr('Not specified in data')

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        'vector_hazard_classification')
    # Get the dictionary that contains the definition of the classification
    vector_hazard_classification = definition(vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification['classes']
    # Initialize OrderedDict of affected buildings
    self.affected_buildings = OrderedDict()
    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class['name']] = \
                self.hazard_class_mapping.pop(vector_hazard_class['key'])
            # Adding the class name as a key in affected_building
            self.affected_buildings[vector_hazard_class['name']] = {}

    # Run interpolation function for polygon2raster
    interpolated_layer = assign_hazard_values_to_exposure_data(
        self.hazard.layer, self.exposure.layer)

    # Extract relevant exposure data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()

    self.buildings = {}

    for i in range(len(features)):
        # Get the hazard value based on the value mapping in keyword
        hazard_value = get_key_for_value(
            features[i][self.hazard_class_attribute],
            self.hazard_class_mapping)
        if not hazard_value:
            hazard_value = self._not_affected_value
        features[i][self.target_field] = get_string(hazard_value)

        # Prefer the explicit exposure class field; fall back to OSM usage
        if (self.exposure_class_attribute and
                self.exposure_class_attribute in attribute_names):
            usage = features[i][self.exposure_class_attribute]
        else:
            usage = get_osm_building_usage(attribute_names, features[i])

        if usage in [None, 'NULL', 'null', 'Null', 0]:
            usage = tr('Unknown')

        if usage not in self.buildings:
            self.buildings[usage] = 0
            # Seed a zero counter for this usage in every hazard category
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict([
                    (tr('Buildings Affected'), 0)
                ])
        self.buildings[usage] += 1
        if hazard_value in self.affected_buildings.keys():
            self.affected_buildings[hazard_value][usage][tr(
                'Buildings Affected')] += 1

    # Lump small entries and 'unknown' into 'other' category
    # Building threshold #2468
    postprocessors = self.parameters['postprocessors']
    building_postprocessors = postprocessors['BuildingType'][0]
    self.building_report_threshold = building_postprocessors.value[0].value
    self._consolidate_to_other()

    # Generate simple impact report
    impact_summary = impact_table = self.html_report()

    # Create style
    colours = [
        '#FFFFFF', '#38A800', '#79C900', '#CEED00',
        '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    colours = colours[::-1]  # flip
    # One colour per hazard category actually present
    colours = colours[:len(self.affected_buildings.keys())]
    style_classes = []
    i = 0
    for category_name in self.affected_buildings.keys():
        style_class = dict()
        style_class['label'] = tr(category_name)
        style_class['transparency'] = 0
        style_class['value'] = category_name
        style_class['size'] = 1
        # Clamp index so extra categories reuse the last colour
        if i >= len(self.affected_buildings.keys()):
            i = len(self.affected_buildings.keys()) - 1
        style_class['colour'] = colours[i]
        i += 1
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Buildings affected by volcanic hazard zone')
    legend_title = tr('Building count')
    legend_units = tr('(building)')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())

    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.target_field,
        'map_title': map_title,
        'legend_notes': legend_notes,
        'legend_units': legend_units,
        'legend_title': legend_title
    }

    self.set_if_provenance()

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    impact_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=tr('Buildings affected by volcanic hazard zone'),
        keywords=impact_layer_keywords,
        style_info=style_info)
    self._impact = impact_layer
    return impact_layer
def write_keywords(keywords, filename, sublayer=None):
    """Write keywords dictonary to file

    :param keywords: Dictionary of keyword, value pairs
    :type keywords: dict

    :param filename: Name of keywords file. Extension expected to be
        .keywords
    :type filename: str

    :param sublayer: Optional sublayer applicable only to multilayer
        formats such as sqlite or netcdf which can potentially hold more
        than one layer. The string should map to the layer group as per
        the example below. **If the keywords file contains sublayer
        definitions but no sublayer was defined, keywords file content
        will be removed and replaced with only the keywords provided
        here.**
    :type sublayer: str

    A keyword file with sublayers may look like this:

        [osm_buildings]
        datatype: osm
        category: exposure
        subcategory: building
        purpose: dki
        title: buildings_osm_4326

        [osm_flood]
        datatype: flood
        category: hazard
        subcategory: building
        title: flood_osm_4326

    Keys must be strings not containing the ":" character
    Values can be anything that can be converted to a string (using
    Python's str function)

    Surrounding whitespace is removed from values, but keys are
    unmodified. The reason being that keys must always be valid for the
    dictionary they came from. For values we have decided to be flexible
    and treat entries like 'unit:m' the same as 'unit: m', or indeed
    'unit: m '. Otherwise, unintentional whitespace in values would lead
    to surprising errors in the application.
    """
    # Input checks
    basename, ext = os.path.splitext(filename)
    msg = "Unknown extension for file %s. Expected %s.keywords" % (
        filename, basename)
    verify(ext == ".keywords", msg)

    # First read any keywords out of the file so that we can retain
    # keywords for other sublayers
    existing_keywords = read_keywords(filename, all_blocks=True)

    # A dict-of-dicts means the file currently holds sublayer blocks
    first_value = None
    if len(existing_keywords) > 0:
        first_value = existing_keywords[existing_keywords.keys()[0]]
    multilayer_flag = isinstance(first_value, dict)

    # Bug fix: use open() in a with-statement instead of the deprecated
    # file() builtin so the handle is closed even if a write raises.
    with open(filename, "w") as handle:
        if multilayer_flag:
            if sublayer is not None and sublayer != "":
                # replace existing keywords / add new for this layer
                existing_keywords[sublayer] = keywords
                for key, value in existing_keywords.iteritems():
                    handle.write(_keywords_to_string(value, sublayer=key))
                    handle.write("\n")
            else:
                # It is currently a multilayer but we will replace it with
                # a single keyword block since the user passed no sublayer
                handle.write(_keywords_to_string(keywords))
        else:
            # currently a simple layer so replace it with our content
            keywords = get_string(
                _keywords_to_string(keywords, sublayer=sublayer))
            handle.write(keywords)

    write_keyword_in_iso_metadata(filename)
def run(self):
    """Risk plugin for volcano hazard on building/structure.

    Counts number of building exposed to each volcano hazard zones.

    :returns: Map of building exposed to volcanic hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    # Get parameters from layer's keywords
    self.hazard_class_attribute = self.hazard.keyword('field')
    self.name_attribute = self.hazard.keyword('volcano_name_field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')
    self.exposure_class_attribute = self.exposure.keyword(
        'structure_class_field')
    exposure_value_mapping = self.exposure.keyword('value_mapping')

    # Input checks
    if not self.hazard.layer.is_polygon_data:
        message = (
            'Input hazard must be a polygon. I got %s with '
            'layer type %s' % (
                self.hazard.name,
                self.hazard.layer.get_geometry_name()))
        raise Exception(message)

    # Check if hazard_zone_attribute exists in hazard_layer
    if (self.hazard_class_attribute not in
            self.hazard.layer.get_attribute_names()):
        message = (
            'Hazard data %s did not contain expected attribute %s ' % (
                self.hazard.name, self.hazard_class_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Get names of volcanoes considered
    if self.name_attribute in self.hazard.layer.get_attribute_names():
        volcano_name_list = set()
        # Run through all polygons and get unique names
        for row in self.hazard.layer.get_data():
            volcano_name_list.add(row[self.name_attribute])
        self.volcano_names = ', '.join(volcano_name_list)
    else:
        self.volcano_names = tr('Not specified in data')

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        'vector_hazard_classification')
    # Get the dictionary that contains the definition of the classification
    vector_hazard_classification = definition(vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification['classes']
    # Collect the human friendly hazard class names
    hazard_class = []
    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class['name']] = \
                self.hazard_class_mapping.pop(vector_hazard_class['key'])
            # Adding the class name as a key in affected_building
            hazard_class.append(vector_hazard_class['name'])

    # Run interpolation function for polygon2raster
    interpolated_layer = assign_hazard_values_to_exposure_data(
        self.hazard.layer, self.exposure.layer)

    # Extract relevant exposure data
    features = interpolated_layer.get_data()

    self.init_report_var(hazard_class)

    for i in range(len(features)):
        # Get the hazard value based on the value mapping in keyword
        hazard_value = get_key_for_value(
            features[i][self.hazard_class_attribute],
            self.hazard_class_mapping)
        if not hazard_value:
            hazard_value = self._not_affected_value
        features[i][self.target_field] = get_string(hazard_value)

        # Normalise the usage to its main exposure type
        usage = features[i][self.exposure_class_attribute]
        usage = main_type(usage, exposure_value_mapping)

        affected = False
        if hazard_value in self.affected_buildings.keys():
            affected = True
        self.classify_feature(hazard_value, usage, affected)

    self.reorder_dictionaries()

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    colours = colours[::-1]  # flip
    # One colour per hazard category actually present
    colours = colours[:len(self.affected_buildings.keys())]
    style_classes = []
    for i, category_name in enumerate(self.affected_buildings.keys()):
        style_class = dict()
        style_class['label'] = tr(category_name)
        style_class['transparency'] = 0
        style_class['value'] = category_name
        style_class['size'] = 1
        # Clamp index so extra categories reuse the last colour
        if i >= len(self.affected_buildings.keys()):
            i = len(self.affected_buildings.keys()) - 1
        style_class['colour'] = colours[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    impact_data = self.generate_data()

    extra_keywords = {
        'target_field': self.target_field,
        'map_title': self.metadata().key('map_title'),
        'legend_notes': self.metadata().key('legend_notes'),
        'legend_units': self.metadata().key('legend_units'),
        'legend_title': self.metadata().key('legend_title')
    }

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    impact_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=self.metadata().key('layer_name'),
        keywords=impact_layer_keywords,
        style_info=style_info
    )

    impact_layer.impact_data = impact_data
    self._impact = impact_layer
    return impact_layer
def test_get_string(self):
    """Test get_string function.

    Checks that a unicode string is converted to its UTF-8 byte
    representation.
    """
    unicode_text = u"Test \xe1, \xe9, \xed, \xf3, \xfa, \xfc, \xf1, \xbf"
    # Expected UTF-8 encoded bytes for the unicode text above
    string_repr = (
        "Test \xc3\xa1, \xc3\xa9, \xc3\xad, \xc3\xb3, "
        "\xc3\xba, \xc3\xbc, \xc3\xb1, \xc2\xbf")
    result = get_string(unicode_text)
    # Bug fix: expected and actual were swapped in the failure message;
    # the wording 'should return X, but it returned Y' needs
    # (expected, actual) in that order.
    message = "It should return %s, but it returned %s" % (
        string_repr, result)
    self.assertEqual(result, string_repr, message)
def merge(self):
    """Merge the postprocessing_report from each impact.

    Builds DOM tables from both impacts' postprocessing reports,
    converts them to dictionaries keyed by aggregation area, validates
    them against the chosen aggregation layer, then produces summary,
    HTML and PDF reports and removes the temporary HTML files.

    :raises: InvalidLayerError when, in aggregation mode, either impact
        lacks an aggregated postprocessor, or the report areas do not
        all belong to the chosen aggregation layer.
    """
    # Ensure there is always only a single root element or minidom moans
    first_postprocessing_report = \
        self.first_impact['postprocessing_report']
    second_postprocessing_report = \
        self.second_impact['postprocessing_report']
    # noinspection PyTypeChecker
    first_report = '<body>' + first_postprocessing_report + '</body>'
    # noinspection PyTypeChecker
    second_report = '<body>' + second_postprocessing_report + '</body>'

    # Now create a dom document for each
    first_document = minidom.parseString(get_string(first_report))
    second_document = minidom.parseString(get_string(second_report))
    first_impact_tables = first_document.getElementsByTagName('table')
    second_impact_tables = second_document.getElementsByTagName('table')

    # Now create dictionary report from DOM
    first_report_dict = self.generate_report_dictionary_from_dom(
        first_impact_tables)
    second_report_dict = self.generate_report_dictionary_from_dom(
        second_impact_tables)

    # Rizky: Consistency checks with aggregation
    # Make sure the aggregation layer both presents in both layers
    # We shouldn't have problems with Entire Area mode. It just means
    # the impact layer and summary is merged into single report.
    # We can have 3 cases:
    # 1. If both of them were not aggregated, we can just merge the map
    #    only
    # 2. If both of them were aggregated, we can just merge the map, and
    #    merge postprocessor report using 'Total aggregation in areas' key
    # 3. If only one of them were aggregated, we can just merge the map,
    #    and uses postprocessor report from the one who has.
    if self.entire_area_mode:
        # We won't be bothered with the map, it will be merged anyway in
        # all 3 cases. We should bother with the postprocessor report.
        # If one of them has the report, it means it will contain more
        # than one report keys. We can just swap the first report if they
        # have one key, and the second have more than one
        if (len(first_report_dict.keys()) == 1 and
                len(second_report_dict.keys()) > 1):
            swap_var = first_report_dict
            first_report_dict = second_report_dict
            second_report_dict = swap_var
    # This condition will covers aggregated mode
    # For this case, we should make sure both layers are aggregated with
    # the same aggregation layer of the chosen aggregation layer
    else:
        # check that both layers must have aggregated postprocessor.
        # aggregated postprocessor means the report_dict must have minimum
        # 2 keys
        if not (len(first_report_dict.keys()) > 1 and
                len(second_report_dict.keys()) > 1):
            raise InvalidLayerError(self.tr(
                'Please choose impact layers with aggregated '
                'postprocessor if you want to use aggregation layer.'))
        # collect all report keys (will contain aggregation areas in the
        # report)
        report_keys = first_report_dict.keys()
        # Discard the last keys. It will always contains total area, not
        # aggregated area
        if len(report_keys) > 0:
            del report_keys[-1]
        sec_report = second_report_dict.keys()
        if len(sec_report) > 0:
            del sec_report[-1]
        # Union of both impacts' area keys
        for k in sec_report:
            if k not in report_keys:
                report_keys.append(k)
        # collect all aggregation areas in aggregation layer
        layer = self.aggregation['layer']
        aggregation_attr = self.aggregation['aggregation_attribute']
        aggregation_attr_index = layer.fieldNameIndex(aggregation_attr)
        aggregation_keys = []
        for f in layer.getFeatures():
            area = f[aggregation_attr_index]
            if area not in aggregation_keys:
                aggregation_keys.append(area)
        # Every report area must belong to the aggregation layer
        is_subset = True
        for k in report_keys:
            if k not in aggregation_keys:
                is_subset = False
        if not is_subset:
            # This means report keys contains area keys that is not in
            # aggregation layer. Which means possibly it is using the
            # wrong aggregation layer.
            raise InvalidLayerError(
                self.tr('First and Second layer does not use chosen '
                        'Aggregation layer'))

    # Generate report summary for all aggregation unit
    self.generate_report_summary(first_report_dict, second_report_dict)

    # Generate html reports file from merged dictionary
    self.generate_html_reports(first_report_dict, second_report_dict)

    # Generate PDF Reports using composer and/or atlas generation:
    self.generate_reports()

    # Delete html report files:
    for area in self.html_reports:
        report_path = self.html_reports[area]
        # Rizky : Fix possible bugs in Windows related to issue:
        # https://github.com/AIFDR/inasafe/issues/1862
        try:
            # avoid race condition using with statement
            with open(report_path, 'w') as report_file:
                report_file.close()
            os.remove(report_path)
        except OSError:
            pass
def _keyword_to_row(self, keyword, value, wrap_slash=False):
    """Helper to make a message row from a keyword.

    .. versionadded:: 3.2

    Use this when constructing a table from keywords to display as
    part of a message object.

    :param keyword: The keyword to be rendered.
    :type keyword: str

    :param value: Value of the keyword to be rendered.
    :type value: basestring

    :param wrap_slash: Whether to replace slashes with the slash plus the
        html <wbr> tag which will help to e.g. wrap html in small cells if
        it contains a long filename. Disabled by default as it may cause
        side effects if the text contains html markup.
    :type wrap_slash: bool

    :returns: A row to be added to a messaging table.
    :rtype: safe.messaging.items.row.Row
    """
    row = m.Row()
    # Translate titles explicitly if possible
    if keyword == 'title':
        value = tr(value)

    # # See #2569
    if keyword == 'url':
        if isinstance(value, QUrl):
            value = value.toString()
    if keyword == 'date':
        if isinstance(value, QDateTime):
            value = value.toString('d MMM yyyy')
        elif isinstance(value, datetime):
            value = value.strftime('%d %b %Y')

    # we want to show the user the concept name rather than its key
    # if possible. TS
    keyword_definition = definition(keyword)
    if keyword_definition is None:
        keyword_definition = tr(keyword.capitalize().replace(
            '_', ' '))
    else:
        try:
            keyword_definition = keyword_definition['name']
        except KeyError:
            # Handling if name is not exist.
            keyword_definition = keyword_definition['key'].capitalize()
            keyword_definition = keyword_definition.replace('_', ' ')

    # We deal with some special cases first:
    # In this case the value contains a DICT that we want to present nicely
    if keyword in [
            'value_map', 'inasafe_fields', 'inasafe_default_values']:
        value = self._dict_to_row(value)
    elif keyword == 'value_maps':
        value = self._value_maps_row(value)
    elif keyword == 'thresholds':
        value = self._threshold_to_row(value)
    # In these KEYWORD cases we show the DESCRIPTION for
    # the VALUE keyword_definition
    elif keyword in ['classification']:
        # get the keyword_definition for this class from definitions
        value = definition(value)
        value = value['description']
    # In these VALUE cases we show the DESCRIPTION for
    # the VALUE keyword_definition
    elif value in []:
        # NOTE(review): dead placeholder branch - an empty list never
        # matches any value.
        value = definition(value)
        value = value['description']
    # In these VALUE cases we show the NAME for the VALUE
    # keyword_definition
    elif value in [
            'multiple_event',
            'single_event',
            'point',
            'line',
            'polygon',  # bug fix: missing comma concatenated to 'polygonfield'
            'field']:
        # get the name for this class from definitions
        value = definition(value)
        value = value['name']
    # otherwise just treat the keyword as literal text
    else:
        # Otherwise just directly read the value
        value = get_string(value)
    key = m.ImportantText(keyword_definition)
    row.add(m.Cell(key))
    row.add(m.Cell(value, wrap_slash=wrap_slash))
    return row
def write_to_file(self, filename, sublayer=None):
    """Save vector data to file

    :param filename: filename with extension .shp or .gml
    :type filename: str

    :param sublayer: Optional parameter for writing a sublayer. Ignored
        unless we are writing to an sqlite file.
    :type sublayer: str

    :raises: WriteLayerError

    Note: Shp limitation, if attribute names are longer than 10
    characters they will be truncated. This is due to limitations in
    the shp file driver and has to be done here since gdal v1.7 onwards
    has changed its handling of this issue:
    http://www.gdal.org/ogr/drv_shapefile.html

    **For this reason we recommend writing to spatialite.**
    """
    # Check file format
    base_name, extension = os.path.splitext(filename)

    msg = ('Invalid file type for file %s. Only extensions '
           'sqlite, shp or gml allowed.' % filename)
    verify(extension in ['.sqlite', '.shp', '.gml'], msg)
    driver = DRIVER_MAP[extension]

    # FIXME (Ole): Tempory flagging of GML issue (ticket #18)
    if extension == '.gml':
        msg = ('OGR GML driver does not store geospatial reference.'
               'This format is disabled for the time being. See '
               'https://github.com/AIFDR/riab/issues/18')
        raise WriteLayerError(msg)

    # Derive layer_name from filename (excluding preceding dirs)
    # NOTE(review): the sublayer argument is honoured only for
    # non-shapefile output; for .shp the basename always wins.
    if sublayer is None or extension == '.shp':
        layer_name = os.path.split(base_name)[-1]
    else:
        layer_name = sublayer

    # Get vector data
    if self.is_polygon_data:
        geometry = self.get_geometry(as_geometry_objects=True)
    else:
        geometry = self.get_geometry()
    data = self.get_data()

    # Number of features to write; geometry and data are assumed to be
    # parallel sequences of this length.
    N = len(geometry)

    # Clear any previous file of this name (ogr does not overwrite)
    try:
        os.remove(filename)
    except OSError:
        pass

    # Create new file with one layer
    drv = ogr.GetDriverByName(driver)
    if drv is None:
        msg = 'OGR driver %s not available' % driver
        raise WriteLayerError(msg)

    ds = drv.CreateDataSource(get_string(filename))
    if ds is None:
        msg = 'Creation of output file %s failed' % filename
        raise WriteLayerError(msg)

    lyr = ds.CreateLayer(get_string(layer_name),
                         self.projection.spatial_reference,
                         self.geometry_type)
    if lyr is None:
        msg = 'Could not create layer %s' % layer_name
        raise WriteLayerError(msg)

    # Define attributes if any
    store_attributes = False
    fields = []
    if data is not None:
        if len(data) > 0:
            # NOTE(review): bare except below deliberately catches any
            # failure to treat data[0] as a dict-like record; kept as-is.
            try:
                fields = data[0].keys()
            except:
                msg = ('Input parameter "attributes" was specified '
                       'but it does not contain list of dictionaries '
                       'with field information as expected. The first '
                       'element is %s' % data[0])
                raise WriteLayerError(msg)
            else:
                # Establish OGR types for each element
                ogr_types = {}
                for name in fields:
                    att = data[0][name]
                    py_type = type(att)
                    msg = ('Unknown type for storing vector '
                           'data: %s, %s' % (name, str(py_type)[1:-1]))
                    verify(py_type in TYPE_MAP, msg)
                    ogr_types[name] = TYPE_MAP[py_type]
        else:
            # msg = ('Input parameter "data" was specified '
            #        'but appears to be empty')
            # raise InaSAFEError(msg)
            pass

        # Create attribute fields in layer
        store_attributes = True
        for name in fields:
            fd = ogr.FieldDefn(name, ogr_types[name])
            # FIXME (Ole): Trying to address issue #16
            #              But it doesn't work and
            #              somehow changes the values of MMI in test
            # width = max(128, len(name))
            # print name, width
            # fd.SetWidth(width)

            # Silent handling of warnings like
            # Warning 6: Normalized/laundered field name:
            # 'CONTENTS_LOSS_AUD' to 'CONTENTS_L'
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            if lyr.CreateField(fd) != 0:
                msg = 'Could not create field %s' % name
                raise WriteLayerError(msg)

            # Restore error handler
            gdal.PopErrorHandler()

    # Store geometry
    geom = ogr.Geometry(self.geometry_type)
    layer_def = lyr.GetLayerDefn()
    for i in range(N):
        # Create new feature instance
        feature = ogr.Feature(layer_def)

        # Store geometry and check
        if self.is_point_data:
            x = float(geometry[i][0])
            y = float(geometry[i][1])
            geom.SetPoint_2D(0, x, y)
        elif self.is_line_data:
            geom = array_to_line(geometry[i],
                                 geometry_type=ogr.wkbLineString)
        elif self.is_polygon_data:
            # Create polygon geometry
            geom = ogr.Geometry(ogr.wkbPolygon)

            # Add outer ring
            linear_ring = array_to_line(geometry[i].outer_ring,
                                        geometry_type=ogr.wkbLinearRing)
            geom.AddGeometry(linear_ring)

            # Add inner rings if any
            for A in geometry[i].inner_rings:
                geom.AddGeometry(array_to_line(
                    A, geometry_type=ogr.wkbLinearRing))
        else:
            msg = 'Geometry type %s not implemented' % self.geometry_type
            raise WriteLayerError(msg)
        feature.SetGeometry(geom)

        G = feature.GetGeometryRef()
        if G is None:
            msg = 'Could not create GeometryRef for file %s' % filename
            raise WriteLayerError(msg)

        # Store attributes
        if store_attributes:
            for j, name in enumerate(fields):
                actual_field_name = layer_def.GetFieldDefn(j).GetNameRef()

                val = data[i][name]

                if isinstance(val, numpy.ndarray):
                    # A singleton of type <type 'numpy.ndarray'> works
                    # for gdal version 1.6 but fails for version 1.8
                    # in SetField with error: NotImplementedError:
                    # Wrong number of arguments for overloaded function
                    val = float(val)
                elif val is None:
                    val = ''

                # We do this because there is NaN problem on windows
                # NaN value must be converted to _pseudo_in to solve the
                # problem. But, when InaSAFE read the file, it'll be
                # converted back to NaN value, so that NaN in InaSAFE is a
                # numpy.nan
                # please check https://github.com/AIFDR/inasafe/issues/269
                # for more information
                # (val != val) is the standard NaN self-inequality test.
                if val != val:
                    val = _pseudo_inf

                feature.SetField(actual_field_name, val)

        # Save this feature
        if lyr.CreateFeature(feature) != 0:
            msg = 'Failed to create feature %i in file %s' % (i, filename)
            raise WriteLayerError(msg)

        feature.Destroy()

    # Write keywords if any
    write_keywords(self.keywords, base_name + '.keywords')
    # NOTE(review): this is the tail of a keyword-retrieval function whose
    # ``def`` line lies outside this chunk; ``keyword_file_path`` (path
    # without extension) and ``keyword`` come from the enclosing scope.
    keyword_file_path += '.keywords'

    if not os.path.isfile(keyword_file_path):
        message = tr('No keywords file found for %s' % keyword_file_path)
        raise NoKeywordsFoundError(message)

    # now get the requested keyword using the inasafe library
    try:
        dictionary = read_keywords(keyword_file_path)
    except Exception, e:
        # Any parse/read failure is surfaced as KeywordNotFoundError with
        # the underlying error text included for diagnosis.
        message = tr('Keyword retrieval failed for %s (%s) \n %s' % (
            keyword_file_path, keyword, str(e)))
        raise KeywordNotFoundError(message)

    # if no keyword was supplied, just return the dict
    if keyword is None:
        # Normalise keyword_version to a plain string before returning
        # the whole dictionary.
        if 'keyword_version' in dictionary.keys():
            dictionary['keyword_version'] = get_string(
                dictionary['keyword_version'])
        return dictionary

    if keyword not in dictionary:
        message = tr('No value was found in file %s for keyword %s' % (
            keyword_file_path, keyword))
        raise KeywordNotFoundError(message)

    # NOTE(review): the membership check above makes this try/except
    # effectively a no-op; kept byte-identical.
    try:
        value = dictionary[keyword]
    except:
        raise

    # Same string normalisation for a directly requested version keyword.
    if 'keyword_version' == keyword:
        value = get_string(value)

    return value
def _keyword_to_row(self, keyword, value, wrap_slash=False):
    """Helper to make a message row from a keyword.

    .. versionadded:: 3.2

    Use this when constructing a table from keywords to display as
    part of a message object.

    :param keyword: The keyword to be rendered.
    :type keyword: str

    :param value: Value of the keyword to be rendered.
    :type value: basestring

    :param wrap_slash: Whether to replace slashes with the slash plus the
        html <wbr> tag which will help to e.g. wrap html in small cells if
        it contains a long filename. Disabled by default as it may cause
        side effects if the text contains html markup.
    :type wrap_slash: bool

    :returns: A row to be added to a messaging table.
    :rtype: safe.messaging.items.row.Row
    """
    row = m.Row()
    # Translate titles explicitly if possible
    if keyword == 'title':
        value = tr(value)
    # See #2569
    if keyword == 'url':
        if isinstance(value, QUrl):
            value = value.toString()
    if keyword == 'date':
        if isinstance(value, QDateTime):
            value = value.toString('d MMM yyyy')
        elif isinstance(value, datetime):
            value = value.strftime('%d %b %Y')
    # we want to show the user the concept name rather than its key
    # if possible. TS
    keyword_definition = definition(keyword)
    if keyword_definition is None:
        keyword_definition = tr(keyword.capitalize().replace('_', ' '))
    else:
        try:
            keyword_definition = keyword_definition['name']
        except KeyError:
            # Fall back to a prettified key when the definition has no
            # human friendly name.
            keyword_definition = keyword_definition['key'].capitalize()
            keyword_definition = keyword_definition.replace('_', ' ')

    # We deal with some special cases first:

    # In this case the value contains a DICT that we want to present nicely
    if keyword in [
            'value_map',
            'inasafe_fields',
            'inasafe_default_values'
    ]:
        value = self._dict_to_row(value)
    elif keyword == 'value_maps':
        value = self._value_maps_row(value)
    elif keyword == 'thresholds':
        value = self._threshold_to_row(value)
    # In these KEYWORD cases we show the DESCRIPTION for
    # the VALUE keyword_definition
    elif keyword in ['classification']:
        # get the keyword_definition for this class from definitions
        value = definition(value)
        value = value['description']
    # In these VALUE cases we show the NAME for the VALUE
    # keyword_definition.
    # BUGFIX: the original list ended with `'polygon' 'field'` (no
    # comma) which Python concatenates into the single literal
    # 'polygonfield', so neither 'polygon' nor 'field' ever matched.
    elif value in [
            'multiple_event',
            'single_event',
            'point',
            'line',
            'polygon',
            'field'
    ]:
        # get the name for this class from definitions
        value = definition(value)
        value = value['name']
    # otherwise just treat the keyword as literal text
    else:
        # Otherwise just directly read the value
        value = get_string(value)
    key = m.ImportantText(keyword_definition)
    row.add(m.Cell(key))
    row.add(m.Cell(value, wrap_slash=wrap_slash))
    return row
def run(self):
    """Risk plugin for volcano hazard on building/structure.

    Counts number of building exposed to each volcano hazard zones.

    :returns: Map of building exposed to volcanic hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    # Get parameters from layer's keywords
    self.hazard_class_attribute = self.hazard.keyword('field')
    self.name_attribute = self.hazard.keyword('volcano_name_field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')
    self.exposure_class_attribute = self.exposure.keyword(
        'structure_class_field')
    exposure_value_mapping = self.exposure.keyword('value_mapping')

    # Input checks
    if not self.hazard.layer.is_polygon_data:
        message = (
            'Input hazard must be a polygon. I got %s with '
            'layer type %s' % (
                self.hazard.name,
                self.hazard.layer.get_geometry_name()))
        raise Exception(message)

    # Check if hazard_zone_attribute exists in hazard_layer
    if (self.hazard_class_attribute not in
            self.hazard.layer.get_attribute_names()):
        message = (
            'Hazard data %s did not contain expected attribute %s ' % (
                self.hazard.name, self.hazard_class_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Get names of volcanoes considered
    if self.name_attribute in self.hazard.layer.get_attribute_names():
        for row in self.hazard.layer.get_data():
            # Run through all polygons and get unique names
            self.volcano_names.add(row[self.name_attribute])

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        'vector_hazard_classification')
    # Get the dictionary that contains the definition of the classification
    vector_hazard_classification = definition(vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification['classes']

    # Initialize OrderedDict of affected buildings
    hazard_class = []
    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class['name']] = \
                self.hazard_class_mapping.pop(vector_hazard_class['key'])
            # Adding the class name as a key in affected_building
            hazard_class.append(vector_hazard_class['name'])

    # Run interpolation function for polygon2raster
    interpolated_layer = assign_hazard_values_to_exposure_data(
        self.hazard.layer, self.exposure.layer)

    # Extract relevant exposure data
    features = interpolated_layer.get_data()

    self.init_report_var(hazard_class)

    for i in range(len(features)):
        # Get the hazard value based on the value mapping in keyword
        hazard_value = get_key_for_value(
            features[i][self.hazard_class_attribute],
            self.hazard_class_mapping)
        # Features outside every mapped zone are labelled not-affected.
        if not hazard_value:
            hazard_value = self._not_affected_value
        features[i][self.target_field] = get_string(hazard_value)

        usage = features[i][self.exposure_class_attribute]
        usage = main_type(usage, exposure_value_mapping)

        affected = False
        if hazard_value in self.affected_buildings.keys():
            affected = True

        self.classify_feature(hazard_value, usage, affected)

    self.reorder_dictionaries()

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    colours = colours[::-1]  # flip
    # Keep one colour per hazard category in the report.
    colours = colours[:len(self.affected_buildings.keys())]
    style_classes = []

    for i, category_name in enumerate(self.affected_buildings.keys()):
        style_class = dict()
        style_class['label'] = tr(category_name)
        style_class['transparency'] = 0
        style_class['value'] = category_name
        style_class['size'] = 1
        # NOTE(review): defensive clamp; unreachable here because colours
        # was truncated above to exactly this many entries.
        if i >= len(self.affected_buildings.keys()):
            i = len(self.affected_buildings.keys()) - 1
        style_class['colour'] = colours[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    impact_data = self.generate_data()

    extra_keywords = {
        'target_field': self.target_field,
        'map_title': self.map_title(),
        'legend_notes': self.metadata().key('legend_notes'),
        'legend_units': self.metadata().key('legend_units'),
        'legend_title': self.metadata().key('legend_title')
    }

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    impact_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=self.map_title(),
        keywords=impact_layer_keywords,
        style_info=style_info
    )

    impact_layer.impact_data = impact_data
    self._impact = impact_layer
    return impact_layer