def run(layers):
    """Risk plugin for earthquake school damage.

    Interpolates ground shaking (MMI) onto building locations and applies
    a quartic damage polynomial to each shaking value.

    :param layers: Hazard (ground shaking) and exposure (building) layers.
    :returns: Vector layer with 'DAMAGE' and 'MMI' per building.
    """
    # Extract hazard (ground shaking) and exposure (building locations)
    hazard = get_hazard_layer(layers)
    exposure = get_exposure_layer(layers)

    # Interpolate hazard level to building locations
    hazard = assign_hazard_values_to_exposure_data(
        hazard, exposure, attribute_name='MMI')

    # Compute damage for every interpolated shaking record
    damage_records = []
    for record in hazard.get_data():
        mmi = float(record['MMI'])
        # NaN compares False against 6.0, so NaN falls through to 0.0 —
        # same outcome as the original explicit `x != x` NaN test.
        if mmi >= 6.0:
            damage = (0.692 * mmi ** 4
                      - 15.82 * mmi ** 3
                      + 135.0 * mmi ** 2
                      - 509.0 * mmi
                      + 714.4)
        else:
            damage = 0.0
        damage_records.append({'DAMAGE': damage, 'MMI': mmi})

    # Package the result as a new vector layer on the exposure geometry
    return Vector(data=damage_records,
                  projection=exposure.get_projection(),
                  geometry=exposure.get_geometry())
def run(layers):
    """Risk plugin for earthquake school damage.

    :param layers: Hazard (ground shaking) and exposure (building) layers.
    :returns: Vector layer carrying 'DAMAGE' and 'MMI' for each building.
    """
    # Extract data
    shake_layer = get_hazard_layer(layers)    # Ground shaking
    building_layer = get_exposure_layer(layers)  # Building locations

    # Interpolate hazard level to building locations
    shake_layer = assign_hazard_values_to_exposure_data(
        shake_layer, building_layer, attribute_name='MMI')

    def damage_curve(mmi):
        # Quartic damage model; below MMI 6 (or for NaN, which fails the
        # self-equality test) damage is defined as zero.
        if mmi < 6.0 or mmi != mmi:
            return 0.0
        return (0.692 * mmi ** 4 - 15.82 * mmi ** 3
                + 135.0 * mmi ** 2 - 509.0 * mmi + 714.4)

    # One output record per interpolated shaking value
    results = []
    for feature in shake_layer.get_data():
        mmi = float(feature['MMI'])
        results.append({'DAMAGE': damage_curve(mmi), 'MMI': mmi})

    # Create new layer and return
    return Vector(data=results,
                  projection=building_layer.get_projection(),
                  geometry=building_layer.get_geometry())
def run(self):
    """Tsunami raster impact to buildings (e.g. from Open Street Map).

    Classifies each building into one of five inundation statuses (dry,
    low, medium, high, very high) from interpolated water depth, tallies
    affected buildings per usage type, and returns a styled Vector layer.

    :returns: Vector layer of buildings with the inundation status written
        into ``self.target_field``; also stored on ``self._impact``.
    """
    self.validate()
    self.prepare()
    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    # Thresholds for tsunami hazard zone breakdown (metres of water depth).
    low_max = self.parameters['low_threshold'].value
    medium_max = self.parameters['medium_threshold'].value
    high_max = self.parameters['high_threshold'].value

    # Interpolate hazard level to building locations
    interpolated_layer = assign_hazard_values_to_exposure_data(
        self.hazard.layer,
        self.exposure.layer,
        attribute_name=self.target_field)

    # Extract relevant exposure data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()
    total_features = len(interpolated_layer)

    # Prefer an explicit structure-class field keyword if the exposure
    # layer declares one; otherwise fall back to the old
    # get_osm_building_usage helper below.
    try:
        structure_class_field = self.exposure.keyword(
            'structure_class_field')
    except KeywordNotFoundError:
        structure_class_field = None

    # Building breakdown (count per usage type)
    self.buildings = {}
    # Impacted building breakdown, keyed by hazard class in severity order
    self.affected_buildings = OrderedDict([
        (self.hazard_classes[0], {}),
        (self.hazard_classes[1], {}),
        (self.hazard_classes[2], {}),
        (self.hazard_classes[3], {}),
        (self.hazard_classes[4], {})
    ])
    # NOTE(review): indexing keys() by position below relies on Python 2
    # semantics where dict.keys() returns a list — confirm if porting.
    categories = self.affected_buildings.keys()
    for i in range(total_features):
        # Get the interpolated depth
        water_depth = float(features[i][self.target_field])
        if water_depth <= 0:
            inundated_status = 0  # dry
        elif 0 < water_depth <= low_max:
            inundated_status = 1  # low
        elif low_max < water_depth <= medium_max:
            inundated_status = 2  # medium
        elif medium_max < water_depth <= high_max:
            inundated_status = 3  # high
        elif high_max < water_depth:
            inundated_status = 4  # very high
        # If not a number or a value beside real number.
        else:
            inundated_status = 0

        # Count affected buildings by usage type if available
        if (structure_class_field in attribute_names and
                structure_class_field):
            usage = features[i].get(structure_class_field, None)
        else:
            usage = get_osm_building_usage(
                attribute_names, features[i])
        if usage is None or usage == 0:
            usage = 'unknown'
        if usage not in self.buildings:
            # First sighting of this usage: register zero counters for
            # every hazard class.
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict([
                    (tr('Buildings Affected'), 0)])

        # Count all buildings by type
        self.buildings[usage] += 1
        # Add calculated impact to existing attributes
        features[i][self.target_field] = inundated_status
        category = categories[inundated_status]
        self.affected_buildings[category][usage][
            tr('Buildings Affected')] += 1

    # Lump small entries and 'unknown' into 'other' category
    # Building threshold #2468
    postprocessors = self.parameters['postprocessors']
    building_postprocessors = postprocessors['BuildingType'][0]
    self.building_report_threshold = building_postprocessors.value[0].value
    self._consolidate_to_other()

    # Generate simple impact report
    impact_table = impact_summary = self.html_report()

    # For printing map purpose
    map_title = tr('Inundated buildings')
    legend_title = tr('Inundated structure status')
    legend_units = tr('(low, medium, high, and very high)')

    # One style class per inundation status (0..4), green through red.
    style_classes = [
        dict(
            label=self.hazard_classes[0] + ': 0 m',
            value=0,
            colour='#00FF00',
            transparency=0,
            size=1
        ),
        dict(
            label=self.hazard_classes[1] + ': 0.1 - %.1f m' % low_max,
            value=1,
            colour='#FFFF00',
            transparency=0,
            size=1
        ),
        dict(
            label=self.hazard_classes[2] + ': %.1f - %.1f m' % (
                low_max + 0.1, medium_max),
            value=2,
            colour='#FFB700',
            transparency=0,
            size=1
        ),
        dict(
            label=self.hazard_classes[3] + ': %.1f - %.1f m' % (
                medium_max + 0.1, high_max),
            value=3,
            colour='#FF6F00',
            transparency=0,
            size=1
        ),
        dict(
            label=self.hazard_classes[4] + ' > %.1f m' % high_max,
            value=4,
            colour='#FF0000',
            transparency=0,
            size=1
        ),
    ]
    style_info = dict(
        target_field=self.target_field,
        style_classes=style_classes,
        style_type='categorizedSymbol')
    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.target_field,
        'map_title': map_title,
        'legend_title': legend_title,
        'legend_units': legend_units,
        'buildings_total': total_features,
        'buildings_affected': self.total_affected_buildings
    }
    self.set_if_provenance()
    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)
    vector_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=tr('Estimated buildings affected'),
        keywords=impact_layer_keywords,
        style_info=style_info)
    # Create vector layer and return
    self._impact = vector_layer
    return vector_layer
def run(self, layers):
    """Risk plugin for Padang building survey.

    Applies a lognormal fragility curve per ITB building class to the
    interpolated ground shaking, producing percentage damage per building
    plus an HTML summary report.

    :param layers: Hazard (ground shaking) and exposure (building) layers.
    :returns: Vector layer with percent damage in ``self.target_field``.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    datatype = E.get_keywords()['datatype']
    vclass_tag = 'ITB_Class'
    # NOTE(review): only the 'itb' branch binds Emap; the 'osm'/'sigab'
    # branches just print and leave Emap unbound, so those datatypes will
    # raise NameError below — confirm intended behavior before relying on it.
    if datatype.lower() == 'osm':
        # Map from OSM attributes to the ITB building classes
        # Emap = osm2itb(E)
        print 'osm2itb has not been implemented'
    elif datatype.lower() == 'sigab':
        # Emap = sigabitb(E)
        print 'sigab2itb has not been implemented'
    elif datatype.lower() == 'itb':
        Emap = E

    # Interpolate hazard level to building locations
    Hi = assign_hazard_values_to_exposure_data(H, Emap,
                                               attribute_name='MMI')

    # Extract relevant numerical data
    coordinates = Emap.get_geometry()
    shaking = Hi.get_data()
    N = len(shaking)

    # List attributes to carry forward to result layer
    attributes = Emap.get_attribute_names()

    # Calculate building damage; counters bucket features by damage band.
    count50 = 0
    count25 = 0
    count10 = 0
    count0 = 0
    building_damage = []
    for i in range(N):
        mmi = float(shaking[i]['MMI'])
        building_class = Emap.get_data(vclass_tag, i)
        building_type = str(building_class)
        damage_params = vul_curves[building_type]
        beta = damage_params['beta']
        median = damage_params['median']
        msg = 'Invalid parameter value for ' + building_type
        verify(beta + median > 0.0, msg)
        # Lognormal fragility curve scaled to percent
        percent_damage = lognormal_cdf(mmi,
                                       median=median,
                                       sigma=beta) * 100

        # Collect shake level and calculated damage
        result_dict = {self.target_field: percent_damage,
                       'MMI': mmi}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = Emap.get_data(key, i)

        # Record result for this feature
        building_damage.append(result_dict)

        # Debugging
        #if percent_damage > 0.01:
        #    print mmi, percent_damage

        # Calculate statistics (damage-band membership counters)
        if percent_damage < 10:
            count0 += 1
        if 10 <= percent_damage < 33:
            count10 += 1
        if 33 <= percent_damage < 66:
            count25 += 1
        if 66 <= percent_damage:
            count50 += 1

    # fid.close()

    # Create report
    Hname = H.get_name()
    Ename = E.get_name()
    impact_summary = ('<b>In case of "%s" the estimated impact to '
                      '"%s" '
                      'is:</b><br><br><p>' % (Hname, Ename))
    impact_summary += (
        '<table border="0" width="320px">'
        '   <tr><th><b>%s</b></th><th><b>%s</b></th></th>'
        '   <tr></tr>'
        '   <tr><td>%s:</td><td>%i</td></tr>'
        '   <tr><td>%s (<10%%):</td><td>%i</td></tr>'
        '   <tr><td>%s (10-33%%):</td><td>%i</td></tr>'
        '   <tr><td>%s (33-66%%):</td><td>%i</td></tr>'
        '   <tr><td>%s (66-100%%):</td><td>%i</td></tr>'
        '</table></font>' % (_('Buildings'), _('Total'),
                             _('All'), N,
                             _('No damage'), count0,
                             _('Low damage'), count10,
                             _('Medium damage'), count25,
                             _('High damage'), count50))
    impact_summary += '<br>'  # Blank separation row
    impact_summary += '<b>' + _('Assumption') + ':</b><br>'
    # This is the proper text:
    #_('Levels of impact are defined by post 2009 '
    #  'Padang earthquake survey conducted by Geoscience '
    #  'Australia and Institute of Teknologi Bandung.'))
    #_('Unreinforced masonry is assumed where no '
    #  'structural information is available.'))
    impact_summary += _('Levels of impact are defined by post 2009 '
                        'Padang earthquake survey conducted by Geoscience '
                        'Australia and Institute of Teknologi Bandung.')
    impact_summary += _('Unreinforced masonry is assumed where no '
                        'structural information is available.')

    # Create style: graduated colour ramp over percent damage
    style_classes = [dict(label=_('No damage'), min=0, max=10,
                          colour='#00ff00', transparency=1),
                     dict(label=_('Low damage'), min=10, max=33,
                          colour='#ffff00', transparency=1),
                     dict(label=_('Medium damage'), min=33, max=66,
                          colour='#ffaa00', transparency=1),
                     dict(label=_('High damage'), min=66, max=100,
                          colour='#ff0000', transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=building_damage,
               projection=E.get_projection(),
               geometry=coordinates,
               name='Estimated pct damage',
               keywords={'impact_summary': impact_summary},
               style_info=style_info)
    return V
def run(self, layers=None):
    """Classified hazard impact to buildings (e.g. from Open Street Map).

    :param layers: List of layers expected to contain.
        * hazard: Classified Hazard layer
        * exposure: Vector layer of structure data on the same grid
          as hazard
    :returns: Vector layer of buildings with impact class written to
        ``self.target_field`` and an affected flag in
        ``self.affected_field``; also stored on ``self._impact``.
    """
    self.validate()
    self.prepare(layers)

    # The 3 classes
    # NOTE(review): unlike sibling functions these parameters are used
    # directly (no `.value`) and compared with `==` against a rounded
    # float level — confirm the parameter objects compare as numbers.
    low_t = self.parameters['low_hazard_class']
    medium_t = self.parameters['medium_hazard_class']
    high_t = self.parameters['high_hazard_class']

    # Extract data
    hazard = self.hazard      # Classified Hazard
    exposure = self.exposure  # Building locations

    # Determine attribute name for hazard levels
    if hazard.is_raster:
        hazard_attribute = 'level'
    else:
        hazard_attribute = None

    interpolated_result = assign_hazard_values_to_exposure_data(
        hazard, exposure,
        attribute_name=hazard_attribute,
        mode='constant')

    # Extract relevant exposure data
    attribute_names = interpolated_result.get_attribute_names()
    attributes = interpolated_result.get_data()

    buildings_total = len(interpolated_result)
    # Calculate building impact
    self.buildings = {}
    self.affected_buildings = OrderedDict([(tr('High Hazard Class'), {}),
                                           (tr('Medium Hazard Class'), {}),
                                           (tr('Low Hazard Class'), {})])
    for i in range(buildings_total):
        usage = get_osm_building_usage(attribute_names, attributes[i])
        if usage is None or usage == 0:
            usage = 'unknown'
        if usage not in self.buildings:
            # First sighting of this usage: register zero counters for
            # every hazard class.
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict([
                    (tr('Buildings Affected'), 0)
                ])
        # Count all buildings by type
        self.buildings[usage] += 1
        # Default: not impacted / not affected until proven otherwise.
        attributes[i][self.target_field] = 0
        attributes[i][self.affected_field] = 0
        # Round the interpolated level so it can be matched to a class.
        level = float(attributes[i]['level'])
        level = float(numpy_round(level))
        if level == high_t:
            impact_level = tr('High Hazard Class')
        elif level == medium_t:
            impact_level = tr('Medium Hazard Class')
        elif level == low_t:
            impact_level = tr('Low Hazard Class')
        else:
            # Level matches no class: leave this building unaffected.
            continue

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = {
            tr('High Hazard Class'): 3,
            tr('Medium Hazard Class'): 2,
            tr('Low Hazard Class'): 1
        }[impact_level]
        attributes[i][self.affected_field] = 1
        # Count affected buildings by type
        self.affected_buildings[impact_level][usage][tr(
            'Buildings Affected')] += 1

    # Consolidate the small building usage groups < 25 to other
    self._consolidate_to_other()

    # Create style: one categorized symbol per impact class value
    style_classes = [
        dict(label=tr('High'), value=3, colour='#F31A1C',
             transparency=0, size=2, border_color='#969696',
             border_width=0.2),
        dict(label=tr('Medium'), value=2, colour='#F4A442',
             transparency=0, size=2, border_color='#969696',
             border_width=0.2),
        dict(label=tr('Low'), value=1, colour='#EBF442',
             transparency=0, size=2, border_color='#969696',
             border_width=0.2),
        dict(label=tr('Not Affected'), value=None, colour='#1EFC7C',
             transparency=0, size=2, border_color='#969696',
             border_width=0.2)
    ]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    impact_table = impact_summary = self.generate_html_report()

    # For printing map purpose
    map_title = tr('Buildings affected')
    legend_units = tr('(Low, Medium, High)')
    legend_title = tr('Structure inundated status')

    # Create vector layer and return
    vector_layer = Vector(data=attributes,
                          projection=exposure.get_projection(),
                          geometry=exposure.get_geometry(),
                          name=tr('Estimated buildings affected'),
                          keywords={
                              'impact_summary': impact_summary,
                              'impact_table': impact_table,
                              'target_field': self.affected_field,
                              'map_title': map_title,
                              'legend_units': legend_units,
                              'legend_title': legend_title,
                              'buildings_total': buildings_total,
                              'buildings_affected':
                                  self.total_affected_buildings
                          },
                          style_info=style_info)
    self._impact = vector_layer
    return vector_layer
def run(self, layers=None):
    """Earthquake impact to buildings (e.g. from OpenStreetMap).

    Classifies each building into Low/Medium/High impact from interpolated
    MMI using user-supplied thresholds; when the exposure carries NEXIS
    attributes (FLOOR_AREA, BUILDING_C, CONTENTS_C) it also accumulates
    building and contents dollar losses per usage type.

    :param layers: All the input layers (Hazard Layer and Exposure Layer)
    :returns: Vector layer with the impact class in ``self.target_field``;
        also stored on ``self._impact``.
    """
    self.validate()
    self.prepare(layers)

    LOGGER.debug('Running earthquake building impact')

    # merely initialize (used only when self.is_nexis is True)
    building_value = 0
    contents_value = 0

    # Thresholds for mmi breakdown.
    t0 = self.parameters['low_threshold']
    t1 = self.parameters['medium_threshold']
    t2 = self.parameters['high_threshold']

    # Class Attribute and Label.
    class_1 = {'label': tr('Low'), 'class': 1}
    class_2 = {'label': tr('Medium'), 'class': 2}
    class_3 = {'label': tr('High'), 'class': 3}

    # Extract data
    hazard_layer = self.hazard      # Depth
    exposure_layer = self.exposure  # Building locations

    # Define attribute name for hazard levels.
    hazard_attribute = 'mmi'

    # Determine if exposure data have NEXIS attributes.
    attribute_names = exposure_layer.get_attribute_names()
    if ('FLOOR_AREA' in attribute_names and
            'BUILDING_C' in attribute_names and
            'CONTENTS_C' in attribute_names):
        self.is_nexis = True
    else:
        self.is_nexis = False

    # Interpolate hazard level to building locations.
    interpolate_result = assign_hazard_values_to_exposure_data(
        hazard_layer,
        exposure_layer,
        attribute_name=hazard_attribute
    )

    # Extract relevant exposure data
    # attribute_names = interpolate_result.get_attribute_names()
    attributes = interpolate_result.get_data()

    interpolate_size = len(interpolate_result)

    # Building breakdown
    self.buildings = {}
    # Impacted building breakdown
    self.affected_buildings = OrderedDict([
        (tr('High'), {}),
        (tr('Medium'), {}),
        (tr('Low'), {})
    ])

    for i in range(interpolate_size):
        # Classify building according to shake level
        # and calculate dollar losses
        if self.is_nexis:
            # Missing/non-numeric NEXIS fields fall back to 0.0 so a bad
            # record contributes no value rather than aborting the run.
            try:
                area = float(attributes[i]['FLOOR_AREA'])
            except (ValueError, KeyError):
                # print 'Got area', attributes[i]['FLOOR_AREA']
                area = 0.0
            try:
                building_value_density = float(attributes[i]['BUILDING_C'])
            except (ValueError, KeyError):
                # print 'Got bld value', attributes[i]['BUILDING_C']
                building_value_density = 0.0
            try:
                contents_value_density = float(attributes[i]['CONTENTS_C'])
            except (ValueError, KeyError):
                # print 'Got cont value', attributes[i]['CONTENTS_C']
                contents_value_density = 0.0
            building_value = building_value_density * area
            contents_value = contents_value_density * area

        usage = get_osm_building_usage(attribute_names, attributes[i])

        if usage is None or usage == 0:
            usage = 'unknown'

        if usage not in self.buildings:
            # First sighting of this usage: register zero counters per
            # category (with dollar columns only in NEXIS mode).
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                if self.is_nexis:
                    self.affected_buildings[category][usage] = OrderedDict(
                        [
                            (tr('Buildings Affected'), 0),
                            (tr('Buildings value ($M)'), 0),
                            (tr('Contents value ($M)'), 0)])
                else:
                    self.affected_buildings[category][usage] = OrderedDict(
                        [
                            (tr('Buildings Affected'), 0)])

        self.buildings[usage] += 1
        try:
            mmi = float(attributes[i][hazard_attribute])  # MMI
        except TypeError:
            mmi = 0.0
        if t0 <= mmi < t1:
            cls = 1
            category = tr('Low')
        elif t1 <= mmi < t2:
            cls = 2
            category = tr('Medium')
        elif t2 <= mmi:
            cls = 3
            category = tr('High')
        else:
            # Not reported for less than level t0
            continue
        attributes[i][self.target_field] = cls
        self.affected_buildings[
            category][usage][tr('Buildings Affected')] += 1
        if self.is_nexis:
            # Accumulate losses in millions of dollars.
            self.affected_buildings[category][usage][
                tr('Buildings value ($M)')] += building_value / 1000000.0
            self.affected_buildings[category][usage][
                tr('Contents value ($M)')] += contents_value / 1000000.0

    # Consolidate the small building usage groups < 25 to other
    self._consolidate_to_other()

    impact_table = impact_summary = self.generate_html_report()

    # Create style
    style_classes = [dict(label=class_1['label'],
                          value=class_1['class'],
                          colour='#ffff00',
                          transparency=1),
                     dict(label=class_2['label'],
                          value=class_2['class'],
                          colour='#ffaa00',
                          transparency=1),
                     dict(label=class_3['label'],
                          value=class_3['class'],
                          colour='#ff0000',
                          transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Building affected by earthquake')
    legend_notes = tr('The level of the impact is according to the '
                      'threshold the user input.')
    legend_units = tr('(mmi)')
    legend_title = tr('Impact level')

    # Create vector layer and return
    result_layer = Vector(
        data=attributes,
        projection=interpolate_result.get_projection(),
        geometry=interpolate_result.get_geometry(),
        name=tr('Estimated buildings affected'),
        keywords={
            'impact_summary': impact_summary,
            'impact_table': impact_table,
            'map_title': map_title,
            'legend_notes': legend_notes,
            'legend_units': legend_units,
            'legend_title': legend_title,
            'target_field': self.target_field,
            'statistics_type': self.statistics_type,
            'statistics_classes': self.statistics_classes},
        style_info=style_info)

    msg = 'Created vector layer %s' % str(result_layer)
    LOGGER.debug(msg)
    self._impact = result_layer
    return result_layer
def run(self, layers):
    """Risk plugin for Padang building survey.

    Variant of the Padang plugin with reformatted strings/quotes; applies
    a lognormal fragility curve per ITB building class to interpolated
    ground shaking and builds an HTML summary.

    :param layers: Hazard (ground shaking) and exposure (building) layers.
    :returns: Vector layer with percent damage in ``self.target_field``.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    datatype = E.get_keywords()["datatype"]
    vclass_tag = "ITB_Class"
    # NOTE(review): only the 'itb' branch binds Emap; the 'osm'/'sigab'
    # branches just print and leave Emap unbound, causing a NameError
    # below for those datatypes — confirm intended behavior.
    if datatype.lower() == "osm":
        # Map from OSM attributes to the ITB building classes
        # Emap = osm2itb(E)
        print "osm2itb has not been implemented"
    elif datatype.lower() == "sigab":
        # Emap = sigabitb(E)
        print "sigab2itb has not been implemented"
    elif datatype.lower() == "itb":
        Emap = E

    # Interpolate hazard level to building locations
    Hi = assign_hazard_values_to_exposure_data(H, Emap,
                                               attribute_name="MMI")

    # Extract relevant numerical data
    coordinates = Emap.get_geometry()
    shaking = Hi.get_data()
    N = len(shaking)

    # List attributes to carry forward to result layer
    attributes = Emap.get_attribute_names()

    # Calculate building damage; counters bucket features by damage band.
    count50 = 0
    count25 = 0
    count10 = 0
    count0 = 0
    building_damage = []
    for i in range(N):
        mmi = float(shaking[i]["MMI"])
        building_class = Emap.get_data(vclass_tag, i)
        building_type = str(building_class)
        damage_params = vul_curves[building_type]
        beta = damage_params["beta"]
        median = damage_params["median"]
        msg = "Invalid parameter value for " + building_type
        verify(beta + median > 0.0, msg)
        # Lognormal fragility curve scaled to percent
        percent_damage = lognormal_cdf(mmi, median=median, sigma=beta) * 100

        # Collect shake level and calculated damage
        result_dict = {self.target_field: percent_damage, "MMI": mmi}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = Emap.get_data(key, i)

        # Record result for this feature
        building_damage.append(result_dict)

        # Debugging
        # if percent_damage > 0.01:
        #     print mmi, percent_damage

        # Calculate statistics (damage-band membership counters)
        if percent_damage < 10:
            count0 += 1
        if 10 <= percent_damage < 33:
            count10 += 1
        if 33 <= percent_damage < 66:
            count25 += 1
        if 66 <= percent_damage:
            count50 += 1

    # fid.close()

    # Create report
    Hname = H.get_name()
    Ename = E.get_name()
    impact_summary = ('<b>In case of "%s" the estimated impact to '
                      '"%s" '
                      "is:</b><br><br><p>" % (Hname, Ename))
    impact_summary += (
        '<table border="0" width="320px">'
        "   <tr><th><b>%s</b></th><th><b>%s</b></th></th>"
        "   <tr></tr>"
        "   <tr><td>%s:</td><td>%i</td></tr>"
        "   <tr><td>%s (<10%%):</td><td>%i</td></tr>"
        "   <tr><td>%s (10-33%%):</td><td>%i</td></tr>"
        "   <tr><td>%s (33-66%%):</td><td>%i</td></tr>"
        "   <tr><td>%s (66-100%%):</td><td>%i</td></tr>"
        "</table></font>"
        % (
            tr("Buildings"),
            tr("Total"),
            tr("All"),
            N,
            tr("No damage"),
            count0,
            tr("Low damage"),
            count10,
            tr("Medium damage"),
            count25,
            tr("High damage"),
            count50,
        )
    )
    impact_summary += "<br>"  # Blank separation row
    impact_summary += "<b>" + tr("Assumption") + ":</b><br>"
    # This is the proper text:
    # tr('Levels of impact are defined by post 2009 '
    #    'Padang earthquake survey conducted by Geoscience '
    #    'Australia and Institute of Teknologi Bandung.'))
    # tr('Unreinforced masonry is assumed where no '
    #    'structural information is available.'))
    impact_summary += tr(
        "Levels of impact are defined by post 2009 "
        "Padang earthquake survey conducted by Geoscience "
        "Australia and Institute of Teknologi Bandung."
    )
    impact_summary += tr("Unreinforced masonry is assumed where no "
                         "structural information is available.")

    # Create style: graduated colour ramp over percent damage
    style_classes = [
        dict(label=tr("No damage"), min=0, max=10,
             colour="#00ff00", transparency=0),
        dict(label=tr("Low damage"), min=10, max=33,
             colour="#ffff00", transparency=0),
        dict(label=tr("Medium damage"), min=33, max=66,
             colour="#ffaa00", transparency=0),
        dict(label=tr("High damage"), min=66, max=100,
             colour="#ff0000", transparency=0),
    ]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(
        data=building_damage,
        projection=E.get_projection(),
        geometry=coordinates,
        name="Estimated pct damage",
        keywords={"impact_summary": impact_summary},
        style_info=style_info,
    )
    return V
def run(self, layers):
    """Risk plugin for volcano population evacuation.

    Input
        layers: List of layers expected to contain
            H: Vector polygon layer of volcano impact zones
            P: Raster layer of population data on the same grid as H

    Counts number of people exposed to volcano event.

    Return
        Map of population exposed to the volcano hazard zone.
        Table with number of people evacuated and supplies required.
    """
    # Identify hazard and exposure layers
    H = get_hazard_layer(layers)  # Volcano hazard (polygon or point)
    E = get_exposure_layer(layers)

    question = get_question(H.get_name(), E.get_name(), self)

    # Input checks
    if not H.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % H.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. '
           'I got %s with layer '
           'type %s' % (H.get_name(), H.get_geometry_name()))
    if not (H.is_polygon_data or H.is_point_data):
        raise Exception(msg)

    if H.is_point_data:
        # Use concentric circles around each volcano point
        radii = self.parameters['distance [km]']
        centers = H.get_geometry()
        attributes = H.get_data()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        H = make_circular_polygon(centers, rad_m, attributes=attributes)
        # NOTE (Sunni): I commented out this one because there will be
        # a permission problem on windows
        # H.write_to_file('Evac_zones_%s.shp' % str(radii))  # To check

        category_title = 'Radius'
        category_header = tr('Distance [km]')
        category_names = radii

        name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
    else:
        # Use hazard map
        category_title = 'KRB'
        category_header = tr('Category')

        # FIXME (Ole): Change to English and use translation system
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']

        name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map
        attributes = H.get_data()

    # Get names of volcanos considered
    if name_attribute in H.get_attribute_names():
        D = {}
        for att in H.get_data():
            # Run through all polygons and get unique names
            D[att[name_attribute]] = None

        volcano_names = ''
        for name in D:
            volcano_names += '%s, ' % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr('Not specified in data')

    if not category_title in H.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (H.get_name(), category_title))
        raise InaSAFEError(msg)

    # Run interpolation function for polygon2raster
    P = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='population')

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a population count of zero
    new_attributes = H.get_data()
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count affected population per polygon and total
    evacuated = 0
    for attr in P.get_data():
        # Get population at this location
        pop = float(attr['population'])

        # Update population count for associated polygon
        poly_id = attr['polygon_id']
        new_attributes[poly_id][self.target_field] += pop

        # Update population count for each category
        cat = new_attributes[poly_id][category_title]
        categories[cat] += pop

    # Count totals
    total = int(numpy.sum(E.get_data(nan=0)))

    # Don't show digits less than a 1000
    total = round_thousand(total)

    # Count number and cumulative for each zone
    cum = 0
    pops = {}
    cums = {}
    for name in category_names:
        if category_title == 'Radius':
            # Category keys were built from metre radii, category_names
            # are km — convert before lookup.
            key = name * 1000  # Convert to meters
        else:
            key = name
        pop = int(categories[key])

        pop = round_thousand(pop)
        cum += pop
        cum = round_thousand(cum)

        pops[name] = pop
        cums[name] = cum

    # Use final accumulation as total number needing evac
    evacuated = cum

    # Calculate estimated needs based on BNPB Perka
    # 7/2008 minimum assistance
    # FIXME (Ole): Refactor into one function to be shared
    rice = int(evacuated * 2.8)
    drinking_water = int(evacuated * 17.5)
    water = int(evacuated * 67)
    family_kits = int(evacuated / 5)
    toilets = int(evacuated / 20)

    # Generate impact report for the pdf map
    blank_cell = ''
    table_body = [question,
                  TableRow([tr('Volcanos considered'),
                            '%s' % volcano_names, blank_cell],
                           header=True),
                  TableRow([tr('People needing evacuation'),
                            '%s' % format_int(evacuated),
                            blank_cell],
                           header=True),
                  TableRow([category_header,
                            tr('Total'), tr('Cumulative')],
                           header=True)]

    for name in category_names:
        table_body.append(TableRow([name,
                                    format_int(pops[name]),
                                    format_int(cums[name])]))

    table_body.extend([TableRow(tr('Map shows population affected in '
                                   'each of volcano hazard polygons.')),
                       TableRow([tr('Needs per week'), tr('Total'),
                                 blank_cell],
                                header=True),
                       [tr('Rice [kg]'), format_int(rice), blank_cell],
                       [tr('Drinking Water [l]'),
                        format_int(drinking_water), blank_cell],
                       [tr('Clean Water [l]'), format_int(water),
                        blank_cell],
                       [tr('Family Kits'), format_int(family_kits),
                        blank_cell],
                       [tr('Toilets'), format_int(toilets),
                        blank_cell]])
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total population %s in the exposure layer')
                       % format_int(total),
                       tr('People need evacuation if they are within the '
                          'volcanic hazard zones.')])
    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('People affected by volcanic hazard zone')

    # Define classes for legend for flooded population counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    population_counts = [x[self.target_field] for x in new_attributes]
    cls = [0] + numpy.linspace(1,
                               max(population_counts),
                               len(colours)).tolist()

    # Define style info for output polygons showing population counts
    style_classes = []
    for i, colour in enumerate(colours):
        lo = cls[i]
        hi = cls[i + 1]

        if i == 0:
            label = tr('0')
        else:
            label = tr('%i - %i') % (lo, hi)

        entry = dict(label=label, colour=colour, min=lo, max=hi,
                     transparency=50, size=1)
        style_classes.append(entry)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      legend_title=tr('Population Count'))

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=H.get_projection(),
               geometry=H.get_geometry(as_geometry_objects=True),
               name=tr('Population affected by volcanic hazard zone'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for earthquake school damage.

    Maps exposure attributes to BNPB guideline building classes, then
    assigns each building a damage level (1=Low, 2=Medium, 3=High, or NaN
    for unknown shaking) from per-class MMI thresholds.

    :param layers: Hazard (ground shaking) and exposure (building) layers.
    :returns: Vector layer with the damage level in ``self.target_field``.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    # Map the exposure attributes onto the guideline classes (URM and RM)
    keywords = E.get_keywords()
    if 'datatype' in keywords:
        datatype = keywords['datatype']
        if datatype.lower() == 'osm':
            # Map from OSM attributes to the guideline classes (URM and RM)
            E = osm2bnpb(E, target_attribute=self.vclass_tag)
        elif datatype.lower() == 'sigab':
            # Map from SIGAB attributes to the guideline classes
            # (URM and RM)
            E = sigab2bnpb(E)
        else:
            E = unspecific2bnpb(E, target_attribute=self.vclass_tag)
    else:
        E = unspecific2bnpb(E, target_attribute=self.vclass_tag)

    # Interpolate hazard level to building locations
    H = assign_hazard_values_to_exposure_data(H, E, attribute_name='MMI')

    # Extract relevant numerical data
    coordinates = E.get_geometry()
    shaking = H.get_data()
    N = len(shaking)

    # List attributes to carry forward to result layer
    attributes = E.get_attribute_names()

    # Calculate building damage; counters track features per damage level.
    count3 = 0
    count2 = 0
    count1 = 0
    count_unknown = 0
    building_damage = []
    for i in range(N):
        mmi = float(shaking[i]['MMI'])
        building_class = E.get_data(self.vclass_tag, i)

        # Per-class (lo, hi) MMI thresholds separating the damage levels
        lo, hi = damage_parameters[building_class]

        if numpy.isnan(mmi):
            # If we don't know the shake level assign Not-a-Number
            damage = numpy.nan
            count_unknown += 1
        elif mmi < lo:
            damage = 1  # Low
            count1 += 1
        elif lo <= mmi < hi:
            damage = 2  # Medium
            count2 += 1
        elif mmi >= hi:
            damage = 3  # High
            count3 += 1
        else:
            msg = 'Undefined shakelevel %s' % str(mmi)
            raise Exception(msg)

        # Collect shake level and calculated damage
        result_dict = {self.target_field: damage,
                       'MMI': mmi}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = E.get_data(key, i)

        # Record result for this feature
        building_damage.append(result_dict)

    # Create report
    impact_summary = ('<table border="0" width="320px">'
                      '   <tr><th><b>%s</b></th><th><b>%s</b></th></th>'
                      '   <tr></tr>'
                      '   <tr><td>%s:</td><td>%s</td></tr>'
                      '   <tr><td>%s (10-25%%):</td><td>%s</td></tr>'
                      '   <tr><td>%s (25-50%%):</td><td>%s</td></tr>'
                      '   <tr><td>%s (50-100%%):</td><td>%s</td></tr>'
                      % (tr('Buildings'), tr('Total'),
                         tr('All'), format_int(N),
                         tr('Low damage'), format_int(count1),
                         tr('Medium damage'), format_int(count2),
                         tr('High damage'), format_int(count3)))
    impact_summary += ('   <tr><td>%s (NaN):</td><td>%s</td></tr>'
                       % ('Unknown', format_int(count_unknown)))
    impact_summary += '</table>'

    # Create style: graduated colours centred on damage levels 1, 2, 3
    style_classes = [dict(label=tr('Low damage'), min=0.5, max=1.5,
                          colour='#fecc5c', transparency=0),
                     dict(label=tr('Medium damage'), min=1.5, max=2.5,
                          colour='#fd8d3c', transparency=0),
                     dict(label=tr('High damage'), min=2.5, max=3.5,
                          colour='#f31a1c', transparency=0)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=building_damage,
               projection=E.get_projection(),
               geometry=coordinates,
               name='Estimated damage level',
               keywords={'impact_summary': impact_summary},
               style_info=style_info)
    return V
def run(self, layers):
    """Flood impact to buildings (e.g. from Open Street Map).

    :param layers: List of layers expected to contain a flood-depth
        hazard layer (raster or polygon) and a building-location
        exposure layer.
    :returns: Vector layer of buildings with a boolean flooded flag in
        ``self.target_field`` plus an HTML impact summary table.
    """
    threshold = 1.0  # Flood threshold [m]

    # Extract data
    H = get_hazard_layer(layers)    # Depth
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Determine attribute name for hazard levels:
    # raster hazards carry interpolated 'depth', vector hazards carry
    # the polygon attribute 'FLOODPRONE'.
    if H.is_raster:
        hazard_attribute = 'depth'
    else:
        hazard_attribute = 'FLOODPRONE'

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attribute_names = I.get_attribute_names()
    attributes = I.get_data()
    N = len(I)

    # Calculate building impact
    count = 0
    buildings = {}
    affected_buildings = {}
    for i in range(N):
        if hazard_attribute == 'depth':
            # Get the interpolated depth; flooded iff strictly above
            # the 1.0 m threshold
            x = float(attributes[i]['depth'])
            x = x > threshold
        elif hazard_attribute == 'FLOODPRONE':
            # Use interpolated polygon attribute
            atts = attributes[i]
            if 'FLOODPRONE' in atts:
                res = atts['FLOODPRONE']
                if res is None:
                    x = False
                else:
                    x = res.lower() == 'yes'
            else:
                # If there isn't a flood prone attribute,
                # assume that building is wet if inside polygon
                # as flagged by generic attribute 'Affected'.
                # NOTE(review): raises KeyError if 'Affected' is also
                # missing — presumably the interpolation step always
                # sets it; confirm against
                # assign_hazard_values_to_exposure_data().
                res = atts['Affected']
                if res is None:
                    x = False
                else:
                    x = res
        else:
            # Defensive: hazard_attribute is always one of the two
            # values assigned above
            msg = (_('Unknown hazard type %s. '
                     'Must be either "depth" or "floodprone"')
                   % hazard_attribute)
            raise Exception(msg)

        # Count affected buildings by usage type if available
        if 'type' in attribute_names:
            usage = attributes[i]['type']
        else:
            usage = None

        if usage is not None and usage != 0:
            key = usage
        else:
            key = 'unknown'

        if key not in buildings:
            buildings[key] = 0
            affected_buildings[key] = 0

        # Count all buildings by type
        buildings[key] += 1
        if x is True:
            # Count affected buildings by type
            affected_buildings[key] += 1

            # Count total affected buildings
            count += 1

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = x

    # Lump small entries and 'unknown' into 'other' category.
    # Python 2: .keys() returns a list copy, so deleting entries while
    # looping here is safe.
    for usage in buildings.keys():
        x = buildings[usage]
        if x < 25 or usage == 'unknown':
            if 'other' not in buildings:
                buildings['other'] = 0
                affected_buildings['other'] = 0

            buildings['other'] += x
            affected_buildings['other'] += affected_buildings[usage]
            del buildings[usage]
            del affected_buildings[usage]

    # Generate simple impact report
    table_body = [question,
                  TableRow([_('Building type'),
                            _('Temporarily closed'),
                            _('Total')],
                           header=True),
                  TableRow([_('All'), count, N])]

    # Generate break down by building usage type if available
    if 'type' in attribute_names:
        # Make list of building types
        building_list = []
        for usage in buildings:
            building_type = usage.replace('_', ' ')

            # Lookup internationalised value if available
            if building_type in internationalised_values:
                building_type = internationalised_values[building_type]
            else:
                print ('WARNING: %s could not be translated'
                       % building_type)
            building_list.append([building_type.capitalize(),
                                  affected_buildings[usage],
                                  buildings[usage]])

        # Sort alphabetically
        building_list.sort()

        table_body.append(TableRow(_('Breakdown by building type'),
                                   header=True))
        for row in building_list:
            s = TableRow(row)
            table_body.append(s)

    table_body.append(TableRow(_('Action Checklist:'), header=True))
    table_body.append(TableRow(_('Are the critical facilities still '
                                 'open?')))

    table_body.append(TableRow(_('Notes'), header=True))
    assumption = _('Buildings are said to be flooded when ')
    if hazard_attribute == 'depth':
        assumption += _('flood levels exceed %.1f m') % threshold
    else:
        assumption += _('in areas marked as flood prone')
    table_body.append(assumption)

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = _('Buildings inundated')

    # Create style
    style_classes = [dict(label=_('Not Flooded'), min=0, max=0,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=_('Flooded'), min=1, max=1,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=I.get_projection(),
               geometry=I.get_geometry(),
               name=_('Estimated buildings affected'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for volcano hazard on building/structure.

    Input
        layers: List of layers expected to contain
            my_hazard: Hazard layer of volcano (polygon zones or
                volcano point locations)
            my_exposure: Vector layer of structure data on the same
                grid as my_hazard

    Counts number of buildings exposed to each volcano hazard zone.

    Return
        Map of buildings exposed to volcanic hazard zones
        Table with number of buildings affected
    """
    # Identify hazard and exposure layers
    my_hazard = get_hazard_layer(layers)  # Volcano hazard layer
    my_exposure = get_exposure_layer(layers)
    is_point_data = False

    question = get_question(my_hazard.get_name(),
                            my_exposure.get_name(),
                            self)

    # Input checks
    if not my_hazard.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % my_hazard.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. I got %s '
           'with layer type %s' % (my_hazard.get_name(),
                                   my_hazard.get_geometry_name()))
    if not (my_hazard.is_polygon_data or my_hazard.is_point_data):
        raise Exception(msg)

    if my_hazard.is_point_data:
        # Point hazard (volcano craters): build concentric circular
        # buffer polygons from the configured distances
        radii = self.parameters['distances [km]']
        is_point_data = True

        centers = my_hazard.get_geometry()
        attributes = my_hazard.get_data()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        Z = make_circular_polygon(centers, rad_m, attributes=attributes)
        # To check
        category_title = 'Radius'
        my_hazard = Z

        category_names = rad_m
        name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
    else:
        # Use hazard map
        category_title = 'KRB'

        # FIXME (Ole): Change to English and use translation system
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']
        name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map

    # Get names of volcanos considered
    if name_attribute in my_hazard.get_attribute_names():
        D = {}
        for att in my_hazard.get_data():
            # Run through all polygons and get unique names
            D[att[name_attribute]] = None

        volcano_names = ''
        for name in D:
            volcano_names += '%s, ' % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr('Not specified in data')

    if not category_title in my_hazard.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (my_hazard.get_name(), category_title))
        # noinspection PyExceptionInherit
        raise InaSAFEError(msg)

    # Run interpolation function for polygon2raster
    P = assign_hazard_values_to_exposure_data(my_hazard, my_exposure)

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a building count of zero
    new_attributes = my_hazard.get_data()

    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count impacted building per polygon and total.
    # 'polygon_id' links each interpolated building back to the hazard
    # polygon that covers it (None when outside all polygons).
    for attr in P.get_data():
        # Update building count for associated polygon
        poly_id = attr['polygon_id']
        if poly_id is not None:
            new_attributes[poly_id][self.target_field] += 1

            # Update building count for each category
            cat = new_attributes[poly_id][category_title]
            categories[cat] += 1

    # Count totals
    total = len(my_exposure)

    # Generate simple impact report
    blank_cell = ''
    table_body = [question,
                  TableRow([tr('Volcanos considered'),
                            '%s' % volcano_names, blank_cell],
                           header=True),
                  TableRow([tr('Distance [km]'), tr('Total'),
                            tr('Cumulative')],
                           header=True)]

    cum = 0
    for name in category_names:
        # prevent key error
        count = categories.get(name, 0)

        cum += count
        if is_point_data:
            # Convert buffer radius back from meters to km for display
            name = int(name) / 1000

        table_body.append(TableRow([name, format_int(count),
                                    format_int(cum)]))

    table_body.append(TableRow(tr('Map shows buildings affected in '
                                  'each of volcano hazard polygons.')))
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total number of buildings %s in the viewable '
                          'area') % format_int(total),
                       tr('Only buildings available in OpenStreetMap '
                          'are considered.')])

    impact_summary = Table(table_body).toNewlineFreeString()
    building_counts = [x[self.target_field] for x in new_attributes]

    # Zero impact: report and abort rather than producing an empty map
    if max(building_counts) == 0 == min(building_counts):
        table_body = [question,
                      TableRow([tr('Number of buildings affected'),
                                '%s' % format_int(cum), blank_cell],
                               header=True)]
        my_message = Table(table_body).toNewlineFreeString()
        raise ZeroImpactException(my_message)

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    classes = create_classes(building_counts, len(colours))
    interval_classes = humanize_class(classes)

    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        if i == 0:
            # First class (count 0) rendered fully transparent
            transparency = 100
            style_class['min'] = 0
        else:
            transparency = 30
            style_class['min'] = classes[i - 1]
        style_class['transparency'] = transparency
        style_class['colour'] = colours[i]
        style_class['max'] = classes[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='graduatedSymbol')

    # For printing map purpose
    map_title = tr('Buildings affected by volcanic hazard zone')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())
    legend_units = tr('(building)')
    legend_title = tr('Building count')

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=my_hazard.get_projection(),
               geometry=my_hazard.get_geometry(as_geometry_objects=True),
               name=tr('Buildings affected by volcanic hazard zone'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'target_field': self.target_field,
                         'map_title': map_title,
                         'legend_notes': legend_notes,
                         'legend_units': legend_units,
                         'legend_title': legend_title},
               style_info=style_info)
    return V
def run(self, layers):
    """Flood impact to buildings (e.g. from Open Street Map).

    :param layers: List of layers expected to contain a flood hazard
        layer (raster depth grid or affected/flood-prone polygons) and
        a building-location exposure layer.
    :returns: Vector layer of buildings with inundated flag in
        ``self.target_field`` and an HTML impact report in keywords.
    """
    threshold = self.parameters['threshold [m]']  # Flood threshold [m]

    verify(isinstance(threshold, float),
           'Expected thresholds to be a float. Got %s' % str(threshold))

    # Extract data
    H = get_hazard_layer(layers)    # Depth
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Determine attribute name for hazard levels
    if H.is_raster:
        mode = 'grid'
        hazard_attribute = 'depth'
    else:
        mode = 'regions'
        hazard_attribute = None

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(
        H, E, attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attribute_names = I.get_attribute_names()
    attributes = I.get_data()

    N = len(I)

    # Calculate building impact
    count = 0
    buildings = {}
    affected_buildings = {}
    for i in range(N):
        if mode == 'grid':
            # Get the interpolated depth; flooded iff depth >= threshold
            x = float(attributes[i]['depth'])
            x = x >= threshold
        elif mode == 'regions':
            # Use interpolated polygon attribute
            atts = attributes[i]

            # FIXME (Ole): Need to agree whether to use one or the
            # other as this can be very confusing!
            # For now look for 'affected' first
            if 'affected' in atts:
                # E.g. from flood forecast
                # Assume that building is wet if inside polygon
                # as flagged by attribute Flooded
                res = atts['affected']
                if res is None:
                    x = False
                else:
                    x = bool(res)
            elif 'FLOODPRONE' in atts:
                res = atts['FLOODPRONE']
                if res is None:
                    x = False
                else:
                    x = res.lower() == 'yes'
            elif DEFAULT_ATTRIBUTE in atts:
                # Check the default attribute assigned for points
                # covered by a polygon
                res = atts[DEFAULT_ATTRIBUTE]
                if res is None:
                    x = False
                else:
                    x = res
            else:
                # there is no flood related attribute
                msg = ('No flood related attribute found in %s. '
                       'I was looking for either "affected", "FLOODPRONE" '
                       'or "inapolygon". The latter should have been '
                       'automatically set by call to '
                       'assign_hazard_values_to_exposure_data(). '
                       'Sorry I can\'t help more.')
                raise Exception(msg)
        else:
            # Defensive: mode is always 'grid' or 'regions'
            msg = (tr('Unknown hazard type %s. '
                      'Must be either "depth" or "grid"') % mode)
            raise Exception(msg)

        # Count affected buildings by usage type if available.
        # Try each known OSM usage attribute in priority order until one
        # yields a non-empty value.
        if 'type' in attribute_names:
            usage = attributes[i]['type']
        elif 'TYPE' in attribute_names:
            usage = attributes[i]['TYPE']
        else:
            usage = None
        if 'amenity' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['amenity']
        if 'building_t' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['building_t']
        if 'office' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['office']
        if 'tourism' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['tourism']
        if 'leisure' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['leisure']
        if 'building' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['building']
            # OSM convention: building=yes means untyped building
            if usage == 'yes':
                usage = 'building'

        if usage is not None and usage != 0:
            key = usage
        else:
            key = 'unknown'

        if key not in buildings:
            buildings[key] = 0
            affected_buildings[key] = 0

        # Count all buildings by type
        buildings[key] += 1
        if x is True:
            # Count affected buildings by type
            affected_buildings[key] += 1

            # Count total affected buildings
            count += 1

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = x

    # Lump small entries and 'unknown' into 'other' category.
    # Python 2: .keys() returns a list copy, so deleting inside the
    # loop is safe.
    for usage in buildings.keys():
        x = buildings[usage]
        if x < 25 or usage == 'unknown':
            if 'other' not in buildings:
                buildings['other'] = 0
                affected_buildings['other'] = 0

            buildings['other'] += x
            affected_buildings['other'] += affected_buildings[usage]
            del buildings[usage]
            del affected_buildings[usage]

    # Generate simple impact report
    table_body = [question,
                  TableRow([tr('Building type'),
                            tr('Number flooded'),
                            tr('Total')],
                           header=True),
                  TableRow([tr('All'),
                            format_int(count),
                            format_int(N)])]

    school_closed = 0
    hospital_closed = 0
    # Generate break down by building usage type if available
    list_type_attribute = ['TYPE', 'type', 'amenity', 'building_t',
                           'office', 'tourism', 'leisure', 'building']
    intersect_type = set(attribute_names) & set(list_type_attribute)
    if len(intersect_type) > 0:
        # Make list of building types
        building_list = []
        for usage in buildings:
            building_type = usage.replace('_', ' ')

            # Lookup internationalised value if available
            building_type = tr(building_type)
            building_list.append([building_type.capitalize(),
                                  format_int(affected_buildings[usage]),
                                  format_int(buildings[usage])])
            # NOTE(review): these comparisons use the translated string,
            # so in non-English locales 'school'/'hospital' may never
            # match — confirm intended behaviour.
            if building_type == 'school':
                school_closed = affected_buildings[usage]
            if building_type == 'hospital':
                hospital_closed = affected_buildings[usage]

        # Sort alphabetically
        building_list.sort()

        table_body.append(TableRow(tr('Breakdown by building type'),
                                   header=True))
        for row in building_list:
            s = TableRow(row)
            table_body.append(s)

    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(
        tr('Are the critical facilities still open?')))
    table_body.append(TableRow(
        tr('Which structures have warning capacity (eg. sirens, speakers, '
           'etc.)?')))
    table_body.append(TableRow(
        tr('Which buildings will be evacuation centres?')))
    table_body.append(TableRow(
        tr('Where will we locate the operations centre?')))
    table_body.append(TableRow(
        tr('Where will we locate warehouse and/or distribution centres?')))
    if school_closed > 0:
        table_body.append(TableRow(
            tr('Where will the students from the %s closed schools go to '
               'study?') % format_int(school_closed)))
    if hospital_closed > 0:
        table_body.append(TableRow(
            tr('Where will the patients from the %s closed hospitals go '
               'for treatment and how will we transport them?') %
            format_int(hospital_closed)))

    table_body.append(TableRow(tr('Notes'), header=True))
    assumption = tr('Buildings are said to be flooded when ')
    if mode == 'grid':
        assumption += tr('flood levels exceed %.1f m') % threshold
    else:
        assumption += tr('in regions marked as affected')
    table_body.append(assumption)

    # Result
    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary

    # Create style
    style_classes = [dict(label=tr('Not Inundated'), value=0,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=tr('Inundated'), value=1,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Buildings inundated')
    legend_units = tr('(inundated or not inundated)')
    legend_title = tr('Structure inundated status')

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=I.get_projection(),
               geometry=I.get_geometry(),
               name=tr('Estimated buildings affected'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'target_field': self.target_field,
                         'map_title': map_title,
                         'legend_units': legend_units,
                         'legend_title': legend_title},
               style_info=style_info)
    return V
def run(self):
    """Earthquake impact to buildings (e.g. from OpenStreetMap).

    Classifies each building into Low/Medium/High shaking impact based
    on interpolated MMI and the configured thresholds; when the exposure
    carries NEXIS attributes, also accumulates building and contents
    dollar losses. Returns the impact Vector layer.
    """
    LOGGER.debug('Running earthquake building impact')

    # merely initialize
    building_value = 0
    contents_value = 0

    # Thresholds for mmi breakdown.
    t0 = self.parameters['low_threshold'].value
    t1 = self.parameters['medium_threshold'].value
    t2 = self.parameters['high_threshold'].value

    # Class Attribute and Label.
    class_1 = {'label': tr('Low'), 'class': 1}
    class_2 = {'label': tr('Medium'), 'class': 2}
    class_3 = {'label': tr('High'), 'class': 3}

    # Define attribute name for hazard levels.
    hazard_attribute = 'mmi'

    # Determine if exposure data have NEXIS attributes.
    attribute_names = self.exposure.layer.get_attribute_names()
    if ('FLOOR_AREA' in attribute_names and
            'BUILDING_C' in attribute_names and
            'CONTENTS_C' in attribute_names):
        self.is_nexis = True
    else:
        self.is_nexis = False

    # Interpolate hazard level to building locations.
    interpolate_result = assign_hazard_values_to_exposure_data(
        self.hazard.layer,
        self.exposure.layer,
        attribute_name=hazard_attribute)

    # Get parameters from layer's keywords
    structure_class_field = self.exposure.keyword('structure_class_field')
    exposure_value_mapping = self.exposure.keyword('value_mapping')

    attributes = interpolate_result.get_data()
    interpolate_size = len(interpolate_result)

    hazard_classes = [tr('Low'), tr('Medium'), tr('High')]
    self.init_report_var(hazard_classes)

    removed = []
    for i in range(interpolate_size):
        # Classify building according to shake level
        # and calculate dollar losses
        if self.is_nexis:
            # Malformed/missing NEXIS values degrade gracefully to 0.0
            try:
                area = float(attributes[i]['FLOOR_AREA'])
            except (ValueError, KeyError):
                # print 'Got area', attributes[i]['FLOOR_AREA']
                area = 0.0

            try:
                building_value_density = float(attributes[i]['BUILDING_C'])
            except (ValueError, KeyError):
                # print 'Got bld value', attributes[i]['BUILDING_C']
                building_value_density = 0.0

            try:
                contents_value_density = float(attributes[i]['CONTENTS_C'])
            except (ValueError, KeyError):
                # print 'Got cont value', attributes[i]['CONTENTS_C']
                contents_value_density = 0.0

            building_value = building_value_density * area
            contents_value = contents_value_density * area

        usage = attributes[i].get(structure_class_field, None)
        usage = main_type(usage, exposure_value_mapping)

        # First sighting of this usage type: register zeroed counters
        # (and dollar accumulators in NEXIS mode) for every category
        if usage not in self.buildings:
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                if self.is_nexis:
                    self.affected_buildings[category][usage] = OrderedDict(
                        [(tr('Buildings Affected'), 0),
                         (tr('Buildings value ($M)'), 0),
                         (tr('Contents value ($M)'), 0)])
                else:
                    self.affected_buildings[category][usage] = \
                        OrderedDict([(tr('Buildings Affected'), 0)])

        self.buildings[usage] += 1
        try:
            mmi = float(attributes[i][hazard_attribute])  # MMI
        except TypeError:
            mmi = 0.0
        if t0 <= mmi < t1:
            cls = 1
            category = tr('Low')
        elif t1 <= mmi < t2:
            cls = 2
            category = tr('Medium')
        elif t2 <= mmi:
            cls = 3
            category = tr('High')
        else:
            # Not reported for less than level t0
            # RMN: We still need to add target_field attribute
            # So, set it to None
            attributes[i][self.target_field] = None
            continue

        attributes[i][self.target_field] = cls
        self.affected_buildings[category][usage][tr(
            'Buildings Affected')] += 1
        if self.is_nexis:
            self.affected_buildings[category][usage][tr(
                'Buildings value ($M)')] += building_value / 1000000.0
            self.affected_buildings[category][usage][tr(
                'Contents value ($M)')] += contents_value / 1000000.0

    self.reorder_dictionaries()

    # remove un-categorized element
    # NOTE(review): 'removed' is never appended to anywhere above, so
    # this clean-up loop is currently a no-op — confirm whether the
    # sub-t0 branch was meant to collect indices into it.
    removed.reverse()
    geometry = interpolate_result.get_geometry()
    for i in range(0, len(removed)):
        del attributes[removed[i]]
        del geometry[removed[i]]

    if len(attributes) < 1:
        raise ZeroImpactException()

    # Create style
    style_classes = [dict(label=class_1['label'],
                          value=class_1['class'],
                          colour='#ffff00',
                          transparency=1),
                     dict(label=class_2['label'],
                          value=class_2['class'],
                          colour='#ffaa00',
                          transparency=1),
                     dict(label=class_3['label'],
                          value=class_3['class'],
                          colour='#ff0000',
                          transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    impact_data = self.generate_data()

    extra_keywords = {
        'map_title': self.map_title(),
        'legend_notes': self.metadata().key('legend_notes'),
        'legend_units': self.metadata().key('legend_units'),
        'legend_title': self.metadata().key('legend_title'),
        'target_field': self.target_field,
    }

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    impact_layer = Vector(
        data=attributes,
        projection=interpolate_result.get_projection(),
        geometry=geometry,
        name=self.map_title(),
        keywords=impact_layer_keywords,
        style_info=style_info)

    impact_layer.impact_data = impact_data
    self._impact = impact_layer
    return impact_layer
def run(self, layers):
    """Risk plugin for Padang building survey.

    :param layers: List of layers expected to contain a ground-shaking
        hazard layer (MMI) and a building exposure layer.
    :returns: Vector layer of buildings with estimated percent damage
        in ``self.target_field`` and an HTML impact report in keywords.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Map from different kinds of datasets to Padang vulnerability classes
    datatype = E.get_keywords()['datatype']
    vclass_tag = 'VCLASS'
    if datatype.lower() == 'osm':
        # Map from OSM attributes
        Emap = osm2padang(E)
    elif datatype.lower() == 'sigab':
        # Map from SIGAB attributes
        Emap = sigab2padang(E)
    else:
        Emap = E

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, Emap,
                                              attribute_name='MMI')

    # Extract relevant numerical data
    attributes = I.get_data()
    N = len(I)

    # Calculate building damage
    count_high = count_medium = count_low = count_none = 0
    for i in range(N):
        mmi = float(attributes[i]['MMI'])

        # Look up the lognormal fragility curve for this building class
        building_type = Emap.get_data(vclass_tag, i)
        damage_params = damage_curves[building_type]
        beta = damage_params['beta']
        median = damage_params['median']
        percent_damage = log_normal_cdf(mmi,
                                        median=median,
                                        sigma=beta) * 100

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = percent_damage

        # Calculate statistics (bands: <10, 10-33, 33-66, >=66)
        if percent_damage < 10:
            count_none += 1
        if 10 <= percent_damage < 33:
            count_low += 1
        if 33 <= percent_damage < 66:
            count_medium += 1
        if 66 <= percent_damage:
            count_high += 1

    # Generate impact report.
    # FIX: the 'All' row previously inserted the raw integer N while
    # every other count in the table went through format_int(); format
    # it consistently so the report uses one number style throughout.
    table_body = [question,
                  TableRow([tr('Buildings'), tr('Total')],
                           header=True),
                  TableRow([tr('All'), format_int(N)]),
                  TableRow([tr('No damage'), format_int(count_none)]),
                  TableRow([tr('Low damage'), format_int(count_low)]),
                  TableRow([tr('Medium damage'), format_int(count_medium)]),
                  TableRow([tr('High damage'), format_int(count_high)])]
    table_body.append(TableRow(tr('Notes'), header=True))
    table_body.append(tr('Levels of impact are defined by post 2009 '
                         'Padang earthquake survey conducted by Geoscience '
                         'Australia and Institute of Teknologi Bandung.'))
    table_body.append(tr('Unreinforced masonry is assumed where no '
                         'structural information is available.'))
    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = tr('Earthquake damage to buildings')

    # Create style
    style_classes = [dict(label=tr('No damage'), min=0, max=10,
                          colour='#00ff00', transparency=0),
                     dict(label=tr('Low damage'), min=10, max=33,
                          colour='#ffff00', transparency=0),
                     dict(label=tr('Medium damage'), min=33, max=66,
                          colour='#ffaa00', transparency=0),
                     dict(label=tr('High damage'), min=66, max=100,
                          colour='#ff0000', transparency=0)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=E.get_projection(),
               geometry=E.get_geometry(),
               name='Estimated pct damage',
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for flood population evacuation.

    Input:
        layers: List of layers expected to contain
            my_hazard: Vector polygon layer of flood depth
            my_exposure: Raster layer of population data on the same
                grid as my_hazard

    Counts number of people exposed to areas identified as flood prone.

    Return
        Map of population exposed to flooding
        Table with number of people evacuated and supplies required
    """
    # Identify hazard and exposure layers
    my_hazard = get_hazard_layer(layers)  # Flood inundation
    my_exposure = get_exposure_layer(layers)

    question = get_question(my_hazard.get_name(),
                            my_exposure.get_name(),
                            self)

    # Check that hazard is polygon type
    if not my_hazard.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % my_hazard.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon layer. I got %s with layer '
           'type %s' % (my_hazard.get_name(),
                        my_hazard.get_geometry_name()))
    if not my_hazard.is_polygon_data:
        raise Exception(msg)

    # Run interpolation function for polygon2raster
    P = assign_hazard_values_to_exposure_data(my_hazard, my_exposure,
                                              attribute_name='population')

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a population count of zero
    new_attributes = my_hazard.get_data()

    category_title = 'affected'  # FIXME: Should come from keywords
    deprecated_category_title = 'FLOODPRONE'
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        try:
            cat = attr[category_title]
        except KeyError:
            # Fall back to the deprecated attribute name
            cat = attr['FLOODPRONE']
        categories[cat] = 0

    # Count affected population per polygon, per category and total
    affected_population = 0
    for attr in P.get_data():
        affected = False
        if 'affected' in attr:
            res = attr['affected']
            if res is None:
                x = False
            else:
                x = bool(res)
            affected = x
        elif 'FLOODPRONE' in attr:
            # If there isn't an 'affected' attribute,
            res = attr['FLOODPRONE']
            if res is not None:
                affected = res.lower() == 'yes'
        elif 'Affected' in attr:
            # Check the default attribute assigned for points
            # covered by a polygon
            res = attr['Affected']
            if res is None:
                x = False
            else:
                x = res
            affected = x
        else:
            # there is no flood related attribute
            # NOTE(review): message contains typo 'looking fore' —
            # left unchanged here since it is a runtime string
            msg = ('No flood related attribute found in %s. '
                   'I was looking fore either "Flooded", "FLOODPRONE" '
                   'or "Affected". The latter should have been '
                   'automatically set by call to '
                   'assign_hazard_values_to_exposure_data(). '
                   'Sorry I can\'t help more.')
            raise Exception(msg)

        if affected:
            # Get population at this location
            pop = float(attr['population'])

            # Update population count for associated polygon
            poly_id = attr['polygon_id']
            new_attributes[poly_id][self.target_field] += pop

            # Update population count for each category
            try:
                cat = new_attributes[poly_id][category_title]
            except KeyError:
                cat = new_attributes[poly_id][deprecated_category_title]
            categories[cat] += pop

            # Update total
            affected_population += pop

    affected_population = round_thousand(affected_population)

    # Estimate number of people in need of evacuation
    evacuated = (affected_population *
                 self.parameters['evacuation_percentage'] / 100.0)

    total = int(numpy.sum(my_exposure.get_data(nan=0, scaling=False)))

    # Don't show digits less than a 1000
    total = round_thousand(total)
    evacuated = round_thousand(evacuated)

    # Calculate estimated minimum needs
    minimum_needs = self.parameters['minimum needs']
    tot_needs = evacuated_population_weekly_needs(evacuated, minimum_needs)

    # Generate impact report for the pdf map
    table_body = [question,
                  TableRow([tr('People affected'),
                            '%s%s' % (format_int(int(affected_population)),
                                      ('*' if affected_population >= 1000
                                       else ''))],
                           header=True),
                  TableRow([tr('People needing evacuation'),
                            '%s%s' % (format_int(int(evacuated)),
                                      ('*' if evacuated >= 1000 else ''))],
                           header=True),
                  TableRow([TableCell(
                      tr('* Number is rounded to the nearest 1000'),
                      col_span=2)],
                      header=False),
                  TableRow([tr('Evacuation threshold'),
                            '%s%%' % format_int(
                                self.parameters['evacuation_percentage'])],
                           header=True),
                  TableRow(tr('Map shows population affected in each flood'
                              ' prone area')),
                  TableRow(tr('Table below shows the weekly minium needs '
                              'for all evacuated people')),
                  TableRow([tr('Needs per week'), tr('Total')],
                           header=True),
                  [tr('Rice [kg]'), format_int(tot_needs['rice'])],
                  [tr('Drinking Water [l]'),
                   format_int(tot_needs['drinking_water'])],
                  [tr('Clean Water [l]'), format_int(tot_needs['water'])],
                  [tr('Family Kits'), format_int(tot_needs['family_kits'])],
                  [tr('Toilets'), format_int(tot_needs['toilets'])]]
    impact_table = Table(table_body).toNewlineFreeString()

    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(tr('How will warnings be disseminated?')))
    table_body.append(TableRow(tr('How will we reach stranded people?')))
    table_body.append(TableRow(tr('Do we have enough relief items?')))
    table_body.append(TableRow(tr('If yes, where are they located and how '
                                  'will we distribute them?')))
    table_body.append(TableRow(tr('If no, where can we obtain additional '
                                  'relief items from and how will we '
                                  'transport them to here?')))

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total population: %s') % format_int(total),
                       tr('People need evacuation if in area identified '
                          'as "Flood Prone"'),
                       tr('Minimum needs are defined in BNPB '
                          'regulation 7/2008')])
    impact_summary = Table(table_body).toNewlineFreeString()

    # Create style
    # Define classes for legend for flooded population counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    population_counts = [x['population'] for x in new_attributes]
    classes = create_classes(population_counts, len(colours))
    interval_classes = humanize_class(classes)

    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        if i == 0:
            # First class (count 0) rendered fully transparent
            transparency = 100
            style_class['min'] = 0
        else:
            transparency = 0
            style_class['min'] = classes[i - 1]
        style_class['transparency'] = transparency
        style_class['colour'] = colours[i]
        style_class['max'] = classes[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='graduatedSymbol')

    # For printing map purpose
    map_title = tr('People affected by flood prone areas')
    legend_notes = tr('Thousand separator is represented by \'.\'')
    legend_units = tr('(people per polygon)')
    legend_title = tr('Population Count')

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=my_hazard.get_projection(),
               geometry=my_hazard.get_geometry(),
               name=tr('Population affected by flood prone areas'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'target_field': self.target_field,
                         'map_title': map_title,
                         'legend_notes': legend_notes,
                         'legend_units': legend_units,
                         'legend_title': legend_title},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for earthquake school damage.

    :param layers: List of layers expected to contain a hazard layer of
        ground shaking (MMI) and an exposure layer of building locations.
    :returns: Vector layer of buildings with an estimated damage level in
        self.target_field (1=Low, 2=Medium, 3=High, NaN=unknown shaking)
        plus the interpolated 'MMI' and all original exposure attributes.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    # Map the exposure data to the BNPB guideline vulnerability classes,
    # depending on which dataset the buildings came from.
    keywords = E.get_keywords()
    if 'datatype' in keywords:
        datatype = keywords['datatype']
        if datatype.lower() == 'osm':
            # Map from OSM attributes to the guideline classes (URM and RM)
            E = osm2bnpb(E, target_attribute=self.vclass_tag)
        elif datatype.lower() == 'sigab':
            # Map from SIGAB attributes to the guideline classes
            # (URM and RM)
            E = sigab2bnpb(E)
        else:
            E = unspecific2bnpb(E, target_attribute=self.vclass_tag)
    else:
        E = unspecific2bnpb(E, target_attribute=self.vclass_tag)

    # Interpolate hazard level to building locations
    H = assign_hazard_values_to_exposure_data(H, E, attribute_name='MMI')

    # Extract relevant numerical data
    coordinates = E.get_geometry()
    shaking = H.get_data()
    N = len(shaking)

    # List attributes to carry forward to result layer
    attributes = E.get_attribute_names()

    # Calculate building damage
    count3 = 0
    count2 = 0
    count1 = 0
    count_unknown = 0
    building_damage = []
    for i in range(N):
        mmi = float(shaking[i]['MMI'])
        building_class = E.get_data(self.vclass_tag, i)

        # (lo, hi) MMI thresholds separating Low/Medium/High damage for
        # this building class.
        lo, hi = damage_parameters[building_class]

        if numpy.isnan(mmi):
            # If we don't know the shake level assign Not-a-Number
            damage = numpy.nan
            count_unknown += 1
        elif mmi < lo:
            damage = 1  # Low
            count1 += 1
        elif lo <= mmi < hi:
            damage = 2  # Medium
            count2 += 1
        elif mmi >= hi:
            damage = 3  # High
            count3 += 1
        else:
            msg = 'Undefined shakelevel %s' % str(mmi)
            raise Exception(msg)

        # Collect shake level and calculated damage
        result_dict = {self.target_field: damage,
                       'MMI': mmi}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = E.get_data(key, i)

        # Record result for this feature
        building_damage.append(result_dict)

    # Create report.
    # FIX: the header row previously ended with a stray '</th>'; it now
    # closes with '</tr>' so the generated HTML table is well formed.
    impact_summary = ('<table border="0" width="320px">'
                      ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                      ' <tr></tr>'
                      ' <tr><td>%s:</td><td>%s</td></tr>'
                      ' <tr><td>%s (10-25%%):</td><td>%s</td></tr>'
                      ' <tr><td>%s (25-50%%):</td><td>%s</td></tr>'
                      ' <tr><td>%s (50-100%%):</td><td>%s</td></tr>'
                      % (tr('Buildings'), tr('Total'),
                         tr('All'), format_int(N),
                         tr('Low damage'), format_int(count1),
                         tr('Medium damage'), format_int(count2),
                         tr('High damage'), format_int(count3)))
    impact_summary += (' <tr><td>%s (NaN):</td><td>%s</td></tr>'
                       % ('Unknown', format_int(count_unknown)))
    impact_summary += '</table>'

    # Create style
    style_classes = [dict(label=tr('Low damage'), min=0.5, max=1.5,
                          colour='#fecc5c', transparency=0),
                     dict(label=tr('Medium damage'), min=1.5, max=2.5,
                          colour='#fd8d3c', transparency=0),
                     dict(label=tr('High damage'), min=2.5, max=3.5,
                          colour='#f31a1c', transparency=0)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=building_damage,
               projection=E.get_projection(),
               geometry=coordinates,
               name='Estimated damage level',
               keywords={'impact_summary': impact_summary},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for volcano hazard on building/structure.

    :param layers: List of layers expected to contain
        my_hazard: Hazard layer of volcano (polygon zones or point
            volcanoes turned into concentric circles)
        my_exposure: Vector layer of structure data on the same grid
            as my_hazard

    Counts number of buildings exposed to each volcano hazard zone.

    :returns: Map of buildings exposed to volcanic hazard zones and a
        table with the number of buildings affected per zone.
    """
    # Identify hazard and exposure layers
    my_hazard = get_hazard_layer(layers)  # Volcano hazard layer
    my_exposure = get_exposure_layer(layers)
    is_point_data = False

    question = get_question(my_hazard.get_name(),
                            my_exposure.get_name(),
                            self)

    # Input checks
    if not my_hazard.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % my_hazard.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. I got %s '
           'with layer type %s'
           % (my_hazard.get_name(), my_hazard.get_geometry_name()))
    if not (my_hazard.is_polygon_data or my_hazard.is_point_data):
        raise Exception(msg)

    if my_hazard.is_point_data:
        # Use concentric circles around each volcano point as hazard zones
        radii = self.parameters['distances [km]']
        is_point_data = True

        centers = my_hazard.get_geometry()
        attributes = my_hazard.get_data()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        Z = make_circular_polygon(centers, rad_m, attributes=attributes)
        # To check
        category_title = 'Radius'
        my_hazard = Z

        category_names = rad_m
        name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
    else:
        # Use hazard map
        category_title = 'KRB'

        # FIXME (Ole): Change to English and use translation system
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']
        name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map

    # Get names of volcanos considered
    if name_attribute in my_hazard.get_attribute_names():
        D = {}
        for att in my_hazard.get_data():
            # Run through all polygons and get unique names
            D[att[name_attribute]] = None

        volcano_names = ''
        for name in D:
            volcano_names += '%s, ' % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr('Not specified in data')

    if not category_title in my_hazard.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (my_hazard.get_name(), category_title))
        raise InaSAFEError(msg)

    # Run interpolation function for polygon2raster
    P = assign_hazard_values_to_exposure_data(my_hazard, my_exposure)

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a building count of zero
    new_attributes = my_hazard.get_data()

    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count impacted building per polygon and total
    for attr in P.get_data():
        # Update building count for associated polygon.
        # poly_id is None for buildings outside every hazard polygon.
        poly_id = attr['polygon_id']
        if poly_id is not None:
            new_attributes[poly_id][self.target_field] += 1

            # Update building count for each category
            cat = new_attributes[poly_id][category_title]
            categories[cat] += 1

    # Count totals
    total = len(my_exposure)

    # Generate simple impact report
    blank_cell = ''
    table_body = [question,
                  TableRow([tr('Volcanos considered'),
                            '%s' % volcano_names, blank_cell],
                           header=True),
                  TableRow([tr('Distance [km]'), tr('Total'),
                            tr('Cumulative')],
                           header=True)]

    cum = 0
    for name in category_names:
        # prevent key error
        count = categories.get(name, 0)

        cum += count
        if is_point_data:
            # Convert radius in meters back to km for display
            name = int(name) / 1000

        table_body.append(TableRow([name, format_int(count),
                                    format_int(cum)]))

    table_body.append(TableRow(tr('Map shows buildings affected in '
                                  'each of volcano hazard polygons.')))
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total number of buildings %s in the viewable '
                          'area') % format_int(total),
                       tr('Only buildings available in OpenStreetMap '
                          'are considered.')])

    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('Buildings affected by volcanic hazard zone')

    # Create style: graduated colour ramp over building counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    building_counts = [x[self.target_field] for x in new_attributes]
    classes = create_classes(building_counts, len(colours))
    interval_classes = humanize_class(classes)
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        if i == 0:
            # First class (zero buildings) is fully transparent
            transparency = 100
            style_class['min'] = 0
        else:
            transparency = 30
            style_class['min'] = classes[i - 1]
        style_class['transparency'] = transparency
        style_class['colour'] = colours[i]
        style_class['max'] = classes[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='graduatedSymbol')

    # For printing map purpose
    map_title = tr('Building affected by volcanic hazard zone')
    legend_notes = tr('Thousand separator is represented by \'.\'')
    legend_units = tr('(building)')
    legend_title = tr('Building count')

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=my_hazard.get_projection(),
               geometry=my_hazard.get_geometry(as_geometry_objects=True),
               name=tr('Buildings affected by volcanic hazard zone'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'target_field': self.target_field,
                         'map_title': map_title,
                         'legend_notes': legend_notes,
                         'legend_units': legend_units,
                         'legend_title': legend_title},
               style_info=style_info)
    return V
def run(self, layers):
    """Flood impact to roads (e.g. from Open Street Map).

    :param layers: List of layers expected to contain a flood hazard
        layer (depth / flood prone polygons) and an exposure layer of
        road locations.
    :returns: Vector layer of roads with a boolean flooded flag in
        self.target_field.
    """
    # Extract data
    H = get_hazard_layer(layers)  # Depth
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, E)

    # Extract relevant exposure data
    #attribute_names = I.get_attribute_names()
    attributes = I.get_data()

    N = len(I)

    # Calculate road impact
    count = 0
    #flooded_distance = 0
    for i in range(N):
        # Use interpolated polygon attribute
        atts = attributes[i]

        if 'FLOODPRONE' in atts:
            res = atts['FLOODPRONE']
            if res is None:
                x = False
            else:
                x = res.lower() == 'yes'
        else:
            # If there isn't a flood prone attribute,
            # assume that building is wet if inside polygon
            # as flag by generic attribute AFFECTED
            res = atts['Affected']
            if res is None:
                x = False
            else:
                x = res

        # Count all roads
        if x is True:
            # Count total affected roads
            count += 1

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = x
        # FIX: removed leftover debug statement
        # 'if i == 0: print attributes[0].keys()' which dumped attribute
        # names to stdout on every run.

    # Generate simple impact report
    table_body = [question,
                  TableRow([tr('Building type'),
                            tr('Temporarily closed'),
                            tr('Total')], header=True),
                  TableRow([tr('All'), count, N])]

    impact_summary = Table(table_body).toNewlineFreeString()
    #impact_table = impact_summary
    map_title = tr('Roads inundated')

    # Create style
    style_classes = [dict(label=tr('Not Flooded'), min=0, max=0,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=tr('Flooded'), min=1, max=1,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=I.get_projection(),
               geometry=I.get_geometry(),
               geometry_type=I.get_geometry_type(),
               name=tr('Estimated roads affected'),
               keywords={'impact_summary': impact_summary,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(self, layers):
    """Flood impact to buildings (e.g. from Open Street Map).

    :param layers: List of layers expected to contain.
        * hazard_layer: Hazard raster layer of flood
        * exposure_layer: Vector layer of structure data on
          the same grid as hazard_layer
    :returns: Vector layer of buildings with an integer inundation flag
        in self.target_field (1 = inundated, 0 = not inundated).
    """
    # Extract data
    hazard_layer = get_hazard_layer(layers)  # Depth
    exposure_layer = get_exposure_layer(layers)  # Building locations

    question = get_question(hazard_layer.get_name(),
                            exposure_layer.get_name(), self)

    # Determine attribute name for hazard levels
    hazard_attribute = None

    # Interpolate hazard level to building locations
    interpolated_layer = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer, attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()
    total_features = len(interpolated_layer)
    buildings = {}

    # The number of affected buildings
    affected_count = 0

    # The variable for regions mode
    affected_buildings = {}
    for i in range(total_features):
        # Use interpolated polygon attribute
        atts = features[i]

        # FIXME (Ole): Need to agree whether to use one or the
        # other as this can be very confusing!
        # For now look for 'affected' first
        if 'affected' in atts:
            # E.g. from flood forecast
            # Assume that building is wet if inside polygon
            # as flagged by attribute Flooded
            res = atts['affected']
            if res is None:
                inundated_status = False
            else:
                inundated_status = bool(res)
        elif 'FLOODPRONE' in atts:
            res = atts['FLOODPRONE']
            if res is None:
                inundated_status = False
            else:
                inundated_status = res.lower() == 'yes'
        elif DEFAULT_ATTRIBUTE in atts:
            # Check the default attribute assigned for points
            # covered by a polygon
            res = atts[DEFAULT_ATTRIBUTE]
            if res is None:
                inundated_status = False
            else:
                inundated_status = res
        else:
            # there is no flood related attribute
            # NOTE(review): the '%s' below was never interpolated in the
            # original code; kept as-is to preserve the message verbatim.
            message = ('No flood related attribute found in %s. I was '
                       'looking for either "affected", "FLOODPRONE" or '
                       '"inapolygon". The latter should have been '
                       'automatically set by call to '
                       'assign_hazard_values_to_exposure_data(). Sorry I '
                       'can\'t help more.')
            raise Exception(message)

        # Count affected buildings by usage type if available
        usage = get_osm_building_usage(attribute_names, features[i])
        if usage is not None and usage != 0:
            key = usage
        else:
            key = 'unknown'

        if key not in buildings:
            buildings[key] = 0
            affected_buildings[key] = 0

        # Count all buildings by type
        buildings[key] += 1
        if inundated_status is True:
            # Count affected buildings by type
            affected_buildings[key] += 1

            # Count total affected buildings
            affected_count += 1

        # Add calculated impact to existing attributes
        features[i][self.target_field] = int(inundated_status)

    # Lump small entries and 'unknown' into 'other' category.
    # (Python 2: .keys() returns a list, so deleting while iterating
    # this snapshot is safe.)
    for usage in buildings.keys():
        x = buildings[usage]
        if x < 25 or usage == 'unknown':
            if 'other' not in buildings:
                buildings['other'] = 0
                affected_buildings['other'] = 0

            buildings['other'] += x
            affected_buildings['other'] += affected_buildings[usage]
            del buildings[usage]
            del affected_buildings[usage]

    # Generate simple impact report
    table_body = [question,
                  TableRow([tr('Building type'),
                            tr('Number flooded'),
                            tr('Total')], header=True),
                  TableRow([tr('All'),
                            format_int(affected_count),
                            format_int(total_features)])]

    school_closed = 0
    hospital_closed = 0
    # Generate break down by building usage type if available
    list_type_attribute = ['TYPE', 'type', 'amenity', 'building_t',
                           'office', 'tourism', 'leisure', 'building']
    intersect_type = set(attribute_names) & set(list_type_attribute)
    if len(intersect_type) > 0:
        # Make list of building types
        building_list = []
        for usage in buildings:
            building_type = usage.replace('_', ' ')

            # Lookup internationalised value if available
            building_type = tr(building_type)
            building_list.append([building_type.capitalize(),
                                  format_int(affected_buildings[usage]),
                                  format_int(buildings[usage])])
            if usage.lower() == 'school':
                school_closed = affected_buildings[usage]
            if usage.lower() == 'hospital':
                hospital_closed = affected_buildings[usage]

        # Sort alphabetically
        building_list.sort()

        table_body.append(TableRow(tr('Breakdown by building type'),
                                   header=True))
        for row in building_list:
            s = TableRow(row)
            table_body.append(s)

    # Action Checklist Section
    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(
        tr('Are the critical facilities still open?')))
    table_body.append(TableRow(
        tr('Which structures have warning capacity (eg. sirens, speakers, '
           'etc.)?')))
    table_body.append(TableRow(
        tr('Which buildings will be evacuation centres?')))
    table_body.append(TableRow(
        tr('Where will we locate the operations centre?')))
    table_body.append(TableRow(
        tr('Where will we locate warehouse and/or distribution centres?')))
    if school_closed > 0:
        table_body.append(TableRow(
            tr('Where will the students from the %s closed schools go to '
               'study?') % format_int(school_closed)))
    if hospital_closed > 0:
        table_body.append(TableRow(
            tr('Where will the patients from the %s closed hospitals go '
               'for treatment and how will we transport them?') %
            format_int(hospital_closed)))

    # Notes Section
    table_body.append(TableRow(tr('Notes'), header=True))
    table_body.append(TableRow(
        tr('Buildings are said to be flooded when in regions marked '
           'as affected')))

    # Result
    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary

    # Prepare impact layer
    map_title = tr('Buildings inundated')
    legend_title = tr('Structure inundated status')

    # FIX: the 'Inundated' class previously used a misspelled keyword
    # 'ztransparency', so that class never received an explicit
    # transparency like its 'Not Inundated' counterpart.
    style_classes = [
        dict(label=tr('Not Inundated'), value=0, colour='#1EFC7C',
             transparency=0, size=1),
        dict(label=tr('Inundated'), value=1, colour='#F31A1C',
             transparency=0, size=1)]
    legend_units = tr('(inundated or not inundated)')

    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # Create vector layer and return
    vector_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=tr('Estimated buildings affected'),
        keywords={
            'impact_summary': impact_summary,
            'impact_table': impact_table,
            'target_field': self.target_field,
            'map_title': map_title,
            'legend_units': legend_units,
            'legend_title': legend_title,
            'buildings_total': total_features,
            'buildings_affected': affected_count},
        style_info=style_info)
    return vector_layer
def run(layers):
    """Risk plugin for tephra impact.

    :param layers: List of layers expected to contain a hazard layer of
        ash load and an exposure layer of building locations.
    :returns: Vector layer of buildings with a 'DAMAGE' level (0-3) and
        the interpolated 'ASHLOAD' value.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ash load
    E = get_exposure_layer(layers)  # Building locations

    # Interpolate hazard level to building locations
    H = assign_hazard_values_to_exposure_data(H, E, attribute_name='load')

    # Calculate building damage
    count3 = 0
    count2 = 0
    count1 = 0
    count0 = 0
    result = []
    for i in range(len(E)):
        #-------------------
        # Extract parameters
        #-------------------
        load = H.get_data('load', i)

        #------------------------
        # Compute damage level
        #------------------------

        # FIXME: The thresholds have been greatly reduced
        # for the purpose of demonstration. Any real analyis
        # should bring them back to 0, 90, 150, 300
        if 0.01 <= load < 0.5:
            # Loss of crops and livestock
            impact = 0
            count0 += 1
        elif 0.5 <= load < 2.0:
            # Cosmetic damage
            impact = 1
            count1 += 1
        elif 2.0 <= load < 10.0:
            # Partial building collapse
            impact = 2
            count2 += 1
        elif load >= 10.0:
            # Complete building collapse
            impact = 3
            count3 += 1
        else:
            # Loads below 0.01 (incl. zero) count as no impact
            impact = 0
            count0 += 1

        result.append({'DAMAGE': impact, 'ASHLOAD': load})

    # Create report.
    # FIX: the header row previously ended with a stray '</th>'; it now
    # closes with '</tr>' so the generated HTML table is well formed.
    # (Also removed a block of commented-out alternative labels.)
    impact_summary = ('<font size="3"> <table border="0" width="320px">'
                      ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                      ' <tr></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      '</table></font>' %
                      ('Beban abu', 'Gedung dampak',
                       '< 0.5 kg/m2', count0,
                       '0.5 - 2 kg/m2', count1,
                       '2 - 10 kg/m2', count2,
                       '> 10 kg/m2', count3))

    V = Vector(data=result,
               projection=E.get_projection(),
               geometry=E.get_geometry(),
               name='Estimated ashload damage',
               keywords={'impact_summary': impact_summary})
    return V
def run(self, layers):
    """Flood impact to roads (e.g. from Open Street Map).

    Duplicate of the roads flood impact function above; kept in sync.

    :param layers: List of layers expected to contain a flood hazard
        layer and an exposure layer of road locations.
    :returns: Vector layer of roads with a boolean flooded flag in
        self.target_field.
    """
    # Extract data
    H = get_hazard_layer(layers)  # Depth
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, E)

    # Extract relevant exposure data
    #attribute_names = I.get_attribute_names()
    attributes = I.get_data()

    N = len(I)

    # Calculate road impact
    count = 0
    #flooded_distance = 0
    for i in range(N):
        # Use interpolated polygon attribute
        atts = attributes[i]

        if 'FLOODPRONE' in atts:
            res = atts['FLOODPRONE']
            if res is None:
                x = False
            else:
                x = res.lower() == 'yes'
        else:
            # If there isn't a flood prone attribute,
            # assume that building is wet if inside polygon
            # as flag by generic attribute AFFECTED
            res = atts['Affected']
            if res is None:
                x = False
            else:
                x = res

        # Count all roads
        if x is True:
            # Count total affected roads
            count += 1

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = x
        # FIX: removed leftover debug statement
        # 'if i == 0: print attributes[0].keys()' which dumped attribute
        # names to stdout on every run.

    # Generate simple impact report
    table_body = [question,
                  TableRow([tr('Building type'),
                            tr('Temporarily closed'),
                            tr('Total')], header=True),
                  TableRow([tr('All'), count, N])]

    impact_summary = Table(table_body).toNewlineFreeString()
    #impact_table = impact_summary
    map_title = tr('Roads inundated')

    # Create style
    style_classes = [dict(label=tr('Not Flooded'), min=0, max=0,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=tr('Flooded'), min=1, max=1,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=I.get_projection(),
               geometry=I.get_geometry(),
               geometry_type=I.get_geometry_type(),
               name=tr('Estimated roads affected'),
               keywords={'impact_summary': impact_summary,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for flood population evacuation.

    :param layers: List of layers expected to contain
        H: Raster layer of volcano depth
        P: Raster layer of population data on the same grid as H

    Counts number of people exposed to flood levels exceeding
    specified threshold.

    :returns: Map of population exposed to flood levels exceeding the
        threshold and a table with the number of people evacuated and
        supplies required.
    """
    # Identify hazard and exposure layers
    H = get_hazard_layer(layers)  # Flood inundation
    E = get_exposure_layer(layers)

    question = get_question(H.get_name(), E.get_name(), self)

    # Input checks
    if not H.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % H.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. '
           'I got %s with layer '
           'type %s' % (H.get_name(),
                        H.get_geometry_name()))
    if not (H.is_polygon_data or H.is_point_data):
        raise Exception(msg)

    if H.is_point_data:
        # Use concentric circles around each volcano point
        radii = self.parameters['R [km]']

        centers = H.get_geometry()
        attributes = H.get_data()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        H = make_circular_polygon(centers, rad_m, attributes=attributes)
        #H.write_to_file('Evac_zones_%s.shp' % str(radii))  # To check

        category_title = 'Radius'
        category_header = tr('Distance [km]')
        category_names = radii

        name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
    else:
        # Use hazard map
        category_title = 'KRB'
        category_header = tr('Category')

        # FIXME (Ole): Change to English and use translation system
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']
        name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map
        # NOTE(review): 'attributes' is not read after this point;
        # nesting here preserves behaviour either way.
        attributes = H.get_data()

    # Get names of volcanos considered
    if name_attribute in H.get_attribute_names():
        D = {}
        for att in H.get_data():
            # Run through all polygons and get unique names
            D[att[name_attribute]] = None

        volcano_names = ''
        for name in D:
            volcano_names += '%s, ' % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr('Not specified in data')

    if not category_title in H.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (H.get_name(), category_title))
        raise InaSAFEError(msg)

    # Run interpolation function for polygon2raster
    P = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='population')

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a population count of zero
    new_attributes = H.get_data()
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count affected population per polygon and total
    evacuated = 0
    for attr in P.get_data():
        # Get population at this location
        pop = float(attr['population'])

        # Update population count for associated polygon
        poly_id = attr['polygon_id']
        new_attributes[poly_id][self.target_field] += pop

        # Update population count for each category
        cat = new_attributes[poly_id][category_title]
        categories[cat] += pop

    # Count totals
    total = int(numpy.sum(E.get_data(nan=0)))

    # Don't show digits less than a 1000
    if total > 1000:
        total = total // 1000 * 1000

    # Count number and cumulative for each zone
    cum = 0
    pops = {}
    cums = {}
    for name in category_names:
        if category_title == 'Radius':
            key = name * 1000  # Convert to meters
        else:
            key = name
        pop = int(categories[key])

        if pop > 1000:
            pop = pop // 1000 * 1000
        cum += pop
        if cum > 1000:
            cum = cum // 1000 * 1000

        pops[name] = pop
        cums[name] = cum

    # Use final accumulation as total number needing evac
    evacuated = cum

    # Calculate estimated needs based on BNPB Perka
    # 7/2008 minimum bantuan
    # FIXME (Ole): Refactor into one function to be shared
    rice = int(evacuated * 2.8)
    drinking_water = int(evacuated * 17.5)
    water = int(evacuated * 67)
    family_kits = int(evacuated / 5)
    toilets = int(evacuated / 20)

    # Generate impact report for the pdf map
    blank_cell = ''
    table_body = [question,
                  TableRow([tr('Volcanos considered'),
                            '%s' % volcano_names, blank_cell],
                           header=True),
                  TableRow([tr('People needing evacuation'),
                            '%s' % format_int(evacuated),
                            blank_cell], header=True),
                  TableRow([category_header,
                            tr('Total'), tr('Cumulative')],
                           header=True)]

    for name in category_names:
        table_body.append(TableRow([name,
                                    format_int(pops[name]),
                                    format_int(cums[name])]))

    table_body.extend([TableRow(tr('Map shows population affected in '
                                   'each of volcano hazard polygons.')),
                       TableRow([tr('Needs per week'), tr('Total'),
                                 blank_cell], header=True),
                       [tr('Rice [kg]'), format_int(rice), blank_cell],
                       [tr('Drinking Water [l]'),
                        format_int(drinking_water), blank_cell],
                       [tr('Clean Water [l]'), format_int(water),
                        blank_cell],
                       [tr('Family Kits'), format_int(family_kits),
                        blank_cell],
                       [tr('Toilets'), format_int(toilets), blank_cell]])
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total population %s in the viewable area')
                       % format_int(total),
                       tr('People need evacuation if they are within the '
                          'volcanic hazard zones.')])
    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('People affected by volcanic hazard zone')

    # Define classes for legend for flooded population counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    population_counts = [x[self.target_field] for x in new_attributes]
    # len(colours) interior breakpoints plus a leading 0, so the style
    # loop below can read cls[i] and cls[i + 1] for every colour.
    cls = [0] + numpy.linspace(1,
                               max(population_counts),
                               len(colours)).tolist()

    # Define style info for output polygons showing population counts
    style_classes = []
    for i, colour in enumerate(colours):
        lo = cls[i]
        hi = cls[i + 1]
        if i == 0:
            label = tr('0')
        else:
            label = tr('%i - %i') % (lo, hi)

        entry = dict(label=label, colour=colour, min=lo, max=hi,
                     transparency=50, size=1)
        style_classes.append(entry)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      legend_title=tr('Population Count'))

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=H.get_projection(),
               geometry=H.get_geometry(as_geometry_objects=True),
               name=tr('Population affected by volcanic hazard zone'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for Padang building survey.

    :param layers: List of layers expected to contain a hazard layer of
        ground shaking (MMI) and an exposure layer of building locations.
    :returns: Vector layer of buildings with the estimated percentage
        damage in self.target_field.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Map from different kinds of datasets to Padang vulnerability classes
    datatype = E.get_keywords()['datatype']
    vclass_tag = 'VCLASS'
    if datatype.lower() == 'osm':
        # Map from OSM attributes
        Emap = osm2padang(E)
    elif datatype.lower() == 'sigab':
        # Map from SIGAB attributes
        Emap = sigab2padang(E)
    else:
        Emap = E

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, Emap,
                                              attribute_name='MMI')

    # Extract relevant numerical data
    attributes = I.get_data()
    N = len(I)

    # Calculate building damage
    count_high = count_medium = count_low = count_none = 0
    for i in range(N):
        mmi = float(attributes[i]['MMI'])

        building_type = Emap.get_data(vclass_tag, i)
        damage_params = damage_curves[building_type]
        beta = damage_params['beta']
        median = damage_params['median']
        # Percentage damage from a lognormal fragility curve fitted to
        # the post-2009 Padang earthquake survey data.
        percent_damage = lognormal_cdf(mmi,
                                       median=median,
                                       sigma=beta) * 100

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = percent_damage

        # Calculate statistics
        if percent_damage < 10:
            count_none += 1

        if 10 <= percent_damage < 33:
            count_low += 1

        if 33 <= percent_damage < 66:
            count_medium += 1

        if 66 <= percent_damage:
            count_high += 1

    # Generate impact report
    table_body = [question,
                  TableRow([_('Buildings'), _('Total')],
                           header=True),
                  TableRow([_('All'), N]),
                  TableRow([_('No damage'), count_none]),
                  TableRow([_('Low damage'), count_low]),
                  TableRow([_('Medium damage'), count_medium]),
                  TableRow([_('High damage'), count_high])]
    table_body.append(TableRow(_('Notes'), header=True))
    table_body.append(_('Levels of impact are defined by post 2009 '
                        'Padang earthquake survey conducted by Geoscience '
                        'Australia and Institute of Teknologi Bandung.'))
    table_body.append(_('Unreinforced masonry is assumed where no '
                        'structural information is available.'))
    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = _('Earthquake damage to buildings')

    # Create style
    style_classes = [dict(label=_('No damage'), min=0, max=10,
                          colour='#00ff00', transparency=1),
                     dict(label=_('Low damage'), min=10, max=33,
                          colour='#ffff00', transparency=1),
                     dict(label=_('Medium damage'), min=33, max=66,
                          colour='#ffaa00', transparency=1),
                     dict(label=_('High damage'), min=66, max=100,
                          colour='#ff0000', transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=E.get_projection(),
               geometry=E.get_geometry(),
               name='Estimated pct damage',
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               style_info=style_info)
    return V
def run(self):
    """Run classified population evacuation Impact Function.

    Counts number of people exposed to each hazard zones.

    :returns: Map of population exposed to each hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
    """
    # Value from layer's keywords
    self.hazard_class_attribute = self.hazard.keyword('field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')

    # TODO: Remove check to self.validate (Ismail)
    # Input checks
    message = tr(
        'Input hazard must be a polygon layer. I got %s with layer type '
        '%s' % (self.hazard.name,
                self.hazard.layer.get_geometry_name()))
    if not self.hazard.layer.is_polygon_data:
        raise Exception(message)

    # Check if hazard_class_attribute exists in hazard_layer
    if (self.hazard_class_attribute not in
            self.hazard.layer.get_attribute_names()):
        message = tr(
            'Hazard data %s does not contain expected hazard '
            'zone attribute "%s". Please change it in the option. ' %
            (self.hazard.name, self.hazard_class_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        'vector_hazard_classification')
    # Get the dictionary that contains the definition of the classification
    vector_hazard_classification = definition(vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification['classes']

    # Initialize OrderedDict of affected buildings
    self.affected_population = OrderedDict()
    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class['name']] = \
                self.hazard_class_mapping.pop(vector_hazard_class['key'])
            # Adding the class name as a key in affected_building
            self.affected_population[vector_hazard_class['name']] = 0

    # Interpolated layer represents grid cell that lies in the polygon
    interpolated_layer, covered_exposure_layer = \
        assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=self.target_field
        )

    # Count total affected population per hazard zone
    for row in interpolated_layer.get_data():
        # Get population at this location
        population = row[self.target_field]
        if not numpy.isnan(population):
            population = float(population)
            # Update population count for this hazard zone
            hazard_value = get_key_for_value(
                row[self.hazard_class_attribute],
                self.hazard_class_mapping)
            if not hazard_value:
                # NOTE(review): population in unmapped zones is labelled
                # not-affected but never accumulated anywhere — confirm
                # this is intentional.
                hazard_value = self._not_affected_value
            else:
                self.affected_population[hazard_value] += population

    # Count total population from exposure layer
    self.total_population = int(
        numpy.nansum(self.exposure.layer.get_data()))

    # Count total affected population
    total_affected_population = self.total_affected_population
    self.unaffected_population = (self.total_population -
                                  total_affected_population)

    self.minimum_needs = [
        parameter.serialize() for parameter in
        filter_needs_parameters(self.parameters['minimum needs'])
    ]

    # check for zero impact
    if total_affected_population == 0:
        message = no_population_impact_message(self.question)
        raise ZeroImpactException(message)

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    classes = create_classes(
        covered_exposure_layer.get_data().flat[:], len(colours))
    interval_classes = humanize_class(classes)
    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        # Default label; overwritten below (classes 1, 4 and 7 carry an
        # extra Low/Medium/High annotation).
        style_class['label'] = create_label(interval_classes[i])
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr('Low Population [%i people/cell]' % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr('Medium Population [%i people/cell]' % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr('High Population [%i people/cell]' % classes[i]))
        else:
            label = create_label(interval_classes[i])

        style_class['label'] = label
        style_class['quantity'] = classes[i]
        style_class['colour'] = colours[i]
        style_class['transparency'] = 0
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=None,
                      style_classes=style_classes,
                      style_type='rasterStyle')

    impact_data = self.generate_data()

    extra_keywords = {
        'target_field': self.target_field,
        'map_title': self.map_title(),
        'legend_notes': self.metadata().key('legend_notes'),
        'legend_units': self.metadata().key('legend_units'),
        'legend_title': self.metadata().key('legend_title')
    }

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    impact_layer = Raster(
        data=covered_exposure_layer.get_data(),
        projection=covered_exposure_layer.get_projection(),
        geotransform=covered_exposure_layer.get_geotransform(),
        name=self.map_title(),
        keywords=impact_layer_keywords,
        style_info=style_info)

    impact_layer.impact_data = impact_data
    self._impact = impact_layer
    return impact_layer
def run(self, layers):
    """Earthquake impact to buildings (e.g. from Open Street Map).

    :param layers: List of layers containing a ground-shaking hazard
        layer (MMI values) and a building-point exposure layer.
    :returns: Vector layer of buildings, each tagged in
        ``self.target_field`` with an impact class
        (0 = below reporting threshold, 1 = Low, 2 = Medium, 3 = High),
        with a tabular impact report stored in the layer keywords.
    """
    # Thresholds for MMI breakdown
    t0 = 6
    t1 = 7
    t2 = 8
    class_1 = 'Low'
    class_2 = 'Medium'
    class_3 = 'High'

    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking (MMI)
    E = get_exposure_layer(layers)  # Building locations
    question = get_question(H.get_name(), E.get_name(), self)

    # Define attribute name for hazard levels
    hazard_attribute = 'mmi'

    # Interpolate hazard level to building locations
    # (renamed from the ambiguous single-letter 'I')
    interpolated = assign_hazard_values_to_exposure_data(
        H, E, attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attributes = interpolated.get_data()
    N = len(interpolated)

    # Calculate building impact.
    # NOTE: the original version also initialised building_values /
    # contents_values dicts here that were never read; removed as dead code.
    lo = 0
    me = 0
    hi = 0
    for i in range(N):
        # Classify building according to shake level
        x = float(attributes[i][hazard_attribute])  # Interpolated MMI value
        if t0 <= x < t1:
            lo += 1
            cls = 1
        elif t1 <= x < t2:
            me += 1
            cls = 2
        elif t2 <= x:
            hi += 1
            cls = 3
        else:
            # Buildings not reported for MMI levels < t0
            cls = 0
        attributes[i][self.target_field] = cls

    # Generate simple impact report for unspecific buildings
    table_body = [
        question,
        TableRow(['Hazard Level', 'Buildings Affected'], header=True),
        TableRow([class_1, lo]),
        TableRow([class_2, me]),
        TableRow([class_3, hi])
    ]
    table_body.append(TableRow('Notes', header=True))
    table_body.append('High hazard is defined as shake levels greater '
                      'than %i on the MMI scale.' % t2)
    table_body.append('Medium hazard is defined as shake levels '
                      'between %i and %i on the MMI scale.' % (t1, t2))
    table_body.append('Low hazard is defined as shake levels '
                      'between %i and %i on the MMI scale.' % (t0, t1))
    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = 'Buildings affected'

    # Create style
    style_classes = [
        dict(label=class_1, value=1, colour='#ffff00', transparency=1),
        dict(label=class_2, value=2, colour='#ffaa00', transparency=1),
        dict(label=class_3, value=3, colour='#ff0000', transparency=1)
    ]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=interpolated.get_projection(),
               geometry=interpolated.get_geometry(),
               name='Estimated buildings affected',
               keywords={
                   'impact_summary': impact_summary,
                   'impact_table': impact_table,
                   'map_title': map_title,
                   'target_field': self.target_field,
                   'statistics_type': self.statistics_type,
                   'statistics_classes': self.statistics_classes
               },
               style_info=style_info)
    return V
def run(self):
    """Flood impact to buildings (e.g. from Open Street Map).

    Interpolates flood depth onto building locations, classifies each
    building as dry (0), flooded (1) or wet (2) against a user-supplied
    depth threshold, accumulates per-usage counts on ``self.buildings``
    and ``self.affected_buildings``, and returns a styled vector layer
    with the status written to ``self.target_field``.
    """
    self.validate()
    self.prepare()
    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    threshold = self.parameters['threshold'].value  # Flood threshold [m]
    verify(isinstance(threshold, float),
           'Expected thresholds to be a float. Got %s' % str(threshold))

    # Determine attribute name for hazard levels
    hazard_attribute = 'depth'

    # Interpolate hazard level to building locations
    interpolated_layer = assign_hazard_values_to_exposure_data(
        self.hazard.layer,
        self.exposure.layer,
        attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()
    total_features = len(interpolated_layer)

    # Try to get the structure class field from keywords; if absent,
    # fall back to the old get_osm_building_usage helper below.
    try:
        structure_class_field = self.exposure.keyword(
            'structure_class_field')
    except KeywordNotFoundError:
        structure_class_field = None

    # Building breakdown (total count per usage type)
    self.buildings = {}
    # Impacted building breakdown (per category, per usage type)
    self.affected_buildings = OrderedDict([(tr('Flooded'), {}),
                                           (tr('Wet'), {}),
                                           (tr('Dry'), {})])
    for i in range(total_features):
        # Get the interpolated depth
        water_depth = float(features[i]['depth'])
        # Status codes: 0 = dry, 1 = flooded (>= threshold), 2 = wet.
        # NOTE(review): the category lookup list below is ordered
        # [Dry, Flooded, Wet] to match these codes, not the display
        # order of affected_buildings.
        if water_depth <= 0:
            inundated_status = 0  # dry
        elif water_depth >= threshold:
            inundated_status = 1  # inundated
        else:
            inundated_status = 2  # wet

        # Count affected buildings by usage type if available
        if (structure_class_field in attribute_names and
                structure_class_field):
            usage = features[i].get(structure_class_field, None)
        else:
            usage = get_osm_building_usage(attribute_names, features[i])
        if usage is None or usage == 0:
            usage = 'unknown'
        if usage not in self.buildings:
            # First time we see this usage: register zeroed counters
            # for every impact category.
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict([
                    (tr('Buildings Affected'), 0)])
        # Count all buildings by type
        self.buildings[usage] += 1
        # Add calculated impact to existing attributes
        features[i][self.target_field] = inundated_status
        category = [tr('Dry'), tr('Flooded'), tr('Wet')][inundated_status]
        self.affected_buildings[category][usage][tr(
            'Buildings Affected')] += 1

    # Lump small entries and 'unknown' into 'other' category
    # Building threshold #2468
    postprocessors = self.parameters['postprocessors']
    building_postprocessors = postprocessors['BuildingType'][0]
    self.building_report_threshold = building_postprocessors.value[0].value
    self._consolidate_to_other()

    # Generate simple impact report
    impact_table = impact_summary = self.html_report()

    # For printing map purpose
    map_title = tr('Flooded buildings')
    legend_title = tr('Flooded structure status')
    legend_units = tr('(flooded, wet, or dry)')

    style_classes = [
        dict(label=tr('Dry (<= 0 m)'), value=0, colour='#1EFC7C',
             transparency=0, size=1),
        dict(label=tr('Wet (0 m - %.1f m)') % threshold, value=2,
             colour='#FF9900', transparency=0, size=1),
        dict(label=tr('Flooded (>= %.1f m)') % threshold, value=1,
             colour='#F31A1C', transparency=0, size=1)
    ]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.target_field,
        'map_title': map_title,
        'legend_title': legend_title,
        'legend_units': legend_units,
        'buildings_total': total_features,
        'buildings_affected': self.total_affected_buildings
    }
    self.set_if_provenance()
    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    vector_layer = Vector(data=features,
                          projection=interpolated_layer.get_projection(),
                          geometry=interpolated_layer.get_geometry(),
                          name=tr('Estimated buildings affected'),
                          keywords=impact_layer_keywords,
                          style_info=style_info)
    # Create vector layer and return
    self._impact = vector_layer
    return vector_layer
def run(self, layers):
    """Risk plugin for volcano hazard on building/structure.

    Input
      layers: List of layers expected to contain
          my_hazard: Hazard layer of volcano
          my_exposure: Vector layer of structure data on
          the same grid as my_hazard

    Counts number of building exposed to each volcano hazard zones.

    Return
      Map of building exposed to volcanic hazard zones
      Table with number of buildings affected
    """
    # Identify hazard and exposure layers
    my_hazard = get_hazard_layer(layers)  # Volcano hazard layer
    my_exposure = get_exposure_layer(layers)
    is_point_data = False

    question = get_question(my_hazard.get_name(),
                            my_exposure.get_name(),
                            self)

    # Input checks
    if not my_hazard.is_vector:
        msg = "Input hazard %s was not a vector layer as expected " % my_hazard.get_name()
        raise Exception(msg)

    msg = "Input hazard must be a polygon or point layer. I got %s " \
          "with layer type %s" % (
              my_hazard.get_name(),
              my_hazard.get_geometry_name(),
          )
    if not (my_hazard.is_polygon_data or my_hazard.is_point_data):
        raise Exception(msg)

    if my_hazard.is_point_data:
        # Point volcano: build concentric circular buffer polygons
        # around each center from the 'distances [km]' parameter.
        radii = self.parameters["distances [km]"]
        is_point_data = True
        centers = my_hazard.get_geometry()
        attributes = my_hazard.get_data()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        Z = make_circular_polygon(centers, rad_m, attributes=attributes)
        # To check
        category_title = "Radius"
        my_hazard = Z
        category_names = rad_m
        name_attribute = "NAME"  # As in e.g. the Smithsonian dataset
    else:
        # Use hazard map
        category_title = "KRB"
        # FIXME (Ole): Change to English and use translation system
        category_names = ["Kawasan Rawan Bencana III",
                          "Kawasan Rawan Bencana II",
                          "Kawasan Rawan Bencana I"]
        name_attribute = "GUNUNG"  # As in e.g. BNPB hazard map

    # Get names of volcanos considered
    if name_attribute in my_hazard.get_attribute_names():
        D = {}
        for att in my_hazard.get_data():
            # Run through all polygons and get unique names
            D[att[name_attribute]] = None
        volcano_names = ""
        for name in D:
            volcano_names += "%s, " % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr("Not specified in data")

    if not category_title in my_hazard.get_attribute_names():
        msg = "Hazard data %s did not contain expected " \
              "attribute %s " % (my_hazard.get_name(), category_title)
        # noinspection PyExceptionInherit
        raise InaSAFEError(msg)

    # Run interpolation function for polygon2raster
    P = assign_hazard_values_to_exposure_data(my_hazard, my_exposure)

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a building count of zero
    new_attributes = my_hazard.get_data()
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count impacted building per polygon and total
    for attr in P.get_data():
        # Update building count for associated polygon
        poly_id = attr["polygon_id"]
        if poly_id is not None:
            new_attributes[poly_id][self.target_field] += 1
            # Update building count for each category
            cat = new_attributes[poly_id][category_title]
            categories[cat] += 1

    # Count totals
    total = len(my_exposure)

    # Generate simple impact report
    blank_cell = ""
    table_body = [
        question,
        TableRow([tr("Volcanos considered"),
                  "%s" % volcano_names, blank_cell],
                 header=True),
        TableRow([tr("Distance [km]"), tr("Total"), tr("Cumulative")],
                 header=True),
    ]

    cum = 0
    for name in category_names:
        # prevent key error
        count = categories.get(name, 0)
        cum += count
        if is_point_data:
            # Convert buffer radius back from meters to kilometers
            # for display (integer division in Python 2).
            name = int(name) / 1000
        table_body.append(TableRow([name, format_int(count),
                                    format_int(cum)]))

    table_body.append(TableRow(tr("Map shows buildings affected in "
                                  "each of volcano hazard polygons.")))
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend(
        [
            TableRow(tr("Notes"), header=True),
            tr("Total number of buildings %s in the viewable "
               "area") % format_int(total),
            tr("Only buildings available in OpenStreetMap "
               "are considered."),
        ]
    )
    impact_summary = Table(table_body).toNewlineFreeString()
    building_counts = [x[self.target_field] for x in new_attributes]

    # Abort with a zero-impact message if no building was hit
    if max(building_counts) == 0 == min(building_counts):
        table_body = [
            question,
            TableRow([tr("Number of buildings affected"),
                      "%s" % format_int(cum), blank_cell],
                     header=True),
        ]
        my_message = Table(table_body).toNewlineFreeString()
        raise ZeroImpactException(my_message)

    # Create style
    colours = ["#FFFFFF", "#38A800", "#79C900", "#CEED00",
               "#FFCC00", "#FF6600", "#FF0000", "#7A0000"]
    classes = create_classes(building_counts, len(colours))
    interval_classes = humanize_class(classes)
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class["label"] = create_label(interval_classes[i])
        if i == 0:
            # Fully transparent class for zero-count polygons
            transparency = 100
            style_class["min"] = 0
        else:
            transparency = 30
            style_class["min"] = classes[i - 1]
        style_class["transparency"] = transparency
        style_class["colour"] = colours[i]
        style_class["max"] = classes[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type="graduatedSymbol")

    # For printing map purpose
    map_title = tr("Buildings affected by volcanic hazard zone")
    legend_notes = tr("Thousand separator is represented by %s" %
                      get_thousand_separator())
    legend_units = tr("(building)")
    legend_title = tr("Building count")

    # Create vector layer and return
    V = Vector(
        data=new_attributes,
        projection=my_hazard.get_projection(),
        geometry=my_hazard.get_geometry(as_geometry_objects=True),
        name=tr("Buildings affected by volcanic hazard zone"),
        keywords={
            "impact_summary": impact_summary,
            "impact_table": impact_table,
            "target_field": self.target_field,
            "map_title": map_title,
            "legend_notes": legend_notes,
            "legend_units": legend_units,
            "legend_title": legend_title,
        },
        style_info=style_info,
    )
    return V
def run(self, layers=None):
    """Risk plugin for classified polygon hazard on building/structure.

    Counts number of building exposed to each hazard zones.

    :param layers: List of layers expected to contain.
        * hazard_layer: Hazard layer
        * exposure_layer: Vector layer of structure data on
            the same grid as hazard_layer

    :returns: Map of building exposed to each hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    self.validate()
    self.prepare(layers)

    # Target Field (may be renamed below to avoid attribute clashes)
    target_field = 'zone'
    # Not affected string in the target field
    not_affected_value = 'Not Affected'

    # Parameters
    hazard_zone_attribute = self.parameters['hazard zone attribute']

    # Identify hazard and exposure layers
    hazard_layer = self.hazard
    exposure_layer = self.exposure

    # Input checks
    if not hazard_layer.is_polygon_data:
        message = (
            'Input hazard must be a polygon. I got %s with '
            'layer type %s' % (hazard_layer.get_name(),
                               hazard_layer.get_geometry_name()))
        raise Exception(message)

    # Check if hazard_zone_attribute exists in hazard_layer
    if hazard_zone_attribute not in hazard_layer.get_attribute_names():
        message = (
            'Hazard data %s does not contain expected attribute %s ' % (
                hazard_layer.get_name(), hazard_zone_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Find the target field name that has no conflict with default
    # target
    attribute_names = hazard_layer.get_attribute_names()
    target_field = get_non_conflicting_attribute_name(
        target_field, attribute_names)

    # Hazard zone categories from hazard layer (unique values; note
    # that set() makes the resulting order arbitrary)
    self.hazard_zones = list(
        set(hazard_layer.get_data(hazard_zone_attribute)))

    self.buildings = {}
    self.affected_buildings = OrderedDict()
    for hazard_zone in self.hazard_zones:
        self.affected_buildings[hazard_zone] = {}

    # Run interpolation function for polygon2polygon
    interpolated_layer = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer, attribute_name=None)

    # Extract relevant interpolated data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()

    for i in range(len(features)):
        # Features outside any hazard polygon get the sentinel value
        hazard_value = features[i][hazard_zone_attribute]
        if not hazard_value:
            hazard_value = not_affected_value
        features[i][target_field] = hazard_value
        usage = get_osm_building_usage(attribute_names, features[i])
        if usage is None:
            usage = tr('Unknown')
        if usage not in self.buildings:
            # First occurrence of this usage: register zero counters
            # for every hazard zone
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict(
                    [(tr('Buildings Affected'), 0)])
        self.buildings[usage] += 1
        # 'Not Affected' is not a key of affected_buildings, so those
        # features are counted in self.buildings only
        if hazard_value in self.affected_buildings.keys():
            self.affected_buildings[hazard_value][usage][
                tr('Buildings Affected')] += 1

    # Lump small entries and 'unknown' into 'other' category
    self._consolidate_to_other()

    # Generate simple impact report
    impact_summary = impact_table = self.generate_html_report()

    # Create style
    # NOTE(review): 'categories' aliases self.hazard_zones, so the
    # append below also adds 'Not Affected' to self.hazard_zones.
    categories = self.hazard_zones
    categories.append(not_affected_value)
    colours = color_ramp(len(categories))
    style_classes = []
    i = 0
    for hazard_zone in self.hazard_zones:
        style_class = dict()
        style_class['label'] = tr(hazard_zone)
        style_class['transparency'] = 0
        style_class['value'] = hazard_zone
        style_class['size'] = 1
        style_class['colour'] = colours[i]
        style_classes.append(style_class)
        i += 1

    # Override style info with new classes and name
    style_info = dict(target_field=target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Buildings affected by each hazard zone')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())
    legend_units = tr('(building)')
    legend_title = tr('Building count')

    # Create vector layer and return
    impact_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=tr('Buildings affected by each hazard zone'),
        keywords={'impact_summary': impact_summary,
                  'impact_table': impact_table,
                  'target_field': target_field,
                  'map_title': map_title,
                  'legend_notes': legend_notes,
                  'legend_units': legend_units,
                  'legend_title': legend_title},
        style_info=style_info)
    self._impact = impact_layer
    return impact_layer
def run(self):
    """Run volcano population evacuation Impact Function.

    Counts number of people exposed to volcano event.

    :returns: Map of population exposed to the volcano hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
        * RadiiException - When radii are not valid (they need to be
            monotonically increasing)
    """
    # Parameters
    self.hazard_class_attribute = self.hazard.keyword('field')
    name_attribute = self.hazard.keyword('volcano_name_field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')

    # Flag missing data in the exposure raster for later reporting
    if has_no_data(self.exposure.layer.get_data(nan=True)):
        self.no_data_warning = True

    # Input checks
    if not self.hazard.layer.is_polygon_data:
        message = tr(
            'Input hazard must be a polygon layer. I got %s with layer '
            'type %s' % (
                self.hazard.layer.get_name(),
                self.hazard.layer.get_geometry_name()))
        raise Exception(message)

    # Check if hazard_class_attribute exists in hazard_layer
    if (self.hazard_class_attribute not in
            self.hazard.layer.get_attribute_names()):
        message = tr(
            'Hazard data %s did not contain expected attribute '
            '%s ' % (
                self.hazard.layer.get_name(),
                self.hazard_class_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    features = self.hazard.layer.get_data()

    # Get names of volcanoes considered
    # (self.volcano_names is presumably a set initialised elsewhere
    # on the class -- TODO confirm)
    if name_attribute in self.hazard.layer.get_attribute_names():
        # Run through all polygons and get unique names
        for row in features:
            self.volcano_names.add(row[name_attribute])

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        'vector_hazard_classification')
    # Get the dictionary that contains the definition of the classification
    vector_hazard_classification = definition(vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification['classes']

    # Initialize OrderedDict of affected buildings
    self.affected_population = OrderedDict()
    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class['name']] = \
                self.hazard_class_mapping.pop(vector_hazard_class['key'])
            # Adding the class name as a key in affected_building
            self.affected_population[vector_hazard_class['name']] = 0

    # Run interpolation function for polygon2raster
    interpolated_layer, covered_exposure_layer = \
        assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=self.target_field)

    # Count affected population per polygon and total
    for row in interpolated_layer.get_data():
        # Get population at this location; NaN cells are skipped
        population = row[self.target_field]
        if not numpy.isnan(population):
            population = float(population)
            # Update population count for this hazard zone
            hazard_value = get_key_for_value(
                row[self.hazard_class_attribute],
                self.hazard_class_mapping)
            if not hazard_value:
                hazard_value = self._not_affected_value
            self.affected_population[hazard_value] += population

    # Count totals (nansum ignores no-data cells)
    self.total_population = int(
        numpy.nansum(self.exposure.layer.get_data()))
    self.unaffected_population = (
        self.total_population - self.total_affected_population)

    self.minimum_needs = [
        parameter.serialize() for parameter in
        filter_needs_parameters(self.parameters['minimum needs'])
    ]

    # check for zero impact
    if self.total_affected_population == 0:
        message = no_population_impact_message(self.question)
        raise ZeroImpactException(message)

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    classes = create_classes(
        covered_exposure_layer.get_data().flat[:], len(colours))
    interval_classes = humanize_class(classes)

    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        # Classes 1, 4 and 7 get descriptive Low/Medium/High labels
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr('Low Population [%i people/cell]' % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr('Medium Population [%i people/cell]' % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr('High Population [%i people/cell]' % classes[i]))
        else:
            label = create_label(interval_classes[i])
        style_class['label'] = label
        style_class['quantity'] = classes[i]
        style_class['colour'] = colours[i]
        style_class['transparency'] = 0
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(
        target_field=None,
        style_classes=style_classes,
        style_type='rasterStyle')

    impact_data = self.generate_data()

    extra_keywords = {
        'target_field': self.target_field,
        'map_title': self.map_title(),
        'legend_notes': self.metadata().key('legend_notes'),
        'legend_units': self.metadata().key('legend_units'),
        'legend_title': self.metadata().key('legend_title'),
        'total_needs': self.total_needs
    }

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    impact_layer = Raster(
        data=covered_exposure_layer.get_data(),
        projection=covered_exposure_layer.get_projection(),
        geotransform=covered_exposure_layer.get_geotransform(),
        name=self.map_title(),
        keywords=impact_layer_keywords,
        style_info=style_info
    )

    impact_layer.impact_data = impact_data
    self._impact = impact_layer
    return impact_layer
def run(self, layers):
    """Impact algorithm for flooding on roads.

    Converts the road (line) exposure layer to points, samples the
    interpolated flood depth at each point and assigns an 'AFFECTED'
    class in 0..9 based on how far the depth lies above the raster
    minimum.
    """
    # Extract data
    hazard = get_hazard_layer(layers)    # Depth
    roads = get_exposure_layer(layers)   # Road locations

    # Make the delta 10 times the size of the resolution.
    delta = abs(hazard.get_geotransform()[1]) * 10
    min_value, max_value = hazard.get_extrema()

    points = convert_line_to_points(roads, delta)

    # Interpolate hazard level to the road points
    hazard = assign_hazard_values_to_exposure_data(
        hazard, points, attribute_name='flood_lev')

    # Extract relevant numerical data
    coordinates = points.get_geometry()
    depth = hazard.get_data()

    # List attributes to carry forward to result layer
    attributes = points.get_attribute_names()

    # Classify each point: 'affected' ends up as the highest class
    # whose lower bound (level * difference) the normalized depth
    # exceeds, defaulting to class 0.
    num_classes = 10
    classes = range(num_classes)
    difference = (max_value - min_value) / num_classes

    road_impact = []
    for index in range(len(depth)):
        dep = float(depth[index]['flood_lev'])
        normalized_depth = dep - min_value
        affected = classes[0]
        for level in classes:
            if normalized_depth > level * difference:
                affected = level

        # Collect depth and calculated damage
        result_dict = {'AFFECTED': affected, 'DEPTH': dep}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = points.get_data(key, index)

        # Record result for this feature
        road_impact.append(result_dict)

    # Create report (intentionally empty summary)
    impact_summary = ('')

    # Create vector layer and return
    V = Vector(data=road_impact,
               projection=points.get_projection(),
               geometry=coordinates,
               name='Estimated roads affected',
               keywords={'impact_summary': impact_summary})
    return V
def run(self, layers=None):
    """Earthquake impact to buildings (e.g. from OpenStreetMap).

    :param layers: All the input layers (Hazard Layer and Exposure Layer)
    """
    self.validate()
    self.prepare(layers)

    LOGGER.debug("Running earthquake building impact")

    # merely initialize (used only when self.is_nexis is True)
    building_value = 0
    contents_value = 0

    # Thresholds for mmi breakdown.
    t0 = self.parameters["low_threshold"]
    t1 = self.parameters["medium_threshold"]
    t2 = self.parameters["high_threshold"]

    # Class Attribute and Label.
    class_1 = {"label": tr("Low"), "class": 1}
    class_2 = {"label": tr("Medium"), "class": 2}
    class_3 = {"label": tr("High"), "class": 3}

    # Extract data
    hazard_layer = self.hazard  # Depth
    exposure_layer = self.exposure  # Building locations

    # Define attribute name for hazard levels.
    hazard_attribute = "mmi"

    # Determine if exposure data have NEXIS attributes.
    attribute_names = exposure_layer.get_attribute_names()
    if "FLOOR_AREA" in attribute_names and "BUILDING_C" in attribute_names and "CONTENTS_C" in attribute_names:
        self.is_nexis = True
    else:
        self.is_nexis = False

    # Interpolate hazard level to building locations.
    interpolate_result = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer, attribute_name=hazard_attribute
    )

    # Extract relevant exposure data
    # attribute_names = interpolate_result.get_attribute_names()
    attributes = interpolate_result.get_data()

    interpolate_size = len(interpolate_result)

    # Building breakdown (total count per usage type)
    self.buildings = {}
    # Impacted building breakdown (per MMI category, per usage type)
    self.affected_buildings = OrderedDict([(tr("High"), {}),
                                           (tr("Medium"), {}),
                                           (tr("Low"), {})])
    for i in range(interpolate_size):
        # Classify building according to shake level
        # and calculate dollar losses
        if self.is_nexis:
            # Malformed NEXIS values fall back to 0.0 rather than abort
            try:
                area = float(attributes[i]["FLOOR_AREA"])
            except (ValueError, KeyError):
                # print 'Got area', attributes[i]['FLOOR_AREA']
                area = 0.0

            try:
                building_value_density = float(attributes[i]["BUILDING_C"])
            except (ValueError, KeyError):
                # print 'Got bld value', attributes[i]['BUILDING_C']
                building_value_density = 0.0

            try:
                contents_value_density = float(attributes[i]["CONTENTS_C"])
            except (ValueError, KeyError):
                # print 'Got cont value', attributes[i]['CONTENTS_C']
                contents_value_density = 0.0

            building_value = building_value_density * area
            contents_value = contents_value_density * area

        usage = get_osm_building_usage(attribute_names, attributes[i])

        if usage is None or usage == 0:
            usage = "unknown"

        if usage not in self.buildings:
            # First time we see this usage: register zeroed counters
            # for every impact category
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                if self.is_nexis:
                    self.affected_buildings[category][usage] = OrderedDict(
                        [
                            (tr("Buildings Affected"), 0),
                            (tr("Buildings value ($M)"), 0),
                            (tr("Contents value ($M)"), 0),
                        ]
                    )
                else:
                    self.affected_buildings[category][usage] = OrderedDict([(tr("Buildings Affected"), 0)])

        self.buildings[usage] += 1

    # fall back to 0.0 on None (TypeError), so such buildings drop
    # into the unreported branch below
        try:
            mmi = float(attributes[i][hazard_attribute])  # MMI
        except TypeError:
            mmi = 0.0

        if t0 <= mmi < t1:
            cls = 1
            category = tr("Low")
        elif t1 <= mmi < t2:
            cls = 2
            category = tr("Medium")
        elif t2 <= mmi:
            cls = 3
            category = tr("High")
        else:
            # Not reported for less than level t0
            # NOTE(review): the feature is still kept in the output
            # layer, just without a target_field class -- the newer
            # variant of this function deletes such features instead.
            continue

        attributes[i][self.target_field] = cls
        self.affected_buildings[category][usage][tr("Buildings Affected")] += 1
        if self.is_nexis:
            self.affected_buildings[category][usage][tr("Buildings value ($M)")] += building_value / 1000000.0
            self.affected_buildings[category][usage][tr("Contents value ($M)")] += contents_value / 1000000.0

    # Consolidate the small building usage groups < 25 to other
    self._consolidate_to_other()

    impact_table = impact_summary = self.generate_html_report()

    # Create style
    style_classes = [
        dict(label=class_1["label"], value=class_1["class"], colour="#ffff00", transparency=1),
        dict(label=class_2["label"], value=class_2["class"], colour="#ffaa00", transparency=1),
        dict(label=class_3["label"], value=class_3["class"], colour="#ff0000", transparency=1),
    ]
    style_info = dict(target_field=self.target_field, style_classes=style_classes, style_type="categorizedSymbol")

    # For printing map purpose
    map_title = tr("Building affected by earthquake")
    legend_notes = tr("The level of the impact is according to the "
                      "threshold the user input.")
    legend_units = tr("(mmi)")
    legend_title = tr("Impact level")

    # Create vector layer and return
    result_layer = Vector(
        data=attributes,
        projection=interpolate_result.get_projection(),
        geometry=interpolate_result.get_geometry(),
        name=tr("Estimated buildings affected"),
        keywords={
            "impact_summary": impact_summary,
            "impact_table": impact_table,
            "map_title": map_title,
            "legend_notes": legend_notes,
            "legend_units": legend_units,
            "legend_title": legend_title,
            "target_field": self.target_field,
            "statistics_type": self.statistics_type,
            "statistics_classes": self.statistics_classes,
        },
        style_info=style_info,
    )

    msg = "Created vector layer %s" % str(result_layer)
    LOGGER.debug(msg)
    self._impact = result_layer
    return result_layer
def run(self):
    """Earthquake impact to buildings (e.g. from OpenStreetMap).

    Classifies each building into Low/Medium/High impact from its
    interpolated MMI value, optionally accumulates dollar losses when
    the exposure has NEXIS attributes, removes buildings below the low
    threshold from the output, and returns a styled vector layer.
    """
    self.validate()
    self.prepare()

    LOGGER.debug('Running earthquake building impact')

    # merely initialize (used only when self.is_nexis is True)
    building_value = 0
    contents_value = 0

    # Thresholds for mmi breakdown.
    t0 = self.parameters['low_threshold'].value
    t1 = self.parameters['medium_threshold'].value
    t2 = self.parameters['high_threshold'].value

    # Class Attribute and Label.
    class_1 = {'label': tr('Low'), 'class': 1}
    class_2 = {'label': tr('Medium'), 'class': 2}
    class_3 = {'label': tr('High'), 'class': 3}

    # Define attribute name for hazard levels.
    hazard_attribute = 'mmi'

    # Determine if exposure data have NEXIS attributes.
    attribute_names = self.exposure.layer.get_attribute_names()
    if (
            'FLOOR_AREA' in attribute_names and
            'BUILDING_C' in attribute_names and
            'CONTENTS_C' in attribute_names):
        self.is_nexis = True
    else:
        self.is_nexis = False

    # Interpolate hazard level to building locations.
    interpolate_result = assign_hazard_values_to_exposure_data(
        self.hazard.layer,
        self.exposure.layer,
        attribute_name=hazard_attribute
    )

    # Extract relevant exposure data
    # Try to get the value from keyword, if not exist, it will not fail,
    # but use the old get_osm_building_usage
    try:
        structure_class_field = self.exposure.keyword(
            'structure_class_field')
    except KeywordNotFoundError:
        structure_class_field = None

    attributes = interpolate_result.get_data()

    interpolate_size = len(interpolate_result)

    # Building breakdown (total count per usage type)
    self.buildings = {}
    # Impacted building breakdown (per MMI category, per usage type)
    self.affected_buildings = OrderedDict([
        (tr('High'), {}),
        (tr('Medium'), {}),
        (tr('Low'), {})
    ])
    # Indices of features below the reporting threshold, to be
    # deleted from the output after the loop
    removed = []
    for i in range(interpolate_size):
        # Classify building according to shake level
        # and calculate dollar losses
        if self.is_nexis:
            # Malformed NEXIS values fall back to 0.0 rather than abort
            try:
                area = float(attributes[i]['FLOOR_AREA'])
            except (ValueError, KeyError):
                # print 'Got area', attributes[i]['FLOOR_AREA']
                area = 0.0

            try:
                building_value_density = float(attributes[i]['BUILDING_C'])
            except (ValueError, KeyError):
                # print 'Got bld value', attributes[i]['BUILDING_C']
                building_value_density = 0.0

            try:
                contents_value_density = float(attributes[i]['CONTENTS_C'])
            except (ValueError, KeyError):
                # print 'Got cont value', attributes[i]['CONTENTS_C']
                contents_value_density = 0.0

            building_value = building_value_density * area
            contents_value = contents_value_density * area

        if (structure_class_field in attribute_names and
                structure_class_field):
            usage = attributes[i].get(structure_class_field, None)
        else:
            usage = get_osm_building_usage(
                attribute_names, attributes[i])

        if usage is None or usage == 0:
            usage = 'unknown'

        if usage not in self.buildings:
            # First time we see this usage: register zeroed counters
            # for every impact category
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                if self.is_nexis:
                    self.affected_buildings[category][usage] = OrderedDict(
                        [
                            (tr('Buildings Affected'), 0),
                            (tr('Buildings value ($M)'), 0),
                            (tr('Contents value ($M)'), 0)])
                else:
                    self.affected_buildings[category][usage] = \
                        OrderedDict([(tr('Buildings Affected'), 0)])

        self.buildings[usage] += 1

        # Fall back to 0.0 when the interpolated value is None
        # (TypeError), which routes the feature to the removal branch
        try:
            mmi = float(attributes[i][hazard_attribute])  # MMI
        except TypeError:
            mmi = 0.0

        if t0 <= mmi < t1:
            cls = 1
            category = tr('Low')
        elif t1 <= mmi < t2:
            cls = 2
            category = tr('Medium')
        elif t2 <= mmi:
            cls = 3
            category = tr('High')
        else:
            # Not reported for less than level t0
            removed.append(i)
            continue

        attributes[i][self.target_field] = cls
        self.affected_buildings[
            category][usage][tr('Buildings Affected')] += 1
        if self.is_nexis:
            self.affected_buildings[category][usage][
                tr('Buildings value ($M)')] += building_value / 1000000.0
            self.affected_buildings[category][usage][
                tr('Contents value ($M)')] += contents_value / 1000000.0

    # remove uncategorized element
    # (delete from the end so earlier indices stay valid)
    removed.reverse()
    geometry = interpolate_result.get_geometry()
    for i in range(0, len(removed)):
        del attributes[removed[i]]
        del geometry[removed[i]]

    if len(attributes) < 1:
        raise ZeroImpactException()

    # Consolidate the small building usage groups < 25 to other
    self._consolidate_to_other()

    impact_table = impact_summary = self.html_report()

    # Create style
    style_classes = [dict(label=class_1['label'],
                          value=class_1['class'],
                          colour='#ffff00',
                          transparency=1),
                     dict(label=class_2['label'],
                          value=class_2['class'],
                          colour='#ffaa00',
                          transparency=1),
                     dict(label=class_3['label'],
                          value=class_3['class'],
                          colour='#ff0000',
                          transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Building affected by earthquake')
    legend_notes = tr('The level of the impact is according to the '
                      'threshold the user input.')
    legend_units = tr('(mmi)')
    legend_title = tr('Impact level')

    # Create vector layer and return
    result_layer = Vector(
        data=attributes,
        projection=interpolate_result.get_projection(),
        geometry=geometry,
        name=tr('Estimated buildings affected'),
        keywords={
            'impact_summary': impact_summary,
            'impact_table': impact_table,
            'map_title': map_title,
            'legend_notes': legend_notes,
            'legend_units': legend_units,
            'legend_title': legend_title,
            'target_field': self.target_field,
            'statistics_type': self.statistics_type,
            'statistics_classes': self.statistics_classes},
        style_info=style_info)

    msg = 'Created vector layer %s' % str(result_layer)
    LOGGER.debug(msg)
    self._impact = result_layer
    return result_layer
def run(self, layers):
    """Impact algorithm: classify roads by interpolated flood depth.

    :param layers: List of layers expected to contain:
        * H: raster layer of flood depth
        * R: vector (line) layer of road locations

    :returns: Vector point layer where each road point carries an
        ``AFFECTED`` class (0..9) and the interpolated ``DEPTH``, plus
        all original road attributes.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Depth
    R = get_exposure_layer(layers)  # Road locations (lines)

    # Make the delta 10 times the size of the resolution.
    delta = abs(H.get_geotransform()[1]) * 10
    min_value, max_value = H.get_extrema()

    # Densify road lines into points spaced by delta
    E = convert_line_to_points(R, delta)

    # Interpolate hazard level to road point locations
    H = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='flood_lev')

    # Extract relevant numerical data
    coordinates = E.get_geometry()
    depth = H.get_data()
    N = len(depth)

    # List attributes to carry forward to result layer
    attributes = E.get_attribute_names()

    # Calculate road impact: bin each point's depth into one of
    # num_classes equal-width classes spanning [min_value, max_value].
    road_impact = []
    num_classes = 10
    classes = range(num_classes)
    difference = (max_value - min_value) / num_classes

    for i in range(N):
        dep = float(depth[i]['flood_lev'])

        # Invariant w.r.t. the class loop below, so computed once here
        # (was recomputed on every iteration of the inner loop).
        normalised_depth = dep - min_value

        # Highest class whose lower bound is exceeded; class 0 otherwise.
        affected = classes[0]
        for level in classes:
            if normalised_depth > level * difference:
                affected = level

        # Collect depth and computed class
        result_dict = {'AFFECTED': affected,
                       'DEPTH': dep}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = E.get_data(key, i)

        # Record result for this feature
        road_impact.append(result_dict)

    # Create report (intentionally empty for this impact function)
    impact_summary = ''

    # Create vector layer and return
    V = Vector(data=road_impact,
               projection=E.get_projection(),
               geometry=coordinates,
               name='Estimated roads affected',
               keywords={'impact_summary': impact_summary})
    return V
def test_data_resampling_example(self):
    """Raster data is unchanged when going through geonode

    Round-trips a hazard raster and an exposure point layer through
    upload/download, then checks data, extrema and interpolated depths
    against the values read directly from file.
    """

    # Name file names for hazard level, exposure and expected fatalities
    hazard_filename = os.path.join(TESTDATA, '..', 'hazard',
                                   'maumere_aos_depth_20m_land_wgs84.asc')
    exposure_filename = os.path.join(TESTDATA, 'maumere_pop_prj.shp')

    #------------
    # Hazard data
    #------------
    # Read hazard input data for reference
    H_ref = read_layer(hazard_filename)
    A_ref = H_ref.get_data()
    depth_min_ref, depth_max_ref = H_ref.get_extrema()

    # Upload to internal geonode
    hazard_layer = save_to_geonode(hazard_filename, user=self.user)
    hazard_name = '%s:%s' % (hazard_layer.workspace, hazard_layer.name)

    # Download data again
    bbox = get_bounding_box_string(hazard_filename)  # The biggest
    H = download(INTERNAL_SERVER_URL, hazard_name, bbox)
    A = H.get_data()
    depth_min, depth_max = H.get_extrema()

    # FIXME (Ole): The layer read from file is single precision only:
    # Issue #17
    # Here's the explanation why interpolation below produce slightly
    # different results (but why?)
    # The layer read from file is single precision which may be due to
    # the way it is converted from ASC to TIF. In other words the
    # problem may be in raster.write_to_file. Float64 is
    # specified there, so this is a mystery.
    #print 'A', A.dtype          # Double precision
    #print 'A_ref', A_ref.dtype  # Single precision

    # Compare extrema to values from numpy array
    assert numpy.allclose(depth_max, numpy.nanmax(A),
                          rtol=1.0e-12, atol=1.0e-12)
    assert numpy.allclose(depth_max_ref, numpy.nanmax(A_ref),
                          rtol=1.0e-12, atol=1.0e-12)

    # Compare to reference
    assert numpy.allclose([depth_min, depth_max],
                          [depth_min_ref, depth_max_ref],
                          rtol=1.0e-12, atol=1.0e-12)

    # Compare extrema to values read off QGIS for this layer
    assert numpy.allclose([depth_min, depth_max], [0.0, 16.68],
                          rtol=1.0e-6, atol=1.0e-10)

    # Investigate difference visually
    #from matplotlib.pyplot import matshow, show
    #matshow(A)
    #matshow(A_ref)
    #matshow(A - A_ref)
    #show()

    # Element-wise check: raise with exact coordinates/values on the
    # first non-nan mismatch so the failing cell can be located.
    #print
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            if not numpy.isnan(A[i, j]):
                err = abs(A[i, j] - A_ref[i, j])
                if err > 0:
                    msg = ('%i, %i: %.15f, %.15f, %.15f'
                           % (i, j, A[i, j], A_ref[i, j], err))
                    raise Exception(msg)
            #if A[i,j] > 16:
            #    print i, j, A[i, j], A_ref[i, j]

    # Compare elements (nan & numbers)
    id_nan = numpy.isnan(A)
    id_nan_ref = numpy.isnan(A_ref)
    assert numpy.all(id_nan == id_nan_ref)
    # NOTE(review): unary minus on a boolean mask (-id_nan) is the old
    # numpy spelling of logical not; modern numpy requires ~id_nan.
    # Left as-is here — confirm the pinned numpy version before changing.
    assert numpy.allclose(A[-id_nan], A_ref[-id_nan],
                          rtol=1.0e-15, atol=1.0e-15)

    #print 'MAX', A[245, 283], A_ref[245, 283]
    #print 'MAX: %.15f %.15f %.15f' %(A[245, 283], A_ref[245, 283])
    assert numpy.allclose(A[245, 283], A_ref[245, 283],
                          rtol=1.0e-15, atol=1.0e-15)

    #--------------
    # Exposure data
    #--------------
    # Read exposure input data for reference
    E_ref = read_layer(exposure_filename)

    # Upload to internal geonode
    exposure_layer = save_to_geonode(exposure_filename, user=self.user)
    exposure_name = '%s:%s' % (exposure_layer.workspace,
                               exposure_layer.name)

    # Download data again
    E = download(INTERNAL_SERVER_URL, exposure_name, bbox)

    # Check exposure data against reference
    coordinates = E.get_geometry()
    coordinates_ref = E_ref.get_geometry()
    assert numpy.allclose(coordinates, coordinates_ref,
                          rtol=1.0e-12, atol=1.0e-12)

    attributes = E.get_data()
    attributes_ref = E_ref.get_data()
    for i, att in enumerate(attributes):
        att_ref = attributes_ref[i]
        for key in att:
            assert att[key] == att_ref[key]

    # Test safe's interpolation function
    I = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='depth')
    icoordinates = I.get_geometry()

    I_ref = assign_hazard_values_to_exposure_data(H_ref, E_ref,
                                                  attribute_name='depth')
    icoordinates_ref = I_ref.get_geometry()

    assert numpy.allclose(coordinates, icoordinates,
                          rtol=1.0e-12, atol=1.0e-12)
    assert numpy.allclose(coordinates, icoordinates_ref,
                          rtol=1.0e-12, atol=1.0e-12)

    iattributes = I.get_data()
    assert numpy.allclose(icoordinates, coordinates)

    N = len(icoordinates)
    assert N == 891

    # Set tolerance for single precision until issue #17 has been fixed
    # It appears that the single precision leads to larger interpolation
    # errors
    rtol_issue17 = 2.0e-3
    atol_issue17 = 1.0e-4

    # Verify interpolated values with test result
    for i in range(N):
        interpolated_depth_ref = I_ref.get_data()[i]['depth']
        interpolated_depth = iattributes[i]['depth']

        assert nanallclose(interpolated_depth, interpolated_depth_ref,
                           rtol=rtol_issue17, atol=atol_issue17)

        pointid = attributes[i]['POINTID']

        if pointid == 263:
            #print i, pointid, attributes[i],
            #print interpolated_depth, coordinates[i]

            # Check that location is correct
            assert numpy.allclose(coordinates[i],
                                  [122.20367299, -8.61300358],
                                  rtol=1.0e-7, atol=1.0e-12)

            # This is known to be outside inundation area so should
            # near zero
            assert numpy.allclose(interpolated_depth, 0.0,
                                  rtol=1.0e-12, atol=1.0e-12)

        if pointid == 148:
            # Check that location is correct
            #print coordinates[i]
            assert numpy.allclose(coordinates[i],
                                  [122.2045912, -8.608483265],
                                  rtol=1.0e-7, atol=1.0e-12)

            # This is in an inundated area with a surrounding depths of
            # 4.531, 3.911
            # 2.675, 2.583
            assert interpolated_depth < 4.531
            assert interpolated_depth < 3.911
            assert interpolated_depth > 2.583
            assert interpolated_depth > 2.675
            #print interpolated_depth

            # This is a characterisation test for bilinear interpolation
            assert numpy.allclose(interpolated_depth, 3.62477215491,
                                  rtol=rtol_issue17, atol=1.0e-12)

        # Check that interpolated points are within range
        msg = ('Interpolated depth %f at point %i was outside extrema: '
               '[%f, %f]. ' % (interpolated_depth, i,
                               depth_min, depth_max))

        if not numpy.isnan(interpolated_depth):
            assert depth_min <= interpolated_depth <= depth_max, msg
def run(self, layers):
    """Risk plugin for volcano hazard on building/structure.

    Counts number of building exposed to each volcano hazard zones.

    :param layers: List of layers expected to contain.
        * hazard_layer: Hazard layer of volcano
        * exposure_layer: Vector layer of structure data on
          the same grid as hazard_layer

    :returns: Map of building exposed to volcanic hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    # Identify hazard and exposure layers
    hazard_layer = get_hazard_layer(layers)  # Volcano hazard layer
    exposure_layer = get_exposure_layer(layers)
    is_point_data = False

    question = get_question(
        hazard_layer.get_name(), exposure_layer.get_name(), self)

    # Input checks
    if not hazard_layer.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % hazard_layer.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. I got %s '
           'with layer type %s' % (hazard_layer.get_name(),
                                   hazard_layer.get_geometry_name()))
    if not (hazard_layer.is_polygon_data or hazard_layer.is_point_data):
        raise Exception(msg)

    if hazard_layer.is_point_data:
        # Use concentric circles around each volcano point
        radii = self.parameters['distances [km]']
        is_point_data = True

        centers = hazard_layer.get_geometry()
        attributes = hazard_layer.get_data()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        hazard_layer = buffer_points(centers, rad_m, data_table=attributes)

        # To check
        category_title = 'Radius'
        category_names = rad_m
        name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
    else:
        # Use hazard map
        category_title = 'KRB'
        # FIXME (Ole): Change to English and use translation system
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']
        name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map

    # Get names of volcanoes considered
    if name_attribute in hazard_layer.get_attribute_names():
        volcano_name_list = []
        for row in hazard_layer.get_data():
            # Run through all polygons and get unique names
            # NOTE(review): a plain list is appended to here, so repeated
            # names are NOT deduplicated despite the comment — confirm
            # whether duplicates in the report header are acceptable.
            volcano_name_list.append(row[name_attribute])

        volcano_names = ''
        for name in volcano_name_list:
            volcano_names += '%s, ' % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr('Not specified in data')

    # Check if category_title exists in hazard_layer
    if not category_title in hazard_layer.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (hazard_layer.get_name(), category_title))
        # noinspection PyExceptionInherit
        raise InaSAFEError(msg)

    # Find the target field name that has no conflict with default
    # target
    attribute_names = hazard_layer.get_attribute_names()
    new_target_field = get_non_conflicting_attribute_name(
        self.target_field, attribute_names)
    self.target_field = new_target_field

    # Run interpolation function for polygon2raster
    interpolated_layer = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer)

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a building count of zero
    new_data_table = hazard_layer.get_data()
    categories = {}
    for row in new_data_table:
        row[self.target_field] = 0
        category = row[category_title]
        categories[category] = 0

    # Count impacted building per polygon and total
    for row in interpolated_layer.get_data():
        # Update building count for associated polygon
        poly_id = row['polygon_id']
        if poly_id is not None:
            new_data_table[poly_id][self.target_field] += 1

            # Update building count for each category
            category = new_data_table[poly_id][category_title]
            categories[category] += 1

    # Count totals
    total = len(exposure_layer)

    # Generate simple impact report
    blank_cell = ''
    table_body = [question,
                  TableRow([tr('Volcanoes considered'),
                            '%s' % volcano_names, blank_cell],
                           header=True),
                  TableRow([tr('Distance [km]'), tr('Total'),
                            tr('Cumulative')],
                           header=True)]

    cumulative = 0
    for name in category_names:
        # prevent key error
        count = categories.get(name, 0)

        cumulative += count
        if is_point_data:
            # Display buffer radius back in km for point hazards
            name = int(name) / 1000
        table_body.append(TableRow([name, format_int(count),
                                    format_int(cumulative)]))

    table_body.append(TableRow(tr('Map shows buildings affected in '
                                  'each of volcano hazard polygons.')))
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total number of buildings %s in the viewable '
                          'area') % format_int(total),
                       tr('Only buildings available in OpenStreetMap '
                          'are considered.')])

    impact_summary = Table(table_body).toNewlineFreeString()
    building_counts = [x[self.target_field] for x in new_data_table]

    # No building was affected at all: report and bail out
    if max(building_counts) == 0 == min(building_counts):
        table_body = [
            question,
            TableRow([tr('Number of buildings affected'),
                      '%s' % format_int(cumulative), blank_cell],
                     header=True)]
        my_message = Table(table_body).toNewlineFreeString()
        raise ZeroImpactException(my_message)

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']

    # Create Classes
    classes = create_classes(building_counts, len(colours))
    # Create Interval Classes
    interval_classes = humanize_class(classes)

    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        if i == 0:
            style_class['min'] = 0
        else:
            style_class['min'] = classes[i - 1]
        style_class['transparency'] = 30
        style_class['colour'] = colours[i]
        style_class['max'] = classes[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='graduatedSymbol')

    # For printing map purpose
    map_title = tr('Buildings affected by volcanic hazard zone')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())
    legend_units = tr('(building)')
    legend_title = tr('Building count')

    # Create vector layer and return
    impact_layer = Vector(
        data=new_data_table,
        projection=hazard_layer.get_projection(),
        geometry=hazard_layer.get_geometry(as_geometry_objects=True),
        name=tr('Buildings affected by volcanic hazard zone'),
        keywords={'impact_summary': impact_summary,
                  'impact_table': impact_table,
                  'target_field': self.target_field,
                  'map_title': map_title,
                  'legend_notes': legend_notes,
                  'legend_units': legend_units,
                  'legend_title': legend_title},
        style_info=style_info)
    return impact_layer
def run(self, layers):
    """Risk plugin for volcano hazard on building/structure.

    Counts number of building exposed to each volcano hazard zones.

    :param layers: List of layers expected to contain.
        * hazard_layer: Hazard layer of volcano
        * exposure_layer: Vector layer of structure data on
          the same grid as hazard_layer

    :returns: Map of building exposed to volcanic hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    # Parameters
    not_affected_value = self.parameters['Not affected value']
    radii = self.parameters['distances [km]']
    target_field = self.parameters['target field']
    name_attribute = self.parameters['name attribute']
    hazard_zone_attribute = self.parameters['hazard zone attribute']

    # Identify hazard and exposure layers
    hazard_layer = get_hazard_layer(layers)  # Volcano hazard layer
    exposure_layer = get_exposure_layer(layers)  # Building exposure layer

    # Get question
    question = get_question(
        hazard_layer.get_name(), exposure_layer.get_name(), self)

    # Input checks
    if not hazard_layer.is_vector:
        message = ('Input hazard %s was not a vector layer as expected '
                   % hazard_layer.get_name())
        raise Exception(message)

    if not (hazard_layer.is_polygon_data or hazard_layer.is_point_data):
        message = (
            'Input hazard must be a polygon or point layer. I got %s with '
            'layer type %s' % (hazard_layer.get_name(),
                               hazard_layer.get_geometry_name()))
        raise Exception(message)

    if hazard_layer.is_point_data:
        # Use concentric circles
        centers = hazard_layer.get_geometry()
        attributes = hazard_layer.get_data()
        radii_meter = [x * 1000 for x in radii]  # Convert to meters
        hazard_layer = buffer_points(
            centers,
            radii_meter,
            hazard_zone_attribute,
            data_table=attributes)
        # To check
        category_names = radii_meter
    else:
        # FIXME (Ole): Change to English and use translation system
        # FIXME (Ismail) : Or simply use the values from the hazard layer
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']

    # The "not affected" bucket is reported as its own category
    category_names.append(not_affected_value)

    # Get names of volcanoes considered
    if name_attribute in hazard_layer.get_attribute_names():
        volcano_name_list = set()
        for row in hazard_layer.get_data():
            # Run through all polygons and get unique names
            volcano_name_list.add(row[name_attribute])
        volcano_names = ', '.join(volcano_name_list)
    else:
        volcano_names = tr('Not specified in data')

    # Check if category_title exists in hazard_layer
    if hazard_zone_attribute not in hazard_layer.get_attribute_names():
        message = (
            'Hazard data %s did not contain expected attribute %s ' % (
                hazard_layer.get_name(), hazard_zone_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Find the target field name that has no conflict with default
    # target
    attribute_names = hazard_layer.get_attribute_names()
    target_field = get_non_conflicting_attribute_name(
        target_field, attribute_names)

    # Run interpolation function for polygon2raster
    interpolated_layer = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer, attribute_name=None)

    # Extract relevant exposure data
    attribute_names = interpolated_layer.get_attribute_names()
    attribute_names_lower = [
        attribute_name.lower() for attribute_name in attribute_names]
    attributes = interpolated_layer.get_data()
    interpolate_size = len(interpolated_layer)

    building_per_category = {}
    building_usages = []
    other_sum = {}

    for category_name in category_names:
        building_per_category[category_name] = {}
        building_per_category[category_name]['total'] = 0
        other_sum[category_name] = 0

    # Building attribute that should be looked up to get the usage
    building_type_attributes = [
        'type',
        'amenity',
        'building_t',
        'office',
        'tourism',
        'leisure',
        'use',
    ]

    for i in range(interpolate_size):
        hazard_value = attributes[i][hazard_zone_attribute]
        if not hazard_value:
            hazard_value = not_affected_value
        attributes[i][target_field] = hazard_value

        if hazard_value in building_per_category.keys():
            building_per_category[hazard_value]['total'] += 1
        elif not hazard_value:
            # NOTE(review): unreachable — falsy hazard_value was replaced
            # by not_affected_value above, which is in the dict. Confirm
            # before removing.
            building_per_category[not_affected_value]['total'] += 1
        else:
            building_per_category[hazard_value] = {}
            building_per_category[hazard_value]['total'] = 1

        # Count affected buildings by usage type if available
        usage = None
        for building_type_attribute in building_type_attributes:
            if (
                    building_type_attribute in attribute_names_lower and (
                        usage is None or usage == 0)):
                attribute_index = attribute_names_lower.index(
                    building_type_attribute)
                field_name = attribute_names[attribute_index]
                usage = attributes[i][field_name]

        if (
                'building' in attribute_names_lower and (
                    usage is None or usage == 0)):
            attribute_index = attribute_names_lower.index('building')
            field_name = attribute_names[attribute_index]
            usage = attributes[i][field_name]
            if usage == 'yes':
                usage = 'building'

        if usage is None or usage == 0:
            usage = tr('unknown')

        if usage not in building_usages:
            building_usages.append(usage)
            # New usage type: register a zero count for every category
            for building in building_per_category.values():
                building[usage] = 0

        building_per_category[hazard_value][usage] += 1

    # Generate simple impact report
    blank_cell = ''
    table_body = [question,
                  TableRow([tr('Volcanoes considered'),
                            '%s' % volcano_names, blank_cell],
                           header=True)]

    table_headers = [tr('Building type')]
    table_headers += [tr(x) for x in category_names]
    table_headers += [tr('Total')]

    table_body += [TableRow(table_headers, header=True)]
    for building_usage in building_usages:
        building_usage_good = building_usage.replace('_', ' ')
        building_usage_good = building_usage_good.capitalize()

        building_sum = sum([
            building_per_category[category_name][building_usage]
            for category_name in category_names
        ])

        # Filter building type that has no less than 25 items
        if building_sum >= 25:
            row = [tr(building_usage_good)]
            building_sum = 0
            for category_name in category_names:
                building_sub_sum = building_per_category[category_name][
                    building_usage]
                row.append(format_int(building_sub_sum))
                building_sum += building_sub_sum
            row.append(format_int(building_sum))
            table_body.append(row)
        else:
            # Small usage groups are folded into an "Other" row below
            for category_name in category_names:
                if category_name in other_sum.keys():
                    other_sum[category_name] += building_per_category[
                        category_name][building_usage]
                else:
                    other_sum[category_name] = building_per_category[
                        category_name][building_usage]

    # Adding others building type to the report.
    other_row = [tr('Other')]
    other_building_total = 0
    for category_name in category_names:
        other_building_sum = other_sum[category_name]
        other_row.append(format_int(other_building_sum))
        other_building_total += other_building_sum

    other_row.append(format_int(other_building_total))
    table_body.append(other_row)

    all_row = [tr('Total')]
    all_row += [format_int(building_per_category[category_name]['total'])
                for category_name in category_names]
    total = sum([building_per_category[category_name]['total']
                 for category_name in category_names])
    all_row += [format_int(total)]
    table_body.append(TableRow(all_row, header=True))

    table_body += [TableRow(tr('Map shows buildings affected in each of '
                               'volcano hazard polygons.'))]

    impact_table = Table(table_body).toNewlineFreeString()
    impact_summary = impact_table

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total number of buildings %s in the viewable '
                          'area') % format_int(total),
                       tr('Only buildings available in OpenStreetMap '
                          'are considered.')])

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    colours = colours[::-1]  # flip
    colours = colours[:len(category_names)]

    style_classes = []
    i = 0
    for category_name in category_names:
        style_class = dict()
        style_class['label'] = tr(category_name)
        style_class['transparency'] = 0
        style_class['value'] = category_name
        style_class['size'] = 1

        # Clamp colour index to the available palette
        if i >= len(category_names):
            i = len(category_names) - 1
        style_class['colour'] = colours[i]
        i += 1

        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Buildings affected by volcanic hazard zone')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())
    legend_units = tr('(building)')
    legend_title = tr('Building count')

    # Create vector layer and return
    impact_layer = Vector(
        data=attributes,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(as_geometry_objects=True),
        name=tr('Buildings affected by volcanic hazard zone'),
        keywords={'impact_summary': impact_summary,
                  'impact_table': impact_table,
                  'target_field': target_field,
                  'map_title': map_title,
                  'legend_notes': legend_notes,
                  'legend_units': legend_units,
                  'legend_title': legend_title},
        style_info=style_info)
    return impact_layer
def run(self):
    """Run volcano point population evacuation Impact Function.

    Counts number of people exposed to volcano event.

    :returns: Map of population exposed to the volcano hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
        * RadiiException - When radii are not valid (they need to be
            monotonically increasing)
    """
    self.validate()
    self.prepare()

    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    # Parameters
    radii = self.parameters['distances'].value

    # Get parameters from layer's keywords
    volcano_name_attribute = self.hazard.keyword('volcano_name_field')

    # Input checks
    if not self.hazard.layer.is_point_data:
        msg = (
            'Input hazard must be a polygon or point layer. I got %s with '
            'layer type %s' % (self.hazard.name,
                               self.hazard.layer.get_geometry_name()))
        raise Exception(msg)

    data_table = self.hazard.layer.get_data()

    # Use concentric circles
    category_title = 'Radius'

    centers = self.hazard.layer.get_geometry()
    # NOTE(review): unlike the sibling implementation, the radii are
    # passed to buffer_points without a km->m conversion here — confirm
    # the expected unit of the 'distances' parameter.
    hazard_layer = buffer_points(
        centers,
        radii,
        category_title,
        data_table=data_table)

    # Get names of volcanoes considered
    if volcano_name_attribute in hazard_layer.get_attribute_names():
        volcano_name_list = []
        # Run through all polygons and get unique names
        for row in data_table:
            volcano_name_list.append(row[volcano_name_attribute])

        volcano_names = ''
        for radius in volcano_name_list:
            volcano_names += '%s, ' % radius
        self.volcano_names = volcano_names[:-2]  # Strip trailing ', '

    # Run interpolation function for polygon2raster
    interpolated_layer, covered_exposure_layer = \
        assign_hazard_values_to_exposure_data(
            hazard_layer,
            self.exposure.layer,
            attribute_name=self.target_field
        )

    # Initialise affected population per categories
    for radius in radii:
        category = 'Radius %s km ' % format_int(radius)
        self.affected_population[category] = 0

    if has_no_data(self.exposure.layer.get_data(nan=True)):
        self.no_data_warning = True

    # Count affected population per polygon and total
    for row in interpolated_layer.get_data():
        # Get population at this location
        population = row[self.target_field]
        if not numpy.isnan(population):
            population = float(population)
            # Update population count for this category
            category = 'Radius %s km ' % format_int(row[category_title])
            self.affected_population[category] += population

    # Count totals
    self.total_population = population_rounding(
        int(numpy.nansum(self.exposure.layer.get_data())))

    self.minimum_needs = [
        parameter.serialize() for parameter in
        filter_needs_parameters(self.parameters['minimum needs'])
    ]

    impact_table = impact_summary = self.html_report()

    # Create style
    colours = [
        '#FFFFFF', '#38A800', '#79C900', '#CEED00',
        '#FFCC00', '#FF6600', '#FF0000', '#7A0000'
    ]
    classes = create_classes(covered_exposure_layer.get_data().flat[:],
                             len(colours))
    interval_classes = humanize_class(classes)
    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        # Default label; overwritten below with a population-band label
        # for selected classes
        style_class['label'] = create_label(interval_classes[i])
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr('Low Population [%i people/cell]' % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr('Medium Population [%i people/cell]' % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr('High Population [%i people/cell]' % classes[i]))
        else:
            label = create_label(interval_classes[i])

        style_class['label'] = label
        style_class['quantity'] = classes[i]
        style_class['colour'] = colours[i]
        style_class['transparency'] = 0
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=None,
                      style_classes=style_classes,
                      style_type='rasterStyle')

    # For printing map purpose
    map_title = tr('People affected by the buffered point volcano')
    legend_title = tr('Population')
    legend_units = tr('(people per cell)')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())

    # Create vector layer and return
    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.target_field,
        'map_title': map_title,
        'legend_notes': legend_notes,
        'legend_units': legend_units,
        'legend_title': legend_title,
        'total_needs': self.total_needs
    }

    self.set_if_provenance()

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    impact_layer = Raster(
        data=covered_exposure_layer.get_data(),
        projection=covered_exposure_layer.get_projection(),
        geotransform=covered_exposure_layer.get_geotransform(),
        name=tr('People affected by the buffered point volcano'),
        keywords=impact_layer_keywords,
        style_info=style_info)

    self._impact = impact_layer
    return impact_layer
def run(self):
    """Run volcano point population evacuation Impact Function.

    Counts number of people exposed to volcano event.

    :returns: Map of population exposed to the volcano hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
        * RadiiException - When radii are not valid (they need to be
            monotonically increasing)
    """
    self.validate()
    self.prepare()

    # Parameters
    radii = self.parameters['distances'].value

    # Get parameters from layer's keywords
    volcano_name_attribute = self.hazard.keyword('volcano_name_field')

    # Input checks
    if not self.hazard.layer.is_point_data:
        msg = (
            'Input hazard must be a polygon or point layer. I got %s with '
            'layer type %s' % (
                self.hazard.name,
                self.hazard.layer.get_geometry_name()))
        raise Exception(msg)

    data_table = self.hazard.layer.get_data()

    # Use concentric circles
    category_title = 'Radius'

    centers = self.hazard.layer.get_geometry()
    rad_m = [x * 1000 for x in radii]  # Convert to meters
    hazard_layer = buffer_points(
        centers, rad_m, category_title, data_table=data_table)

    # Get names of volcanoes considered
    if volcano_name_attribute in hazard_layer.get_attribute_names():
        volcano_name_list = []
        # Run through all polygons and get unique names
        for row in data_table:
            volcano_name_list.append(row[volcano_name_attribute])

        volcano_names = ''
        for radius in volcano_name_list:
            volcano_names += '%s, ' % radius
        self.volcano_names = volcano_names[:-2]  # Strip trailing ', '

    # Run interpolation function for polygon2raster
    interpolated_layer, covered_exposure_layer = \
        assign_hazard_values_to_exposure_data(
            hazard_layer,
            self.exposure.layer,
            attribute_name=self.target_field
        )

    # Initialise affected population per categories
    for radius in rad_m:
        category = 'Distance %s km ' % format_int(radius)
        self.affected_population[category] = 0

    if has_no_data(self.exposure.layer.get_data(nan=True)):
        self.no_data_warning = True

    # Count affected population per polygon and total
    for row in interpolated_layer.get_data():
        # Get population at this location
        population = row[self.target_field]
        if not numpy.isnan(population):
            population = float(population)
            # Update population count for this category
            category = 'Distance %s km ' % format_int(
                row[category_title])
            self.affected_population[category] += population

    # Count totals
    self.total_population = population_rounding(
        int(numpy.nansum(self.exposure.layer.get_data())))

    self.minimum_needs = [
        parameter.serialize() for parameter in
        filter_needs_parameters(self.parameters['minimum needs'])
    ]

    impact_table = impact_summary = self.html_report()

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    classes = create_classes(
        covered_exposure_layer.get_data().flat[:], len(colours))
    interval_classes = humanize_class(classes)
    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        # Default label; overwritten below with a population-band label
        # for selected classes
        style_class['label'] = create_label(interval_classes[i])
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr('Low Population [%i people/cell]' % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr('Medium Population [%i people/cell]' % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr('High Population [%i people/cell]' % classes[i]))
        else:
            label = create_label(interval_classes[i])

        # Make the lowest class fully transparent
        if i == 0:
            transparency = 100
        else:
            transparency = 0

        style_class['label'] = label
        style_class['quantity'] = classes[i]
        style_class['colour'] = colours[i]
        style_class['transparency'] = transparency
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(
        target_field=None,
        style_classes=style_classes,
        style_type='rasterStyle')

    # For printing map purpose
    map_title = tr('People affected by the buffered point volcano')
    legend_title = tr('Population')
    legend_units = tr('(people per cell)')
    legend_notes = tr(
        'Thousand separator is represented by %s' %
        get_thousand_separator())

    # Create vector layer and return
    impact_layer = Raster(
        data=covered_exposure_layer.get_data(),
        projection=covered_exposure_layer.get_projection(),
        geotransform=covered_exposure_layer.get_geotransform(),
        name=tr('People affected by the buffered point volcano'),
        keywords={'impact_summary': impact_summary,
                  'impact_table': impact_table,
                  'target_field': self.target_field,
                  'map_title': map_title,
                  'legend_notes': legend_notes,
                  'legend_units': legend_units,
                  'legend_title': legend_title,
                  'total_needs': self.total_needs},
        style_info=style_info)

    self._impact = impact_layer
    return impact_layer
def run(self, layers):
    """Risk plugin for flood population evacuation.

    Input layers: List of layers expected to contain
        H: Vector polygon layer of flood prone areas
        P: Raster layer of population data on the same grid as H

    Counts number of people exposed to flood levels exceeding
    specified threshold.

    Return
        Map of population exposed to flood levels exceeding the threshold
        Table with number of people evacuated and supplies required
    """
    # Identify hazard and exposure layers
    H = get_hazard_layer(layers)  # Flood inundation
    E = get_exposure_layer(layers)

    question = get_question(H.get_name(), E.get_name(), self)

    # Check that hazard is polygon type
    if not H.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % H.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon layer. I got %s with layer '
           'type %s' % (H.get_name(), H.get_geometry_name()))
    if not H.is_polygon_data:
        raise Exception(msg)

    # Run interpolation function for polygon2raster.
    # P is a point layer (one point per exposure cell inside a polygon)
    # carrying the population value and the id of the containing polygon.
    P = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='population')

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a population count of zero
    new_attributes = H.get_data()

    category_title = 'FLOODPRONE'  # FIXME: Should come from keywords
    # NOTE(review): raises KeyError if a hazard polygon lacks the
    # FLOODPRONE attribute -- confirm the layer always carries it.
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count affected population per polygon, per category and total
    evacuated = 0
    for attr in P.get_data():
        # Get population at this location
        pop = float(attr['population'])

        # Update population count for associated polygon
        poly_id = attr['polygon_id']
        new_attributes[poly_id][self.target_field] += pop

        # Update population count for each category
        # NOTE(review): `categories` is accumulated here but never read
        # afterwards -- presumably left over from an earlier report format.
        cat = new_attributes[poly_id][category_title]
        categories[cat] += pop

        # Update total
        evacuated += pop

    # Count totals
    total = int(numpy.sum(E.get_data(nan=0, scaling=False)))

    # Don't show digits less than a 1000
    if total > 1000:
        total = total // 1000 * 1000
    if evacuated > 1000:
        evacuated = evacuated // 1000 * 1000

    # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
    # (weekly rice in kg, water in litres, family kits and toilets)
    rice = evacuated * 2.8
    drinking_water = evacuated * 17.5
    water = evacuated * 67
    family_kits = evacuated / 5
    toilets = evacuated / 20

    # Generate impact report for the pdf map
    table_body = [question,
                  TableRow([_('People needing evacuation'),
                            '%i' % evacuated],
                           header=True),
                  TableRow(_('Map shows population affected in each flood '
                             'prone area ')),
                  TableRow([_('Needs per week'), _('Total')],
                           header=True),
                  [_('Rice [kg]'), int(rice)],
                  [_('Drinking Water [l]'), int(drinking_water)],
                  [_('Clean Water [l]'), int(water)],
                  [_('Family Kits'), int(family_kits)],
                  [_('Toilets'), int(toilets)]]
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(_('Notes'), header=True),
                       _('Total population: %i') % total,
                       _('People need evacuation if in area identified '
                         'as "Flood Prone"'),
                       _('Minimum needs are defined in BNPB '
                         'regulation 7/2008')])
    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = _('People affected by flood prone areas')

    # Define classes for legend for flooded population counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    # NOTE(review): this reads the 'population' attribute from the hazard
    # polygons although the counts above were stored under
    # self.target_field -- correct only if target_field == 'population';
    # verify against the class definition. max() also fails if the
    # polygon list is empty.
    population_counts = [x['population'] for x in new_attributes]

    # len(colours) + 1 class edges: colour i spans cls[i]..cls[i + 1]
    cls = [0] + numpy.linspace(1,
                               max(population_counts),
                               len(colours)).tolist()

    # Define style info for output polygons showing population counts
    style_classes = []
    for i, colour in enumerate(colours):
        lo = cls[i]
        hi = cls[i + 1]

        if i == 0:
            label = _('0')
        else:
            label = _('%i - %i') % (lo, hi)

        entry = dict(label=label, colour=colour,
                     min=lo, max=hi, transparency=0, size=1)
        style_classes.append(entry)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      legend_title=_('Population Count'))

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=H.get_projection(),
               geometry=H.get_geometry(),
               name=_('Population affected by flood prone areas'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               style_info=style_info)
    return V
def run(self):
    """Classified hazard impact to buildings (e.g. from Open Street Map).

    Matches each building's interpolated hazard level against the three
    user-supplied categorical class values (low/medium/high), tags each
    building with an impact class (1-3) in ``self.target_field`` and an
    affected flag, tallies counts per building usage type, and returns a
    styled impact vector layer.
    """
    self.validate()
    self.prepare()

    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    # Value from layer's keywords
    # Try to get the value from keyword, if not exist, it will not fail,
    # but use the old get_osm_building_usage
    try:
        structure_class_field = self.exposure.keyword(
            'structure_class_field')
    except KeywordNotFoundError:
        structure_class_field = None

    # The 3 classes (categorical hazard values, low/medium/high)
    categorical_hazards = self.parameters['Categorical hazards'].value
    low_t = categorical_hazards[0].value
    medium_t = categorical_hazards[1].value
    high_t = categorical_hazards[2].value

    # Determine attribute name for hazard levels
    if self.hazard.layer.is_raster:
        hazard_attribute = 'level'
    else:
        hazard_attribute = None

    interpolated_result = assign_hazard_values_to_exposure_data(
        self.hazard.layer,
        self.exposure.layer,
        attribute_name=hazard_attribute,
        mode='constant')

    # Extract relevant exposure data
    attribute_names = interpolated_result.get_attribute_names()
    attributes = interpolated_result.get_data()

    buildings_total = len(interpolated_result)

    # Calculate building impact
    self.buildings = {}
    self.affected_buildings = OrderedDict([
        (tr('High Hazard Class'), {}),
        (tr('Medium Hazard Class'), {}),
        (tr('Low Hazard Class'), {})
    ])
    for i in range(buildings_total):
        # Prefer the keyword-declared usage field; fall back to the
        # legacy OSM usage heuristic
        if (structure_class_field and
                structure_class_field in attribute_names):
            usage = attributes[i][structure_class_field]
        else:
            usage = get_osm_building_usage(attribute_names, attributes[i])

        if usage is None or usage == 0:
            usage = 'unknown'

        # First time we see this usage: register zero counters for every
        # hazard class
        if usage not in self.buildings:
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict([
                    (tr('Buildings Affected'), 0)
                ])

        # Count all buildings by type
        self.buildings[usage] += 1
        attributes[i][self.target_field] = 0
        attributes[i][self.affected_field] = 0

        # NOTE(review): reads the 'level' attribute unconditionally; when
        # the hazard is vector data the interpolation above was called
        # with attribute_name=None -- confirm 'level' still exists then.
        level = float(attributes[i]['level'])
        level = float(numpy_round(level))

        # Exact match against the class values; anything else is treated
        # as not affected and skipped
        if level == high_t:
            impact_level = tr('High Hazard Class')
        elif level == medium_t:
            impact_level = tr('Medium Hazard Class')
        elif level == low_t:
            impact_level = tr('Low Hazard Class')
        else:
            continue

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = {
            tr('High Hazard Class'): 3,
            tr('Medium Hazard Class'): 2,
            tr('Low Hazard Class'): 1
        }[impact_level]
        attributes[i][self.affected_field] = 1

        # Count affected buildings by type
        self.affected_buildings[impact_level][usage][tr(
            'Buildings Affected')] += 1

    # Consolidate the small building usage groups < 25 to other
    # Building threshold #2468
    postprocessors = self.parameters['postprocessors']
    building_postprocessors = postprocessors['BuildingType'][0]
    self.building_report_threshold = building_postprocessors.value[0].value
    self._consolidate_to_other()

    # Create style
    style_classes = [
        dict(label=tr('High'), value=3, colour='#F31A1C',
             transparency=0, size=2, border_color='#969696',
             border_width=0.2),
        dict(label=tr('Medium'), value=2, colour='#F4A442',
             transparency=0, size=2, border_color='#969696',
             border_width=0.2),
        dict(label=tr('Low'), value=1, colour='#EBF442',
             transparency=0, size=2, border_color='#969696',
             border_width=0.2),
        dict(label=tr('Not Affected'), value=None, colour='#1EFC7C',
             transparency=0, size=2, border_color='#969696',
             border_width=0.2)
    ]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    impact_table = impact_summary = self.html_report()

    # For printing map purpose
    map_title = tr('Buildings affected')
    legend_title = tr('Structure inundated status')
    legend_units = tr('(Low, Medium, High)')

    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.affected_field,
        'map_title': map_title,
        'legend_units': legend_units,
        'legend_title': legend_title,
        'buildings_total': buildings_total,
        'buildings_affected': self.total_affected_buildings
    }

    self.set_if_provenance()

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    vector_layer = Vector(
        data=attributes,
        projection=self.exposure.layer.get_projection(),
        geometry=self.exposure.layer.get_geometry(),
        name=tr('Estimated buildings affected'),
        keywords=impact_layer_keywords,
        style_info=style_info)
    self._impact = vector_layer
    return vector_layer
def run(self, layers):
    """Earthquake impact to buildings (e.g. from Open Street Map).

    :param layers: List of layers containing the ground shaking hazard
        (MMI) and the building exposure layer.

    Classifies each building as Low/Medium/High from the interpolated
    MMI value using the low/medium/high thresholds in ``self.parameters``
    and, when the exposure carries NEXIS attributes (FLOOR_AREA,
    BUILDING_C, CONTENTS_C), accumulates building and contents dollar
    losses per class.

    :returns: Vector layer of buildings with the impact class stored in
        ``self.target_field`` plus an impact summary table.
    """
    LOGGER.debug('Running earthquake building impact')

    # Thresholds for mmi breakdown
    t0 = self.parameters['low_threshold']
    t1 = self.parameters['medium_threshold']
    t2 = self.parameters['high_threshold']

    class_1 = tr('Low')
    class_2 = tr('Medium')
    class_3 = tr('High')

    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking (MMI)
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Define attribute name for hazard levels
    hazard_attribute = 'mmi'

    # Determine if exposure data have NEXIS attributes
    attribute_names = E.get_attribute_names()
    if ('FLOOR_AREA' in attribute_names and
            'BUILDING_C' in attribute_names and
            'CONTENTS_C' in attribute_names):
        is_NEXIS = True
    else:
        is_NEXIS = False

    # Interpolate hazard level to building locations
    interpolated = assign_hazard_values_to_exposure_data(
        H, E, attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attributes = interpolated.get_data()
    N = len(interpolated)

    # Calculate building impact
    lo = 0  # buildings in the low class
    me = 0  # buildings in the medium class
    hi = 0  # buildings in the high class

    # Dollar totals per class (key 0 = below the lowest threshold)
    building_values = {}
    contents_values = {}
    for key in range(4):
        building_values[key] = 0
        contents_values[key] = 0

    for i in range(N):
        # Classify building according to shake level
        # and calculate dollar losses
        if is_NEXIS:
            # Missing or non-numeric NEXIS attributes fall back to 0.0
            # so one bad record cannot abort the whole run
            try:
                area = float(attributes[i]['FLOOR_AREA'])
            except (ValueError, KeyError):
                area = 0.0
            try:
                building_value_density = float(attributes[i]['BUILDING_C'])
            except (ValueError, KeyError):
                building_value_density = 0.0
            try:
                contents_value_density = float(attributes[i]['CONTENTS_C'])
            except (ValueError, KeyError):
                contents_value_density = 0.0

            building_value = building_value_density * area
            contents_value = contents_value_density * area

        # The interpolated MMI is None for buildings outside the hazard
        # extent; treat those as below the lowest threshold instead of
        # crashing (same guard as the sibling earthquake building
        # implementation in this file).
        try:
            x = float(attributes[i][hazard_attribute])  # MMI
        except TypeError:
            x = 0.0

        if t0 <= x < t1:
            lo += 1
            cls = 1
        elif t1 <= x < t2:
            me += 1
            cls = 2
        elif t2 <= x:
            hi += 1
            cls = 3
        else:
            # Not reported for less than level t0
            cls = 0

        attributes[i][self.target_field] = cls

        if is_NEXIS:
            # Accumulate values in 1M dollar units
            building_values[cls] += building_value
            contents_values[cls] += contents_value

    if is_NEXIS:
        # Convert to units of one million dollars
        for key in range(4):
            building_values[key] = int(building_values[key] / 1000000)
            contents_values[key] = int(contents_values[key] / 1000000)

    if is_NEXIS:
        # Generate simple impact report for NEXIS type buildings
        table_body = [question,
                      TableRow([tr('Hazard Level'),
                                tr('Buildings Affected'),
                                tr('Buildings value ($M)'),
                                tr('Contents value ($M)')],
                               header=True),
                      TableRow([class_1, lo,
                                building_values[1],
                                contents_values[1]]),
                      TableRow([class_2, me,
                                building_values[2],
                                contents_values[2]]),
                      TableRow([class_3, hi,
                                building_values[3],
                                contents_values[3]])]
    else:
        # Generate simple impact report for unspecific buildings
        table_body = [question,
                      TableRow([tr('Hazard Level'),
                                tr('Buildings Affected')],
                               header=True),
                      TableRow([class_1, str(lo)]),
                      TableRow([class_2, str(me)]),
                      TableRow([class_3, str(hi)])]

    table_body.append(TableRow(tr('Notes'), header=True))
    table_body.append(tr('High hazard is defined as shake levels greater '
                         'than %i on the MMI scale.') % t2)
    table_body.append(tr('Medium hazard is defined as shake levels '
                         'between %i and %i on the MMI scale.') % (t1, t2))
    table_body.append(tr('Low hazard is defined as shake levels '
                         'between %i and %i on the MMI scale.') % (t0, t1))
    if is_NEXIS:
        table_body.append(tr('Values are in units of 1 million Australian '
                             'Dollars'))

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = tr('Buildings affected')

    # Create style
    style_classes = [dict(label=class_1, min=1, max=1,
                          colour='#ffff00', transparency=1),
                     dict(label=class_2, min=2, max=2,
                          colour='#ffaa00', transparency=1),
                     dict(label=class_3, min=3, max=3,
                          colour='#ff0000', transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=interpolated.get_projection(),
               geometry=interpolated.get_geometry(),
               name=tr('Estimated buildings affected'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    LOGGER.debug('Created vector layer %s' % str(V))
    return V
def run(self, layers):
    """Earthquake impact to buildings (e.g. from OpenStreetMap).

    :param layers: All the input layers (Hazard Layer and Exposure Layer)
    """
    LOGGER.debug('Running earthquake building impact')

    # merely initialize (computed only on the NEXIS branch below; pre-set
    # so the names are always bound)
    building_value = 0
    contents_value = 0

    # Thresholds for mmi breakdown.
    t0 = self.parameters['low_threshold']
    t1 = self.parameters['medium_threshold']
    t2 = self.parameters['high_threshold']

    # Class Attribute and Label.
    class_1 = {'label': tr('Low'), 'class': 1}
    class_2 = {'label': tr('Medium'), 'class': 2}
    class_3 = {'label': tr('High'), 'class': 3}

    # Extract data
    hazard_layer = get_hazard_layer(layers)      # Ground shaking (MMI)
    exposure_layer = get_exposure_layer(layers)  # Building locations

    question = get_question(
        hazard_layer.get_name(), exposure_layer.get_name(), self
    )

    # Define attribute name for hazard levels.
    hazard_attribute = 'mmi'

    # Determine if exposure data have NEXIS attributes.
    attribute_names = exposure_layer.get_attribute_names()
    if ('FLOOR_AREA' in attribute_names and
            'BUILDING_C' in attribute_names and
            'CONTENTS_C' in attribute_names):
        is_nexis = True
    else:
        is_nexis = False

    # Interpolate hazard level to building locations.
    my_interpolate_result = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer, attribute_name=hazard_attribute
    )

    # Extract relevant exposure data
    attributes = my_interpolate_result.get_data()
    interpolate_size = len(my_interpolate_result)

    # Calculate building impact
    lo = 0  # buildings in the low class
    me = 0  # buildings in the medium class
    hi = 0  # buildings in the high class

    # Dollar totals per class (key 0 = below the lowest threshold)
    building_values = {}
    contents_values = {}
    for key in range(4):
        building_values[key] = 0
        contents_values[key] = 0

    for i in range(interpolate_size):
        # Classify building according to shake level
        # and calculate dollar losses
        if is_nexis:
            # Missing or non-numeric NEXIS attributes fall back to 0.0
            # so one bad record cannot abort the whole run
            try:
                area = float(attributes[i]['FLOOR_AREA'])
            except (ValueError, KeyError):
                area = 0.0
            try:
                building_value_density = float(attributes[i]['BUILDING_C'])
            except (ValueError, KeyError):
                building_value_density = 0.0
            try:
                contents_value_density = float(attributes[i]['CONTENTS_C'])
            except (ValueError, KeyError):
                contents_value_density = 0.0

            building_value = building_value_density * area
            contents_value = contents_value_density * area

        # Interpolated MMI is None outside the hazard extent; treat as 0.0
        try:
            x = float(attributes[i][hazard_attribute])  # MMI
        except TypeError:
            x = 0.0

        if t0 <= x < t1:
            lo += 1
            cls = 1
        elif t1 <= x < t2:
            me += 1
            cls = 2
        elif t2 <= x:
            hi += 1
            cls = 3
        else:
            # Not reported for less than level t0
            cls = 0

        attributes[i][self.target_field] = cls

        if is_nexis:
            # Accumulate values in 1M dollar units
            building_values[cls] += building_value
            contents_values[cls] += contents_value

    if is_nexis:
        # Convert to units of one million dollars
        for key in range(4):
            building_values[key] = int(building_values[key] / 1000000)
            contents_values[key] = int(contents_values[key] / 1000000)

    if is_nexis:
        # Generate simple impact report for NEXIS type buildings
        table_body = [question,
                      TableRow([tr('Hazard Level'),
                                tr('Buildings Affected'),
                                tr('Buildings value ($M)'),
                                tr('Contents value ($M)')],
                               header=True),
                      TableRow([class_1['label'], format_int(lo),
                                format_int(building_values[1]),
                                format_int(contents_values[1])]),
                      TableRow([class_2['label'], format_int(me),
                                format_int(building_values[2]),
                                format_int(contents_values[2])]),
                      TableRow([class_3['label'], format_int(hi),
                                format_int(building_values[3]),
                                format_int(contents_values[3])])]
    else:
        # Generate simple impact report for unspecific buildings
        table_body = [question,
                      TableRow([tr('Hazard Level'),
                                tr('Buildings Affected')],
                               header=True),
                      TableRow([class_1['label'], format_int(lo)]),
                      TableRow([class_2['label'], format_int(me)]),
                      TableRow([class_3['label'], format_int(hi)])]

    table_body.append(TableRow(tr('Notes'), header=True))
    table_body.append(tr('High hazard is defined as shake levels greater '
                         'than %i on the MMI scale.') % t2)
    table_body.append(tr('Medium hazard is defined as shake levels '
                         'between %i and %i on the MMI scale.') % (t1, t2))
    table_body.append(tr('Low hazard is defined as shake levels '
                         'between %i and %i on the MMI scale.') % (t0, t1))
    if is_nexis:
        table_body.append(tr('Values are in units of 1 million Australian '
                             'Dollars'))

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary

    # Create style
    style_classes = [dict(label=class_1['label'],
                          value=class_1['class'],
                          colour='#ffff00',
                          transparency=1),
                     dict(label=class_2['label'],
                          value=class_2['class'],
                          colour='#ffaa00',
                          transparency=1),
                     dict(label=class_3['label'],
                          value=class_3['class'],
                          colour='#ff0000',
                          transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Building affected by earthquake')
    legend_notes = tr('The level of the impact is according to the '
                      'threshold the user input.')
    legend_units = tr('(mmi)')
    legend_title = tr('Impact level')

    # Create vector layer and return
    result_layer = Vector(
        data=attributes,
        projection=my_interpolate_result.get_projection(),
        geometry=my_interpolate_result.get_geometry(),
        name=tr('Estimated buildings affected'),
        keywords={
            'impact_summary': impact_summary,
            'impact_table': impact_table,
            'map_title': map_title,
            'legend_notes': legend_notes,
            'legend_units': legend_units,
            'legend_title': legend_title,
            'target_field': self.target_field,
            'statistics_type': self.statistics_type,
            'statistics_classes': self.statistics_classes},
        style_info=style_info)
    msg = 'Created vector layer %s' % str(result_layer)
    LOGGER.debug(msg)
    return result_layer
def run(self, layers):
    """Impact plugin for categorised hazard impact on buildings.

    :param layers: List of layers containing the categorised hazard
        (values 1-3) and the building exposure layer.

    Tags each building with the hazard category interpolated at its
    location (3 = high, 2 = medium, 1 = low; anything else is reported
    as 'None') and produces a summary table of counts per category.

    :returns: Vector layer of buildings with the category stored in
        ``self.target_field``.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Value
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Interpolate hazard level to building locations
    H = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='hazard_lev',
                                              mode='constant')

    # Extract relevant numerical data
    coordinates = H.get_geometry()
    category = H.get_data()
    N = len(category)

    # Calculate building impact according to guidelines
    count_high = 0
    count_medium = 0
    count_low = 0
    building_impact = []
    for i in range(N):
        # Get category value
        val = float(category[i]['hazard_lev'])

        # Classify buildings according to value.
        # FIXME: it would be good if the affected levels were words, not
        # numbers, read from the hazard layer's category keywords.
        if val == 3:
            affected = 3
            count_high += 1
        elif val == 2:
            affected = 2
            count_medium += 1
        elif val == 1:
            affected = 1
            count_low += 1
        else:
            # NOTE(review): keeps the original mixed typing -- unmatched
            # categories get the string 'None' while matches get an int;
            # the graduated style below only maps the numeric values.
            affected = 'None'

        # Collect category and calculated impact for this feature
        result_dict = {self.target_field: affected,
                       'CATEGORY': val}
        building_impact.append(result_dict)

    # Generate impact summary
    table_body = [question,
                  TableRow([tr('Category'), tr('Affected')],
                           header=True),
                  TableRow([tr('High'), format_int(count_high)]),
                  TableRow([tr('Medium'), format_int(count_medium)]),
                  TableRow([tr('Low'), format_int(count_low)]),
                  TableRow([tr('All'), format_int(N)])]
    table_body.append(TableRow(tr('Notes'), header=True))
    table_body.append(tr('Categorised hazard has only 3'
                         ' classes, high, medium and low.'))

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = tr('Categorised hazard impact on buildings')

    # Create style
    # FIXME: it would be great to do categorised rather than graduated
    style_classes = [dict(label=tr('Low'), min=1, max=1,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=tr('Medium'), min=2, max=2,
                          colour='#FFA500', transparency=0, size=1),
                     dict(label=tr('High'), min=3, max=3,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    name = 'Buildings Affected'
    V = Vector(data=building_impact,
               projection=E.get_projection(),
               geometry=coordinates,
               geometry_type=E.geometry_type,
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field,
                         'statistics_type': self.statistics_type,
                         'statistics_classes': self.statistics_classes},
               name=name,
               style_info=style_info)
    return V
def run(self):
    """Risk plugin for volcano hazard on building/structure.

    Counts number of building exposed to each volcano hazard zones.

    :returns: Map of building exposed to volcanic hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    self.validate()
    self.prepare()

    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    # Get parameters from layer's keywords
    self.hazard_class_attribute = self.hazard.keyword('field')
    self.name_attribute = self.hazard.keyword('volcano_name_field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')

    # Try to get the value from keyword, if not exist, it will not fail,
    # but use the old get_osm_building_usage
    try:
        self.exposure_class_attribute = self.exposure.keyword(
            'structure_class_field')
    except KeywordNotFoundError:
        self.exposure_class_attribute = None

    # Input checks
    if not self.hazard.layer.is_polygon_data:
        message = (
            'Input hazard must be a polygon. I got %s with '
            'layer type %s' % (
                self.hazard.name,
                self.hazard.layer.get_geometry_name()))
        raise Exception(message)

    # Check if hazard_zone_attribute exists in hazard_layer
    if (self.hazard_class_attribute not in
            self.hazard.layer.get_attribute_names()):
        message = (
            'Hazard data %s did not contain expected attribute %s ' % (
                self.hazard.name, self.hazard_class_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Get names of volcanoes considered
    if self.name_attribute in self.hazard.layer.get_attribute_names():
        volcano_name_list = set()
        # Run through all polygons and get unique names
        for row in self.hazard.layer.get_data():
            volcano_name_list.add(row[self.name_attribute])
        self.volcano_names = ', '.join(volcano_name_list)
    else:
        self.volcano_names = tr('Not specified in data')

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        'vector_hazard_classification')
    # Get the dictionary that contains the definition of the classification
    vector_hazard_classification = definition(vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification['classes']

    # Initialize OrderedDict of affected buildings
    self.affected_buildings = OrderedDict()
    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class['name']] = \
                self.hazard_class_mapping.pop(vector_hazard_class['key'])
            # Adding the class name as a key in affected_building
            self.affected_buildings[vector_hazard_class['name']] = {}

    # Run interpolation function for polygon2raster
    interpolated_layer = assign_hazard_values_to_exposure_data(
        self.hazard.layer, self.exposure.layer)

    # Extract relevant exposure data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()

    self.buildings = {}

    for i in range(len(features)):
        # Get the hazard value based on the value mapping in keyword
        hazard_value = get_key_for_value(
            features[i][self.hazard_class_attribute],
            self.hazard_class_mapping)
        if not hazard_value:
            hazard_value = self._not_affected_value
        features[i][self.target_field] = get_string(hazard_value)

        # Prefer the keyword-declared usage field; fall back to the
        # legacy OSM usage heuristic
        if (self.exposure_class_attribute and
                self.exposure_class_attribute in attribute_names):
            usage = features[i][self.exposure_class_attribute]
        else:
            usage = get_osm_building_usage(attribute_names, features[i])

        if usage in [None, 'NULL', 'null', 'Null', 0]:
            usage = tr('Unknown')

        # First time we see this usage: register zero counters for every
        # hazard class
        if usage not in self.buildings:
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict([
                    (tr('Buildings Affected'), 0)
                ])

        self.buildings[usage] += 1

        if hazard_value in self.affected_buildings.keys():
            self.affected_buildings[hazard_value][usage][tr(
                'Buildings Affected')] += 1

    # Lump small entries and 'unknown' into 'other' category
    # Building threshold #2468
    postprocessors = self.parameters['postprocessors']
    building_postprocessors = postprocessors['BuildingType'][0]
    self.building_report_threshold = building_postprocessors.value[0].value
    self._consolidate_to_other()

    # Generate simple impact report
    impact_summary = impact_table = self.html_report()

    # Create style
    colours = [
        '#FFFFFF', '#38A800', '#79C900', '#CEED00',
        '#FFCC00', '#FF6600', '#FF0000', '#7A0000'
    ]
    colours = colours[::-1]  # flip
    colours = colours[:len(self.affected_buildings.keys())]
    style_classes = []
    i = 0
    for category_name in self.affected_buildings.keys():
        style_class = dict()
        style_class['label'] = tr(category_name)
        style_class['transparency'] = 0
        style_class['value'] = category_name
        style_class['size'] = 1

        # NOTE(review): this clamp can never fire -- i runs over exactly
        # len(affected_buildings) iterations and colours was truncated to
        # that same length above. Defensive leftover.
        if i >= len(self.affected_buildings.keys()):
            i = len(self.affected_buildings.keys()) - 1
        style_class['colour'] = colours[i]
        i += 1

        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Buildings affected by volcanic hazard zone')
    legend_title = tr('Building count')
    legend_units = tr('(building)')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())

    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.target_field,
        'map_title': map_title,
        'legend_notes': legend_notes,
        'legend_units': legend_units,
        'legend_title': legend_title
    }

    self.set_if_provenance()

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create vector layer and return
    impact_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=tr('Buildings affected by volcanic hazard zone'),
        keywords=impact_layer_keywords,
        style_info=style_info)

    self._impact = impact_layer
    return impact_layer
def run(self):
    """Run volcano population evacuation Impact Function.

    Counts number of people exposed to volcano event.

    :returns: Map of population exposed to the volcano hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
        * RadiiException - When radii are not valid (they need to be
          monotonically increasing)
    """
    self.validate()
    self.prepare()

    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    # Parameters
    self.hazard_class_attribute = self.hazard.keyword('field')
    name_attribute = self.hazard.keyword('volcano_name_field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')

    if has_no_data(self.exposure.layer.get_data(nan=True)):
        self.no_data_warning = True

    # Input checks
    if not self.hazard.layer.is_polygon_data:
        message = tr(
            'Input hazard must be a polygon layer. I got %s with layer '
            'type %s' % (
                self.hazard.layer.get_name(),
                self.hazard.layer.get_geometry_name()))
        raise Exception(message)

    # Check if hazard_class_attribute exists in hazard_layer
    if (self.hazard_class_attribute not in
            self.hazard.layer.get_attribute_names()):
        message = tr(
            'Hazard data %s did not contain expected attribute '
            '%s ' % (
                self.hazard.layer.get_name(),
                self.hazard_class_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    features = self.hazard.layer.get_data()

    # Get names of volcanoes considered
    if name_attribute in self.hazard.layer.get_attribute_names():
        volcano_name_list = []
        # Run through all polygons and get unique names
        for row in features:
            volcano_name_list.append(row[name_attribute])
        self.volcano_names = ', '.join(set(volcano_name_list))

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        'vector_hazard_classification')
    # Get the dictionary that contains the definition of the classification
    vector_hazard_classification = definition(vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification['classes']

    # Initialize OrderedDict of affected population per hazard class
    self.affected_population = OrderedDict()
    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class['key'] in self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class['name']] = \
                self.hazard_class_mapping.pop(vector_hazard_class['key'])
            # Adding the class name as a key in affected_population
            self.affected_population[vector_hazard_class['name']] = 0

    # Run interpolation function for polygon2raster
    interpolated_layer, covered_exposure_layer = \
        assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=self.target_field)

    # Count affected population per polygon and total
    for row in interpolated_layer.get_data():
        # Get population at this location
        population = row[self.target_field]
        # NOTE(review): whitespace in the original was ambiguous here;
        # nesting the zone update under the isnan guard (so nodata cells
        # are skipped entirely) matches the sibling implementations in
        # this file -- confirm against upstream.
        if not numpy.isnan(population):
            population = float(population)
            # Update population count for this hazard zone
            hazard_value = get_key_for_value(
                row[self.hazard_class_attribute],
                self.hazard_class_mapping)
            if not hazard_value:
                hazard_value = self._not_affected_value
            self.affected_population[hazard_value] += population

    # Count totals
    self.total_population = int(
        numpy.nansum(self.exposure.layer.get_data()))
    self.unaffected_population = (
        self.total_population - self.total_affected_population)

    self.minimum_needs = [
        parameter.serialize() for parameter in
        filter_needs_parameters(self.parameters['minimum needs'])
    ]

    impact_table = impact_summary = self.html_report()

    # check for zero impact
    if self.total_affected_population == 0:
        message = no_population_impact_message(self.question)
        raise ZeroImpactException(message)

    # Create style
    colours = [
        '#FFFFFF', '#38A800', '#79C900', '#CEED00',
        '#FFCC00', '#FF6600', '#FF0000', '#7A0000'
    ]
    classes = create_classes(
        covered_exposure_layer.get_data().flat[:], len(colours))
    interval_classes = humanize_class(classes)

    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        # NOTE(review): this first label assignment is immediately
        # overwritten below -- redundant but harmless.
        style_class['label'] = create_label(interval_classes[i])
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr('Low Population [%i people/cell]' % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr('Medium Population [%i people/cell]' % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr('High Population [%i people/cell]' % classes[i]))
        else:
            label = create_label(interval_classes[i])

        style_class['label'] = label
        style_class['quantity'] = classes[i]
        style_class['colour'] = colours[i]
        style_class['transparency'] = 0
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=None,
                      style_classes=style_classes,
                      style_type='rasterStyle')

    # For printing map purpose
    map_title = tr('People affected by Volcano Hazard Zones')
    legend_title = tr('Population')
    legend_units = tr('(people per cell)')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())

    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.target_field,
        'map_title': map_title,
        'legend_notes': legend_notes,
        'legend_units': legend_units,
        'legend_title': legend_title,
        'total_needs': self.total_needs
    }

    self.set_if_provenance()

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create raster layer and return
    impact_layer = Raster(
        data=covered_exposure_layer.get_data(),
        projection=covered_exposure_layer.get_projection(),
        geotransform=covered_exposure_layer.get_geotransform(),
        name=tr('People affected by volcano hazard zones'),
        keywords=impact_layer_keywords,
        style_info=style_info)

    self._impact = impact_layer
    return impact_layer
def run(self, layers):
    """Flood impact to buildings (e.g. from Open Street Map).

    :param layers: List of layers expected to contain.
        * hazard_layer: Hazard layer of flood
        * exposure_layer: Vector layer of structure data on
        the same grid as hazard_layer

    :returns: Vector layer of buildings with inundation status written to
        ``self.target_field`` and an impact report in the keywords.
    """
    threshold = self.parameters['threshold [m]']  # Flood threshold [m]

    verify(isinstance(threshold, float),
           'Expected thresholds to be a float. Got %s' % str(threshold))

    # Extract data
    hazard_layer = get_hazard_layer(layers)  # Depth
    exposure_layer = get_exposure_layer(layers)  # Building locations

    question = get_question(
        hazard_layer.get_name(), exposure_layer.get_name(), self)

    # Determine attribute name for hazard levels.
    # Raster hazards carry a continuous 'depth' value per building;
    # vector hazards are classified per region (no interpolated attribute).
    if hazard_layer.is_raster:
        mode = 'grid'
        hazard_attribute = 'depth'
    else:
        mode = 'regions'
        hazard_attribute = None

    # Interpolate hazard level to building locations
    interpolated_layer = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer, attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()
    total_features = len(interpolated_layer)
    buildings = {}

    # The number of affected buildings
    affected_count = 0

    # The variables for grid mode
    inundated_count = 0
    wet_count = 0
    dry_count = 0
    inundated_buildings = {}
    wet_buildings = {}
    dry_buildings = {}

    # The variable for regions mode
    affected_buildings = {}

    if mode == 'grid':
        for i in range(total_features):
            # Get the interpolated depth
            water_depth = float(features[i]['depth'])
            if water_depth <= 0:
                inundated_status = 0  # dry
            elif water_depth >= threshold:
                inundated_status = 1  # inundated
            else:
                inundated_status = 2  # wet

            # Count affected buildings by usage type if available
            usage = get_osm_building_usage(attribute_names, features[i])
            if usage is not None and usage != 0:
                key = usage
            else:
                key = 'unknown'

            if key not in buildings:
                buildings[key] = 0
                inundated_buildings[key] = 0
                wet_buildings[key] = 0
                dry_buildings[key] = 0

            # Count all buildings by type
            buildings[key] += 1
            # FIX: use value equality, not identity ('is'), for the int
            # status codes — identity on small ints is a CPython accident.
            if inundated_status == 0:
                # Count dry buildings by type
                dry_buildings[key] += 1
                # Count total dry buildings
                dry_count += 1
            if inundated_status == 1:
                # Count inundated buildings by type
                inundated_buildings[key] += 1
                # Count total inundated buildings (comment was wrong before)
                inundated_count += 1
            if inundated_status == 2:
                # Count wet buildings by type
                wet_buildings[key] += 1
                # Count total wet buildings
                wet_count += 1

            # Add calculated impact to existing attributes
            features[i][self.target_field] = inundated_status
    elif mode == 'regions':
        for i in range(total_features):
            # Use interpolated polygon attribute
            atts = features[i]

            # FIXME (Ole): Need to agree whether to use one or the
            # other as this can be very confusing!
            # For now look for 'affected' first
            if 'affected' in atts:
                # E.g. from flood forecast
                # Assume that building is wet if inside polygon
                # as flagged by attribute Flooded
                res = atts['affected']
                if res is None:
                    inundated_status = False
                else:
                    inundated_status = bool(res)
            elif 'FLOODPRONE' in atts:
                res = atts['FLOODPRONE']
                if res is None:
                    inundated_status = False
                else:
                    inundated_status = res.lower() == 'yes'
            elif DEFAULT_ATTRIBUTE in atts:
                # Check the default attribute assigned for points
                # covered by a polygon
                res = atts[DEFAULT_ATTRIBUTE]
                if res is None:
                    inundated_status = False
                else:
                    inundated_status = res
            else:
                # there is no flood related attribute
                # NOTE(review): the '%s' placeholder is never filled in —
                # TODO supply the layer name or drop the placeholder.
                message = (
                    'No flood related attribute found in %s. I was '
                    'looking for either "affected", "FLOODPRONE" or '
                    '"inapolygon". The latter should have been '
                    'automatically set by call to '
                    'assign_hazard_values_to_exposure_data(). Sorry I '
                    'can\'t help more.')
                raise Exception(message)

            # Count affected buildings by usage type if available
            usage = get_osm_building_usage(attribute_names, features[i])
            if usage is not None and usage != 0:
                key = usage
            else:
                key = 'unknown'

            if key not in buildings:
                buildings[key] = 0
                affected_buildings[key] = 0

            # Count all buildings by type
            buildings[key] += 1
            if inundated_status is True:
                # Count affected buildings by type
                affected_buildings[key] += 1
                # Count total affected buildings
                affected_count += 1

            # Add calculated impact to existing attributes
            features[i][self.target_field] = int(inundated_status)
    else:
        # FIX: the message used to claim the valid modes were "depth" and
        # "grid"; the actual modes set above are "grid" and "regions".
        message = (tr('Unknown hazard type %s. Must be either "grid" or '
                      '"regions"') % mode)
        raise Exception(message)

    if mode == 'grid':
        affected_count = inundated_count + wet_count

    # Lump small entries and 'unknown' into 'other' category
    # (Py2: buildings.keys() returns a list, so deletion while
    # iterating is safe here.)
    for usage in buildings.keys():
        x = buildings[usage]
        if x < 25 or usage == 'unknown':
            if 'other' not in buildings:
                buildings['other'] = 0
                if mode == 'grid':
                    inundated_buildings['other'] = 0
                    wet_buildings['other'] = 0
                    dry_buildings['other'] = 0
                elif mode == 'regions':
                    affected_buildings['other'] = 0

            buildings['other'] += x
            if mode == 'grid':
                inundated_buildings['other'] += inundated_buildings[usage]
                wet_buildings['other'] += wet_buildings[usage]
                dry_buildings['other'] += dry_buildings[usage]
                del buildings[usage]
                del inundated_buildings[usage]
                del wet_buildings[usage]
                del dry_buildings[usage]
            elif mode == 'regions':
                affected_buildings['other'] += affected_buildings[usage]
                del buildings[usage]
                del affected_buildings[usage]

    # Generate simple impact report
    table_body = []
    if mode == 'grid':
        table_body = [
            question,
            TableRow([tr('Building type'),
                      tr('Number Inundated'),
                      tr('Number of Wet Buildings'),
                      tr('Number of Dry Buildings'),
                      tr('Total')], header=True),
            TableRow(
                [tr('All'),
                 format_int(inundated_count),
                 format_int(wet_count),
                 format_int(dry_count),
                 format_int(total_features)])]
    elif mode == 'regions':
        table_body = [
            question,
            TableRow([tr('Building type'),
                      tr('Number flooded'),
                      tr('Total')], header=True),
            TableRow([tr('All'),
                      format_int(affected_count),
                      format_int(total_features)])]

    school_closed = 0
    hospital_closed = 0
    # Generate break down by building usage type if available
    list_type_attribute = [
        'TYPE', 'type', 'amenity', 'building_t', 'office',
        'tourism', 'leisure', 'building']
    intersect_type = set(attribute_names) & set(list_type_attribute)
    if len(intersect_type) > 0:
        # Make list of building types
        building_list = []
        for usage in buildings:
            building_type = usage.replace('_', ' ')

            # Lookup internationalised value if available
            building_type = tr(building_type)
            if mode == 'grid':
                building_list.append([
                    building_type.capitalize(),
                    format_int(inundated_buildings[usage]),
                    format_int(wet_buildings[usage]),
                    format_int(dry_buildings[usage]),
                    format_int(buildings[usage])])
            elif mode == 'regions':
                building_list.append([
                    building_type.capitalize(),
                    format_int(affected_buildings[usage]),
                    format_int(buildings[usage])])

            if usage.lower() == 'school':
                school_closed = 0
                if mode == 'grid':
                    school_closed += inundated_buildings[usage]
                    school_closed += wet_buildings[usage]
                elif mode == 'regions':
                    school_closed = affected_buildings[usage]
            if usage.lower() == 'hospital':
                hospital_closed = 0
                if mode == 'grid':
                    hospital_closed += inundated_buildings[usage]
                    hospital_closed += wet_buildings[usage]
                elif mode == 'regions':
                    hospital_closed = affected_buildings[usage]

        # Sort alphabetically
        building_list.sort()

        table_body.append(TableRow(tr('Breakdown by building type'),
                                   header=True))
        for row in building_list:
            s = TableRow(row)
            table_body.append(s)

    # Action Checklist Section
    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(
        tr('Are the critical facilities still open?')))
    table_body.append(TableRow(
        tr('Which structures have warning capacity (eg. sirens, speakers, '
           'etc.)?')))
    table_body.append(TableRow(
        tr('Which buildings will be evacuation centres?')))
    table_body.append(TableRow(
        tr('Where will we locate the operations centre?')))
    table_body.append(TableRow(
        tr('Where will we locate warehouse and/or distribution centres?')))
    if school_closed > 0:
        table_body.append(TableRow(
            tr('Where will the students from the %s closed schools go to '
               'study?') % format_int(school_closed)))
    if hospital_closed > 0:
        table_body.append(TableRow(
            tr('Where will the patients from the %s closed hospitals go '
               'for treatment and how will we transport them?') %
            format_int(hospital_closed)))

    # Notes Section
    table_body.append(TableRow(tr('Notes'), header=True))
    if mode == 'grid':
        table_body.append(TableRow(
            tr('Buildings are said to be inundated when flood levels '
               'exceed %.1f m') % threshold))
        table_body.append(TableRow(
            tr('Buildings are said to be wet when flood levels '
               'are greater than 0 m but less than %.1f m') % threshold))
        table_body.append(TableRow(
            tr('Buildings are said to be dry when flood levels '
               'are less than 0 m')))
        table_body.append(TableRow(
            tr('Buildings are said to be closed if they are inundated or '
               'wet')))
        table_body.append(TableRow(
            tr('Buildings are said to be open if they are dry')))
    else:
        table_body.append(TableRow(
            tr('Buildings are said to be flooded when in regions marked '
               'as affected')))

    # Result
    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary

    # Prepare impact layer
    map_title = tr('Buildings inundated')
    legend_title = tr('Structure inundated status')

    legend_units = ''
    style_classes = []

    if mode == 'grid':
        style_classes = [
            dict(
                label=tr('Dry (<= 0 m)'),
                value=0,
                colour='#1EFC7C',
                transparency=0,
                size=1
            ),
            dict(
                label=tr('Wet (0 m - %.1f m)') % threshold,
                value=2,
                colour='#FF9900',
                transparency=0,
                size=1
            ),
            dict(
                label=tr('Inundated (>= %.1f m)') % threshold,
                value=1,
                colour='#F31A1C',
                transparency=0,
                size=1
            )]
        legend_units = tr('(inundated, wet, or dry)')
    elif mode == 'regions':
        style_classes = [
            dict(
                label=tr('Not Inundated'),
                value=0,
                colour='#1EFC7C',
                transparency=0,
                size=1),
            dict(
                label=tr('Inundated'),
                value=1,
                colour='#F31A1C',
                # FIX: was the typo 'ztransparency', which was silently
                # ignored and left the class with no transparency setting.
                transparency=0,
                size=1)]
        legend_units = tr('(inundated or not inundated)')

    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='categorizedSymbol')

    # Create vector layer and return
    vector_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=tr('Estimated buildings affected'),
        keywords={
            'impact_summary': impact_summary,
            'impact_table': impact_table,
            'target_field': self.target_field,
            'map_title': map_title,
            'legend_units': legend_units,
            'legend_title': legend_title,
            'buildings_total': total_features,
            'buildings_affected': affected_count},
        style_info=style_info)
    return vector_layer
def run(self):
    """Run volcano point population evacuation Impact Function.

    Counts number of people exposed to volcano event.

    :returns: Map of population exposed to the volcano hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
        * RadiiException - When radii are not valid (they need to be
            monotonically increasing)
    """
    # Parameters: buffer distances (km) around each volcano point.
    radii = self.parameters['distances'].value

    # Get parameters from layer's keywords
    volcano_name_attribute = self.hazard.keyword('volcano_name_field')

    data_table = self.hazard.layer.get_data()

    # Get names of volcanoes considered
    if volcano_name_attribute in self.hazard.layer.get_attribute_names():
        volcano_name_list = []
        # Run through all polygons and get unique names
        for row in data_table:
            volcano_name_list.append(row[volcano_name_attribute])

        # Build a comma-separated display string of the volcano names.
        # NOTE(review): the loop variable is named 'radius' but actually
        # holds a volcano name from volcano_name_list.
        volcano_names = ''
        for radius in volcano_name_list:
            volcano_names += '%s, ' % radius
        self.volcano_names = volcano_names[:-2]  # Strip trailing ', '

    # Run interpolation function for polygon2raster; yields both the
    # interpolated point data and the covered exposure raster.
    interpolated_layer, covered_exposure_layer = \
        assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=self.target_field
        )

    # Initialise affected population per categories (one bucket per radius)
    for radius in radii:
        category = 'Radius %s km ' % format_int(radius)
        self.affected_population[category] = 0

    if has_no_data(self.exposure.layer.get_data(nan=True)):
        self.no_data_warning = True

    # Count affected population per polygon and total
    for row in interpolated_layer.get_data():
        # Get population at this location; NaN cells are skipped entirely.
        population = row[self.target_field]
        if not numpy.isnan(population):
            population = float(population)
            # Update population count for this category
            category = 'Radius %s km ' % format_int(
                row[self.hazard_zone_attribute])
            self.affected_population[category] += population

    # Count totals (rounded for presentation)
    self.total_population = population_rounding(
        int(numpy.nansum(self.exposure.layer.get_data())))

    self.minimum_needs = [
        parameter.serialize() for parameter in
        filter_needs_parameters(self.parameters['minimum needs'])
    ]

    # Create style: white-to-red ramp for population density
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    classes = create_classes(
        covered_exposure_layer.get_data().flat[:], len(colours))
    interval_classes = humanize_class(classes)
    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        # Classes 1, 4 and 7 get descriptive Low/Medium/High labels
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr('Low Population [%i people/cell]' % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr('Medium Population [%i people/cell]' % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr('High Population [%i people/cell]' % classes[i]))
        else:
            label = create_label(interval_classes[i])
        style_class['label'] = label
        style_class['quantity'] = classes[i]
        style_class['colour'] = colours[i]
        style_class['transparency'] = 0
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(
        target_field=None,
        style_classes=style_classes,
        style_type='rasterStyle')

    impact_data = self.generate_data()

    # Create raster layer and return
    extra_keywords = {
        'target_field': self.target_field,
        'map_title': self.metadata().key('map_title'),
        'legend_notes': self.metadata().key('legend_notes'),
        'legend_units': self.metadata().key('legend_units'),
        'legend_title': self.metadata().key('legend_title'),
        'total_needs': self.total_needs
    }

    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    impact_layer = Raster(
        data=covered_exposure_layer.get_data(),
        projection=covered_exposure_layer.get_projection(),
        geotransform=covered_exposure_layer.get_geotransform(),
        name=self.metadata().key('layer_name'),
        keywords=impact_layer_keywords,
        style_info=style_info)

    impact_layer.impact_data = impact_data
    self._impact = impact_layer
    return impact_layer
def run(self, layers):
    """Impact plugin for hazard impact.

    :param layers: List of layers expected to contain a hazard layer
        (categorised value) and an exposure layer (building locations).
    :returns: Vector layer of buildings with an affected category per
        feature.
    """
    # Extract data
    H = get_hazard_layer(layers)    # Value
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Interpolate hazard level to building locations
    H = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='hazard_lev',
                                              mode='constant')

    # Extract relevant numerical data
    coordinates = H.get_geometry()
    category = H.get_data()
    N = len(category)

    # List attributes to carry forward to result layer
    #attributes = E.get_attribute_names()

    # Calculate building impact according to guidelines.
    # count2/count1/count0 tally the High/Medium/Low classes respectively.
    count2 = 0
    count1 = 0
    count0 = 0
    building_impact = []
    for i in range(N):
        # Get category value
        val = float(category[i]['hazard_lev'])

        # Classify buildings according to value
        ## if val >= 2.0 / 3:
        ##     affected = 2
        ##     count2 += 1
        ## elif 1.0 / 3 <= val < 2.0 / 3:
        ##     affected = 1
        ##     count1 += 1
        ## else:
        ##     affected = 0
        ##     count0 += 1
        ## FIXME it would be good if the affected were words not numbers
        ## FIXME need to read hazard layer and see category or keyword
        # NOTE(review): values outside {1, 2, 3} fall through to the
        # string 'None', so the target field mixes ints and strings.
        if val == 3:
            affected = 3
            count2 += 1
        elif val == 2:
            affected = 2
            count1 += 1
        elif val == 1:
            affected = 1
            count0 += 1
        else:
            affected = 'None'

        # Collect depth and calculated damage
        result_dict = {self.target_field: affected,
                       'CATEGORY': val}

        # Record result for this feature
        building_impact.append(result_dict)

    # Create impact report
    # Generate impact summary
    table_body = [question,
                  TableRow([tr('Category'), tr('Affected')],
                           header=True),
                  TableRow([tr('High'), format_int(count2)]),
                  TableRow([tr('Medium'), format_int(count1)]),
                  TableRow([tr('Low'), format_int(count0)]),
                  TableRow([tr('All'), format_int(N)])]

    table_body.append(TableRow(tr('Notes'), header=True))
    table_body.append(tr('Categorised hazard has only 3'
                         ' classes, high, medium and low.'))

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = tr('Categorised hazard impact on buildings')

    #FIXME it would be great to do categorized rather than graduated
    # Create style
    style_classes = [dict(label=tr('Low'), min=1, max=1,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=tr('Medium'), min=2, max=2,
                          colour='#FFA500', transparency=0, size=1),
                     dict(label=tr('High'), min=3, max=3,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    name = 'Buildings Affected'

    V = Vector(data=building_impact,
               projection=E.get_projection(),
               geometry=coordinates,
               geometry_type=E.geometry_type,
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field,
                         'statistics_type': self.statistics_type,
                         'statistics_classes': self.statistics_classes},
               name=name,
               style_info=style_info)
    return V
def run(self):
    """Flood impact to buildings (e.g. from Open Street Map)."""
    # Depth separating merely 'wet' buildings from 'flooded' ones [m]
    depth_threshold = self.parameters['threshold'].value

    verify(isinstance(depth_threshold, float),
           'Expected thresholds to be a float. Got %s' % str(
               depth_threshold))

    # Interpolate the flood depth raster onto each building; the value
    # lands on every feature under the attribute name 'depth'.
    interpolated_layer = assign_hazard_values_to_exposure_data(
        self.hazard.layer,
        self.exposure.layer,
        attribute_name='depth')

    # Building features now carrying an interpolated depth each
    features = interpolated_layer.get_data()
    total_features = len(interpolated_layer)

    structure_class_field = self.exposure.keyword('structure_class_field')
    exposure_value_mapping = self.exposure.keyword('value_mapping')

    self.init_report_var([tr('Flooded'), tr('Wet'), tr('Dry')])

    # Status codes written to the target field:
    #   0 = dry (depth <= 0), 1 = flooded (>= threshold), 2 = wet (between)
    # status_labels is indexed by that code to get the report category.
    status_labels = [tr('Dry'), tr('Flooded'), tr('Wet')]
    for feature in features:
        depth = float(feature['depth'])
        if depth <= 0:
            status = 0  # dry
        elif depth >= depth_threshold:
            status = 1  # inundated
        else:
            status = 2  # wet

        building_usage = main_type(
            feature.get(structure_class_field, None),
            exposure_value_mapping)

        # Stamp the computed status onto the feature and tally it
        feature[self.target_field] = status
        self.classify_feature(status_labels[status], building_usage, True)

    self.reorder_dictionaries()

    # Map styling: one categorised symbol per status code
    style_classes = [
        dict(
            label=tr('Dry (<= 0 m)'),
            value=0,
            colour='#1EFC7C',
            transparency=0,
            size=1
        ),
        dict(
            label=tr('Wet (0 m - %.1f m)') % depth_threshold,
            value=2,
            colour='#FF9900',
            transparency=0,
            size=1
        ),
        dict(
            label=tr('Flooded (>= %.1f m)') % depth_threshold,
            value=1,
            colour='#F31A1C',
            transparency=0,
            size=1
        )]
    style_info = dict(
        target_field=self.target_field,
        style_classes=style_classes,
        style_type='categorizedSymbol')

    impact_data = self.generate_data()

    extra_keywords = {
        'target_field': self.target_field,
        'map_title': self.map_title(),
        'legend_title': self.metadata().key('legend_title'),
        'legend_units': self.metadata().key('legend_units'),
        'buildings_total': total_features,
        'buildings_affected': self.total_affected_buildings
    }
    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Assemble the impact vector layer and hand it back
    impact_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=self.map_title(),
        keywords=impact_layer_keywords,
        style_info=style_info)

    impact_layer.impact_data = impact_data
    self._impact = impact_layer
    return impact_layer
def run(self, layers):
    """Risk plugin for volcano population evacuation.

    :param layers: List of layers expected to contain where two layers
        should be present.

        * hazard_layer: Vector polygon layer of volcano impact zones
        * exposure_layer: Raster layer of population data on the same
            grid as hazard_layer

    Counts number of people exposed to volcano event.

    :returns: Map of population exposed to the volcano hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
        * RadiiException - When radii are not valid (they need to be
            monotonically increasing)
    """
    # Identify hazard and exposure layers
    hazard_layer = get_hazard_layer(layers)  # Volcano KRB
    exposure_layer = get_exposure_layer(layers)

    question = get_question(
        hazard_layer.get_name(), exposure_layer.get_name(), self)

    # Input checks
    if not hazard_layer.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % hazard_layer.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. I got %s with '
           'layer type %s' % (hazard_layer.get_name(),
                              hazard_layer.get_geometry_name()))
    if not (hazard_layer.is_polygon_data or hazard_layer.is_point_data):
        raise Exception(msg)

    data_table = hazard_layer.get_data()
    if hazard_layer.is_point_data:
        # Use concentric circles buffered around each volcano point
        radii = self.parameters['distance [km]']
        centers = hazard_layer.get_geometry()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        hazard_layer = buffer_points(centers, rad_m, data_table=data_table)
        category_title = 'Radius'
        category_header = tr('Distance [km]')
        category_names = radii
        name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
    else:
        # Use hazard map (KRB = Indonesian volcano hazard zones)
        category_title = 'KRB'
        category_header = tr('Category')
        # FIXME (Ole): Change to English and use translation system
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']
        name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map

    # Get names of volcanoes considered
    if name_attribute in hazard_layer.get_attribute_names():
        volcano_name_list = []
        # Run through all polygons and get unique names
        for row in data_table:
            volcano_name_list.append(row[name_attribute])

        volcano_names = ''
        for name in volcano_name_list:
            volcano_names += '%s, ' % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr('Not specified in data')

    # Check if category_title exists in hazard_layer
    if not category_title in hazard_layer.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (hazard_layer.get_name(), category_title))
        # noinspection PyExceptionInherit
        raise InaSAFEError(msg)

    # Find the target field name that has no conflict with default target
    attribute_names = hazard_layer.get_attribute_names()
    new_target_field = get_non_conflicting_attribute_name(
        self.target_field, attribute_names)
    self.target_field = new_target_field

    # Run interpolation function for polygon2raster
    interpolated_layer = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer, attribute_name=self.target_field)

    # Initialise data_table of output dataset with all data_table
    # from input polygon and a population count of zero
    new_data_table = hazard_layer.get_data()
    categories = {}
    for row in new_data_table:
        row[self.target_field] = 0
        category = row[category_title]
        categories[category] = 0

    # Count affected population per polygon and total
    for row in interpolated_layer.get_data():
        # Get population at this location
        population = float(row[self.target_field])

        # Update population count for associated polygon
        poly_id = row['polygon_id']
        new_data_table[poly_id][self.target_field] += population

        # Update population count for each category
        category = new_data_table[poly_id][category_title]
        categories[category] += population

    # Count totals
    total = int(numpy.sum(exposure_layer.get_data(nan=0)))

    # Don't show digits less than a 1000
    total = round_thousand(total)

    # Count number and cumulative for each zone
    cumulative = 0
    all_categories_population = {}
    all_categories_cumulative = {}
    for name in category_names:
        if category_title == 'Radius':
            key = name * 1000  # Convert to meters
        else:
            key = name
        # prevent key error (zones with no population default to 0)
        population = int(categories.get(key, 0))

        population = round_thousand(population)
        cumulative += population
        cumulative = round_thousand(cumulative)

        all_categories_population[name] = population
        all_categories_cumulative[name] = cumulative

    # Use final accumulation as total number needing evacuation
    evacuated = cumulative

    # Calculate estimated minimum needs
    minimum_needs = self.parameters['minimum needs']
    total_needs = evacuated_population_weekly_needs(
        evacuated, minimum_needs)

    # Generate impact report for the pdf map
    blank_cell = ''
    table_body = [question,
                  TableRow([tr('Volcanoes considered'),
                            '%s' % volcano_names, blank_cell],
                           header=True),
                  TableRow([tr('People needing evacuation'),
                            '%s' % format_int(evacuated),
                            blank_cell], header=True),
                  TableRow([category_header,
                            tr('Total'), tr('Cumulative')],
                           header=True)]

    for name in category_names:
        table_body.append(
            TableRow([name,
                      format_int(all_categories_population[name]),
                      format_int(all_categories_cumulative[name])]))

    table_body.extend([
        TableRow(tr(
            'Map shows the number of people affected in each of volcano '
            'hazard polygons.')),
        TableRow(
            [tr('Needs per week'), tr('Total'), blank_cell],
            header=True),
        [tr('Rice [kg]'), format_int(total_needs['rice']), blank_cell],
        [
            tr('Drinking Water [l]'),
            format_int(total_needs['drinking_water']),
            blank_cell],
        [tr('Clean Water [l]'), format_int(total_needs['water']),
         blank_cell],
        [tr('Family Kits'), format_int(total_needs['family_kits']),
         blank_cell],
        [tr('Toilets'), format_int(total_needs['toilets']),
         blank_cell]])
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend(
        [TableRow(tr('Notes'), header=True),
         tr('Total population %s in the exposure layer') % format_int(
             total),
         tr('People need evacuation if they are within the '
            'volcanic hazard zones.')])

    population_counts = [x[self.target_field] for x in new_data_table]
    impact_summary = Table(table_body).toNewlineFreeString()

    # check for zero impact
    if numpy.nanmax(population_counts) == 0 == numpy.nanmin(
            population_counts):
        table_body = [
            question,
            TableRow([tr('People needing evacuation'),
                      '%s' % format_int(evacuated),
                      blank_cell], header=True)]
        my_message = Table(table_body).toNewlineFreeString()
        raise ZeroImpactException(my_message)

    # Create style: white-to-red ramp for population counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    classes = create_classes(population_counts, len(colours))
    interval_classes = humanize_class(classes)
    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        if i == 0:
            # Lowest class (zero) is fully transparent
            transparency = 100
            style_class['min'] = 0
        else:
            transparency = 30
            style_class['min'] = classes[i - 1]
        style_class['transparency'] = transparency
        style_class['colour'] = colours[i]
        style_class['max'] = classes[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='graduatedSymbol')

    # For printing map purpose
    map_title = tr('People affected by volcanic hazard zone')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())
    legend_units = tr('(people)')
    legend_title = tr('Population count')

    # Create vector layer and return
    impact_layer = Vector(
        data=new_data_table,
        projection=hazard_layer.get_projection(),
        geometry=hazard_layer.get_geometry(as_geometry_objects=True),
        name=tr('People affected by volcanic hazard zone'),
        keywords={'impact_summary': impact_summary,
                  'impact_table': impact_table,
                  'target_field': self.target_field,
                  'map_title': map_title,
                  'legend_notes': legend_notes,
                  'legend_units': legend_units,
                  'legend_title': legend_title},
        style_info=style_info)
    return impact_layer
def run(self, layers):
    """Risk plugin for flood population evacuation.

    :param layers: List of layers expected to contain

        * hazard_layer : Vector polygon layer of flood depth
        * exposure_layer : Raster layer of population data on the same
            grid as hazard_layer

    Counts number of people exposed to areas identified as flood prone

    :returns: Map of population exposed to flooding Table with number of
        people evacuated and supplies required.
    :rtype: tuple
    """
    # Identify hazard and exposure layers
    hazard_layer = get_hazard_layer(layers)  # Flood inundation
    exposure_layer = get_exposure_layer(layers)

    question = get_question(hazard_layer.get_name(),
                            exposure_layer.get_name(),
                            self)

    # Check that hazard is polygon type
    if not hazard_layer.is_vector:
        message = ('Input hazard %s was not a vector layer as expected '
                   % hazard_layer.get_name())
        raise Exception(message)

    message = (
        'Input hazard must be a polygon layer. I got %s with layer type '
        '%s' % (hazard_layer.get_name(),
                hazard_layer.get_geometry_name()))
    if not hazard_layer.is_polygon_data:
        raise Exception(message)

    # Run interpolation function for polygon2raster
    P = assign_hazard_values_to_exposure_data(hazard_layer,
                                              exposure_layer,
                                              attribute_name='population')

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a population count of zero
    new_attributes = hazard_layer.get_data()

    category_title = 'affected'  # FIXME: Should come from keywords
    deprecated_category_title = 'FLOODPRONE'
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        try:
            cat = attr[category_title]
        except KeyError:
            try:
                cat = attr['FLOODPRONE']
                # NOTE(review): categories is only seeded here, in the
                # deprecated-attribute fallback — when 'affected' exists
                # the category bucket is never initialised. Looks
                # suspicious; confirm against upstream history before
                # relying on per-category counts.
                categories[cat] = 0
            except KeyError:
                pass

    # Count affected population per polygon, per category and total
    affected_population = 0
    for attr in P.get_data():

        affected = False
        if 'affected' in attr:
            res = attr['affected']
            if res is None:
                x = False
            else:
                x = bool(res)

            affected = x
        elif 'FLOODPRONE' in attr:
            # If there isn't an 'affected' attribute,
            res = attr['FLOODPRONE']
            if res is not None:
                affected = res.lower() == 'yes'
        elif 'Affected' in attr:
            # Check the default attribute assigned for points
            # covered by a polygon
            res = attr['Affected']
            if res is None:
                x = False
            else:
                x = res
            affected = x
        else:
            # assume that every polygon is affected (see #816)
            affected = True
            # there is no flood related attribute
            # message = ('No flood related attribute found in %s. '
            #            'I was looking for either "Flooded", "FLOODPRONE" '
            #            'or "Affected". The latter should have been '
            #            'automatically set by call to '
            #            'assign_hazard_values_to_exposure_data(). '
            #            'Sorry I can\'t help more.')
            # raise Exception(message)

        if affected:
            # Get population at this location
            pop = float(attr['population'])

            # Update population count for associated polygon
            poly_id = attr['polygon_id']
            new_attributes[poly_id][self.target_field] += pop

            # Update population count for each category
            if len(categories) > 0:
                try:
                    cat = new_attributes[poly_id][category_title]
                except KeyError:
                    cat = new_attributes[poly_id][
                        deprecated_category_title]
                categories[cat] += pop

            # Update total
            affected_population += pop

    # Estimate number of people in need of evacuation
    evacuated = (affected_population *
                 self.parameters['evacuation_percentage'] / 100.0)

    affected_population, rounding = population_rounding_full(
        affected_population)

    total = int(numpy.sum(exposure_layer.get_data(nan=0, scaling=False)))

    # Don't show digits less than a 1000
    total = population_rounding(total)
    evacuated, rounding_evacuated = population_rounding_full(evacuated)

    minimum_needs = [
        parameter.serialize() for parameter in
        self.parameters['minimum needs']
    ]

    # Generate impact report for the pdf map
    table_body = [
        question,
        TableRow([
            tr('People affected'),
            '%s*' % (format_int(int(affected_population)))
        ], header=True),
        TableRow([
            TableCell(tr('* Number is rounded up to the nearest %s') % (
                rounding), col_span=2)
        ]),
        TableRow([
            tr('People needing evacuation'),
            '%s*' % (format_int(int(evacuated)))
        ], header=True),
        TableRow([
            TableCell(tr('* Number is rounded up to the nearest %s') % (
                rounding_evacuated), col_span=2)
        ]),
        TableRow([
            tr('Evacuation threshold'),
            '%s%%' % format_int(self.parameters['evacuation_percentage'])
        ], header=True),
        TableRow(
            tr('Map shows the number of people affected in each flood '
               'prone area')),
        TableRow(
            tr('Table below shows the weekly minimum needs for all '
               'evacuated people'))
    ]
    total_needs = evacuated_population_needs(evacuated, minimum_needs)
    for frequency, needs in total_needs.items():
        table_body.append(
            TableRow([
                tr('Needs should be provided %s' % frequency),
                tr('Total')
            ], header=True))
        for resource in needs:
            table_body.append(
                TableRow([
                    tr(resource['table name']),
                    format_int(resource['amount'])
                ]))

    impact_table = Table(table_body).toNewlineFreeString()

    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(tr('How will warnings be disseminated?')))
    table_body.append(TableRow(tr('How will we reach stranded people?')))
    table_body.append(TableRow(tr('Do we have enough relief items?')))
    table_body.append(
        TableRow(
            tr('If yes, where are they located and how will we distribute '
               'them?')))
    table_body.append(
        TableRow(
            tr('If no, where can we obtain additional relief items from '
               'and how will we transport them to here?')))

    # Extend impact report for on-screen display
    table_body.extend([
        TableRow(tr('Notes'), header=True),
        tr('Total population: %s') % format_int(total),
        tr('People need evacuation if in the area identified as '
           '"Flood Prone"'),
        tr('Minimum needs are defined in BNPB regulation 7/2008')
    ])
    impact_summary = Table(table_body).toNewlineFreeString()

    # Create style
    # Define classes for legend for flooded population counts
    colours = [
        '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
        '#FF0000', '#7A0000'
    ]
    population_counts = [x['population'] for x in new_attributes]
    classes = create_classes(population_counts, len(colours))
    interval_classes = humanize_class(classes)

    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        if i == 0:
            transparency = 0
            style_class['min'] = 0
        else:
            transparency = 0
            style_class['min'] = classes[i - 1]
        style_class['transparency'] = transparency
        style_class['colour'] = colours[i]
        style_class['max'] = classes[i]
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      style_type='graduatedSymbol')

    # For printing map purpose
    map_title = tr('People affected by flood prone areas')
    legend_notes = tr('Thousand separator is represented by \'.\'')
    legend_units = tr('(people per polygon)')
    legend_title = tr('Population Count')

    # Create vector layer and return
    vector_layer = Vector(data=new_attributes,
                          projection=hazard_layer.get_projection(),
                          geometry=hazard_layer.get_geometry(),
                          name=tr('People affected by flood prone areas'),
                          keywords={
                              'impact_summary': impact_summary,
                              'impact_table': impact_table,
                              'target_field': self.target_field,
                              'map_title': map_title,
                              'legend_notes': legend_notes,
                              'legend_units': legend_units,
                              'legend_title': legend_title,
                              'affected_population': affected_population,
                              'total_population': total,
                              'total_needs': total_needs
                          },
                          style_info=style_info)
    return vector_layer
def run(self):
    """Risk plugin for flood population evacuation.

    Counts number of people exposed to areas identified as flood prone.

    :returns: Map of population exposed to flooding Table with number of
        people evacuated and supplies required.
    :rtype: tuple
    """
    self.validate()
    self.prepare()
    self.provenance.append_step(
        'Calculating Step',
        'Impact function is calculating the impact.')

    # Get parameters from layer's keywords
    self.hazard_class_attribute = self.hazard.keyword('field')
    self.hazard_class_mapping = self.hazard.keyword('value_map')

    # Get the IF parameters
    self._evacuation_percentage = (
        self.parameters['evacuation_percentage'].value)

    # Check that hazard is polygon type
    if not self.hazard.layer.is_polygon_data:
        message = (
            'Input hazard must be a polygon layer. I got %s with layer '
            'type %s' % (
                self.hazard.name,
                self.hazard.layer.get_geometry_name()))
        raise Exception(message)

    # Remember that the exposure raster had nodata cells so the report
    # can warn about it later.
    if has_no_data(self.exposure.layer.get_data(nan=True)):
        self.no_data_warning = True

    # Check that affected field exists in hazard layer; if so, only
    # polygons whose value maps to the 'wet' class are treated as affected.
    if (self.hazard_class_attribute in
            self.hazard.layer.get_attribute_names()):
        self.use_affected_field = True

    # Run interpolation function for polygon2raster
    interpolated_layer, covered_exposure = \
        assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=self.target_field)

    # Data for manipulating the covered_exposure layer
    new_covered_exposure_data = covered_exposure.get_data()
    # Geotransform entries: [0] = top-left x, [3] = top-left y,
    # [1] = pixel width, [5] = pixel height.
    covered_exposure_top_left = numpy.array([
        covered_exposure.get_geotransform()[0],
        covered_exposure.get_geotransform()[3]])
    covered_exposure_dimension = numpy.array([
        covered_exposure.get_geotransform()[1],
        covered_exposure.get_geotransform()[5]])

    # Count affected population per polygon, per category and total
    total_affected_population = 0
    for attr in interpolated_layer.get_data():
        affected = False
        if self.use_affected_field:
            row_affected_value = attr[self.hazard_class_attribute]
            if row_affected_value is not None:
                affected = get_key_for_value(
                    row_affected_value, self.hazard_class_mapping)
        else:
            # assume that every polygon is affected (see #816)
            affected = self.wet
        if affected == self.wet:
            # Get population at this location
            population = attr[self.target_field]
            if not numpy.isnan(population):
                population = float(population)
                total_affected_population += population
        else:
            # If it's not affected, set the value of the impact layer to 0
            grid_point = attr['grid_point']
            # Convert the point's world coordinates to a raster cell
            # index (row = y, column = x).
            index = numpy.floor(
                (grid_point - covered_exposure_top_left) / (
                    covered_exposure_dimension)).astype(int)
            new_covered_exposure_data[index[1]][index[0]] = 0

    # Estimate number of people in need of evacuation.
    # The label built here doubles as the report key for the count.
    if self.use_affected_field:
        affected_population = tr(
            'People within hazard field ("%s") of value "%s"') % (
                self.hazard_class_attribute,
                ','.join([
                    unicode(hazard_class)
                    for hazard_class in self.hazard_class_mapping[self.wet]
                ]))
    else:
        affected_population = tr('People within any hazard polygon.')
    self.affected_population[affected_population] = (
        total_affected_population)

    self.total_population = int(
        numpy.nansum(self.exposure.layer.get_data(scaling=False)))
    # NOTE(review): self.total_affected_population is presumably a
    # property derived from self.affected_population — confirm in the
    # base class.
    self.unaffected_population = (
        self.total_population - self.total_affected_population)

    self.minimum_needs = [
        parameter.serialize() for parameter in
        filter_needs_parameters(self.parameters['minimum needs'])
    ]

    impact_table = impact_summary = self.html_report()

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    classes = create_classes(
        new_covered_exposure_data.flat[:], len(colours))

    # check for zero impact
    if total_affected_population == 0:
        message = no_population_impact_message(self.question)
        raise ZeroImpactException(message)

    interval_classes = humanize_class(classes)
    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        # Classes 1, 4 and 7 get descriptive low/medium/high labels.
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr('Low Population [%i people/cell]' % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr('Medium Population [%i people/cell]' % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr('High Population [%i people/cell]' % classes[i]))
        else:
            label = create_label(interval_classes[i])
        style_class['label'] = label
        style_class['quantity'] = classes[i]
        style_class['colour'] = colours[i]
        style_class['transparency'] = 0
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(
        target_field=None,
        style_classes=style_classes,
        style_type='rasterStyle')

    # For printing map purpose
    map_title = tr('People affected by flood prone areas')
    legend_title = tr('Population Count')
    legend_units = tr('(people per polygon)')
    legend_notes = tr(
        'Thousand separator is represented by %s' %
        get_thousand_separator())

    extra_keywords = {
        'impact_summary': impact_summary,
        'impact_table': impact_table,
        'target_field': self.target_field,
        'map_title': map_title,
        'legend_notes': legend_notes,
        'legend_units': legend_units,
        'legend_title': legend_title,
        'affected_population': total_affected_population,
        'total_population': self.total_population,
        'total_needs': self.total_needs
    }

    self.set_if_provenance()
    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create raster impact layer and return
    impact_layer = Raster(
        data=new_covered_exposure_data,
        projection=covered_exposure.get_projection(),
        geotransform=covered_exposure.get_geotransform(),
        name=tr('People affected by flood prone areas'),
        keywords=impact_layer_keywords,
        style_info=style_info)

    self._impact = impact_layer
    return impact_layer
def run(self):
    """Run classified population evacuation Impact Function.

    Counts number of people exposed to each hazard zones.

    :returns: Map of population exposed to each hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
    """
    self.validate()
    self.prepare()
    self.provenance.append_step(
        "Calculating Step",
        "Impact function is calculating the impact.")

    # Value from layer's keywords
    self.hazard_class_attribute = self.hazard.keyword("field")
    self.hazard_class_mapping = self.hazard.keyword("value_map")

    # TODO: Remove check to self.validate (Ismail)
    # Input checks
    message = tr(
        "Input hazard must be a polygon layer. I got %s with layer type "
        "%s" % (self.hazard.name, self.hazard.layer.get_geometry_name())
    )
    if not self.hazard.layer.is_polygon_data:
        raise Exception(message)

    # Check if hazard_class_attribute exists in hazard_layer
    if self.hazard_class_attribute not in \
            self.hazard.layer.get_attribute_names():
        message = (
            "Hazard data %s does not contain expected hazard "
            'zone attribute "%s". Please change it in the option. '
            % (self.hazard.name, self.hazard_class_attribute)
        )
        # noinspection PyExceptionInherit
        raise InaSAFEError(message)

    # Retrieve the classification that is used by the hazard layer.
    vector_hazard_classification = self.hazard.keyword(
        "vector_hazard_classification")
    # Get the dictionary that contains the definition of the classification
    vector_hazard_classification = definition(vector_hazard_classification)
    # Get the list classes in the classification
    vector_hazard_classes = vector_hazard_classification["classes"]

    # Initialize OrderedDict of affected buildings
    self.affected_population = OrderedDict()

    # Iterate over vector hazard classes
    for vector_hazard_class in vector_hazard_classes:
        # Check if the key of class exist in hazard_class_mapping
        if vector_hazard_class["key"] in self.hazard_class_mapping.keys():
            # Replace the key with the name as we need to show the human
            # friendly name in the report.
            self.hazard_class_mapping[vector_hazard_class["name"]] = \
                self.hazard_class_mapping.pop(vector_hazard_class["key"])
            # Adding the class name as a key in affected_building
            self.affected_population[vector_hazard_class["name"]] = 0

    # Interpolated layer represents grid cell that lies in the polygon
    interpolated_layer, covered_exposure_layer = \
        assign_hazard_values_to_exposure_data(
            self.hazard.layer,
            self.exposure.layer,
            attribute_name=self.target_field
        )

    # Count total affected population per hazard zone
    for row in interpolated_layer.get_data():
        # Get population at this location
        population = row[self.target_field]
        if not numpy.isnan(population):
            population = float(population)
            # Update population count for this hazard zone; cells whose
            # value maps to no known class go to the not-affected bucket.
            hazard_value = get_key_for_value(
                row[self.hazard_class_attribute],
                self.hazard_class_mapping)
            if not hazard_value:
                hazard_value = self._not_affected_value
            self.affected_population[hazard_value] += population

    # Count total population from exposure layer
    self.total_population = int(
        numpy.nansum(self.exposure.layer.get_data()))

    # Count total affected population
    total_affected_population = self.total_affected_population
    self.unaffected_population = \
        self.total_population - total_affected_population

    self.minimum_needs = [
        parameter.serialize()
        for parameter in filter_needs_parameters(
            self.parameters["minimum needs"])
    ]

    # check for zero impact
    if total_affected_population == 0:
        message = no_population_impact_message(self.question)
        raise ZeroImpactException(message)

    impact_table = impact_summary = self.html_report()

    # Create style
    colours = ["#FFFFFF", "#38A800", "#79C900", "#CEED00",
               "#FFCC00", "#FF6600", "#FF0000", "#7A0000"]
    classes = create_classes(
        covered_exposure_layer.get_data().flat[:], len(colours))
    interval_classes = humanize_class(classes)

    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class["label"] = create_label(interval_classes[i])
        # Classes 1, 4 and 7 get descriptive low/medium/high labels.
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr("Low Population [%i people/cell]" % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr("Medium Population [%i people/cell]" % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr("High Population [%i people/cell]" % classes[i]))
        else:
            label = create_label(interval_classes[i])
        style_class["label"] = label
        style_class["quantity"] = classes[i]
        style_class["colour"] = colours[i]
        style_class["transparency"] = 0
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(
        target_field=None,
        style_classes=style_classes,
        style_type="rasterStyle")

    # For printing map purpose
    map_title = tr("People impacted by each hazard zone")
    legend_title = tr("Population")
    legend_units = tr("(people per cell)")
    legend_notes = tr(
        "Thousand separator is represented by %s" %
        get_thousand_separator())

    extra_keywords = {
        "impact_summary": impact_summary,
        "impact_table": impact_table,
        "target_field": self.target_field,
        "map_title": map_title,
        "legend_notes": legend_notes,
        "legend_units": legend_units,
        "legend_title": legend_title,
    }

    self.set_if_provenance()
    impact_layer_keywords = self.generate_impact_keywords(extra_keywords)

    # Create raster impact layer and return
    impact_layer = Raster(
        data=covered_exposure_layer.get_data(),
        projection=covered_exposure_layer.get_projection(),
        geotransform=covered_exposure_layer.get_geotransform(),
        name=tr("People impacted by each hazard zone"),
        keywords=impact_layer_keywords,
        style_info=style_info,
    )
    self._impact = impact_layer
    return impact_layer
def run(layers):
    """Risk plugin for tephra impact.

    :param layers: List containing an ash-load hazard layer and a
        building-location exposure layer.
    :returns: Vector layer of buildings with an estimated damage level
        (0..3) and the interpolated ash load per building.
    """
    hazard = get_hazard_layer(layers)      # Ash load
    exposure = get_exposure_layer(layers)  # Building locations

    # Interpolate hazard level to building locations
    hazard = assign_hazard_values_to_exposure_data(
        hazard, exposure, attribute_name='load')

    # FIXME: The thresholds have been greatly reduced for the purpose of
    # demonstration. Any real analysis should bring them back to
    # 0, 90, 150, 300.
    def damage_level(load):
        """Map an ash load (kg/m2) to a damage class 0..3."""
        if load >= 10.0:
            return 3  # Complete building collapse
        if load >= 2.0:
            return 2  # Partial building collapse
        if load >= 0.5:
            return 1  # Cosmetic damage
        return 0      # Loss of crops and livestock / negligible load

    # Classify every building and tally per damage class.
    result = []
    counts = [0, 0, 0, 0]
    for i in range(len(exposure)):
        load = hazard.get_data('load', i)
        impact = damage_level(load)
        counts[impact] += 1
        result.append({'DAMAGE': impact, 'ASHLOAD': load})
    count0, count1, count2, count3 = counts

    # Create report (labels are in Bahasa Indonesia, as in the data)
    impact_summary = ('<font size="3"> <table border="0" width="320px">'
                      ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'
                      ' <tr></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      '</table></font>' % ('Beban abu',
                                           'Gedung dampak',
                                           '< 0.5 kg/m2', count0,
                                           '0.5 - 2 kg/m2', count1,
                                           '2 - 10 kg/m2', count2,
                                           '> 10 kg/m2', count3))

    # Create vector layer and return
    return Vector(data=result,
                  projection=exposure.get_projection(),
                  geometry=exposure.get_geometry(),
                  name='Estimated ashload damage',
                  keywords={'impact_summary': impact_summary})
def run(self, layers=None):
    """Counts number of building exposed to each volcano hazard zones.

    :param layers: List of layers expected to contain.
        * hazard_layer: Hazard layer of volcano
        * exposure_layer: Vector layer of structure data on
          the same grid as hazard_layer

    :returns: Map of building exposed to volcanic hazard zones.
        Table with number of buildings affected
    :rtype: dict
    """
    self.validate()
    self.prepare(layers)

    # Target Field
    target_field = 'zone'

    # Hazard Zone Attribute
    hazard_zone_attribute = 'radius'

    # Not Affected Value
    not_affected_value = 'Not Affected'

    # Parameters
    radii = self.parameters['distances [km]']
    volcano_name_attribute = self.parameters['volcano name attribute']

    # Identify hazard and exposure layers
    hazard_layer = self.hazard  # Volcano hazard layer
    exposure_layer = self.exposure  # Building exposure layer

    # Input checks
    if not hazard_layer.is_point_data:
        message = (
            'Input hazard must be a vector point layer. I got %s '
            'with layer type %s' % (
                hazard_layer.get_name(),
                hazard_layer.get_geometry_name()))
        raise Exception(message)

    # Make hazard layer by buffering the point
    centers = hazard_layer.get_geometry()
    features = hazard_layer.get_data()
    radii_meter = [x * 1000 for x in radii]  # Convert to meters
    hazard_layer = buffer_points(
        centers,
        radii_meter,
        hazard_zone_attribute,
        data_table=features)

    # Category names for the impact zone.
    # NOTE(review): category_names aliases radii_meter, so this append
    # also adds not_affected_value to radii_meter (and therefore to
    # self.affected_buildings below). Confirm that is intended before
    # de-aliasing with a copy.
    category_names = radii_meter
    category_names.append(not_affected_value)

    # Get names of volcanoes considered
    if volcano_name_attribute in hazard_layer.get_attribute_names():
        volcano_name_list = set()
        # Run through all polygons and get unique names
        for row in hazard_layer.get_data():
            volcano_name_list.add(row[volcano_name_attribute])
        self.volcano_names = ', '.join(volcano_name_list)

    # Find the target field name that has no conflict with the attribute
    # names in the hazard layer
    hazard_attribute_names = hazard_layer.get_attribute_names()
    target_field = get_non_conflicting_attribute_name(
        target_field, hazard_attribute_names)

    # Run interpolation function for polygon2polygon
    interpolated_layer = assign_hazard_values_to_exposure_data(
        hazard_layer, exposure_layer, attribute_name=None)

    # Extract relevant interpolated layer data
    attribute_names = interpolated_layer.get_attribute_names()
    features = interpolated_layer.get_data()

    self.buildings = {}
    self.affected_buildings = OrderedDict()
    for category in radii_meter:
        self.affected_buildings[category] = {}

    # Iterate the interpolated building layer
    for i in range(len(features)):
        hazard_value = features[i][hazard_zone_attribute]
        if not hazard_value:
            hazard_value = not_affected_value
        features[i][target_field] = hazard_value

        # Count affected buildings by usage type if available
        usage = get_osm_building_usage(attribute_names, features[i])
        # BUG FIX: the original tested `usage is [...]`, which compares
        # identity against a fresh list literal and is always False, so
        # null-ish usages were never normalised. `in` tests membership.
        if usage in [None, 'NULL', 'null', 'Null', 0]:
            usage = tr('Unknown')
        if usage not in self.buildings:
            self.buildings[usage] = 0
            for category in self.affected_buildings.keys():
                self.affected_buildings[category][usage] = OrderedDict([
                    (tr('Buildings Affected'), 0)])
        self.buildings[usage] += 1
        if hazard_value in self.affected_buildings.keys():
            self.affected_buildings[hazard_value][usage][
                tr('Buildings Affected')] += 1

    # Lump small entries and 'unknown' into 'other' category
    self._consolidate_to_other()

    # Generate simple impact report
    impact_summary = impact_table = self.generate_html_report()

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    colours = colours[::-1]  # flip
    colours = colours[:len(category_names)]
    style_classes = []
    i = 0
    for category_name in category_names:
        style_class = dict()
        style_class['label'] = tr(category_name)
        style_class['transparency'] = 0
        style_class['value'] = category_name
        style_class['size'] = 1
        # ROBUSTNESS FIX: clamp against len(colours) (at most 8), not
        # len(category_names); with more than 8 categories the old guard
        # still allowed an out-of-range palette index.
        if i >= len(colours):
            i = len(colours) - 1
        style_class['colour'] = colours[i]
        i += 1
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(
        target_field=target_field,
        style_classes=style_classes,
        style_type='categorizedSymbol')

    # For printing map purpose
    map_title = tr('Buildings affected by volcanic buffered point')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())
    legend_units = tr('(building)')
    legend_title = tr('Building count')

    # Create vector layer and return
    impact_layer = Vector(
        data=features,
        projection=interpolated_layer.get_projection(),
        geometry=interpolated_layer.get_geometry(),
        name=tr('Buildings affected by volcanic buffered point'),
        keywords={'impact_summary': impact_summary,
                  'impact_table': impact_table,
                  'target_field': target_field,
                  'map_title': map_title,
                  'legend_notes': legend_notes,
                  'legend_units': legend_units,
                  'legend_title': legend_title},
        style_info=style_info)
    self._impact = impact_layer
    return impact_layer
def run(self, layers=None):
    """Run classified population evacuation Impact Function.

    :param layers: List of layers expected to contain where two layers
        should be present.

        * hazard_layer: Vector polygon layer
        * exposure_layer: Raster layer of population data on the same
          grid as hazard_layer

    Counts number of people exposed to each hazard zones.

    :returns: Map of population exposed to each hazard zone.
        The returned dict will include a table with number of people
        evacuated and supplies required.
    :rtype: dict

    :raises:
        * Exception - When hazard layer is not vector layer
    """
    self.validate()
    self.prepare(layers)

    # Parameters
    hazard_zone_attribute = self.parameters['hazard zone attribute']

    # Identify hazard and exposure layers
    hazard_layer = self.hazard
    exposure_layer = self.exposure

    # Input checks
    if not hazard_layer.is_polygon_data:
        msg = ('Input hazard must be a polygon layer. I got %s with '
               'layer type %s' % (hazard_layer.get_name(),
                                  hazard_layer.get_geometry_name()))
        raise Exception(msg)

    # Check if hazard_zone_attribute exists in hazard_layer
    if hazard_zone_attribute not in hazard_layer.get_attribute_names():
        msg = ('Hazard data %s does not contain expected hazard '
               'zone attribute "%s". Please change it in the option. ' %
               (hazard_layer.get_name(), hazard_zone_attribute))
        # noinspection PyExceptionInherit
        raise InaSAFEError(msg)

    # Get unique hazard zones from the layer attribute
    self.hazard_zones = list(
        set(hazard_layer.get_data(hazard_zone_attribute)))

    # Find the target field name that has no conflict with default target
    attribute_names = hazard_layer.get_attribute_names()
    new_target_field = get_non_conflicting_attribute_name(
        self.target_field, attribute_names)
    self.target_field = new_target_field

    # Interpolated layer represents grid cell that lies in the polygon
    interpolated_layer, covered_exposure_layer = \
        assign_hazard_values_to_exposure_data(
            hazard_layer,
            exposure_layer,
            attribute_name=self.target_field
        )

    # Initialise total population affected by each hazard zone
    affected_population = {}
    for hazard_zone in self.hazard_zones:
        affected_population[hazard_zone] = 0

    # Count total affected population per hazard zone
    for row in interpolated_layer.get_data():
        # Get population at this location
        population = row[self.target_field]
        if not numpy.isnan(population):
            population = float(population)
            # Update population count for this hazard zone
            hazard_zone = row[hazard_zone_attribute]
            affected_population[hazard_zone] += population

    # Count total population from exposure layer
    total_population = population_rounding(
        int(numpy.nansum(exposure_layer.get_data())))

    # Count total affected population.
    # IMPROVEMENT: sum() replaces reduce(lambda x, y: x + y, [...]);
    # identical result, simpler, and it returns 0 (instead of raising
    # TypeError) if the mapping were ever empty.
    total_affected_population = sum(affected_population.values())

    # check for zero impact
    if total_affected_population == 0:
        table_body = [
            self.question,
            TableRow(
                [tr('People impacted'),
                 '%s' % format_int(total_affected_population)],
                header=True)]
        message = Table(table_body).toNewlineFreeString()
        raise ZeroImpactException(message)

    # Generate impact report for the pdf map
    blank_cell = ''
    table_body = [
        self.question,
        TableRow(
            [
                tr('People impacted'),
                '%s' % format_int(
                    population_rounding(total_affected_population)),
                blank_cell],
            header=True)]

    for hazard_zone in self.hazard_zones:
        table_body.append(
            TableRow(
                [
                    hazard_zone,
                    format_int(
                        population_rounding(
                            affected_population[hazard_zone]))
                ]))

    table_body.extend([
        TableRow(tr(
            'Map shows the number of people impacted in each of the '
            'hazard zones.'))])

    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend(
        [TableRow(tr('Notes'), header=True),
         tr('Total population: %s in the exposure layer') % format_int(
             total_population),
         tr('"nodata" values in the exposure layer are treated as 0 '
            'when counting the affected or total population')]
    )

    impact_summary = Table(table_body).toNewlineFreeString()

    # Create style
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    classes = create_classes(
        covered_exposure_layer.get_data().flat[:], len(colours))
    interval_classes = humanize_class(classes)

    # Define style info for output polygons showing population counts
    style_classes = []
    for i in xrange(len(colours)):
        style_class = dict()
        style_class['label'] = create_label(interval_classes[i])
        # Classes 1, 4 and 7 get descriptive low/medium/high labels.
        if i == 1:
            label = create_label(
                interval_classes[i],
                tr('Low Population [%i people/cell]' % classes[i]))
        elif i == 4:
            label = create_label(
                interval_classes[i],
                tr('Medium Population [%i people/cell]' % classes[i]))
        elif i == 7:
            label = create_label(
                interval_classes[i],
                tr('High Population [%i people/cell]' % classes[i]))
        else:
            label = create_label(interval_classes[i])
        # First (zero) class is fully transparent.
        if i == 0:
            transparency = 100
        else:
            transparency = 0
        style_class['label'] = label
        style_class['quantity'] = classes[i]
        style_class['colour'] = colours[i]
        style_class['transparency'] = transparency
        style_classes.append(style_class)

    # Override style info with new classes and name
    style_info = dict(
        target_field=None,
        style_classes=style_classes,
        style_type='rasterStyle')

    # For printing map purpose
    map_title = tr('People impacted by each hazard zone')
    legend_notes = tr('Thousand separator is represented by %s' %
                      get_thousand_separator())
    legend_units = tr('(people per cell)')
    legend_title = tr('Population')

    # Create raster impact layer and return
    impact_layer = Raster(
        data=covered_exposure_layer.get_data(),
        projection=covered_exposure_layer.get_projection(),
        geotransform=covered_exposure_layer.get_geotransform(),
        name=tr('People impacted by each hazard zone'),
        keywords={'impact_summary': impact_summary,
                  'impact_table': impact_table,
                  'target_field': self.target_field,
                  'map_title': map_title,
                  'legend_notes': legend_notes,
                  'legend_units': legend_units,
                  'legend_title': legend_title},
        style_info=style_info)
    self._impact = impact_layer
    return impact_layer