def run(self, layers):
    """Risk plugin for volcano population evacuation

    Input
      layers: List of layers expected to contain
          H: Vector polygon layer of volcano impact zones
          P: Raster layer of population data on the same grid as H

    Counts number of people exposed to volcano event.

    Return
      Map of population exposed to the volcano hazard zone.
      Table with number of people evacuated and supplies required.
    """

    # Identify hazard and exposure layers
    H = get_hazard_layer(layers)  # Volcano impact zones (vector)
    E = get_exposure_layer(layers)  # Population raster

    question = get_question(H.get_name(), E.get_name(), self)

    # Input checks: hazard must be vector, and polygon or point geometry
    if not H.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % H.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. '
           'I got %s with layer '
           'type %s' % (H.get_name(),
                        H.get_geometry_name()))
    if not (H.is_polygon_data or H.is_point_data):
        raise Exception(msg)

    if H.is_point_data:
        # Point hazard (volcano craters): build concentric evacuation
        # circles around each point at the configured radii
        radii = self.parameters['R [km]']
        centers = H.get_geometry()
        attributes = H.get_data()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        H = make_circular_polygon(centers, rad_m, attributes=attributes)
        #H.write_to_file('Evac_zones_%s.shp' % str(radii))  # To check

        category_title = 'Radius'
        category_header = tr('Distance [km]')
        category_names = radii
        name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
    else:
        # Polygon hazard: use the hazard map's own KRB zone categories
        category_title = 'KRB'
        category_header = tr('Category')
        # FIXME (Ole): Change to English and use translation system
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']
        name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map
        # NOTE(review): this assignment appears unused in this branch
        attributes = H.get_data()

    # Get names of volcanos considered (unique values of name attribute)
    if name_attribute in H.get_attribute_names():
        D = {}
        for att in H.get_data():
            # Run through all polygons and get unique names
            D[att[name_attribute]] = None

        volcano_names = ''
        for name in D:
            volcano_names += '%s, ' % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr('Not specified in data')

    # The category attribute must exist on the hazard layer
    if not category_title in H.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (H.get_name(), category_title))
        raise InaSAFEError(msg)

    # Run interpolation function for polygon2raster:
    # assigns each exposed raster cell to its containing hazard polygon
    P = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='population')

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a population count of zero
    new_attributes = H.get_data()
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count affected population per polygon and total
    evacuated = 0
    for attr in P.get_data():
        # Get population at this location
        pop = float(attr['population'])

        # Update population count for associated polygon
        poly_id = attr['polygon_id']
        new_attributes[poly_id][self.target_field] += pop

        # Update population count for each category
        cat = new_attributes[poly_id][category_title]
        categories[cat] += pop

    # Count totals
    total = int(numpy.sum(E.get_data(nan=0)))

    # Don't show digits less than a 1000 (round down to nearest 1000)
    if total > 1000:
        total = total // 1000 * 1000

    # Count number and cumulative for each zone
    cum = 0
    pops = {}
    cums = {}
    for name in category_names:
        if category_title == 'Radius':
            key = name * 1000  # Convert to meters (keys stored in meters)
        else:
            key = name
        pop = int(categories[key])
        if pop > 1000:
            pop = pop // 1000 * 1000
        cum += pop
        if cum > 1000:
            cum = cum // 1000 * 1000
        pops[name] = pop
        cums[name] = cum

    # Use final accumulation as total number needing evac
    evacuated = cum

    # Calculate estimated needs based on BNPB Perka
    # 7/2008 minimum bantuan (minimum assistance regulation)
    # FIXME (Ole): Refactor into one function to be shared
    rice = int(evacuated * 2.8)
    drinking_water = int(evacuated * 17.5)
    water = int(evacuated * 67)
    family_kits = int(evacuated / 5)
    toilets = int(evacuated / 20)

    # Generate impact report for the pdf map
    blank_cell = ''
    table_body = [question,
                  TableRow([tr('Volcanos considered'),
                            '%s' % volcano_names, blank_cell],
                           header=True),
                  TableRow([tr('People needing evacuation'),
                            '%s' % format_int(evacuated),
                            blank_cell],
                           header=True),
                  TableRow([category_header,
                            tr('Total'), tr('Cumulative')],
                           header=True)]

    for name in category_names:
        table_body.append(TableRow([name,
                                    format_int(pops[name]),
                                    format_int(cums[name])]))

    table_body.extend([TableRow(tr('Map shows population affected in '
                                   'each of volcano hazard polygons.')),
                       TableRow([tr('Needs per week'), tr('Total'),
                                 blank_cell],
                                header=True),
                       [tr('Rice [kg]'), format_int(rice), blank_cell],
                       [tr('Drinking Water [l]'),
                        format_int(drinking_water), blank_cell],
                       [tr('Clean Water [l]'), format_int(water),
                        blank_cell],
                       [tr('Family Kits'), format_int(family_kits),
                        blank_cell],
                       [tr('Toilets'), format_int(toilets),
                        blank_cell]])
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total population %s in the viewable area')
                       % format_int(total),
                       tr('People need evacuation if they are within the '
                          'volcanic hazard zones.')])
    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('People affected by volcanic hazard zone')

    # Define classes for legend for affected population counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    population_counts = [x[self.target_field] for x in new_attributes]
    cls = [0] + numpy.linspace(1,
                               max(population_counts),
                               len(colours)).tolist()

    # Define style info for output polygons showing population counts
    style_classes = []
    for i, colour in enumerate(colours):
        lo = cls[i]
        hi = cls[i + 1]
        if i == 0:
            label = tr('0')
        else:
            label = tr('%i - %i') % (lo, hi)
        entry = dict(label=label, colour=colour,
                     min=lo, max=hi,
                     transparency=50, size=1)
        style_classes.append(entry)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      legend_title=tr('Population Count'))

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=H.get_projection(),
               geometry=H.get_geometry(as_geometry_objects=True),
               name=tr('Population affected by volcanic hazard zone'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(layers, a=0.97429, b=11.037):
    """Risk plugin for earthquake fatalities

    Input
      layers: List of layers expected to contain
          H: Raster layer of MMI ground shaking
          P: Raster layer of population data on the same grid as H
      a, b: Coefficients of the power-law fatality model
            fatality_rate = 10 ** (a * MMI - b)

    Returns a Raster layer of estimated fatalities with an HTML
    impact summary (in Indonesian) attached as a keyword.
    """

    # Identify input layers
    intensity = get_hazard_layer(layers)

    # Get population and gender ratio.  A layer without a 'datatype'
    # keyword, or whose datatype does not mention 'ratio', is treated
    # as the population layer; a 'ratio' layer is the gender ratio.
    population = gender_ratio = None
    for layer in get_exposure_layers(layers):
        keywords = layer.get_keywords()

        if 'datatype' not in keywords:
            population = layer
        else:
            datatype = keywords['datatype']
            if not 'ratio' in datatype:
                population = layer
            else:
                # 'female' in datatype and 'ratio' in datatype:
                gender_ratio_unit = keywords['unit']

                msg = ('Unit for gender ratio must be either '
                       '"percent" or "ratio"')
                if gender_ratio_unit not in ['percent', 'ratio']:
                    raise RuntimeError(msg)
                gender_ratio = layer

    msg = 'No population layer was found in: %s' % str(layers)
    if population is None:
        raise RuntimeError(msg)

    # Extract data (NaN cells treated as zero)
    H = intensity.get_data(nan=0)
    P = population.get_data(nan=0)

    #print
    #print 'Population', population.get_name()

    # Calculate impact: fatalities per cell from power model times pop
    F = 10 ** (a * H - b) * P

    if gender_ratio is not None:
        # Extract gender ratio at each pixel (as ratio)
        G = gender_ratio.get_data(nan=0)
        if gender_ratio_unit == 'percent':
            # In-place scale to ratio.
            # NOTE(review): assumes G has a float dtype — integer
            # in-place division would truncate; confirm upstream.
            G /= 100

        # Calculate breakdown by gender
        P_female = P * G
        P_male = P - P_female

        F_female = F * G
        F_male = F - F_female

    # Generate text with result for this study
    count = numpy.nansum(F.flat)
    total = numpy.nansum(P.flat)

    # Create report (labels intentionally in Indonesian)
    impact_summary = ('<table border="0" width="320px">'
                      ' <tr><td>%s:</td><td>%s</td></tr>'
                      % ('Jumlah Penduduk', format_int(int(total))))
    if gender_ratio is not None:
        impact_summary += (' <tr><td>%s:</td><td>%s</td></tr>'
                           % (' - Wanita',
                              format_int(int(numpy.nansum(P_female.flat)))))
        impact_summary += (' <tr><td>%s:</td><td>%s</td></tr>'
                           % (' - Pria',
                              format_int(int(numpy.nansum(P_male.flat)))))
    impact_summary += (' <tr><td>%s:</td><td>%s</td></tr>'
                       % ('Perkiraan Orang Meninggal',
                          format_int(int(count))))
    if gender_ratio is not None:
        impact_summary += (' <tr><td>%s:</td><td>%s</td></tr>'
                           % (' - Wanita',
                              format_int(int(numpy.nansum(F_female.flat)))))
        impact_summary += (' <tr><td>%s:</td><td>%s</td></tr>'
                           % (' - Pria',
                              format_int(int(numpy.nansum(F_male.flat)))))
    impact_summary += '</table>'

    # Create new layer and return
    R = Raster(F,
               projection=population.get_projection(),
               geotransform=population.get_geotransform(),
               name='Estimated fatalities',
               keywords={'impact_summary': impact_summary},
               style_info=earthquake_fatality_style)

    # See issue #126
    return R
def run(self, layers):
    """Risk plugin for volcano population evacuation

    NOTE(review): the original docstring described a flood/raster
    hazard, but the code below clearly handles a vector volcano
    hazard (polygon impact zones or point craters) — corrected here.

    Input
      layers: List of layers expected to contain
          H: Vector layer of volcano impact zones (polygons) or
             volcano locations (points)
          P: Raster layer of population data on the same grid as H

    Counts number of people exposed to the volcano event.

    Return
      Map of population exposed to the volcano hazard zone.
      Table with number of people evacuated and supplies required.
    """
    # Identify hazard and exposure layers
    H = get_hazard_layer(layers)  # Volcano impact zones (vector)
    E = get_exposure_layer(layers)  # Population raster

    question = get_question(H.get_name(), E.get_name(), self)

    # Input checks: hazard must be vector, and polygon or point geometry
    if not H.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % H.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. '
           'I got %s with layer '
           'type %s' % (H.get_name(),
                        H.get_geometry_name()))
    if not (H.is_polygon_data or H.is_point_data):
        raise Exception(msg)

    if H.is_point_data:
        # Point hazard: build concentric evacuation circles at the
        # configured radii around each volcano location
        radii = self.parameters['R [km]']
        centers = H.get_geometry()
        attributes = H.get_data()
        rad_m = [x * 1000 for x in radii]  # Convert to meters
        H = make_circular_polygon(centers, rad_m, attributes=attributes)
        #H.write_to_file('Evac_zones_%s.shp' % str(radii))  # To check

        category_title = 'Radius'
        category_header = tr('Distance [km]')
        category_names = radii
        name_attribute = 'NAME'  # As in e.g. the Smithsonian dataset
    else:
        # Polygon hazard: use the hazard map's own KRB zone categories
        category_title = 'KRB'
        category_header = tr('Category')
        # FIXME (Ole): Change to English and use translation system
        category_names = [
            'Kawasan Rawan Bencana III',
            'Kawasan Rawan Bencana II',
            'Kawasan Rawan Bencana I'
        ]
        name_attribute = 'GUNUNG'  # As in e.g. BNPB hazard map
        # NOTE(review): this assignment appears unused in this branch
        attributes = H.get_data()

    # Get names of volcanos considered (unique values of name attribute)
    if name_attribute in H.get_attribute_names():
        D = {}
        for att in H.get_data():
            # Run through all polygons and get unique names
            D[att[name_attribute]] = None

        volcano_names = ''
        for name in D:
            volcano_names += '%s, ' % name
        volcano_names = volcano_names[:-2]  # Strip trailing ', '
    else:
        volcano_names = tr('Not specified in data')

    # The category attribute must exist on the hazard layer
    if not category_title in H.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (H.get_name(), category_title))
        raise InaSAFEError(msg)

    # Run interpolation function for polygon2raster:
    # assigns each exposed raster cell to its containing hazard polygon
    P = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='population')

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a population count of zero
    new_attributes = H.get_data()
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count affected population per polygon and total
    evacuated = 0
    for attr in P.get_data():
        # Get population at this location
        pop = float(attr['population'])

        # Update population count for associated polygon
        poly_id = attr['polygon_id']
        new_attributes[poly_id][self.target_field] += pop

        # Update population count for each category
        cat = new_attributes[poly_id][category_title]
        categories[cat] += pop

    # Count totals
    total = int(numpy.sum(E.get_data(nan=0)))

    # Don't show digits less than a 1000 (round down to nearest 1000)
    if total > 1000:
        total = total // 1000 * 1000

    # Count number and cumulative for each zone
    cum = 0
    pops = {}
    cums = {}
    for name in category_names:
        if category_title == 'Radius':
            key = name * 1000  # Convert to meters (keys stored in meters)
        else:
            key = name
        pop = int(categories[key])
        if pop > 1000:
            pop = pop // 1000 * 1000
        cum += pop
        if cum > 1000:
            cum = cum // 1000 * 1000
        pops[name] = pop
        cums[name] = cum

    # Use final accumulation as total number needing evac
    evacuated = cum

    # Calculate estimated needs based on BNPB Perka
    # 7/2008 minimum bantuan (minimum assistance regulation)
    # FIXME (Ole): Refactor into one function to be shared
    rice = int(evacuated * 2.8)
    drinking_water = int(evacuated * 17.5)
    water = int(evacuated * 67)
    family_kits = int(evacuated / 5)
    toilets = int(evacuated / 20)

    # Generate impact report for the pdf map
    blank_cell = ''
    table_body = [
        question,
        TableRow(
            [tr('Volcanos considered'),
             '%s' % volcano_names, blank_cell],
            header=True),
        TableRow([
            tr('People needing evacuation'),
            '%s' % format_int(evacuated),
            blank_cell
        ], header=True),
        TableRow(
            [category_header, tr('Total'), tr('Cumulative')],
            header=True)
    ]

    for name in category_names:
        table_body.append(
            TableRow(
                [name, format_int(pops[name]), format_int(cums[name])]))

    table_body.extend([
        TableRow(
            tr('Map shows population affected in '
               'each of volcano hazard polygons.')),
        TableRow([tr('Needs per week'), tr('Total'), blank_cell],
                 header=True),
        [tr('Rice [kg]'), format_int(rice), blank_cell],
        [tr('Drinking Water [l]'), format_int(drinking_water), blank_cell],
        [tr('Clean Water [l]'), format_int(water), blank_cell],
        [tr('Family Kits'), format_int(family_kits), blank_cell],
        [tr('Toilets'), format_int(toilets), blank_cell]
    ])
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([
        TableRow(tr('Notes'), header=True),
        tr('Total population %s in the viewable area') % format_int(total),
        tr('People need evacuation if they are within the '
           'volcanic hazard zones.')
    ])
    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('People affected by volcanic hazard zone')

    # Define classes for legend for affected population counts
    colours = [
        '#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600',
        '#FF0000', '#7A0000'
    ]
    population_counts = [x[self.target_field] for x in new_attributes]
    cls = [0] + numpy.linspace(1,
                               max(population_counts),
                               len(colours)).tolist()

    # Define style info for output polygons showing population counts
    style_classes = []
    for i, colour in enumerate(colours):
        lo = cls[i]
        hi = cls[i + 1]
        if i == 0:
            label = tr('0')
        else:
            label = tr('%i - %i') % (lo, hi)
        entry = dict(label=label, colour=colour,
                     min=lo, max=hi,
                     transparency=50, size=1)
        style_classes.append(entry)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      legend_title=tr('Population Count'))

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=H.get_projection(),
               geometry=H.get_geometry(as_geometry_objects=True),
               name=tr('Population affected by volcanic hazard zone'),
               keywords={
                   'impact_summary': impact_summary,
                   'impact_table': impact_table,
                   'map_title': map_title,
                   'target_field': self.target_field
               },
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for Padang building survey

    Input
      layers: List of layers expected to contain
          H: Raster layer of MMI ground shaking
          E: Vector layer of building locations

    Assigns each building a percentage damage estimate from a
    lognormal fragility curve keyed on its vulnerability class,
    and returns a Vector layer with an HTML impact summary.
    """

    # Extract data
    H = get_hazard_layer(layers)  # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Map from different kinds of datasets to Padang vulnerability classes
    datatype = E.get_keywords()['datatype']
    vclass_tag = 'VCLASS'
    if datatype.lower() == 'osm':
        # Map from OSM attributes
        Emap = osm2padang(E)
    elif datatype.lower() == 'sigab':
        # Map from SIGAB attributes
        Emap = sigab2padang(E)
    else:
        # Assume data already carries the VCLASS attribute
        Emap = E

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, Emap,
                                              attribute_name='MMI')

    # Extract relevant numerical data
    attributes = I.get_data()
    N = len(I)

    # Calculate building damage
    count_high = count_medium = count_low = count_none = 0
    for i in range(N):
        mmi = float(attributes[i]['MMI'])

        building_type = Emap.get_data(vclass_tag, i)
        damage_params = damage_curves[building_type]
        beta = damage_params['beta']
        median = damage_params['median']
        # Percentage damage from lognormal fragility curve
        percent_damage = lognormal_cdf(mmi,
                                       median=median,
                                       sigma=beta) * 100

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = percent_damage

        # Calculate statistics (bins are disjoint, so plain ifs suffice)
        if percent_damage < 10:
            count_none += 1
        if 10 <= percent_damage < 33:
            count_low += 1
        if 33 <= percent_damage < 66:
            count_medium += 1
        if 66 <= percent_damage:
            count_high += 1

    # Generate impact report
    table_body = [question,
                  TableRow([tr('Buildings'), tr('Total')],
                           header=True),
                  TableRow([tr('All'), N]),
                  TableRow([tr('No damage'), format_int(count_none)]),
                  TableRow([tr('Low damage'), format_int(count_low)]),
                  TableRow([tr('Medium damage'), format_int(count_medium)]),
                  TableRow([tr('High damage'), format_int(count_high)])]
    table_body.append(TableRow(tr('Notes'), header=True))
    table_body.append(tr('Levels of impact are defined by post 2009 '
                         'Padang earthquake survey conducted by Geoscience '
                         'Australia and Institute of Teknologi Bandung.'))
    table_body.append(tr('Unreinforced masonry is assumed where no '
                         'structural information is available.'))
    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = tr('Earthquake damage to buildings')

    # Create style: one class per damage bracket
    style_classes = [dict(label=tr('No damage'), min=0, max=10,
                          colour='#00ff00', transparency=0),
                     dict(label=tr('Low damage'), min=10, max=33,
                          colour='#ffff00', transparency=0),
                     dict(label=tr('Medium damage'), min=33, max=66,
                          colour='#ffaa00', transparency=0),
                     dict(label=tr('High damage'), min=66, max=100,
                          colour='#ff0000', transparency=0)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=E.get_projection(),
               geometry=E.get_geometry(),
               name='Estimated pct damage',
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
class ITBFatalityFunction(FunctionProvider):
    """Indonesian Earthquake Fatality Model

    This model was developed by Institut Tecknologi Bandung (ITB) and
    implemented by Dr Hadi Ghasemi, Geoscience Australia

    Reference:

    Indonesian Earthquake Building-Damage and Fatality Models and
    Post Disaster Survey Guidelines Development,
    Bali, 27-28 February 2012, 54pp.

    Algorithm:

    In this study, the same functional form as Allen (2009) is adopted
    to express fatality rate as a function of intensity (see Eq. 10 in
    the report). The Matlab built-in function (fminsearch) for
    Nelder-Mead algorithm is used to estimate the model parameters. The
    objective function (L2G norm) that is minimised during the
    optimisation is the same as the one used by Jaiswal et al. (2010).

    The coefficients used in the indonesian model are
    x=0.62275231, y=8.03314466, zeta=2.15

    Allen, T. I., Wald, D. J., Earle, P. S., Marano, K. D., Hotovec,
    A. J., Lin, K., and Hearne, M., 2009. An Atlas of ShakeMaps and
    population exposure catalog for earthquake loss modeling, Bull.
    Earthq. Eng. 7, 701-718.

    Jaiswal, K., and Wald, D., 2010. An empirical model for global
    earthquake fatality estimation, Earthq. Spectra 26, 1017-1037.

    Caveats and limitations:

    The current model is the result of the above mentioned workshop and
    reflects the best available information. However, the current model
    has a number of issues listed below and is expected to evolve
    further over time.

    1 - The model is based on limited number of observed fatality rates
        during 4 past fatal events.
    2 - The model clearly over-predicts the fatality rates at
        intensities higher than VIII.
    3 - The model only estimates the expected fatality rate for a given
        intensity level; however the associated uncertainty for the
        proposed model is not addressed.
    4 - There are few known mistakes in developing the current model:
        - rounding MMI values to the nearest 0.5,
        - Implementing Finite-Fault models of candidate events, and
        - consistency between selected GMPEs with those in use by BMKG.
          These issues will be addressed by ITB team in the final report.

    :author Hadi Ghasemi
    :rating 3

    :param requires category=='hazard' and \
                    subcategory=='earthquake' and \
                    layertype=='raster' and \
                    unit=='MMI'

    :param requires category=='exposure' and \
                    subcategory=='population' and \
                    layertype=='raster'
    """

    title = tr('Die or be displaced')
    defaults = get_defaults()
    parameters = dict(x=0.62275231, y=8.03314466,  # Model coefficients
                      # Rates of people displaced for each MMI level
                      displacement_rate={1: 0, 2: 0, 3: 0, 4: 0, 5: 0,
                                         6: 1.0, 7: 1.0, 8: 1.0, 9: 1.0,
                                         10: 1.0},
                      # Threshold below which layer should be transparent
                      tolerance=0.01,
                      calculate_displaced_people=True,
                      postprocessors={
                          'Gender': {'on': True},
                          'Age': {'on': True,
                                  'params': {
                                      'youth_ratio': defaults['YOUTH_RATIO'],
                                      'adult_ratio': defaults['ADULT_RATIO'],
                                      'elder_ratio': defaults['ELDER_RATIO']}}})

    def run(self, layers):
        """Indonesian Earthquake Fatality Model

        Input
          layers: List of layers expected to contain
              H: Raster layer of MMI ground shaking
              P: Raster layer of population density

        Returns a Raster layer of estimated displaced population with
        fatality/displacement totals and an impact report attached as
        keywords.
        """

        # Establish model coefficients
        x = self.parameters['x']
        y = self.parameters['y']

        # Define percentages of people being displaced at each mmi level
        displacement_rate = self.parameters['displacement_rate']

        # Tolerance for transparency
        tolerance = self.parameters['tolerance']

        # Extract input layers
        intensity = get_hazard_layer(layers)
        population = get_exposure_layer(layers)

        question = get_question(intensity.get_name(),
                                population.get_name(),
                                self)

        # Extract data grids
        H = intensity.get_data()  # Ground Shaking
        P = population.get_data(scaling=True)  # Population Density

        # Calculate population affected by each MMI level
        # FIXME (Ole): this range is 2-9. Should 10 be included?
        mmi_range = range(2, 10)
        number_of_exposed = {}
        number_of_displaced = {}
        number_of_fatalities = {}

        # Calculate fatality rates for observed Intensity values (H
        # based on ITB power model
        R = numpy.zeros(H.shape)
        for mmi in mmi_range:
            # Identify cells where MMI is in class i
            mask = (H > mmi - 0.5) * (H <= mmi + 0.5)

            # Count population affected by this shake level
            I = numpy.where(mask, P, 0)

            # Calculate expected number of fatalities per level
            fatality_rate = numpy.power(10.0, x * mmi - y)
            F = fatality_rate * I

            # Calculate expected number of displaced people per level
            try:
                D = displacement_rate[mmi] * I
            except KeyError as e:
                # FIX: 'except KeyError, e' was Python-2-only syntax;
                # 'as e' is valid in both Python 2.6+ and Python 3
                msg = 'mmi = %i, I = %s, Error msg: %s' % (mmi, str(I),
                                                           str(e))
                raise InaSAFEError(msg)

            # Adjust displaced people to disregard fatalities.
            # Set to zero if there are more fatalities than displaced.
            D = numpy.where(D > F, D - F, 0)

            # Sum up numbers for map
            R += D  # Displaced

            # Generate text with result for this study
            # This is what is used in the real time system exposure table
            number_of_exposed[mmi] = numpy.nansum(I.flat)
            number_of_displaced[mmi] = numpy.nansum(D.flat)
            number_of_fatalities[mmi] = numpy.nansum(F.flat)

        # Set resulting layer to NaN when less than a threshold. This is
        # to achieve transparency (see issue #126).
        R[R < tolerance] = numpy.nan

        # Total statistics, rounded to the nearest 1000
        total = int(round(numpy.nansum(P.flat) / 1000) * 1000)

        # Compute number of fatalities
        fatalities = int(round(numpy.nansum(number_of_fatalities.values())
                               / 1000)) * 1000

        # Compute number of people displaced due to building collapse
        displaced = int(round(numpy.nansum(number_of_displaced.values())
                              / 1000)) * 1000

        # FIX: the original built a throwaway report here that was
        # immediately discarded when table_body was reassigned below.
        # Only its side effect — zeroing 'displaced' when displacement
        # reporting is off — is behaviourally relevant and is kept.
        if not self.parameters['calculate_displaced_people']:
            displaced = 0

        # Calculate estimated needs based on BNPB Perka 7/2008 minimum
        # bantuan (minimum assistance regulation)
        # FIXME: Refactor and share
        rice = int(displaced * 2.8)
        drinking_water = int(displaced * 17.5)
        water = int(displaced * 67)
        family_kits = int(displaced / 5)
        toilets = int(displaced / 20)

        # Generate impact report for the pdf map
        table_body = [question,
                      TableRow([tr('Fatalities'),
                                '%s' % format_int(fatalities)],
                               header=True),
                      TableRow([tr('People displaced'),
                                '%s' % format_int(displaced)],
                               header=True),
                      TableRow(tr('Map shows density estimate of '
                                  'displaced population')),
                      TableRow([tr('Needs per week'), tr('Total')],
                               header=True),
                      [tr('Rice [kg]'), format_int(rice)],
                      [tr('Drinking Water [l]'),
                       format_int(drinking_water)],
                      [tr('Clean Water [l]'), format_int(water)],
                      [tr('Family Kits'), format_int(family_kits)],
                      [tr('Toilets'), format_int(toilets)]]
        # NOTE: the original computed impact_table here and again at the
        # end; only the final assignment was ever used, so the dead
        # store has been removed.

        table_body.append(TableRow(tr('Action Checklist:'), header=True))
        if fatalities > 0:
            table_body.append(tr('Are there enough victim identification '
                                 'units available for %s people?') %
                              format_int(fatalities))
        if displaced > 0:
            table_body.append(tr('Are there enough shelters and relief '
                                 'items available for %s people?') %
                              format_int(displaced))
            table_body.append(
                TableRow(tr('If yes, where are they located and '
                            'how will we distribute them?')))
            table_body.append(
                TableRow(tr('If no, where can we obtain '
                            'additional relief items from and '
                            'how will we transport them?')))

        # Extend impact report for on-screen display
        table_body.extend([TableRow(tr('Notes'), header=True),
                           tr('Total population: %i') % total,
                           tr('People are considered to be displaced if '
                              'they experience and survive a shake level'
                              'of more than 5 on the MMI scale '),
                           tr('Minimum needs are defined in BNPB '
                              'regulation 7/2008')])

        # NOTE: a second 'Notes' section is appended here, matching the
        # original output exactly (the duplication existed upstream)
        table_body.append(TableRow(tr('Notes'), header=True))
        table_body.append(tr('Fatality model is from '
                             'Institute of Teknologi Bandung 2012.'))
        table_body.append(tr('Population numbers rounded to nearest 1000.'))

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary
        map_title = tr('Earthquake impact to population')

        # Create style info dynamically: five classes spanning the
        # observed displaced-population range
        classes = numpy.linspace(numpy.nanmin(R.flat[:]),
                                 numpy.nanmax(R.flat[:]), 5)
        style_classes = [dict(colour='#EEFFEE', quantity=classes[0],
                              transparency=100,
                              label=tr('%.2f people/cell') % classes[0]),
                         dict(colour='#FFFF7F', quantity=classes[1],
                              transparency=30),
                         dict(colour='#E15500', quantity=classes[2],
                              transparency=30,
                              label=tr('%.2f people/cell') % classes[2]),
                         dict(colour='#E4001B', quantity=classes[3],
                              transparency=30),
                         dict(colour='#730000', quantity=classes[4],
                              transparency=30,
                              label=tr('%.2f people/cell') % classes[4])]
        style_info = dict(target_field=None,
                          style_classes=style_classes)

        # Create new layer and return
        L = Raster(R,
                   projection=population.get_projection(),
                   geotransform=population.get_geotransform(),
                   keywords={'impact_summary': impact_summary,
                             'total_population': total,
                             'total_fatalities': fatalities,
                             'impact_table': impact_table,
                             'map_title': map_title},
                   name=tr('Estimated displaced population'),
                   style_info=style_info)

        # Maybe return a shape file with contours instead
        return L
def run(self, layers):
    """Risk plugin for flood population evacuation

    Input
      layers: List of layers expected to contain
          H: Vector polygon layer of flood depth
          P: Raster layer of population data on the same grid as H

    Counts number of people exposed to areas identified as flood prone.

    Return
      Map of population exposed to flooding
      Table with number of people evacuated and supplies required
    """
    # Identify hazard and exposure layers
    H = get_hazard_layer(layers)  # Flood inundation
    E = get_exposure_layer(layers)

    question = get_question(H.get_name(), E.get_name(), self)

    # Check that hazard is polygon type
    if not H.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % H.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon layer. I got %s with layer '
           'type %s' % (H.get_name(), H.get_geometry_name()))
    if not H.is_polygon_data:
        raise Exception(msg)

    # Run interpolation function for polygon2raster:
    # assigns each exposed raster cell to its containing hazard polygon
    P = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='population')

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a population count of zero
    new_attributes = H.get_data()
    category_title = 'FLOODPRONE'  # FIXME: Should come from keywords
    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count affected population per polygon, per category and total
    affected_population = 0
    for attr in P.get_data():
        affected = False
        if 'affected' in attr:
            res = attr['affected']
            if res is not None:
                affected = res
        elif 'FLOODPRONE' in attr:
            # If there isn't an 'affected' attribute,
            res = attr['FLOODPRONE']
            if res is not None:
                affected = res.lower() == 'yes'
        elif 'Affected' in attr:
            # Check the default attribute assigned for points
            # covered by a polygon
            res = attr['Affected']
            # FIX: the original assigned to an unused local 'x' here,
            # so a present 'Affected' attribute was silently ignored
            # and affected always stayed False
            if res is not None:
                affected = res
        else:
            # there is no flood related attribute
            # FIX: the original message contained an uninterpolated %s
            # and named "Flooded" although the code checks 'affected'
            msg = ('No flood related attribute found in %s. '
                   'I was looking for either "affected", "FLOODPRONE" '
                   'or "Affected". The latter should have been '
                   'automatically set by call to '
                   'assign_hazard_values_to_exposure_data(). '
                   'Sorry I can\'t help more.' % str(attr))
            raise Exception(msg)

        if affected:
            # Get population at this location
            pop = float(attr['population'])

            # Update population count for associated polygon
            poly_id = attr['polygon_id']
            new_attributes[poly_id][self.target_field] += pop

            # Update population count for each category
            cat = new_attributes[poly_id][category_title]
            categories[cat] += pop

            # Update total
            affected_population += pop

    # Estimate number of people in need of evacuation
    evacuated = (affected_population *
                 self.parameters['evacuation_percentage'] / 100.0)

    total = int(numpy.sum(E.get_data(nan=0, scaling=False)))

    # Don't show digits less than a 1000 (round down to nearest 1000)
    if total > 1000:
        total = total // 1000 * 1000
    if evacuated > 1000:
        evacuated = evacuated // 1000 * 1000

    # Calculate estimated needs based on BNPB Perka 7/2008 minimum
    # bantuan (minimum assistance regulation)
    rice = evacuated * 2.8  # 400g per person per day
    drinking_water = evacuated * 17.5  # 2.5L per person per day
    water = evacuated * 105  # 15L per person per day
    family_kits = evacuated / 5  # assume 5 people per family
    toilets = evacuated / 20  # 20 people to 1 toilet

    # Generate impact report for the pdf map
    table_body = [question,
                  TableRow([tr('People affected'),
                            '%s' % format_int(int(affected_population))],
                           header=True),
                  TableRow([tr('People needing evacuation'),
                            '%s*' % format_int(int(evacuated))],
                           header=True),
                  TableRow([TableCell(
                      tr('* Evacuation count rounded to nearest 1000'),
                      col_span=2)],
                      header=False),
                  TableRow([tr('Evacuation threshold'),
                            '%s%%' % format_int(
                                self.parameters['evacuation_percentage'])],
                           header=True),
                  TableRow(tr('Map shows population affected in each flood'
                              ' prone area ')),
                  TableRow([tr('Needs per week'), tr('Total')],
                           header=True),
                  [tr('Rice [kg]'), format_int(int(rice))],
                  [tr('Drinking Water [l]'),
                   format_int(int(drinking_water))],
                  [tr('Clean Water [l]'), format_int(int(water))],
                  [tr('Family Kits'), format_int(int(family_kits))],
                  [tr('Toilets'), format_int(int(toilets))]]

    impact_table = Table(table_body).toNewlineFreeString()

    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(tr('How will warnings be disseminated?')))
    table_body.append(TableRow(tr('How will we reach stranded people?')))
    table_body.append(TableRow(tr('Do we have enough relief items?')))
    table_body.append(TableRow(tr('If yes, where are they located and how '
                                  'will we distribute them?')))
    table_body.append(TableRow(tr('If no, where can we obtain additional '
                                  'relief items from and how will we '
                                  'transport them to here?')))

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total population: %s') % format_int(total),
                       tr('People need evacuation if in area identified '
                          'as "Flood Prone"'),
                       tr('Minimum needs are defined in BNPB '
                          'regulation 7/2008')])

    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('People affected by flood prone areas')

    # Define classes for legend for flooded population counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    # FIX: legend classes were computed from x['population'], which is
    # not the field initialised on new_attributes; use target_field as
    # the sibling impact functions do
    population_counts = [x[self.target_field] for x in new_attributes]
    cls = [0] + numpy.linspace(1,
                               max(population_counts),
                               len(colours)).tolist()

    # Define style info for output polygons showing population counts
    style_classes = []
    for i, colour in enumerate(colours):
        lo = cls[i]
        hi = cls[i + 1]
        if i == 0:
            label = tr('0')
            transparency = 100
        else:
            label = tr('%i - %i') % (lo, hi)
            transparency = 0
        entry = dict(label=label, colour=colour,
                     min=lo, max=hi,
                     transparency=transparency, size=1)
        style_classes.append(entry)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      legend_title=tr('Population Count'))

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=H.get_projection(),
               geometry=H.get_geometry(),
               name=tr('Population affected by flood prone areas'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for earthquake school damage.

    Input
      layers: List of layers expected to contain
              H: Raster layer of ground shaking (MMI)
              E: Vector layer of building locations

    Classifies each building into low/medium/high damage based on the
    shaking level interpolated at its location and per-class thresholds
    from ``damage_parameters``.

    Return
      Vector layer of buildings with a damage level attribute and an
      HTML impact summary.
    """

    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    keywords = E.get_keywords()
    if 'datatype' in keywords:
        datatype = keywords['datatype']
        if datatype.lower() == 'osm':
            # Map from OSM attributes to the guideline classes (URM and RM)
            E = osm2bnpb(E, target_attribute=self.vclass_tag)
        elif datatype.lower() == 'sigab':
            # Map from SIGAB attributes to the guideline classes
            # (URM and RM)
            E = sigab2bnpb(E)
        else:
            E = unspecific2bnpb(E, target_attribute=self.vclass_tag)
    else:
        E = unspecific2bnpb(E, target_attribute=self.vclass_tag)

    # Interpolate hazard level to building locations
    H = assign_hazard_values_to_exposure_data(H, E, attribute_name='MMI')

    # Extract relevant numerical data
    coordinates = E.get_geometry()
    shaking = H.get_data()
    N = len(shaking)

    # List attributes to carry forward to result layer
    attributes = E.get_attribute_names()

    # Calculate building damage
    count3 = 0
    count2 = 0
    count1 = 0
    count_unknown = 0
    building_damage = []
    for i in range(N):
        mmi = float(shaking[i]['MMI'])

        building_class = E.get_data(self.vclass_tag, i)
        # (lo, hi) MMI bounds separating low/medium/high damage for
        # this building class
        lo, hi = damage_parameters[building_class]

        if numpy.isnan(mmi):
            # If we don't know the shake level assign Not-a-Number
            damage = numpy.nan
            count_unknown += 1
        elif mmi < lo:
            damage = 1  # Low
            count1 += 1
        elif lo <= mmi < hi:
            damage = 2  # Medium
            count2 += 1
        elif mmi >= hi:
            damage = 3  # High
            count3 += 1
        else:
            msg = 'Undefined shakelevel %s' % str(mmi)
            raise Exception(msg)

        # Collect shake level and calculated damage
        result_dict = {self.target_field: damage,
                       'MMI': mmi}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = E.get_data(key, i)

        # Record result for this feature
        building_damage.append(result_dict)

    # Create report
    # FIX: header row previously ended with a stray '</th>' and an empty
    # '<tr></tr>' instead of closing the row with '</tr>'
    impact_summary = ('<table border="0" width="320px">'
                      '   <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                      '   <tr><td>%s:</td><td>%s</td></tr>'
                      '   <tr><td>%s (10-25%%):</td><td>%s</td></tr>'
                      '   <tr><td>%s (25-50%%):</td><td>%s</td></tr>'
                      '   <tr><td>%s (50-100%%):</td><td>%s</td></tr>'
                      % (tr('Buildings'), tr('Total'),
                         tr('All'), format_int(N),
                         tr('Low damage'), format_int(count1),
                         tr('Medium damage'), format_int(count2),
                         tr('High damage'), format_int(count3)))
    # FIX: 'Unknown' is now translated like every other label in the table
    impact_summary += ('   <tr><td>%s (NaN):</td><td>%s</td></tr>'
                       % (tr('Unknown'), format_int(count_unknown)))
    impact_summary += '</table>'

    # Create style
    style_classes = [dict(label=tr('Low damage'), min=0.5, max=1.5,
                          colour='#fecc5c', transparency=0),
                     dict(label=tr('Medium damage'), min=1.5, max=2.5,
                          colour='#fd8d3c', transparency=0),
                     dict(label=tr('High damage'), min=2.5, max=3.5,
                          colour='#f31a1c', transparency=0)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=building_damage,
               projection=E.get_projection(),
               geometry=coordinates,
               name='Estimated damage level',
               keywords={'impact_summary': impact_summary},
               style_info=style_info)
    return V
def run(self, layers):
    """Earthquake impact to buildings (e.g. from Open Street Map)

    Input
      layers: List of layers expected to contain a ground shaking
              hazard layer (MMI) and a building exposure layer.

    Classifies each building into hazard level 0-3 using the MMI
    thresholds from ``self.parameters`` and, when the exposure data
    carries NEXIS attributes, accumulates building and contents dollar
    losses per hazard level.

    Return
      Vector layer of buildings with the hazard class in
      ``self.target_field`` plus an impact report.
    """

    LOGGER.debug("Running earthquake building impact")

    # Thresholds for mmi breakdown
    t0 = self.parameters["low_threshold"]
    t1 = self.parameters["medium_threshold"]
    t2 = self.parameters["high_threshold"]

    class_1 = tr("Low")
    class_2 = tr("Medium")
    class_3 = tr("High")

    # Extract data
    H = get_hazard_layer(layers)  # Depth
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Define attribute name for hazard levels
    hazard_attribute = "mmi"

    # Determine if exposure data have NEXIS attributes
    # (floor area plus building/contents value densities)
    attribute_names = E.get_attribute_names()
    if "FLOOR_AREA" in attribute_names and "BUILDING_C" in attribute_names and "CONTENTS_C" in attribute_names:
        is_NEXIS = True
    else:
        is_NEXIS = False

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, E, attribute_name=hazard_attribute)

    # Extract relevant exposure data
    # attribute_names = I.get_attribute_names()
    attributes = I.get_data()

    N = len(I)

    # Calculate building impact
    # Keys 0-3 are the hazard classes; 0 means below the low threshold
    lo = 0
    me = 0
    hi = 0
    building_values = {}
    contents_values = {}
    for key in range(4):
        building_values[key] = 0
        contents_values[key] = 0
    for i in range(N):
        # Classify building according to shake level
        # and calculate dollar losses
        if is_NEXIS:
            # Malformed or missing NEXIS values are treated as zero so a
            # single bad record does not abort the whole calculation
            try:
                area = float(attributes[i]["FLOOR_AREA"])
            except (ValueError, KeyError):
                # print 'Got area', attributes[i]['FLOOR_AREA']
                area = 0.0

            try:
                building_value_density = float(attributes[i]["BUILDING_C"])
            except (ValueError, KeyError):
                # print 'Got bld value', attributes[i]['BUILDING_C']
                building_value_density = 0.0

            try:
                contents_value_density = float(attributes[i]["CONTENTS_C"])
            except (ValueError, KeyError):
                # print 'Got cont value', attributes[i]['CONTENTS_C']
                contents_value_density = 0.0

            # Densities are per unit floor area
            building_value = building_value_density * area
            contents_value = contents_value_density * area

        # NOTE(review): assumes the interpolated "mmi" attribute is present
        # and numeric for every feature — verify against
        # assign_hazard_values_to_exposure_data
        x = float(attributes[i][hazard_attribute])  # MMI
        if t0 <= x < t1:
            lo += 1
            cls = 1
        elif t1 <= x < t2:
            me += 1
            cls = 2
        elif t2 <= x:
            hi += 1
            cls = 3
        else:
            # Not reported for less than level t0
            cls = 0

        attributes[i][self.target_field] = cls

        if is_NEXIS:
            # Accumulate values in 1M dollar units
            building_values[cls] += building_value
            contents_values[cls] += contents_value

    if is_NEXIS:
        # Convert to units of one million dollars
        for key in range(4):
            building_values[key] = int(building_values[key] / 1000000)
            contents_values[key] = int(contents_values[key] / 1000000)

    if is_NEXIS:
        # Generate simple impact report for NEXIS type buildings
        table_body = [
            question,
            TableRow(
                [
                    tr("Hazard Level"),
                    tr("Buildings Affected"),
                    tr("Buildings value ($M)"),
                    tr("Contents value ($M)"),
                ],
                header=True,
            ),
            TableRow([class_1, format_int(lo), format_int(building_values[1]), format_int(contents_values[1])]),
            TableRow([class_2, format_int(me), format_int(building_values[2]), format_int(contents_values[2])]),
            TableRow([class_3, format_int(hi), format_int(building_values[3]), format_int(contents_values[3])]),
        ]
    else:
        # Generate simple impact report for unspecific buildings
        table_body = [
            question,
            TableRow([tr("Hazard Level"), tr("Buildings Affected")], header=True),
            TableRow([class_1, format_int(lo)]),
            TableRow([class_2, format_int(me)]),
            TableRow([class_3, format_int(hi)]),
        ]

    table_body.append(TableRow(tr("Notes"), header=True))
    table_body.append(tr("High hazard is defined as shake levels greater " "than %i on the MMI scale.") % t2)
    table_body.append(
        tr("Medium hazard is defined as shake levels " "between %i and %i on the MMI scale.") % (t1, t2)
    )
    table_body.append(tr("Low hazard is defined as shake levels " "between %i and %i on the MMI scale.") % (t0, t1))
    if is_NEXIS:
        table_body.append(tr("Values are in units of 1 million Australian " "Dollars"))

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = tr("Buildings affected")

    # Create style
    style_classes = [
        dict(label=class_1, min=1, max=1, colour="#ffff00", transparency=1),
        dict(label=class_2, min=2, max=2, colour="#ffaa00", transparency=1),
        dict(label=class_3, min=3, max=3, colour="#ff0000", transparency=1),
    ]
    style_info = dict(target_field=self.target_field, style_classes=style_classes)

    # Create vector layer and return
    V = Vector(
        data=attributes,
        projection=I.get_projection(),
        geometry=I.get_geometry(),
        name=tr("Estimated buildings affected"),
        keywords={
            "impact_summary": impact_summary,
            "impact_table": impact_table,
            "map_title": map_title,
            "target_field": self.target_field,
            "statistics_type": self.statistics_type,
            "statistics_classes": self.statistics_classes,
        },
        style_info=style_info,
    )

    LOGGER.debug("Created vector layer %s" % str(V))
    return V
def run(self, layers):
    """Risk plugin for flood population evacuation

    Input
      layers: List of layers expected to contain
              H: Raster layer of flood depth
              P: Raster layer of population data on the same grid as H

    Counts number of people exposed to flood levels exceeding
    specified threshold.

    Return
      Map of population exposed to flood levels exceeding the threshold
      Table with number of people evacuated and supplies required
    """
    # Identify hazard and exposure layers
    inundation = get_hazard_layer(layers)  # Flood inundation [m]
    population = get_exposure_layer(layers)

    question = get_question(inundation.get_name(), population.get_name(), self)

    # Determine depths above which people are regarded affected [m]
    # Use thresholds from inundation layer if specified
    thresholds = self.parameters["thresholds"]

    verify(isinstance(thresholds, list), "Expected thresholds to be a list. Got %s" % str(thresholds))

    # Extract data as numeric arrays
    D = inundation.get_data(nan=0.0)  # Depth

    # Calculate impact as population exposed to depths > max threshold
    P = population.get_data(nan=0.0, scaling=True)

    # Calculate impact to intermediate thresholds
    # NOTE(review): thresholds are assumed non-empty and sorted ascending;
    # an empty list would leave I undefined and make counts[-1] fail —
    # confirm the parameter contract upstream
    counts = []
    for i, lo in enumerate(thresholds):
        if i == len(thresholds) - 1:
            # The last threshold: open-ended interval [lo, inf).
            # I keeps this final exposure grid for the output raster.
            I = M = numpy.where(D >= lo, P, 0)
        else:
            # Intermediate thresholds: half-open interval [lo, hi)
            hi = thresholds[i + 1]
            M = numpy.where((D >= lo) * (D < hi), P, 0)

        # Count
        val = int(numpy.sum(M))

        # Don't show digits less than a 1000
        if val > 1000:
            val = val // 1000 * 1000
        counts.append(val)

    # Count totals
    evacuated = counts[-1]
    total = int(numpy.sum(P))

    # Don't show digits less than a 1000
    if total > 1000:
        total = total // 1000 * 1000

    # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
    # FIXME: Refactor and share
    # All quantities are per week (7 x the daily rate noted below)
    # 400g per person per day
    rice = int(evacuated * 2.8)
    # 2.5L per person per day
    drinking_water = int(evacuated * 17.5)
    # 15L per person per day
    water = int(evacuated * 105)
    # assume 5 people per family (not in perka)
    family_kits = int(evacuated / 5)
    # 20 people per toilet
    toilets = int(evacuated / 20)

    # Generate impact report for the pdf map
    table_body = [
        question,
        TableRow([(tr("People in %.1f m of water") % thresholds[-1]), "%s" % format_int(evacuated)], header=True),
        TableRow(tr("Map shows population density needing " "evacuation")),
        TableRow([tr("Needs per week"), tr("Total")], header=True),
        [tr("Rice [kg]"), format_int(rice)],
        [tr("Drinking Water [l]"), format_int(drinking_water)],
        [tr("Clean Water [l]"), format_int(water)],
        [tr("Family Kits"), format_int(family_kits)],
        [tr("Toilets"), format_int(toilets)],
    ]

    # PDF table is rendered before the on-screen-only rows are appended
    impact_table = Table(table_body).toNewlineFreeString()

    table_body.append(TableRow(tr("Action Checklist:"), header=True))
    table_body.append(TableRow(tr("How will warnings be disseminated?")))
    table_body.append(TableRow(tr("How will we reach stranded people?")))
    table_body.append(TableRow(tr("Do we have enough relief items?")))
    table_body.append(TableRow(tr("If yes, where are they located and how " "will we distribute them?")))
    table_body.append(
        TableRow(
            tr(
                "If no, where can we obtain additional "
                "relief items from and how will we "
                "transport them to here?"
            )
        )
    )

    # Extend impact report for on-screen display
    table_body.extend(
        [
            TableRow(tr("Notes"), header=True),
            tr("Total population: %s") % format_int(total),
            tr("People need evacuation if flood levels " "exceed %(eps).1f m") % {"eps": thresholds[-1]},
            tr("Minimum needs are defined in BNPB " "regulation 7/2008"),
        ]
    )

    if len(counts) > 1:
        table_body.append(TableRow(tr("Detailed breakdown"), header=True))

        for i, val in enumerate(counts[:-1]):
            s = tr("People in %(lo).1f m to %(hi).1f m of water: %(val)i") % {
                "lo": thresholds[i],
                "hi": thresholds[i + 1],
                "val": format_int(val),
            }
            table_body.append(TableRow(s, header=False))

    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr("People in need of evacuation")

    # Generate 8 equidistant classes across the range of flooded population
    # 8 is the number of classes in the predefined flood population style
    # as imported
    classes = numpy.linspace(numpy.nanmin(I.flat[:]), numpy.nanmax(I.flat[:]), 8)

    # Work out how many decimals to use

    # Modify labels in existing flood style to show quantities
    # NOTE(review): style_info is not defined in this method — presumably a
    # module-level import of the predefined flood population style; confirm
    style_classes = style_info["style_classes"]

    style_classes[1]["label"] = tr("Low [%.2f people/cell]") % classes[1]
    style_classes[4]["label"] = tr("Medium [%.2f people/cell]") % classes[4]
    style_classes[7]["label"] = tr("High [%.2f people/cell]") % classes[7]

    # Override associated quantities in colour style
    for i in range(len(classes)):
        if i == 0:
            # Make the zero class fully transparent
            transparency = 100
        else:
            transparency = 0
        style_classes[i]["quantity"] = classes[i]
        style_classes[i]["transparency"] = transparency

    # Title
    style_info["legend_title"] = tr("Population Density")

    # Create raster object and return
    R = Raster(
        I,
        projection=inundation.get_projection(),
        geotransform=inundation.get_geotransform(),
        name=tr("Population which %s") % get_function_title(self),
        keywords={"impact_summary": impact_summary, "impact_table": impact_table, "map_title": map_title},
        style_info=style_info,
    )
    return R
def run(self, layers):
    """Flood impact to buildings (e.g. from Open Street Map).

    Input
      layers: List of layers expected to contain
              H: Raster layer of flood depth, or a vector layer of
                 flood-affected regions
              E: Vector layer of building locations

    Marks each building as flooded/not flooded, counts affected
    buildings per usage type and produces an impact report.

    Return
      Vector layer of buildings with a boolean flooded flag in
      ``self.target_field``.
    """

    threshold = 1.0  # Flood threshold [m]

    # Extract data
    H = get_hazard_layer(layers)    # Depth
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Determine attribute name for hazard levels
    if H.is_raster:
        mode = 'grid'
        hazard_attribute = 'depth'
    else:
        mode = 'regions'
        hazard_attribute = None

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attribute_names = I.get_attribute_names()
    attributes = I.get_data()
    N = len(I)

    # Calculate building impact
    count = 0
    buildings = {}
    affected_buildings = {}
    for i in range(N):
        if mode == 'grid':
            # Get the interpolated depth
            x = float(attributes[i]['depth'])
            x = x >= threshold
        elif mode == 'regions':
            # Use interpolated polygon attribute
            atts = attributes[i]

            # FIXME (Ole): Need to agree whether to use one or the
            # other as this can be very confusing!
            # For now look for 'affected' first
            if 'affected' in atts:
                # E.g. from flood forecast
                # Assume that building is wet if inside polygon
                # as flagged by attribute Flooded
                res = atts['affected']
                if res is None:
                    x = False
                else:
                    x = bool(res)
            elif 'FLOODPRONE' in atts:
                res = atts['FLOODPRONE']
                if res is None:
                    x = False
                else:
                    x = res.lower() == 'yes'
            elif DEFAULT_ATTRIBUTE in atts:
                # Check the default attribute assigned for points
                # covered by a polygon
                res = atts[DEFAULT_ATTRIBUTE]
                if res is None:
                    x = False
                else:
                    x = res
            else:
                # There is no flood related attribute
                msg = ('No flood related attribute found in %s. '
                       'I was looking for either "affected", "FLOODPRONE" '
                       'or "inapolygon". The latter should have been '
                       'automatically set by call to '
                       'assign_hazard_values_to_exposure_data(). '
                       'Sorry I can\'t help more.')
                raise Exception(msg)
        else:
            # FIX: the message used to claim the modes were "depth"/"grid";
            # the actual values assigned above are "grid" and "regions"
            msg = (tr('Unknown hazard type %s. '
                      'Must be either "grid" or "regions"')
                   % mode)
            raise Exception(msg)

        # Count affected buildings by usage type if available.
        # First non-empty attribute in this priority order wins.
        if 'type' in attribute_names:
            usage = attributes[i]['type']
        else:
            usage = None
        if 'amenity' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['amenity']
        if 'building_t' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['building_t']
        if 'office' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['office']
        if 'tourism' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['tourism']
        if 'leisure' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['leisure']
        if 'building' in attribute_names and (usage is None or usage == 0):
            usage = attributes[i]['building']
            if usage == 'yes':
                usage = 'building'

        if usage is not None and usage != 0:
            key = usage
        else:
            key = 'unknown'

        if key not in buildings:
            buildings[key] = 0
            affected_buildings[key] = 0

        # Count all buildings by type
        buildings[key] += 1
        if x is True:
            # Count affected buildings by type
            affected_buildings[key] += 1

            # Count total affected buildings
            count += 1

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = x

    # Lump small entries and 'unknown' into 'other' category.
    # FIX: iterate over a snapshot of the keys — the dict is mutated
    # (del) inside the loop, which is an error on dict views (Python 3)
    for usage in list(buildings.keys()):
        x = buildings[usage]
        if x < 25 or usage == 'unknown':
            if 'other' not in buildings:
                buildings['other'] = 0
                affected_buildings['other'] = 0

            buildings['other'] += x
            affected_buildings['other'] += affected_buildings[usage]
            del buildings[usage]
            del affected_buildings[usage]

    # Generate simple impact report
    table_body = [question,
                  TableRow([tr('Building type'),
                            tr('Number flooded'),
                            tr('Total')],
                           header=True),
                  TableRow([tr('All'),
                            format_int(count),
                            format_int(N)])]

    school_closed = 0
    hospital_closed = 0
    # Generate break down by building usage type if available
    list_type_attribute = ['type', 'amenity', 'building_t', 'office',
                           'tourism', 'leisure', 'building']
    intersect_type = set(attribute_names) & set(list_type_attribute)
    if len(intersect_type) > 0:
        # Make list of building types
        building_list = []
        for usage in buildings:
            building_type = usage.replace('_', ' ')

            # Lookup internationalised value if available
            building_type = tr(building_type)
            # FIXME (Sunni): counts are formatted as strings because they
            # will be replaced with &nbsp; in html
            building_list.append([building_type.capitalize(),
                                  format_int(affected_buildings[usage]),
                                  format_int(buildings[usage])])
            if building_type == 'school':
                school_closed = affected_buildings[usage]
            if building_type == 'hospital':
                hospital_closed = affected_buildings[usage]

        # Sort alphabetically
        building_list.sort()

        table_body.append(TableRow(tr('Breakdown by building type'),
                                   header=True))
        for row in building_list:
            s = TableRow(row)
            table_body.append(s)

    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(tr('Are the critical facilities still '
                                  'open?')))
    table_body.append(TableRow(tr('Which structures have warning capacity '
                                  '(eg. sirens, speakers, etc.)?')))
    table_body.append(TableRow(tr('Which buildings will be evacuation '
                                  'centres?')))
    table_body.append(TableRow(tr('Where will we locate the operations '
                                  'centre?')))
    table_body.append(TableRow(tr('Where will we locate warehouse and/or '
                                  'distribution centres?')))
    if school_closed > 0:
        table_body.append(TableRow(tr('Where will the students from the %s'
                                      ' closed schools go to study?') %
                                   format_int(school_closed)))
    if hospital_closed > 0:
        table_body.append(TableRow(tr('Where will the patients from the %s'
                                      ' closed hospitals go for treatment '
                                      'and how will we transport them?') %
                                   format_int(hospital_closed)))

    table_body.append(TableRow(tr('Notes'), header=True))
    assumption = tr('Buildings are said to be flooded when ')
    if mode == 'grid':
        assumption += tr('flood levels exceed %.1f m') % threshold
    else:
        assumption += tr('in regions marked as affected')
    table_body.append(assumption)

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = tr('Buildings inundated')

    # Create style
    style_classes = [dict(label=tr('Not Flooded'), min=0, max=0,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=tr('Flooded'), min=1, max=1,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=I.get_projection(),
               geometry=I.get_geometry(),
               name=tr('Estimated buildings affected'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for volcano building impact.

    Input
      layers: List of layers expected to contain
              H: Vector polygon layer of volcano hazard zones (or a
                 point layer of volcanoes, converted to circular zones)
              E: Vector layer of building locations

    Counts number of buildings exposed to each volcano hazard zone.

    Return
      Map of buildings exposed to volcanic hazard zones
      Table with number of buildings affected
    """

    # Identify hazard and exposure layers
    H = get_hazard_layer(layers)  # Volcano hazard zones
    E = get_exposure_layer(layers)

    question = get_question(H.get_name(), E.get_name(), self)

    # Input checks
    if not H.is_vector:
        msg = ('Input hazard %s was not a vector layer as expected '
               % H.get_name())
        raise Exception(msg)

    msg = ('Input hazard must be a polygon or point layer. '
           'I got %s with layer '
           'type %s' % (H.get_name(), H.get_geometry_name()))
    if not (H.is_polygon_data or H.is_point_data):
        raise Exception(msg)

    if H.is_point_data:
        # Use concentric circles around each volcano as hazard zones
        radii = self.parameters['distances']

        centers = H.get_geometry()
        attributes = H.get_data()
        Z = make_circular_polygon(centers, radii, attributes=attributes)
        # FIX: disabled leftover debug output — writing a shapefile to the
        # current directory on every run is a side effect; the sibling
        # volcano function keeps the same call commented out
        #Z.write_to_file('Marapi_evac_zone_%s.shp' % str(radii))  # To check

        category_title = 'Radius'
        H = Z

        #category_names = ['%s m' % x for x in radii]
        category_names = radii
    else:
        # Use hazard map
        category_title = 'KRB'

        # FIXME (Ole): Change to English and use translation system
        category_names = ['Kawasan Rawan Bencana III',
                          'Kawasan Rawan Bencana II',
                          'Kawasan Rawan Bencana I']

    if not category_title in H.get_attribute_names():
        msg = ('Hazard data %s did not contain expected '
               'attribute %s ' % (H.get_name(), category_title))
        raise InaSAFEError(msg)

    # Run interpolation function for polygon2raster
    P = assign_hazard_values_to_exposure_data(H, E)

    # Initialise attributes of output dataset with all attributes
    # from input polygon and a building count of zero
    new_attributes = H.get_data()

    categories = {}
    for attr in new_attributes:
        attr[self.target_field] = 0
        cat = attr[category_title]
        categories[cat] = 0

    # Count affected buildings per polygon and total
    total_affected = 0
    for attr in P.get_data():
        # Update building count for associated polygon
        poly_id = attr['polygon_id']
        if poly_id is not None:
            new_attributes[poly_id][self.target_field] += 1

            # Update building count for each category
            cat = new_attributes[poly_id][category_title]
            categories[cat] += 1

            # Update total
            total_affected += 1

    # Count totals
    total = len(E)

    # Generate simple impact report
    table_body = [question,
                  TableRow([tr('Buildings'), tr('Total'),
                            tr('Cumulative')],
                           header=True),
                  TableRow([tr('All'), format_int(total_affected), ''])]

    cum = 0
    for name in category_names:
        count = categories[name]
        cum += count
        table_body.append(TableRow([name, format_int(count),
                                    format_int(cum)]))

    table_body.append(TableRow(tr('Map shows buildings affected in '
                                  'each of volcano hazard polygons.')))
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total number of buildings %s in the viewable '
                          'area') % format_int(total),
                       tr('Only buildings available in OpenStreetMap '
                          'are considered.')])

    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('Buildings affected by volcanic hazard zone')

    # Define classes for legend for flooded building counts
    colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
               '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
    building_counts = [x[self.target_field] for x in new_attributes]
    cls = [0] + numpy.linspace(1,
                               max(building_counts),
                               len(colours)).tolist()

    # Define style info for output polygons showing population counts
    style_classes = []
    for i, colour in enumerate(colours):
        lo = cls[i]
        hi = cls[i + 1]

        if i == 0:
            label = tr('0')
        else:
            label = tr('%i - %i') % (lo, hi)

        entry = dict(label=label, colour=colour, min=lo, max=hi,
                     transparency=0, size=1)
        style_classes.append(entry)

    # Override style info with new classes and name
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes,
                      legend_title=tr('Building Count'))

    # Create vector layer and return
    V = Vector(data=new_attributes,
               projection=H.get_projection(),
               geometry=H.get_geometry(as_geometry_objects=True),
               name=tr('Buildings affected by volcanic hazard zone'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title,
                         'target_field': self.target_field},
               style_info=style_info)
    return V
def run(self, layers):
    """Plugin for impact of population as derived by categorised hazard

    Input
      layers: List of layers expected to contain
              H: Raster layer of categorised hazard
              P: Raster layer of population data

    Counts number of people exposed to each category of the hazard

    Return
      Map of population exposed to high category
      Table with number of people in each category
    """

    # The 3 category thresholds on the hazard raster values
    high_t = 1
    medium_t = 0.66
    low_t = 0.34

    # Identify hazard and exposure layers
    inundation = get_hazard_layer(layers)  # Categorised Hazard
    population = get_exposure_layer(layers)  # Population Raster

    question = get_question(inundation.get_name(), population.get_name(), self)

    # Extract data as numeric arrays
    C = inundation.get_data(nan=0.0)  # Category

    # Calculate impact as population exposed to each category
    P = population.get_data(nan=0.0, scaling=True)
    # NOTE(review): the high category uses exact float equality (C == 1)
    # while medium/low use inequalities — this presumes the category raster
    # contains the exact value 1 for high cells; verify the raster contract
    H = numpy.where(C == high_t, P, 0)
    M = numpy.where(C > medium_t, P, 0)
    L = numpy.where(C < low_t, P, 0)

    # Count totals
    # Medium is reported as (above medium) minus (high).
    # NOTE(review): low = sum(below low_t) - sum(above medium_t) looks
    # inconsistent with the other differences — confirm intended semantics
    total = int(numpy.sum(P))
    high = int(numpy.sum(H))
    medium = int(numpy.sum(M)) - int(numpy.sum(H))
    low = int(numpy.sum(L)) - int(numpy.sum(M))
    total_impact = high + medium + low

    # Don't show digits less than a 1000
    if total > 1000:
        total = total // 1000 * 1000
    if total_impact > 1000:
        total_impact = total_impact // 1000 * 1000
    if high > 1000:
        high = high // 1000 * 1000
    if medium > 1000:
        medium = medium // 1000 * 1000
    if low > 1000:
        low = low // 1000 * 1000

    # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
    ##        rice = evacuated * 2.8
    ##        drinking_water = evacuated * 17.5
    ##        water = evacuated * 67
    ##        family_kits = evacuated / 5
    ##        toilets = evacuated / 20

    # Generate impact report for the pdf map
    table_body = [
        question,
        TableRow([tr("People impacted "), "%s" % format_int(total_impact)], header=True),
        TableRow([tr("People in high hazard area "), "%s" % format_int(high)], header=True),
        TableRow([tr("People in medium hazard area "), "%s" % format_int(medium)], header=True),
        TableRow([tr("People in low hazard area"), "%s" % format_int(low)], header=True),
    ]
    ##                  TableRow([tr('Needs per week'), tr('Total')],
    ##                           header=True),
    ##                  [tr('Rice [kg]'), int(rice)],
    ##                  [tr('Drinking Water [l]'), int(drinking_water)],
    ##                  [tr('Clean Water [l]'), int(water)],
    ##                  [tr('Family Kits'), int(family_kits)],
    ##                  [tr('Toilets'), int(toilets)]]

    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend(
        [
            TableRow(tr("Notes"), header=True),
            tr("Map shows population density in high or medium " "hazard area"),
            tr("Total population: %s") % format_int(total),
        ]
    )
    ##                       tr('Minimum needs are defined in BNPB '
    ##                          'regulation 7/2008')])

    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr("People in high hazard areas")

    # Generate 8 equidistant classes across the range of flooded population
    # 8 is the number of classes in the predefined flood population style
    # as imported
    classes = numpy.linspace(numpy.nanmin(M.flat[:]), numpy.nanmax(M.flat[:]), 8)

    # Modify labels in existing flood style to show quantities
    # NOTE(review): style_info is not defined in this method — presumably a
    # module-level import of the predefined flood population style; confirm
    style_classes = style_info["style_classes"]

    style_classes[1]["label"] = tr("Low [%i people/cell]") % classes[1]
    style_classes[4]["label"] = tr("Medium [%i people/cell]") % classes[4]
    style_classes[7]["label"] = tr("High [%i people/cell]") % classes[7]

    style_info["legend_title"] = tr("Population Density")

    # Create raster object and return
    R = Raster(
        M,
        projection=inundation.get_projection(),
        geotransform=inundation.get_geotransform(),
        name=tr("Population which %s") % get_function_title(self),
        keywords={"impact_summary": impact_summary, "impact_table": impact_table, "map_title": map_title},
        style_info=style_info,
    )
    return R
def run(self, layers):
    """Risk plugin for flood population evacuation

    Input
      layers: List of layers expected to contain
              H: Raster layer of flood depth
              P: Raster layer of population data on the same grid as H

    Counts number of people exposed to flood levels exceeding
    specified threshold.

    Return
      Map of population exposed to flood levels exceeding the threshold
      Table with number of people evacuated and supplies required
    """
    # Identify hazard and exposure layers
    inundation = get_hazard_layer(layers)  # Flood inundation [m]
    population = get_exposure_layer(layers)

    question = get_question(inundation.get_name(),
                            population.get_name(),
                            self)

    # Determine depths above which people are regarded affected [m]
    # Use thresholds from inundation layer if specified
    thresholds = self.parameters['thresholds']

    verify(isinstance(thresholds, list),
           'Expected thresholds to be a list. Got %s' % str(thresholds))

    # Extract data as numeric arrays
    D = inundation.get_data(nan=0.0)  # Depth

    # Calculate impact as population exposed to depths > max threshold
    P = population.get_data(nan=0.0, scaling=True)

    # Calculate impact to intermediate thresholds
    # NOTE(review): thresholds are assumed non-empty and sorted ascending;
    # an empty list would leave I undefined and make counts[-1] fail —
    # confirm the parameter contract upstream
    counts = []
    for i, lo in enumerate(thresholds):
        if i == len(thresholds) - 1:
            # The last threshold: open-ended interval [lo, inf).
            # I keeps this final exposure grid for the output raster.
            I = M = numpy.where(D >= lo, P, 0)
        else:
            # Intermediate thresholds: half-open interval [lo, hi)
            hi = thresholds[i + 1]
            M = numpy.where((D >= lo) * (D < hi), P, 0)

        # Count
        val = int(numpy.sum(M))

        # Don't show digits less than a 1000
        if val > 1000:
            val = val // 1000 * 1000
        counts.append(val)

    # Count totals
    evacuated = counts[-1]
    total = int(numpy.sum(P))

    # Don't show digits less than a 1000
    if total > 1000:
        total = total // 1000 * 1000

    # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
    # FIXME: Refactor and share
    # All quantities are weekly totals per evacuated person
    rice = int(evacuated * 2.8)
    drinking_water = int(evacuated * 17.5)
    water = int(evacuated * 67)
    family_kits = int(evacuated / 5)
    toilets = int(evacuated / 20)

    # Generate impact report for the pdf map
    table_body = [
        question,
        TableRow([(tr('People in %.1f m of water') % thresholds[-1]),
                  '%s' % format_int(evacuated)],
                 header=True),
        TableRow(tr('Map shows population density needing '
                    'evacuation')),
        TableRow([tr('Needs per week'), tr('Total')], header=True),
        [tr('Rice [kg]'), format_int(rice)],
        [tr('Drinking Water [l]'), format_int(drinking_water)],
        [tr('Clean Water [l]'), format_int(water)],
        [tr('Family Kits'), format_int(family_kits)],
        [tr('Toilets'), format_int(toilets)]
    ]

    # PDF table is rendered before the on-screen-only rows are appended
    impact_table = Table(table_body).toNewlineFreeString()

    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(tr('How will warnings be disseminated?')))
    table_body.append(TableRow(tr('How will we reach stranded people?')))
    table_body.append(TableRow(tr('Do we have enough relief items?')))
    table_body.append(
        TableRow(
            tr('If yes, where are they located and how '
               'will we distribute them?')))
    table_body.append(
        TableRow(
            tr('If no, where can we obtain additional '
               'relief items from and how will we '
               'transport them to here?')))

    # Extend impact report for on-screen display
    table_body.extend([
        TableRow(tr('Notes'), header=True),
        tr('Total population: %s') % format_int(total),
        tr('People need evacuation if flood levels '
           'exceed %(eps).1f m') % {
            'eps': thresholds[-1]
        },
        tr('Minimum needs are defined in BNPB '
           'regulation 7/2008')
    ])

    if len(counts) > 1:
        table_body.append(TableRow(tr('Detailed breakdown'), header=True))

        for i, val in enumerate(counts[:-1]):
            s = (tr('People in %(lo).1f m to %(hi).1f m of water: %(val)i')
                 % {
                     'lo': thresholds[i],
                     'hi': thresholds[i + 1],
                     'val': format_int(val)
                 })
            table_body.append(TableRow(s, header=False))

    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('People in need of evacuation')

    # Generate 8 equidistant classes across the range of flooded population
    # 8 is the number of classes in the predefined flood population style
    # as imported
    classes = numpy.linspace(numpy.nanmin(I.flat[:]),
                             numpy.nanmax(I.flat[:]), 8)

    # Modify labels in existing flood style to show quantities
    # NOTE(review): style_info is not defined in this method — presumably a
    # module-level import of the predefined flood population style; confirm
    style_classes = style_info['style_classes']

    style_classes[1]['label'] = tr('Low [%i people/cell]') % classes[1]
    style_classes[4]['label'] = tr('Medium [%i people/cell]') % classes[4]
    style_classes[7]['label'] = tr('High [%i people/cell]') % classes[7]

    # Override associated quantities in colour style
    for i in range(len(classes)):
        if i == 0:
            # Make the zero class fully transparent
            transparency = 100
        else:
            transparency = 0
        style_classes[i]['quantity'] = classes[i]
        style_classes[i]['transparency'] = transparency

    # Title
    style_info['legend_title'] = tr('Population Density')

    # Create raster object and return
    R = Raster(I,
               projection=inundation.get_projection(),
               geotransform=inundation.get_geotransform(),
               name=tr('Population which %s') % get_function_title(self),
               keywords={
                   'impact_summary': impact_summary,
                   'impact_table': impact_table,
                   'map_title': map_title
               },
               style_info=style_info)
    return R
def run(self, layers):
    """Risk plugin for flood population evacuation.

    Input
        layers: List of layers expected to contain
            H: Raster layer of flood depth
            P: Raster layer of population data on the same grid as H

    Counts number of people exposed to flood levels exceeding
    specified thresholds.

    Return
        Map of population exposed to flood levels exceeding the threshold.
        Table with number of people evacuated and supplies required.
    """
    # Identify hazard and exposure layers
    inundation = get_hazard_layer(layers)  # Flood inundation [m]
    population = get_exposure_layer(layers)

    question = get_question(inundation.get_name(),
                            population.get_name(),
                            self)

    # Determine depths above which people are regarded affected [m].
    # Thresholds are expected in increasing order; the last one defines
    # the open-ended "needs evacuation" band.
    thresholds = self.parameters['thresholds']

    verify(isinstance(thresholds, list),
           'Expected thresholds to be a list. Got %s' % str(thresholds))

    # Extract data as numeric arrays
    D = inundation.get_data(nan=0.0)  # Depth

    # Population, scaled to the hazard grid resolution
    P = population.get_data(nan=0.0, scaling=True)

    # Population count per threshold band
    counts = []
    for i, lo in enumerate(thresholds):
        if i == len(thresholds) - 1:
            # The last threshold: open-ended band [lo, inf).
            # Also keep this masked population grid as the impact layer I.
            I = M = numpy.where(D >= lo, P, 0)
        else:
            # Intermediate band [lo, hi)
            hi = thresholds[i + 1]
            M = numpy.where((D >= lo) * (D < hi), P, 0)

        # Count people in this band
        val = int(numpy.sum(M))

        # Don't show digits less than a 1000
        if val > 1000:
            val = val // 1000 * 1000
        counts.append(val)

    # Count totals; evacuated is the open-ended top band
    evacuated = counts[-1]
    total = int(numpy.sum(P))

    # Don't show digits less than a 1000
    if total > 1000:
        total = total // 1000 * 1000

    # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
    # FIXME: Refactor and share
    rice = int(evacuated * 2.8)
    drinking_water = int(evacuated * 17.5)
    water = int(evacuated * 67)
    family_kits = int(evacuated / 5)
    toilets = int(evacuated / 20)

    # Generate impact report for the pdf map
    table_body = [question,
                  TableRow([(tr('People in %.1f m of water')
                             % thresholds[-1]),
                            '%s' % format_int(evacuated)],
                           header=True),
                  TableRow(tr('Map shows population density needing '
                              'evacuation')),
                  TableRow([tr('Needs per week'), tr('Total')],
                           header=True),
                  [tr('Rice [kg]'), format_int(rice)],
                  [tr('Drinking Water [l]'), format_int(drinking_water)],
                  [tr('Clean Water [l]'), format_int(water)],
                  [tr('Family Kits'), format_int(family_kits)],
                  [tr('Toilets'), format_int(toilets)]]
    impact_table = Table(table_body).toNewlineFreeString()

    # Action checklist is appended only after the pdf table has been
    # rendered, so it shows up on screen but not in the pdf map
    table_body.append(TableRow(tr('Action Checklist:'), header=True))
    table_body.append(TableRow(tr('How will warnings be disseminated?')))
    table_body.append(TableRow(tr('How will we reach stranded people?')))
    table_body.append(TableRow(tr('Do we have enough relief items?')))
    table_body.append(TableRow(tr('If yes, where are they located and how '
                                  'will we distribute them?')))
    table_body.append(TableRow(tr('If no, where can we obtain additional '
                                  'relief items from and how will we '
                                  'transport them to here?')))

    # Extend impact report for on-screen display
    table_body.extend([TableRow(tr('Notes'), header=True),
                       tr('Total population: %s') % format_int(total),
                       tr('People need evacuation if flood levels '
                          'exceed %(eps).1f m') % {'eps': thresholds[-1]},
                       tr('Minimum needs are defined in BNPB '
                          'regulation 7/2008')])

    if len(counts) > 1:
        table_body.append(TableRow(tr('Detailed breakdown'), header=True))

        for i, val in enumerate(counts[:-1]):
            # FIX: format_int returns a string, so the placeholder must be
            # %(val)s - the original %(val)i raised TypeError at runtime
            s = (tr('People in %(lo).1f m to %(hi).1f m of water: %(val)s')
                 % {'lo': thresholds[i],
                    'hi': thresholds[i + 1],
                    'val': format_int(val)})
            table_body.append(TableRow(s, header=False))

    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = tr('People in need of evacuation')

    # Generate 8 equidistant classes across the range of flooded population
    # 8 is the number of classes in the predefined flood population style
    # as imported
    classes = numpy.linspace(numpy.nanmin(I.flat[:]),
                             numpy.nanmax(I.flat[:]),
                             8)

    # Modify labels in existing flood style to show quantities.
    # NOTE(review): style_info is not defined in this method - presumably a
    # module-level predefined flood population style; verify at file top.
    style_classes = style_info['style_classes']

    style_classes[1]['label'] = tr('Low [%i people/cell]') % classes[1]
    style_classes[4]['label'] = tr('Medium [%i people/cell]') % classes[4]
    style_classes[7]['label'] = tr('High [%i people/cell]') % classes[7]

    # Override associated quantities in colour style; the lowest class is
    # made fully transparent so zero-impact cells do not obscure the map
    for i in range(len(classes)):
        if i == 0:
            transparency = 100
        else:
            transparency = 0
        style_classes[i]['quantity'] = classes[i]
        style_classes[i]['transparency'] = transparency

    # Title
    style_info['legend_title'] = tr('Population Density')

    # Create raster object and return
    R = Raster(I,
               projection=inundation.get_projection(),
               geotransform=inundation.get_geotransform(),
               name=tr('Population which %s') % get_function_title(self),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               style_info=style_info)
    return R
def run(layers):
    """Risk plugin for flood-affected population with gender breakdown.

    NOTE(review): the original docstring said 'earthquake fatalities',
    but the code processes flood inundation depth - corrected here.

    Input
        layers: List of layers expected to contain
            H: Raster layer of flood depth
            P: Raster layer of population data on the same grid as H,
               optionally accompanied by a female gender-ratio layer
               (keyword 'datatype' containing 'ratio', unit 'percent'
               or 'ratio')

    Return
        Raster of population affected by flooding deeper than the
        threshold, with an HTML impact summary in its keywords
        (all reported counts are in thousands).
    """
    # Depth above which people are regarded affected [m]
    threshold = 0.1

    # Identify hazard and exposure layers
    inundation = get_hazard_layer(layers)  # Flood inundation [m]

    # Get population and gender ratio
    population = gender_ratio = None
    for layer in get_exposure_layers(layers):
        keywords = layer.get_keywords()

        if 'datatype' not in keywords:
            population = layer
        else:
            datatype = keywords['datatype']

            if 'ratio' not in datatype:
                population = layer
            else:
                # Gender ratio layer, e.g. datatype 'female_ratio'
                gender_ratio_unit = keywords['unit']

                msg = ('Unit for gender ratio must be either '
                       '"percent" or "ratio"')
                if gender_ratio_unit not in ['percent', 'ratio']:
                    raise Exception(msg)

                gender_ratio = layer

    msg = 'No population layer was found in: %s' % str(layers)
    verify(population is not None, msg)

    # Extract data as numeric arrays
    D = inundation.get_data(nan=0.0)  # Depth

    # Calculate impact as population exposed to depths > threshold
    if population.get_resolution(native=True, isotropic=True) < 0.0005:
        # Keep this for backwards compatibility just a little while
        # This uses the original custom population set and
        # serves as a reference
        P = population.get_data(nan=0.0)  # Population density
        pixel_area = 2500
        I = numpy.where(D > threshold, P, 0) / 100000.0 * pixel_area
    else:
        # This is the new generic way of scaling (issue #168 and #172)
        P = population.get_data(nan=0.0, scaling=True)
        I = numpy.where(D > threshold, P, 0)

    if gender_ratio is not None:
        # Extract gender ratio at each pixel (as ratio)
        G = gender_ratio.get_data(nan=0.0)
        if gender_ratio_unit == 'percent':
            # FIX: out-of-place division - the original 'G /= 100'
            # mutated the array returned by the layer, risking
            # corruption of the underlying data on subsequent reads
            G = G / 100

        # Calculate breakdown
        P_female = P * G
        P_male = P - P_female

        I_female = I * G
        I_male = I - I_female

    # Generate text with result for this study; counts in thousands.
    # numpy.sum replaces builtin sum over .flat (C-speed reduction).
    total = format_int(int(numpy.sum(P) / 1000))
    count = format_int(int(numpy.sum(I) / 1000))

    # Create report
    impact_summary = ('<table border="0" width="320px">'
                      ' <tr><td><b>%s:</b></td>'
                      '<td align="right"><b>%s</b></td></tr>'
                      % ('Jumlah Penduduk', total))
    if gender_ratio is not None:
        total_female = format_int(int(numpy.sum(P_female) / 1000))
        total_male = format_int(int(numpy.sum(P_male) / 1000))

        impact_summary += (' <tr><td>%s:</td>'
                           '<td align="right">%s</td></tr>'
                           % (' - Wanita', total_female))
        impact_summary += (' <tr><td>%s:</td>'
                           '<td align="right">%s</td></tr>'
                           % (' - Pria', total_male))
        impact_summary += '<tr><td> </td></tr>'  # Blank row

    impact_summary += (' <tr><td><b>%s:</b></td>'
                       '<td align="right"><b>%s</b></td></tr>'
                       % ('Perkiraan Jumlah Terdampak (> %.1fm)' % threshold,
                          count))

    if gender_ratio is not None:
        affected_female = format_int(int(numpy.sum(I_female) / 1000))
        affected_male = format_int(int(numpy.sum(I_male) / 1000))

        impact_summary += (' <tr><td>%s:</td>'
                           '<td align="right">%s</td></tr>'
                           % (' - Wanita', affected_female))
        impact_summary += (' <tr><td>%s:</td>'
                           '<td align="right">%s</td></tr>'
                           % (' - Pria', affected_male))

    impact_summary += '</table>'

    impact_summary += '<br>'  # Blank separation row
    impact_summary += 'Catatan: Semua nomor x 1000'

    # Create raster object and return
    R = Raster(I,
               projection=inundation.get_projection(),
               geotransform=inundation.get_geotransform(),
               name='People affected',
               keywords={'impact_summary': impact_summary})
    return R