def get_plugins_as_table(name=None):
    """Retrieve a table listing all plugins and their requirements.

    Or just a single plugin if name is passed.

    Args:
        name: str optional name of a specific plugin.

    Returns:
        table: instance containing plugin descriptive data

    Raises:
        None
    """
    table_body = []
    header = TableRow([_('Title'), _('ID'), _('Requirements')],
                      header=True)
    table_body.append(header)

    plugins_dict = dict([(pretty_function_name(p), p)
                         for p in FunctionProvider.plugins])
    if name is not None:
        if isinstance(name, basestring):
            # Add the names
            plugins_dict.update(
                dict([(p.__name__, p) for p in FunctionProvider.plugins]))

            msg = ('No plugin named "%s" was found. '
                   'List of available plugins is: %s'
                   % (name, ', '.join(plugins_dict.keys())))
            if name not in plugins_dict:
                raise RuntimeError(msg)

            plugins_dict = {name: plugins_dict[name]}
        else:
            msg = ('get_plugins expects either no parameters or a string '
                   'with the name of the plugin, you passed: '
                   '%s which is a %s' % (name, type(name)))
            raise Exception(msg)

    # Now loop through the plugins adding them to the table
    for key, func in plugins_dict.iteritems():
        for requirement in requirements_collect(func):
            row = []
            row.append(TableCell(get_function_title(func), header=True))
            row.append(key)
            row.append(requirement)
            table_body.append(TableRow(row))

    table = Table(table_body)
    table.caption = _('Available Impact Functions')
    return table
def interpolate_raster_vector_points(source, target,
                                     layer_name=None,
                                     attribute_name=None,
                                     mode='linear'):
    """Interpolate from raster layer to point data

    Args:
        * source: Raster data set (grid)
        * target: Vector data set (points)
        * layer_name: Optional name of returned interpolated layer.
              If None the name of target is used for the returned layer.
        * attribute_name: Name for new attribute.
              If None (default) the name of layer source is used
        * mode: 'linear' or 'constant' - determines whether interpolation
              from grid to points should be bilinear or piecewise constant

    Output
        I: Vector data set; points located as target with values
           interpolated from source
    """
    msg = ('There are no data points to interpolate to. Perhaps zoom out '
           'and try again')
    verify(len(target) > 0, msg)

    # Input checks
    verify(source.is_raster)
    verify(target.is_vector)
    verify(target.is_point_data)

    # FIXME (Ole): Why can we not remove this ???
    # It should now be taken care of in the general input_check above
    # OK - remove when we leave using the form H.interpolate in impact funcs
    if layer_name is None:
        layer_name = target.get_name()

    # Get raster data and corresponding x and y axes
    A = source.get_data(nan=True)
    longitudes, latitudes = source.get_geometry()
    verify(len(longitudes) == A.shape[1])
    verify(len(latitudes) == A.shape[0])

    # Get vector point geometry as Nx2 array
    coordinates = numpy.array(target.get_geometry(),
                              dtype='d',
                              copy=False)

    # Get original attributes
    attributes = target.get_data()

    # Create new attribute and interpolate
    # Remove?
    N = len(target)
    if attribute_name is None:
        attribute_name = source.get_name()

    try:
        values = interpolate_raster(longitudes, latitudes, A,
                                    coordinates, mode=mode)
    except (BoundsError, InaSAFEError), e:
        msg = (_('Could not interpolate from raster layer %(raster)s to '
                 'vector layer %(vector)s. Error message: %(error)s')
               % {'raster': source.get_name(),
                  'vector': target.get_name(),
                  'error': str(e)})
        raise InaSAFEError(msg)
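# --- Illustrative sketch (not part of the InaSAFE source) ---------------
# The 'linear' mode above performs bilinear interpolation from the raster
# grid to each point, while 'constant' snaps to a single cell value.
# A minimal, self-contained NumPy sketch of the bilinear case for one
# point; the grid, axes and point values are made up for illustration and
# do not come from any InaSAFE API.
import numpy


def bilinear_sample(longitudes, latitudes, grid, lon, lat):
    # Locate the cell containing (lon, lat) on ascending axes
    j = numpy.searchsorted(longitudes, lon) - 1
    i = numpy.searchsorted(latitudes, lat) - 1
    x = (lon - longitudes[j]) / (longitudes[j + 1] - longitudes[j])
    y = (lat - latitudes[i]) / (latitudes[i + 1] - latitudes[i])
    # Weighted average of the four surrounding cell values
    return ((1 - x) * (1 - y) * grid[i, j] +
            x * (1 - y) * grid[i, j + 1] +
            (1 - x) * y * grid[i + 1, j] +
            x * y * grid[i + 1, j + 1])

lons = numpy.array([0.0, 1.0, 2.0])
lats = numpy.array([0.0, 1.0, 2.0])
A = numpy.array([[0.0, 1.0, 2.0],
                 [1.0, 2.0, 3.0],
                 [2.0, 3.0, 4.0]])
print(bilinear_sample(lons, lats, A, 0.5, 0.5))  # 1.0 (bilinear)
print(A[0, 0])  # 0.0 (value of cell (0, 0), a piecewise-constant choice)
# --- End of sketch -------------------------------------------------------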
def get_question(hazard_title, exposure_title, func):
    """Rephrase the question asked

    Input
        hazard_title: string
        exposure_title: string
        func: impact function class
    """
    function_title = get_function_title(func)
    return (_('In the event of <i>%(hazard)s</i> how many '
              '<i>%(exposure)s</i> might <i>%(impact)s</i>')
            % {'hazard': hazard_title.lower(),
               'exposure': exposure_title.lower(),
               'impact': function_title.lower()})
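# --- Illustrative sketch (not part of the InaSAFE source) ---------------
# The formatting above produces the headline question used in every impact
# report. A stand-alone rendering of the same template with hypothetical
# titles, without the gettext wrapper or an impact function class.
question = ('In the event of <i>%(hazard)s</i> how many '
            '<i>%(exposure)s</i> might <i>%(impact)s</i>'
            % {'hazard': 'a flood in jakarta like in 2007',
               'exposure': 'people',
               'impact': 'need evacuation'})
print(question)
# --- End of sketch -------------------------------------------------------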
def Xtest_Afrikaans(self):
    """Test that Afrikaans translations are working"""
    # Note this has really bad side effects - lots of tests suddenly start
    # breaking when this test is enabled....disabled for now, but I have
    # left the test here for now as it illustrates one potential avenue
    # that can be pursued if dynamically changing the language to unit test
    # different locales ever becomes a requirement.
    # Be sure nose tests all run cleanly before reintroducing this!

    # This is part test and part demonstrator of how to reload inasafe

    # Now see if the same function is delivered for the function
    # Because of the way impact plugins are loaded in inasafe
    # (see http://effbot.org/zone/metaclass-plugins.htm)
    # lang in the context of the ugettext function in inasafe libs
    # must be imported late so that i18n is set up already
    from safe.common.utilities import ugettext as _
    myUntranslatedString = 'Temporarily Closed'
    myExpectedString = 'Tydelik gesluit'  # afrikaans
    myTranslation = _(myUntranslatedString)
    myMessage = '\nTranslated: %s\nGot: %s\nExpected: %s' % (
        myUntranslatedString, myTranslation, myExpectedString)
    assert myTranslation == myExpectedString, myMessage

    myParent = QWidget()
    myCanvas = QgsMapCanvas(myParent)
    myIface = QgisInterface(myCanvas)
    # reload all inasafe modules so that i18n get picked up afresh
    # this is the part that produces bad side effects
    for myMod in sys.modules.values():
        try:
            if ('storage' in str(myMod) or
                'impact' in str(myMod)):
                print 'Reloading:', str(myMod)
                reload(myMod)
        except NameError:
            pass
    myPlugin = Plugin(myIface)
    myPlugin.setupI18n('af')  # afrikaans
    myLang = os.environ['LANG']
    assert myLang == 'af'
    from safe_qgis.safe_interface import getSafeImpactFunctions
    #myFunctions = getSafeImpactFunctions()
    #print myFunctions
    myFunctions = getSafeImpactFunctions('Tydelik gesluit')
    assert len(myFunctions) > 0
def test_ImpactFunctionI18n(self):
    """Library translations are working."""
    # Import this late so that i18n setup is already in place
    from safe.common.utilities import ugettext as _
    myUntranslatedString = 'Temporarily Closed'

    # Test indonesian too
    myParent = QWidget()
    myCanvas = QgsMapCanvas(myParent)
    myIface = QgisInterface(myCanvas)
    myPlugin = Plugin(myIface)
    myPlugin.setupI18n('id')  # indonesian
    myExpectedString = 'Ditutup sementara'
    myTranslation = _(myUntranslatedString)
    myMessage = '\nTranslated: %s\nGot: %s\nExpected: %s' % (
        myUntranslatedString, myTranslation, myExpectedString)
    assert myTranslation == myExpectedString, myMessage
def run(self, layers):
    """Risk plugin for Padang building survey
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    datatype = E.get_keywords()['datatype']
    vclass_tag = 'ITB_Class'
    if datatype.lower() == 'osm':
        # Map from OSM attributes to the ITB building classes
        #Emap = osm2itb(E)
        print 'osm2itb has not been implemented'
    elif datatype.lower() == 'sigab':
        #Emap = sigabitb(E)
        print 'sigab2itb has not been implemented'
    elif datatype.lower() == 'itb':
        Emap = E

    # Interpolate hazard level to building locations
    Hi = assign_hazard_values_to_exposure_data(H, Emap,
                                               attribute_name='MMI')

    # Extract relevant numerical data
    coordinates = Emap.get_geometry()
    shaking = Hi.get_data()
    N = len(shaking)

    # List attributes to carry forward to result layer
    attributes = Emap.get_attribute_names()

    # Calculate building damage
    count50 = 0
    count25 = 0
    count10 = 0
    count0 = 0
    building_damage = []
    for i in range(N):
        mmi = float(shaking[i]['MMI'])
        building_class = Emap.get_data(vclass_tag, i)

        building_type = str(building_class)
        damage_params = vul_curves[building_type]
        beta = damage_params['beta']
        median = damage_params['median']
        msg = 'Invalid parameter value for ' + building_type
        verify(beta + median > 0.0, msg)
        percent_damage = lognormal_cdf(mmi,
                                       median=median,
                                       sigma=beta) * 100

        # Collect shake level and calculated damage
        result_dict = {self.target_field: percent_damage,
                       'MMI': mmi}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = Emap.get_data(key, i)

        # Record result for this feature
        building_damage.append(result_dict)

        # Debugging
        #if percent_damage > 0.01:
        #    print mmi, percent_damage

        # Calculate statistics
        if percent_damage < 10:
            count0 += 1

        if 10 <= percent_damage < 33:
            count10 += 1

        if 33 <= percent_damage < 66:
            count25 += 1

        if 66 <= percent_damage:
            count50 += 1

    #fid.close()

    # Create report
    Hname = H.get_name()
    Ename = E.get_name()
    impact_summary = ('<b>In case of "%s" the estimated impact to '
                      '"%s" '
                      'is:</b><br><br><p>' % (Hname, Ename))
    impact_summary += ('<table border="0" width="320px">'
                       ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'
                       ' <tr></tr>'
                       ' <tr><td>%s:</td><td>%i</td></tr>'
                       ' <tr><td>%s (<10%%):</td><td>%i</td></tr>'
                       ' <tr><td>%s (10-33%%):</td><td>%i</td></tr>'
                       ' <tr><td>%s (33-66%%):</td><td>%i</td></tr>'
                       ' <tr><td>%s (66-100%%):</td><td>%i</td></tr>'
                       '</table></font>' % (_('Buildings'),
                                            _('Total'),
                                            _('All'), N,
                                            _('No damage'), count0,
                                            _('Low damage'), count10,
                                            _('Medium damage'), count25,
                                            _('High damage'), count50))
    impact_summary += '<br>'  # Blank separation row
    impact_summary += '<b>' + _('Assumption') + ':</b><br>'
    # This is the proper text:
    #_('Levels of impact are defined by post 2009 '
    #  'Padang earthquake survey conducted by Geoscience '
    #  'Australia and Institute of Teknologi Bandung.'))
    #_('Unreinforced masonry is assumed where no '
    #  'structural information is available.'))
    impact_summary += _('Levels of impact are defined by post 2009 '
                        'Padang earthquake survey conducted by Geoscience '
                        'Australia and Institute of Teknologi Bandung.')
    impact_summary += _('Unreinforced masonry is assumed where no '
                        'structural information is available.')

    # Create style
    style_classes = [dict(label=_('No damage'), min=0, max=10,
                          colour='#00ff00', transparency=1),
                     dict(label=_('Low damage'), min=10, max=33,
                          colour='#ffff00', transparency=1),
                     dict(label=_('Medium damage'), min=33, max=66,
                          colour='#ffaa00', transparency=1),
                     dict(label=_('High damage'), min=66, max=100,
                          colour='#ff0000', transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=building_damage,
               projection=E.get_projection(),
               geometry=coordinates,
               name='Estimated pct damage',
               keywords={'impact_summary': impact_summary},
               style_info=style_info)
    return V
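# --- Illustrative sketch (not part of the InaSAFE source) ---------------
# The damage model above evaluates a lognormal fragility curve per building
# class: percent damage = lognormal_cdf(MMI, median, sigma) * 100, with the
# median and beta taken from the vul_curves lookup for that class. A
# stand-alone version using only the maths library; the example median and
# beta values below are made up for illustration, not ITB parameters.
import math


def lognormal_cdf_sketch(x, median, sigma):
    # CDF of a lognormal distribution parameterised by its median and
    # log-standard-deviation (usually called beta in fragility models)
    return 0.5 * (1.0 + math.erf(math.log(x / median)
                                 / (sigma * math.sqrt(2.0))))

for mmi in (6.0, 7.0, 8.0, 9.0):
    percent_damage = lognormal_cdf_sketch(mmi, median=7.5, sigma=0.3) * 100
    print('MMI %.1f -> %.1f%% damage' % (mmi, percent_damage))
# --- End of sketch -------------------------------------------------------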
def run(self, layers):
    """Risk plugin for flood population evacuation

    Input
      layers: List of layers expected to contain
          H: Raster layer of flood depth
          P: Raster layer of population data on the same grid as H

    Counts number of people exposed to flood levels exceeding
    specified threshold.

    Return
      Map of population exposed to flood levels exceeding the threshold
      Table with number of people evacuated and supplies required
    """
    # Depth above which people are regarded affected [m]
    threshold = 1.0  # Threshold [m]

    # Identify hazard and exposure layers
    inundation = get_hazard_layer(layers)  # Flood inundation [m]
    population = get_exposure_layer(layers)

    question = get_question(inundation.get_name(),
                            population.get_name(),
                            self)

    # Extract data as numeric arrays
    D = inundation.get_data(nan=0.0)  # Depth

    # Calculate impact as population exposed to depths > threshold
    P = population.get_data(nan=0.0, scaling=True)
    I = numpy.where(D > threshold, P, 0)
    M = numpy.where(D > 0.5, P, 0)
    L = numpy.where(D > 0.3, P, 0)

    # Count totals
    total = int(numpy.sum(P))
    evacuated = int(numpy.sum(I))
    medium = int(numpy.sum(M)) - int(numpy.sum(I))
    low = int(numpy.sum(L)) - int(numpy.sum(M))

    # Don't show digits less than a 1000
    if total > 1000:
        total = total // 1000 * 1000
    if evacuated > 1000:
        evacuated = evacuated // 1000 * 1000
    if medium > 1000:
        medium = medium // 1000 * 1000
    if low > 1000:
        low = low // 1000 * 1000

    # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
    rice = evacuated * 2.8
    drinking_water = evacuated * 17.5
    water = evacuated * 67
    family_kits = evacuated / 5
    toilets = evacuated / 20

    # Generate impact report for the pdf map
    table_body = [question,
                  TableRow([_('People needing evacuation'),
                            '%i' % evacuated],
                           header=True),
                  TableRow(_('Map shows population density needing '
                             'evacuation'))]
                  #,
                  ## TableRow([_('People in 50cm to 1m of water '),
                  ##           '%i' % medium],
                  ##          header=True),
                  ## TableRow([_('People in 30cm to 50cm of water'),
                  ##           '%i' % low],
                  ##          header=True)]
                  ## TableRow([_('Needs per week'), _('Total')],
                  ##          header=True),
                  ## [_('Rice [kg]'), int(rice)],
                  ## [_('Drinking Water [l]'), int(drinking_water)],
                  ## [_('Clean Water [l]'), int(water)],
                  ## [_('Family Kits'), int(family_kits)],
                  ## [_('Toilets'), int(toilets)]]
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(_('Notes:'), header=True),
                       _('Total population: %i') % total,
                       _('People need evacuation if flood levels '
                         'exceed %(eps)i m') % {'eps': threshold},
                       _('People in 50cm to 1m of water: %i') % medium,
                       _('People in 30cm to 50cm of water: %i') % low])
                       ## _('Minimum needs are defined in BNPB '
                       ##   'regulation 7/2008')])
    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = _('People in need of evacuation')

    style_info['legend_title'] = _('Population Density')

    # Create raster object and return
    R = Raster(I,
               projection=inundation.get_projection(),
               geotransform=inundation.get_geotransform(),
               name=_('Population which %s') % get_function_title(self),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               style_info=style_info)
    return R
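# --- Illustrative sketch (not part of the InaSAFE source) ---------------
# The minimum-needs arithmetic above (BNPB Perka 7/2008) applies fixed
# per-capita multipliers to the evacuated population. A small worked
# example with a hypothetical evacuated count; floor division is used here
# for the per-household items so the figures stay whole numbers.
evacuated = 10000  # hypothetical number of people needing evacuation

rice = evacuated * 2.8             # kg of rice per week
drinking_water = evacuated * 17.5  # litres of drinking water per week
water = evacuated * 67             # litres of clean water per week
family_kits = evacuated // 5       # one kit per family of five
toilets = evacuated // 20          # one toilet per twenty people

print('Rice [kg]: %i' % int(rice))                     # 28000
print('Drinking Water [l]: %i' % int(drinking_water))  # 175000
print('Clean Water [l]: %i' % int(water))              # 670000
print('Family Kits: %i' % family_kits)                 # 2000
print('Toilets: %i' % toilets)                         # 500
# --- End of sketch -------------------------------------------------------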
def run(self, layers):
    """Risk plugin for Padang building survey
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Map from different kinds of datasets to Padang vulnerability classes
    datatype = E.get_keywords()['datatype']
    vclass_tag = 'VCLASS'
    if datatype.lower() == 'osm':
        # Map from OSM attributes
        Emap = osm2padang(E)
    elif datatype.lower() == 'sigab':
        # Map from SIGAB attributes
        Emap = sigab2padang(E)
    else:
        Emap = E

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, Emap,
                                              attribute_name='MMI')

    # Extract relevant numerical data
    attributes = I.get_data()
    N = len(I)

    # Calculate building damage
    count_high = count_medium = count_low = count_none = 0
    for i in range(N):
        mmi = float(attributes[i]['MMI'])
        building_type = Emap.get_data(vclass_tag, i)
        damage_params = damage_curves[building_type]
        beta = damage_params['beta']
        median = damage_params['median']
        percent_damage = lognormal_cdf(mmi,
                                       median=median,
                                       sigma=beta) * 100

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = percent_damage

        # Calculate statistics
        if percent_damage < 10:
            count_none += 1

        if 10 <= percent_damage < 33:
            count_low += 1

        if 33 <= percent_damage < 66:
            count_medium += 1

        if 66 <= percent_damage:
            count_high += 1

    # Generate impact report
    table_body = [question,
                  TableRow([_('Buildings'), _('Total')],
                           header=True),
                  TableRow([_('All'), N]),
                  TableRow([_('No damage'), count_none]),
                  TableRow([_('Low damage'), count_low]),
                  TableRow([_('Medium damage'), count_medium]),
                  TableRow([_('High damage'), count_high])]
    table_body.append(TableRow(_('Notes'), header=True))
    table_body.append(_('Levels of impact are defined by post 2009 '
                        'Padang earthquake survey conducted by Geoscience '
                        'Australia and Institute of Teknologi Bandung.'))
    table_body.append(_('Unreinforced masonry is assumed where no '
                        'structural information is available.'))
    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = _('Earthquake damage to buildings')

    # Create style
    style_classes = [dict(label=_('No damage'), min=0, max=10,
                          colour='#00ff00', transparency=1),
                     dict(label=_('Low damage'), min=10, max=33,
                          colour='#ffff00', transparency=1),
                     dict(label=_('Medium damage'), min=33, max=66,
                          colour='#ffaa00', transparency=1),
                     dict(label=_('High damage'), min=66, max=100,
                          colour='#ff0000', transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=E.get_projection(),
               geometry=E.get_geometry(),
               name='Estimated pct damage',
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               style_info=style_info)
    return V
def run(self, layers):
    """Risk plugin for flood population evacuation

    Input
      layers: List of layers expected to contain
          H: Raster layer of flood depth
          P: Raster layer of population data on the same grid as H

    Counts number of people exposed to flood levels exceeding
    specified threshold.

    Return
      Map of population exposed to flood levels exceeding the threshold
      Table with number of people evacuated and supplies required
    """
    # Identify hazard and exposure layers
    inundation = get_hazard_layer(layers)  # Flood inundation [m]
    population = get_exposure_layer(layers)

    question = get_question(inundation.get_name(),
                            population.get_name(),
                            self)

    # Determine depths above which people are regarded affected [m]
    # Use thresholds from inundation layer if specified
    thresholds = get_thresholds(inundation)
    if len(thresholds) == 0:
        # Default threshold
        thresholds = [1.0]

    verify(isinstance(thresholds, list),
           'Expected thresholds to be a list. Got %s' % str(thresholds))

    # Extract data as numeric arrays
    D = inundation.get_data(nan=0.0)  # Depth

    # Calculate impact as population exposed to depths > max threshold
    P = population.get_data(nan=0.0, scaling=True)

    # Calculate impact to intermediate thresholds
    counts = []
    for i, lo in enumerate(thresholds):
        if i == len(thresholds) - 1:
            # The last threshold
            I = M = numpy.where(D >= lo, P, 0)
        else:
            # Intermediate thresholds
            hi = thresholds[i + 1]
            M = numpy.where((D >= lo) * (D < hi), P, 0)

        # Count
        val = int(numpy.sum(M))

        # Don't show digits less than a 1000
        if val > 1000:
            val = val // 1000 * 1000
        counts.append(val)

    # Count totals
    evacuated = counts[-1]
    total = int(numpy.sum(P))

    # Don't show digits less than a 1000
    if total > 1000:
        total = total // 1000 * 1000

    # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan
    rice = evacuated * 2.8
    drinking_water = evacuated * 17.5
    water = evacuated * 67
    family_kits = evacuated / 5
    toilets = evacuated / 20

    # Generate impact report for the pdf map
    table_body = [question,
                  TableRow([_('People needing evacuation'),
                            '%i' % evacuated],
                           header=True),
                  TableRow(_('Map shows population density needing '
                             'evacuation')),
                  TableRow([_('Needs per week'), _('Total')],
                           header=True),
                  [_('Rice [kg]'), int(rice)],
                  [_('Drinking Water [l]'), int(drinking_water)],
                  [_('Clean Water [l]'), int(water)],
                  [_('Family Kits'), int(family_kits)],
                  [_('Toilets'), int(toilets)]]
    impact_table = Table(table_body).toNewlineFreeString()

    # Extend impact report for on-screen display
    table_body.extend([TableRow(_('Notes'), header=True),
                       _('Total population: %i') % total,
                       _('People need evacuation if flood levels '
                         'exceed %(eps).1f m') % {'eps': thresholds[-1]},
                       _('Minimum needs are defined in BNPB '
                         'regulation 7/2008')])
    if len(counts) > 1:
        table_body.append(TableRow(_('Detailed breakdown'), header=True))

        for i, val in enumerate(counts[:-1]):
            s = (_('People in %(lo).1f m to %(hi).1f m of water: %(val)i')
                 % {'lo': thresholds[i],
                    'hi': thresholds[i + 1],
                    'val': val})
            table_body.append(TableRow(s, header=False))

    impact_summary = Table(table_body).toNewlineFreeString()
    map_title = _('People in need of evacuation')

    # Generate 8 equidistant classes across the range of flooded population
    # 8 is the number of classes in the predefined flood population style
    # as imported
    classes = numpy.linspace(numpy.nanmin(I.flat[:]),
                             numpy.nanmax(I.flat[:]), 8)

    # Modify labels in existing flood style to show quantities
    style_classes = style_info['style_classes']
    style_classes[1]['label'] = _('Low [%i people/cell]') % classes[1]
    style_classes[4]['label'] = _('Medium [%i people/cell]') % classes[4]
    style_classes[7]['label'] = _('High [%i people/cell]') % classes[7]
    style_info['legend_title'] = _('Population Density')

    # Create raster object and return
    R = Raster(I,
               projection=inundation.get_projection(),
               geotransform=inundation.get_geotransform(),
               name=_('Population which %s') % get_function_title(self),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               style_info=style_info)
    return R
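# --- Illustrative sketch (not part of the InaSAFE source) ---------------
# The loop above splits the population into depth bands: each intermediate
# threshold pair [lo, hi) gets its own count, and the last threshold is
# open-ended (D >= lo). A tiny self-contained example with made-up depth
# and population grids shows how the numpy.where masks produce the counts.
import numpy

D = numpy.array([[0.2, 0.8], [1.5, 3.0]])  # hypothetical depths [m]
P = numpy.array([[100, 100], [100, 100]])  # hypothetical people per cell
thresholds = [0.5, 1.0, 2.0]

counts = []
for i, lo in enumerate(thresholds):
    if i == len(thresholds) - 1:
        M = numpy.where(D >= lo, P, 0)               # open-ended band
    else:
        hi = thresholds[i + 1]
        M = numpy.where((D >= lo) * (D < hi), P, 0)  # band [lo, hi)
    counts.append(int(numpy.sum(M)))

print(counts)  # [100, 100, 100]: one cell per band, the 0.2 m cell in none
# --- End of sketch -------------------------------------------------------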
def run(self, layers):
    """Flood impact to buildings (e.g. from Open Street Map)
    """
    threshold = 1.0  # Flood threshold [m]

    # Extract data
    H = get_hazard_layer(layers)    # Depth
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Determine attribute name for hazard levels
    if H.is_raster:
        hazard_attribute = 'depth'
    else:
        hazard_attribute = 'FLOODPRONE'

    # Interpolate hazard level to building locations
    I = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name=hazard_attribute)

    # Extract relevant exposure data
    attribute_names = I.get_attribute_names()
    attributes = I.get_data()
    N = len(I)

    # Calculate building impact
    count = 0
    buildings = {}
    affected_buildings = {}
    for i in range(N):
        if hazard_attribute == 'depth':
            # Get the interpolated depth
            x = float(attributes[i]['depth'])
            x = x > threshold
        elif hazard_attribute == 'FLOODPRONE':
            # Use interpolated polygon attribute
            atts = attributes[i]

            if 'FLOODPRONE' in atts:
                res = atts['FLOODPRONE']
                if res is None:
                    x = False
                else:
                    x = res.lower() == 'yes'
            else:
                # If there isn't a flood prone attribute,
                # assume that building is wet if inside polygon
                # as flagged by generic attribute AFFECTED
                res = atts['Affected']
                if res is None:
                    x = False
                else:
                    x = res
        else:
            msg = (_('Unknown hazard type %s. '
                     'Must be either "depth" or "floodprone"')
                   % hazard_attribute)
            raise Exception(msg)

        # Count affected buildings by usage type if available
        if 'type' in attribute_names:
            usage = attributes[i]['type']
        else:
            usage = None

        if usage is not None and usage != 0:
            key = usage
        else:
            key = 'unknown'

        if key not in buildings:
            buildings[key] = 0
            affected_buildings[key] = 0

        # Count all buildings by type
        buildings[key] += 1
        if x is True:
            # Count affected buildings by type
            affected_buildings[key] += 1

            # Count total affected buildings
            count += 1

        # Add calculated impact to existing attributes
        attributes[i][self.target_field] = x

    # Lump small entries and 'unknown' into 'other' category
    for usage in buildings.keys():
        x = buildings[usage]
        if x < 25 or usage == 'unknown':
            if 'other' not in buildings:
                buildings['other'] = 0
                affected_buildings['other'] = 0

            buildings['other'] += x
            affected_buildings['other'] += affected_buildings[usage]
            del buildings[usage]
            del affected_buildings[usage]

    # Generate csv file of results
    ## fid = open('C:\dki_table_%s.csv' % H.get_name(), 'wb')
    ## fid.write('%s, %s, %s\n' % (_('Building type'),
    ##                             _('Temporarily closed'),
    ##                             _('Total')))
    ## fid.write('%s, %i, %i\n' % (_('All'), count, N))

    # Generate simple impact report
    table_body = [question,
                  TableRow([_('Building type'),
                            _('Temporarily closed'),
                            _('Total')],
                           header=True),
                  TableRow([_('All'), count, N])]

    ## fid.write('%s, %s, %s\n' % (_('Building type'),
    ##                             _('Temporarily closed'),
    ##                             _('Total')))

    # Generate breakdown by building usage type if available
    if 'type' in attribute_names:
        # Make list of building types
        building_list = []
        for usage in buildings:
            building_type = usage.replace('_', ' ')

            # Lookup internationalised value if available
            if building_type in internationalised_values:
                building_type = internationalised_values[building_type]
            else:
                print ('WARNING: %s could not be translated'
                       % building_type)
            building_list.append([building_type.capitalize(),
                                  affected_buildings[usage],
                                  buildings[usage]])
            ## fid.write('%s, %i, %i\n' % (building_type.capitalize(),
            ##                             affected_buildings[usage],
            ##                             buildings[usage]))

        # Sort alphabetically
        building_list.sort()

        #table_body.append(TableRow([_('Building type'),
        #                            _('Temporarily closed'),
        #                            _('Total')], header=True))
        table_body.append(TableRow(_('Breakdown by building type'),
                                   header=True))
        for row in building_list:
            s = TableRow(row)
            table_body.append(s)

    ## fid.close()
    table_body.append(TableRow(_('Action Checklist:'), header=True))
    table_body.append(TableRow(_('Are the critical facilities still '
                                 'open?')))

    table_body.append(TableRow(_('Notes'), header=True))
    assumption = _('Buildings are said to be flooded when ')
    if hazard_attribute == 'depth':
        assumption += _('flood levels exceed %.1f m') % threshold
    else:
        assumption += _('in areas marked as flood prone')
    table_body.append(assumption)

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = _('Buildings inundated')

    # Create style
    style_classes = [dict(label=_('Not Flooded'), min=0, max=0,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=_('Flooded'), min=1, max=1,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=attributes,
               projection=I.get_projection(),
               geometry=I.get_geometry(),
               name=_('Estimated buildings affected'),
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               style_info=style_info)
    return V
def get_plugins_as_table(dict_filter=None):
    """Retrieve a table listing all plugins and their requirements.

    Or just a single plugin if name is passed.

    Args:
        * dict_filter = dictionary that contains filters
            - id = list_id
            - title = list_title
            - category : list_category
            - subcategory : list_subcategory
            - layertype : list_layertype
            - datatype : list_datatype
            - unit: list_unit
            - disabled : list_disabled  # not included

    Returns:
        * table: contains the plugins that match dict_filter

    Raises:
        None
    """
    if dict_filter is None:
        dict_filter = {'id': [],
                       'title': [],
                       'category': [],
                       'subcategory': [],
                       'layertype': [],
                       'datatype': [],
                       'unit': []}

    table_body = []
    # use this list for avoiding wrong order in dict
    atts = ['category', 'subcategory', 'layertype',
            'datatype', 'unit']
    header = TableRow([_('Title'), _('ID'), _('Category'),
                       _('Sub Category'), _('Layer type'), _('Data type'),
                       _('Unit')],
                      header=True)
    table_body.append(header)

    plugins_dict = dict([(pretty_function_name(p), p)
                         for p in FunctionProvider.plugins])

    not_found_value = 'N/A'

    for key, func in plugins_dict.iteritems():
        for requirement in requirements_collect(func):
            dict_found = {'title': False,
                          'id': False,
                          'category': False,
                          'subcategory': False,
                          'layertype': False,
                          'datatype': False,
                          'unit': False}

            dict_req = parse_single_requirement(str(requirement))

            for myKey in dict_found.iterkeys():
                myFilter = dict_filter.get(myKey, [])
                if myKey == 'title':
                    myValue = str(get_function_title(func))
                elif myKey == 'id':
                    myValue = str(key)
                else:
                    myValue = dict_req.get(myKey, not_found_value)

                if myFilter != []:
                    for myKeyword in myFilter:
                        if type(myValue) == type(str()):
                            if myValue == myKeyword:
                                dict_found[myKey] = True
                                break
                        elif type(myValue) == type(list()):
                            if myKeyword in myValue:
                                dict_found[myKey] = True
                                break
                        else:
                            if myValue.find(str(myKeyword)) != -1:
                                dict_found[myKey] = True
                                break
                else:
                    dict_found[myKey] = True

            add_row = True
            for found_value in dict_found.itervalues():
                if not found_value:
                    add_row = False
                    break

            if add_row:
                row = []
                row.append(TableCell(get_function_title(func),
                                     header=True))
                row.append(key)
                for myKey in atts:
                    myValue = pretty_string(dict_req.get(myKey,
                                                         not_found_value))
                    row.append(myValue)
                table_body.append(TableRow(row))

    cw = 100 / 7
    table_col_width = [str(cw) + '%',
                       str(cw) + '%',
                       str(cw) + '%',
                       str(cw) + '%',
                       str(cw) + '%',
                       str(cw) + '%',
                       str(cw) + '%']
    table = Table(table_body, col_width=table_col_width)
    table.caption = _('Available Impact Functions')
    return table
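# --- Illustrative sketch (not part of the InaSAFE source) ---------------
# The dict_filter argument narrows the table to plugins whose metadata
# matches every non-empty filter list; an empty list for a key means
# "do not filter on this key". A hypothetical call that would keep only
# raster earthquake hazard functions; the keys mirror the Args section
# above, the values are examples only.
my_filter = {'id': [],
             'title': [],
             'category': ['hazard'],
             'subcategory': ['earthquake'],
             'layertype': ['raster'],
             'datatype': [],
             'unit': []}
# table = get_plugins_as_table(dict_filter=my_filter)
# --- End of sketch -------------------------------------------------------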
def run(self, layers):
    """Risk plugin for tsunami population
    """
    # Extract data
    H = get_hazard_layer(layers)    # Depth
    E = get_exposure_layer(layers)  # Building locations

    # Interpolate hazard level to building locations
    Hi = H.interpolate(E, attribute_name='depth')

    # Extract relevant numerical data
    coordinates = Hi.get_geometry()
    depth = Hi.get_data()
    N = len(depth)

    # List attributes to carry forward to result layer
    attributes = E.get_attribute_names()

    # Calculate building impact according to guidelines
    count3 = 0
    count1 = 0
    count0 = 0
    population_impact = []
    for i in range(N):
        if H.is_raster:
            # Get depth
            dep = float(depth[i]['depth'])

            # Classify buildings according to depth
            if dep >= 3:
                affected = 3  # FIXME: Colour upper bound is 100 but
                count3 += 1   # does not catch affected == 100
            elif 1 <= dep < 3:
                affected = 2
                count1 += 1
            else:
                affected = 1
                count0 += 1
        elif H.is_vector:
            dep = 0  # Just put something here
            cat = depth[i]['Affected']
            if cat is True:
                affected = 3
                count3 += 1
            else:
                affected = 1
                count0 += 1

        # Collect depth and calculated damage
        result_dict = {self.target_field: affected,
                       'DEPTH': dep}

        # Carry all original attributes forward
        # FIXME: This should be done in interpolation. Check.
        #for key in attributes:
        #    result_dict[key] = E.get_data(key, i)

        # Record result for this feature
        population_impact.append(result_dict)

    # Create report
    Hname = H.get_name()
    Ename = E.get_name()
    if H.is_raster:
        impact_summary = ('<b>In case of "%s" the estimated impact to '
                          '"%s" '
                          'is:</b><br><br><p>' % (Hname, Ename))
        impact_summary += ('<table border="0" width="320px">'
                           ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                           ' <tr></tr>'
                           ' <tr><td>%s:</td><td>%i</td></tr>'
                           ' <tr><td>%s:</td><td>%i</td></tr>'
                           ' <tr><td>%s:</td><td>%i</td></tr>'
                           '</table>' % (_('Impact'),
                                         _('Number of buildings'),
                                         _('Low'), count0,
                                         _('Medium'), count1,
                                         _('High'), count3))
    else:
        impact_summary = ('<table border="0" width="320px">'
                          ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                          ' <tr></tr>'
                          ' <tr><td>%s:</td><td>%i</td></tr>'
                          ' <tr><td>%s:</td><td>%i</td></tr>'
                          ' <tr><td>%s:</td><td>%i</td></tr>'
                          '</table>' % ('Terdampak oleh tsunami',
                                        'Jumlah gedung',
                                        'Terdampak', count3,
                                        'Tidak terdampak', count0,
                                        'Semua', N))
    impact_summary += '<br>'  # Blank separation row
    impact_summary += '<b>' + _('Assumption') + ':</b><br>'
    impact_summary += ('Levels of impact are defined by BNPB\'s '
                       '<i>Pengkajian Risiko Bencana</i>')
    impact_summary += ('<table border="0" width="320px">'
                       ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
                       ' <tr></tr>'
                       ' <tr><td>%s:</td><td>%s:</td></tr>'
                       ' <tr><td>%s:</td><td>%s:</td></tr>'
                       ' <tr><td>%s:</td><td>%s:</td></tr>'
                       '</table>' % (_('Impact'), _('Tsunami height'),
                                     _('Low'), '<1 m',
                                     _('Medium'), '1-3 m',
                                     _('High'), '>3 m'))

    # Create style
    style_classes = [dict(label='< 1 m', min=0, max=1,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label='1 - 3 m', min=1, max=2,
                          colour='#FFA500', transparency=0, size=1),
                     dict(label='> 3 m', min=2, max=4,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    if Hi.is_line_data:
        name = 'Roads flooded'
    elif Hi.is_point_data:
        name = 'Buildings flooded'
    V = Vector(data=population_impact,
               projection=E.get_projection(),
               geometry=coordinates,
               keywords={'impact_summary': impact_summary},
               geometry_type=Hi.geometry_type,
               name=name,
               style_info=style_info)
    return V
class ITBFatalityFunction(FunctionProvider):
    """Indonesian Earthquake Fatality Model

    This model was developed by Institut Teknologi Bandung (ITB) and
    implemented by Dr Hadi Ghasemi, Geoscience Australia

    Reference:

    Indonesian Earthquake Building-Damage and Fatality Models and
    Post Disaster Survey Guidelines Development,
    Bali, 27-28 February 2012, 54pp.

    Algorithm:

    In this study, the same functional form as Allen (2009) is adopted
    to express fatality rate as a function of intensity (see Eq. 10 in the
    report). The Matlab built-in function (fminsearch) for the Nelder-Mead
    algorithm is used to estimate the model parameters. The objective
    function (L2G norm) that is minimised during the optimisation is the
    same as the one used by Jaiswal et al. (2010).

    The coefficients used in the Indonesian model are
    x=0.62275231, y=8.03314466, zeta=2.15

    Allen, T. I., Wald, D. J., Earle, P. S., Marano, K. D., Hotovec, A. J.,
    Lin, K., and Hearne, M., 2009. An Atlas of ShakeMaps and population
    exposure catalog for earthquake loss modeling, Bull. Earthq. Eng. 7,
    701-718.

    Jaiswal, K., and Wald, D., 2010. An empirical model for global
    earthquake fatality estimation, Earthq. Spectra 26, 1017-1037.

    Caveats and limitations:

    The current model is the result of the above mentioned workshop and
    reflects the best available information. However, the current model
    has a number of issues listed below and is expected to evolve further
    over time.

    1 - The model is based on a limited number of observed fatality
        rates during 4 past fatal events.
    2 - The model clearly over-predicts the fatality rates at
        intensities higher than VIII.
    3 - The model only estimates the expected fatality rate for a given
        intensity level; however the associated uncertainty for the
        proposed model is not addressed.
    4 - There are few known mistakes in developing the current model:
        - rounding MMI values to the nearest 0.5,
        - Implementing Finite-Fault models of candidate events, and
        - consistency between selected GMPEs with those in use by BMKG.

    These issues will be addressed by the ITB team in the final report.

    :author Hadi Ghasemi
    :rating 3

    :param requires category == 'hazard' and \
                    subcategory == 'earthquake' and \
                    layertype == 'raster' and \
                    unit == 'MMI'

    :param requires category == 'exposure' and \
                    subcategory == 'population' and \
                    layertype == 'raster'
    """

    title = _('Die')

    def run(self, layers,
            x=0.62275231, y=8.03314466):  # , zeta=2.15):
        """Indonesian Earthquake Fatality Model

        Input
          layers: List of layers expected to contain
              H: Raster layer of MMI ground shaking
              P: Raster layer of population density
        """

        # Define percentages of people being displaced at each mmi level
        displacement_rate = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0,
                             7: 0.1, 8: 0.5, 9: 0.75, 10: 1.0}

        # Extract input layers
        intensity = get_hazard_layer(layers)
        population = get_exposure_layer(layers)

        question = get_question(intensity.get_name(),
                                population.get_name(),
                                self)

        # Extract data grids
        H = intensity.get_data()   # Ground Shaking
        P = population.get_data()  # Population Density

        # Calculate population affected by each MMI level
        # FIXME (Ole): this range is 2-9. Should 10 be included?
        mmi_range = range(2, 10)
        number_of_exposed = {}
        number_of_displaced = {}
        number_of_fatalities = {}

        # Calculate fatality rates for observed Intensity values (H
        # based on ITB power model
        R = numpy.zeros(H.shape)
        for mmi in mmi_range:
            # Identify cells where MMI is in class i
            mask = (H > mmi - 0.5) * (H <= mmi + 0.5)

            # Count population affected by this shake level
            I = numpy.where(mask, P, 0)

            # Calculate expected number of fatalities per level
            fatality_rate = numpy.power(10.0, x * mmi - y)
            F = fatality_rate * I

            # Calculate expected number of displaced people per level
            try:
                D = displacement_rate[mmi] * I
            except KeyError, e:
                msg = 'mmi = %i, I = %s, Error msg: %s' % (mmi, str(I),
                                                           str(e))
                fid = open('C:\\error_message.txt', 'wb')
                fid.write(msg)
                fid.close()

            # Sum up numbers for map
            R += F   # Fatalities
            #R += D  # Displaced

            # Generate text with result for this study
            # This is what is used in the real time system exposure table
            number_of_exposed[mmi] = numpy.nansum(I.flat)
            number_of_displaced[mmi] = numpy.nansum(D.flat)
            number_of_fatalities[mmi] = numpy.nansum(F.flat)

        # Set resulting layer to NaN when less than a threshold. This is to
        # achieve transparency (see issue #126).
        R[R < 0.01] = numpy.nan

        # Total statistics
        total = int(round(numpy.nansum(P.flat) / 1000) * 1000)

        # Compute number of fatalities
        fatalities = int(round(numpy.nansum(number_of_fatalities.values())
                               / 1000)) * 1000

        # Compute number of people displaced due to building collapse
        displaced = int(round(numpy.nansum(number_of_displaced.values())
                              / 1000)) * 1000

        # Compute test number of people displaced
        # FIXME (Ole): Just a temporary measure to check...
        displaced_test = 0
        for mmi in mmi_range:
            displaced_test += (displacement_rate[mmi]
                               * number_of_exposed[mmi])
        displaced_test = int(round(displaced_test / 1000)) * 1000
        msg = 'Displaced = %i, test = %i' % (displaced, displaced_test)
        if displaced != displaced_test:
            raise Exception(msg)

        # Generate impact report
        table_body = [question]
                      #TableRow([_('Groundshaking (MMI)'),
                      #          _('# people impacted')],
                      #         header=True)]

        # Table of people exposed to each shake level
        # NOTE (Ole): I have commented this out for the time being.
        # as not needed. However, had to modify unit test.
        #for mmi in mmi_range:
        #    s = str(int(number_of_exposed[mmi])).rjust(10)
        #    #print s, len(s)
        #    row = TableRow([mmi, s],
        #                   col_align=['right', 'right'])
        #
        #    # FIXME (Ole): Weirdly enough, the row object
        #    # has align="right" in it, but it doesn't work
        #    #print row
        #    table_body.append(row)

        # Add total fatality estimate
        s = str(int(fatalities)).rjust(10)
        table_body.append(TableRow([_('Number of fatalities'), s],
                                   header=True))

        # Add total estimate of people displaced
        s = str(int(displaced)).rjust(10)
        table_body.append(TableRow([_('Number of people displaced'), s],
                                   header=True))

        # Add estimate of total population in area
        s = str(int(total)).rjust(10)
        table_body.append(TableRow([_('Total number of people'), s],
                                   header=True))

        table_body.append(TableRow(_('Action Checklist:'), header=True))
        if fatalities > 0:
            table_body.append(_('Are there enough victim identification '
                                'units available for %i people?')
                              % fatalities)
        if displaced > 0:
            table_body.append(_('Are there enough shelters available '
                                'for %i people?') % displaced)

        table_body.append(TableRow(_('Notes'), header=True))
        table_body.append(_('Fatality model is from '
                            'Institute of Teknologi Bandung 2012.'))
        table_body.append(_('Population numbers rounded to nearest 1000.'))

        impact_summary = Table(table_body).toNewlineFreeString()
        impact_table = impact_summary
        map_title = _('Earthquake impact to population')

        # Create style info dynamically
        classes = numpy.linspace(numpy.nanmin(R.flat[:]),
                                 numpy.nanmax(R.flat[:]), 5)

        style_classes = [dict(colour='#EEFFEE', quantity=classes[0],
                              transparency=100,
                              label=_('%.2f people/cell') % classes[0]),
                         dict(colour='#FFFF7F', quantity=classes[1],
                              transparency=30),
                         dict(colour='#E15500', quantity=classes[2],
                              transparency=30,
                              label=_('%.2f people/cell') % classes[2]),
                         dict(colour='#E4001B', quantity=classes[3],
                              transparency=30),
                         dict(colour='#730000', quantity=classes[4],
                              transparency=30,
                              label=_('%.2f people/cell') % classes[4])]
        style_info = dict(target_field=None,
                          style_classes=style_classes)

        # Create new layer and return
        L = Raster(R,
                   projection=population.get_projection(),
                   geotransform=population.get_geotransform(),
                   keywords={'impact_summary': impact_summary,
                             'total_population': total,
                             'total_fatalities': fatalities,
                             'impact_table': impact_table,
                             'map_title': map_title},
                   name=_('Estimated fatalities'),
                   style_info=style_info)

        # Maybe return a shape file with contours instead
        return L
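# --- Illustrative sketch (not part of the InaSAFE source) ---------------
# The fatality model above follows the Allen (2009) functional form: the
# fatality rate at a given MMI is 10 ** (x * mmi - y), with the Indonesian
# coefficients x = 0.62275231 and y = 8.03314466 quoted in the docstring.
# A stand-alone evaluation of that power law; the exposed-population
# figure is hypothetical.
import numpy

x, y = 0.62275231, 8.03314466
exposed = 1e6  # hypothetical number of people exposed at each MMI level

for mmi in range(4, 10):
    fatality_rate = numpy.power(10.0, x * mmi - y)
    expected_fatalities = fatality_rate * exposed
    print('MMI %i: rate %.2e, expected fatalities %.0f'
          % (mmi, fatality_rate, expected_fatalities))
# --- End of sketch -------------------------------------------------------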
def run(self, layers):
    """Impact plugin for hazard impact
    """
    # Extract data
    H = get_hazard_layer(layers)    # Value
    E = get_exposure_layer(layers)  # Building locations

    question = get_question(H.get_name(), E.get_name(), self)

    # Interpolate hazard level to building locations
    H = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='hazard_level',
                                              mode='constant')

    # Extract relevant numerical data
    coordinates = H.get_geometry()
    category = H.get_data()
    N = len(category)

    # List attributes to carry forward to result layer
    #attributes = E.get_attribute_names()

    # Calculate building impact according to guidelines
    count2 = 0
    count1 = 0
    count0 = 0
    building_impact = []
    for i in range(N):
        # Get category value
        val = float(category[i]['hazard_level'])

        # Classify buildings according to value
        ## if val >= 2.0 / 3:
        ##     affected = 2
        ##     count2 += 1
        ## elif 1.0 / 3 <= val < 2.0 / 3:
        ##     affected = 1
        ##     count1 += 1
        ## else:
        ##     affected = 0
        ##     count0 += 1
        ## FIXME it would be good if the affected were words not numbers
        ## FIXME need to read hazard layer and see category or keyword
        if val == 3:
            affected = 3
            count2 += 1
        elif val == 2:
            affected = 2
            count1 += 1
        elif val == 1:
            affected = 1
            count0 += 1
        else:
            affected = 'None'

        # Collect depth and calculated damage
        result_dict = {self.target_field: affected,
                       'CATEGORY': val}

        # Record result for this feature
        building_impact.append(result_dict)

    # Create impact report
    # Generate impact summary
    table_body = [question,
                  TableRow([_('Category'), _('Affected')],
                           header=True),
                  TableRow([_('High'), count2]),
                  TableRow([_('Medium'), count1]),
                  TableRow([_('Low'), count0]),
                  TableRow([_('All'), N])]

    table_body.append(TableRow(_('Notes'), header=True))
    table_body.append(_('Categorised hazard has only 3'
                        ' classes, high, medium and low.'))

    impact_summary = Table(table_body).toNewlineFreeString()
    impact_table = impact_summary
    map_title = _('Categorised hazard impact on buildings')

    #FIXME it would be great to do categorized rather than graduated
    # Create style
    style_classes = [dict(label=_('Low'), min=1, max=1,
                          colour='#1EFC7C', transparency=0, size=1),
                     dict(label=_('Medium'), min=2, max=2,
                          colour='#FFA500', transparency=0, size=1),
                     dict(label=_('High'), min=3, max=3,
                          colour='#F31A1C', transparency=0, size=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    name = 'Buildings Affected'
    V = Vector(data=building_impact,
               projection=E.get_projection(),
               geometry=coordinates,
               geometry_type=E.geometry_type,
               keywords={'impact_summary': impact_summary,
                         'impact_table': impact_table,
                         'map_title': map_title},
               name=name,
               style_info=style_info)
    return V
""" # FIXME (Ole): This approach can be generalised to any strings that are not # statically declared such as attribute values. # So, we should merge the two dictionaries and just have one # with strings that need to be recognised by the translation # tools. # Also rename this module to something more fitting, such as # dynamic_translations.py # See issue #168 from safe.common.utilities import ugettext as _ names = { 'title1': _('DKI buildings'), # Bangunan DKI 'title2': _('Jakarta 2007 flood'), # Banjir seperti 2007 'Jakarta 2007 flood': _('Jakarta 2007 flood'), 'A flood in Jakarta like in 2007': _('A flood in Jakarta like ' 'in 2007'), 'title3': _('Jakarta flood like 2007 with pump failure at Pluit, ' 'Ancol and Sunter'), # Banjir 2007 tanpa pompa di # Pluit, Ancol dan Sunter 'Jakarta flood like 2007 with pump failure at Pluit and Ancol': _('Jakarta flood like 2007 with pump failure at ' 'Pluit and Ancol'), 'A flood in Jakarta like in 2007 but with structural improvements':
def run(self, layers):
    """Risk plugin for earthquake school damage
    """
    # Extract data
    H = get_hazard_layer(layers)    # Ground shaking
    E = get_exposure_layer(layers)  # Building locations

    keywords = E.get_keywords()
    if 'datatype' in keywords:
        datatype = keywords['datatype']
        if datatype.lower() == 'osm':
            # Map from OSM attributes to the guideline classes (URM and RM)
            E = osm2bnpb(E, target_attribute=self.vclass_tag)
        elif datatype.lower() == 'sigab':
            # Map from SIGAB attributes to the guideline classes
            # (URM and RM)
            E = sigab2bnpb(E)
        else:
            E = unspecific2bnpb(E, target_attribute=self.vclass_tag)
    else:
        E = unspecific2bnpb(E, target_attribute=self.vclass_tag)

    # Interpolate hazard level to building locations
    H = assign_hazard_values_to_exposure_data(H, E,
                                              attribute_name='MMI')

    # Extract relevant numerical data
    coordinates = E.get_geometry()
    shaking = H.get_data()
    N = len(shaking)

    # List attributes to carry forward to result layer
    attributes = E.get_attribute_names()

    # Calculate building damage
    count3 = 0
    count2 = 0
    count1 = 0
    count_unknown = 0
    building_damage = []
    for i in range(N):
        mmi = float(shaking[i]['MMI'])

        building_class = E.get_data(self.vclass_tag, i)
        lo, hi = damage_parameters[building_class]

        if numpy.isnan(mmi):
            # If we don't know the shake level assign Not-a-Number
            damage = numpy.nan
            count_unknown += 1
        elif mmi < lo:
            damage = 1  # Low
            count1 += 1
        elif lo <= mmi < hi:
            damage = 2  # Medium
            count2 += 1
        elif mmi >= hi:
            damage = 3  # High
            count3 += 1
        else:
            msg = 'Undefined shakelevel %s' % str(mmi)
            raise Exception(msg)

        # Collect shake level and calculated damage
        result_dict = {self.target_field: damage,
                       'MMI': mmi}

        # Carry all original attributes forward
        for key in attributes:
            result_dict[key] = E.get_data(key, i)

        # Record result for this feature
        building_damage.append(result_dict)

    # Create report
    impact_summary = ('<table border="0" width="320px">'
                      ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>'
                      ' <tr></tr>'
                      ' <tr><td>%s:</td><td>%i</td></tr>'
                      ' <tr><td>%s (10-25%%):</td><td>%i</td></tr>'
                      ' <tr><td>%s (25-50%%):</td><td>%i</td></tr>'
                      ' <tr><td>%s (50-100%%):</td><td>%i</td></tr>'
                      % (_('Buildings'), _('Total'),
                         _('All'), N,
                         _('Low damage'), count1,
                         _('Medium damage'), count2,
                         _('High damage'), count3))
    impact_summary += (' <tr><td>%s (NaN):</td><td>%i</td></tr>'
                       % ('Unknown', count_unknown))
    impact_summary += '</table>'

    # Create style
    style_classes = [dict(label=_('Low damage'), min=0.5, max=1.5,
                          colour='#fecc5c', transparency=1),
                     dict(label=_('Medium damage'), min=1.5, max=2.5,
                          colour='#fd8d3c', transparency=1),
                     dict(label=_('High damage'), min=2.5, max=3.5,
                          colour='#f31a1c', transparency=1)]
    style_info = dict(target_field=self.target_field,
                      style_classes=style_classes)

    # Create vector layer and return
    V = Vector(data=building_damage,
               projection=E.get_projection(),
               geometry=coordinates,
               name='Estimated damage level',
               keywords={'impact_summary': impact_summary},
               style_info=style_info)
    return V
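# --- Illustrative sketch (not part of the InaSAFE source) ---------------
# The classification above compares interpolated MMI against a (lo, hi)
# pair looked up per building class: below lo is low damage, between lo
# and hi is medium, at or above hi is high. A tiny stand-alone version;
# the thresholds and class names below are made-up placeholders, not the
# BNPB guideline values.
damage_parameters_sketch = {'URM': (6.0, 7.0),   # hypothetical thresholds
                            'RM': (7.0, 8.0)}


def classify(mmi, building_class):
    lo, hi = damage_parameters_sketch[building_class]
    if mmi < lo:
        return 1  # Low
    elif mmi < hi:
        return 2  # Medium
    return 3      # High

print(classify(6.5, 'URM'))  # 2 (medium)
print(classify(8.5, 'RM'))   # 3 (high)
# --- End of sketch -------------------------------------------------------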
def run(self, layers): """Risk plugin for flood population evacuation Input layers: List of layers expected to contain H: Raster layer of flood depth P: Raster layer of population data on the same grid as H Counts number of people exposed to flood levels exceeding specified threshold. Return Map of population exposed to flood levels exceeding the threshold Table with number of people evacuated and supplies required """ # Identify hazard and exposure layers H = get_hazard_layer(layers) # Flood inundation E = get_exposure_layer(layers) question = get_question(H.get_name(), E.get_name(), self) # Check that hazard is polygon type if not H.is_vector: msg = ('Input hazard %s was not a vector layer as expected ' % H.get_name()) raise Exception(msg) msg = ('Input hazard must be a polygon layer. I got %s with layer ' 'type %s' % (H.get_name(), H.get_geometry_name())) if not H.is_polygon_data: raise Exception(msg) # Run interpolation function for polygon2raster P = assign_hazard_values_to_exposure_data(H, E, attribute_name='population') # Initialise attributes of output dataset with all attributes # from input polygon and a population count of zero new_attributes = H.get_data() category_title = 'FLOODPRONE' # FIXME: Should come from keywords categories = {} for attr in new_attributes: attr[self.target_field] = 0 cat = attr[category_title] categories[cat] = 0 # Count affected population per polygon, per category and total evacuated = 0 for attr in P.get_data(): # Get population at this location pop = float(attr['population']) # Update population count for associated polygon poly_id = attr['polygon_id'] new_attributes[poly_id][self.target_field] += pop # Update population count for each category cat = new_attributes[poly_id][category_title] categories[cat] += pop # Update total evacuated += pop # Count totals total = int(numpy.sum(E.get_data(nan=0, scaling=False))) # Don't show digits less than a 1000 if total > 1000: total = total // 1000 * 1000 if evacuated > 1000: evacuated = evacuated // 1000 * 1000 # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan rice = evacuated * 2.8 drinking_water = evacuated * 17.5 water = evacuated * 67 family_kits = evacuated / 5 toilets = evacuated / 20 # Generate impact report for the pdf map table_body = [question, TableRow([_('People needing evacuation'), '%i' % evacuated], header=True), TableRow(_('Map shows population affected in each flood ' 'prone area ')), TableRow([_('Needs per week'), _('Total')], header=True), [_('Rice [kg]'), int(rice)], [_('Drinking Water [l]'), int(drinking_water)], [_('Clean Water [l]'), int(water)], [_('Family Kits'), int(family_kits)], [_('Toilets'), int(toilets)]] impact_table = Table(table_body).toNewlineFreeString() # Extend impact report for on-screen display table_body.extend([TableRow(_('Notes'), header=True), _('Total population: %i') % total, _('People need evacuation if in area identified ' 'as "Flood Prone"'), _('Minimum needs are defined in BNPB ' 'regulation 7/2008')]) impact_summary = Table(table_body).toNewlineFreeString() map_title = _('People affected by flood prone areas') # Define classes for legend for flooded population counts colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600', '#FF0000', '#7A0000'] population_counts = [x['population'] for x in new_attributes] cls = [0] + numpy.linspace(1, max(population_counts), len(colours)).tolist() # Define style info for output polygons showing population counts style_classes = [] for i, colour in enumerate(colours): lo = cls[i] hi = 
cls[i + 1] if i == 0: label = _('0') else: label = _('%i - %i') % (lo, hi) entry = dict(label=label, colour=colour, min=lo, max=hi, transparency=0, size=1) style_classes.append(entry) # Override style info with new classes and name style_info = dict(target_field=self.target_field, style_classes=style_classes, legend_title=_('Population Count')) # Create vector layer and return V = Vector(data=new_attributes, projection=H.get_projection(), geometry=H.get_geometry(), name=_('Population affected by flood prone areas'), keywords={'impact_summary': impact_summary, 'impact_table': impact_table, 'map_title': map_title}, style_info=style_info) return V
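# Sketch of how the legend classes above are derived: the first class is
# reserved for zero, and numpy.linspace spreads the remaining breakpoints
# evenly from 1 up to the largest count, one breakpoint per colour.  The
# colour list matches the plugin; the sample counts are made up and the
# labels are plain strings rather than translated ones.
import numpy

colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
           '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
counts = [0, 12, 350, 4800]          # hypothetical per-polygon population counts
cls = [0] + numpy.linspace(1, max(counts), len(colours)).tolist()

style_classes = []
for i, colour in enumerate(colours):
    lo, hi = cls[i], cls[i + 1]
    label = '0' if i == 0 else '%i - %i' % (lo, hi)
    style_classes.append(dict(label=label, colour=colour,
                              min=lo, max=hi, transparency=0, size=1))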
def run(self, layers): """Risk plugin for Padang building survey """ # Extract data H = get_hazard_layer(layers) # Ground shaking E = get_exposure_layer(layers) # Building locations datatype = E.get_keywords()['datatype'] vclass_tag = 'ITB_Class' if datatype.lower() == 'osm': # Map from OSM attributes to the ITB building classes # Emap = osm2itb(E) print 'osm2itb has not been implemented' elif datatype.lower() == 'sigab': # Emap = sigabitb(E) print 'sigab2itb has not been implemented' elif datatype.lower() == 'itb': Emap = E # Interpolate hazard level to building locations Hi = assign_hazard_values_to_exposure_data(H, Emap, attribute_name='MMI') # Extract relevant numerical data coordinates = Emap.get_geometry() shaking = Hi.get_data() N = len(shaking) # List attributes to carry forward to result layer attributes = Emap.get_attribute_names() # Calculate building damage count50 = 0 count25 = 0 count10 = 0 count0 = 0 building_damage = [] for i in range(N): mmi = float(shaking[i]['MMI']) building_class = Emap.get_data(vclass_tag, i) building_type = str(building_class) damage_params = vul_curves[building_type] beta = damage_params['beta'] median = damage_params['median'] msg = 'Invalid parameter value for ' + building_type verify(beta + median > 0.0, msg) percent_damage = lognormal_cdf(mmi, median=median, sigma=beta) * 100 # Collect shake level and calculated damage result_dict = {self.target_field: percent_damage, 'MMI': mmi} # Carry all orginal attributes forward for key in attributes: result_dict[key] = Emap.get_data(key, i) # Record result for this feature building_damage.append(result_dict) # Debugging #if percent_damage > 0.01: # print mmi, percent_damage # Calculate statistics if percent_damage < 10: count0 += 1 if 10 <= percent_damage < 33: count10 += 1 if 33 <= percent_damage < 66: count25 += 1 if 66 <= percent_damage: count50 += 1 # fid.close() # Create report Hname = H.get_name() Ename = E.get_name() impact_summary = ('<b>In case of "%s" the estimated impact to ' '"%s" ' 'is:</b><br><br><p>' % (Hname, Ename)) impact_summary += ( '<table border="0" width="320px">' ' <tr><th><b>%s</b></th><th><b>%s</b></th></th>' ' <tr></tr>' ' <tr><td>%s:</td><td>%i</td></tr>' ' <tr><td>%s (<10%%):</td><td>%i</td></tr>' ' <tr><td>%s (10-33%%):</td><td>%i</td></tr>' ' <tr><td>%s (33-66%%):</td><td>%i</td></tr>' ' <tr><td>%s (66-100%%):</td><td>%i</td></tr>' '</table></font>' % (_('Buildings'), _('Total'), _('All'), N, _('No damage'), count0, _('Low damage'), count10, _('Medium damage'), count25, _('High damage'), count50)) impact_summary += '<br>' # Blank separation row impact_summary += '<b>' + _('Assumption') + ':</b><br>' # This is the proper text: #_('Levels of impact are defined by post 2009 ' # 'Padang earthquake survey conducted by Geoscience ' # 'Australia and Institute of Teknologi Bandung.')) #_('Unreinforced masonry is assumed where no ' # 'structural information is available.')) impact_summary += _('Levels of impact are defined by post 2009 ' 'Padang earthquake survey conducted by Geoscience ' 'Australia and Institute of Teknologi Bandung.') impact_summary += _('Unreinforced masonry is assumed where no ' 'structural information is available.') # Create style style_classes = [ dict(label=_('No damage'), min=0, max=10, colour='#00ff00', transparency=1), dict(label=_('Low damage'), min=10, max=33, colour='#ffff00', transparency=1), dict(label=_('Medium damage'), min=33, max=66, colour='#ffaa00', transparency=1), dict(label=_('High damage'), min=66, max=100, colour='#ff0000', transparency=1) ] 
style_info = dict(target_field=self.target_field, style_classes=style_classes) # Create vector layer and return V = Vector(data=building_damage, projection=E.get_projection(), geometry=coordinates, name='Estimated pct damage', keywords={'impact_summary': impact_summary}, style_info=style_info) return V
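# Sketch of the fragility evaluation above: percentage damage is the
# lognormal CDF of the shaking intensity, parametrised by a median MMI and a
# log-standard deviation beta per building type.  The helper below is a
# plain-math stand-in for the library's lognormal_cdf, and the curve
# parameters are illustrative, not the Padang survey values.
import math

def lognormal_cdf(x, median, sigma):
    """Cumulative lognormal distribution evaluated at x."""
    return 0.5 * (1.0 + math.erf((math.log(x) - math.log(median)) /
                                 (sigma * math.sqrt(2.0))))

median, beta = 7.5, 0.3          # assumed example parameters for one class
mmi = 6.8
percent_damage = lognormal_cdf(mmi, median=median, sigma=beta) * 100
print('Estimated damage: %.1f%%' % percent_damage)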
R = Raster(I, projection=inundation.get_projection(), geotransform=inundation.get_geotransform(), name='Penduduk yang %s' % (get_function_title(self)), keywords={'impact_summary': impact_summary}, style_info=style_info) return R """ from safe.common.utilities import ugettext as _ # Flood population impact raster style style_classes = [ dict(colour='#FFFFFF', quantity=2, transparency=100), dict(label=_('Low'), colour='#38A800', quantity=5, transparency=0), dict(colour='#79C900', quantity=10, transparency=0), dict(colour='#CEED00', quantity=20, transparency=0), dict(label=_('Medium'), colour='#FFCC00', quantity=50, transparency=0), dict(colour='#FF6600', quantity=100, transparency=0), dict(colour='#FF0000', quantity=200, transparency=0), dict(label=_('High'), colour='#7A0000', quantity=300, transparency=0) ] flood_population_style = dict(target_field=None, legend_title=None, style_classes=style_classes) # Earthquake fatality raster style # FIXME (Ole): The styler cannot handle floats yet. Issue #126 #style_classes = [dict(colour='#FFFFFF', quantity=0.0, transparency=100), # dict(colour='#0000FF', quantity=4, transparency=0),
def run(self, layers, x=0.62275231, y=8.03314466): # , zeta=2.15): """Gender specific earthquake impact model Input layers: List of layers expected to contain H: Raster layer of MMI ground shaking P: Raster layer of population density """ # Define percentages of people being displaced at each mmi level displacement_rate = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0.1, 8: 0.5, 9: 0.75, 10: 1.0} # Extract input layers intensity = get_hazard_layer(layers) population = get_exposure_layer(layers) question = get_question(intensity.get_name(), population.get_name(), self) # Extract data grids H = intensity.get_data() # Ground Shaking P = population.get_data() # Population Density # Calculate population affected by each MMI level # FIXME (Ole): this range is 2-9. Should 10 be included? mmi_range = range(2, 10) number_of_exposed = {} number_of_fatalities = {} # Calculate fatality rates for observed Intensity values (H # based on ITB power model R = numpy.zeros(H.shape) for mmi in mmi_range: # Identify cells where MMI is in class i mask = (H > mmi - 0.5) * (H <= mmi + 0.5) # Count population affected by this shake level I = numpy.where(mask, P, 0) # Calculate expected number of fatalities per level fatality_rate = numpy.power(10.0, x * mmi - y) F = fatality_rate * I # Sum up fatalities to create map R += F # Generate text with result for this study # This is what is used in the real time system exposure table number_of_exposed[mmi] = numpy.nansum(I.flat) number_of_fatalities[mmi] = numpy.nansum(F.flat) # Set resulting layer to zero when less than a threshold. This is to # achieve transparency (see issue #126). R[R < 1] = numpy.nan # Total statistics total = numpy.nansum(P.flat) # Compute number of fatalities fatalities = numpy.nansum(number_of_fatalities.values()) # Compute number of people displaced due to building collapse displaced = 0 for mmi in mmi_range: displaced += displacement_rate[mmi] * number_of_exposed[mmi] displaced_women = displaced * 0.52 # Could be made province dependent displaced_pregnant_women = displaced_women * 0.01387 # CHECK # Generate impact report table_body = [question] # Add total fatality estimate s = str(int(fatalities)).rjust(10) table_body.append(TableRow([_('Number of fatalities'), s], header=True)) # Add total estimate of people displaced s = str(int(displaced)).rjust(10) table_body.append(TableRow([_('Number of people displaced'), s], header=True)) s = str(int(displaced_women)).rjust(10) table_body.append(TableRow([_('Number of women displaced'), s], header=True)) s = str(int(displaced_pregnant_women)).rjust(10) table_body.append(TableRow([_('Number of pregnant women displaced'), s], header=True)) table_body.append(TableRow(_('Action Checklist:'), header=True)) table_body.append(_('Are enough shelters available for %i women?') % displaced_women) table_body.append(_('Are enough facilities available to assist %i ' 'pregnant women?') % displaced_pregnant_women) table_body.append(TableRow(_('Notes'), header=True)) table_body.append(_('Fatality model is from ' 'Institute of Teknologi Bandung 2012.')) impact_summary = Table(table_body).toNewlineFreeString() impact_table = impact_summary map_title = _('Earthquake impact to population') # Create new layer and return L = Raster(R, projection=population.get_projection(), geotransform=population.get_geotransform(), keywords={'impact_summary': impact_summary, 'total_population': total, 'total_fatalities': fatalities, 'impact_table': impact_table, 'map_title': map_title}, name=_('Estimated fatalities'), 
style_info=earthquake_fatality_style) # Maybe return a shape file with contours instead return L
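# Standalone sketch of the fatality estimate above: population cells are
# grouped into unit-wide MMI bands, each band gets the ITB power-model rate
# 10**(x*mmi - y), and fatalities are that rate times the exposed
# population.  The coefficients are the ones quoted in the plugin signature;
# the small grids are made-up test data.
import numpy

x, y = 0.62275231, 8.03314466                      # ITB model coefficients
H = numpy.array([[5.2, 6.8], [7.4, 8.9]])          # ground shaking (MMI)
P = numpy.array([[1000., 2000.], [1500., 500.]])   # population per cell

R = numpy.zeros(H.shape)
for mmi in range(2, 10):
    mask = (H > mmi - 0.5) * (H <= mmi + 0.5)      # cells in this MMI band
    exposed = numpy.where(mask, P, 0)
    R += numpy.power(10.0, x * mmi - y) * exposed  # expected fatalities

print('Estimated fatalities: %i' % numpy.nansum(R))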
def run(self, layers): """Flood impact to buildings (e.g. from Open Street Map) """ threshold = 1.0 # Flood threshold [m] # Extract data H = get_hazard_layer(layers) # Depth E = get_exposure_layer(layers) # Building locations question = get_question(H.get_name(), E.get_name(), self) # Determine attribute name for hazard levels if H.is_raster: hazard_attribute = "depth" else: hazard_attribute = "FLOODPRONE" # Interpolate hazard level to building locations I = assign_hazard_values_to_exposure_data(H, E, attribute_name=hazard_attribute) # Extract relevant exposure data attribute_names = I.get_attribute_names() attributes = I.get_data() N = len(I) # Calculate building impact count = 0 buildings = {} affected_buildings = {} for i in range(N): if hazard_attribute == "depth": # Get the interpolated depth x = float(attributes[i]["depth"]) x = x > threshold elif hazard_attribute == "FLOODPRONE": # Use interpolated polygon attribute atts = attributes[i] if "FLOODPRONE" in atts: res = atts["FLOODPRONE"] if res is None: x = False else: x = res.lower() == "yes" else: # If there isn't a flood prone attribute, # assume that building is wet if inside polygon # as flag by generic attribute AFFECTED res = atts["Affected"] if res is None: x = False else: x = res else: msg = _("Unknown hazard type %s. " 'Must be either "depth" or "floodprone"') % hazard_attribute raise Exception(msg) # Count affected buildings by usage type if available if "type" in attribute_names: usage = attributes[i]["type"] else: usage = None if usage is not None and usage != 0: key = usage else: key = "unknown" if key not in buildings: buildings[key] = 0 affected_buildings[key] = 0 # Count all buildings by type buildings[key] += 1 if x is True: # Count affected buildings by type affected_buildings[key] += 1 # Count total affected buildings count += 1 # Add calculated impact to existing attributes attributes[i][self.target_field] = x # Lump small entries and 'unknown' into 'other' category for usage in buildings.keys(): x = buildings[usage] if x < 25 or usage == "unknown": if "other" not in buildings: buildings["other"] = 0 affected_buildings["other"] = 0 buildings["other"] += x affected_buildings["other"] += affected_buildings[usage] del buildings[usage] del affected_buildings[usage] # Generate csv file of results ## fid = open('C:\dki_table_%s.csv' % H.get_name(), 'wb') ## fid.write('%s, %s, %s\n' % (_('Building type'), ## _('Temporarily closed'), ## _('Total'))) ## fid.write('%s, %i, %i\n' % (_('All'), count, N)) # Generate simple impact report table_body = [ question, TableRow([_("Building type"), _("Temporarily closed"), _("Total")], header=True), TableRow([_("All"), count, N]), ] ## fid.write('%s, %s, %s\n' % (_('Building type'), ## _('Temporarily closed'), ## _('Total'))) school_closed = 0 hospital_closed = 0 # Generate break down by building usage type is available if "type" in attribute_names: # Make list of building types building_list = [] for usage in buildings: building_type = usage.replace("_", " ") # Lookup internationalised value if available if building_type in internationalised_values: building_type = internationalised_values[building_type] else: print("WARNING: %s could not be translated" % building_type) building_list.append([building_type.capitalize(), affected_buildings[usage], buildings[usage]]) if building_type == "school": school_closed = affected_buildings[usage] if building_type == "hospital": hospital_closed = affected_buildings[usage] ## fid.write('%s, %i, %i\n' % (building_type.capitalize(), ## 
affected_buildings[usage], ## buildings[usage])) # Sort alphabetically building_list.sort() # table_body.append(TableRow([_('Building type'), # _('Temporarily closed'), # _('Total')], header=True)) table_body.append(TableRow(_("Breakdown by building type"), header=True)) for row in building_list: s = TableRow(row) table_body.append(s) ## fid.close() table_body.append(TableRow(_("Action Checklist:"), header=True)) table_body.append(TableRow(_("Are the critical facilities still " "open?"))) table_body.append(TableRow(_("Which structures have warning capacity " "(eg. sirens, speakers, etc.)?"))) table_body.append(TableRow(_("Which buildings will be evacuation " "centres?"))) table_body.append(TableRow(_("Where will we locate the operations " "centre?"))) table_body.append(TableRow(_("Where will we locate warehouse and/or " "distribution centres?"))) if school_closed > 0: table_body.append( TableRow(_("Where will the students from the %d " "closed schools go to study?") % school_closed) ) if hospital_closed > 0: table_body.append( TableRow( _( "Where will the patients from the %d " "closed hospitals go for treatment " "and how will we transport them?" ) % hospital_closed ) ) table_body.append(TableRow(_("Notes"), header=True)) assumption = _("Buildings are said to be flooded when ") if hazard_attribute == "depth": assumption += _("flood levels exceed %.1f m") % threshold else: assumption += _("in areas marked as flood prone") table_body.append(assumption) impact_summary = Table(table_body).toNewlineFreeString() impact_table = impact_summary map_title = _("Buildings inundated") # Create style style_classes = [ dict(label=_("Not Flooded"), min=0, max=0, colour="#1EFC7C", transparency=0, size=1), dict(label=_("Flooded"), min=1, max=1, colour="#F31A1C", transparency=0, size=1), ] style_info = dict(target_field=self.target_field, style_classes=style_classes) # Create vector layer and return V = Vector( data=attributes, projection=I.get_projection(), geometry=I.get_geometry(), name=_("Estimated buildings affected"), keywords={"impact_summary": impact_summary, "impact_table": impact_table, "map_title": map_title}, style_info=style_info, ) return V
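# Sketch of the per-building wet/dry decision above: with a raster hazard the
# interpolated depth is compared against the threshold, with a polygon hazard
# the FLOODPRONE attribute is read instead.  The generic 'Affected' fallback
# used by the plugin is omitted here, and the attribute dictionaries are
# made-up examples.
threshold = 1.0    # flood threshold [m], as in the plugin

def building_is_flooded(attrs, hazard_attribute):
    if hazard_attribute == 'depth':
        return float(attrs['depth']) > threshold
    if hazard_attribute == 'FLOODPRONE':
        value = attrs.get('FLOODPRONE')
        return value is not None and value.lower() == 'yes'
    raise ValueError('Unknown hazard attribute %s' % hazard_attribute)

assert building_is_flooded({'depth': 1.4}, 'depth') is True
assert building_is_flooded({'FLOODPRONE': 'NO'}, 'FLOODPRONE') is False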
def run(self, layers): """Plugin for impact of population as derived by categorised hazard Input layers: List of layers expected to contain H: Raster layer of categorised hazard P: Raster layer of population data Counts number of people exposed to each category of the hazard Return Map of population exposed to high category Table with number of people in each category """ # The 3 category high_t = 1 medium_t = 0.66 low_t = 0.34 # Identify hazard and exposure layers inundation = get_hazard_layer(layers) # Categorised Hazard population = get_exposure_layer(layers) # Population Raster question = get_question(inundation.get_name(), population.get_name(), self) # Extract data as numeric arrays C = inundation.get_data(nan=0.0) # Category # Calculate impact as population exposed to each category P = population.get_data(nan=0.0, scaling=True) H = numpy.where(C == high_t, P, 0) M = numpy.where(C > medium_t, P, 0) L = numpy.where(C < low_t, P, 0) # Count totals total = int(numpy.sum(P)) high = int(numpy.sum(H)) medium = int(numpy.sum(M)) - int(numpy.sum(H)) low = int(numpy.sum(L)) - int(numpy.sum(M)) total_impact = high + medium + low # Don't show digits less than a 1000 if total > 1000: total = total // 1000 * 1000 if total_impact > 1000: total_impact = total_impact // 1000 * 1000 if high > 1000: high = high // 1000 * 1000 if medium > 1000: medium = medium // 1000 * 1000 if low > 1000: low = low // 1000 * 1000 # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan ## rice = evacuated * 2.8 ## drinking_water = evacuated * 17.5 ## water = evacuated * 67 ## family_kits = evacuated / 5 ## toilets = evacuated / 20 # Generate impact report for the pdf map table_body = [question, TableRow([_('People impacted '), '%i' % total_impact], header=True), TableRow([_('People in high hazard area '), '%i' % high], header=True), TableRow([_('People in medium hazard area '), '%i' % medium], header=True), TableRow([_('People in low hazard area'), '%i' % low], header=True)] ## TableRow([_('Needs per week'), _('Total')], ## header=True), ## [_('Rice [kg]'), int(rice)], ## [_('Drinking Water [l]'), int(drinking_water)], ## [_('Clean Water [l]'), int(water)], ## [_('Family Kits'), int(family_kits)], ## [_('Toilets'), int(toilets)]] impact_table = Table(table_body).toNewlineFreeString() # Extend impact report for on-screen display table_body.extend([TableRow(_('Notes'), header=True), _('Map shows population density in high or medium ' 'hazard area'), _('Total population: %i') % total]) ## _('Minimum needs are defined in BNPB ' ## 'regulation 7/2008')]) impact_summary = Table(table_body).toNewlineFreeString() map_title = _('People in high hazard areas') # Generare 8 equidistant classes across the range of flooded population # 8 is the number of classes in the predefined flood population style # as imported classes = numpy.linspace(numpy.nanmin(M.flat[:]), numpy.nanmax(M.flat[:]), 8) # Modify labels in existing flood style to show quantities style_classes = style_info['style_classes'] style_classes[1]['label'] = _('Low [%i people/cell]') % classes[1] style_classes[4]['label'] = _('Medium [%i people/cell]') % classes[4] style_classes[7]['label'] = _('High [%i people/cell]') % classes[7] style_info['legend_title'] = _('Population Density') # Create raster object and return R = Raster(M, projection=inundation.get_projection(), geotransform=inundation.get_geotransform(), name=_('Population which %s') % get_function_title(self), keywords={'impact_summary': impact_summary, 'impact_table': impact_table, 'map_title': 
map_title}, style_info=style_info) return R
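# Sketch of two pieces of the function above: masking the population grid
# with numpy.where against a hazard class value, and the reporting
# convention of truncating totals above 1000 to the nearest thousand.  The
# grids are tiny made-up examples and the class arithmetic is simplified to
# the high class only.
import numpy

high_t = 1.0
C = numpy.array([[1.0, 0.66], [0.34, 0.0]])        # categorised hazard grid
P = numpy.array([[1234., 800.], [400., 4000.]])    # population grid

# Population in cells whose category equals the high class
high = int(numpy.sum(numpy.where(C == high_t, P, 0)))

def hide_small_digits(n):
    """Truncate to the nearest thousand, as the report above does."""
    return n // 1000 * 1000 if n > 1000 else n

print(hide_small_digits(high))   # 1234 -> 1000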
def run(self, layers): """Risk plugin for flood population evacuation Input layers: List of layers expected to contain H: Raster layer of volcano depth P: Raster layer of population data on the same grid as H Counts number of people exposed to flood levels exceeding specified threshold. Return Map of population exposed to volcanic hazard zones Table with number of buildings affected """ # Identify hazard and exposure layers H = get_hazard_layer(layers) # Flood inundation E = get_exposure_layer(layers) question = get_question(H.get_name(), E.get_name(), self) # Input checks if not H.is_vector: msg = ('Input hazard %s was not a vector layer as expected ' % H.get_name()) raise Exception(msg) msg = ('Input hazard must be a polygon or point layer. ' 'I got %s with layer ' 'type %s' % (H.get_name(), H.get_geometry_name())) if not (H.is_polygon_data or H.is_point_data): raise Exception(msg) if H.is_point_data: # Use concentric circles radii = self.parameters['distances'] centers = H.get_geometry() attributes = H.get_data() Z = make_circular_polygon(centers, radii, attributes=attributes) Z.write_to_file('Marapi_evac_zone_%s.shp' % str(radii)) # To check category_title = 'Radius' H = Z #category_names = ['%s m' % x for x in radii] category_names = radii else: # Use hazard map category_title = 'KRB' # FIXME (Ole): Change to English and use translation system category_names = ['Kawasan Rawan Bencana III', 'Kawasan Rawan Bencana II', 'Kawasan Rawan Bencana I'] if not category_title in H.get_attribute_names(): msg = ('Hazard data %s did not contain expected ' 'attribute %s ' % (H.get_name(), category_title)) raise InaSAFEError(msg) # Run interpolation function for polygon2raster P = assign_hazard_values_to_exposure_data(H, E) # Initialise attributes of output dataset with all attributes # from input polygon and a population count of zero new_attributes = H.get_data() categories = {} for attr in new_attributes: attr[self.target_field] = 0 cat = attr[category_title] categories[cat] = 0 # Count affected population per polygon and total total_affected = 0 for attr in P.get_data(): # Update building count for associated polygon poly_id = attr['polygon_id'] if poly_id is not None: new_attributes[poly_id][self.target_field] += 1 # Update building count for each category cat = new_attributes[poly_id][category_title] categories[cat] += 1 # Update total total_affected += 1 # Count totals total = len(E) # Generate simple impact report table_body = [question, TableRow([_('Buildings'), _('Total'), _('Cumulative')], header=True), TableRow([_('All'), str(total_affected), ''])] cum = 0 for name in category_names: count = categories[name] cum += count table_body.append(TableRow([name, str(count), str(cum)])) table_body.append(TableRow(_('Map shows buildings affected in ' 'each of volcano hazard polygons.'))) impact_table = Table(table_body).toNewlineFreeString() # Extend impact report for on-screen display table_body.extend([TableRow(_('Notes'), header=True), _('Total number of buildings %i in the viewable ' 'area') % total, _('Only buildings available in OpenStreetMap ' 'are considered.')]) impact_summary = Table(table_body).toNewlineFreeString() map_title = _('Buildings affected by volcanic hazard zone') # Define classes for legend for flooded building counts colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00', '#FFCC00', '#FF6600', '#FF0000', '#7A0000'] building_counts = [x[self.target_field] for x in new_attributes] cls = [0] + numpy.linspace(1, max(building_counts), len(colours)).tolist() # Define style info for 
output polygons showing population counts style_classes = [] for i, colour in enumerate(colours): lo = cls[i] hi = cls[i + 1] if i == 0: label = _('0') else: label = _('%i - %i') % (lo, hi) entry = dict(label=label, colour=colour, min=lo, max=hi, transparency=0, size=1) style_classes.append(entry) # Override style info with new classes and name style_info = dict(target_field=self.target_field, style_classes=style_classes, legend_title=_('Building Count')) # Create vector layer and return V = Vector(data=new_attributes, projection=H.get_projection(), geometry=H.get_geometry(as_geometry_objects=True), name=_('Buildings affected by volcanic hazard zone'), keywords={'impact_summary': impact_summary, 'impact_table': impact_table, 'map_title': map_title, 'target_field': self.target_field}, style_info=style_info) return V
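# Sketch of the aggregation above: each interpolated building carries the id
# of the hazard polygon it falls in, and counts are accumulated per polygon
# and per hazard category.  The 'affected' key stands in for
# self.target_field, and the ids and categories are made-up examples.
new_attributes = [{'KRB': 'Kawasan Rawan Bencana III', 'affected': 0},
                  {'KRB': 'Kawasan Rawan Bencana II', 'affected': 0}]
interpolated_buildings = [{'polygon_id': 0}, {'polygon_id': 0},
                          {'polygon_id': 1}, {'polygon_id': None}]

categories = dict((attr['KRB'], 0) for attr in new_attributes)
total_affected = 0
for attr in interpolated_buildings:
    poly_id = attr['polygon_id']
    if poly_id is None:          # building outside every hazard polygon
        continue
    new_attributes[poly_id]['affected'] += 1
    categories[new_attributes[poly_id]['KRB']] += 1
    total_affected += 1

print(categories, total_affected)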
""" # FIXME (Ole): This approach can be generalised to any strings that are not # statically declared such as attribute values. # So, we should merge the two dictionaries and just have one # with strings that need to be recognised by the translation # tools. # Also rename this module to something more fitting, such as # dynamic_translations.py # See issue #168 from safe.common.utilities import ugettext as _ names = {'title1': _('DKI buildings'), # Bangunan DKI 'title2': _('Jakarta 2007 flood'), # Banjir seperti 2007 'Jakarta 2007 flood': _('Jakarta 2007 flood'), 'A flood in Jakarta like in 2007': _('A flood in Jakarta like ' 'in 2007'), 'title3': _('Jakarta flood like 2007 with pump failure at Pluit, ' 'Ancol and Sunter'), # Banjir 2007 tanpa pompa di # Pluit, Ancol dan Sunter 'Jakarta flood like 2007 with pump failure at Pluit and Ancol': _('Jakarta flood like 2007 with pump failure at ' 'Pluit and Ancol'), 'A flood in Jakarta like in 2007 but with structural improvements': _('A flood in Jakarta like in 2007 but with structural ' 'improvements'), 'title4': _('Sea wall collapse at Pluit'), # Dam Pluit Runtuh 'title5': _('Jakarta flood prone areas'), # Daerah Rawan Banjir
def run(self, layers): """Risk plugin for flood population evacuation Input layers: List of layers expected to contain H: Raster layer of flood depth P: Raster layer of population data on the same grid as H Counts number of people exposed to flood levels exceeding specified threshold. Return Map of population exposed to flood levels exceeding the threshold Table with number of people evacuated and supplies required """ # Identify hazard and exposure layers inundation = get_hazard_layer(layers) # Flood inundation [m] population = get_exposure_layer(layers) question = get_question(inundation.get_name(), population.get_name(), self) # Determine depths above which people are regarded affected [m] # Use thresholds from inundation layer if specified thresholds = get_thresholds(inundation) if len(thresholds) == 0: # Default threshold thresholds = [1.0] verify(isinstance(thresholds, list), 'Expected thresholds to be a list. Got %s' % str(thresholds)) # Extract data as numeric arrays D = inundation.get_data(nan=0.0) # Depth # Calculate impact as population exposed to depths > max threshold P = population.get_data(nan=0.0, scaling=True) # Calculate impact to intermediate thresholds counts = [] for i, lo in enumerate(thresholds): if i == len(thresholds) - 1: # The last threshold I = M = numpy.where(D >= lo, P, 0) else: # Intermediate thresholds hi = thresholds[i + 1] M = numpy.where((D >= lo) * (D < hi), P, 0) # Count val = int(numpy.sum(M)) # Don't show digits less than a 1000 if val > 1000: val = val // 1000 * 1000 counts.append(val) # Count totals evacuated = counts[-1] total = int(numpy.sum(P)) # Don't show digits less than a 1000 if total > 1000: total = total // 1000 * 1000 # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan rice = evacuated * 2.8 drinking_water = evacuated * 17.5 water = evacuated * 67 family_kits = evacuated / 5 toilets = evacuated / 20 # Generate impact report for the pdf map table_body = [question, TableRow([_('People needing evacuation'), '%i' % evacuated], header=True), TableRow(_('Map shows population density needing ' 'evacuation')), TableRow([_('Needs per week'), _('Total')], header=True), [_('Rice [kg]'), int(rice)], [_('Drinking Water [l]'), int(drinking_water)], [_('Clean Water [l]'), int(water)], [_('Family Kits'), int(family_kits)], [_('Toilets'), int(toilets)]] impact_table = Table(table_body).toNewlineFreeString() # Extend impact report for on-screen display table_body.extend([TableRow(_('Notes'), header=True), _('Total population: %i') % total, _('People need evacuation if flood levels ' 'exceed %(eps).1f m') % {'eps': thresholds[-1]}, _('Minimum needs are defined in BNPB ' 'regulation 7/2008')]) if len(counts) > 1: table_body.append(TableRow(_('Detailed breakdown'), header=True)) for i, val in enumerate(counts[:-1]): s = (_('People in %(lo).1f m to %(hi).1f m of water: %(val)i') % {'lo': thresholds[i], 'hi': thresholds[i + 1], 'val': val}) table_body.append(TableRow(s, header=False)) impact_summary = Table(table_body).toNewlineFreeString() map_title = _('People in need of evacuation') # Generate 8 equidistant classes across the range of flooded population # 8 is the number of classes in the predefined flood population style # as imported classes = numpy.linspace(numpy.nanmin(I.flat[:]), numpy.nanmax(I.flat[:]), 8) # Modify labels in existing flood style to show quantities style_classes = style_info['style_classes'] style_classes[1]['label'] = _('Low [%i people/cell]') % classes[1] style_classes[4]['label'] = _('Medium [%i people/cell]') % 
classes[4] style_classes[7]['label'] = _('High [%i people/cell]') % classes[7] style_info['legend_title'] = _('Population Density') # Create raster object and return R = Raster(I, projection=inundation.get_projection(), geotransform=inundation.get_geotransform(), name=_('Population which %s') % get_function_title(self), keywords={'impact_summary': impact_summary, 'impact_table': impact_table, 'map_title': map_title}, style_info=style_info) return R
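# Standalone sketch of the threshold loop above: population is counted
# between consecutive depth thresholds, the last threshold is open-ended,
# and weekly minimum needs are scaled from the evacuated headcount using the
# BNPB Perka 7/2008 factors quoted in the plugin.  The grids are made-up
# test data and the per-count truncation to thousands is omitted.
import numpy

thresholds = [0.5, 1.0]                        # depth thresholds [m]
D = numpy.array([[0.2, 0.7], [1.4, 2.1]])      # flood depth grid
P = numpy.array([[100., 200.], [300., 400.]])  # population grid

counts = []
for i, lo in enumerate(thresholds):
    if i == len(thresholds) - 1:
        M = numpy.where(D >= lo, P, 0)                  # last, open-ended
    else:
        hi = thresholds[i + 1]
        M = numpy.where((D >= lo) * (D < hi), P, 0)     # between lo and hi
    counts.append(int(numpy.sum(M)))

evacuated = counts[-1]
needs = dict(rice_kg=evacuated * 2.8,
             drinking_water_l=evacuated * 17.5,
             clean_water_l=evacuated * 67,
             family_kits=evacuated / 5,
             toilets=evacuated / 20)
print(counts, needs)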
def run(self, layers): """Risk plugin for tsunami population """ # Extract data H = get_hazard_layer(layers) # Depth E = get_exposure_layer(layers) # Building locations # Interpolate hazard level to building locations Hi = H.interpolate(E, attribute_name='depth') # Extract relevant numerical data coordinates = Hi.get_geometry() depth = Hi.get_data() N = len(depth) # List attributes to carry forward to result layer attributes = E.get_attribute_names() # Calculate building impact according to guidelines count3 = 0 count1 = 0 count0 = 0 population_impact = [] for i in range(N): if H.is_raster: # Get depth dep = float(depth[i]['depth']) # Classify buildings according to depth if dep >= 3: affected = 3 # FIXME: Colour upper bound is 100 but count3 += 1 # does not catch affected == 100 elif 1 <= dep < 3: affected = 2 count1 += 1 else: affected = 1 count0 += 1 elif H.is_vector: dep = 0 # Just put something here cat = depth[i]['Affected'] if cat is True: affected = 3 count3 += 1 else: affected = 1 count0 += 1 # Collect depth and calculated damage result_dict = {self.target_field: affected, 'DEPTH': dep} # Carry all original attributes forward # FIXME: This should be done in interpolation. Check. #for key in attributes: # result_dict[key] = E.get_data(key, i) # Record result for this feature population_impact.append(result_dict) # Create report Hname = H.get_name() Ename = E.get_name() if H.is_raster: impact_summary = ('<b>In case of "%s" the estimated impact to ' '"%s" ' 'is:</b><br><br><p>' % (Hname, Ename)) impact_summary += ( '<table border="0" width="320px">' ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>' ' <tr></tr>' ' <tr><td>%s:</td><td>%i</td></tr>' ' <tr><td>%s:</td><td>%i</td></tr>' ' <tr><td>%s:</td><td>%i</td></tr>' '</table>' % (_('Impact'), _('Number of buildings'), _('Low'), count0, _('Medium'), count1, _('High'), count3)) else: impact_summary = ( '<table border="0" width="320px">' ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>' ' <tr></tr>' ' <tr><td>%s:</td><td>%i</td></tr>' ' <tr><td>%s:</td><td>%i</td></tr>' ' <tr><td>%s:</td><td>%i</td></tr>' '</table>' % ('Terdampak oleh tsunami', 'Jumlah gedung', 'Terdampak', count3, 'Tidak terdampak', count0, 'Semua', N)) impact_summary += '<br>' # Blank separation row impact_summary += '<b>' + _('Assumption') + ':</b><br>' impact_summary += ('Levels of impact are defined by BNPB\'s ' '<i>Pengkajian Risiko Bencana</i>') impact_summary += ('<table border="0" width="320px">' ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>' ' <tr></tr>' ' <tr><td>%s:</td><td>%s:</td></tr>' ' <tr><td>%s:</td><td>%s:</td></tr>' ' <tr><td>%s:</td><td>%s:</td></tr>' '</table>' % (_('Impact'), _('Tsunami height'), _('Low'), '<1 m', _('Medium'), '1-3 m', _('High'), '>3 m')) # Create style style_classes = [ dict(label='< 1 m', min=0, max=1, colour='#1EFC7C', transparency=0, size=1), dict(label='1 - 3 m', min=1, max=2, colour='#FFA500', transparency=0, size=1), dict(label='> 3 m', min=2, max=4, colour='#F31A1C', transparency=0, size=1) ] style_info = dict(target_field=self.target_field, style_classes=style_classes) # Create vector layer and return if Hi.is_line_data: name = 'Roads flooded' elif Hi.is_point_data: name = 'Buildings flooded' V = Vector(data=population_impact, projection=E.get_projection(), geometry=coordinates, keywords={'impact_summary': impact_summary}, geometry_type=Hi.geometry_type, name=name, style_info=style_info) return V
def run(self, layers): """Risk plugin for Padang building survey """ # Extract data H = get_hazard_layer(layers) # Ground shaking E = get_exposure_layer(layers) # Building locations question = get_question(H.get_name(), E.get_name(), self) # Map from different kinds of datasets to Padang vulnerability classes datatype = E.get_keywords()['datatype'] vclass_tag = 'VCLASS' if datatype.lower() == 'osm': # Map from OSM attributes Emap = osm2padang(E) elif datatype.lower() == 'sigab': # Map from SIGAB attributes Emap = sigab2padang(E) else: Emap = E # Interpolate hazard level to building locations I = assign_hazard_values_to_exposure_data(H, Emap, attribute_name='MMI') # Extract relevant numerical data attributes = I.get_data() N = len(I) # Calculate building damage count_high = count_medium = count_low = count_none = 0 for i in range(N): mmi = float(attributes[i]['MMI']) building_type = Emap.get_data(vclass_tag, i) damage_params = damage_curves[building_type] beta = damage_params['beta'] median = damage_params['median'] percent_damage = lognormal_cdf(mmi, median=median, sigma=beta) * 100 # Add calculated impact to existing attributes attributes[i][self.target_field] = percent_damage # Calculate statistics if percent_damage < 10: count_none += 1 if 10 <= percent_damage < 33: count_low += 1 if 33 <= percent_damage < 66: count_medium += 1 if 66 <= percent_damage: count_high += 1 # Generate impact report table_body = [question, TableRow([_('Buildings'), _('Total')], header=True), TableRow([_('All'), N]), TableRow([_('No damage'), count_none]), TableRow([_('Low damage'), count_low]), TableRow([_('Medium damage'), count_medium]), TableRow([_('High damage'), count_high])] table_body.append(TableRow(_('Notes'), header=True)) table_body.append(_('Levels of impact are defined by post 2009 ' 'Padang earthquake survey conducted by Geoscience ' 'Australia and Institute of Teknologi Bandung.')) table_body.append(_('Unreinforced masonry is assumed where no ' 'structural information is available.')) impact_summary = Table(table_body).toNewlineFreeString() impact_table = impact_summary map_title = _('Earthquake damage to buildings') # Create style style_classes = [dict(label=_('No damage'), min=0, max=10, colour='#00ff00', transparency=1), dict(label=_('Low damage'), min=10, max=33, colour='#ffff00', transparency=1), dict(label=_('Medium damage'), min=33, max=66, colour='#ffaa00', transparency=1), dict(label=_('High damage'), min=66, max=100, colour='#ff0000', transparency=1)] style_info = dict(target_field=self.target_field, style_classes=style_classes) # Create vector layer and return V = Vector(data=attributes, projection=E.get_projection(), geometry=E.get_geometry(), name='Estimated pct damage', keywords={'impact_summary': impact_summary, 'impact_table': impact_table, 'map_title': map_title}, style_info=style_info) return V
def run(self, layers): """Plugin for impact of population as derived by catergorised hazard Input layers: List of layers expected to contain H: Raster layer of catergorised hazard P: Raster layer of population data Counts number of people exposed to each caterogry of the hazard Return Map of population exposed to high catergory Table with number of people in each catergory """ # The 3 catergory high_t = 1 medium_t = 0.66 low_t = 0.34 # Identify hazard and exposure layers inundation = get_hazard_layer(layers) # Categorised Hazard population = get_exposure_layer(layers) # Population Raster question = get_question(inundation.get_name(), population.get_name(), self) # Extract data as numeric arrays C = inundation.get_data(nan=0.0) # Catergory # Calculate impact as population exposed to each catergory P = population.get_data(nan=0.0, scaling=True) H = numpy.where(C == high_t, P, 0) M = numpy.where(C > medium_t, P, 0) L = numpy.where(C < low_t, P, 0) # Count totals total = int(numpy.sum(P)) high = int(numpy.sum(H)) medium = int(numpy.sum(M)) - int(numpy.sum(H)) low = int(numpy.sum(L)) - int(numpy.sum(M)) total_impact = high + medium + low # Don't show digits less than a 1000 if total > 1000: total = total // 1000 * 1000 if total_impact > 1000: total_impact = total_impact // 1000 * 1000 if high > 1000: high = high // 1000 * 1000 if medium > 1000: medium = medium // 1000 * 1000 if low > 1000: low = low // 1000 * 1000 # Calculate estimated needs based on BNPB Perka 7/2008 minimum bantuan ## rice = evacuated * 2.8 ## drinking_water = evacuated * 17.5 ## water = evacuated * 67 ## family_kits = evacuated / 5 ## toilets = evacuated / 20 # Generate impact report for the pdf map table_body = [question, TableRow([_('People impacted '), '%i' % total_impact], header=True), TableRow([_('People in high hazard area '), '%i' % high], header=True), TableRow([_('People in medium hazard area '), '%i' % medium], header=True), TableRow([_('People in low hazard area'), '%i' % low], header=True)] ## TableRow([_('Needs per week'), _('Total')], ## header=True), ## [_('Rice [kg]'), int(rice)], ## [_('Drinking Water [l]'), int(drinking_water)], ## [_('Clean Water [l]'), int(water)], ## [_('Family Kits'), int(family_kits)], ## [_('Toilets'), int(toilets)]] impact_table = Table(table_body).toNewlineFreeString() # Extend impact report for on-screen display table_body.extend([TableRow(_('Notes'), header=True), _('Map shows population density in high or medium ' 'hazard area'), _('Total population: %i') % total]) ## _('Minimum needs are defined in BNPB ' ## 'regulation 7/2008')]) impact_summary = Table(table_body).toNewlineFreeString() map_title = _('People in high hazard areas') # Generare 8 equidistant classes across the range of flooded population # 8 is the number of classes in the predefined flood population style # as imported classes = numpy.linspace(numpy.nanmin(M.flat[:]), numpy.nanmax(M.flat[:]), 8) # Modify labels in existing flood style to show quantities style_classes = style_info['style_classes'] style_classes[1]['label'] = _('Low [%i people/cell]') % classes[1] style_classes[4]['label'] = _('Medium [%i people/cell]') % classes[4] style_classes[7]['label'] = _('High [%i people/cell]') % classes[7] style_info['legend_title'] = _('Population Density') # Create raster object and return R = Raster(M, projection=inundation.get_projection(), geotransform=inundation.get_geotransform(), name=_('Population which %s') % get_function_title(self), keywords={'impact_summary': impact_summary, 'impact_table': impact_table, 
'map_title': map_title}, style_info=style_info) return R