def test_instantiation_of_empty_layers(self):
    """Vector and Raster objects can be instantiated with None
    """
    # Both layer types must accept None and fall back to a default
    # name that reflects their type
    for layer_class, name_prefix in [(Vector, 'Vector'), (Raster, 'Raster')]:
        layer = layer_class(None)
        assert layer.get_name().startswith(name_prefix)
def run(layers):
    """Risk plugin computing population affected by volcanic ash load

    Input
      layers: List of layers expected to contain
          H: Raster layer of ash load [kg/m2]
          P: Raster layer of population data on the same grid as H

    Returns
      Raster layer of affected population with a 'caption' keyword
      summarising the count
    """
    # Ash load above which people are regarded affected [kg/m2]
    threshold = 1

    # Hazard comes first in the layer list, exposure second
    hazard = layers[0]    # Tephra load [kg/m2]
    exposure = layers[1]  # Population density [people/km^2]

    # Pull out numeric grids; NaN is treated as zero
    load = hazard.get_data(nan=0.0)
    density = exposure.get_data(nan=0.0, scaling=True)

    # People are counted wherever the load exceeds the threshold
    impact = numpy.where(load > threshold, density, 0)

    # Summarise result for this study
    affected = numpy.nansum(impact.flat)
    caption = ('%i people affected by ash levels greater '
               'than %i kg/m^2' % (affected, threshold))

    # Package result as a raster layer on the hazard grid
    return Raster(impact,
                  projection=hazard.get_projection(),
                  geotransform=hazard.get_geotransform(),
                  name='People affected',
                  keywords={'caption': caption})
def run(layers, teta=14.05, beta=0.17, zeta=2.15):
    """Risk plugin for earthquake fatalities

    Input
      layers: List of two raster layers on the same grid:
          H: Numerical array of hazard data (ground shaking)
          E: Numerical array of exposure data (population)
      teta, beta: Model coefficients
      zeta: Kept for interface compatibility; not used in this formula

    Returns
      Raster layer of estimated fatalities
    """
    # Identify input layers
    intensity = layers[0]
    population = layers[1]

    # Extract data; NaN is treated as zero
    H = intensity.get_data(nan=0)
    P = population.get_data(nan=0)

    # Calculate impact
    logHazard = 1 / beta * scipy.log(H / teta)

    # Convert array to standard floats as expected by cdf.
    # (Replaces a quadratic nested list comprehension with a single
    # vectorized dtype conversion - identical result for 2-D grids.)
    arrayout = numpy.array(logHazard, dtype=float)
    F = scipy.stats.norm.cdf(arrayout * P)

    # Create new layer and return
    R = Raster(F,
               projection=population.get_projection(),
               geotransform=population.get_geotransform(),
               name='Estimated fatalities')
    return R
def write_raster_data(data, projection, geotransform, filename):
    """Write array to raster file with specified metadata and one data layer

    Input
      data: Numpy array containing grid data
      projection: WKT projection information
      geotransform: 6 digit vector (top left x, w-e pixel resolution,
          rotation, top left y, rotation, n-s pixel resolution).
          See e.g. http://www.gdal.org/gdal_tutorial.html
      filename: Output filename

    Note: The only format implemented is GTiff and the extension
    must be .tif
    """
    # Wrap the array in a Raster layer and delegate the file writing
    raster_layer = Raster(data, projection, geotransform)
    raster_layer.write_to_file(filename)
def write_raster_data(data, projection, geotransform, filename, keywords=None):
    """Write array to raster file with specified metadata and one data layer

    Input
      data: Numpy array containing grid data
      projection: WKT projection information
      geotransform: 6 digit vector (top left x, w-e pixel resolution,
          rotation, top left y, rotation, n-s pixel resolution).
          See e.g. http://www.gdal.org/gdal_tutorial.html
      filename: Output filename
      keywords: Optional dictionary

    Note: The only format implemented is GTiff and the extension
    must be .tif
    """
    # Build the layer object, attaching any keywords, then write it out
    raster_layer = Raster(data, projection, geotransform, keywords=keywords)
    raster_layer.write_to_file(filename)
def read_layer(filename):
    """Read spatial layer from file

    This can be either raster or vector data.

    Input
      filename: Path to layer data. Extensions .asc and .tif are read
          as Raster; .shp and .gml as Vector. Matching is case
          insensitive, so e.g. .TIF is also accepted.

    Raises
      Exception if the extension is not recognised
    """
    _, ext = os.path.splitext(filename)

    # Compare case-insensitively so files like DATA.TIF are accepted;
    # the original, possibly mixed-case extension is kept for messages
    if ext.lower() in ('.asc', '.tif'):
        return Raster(filename)
    elif ext.lower() in ('.shp', '.gml'):
        return Vector(filename)
    else:
        msg = ('Could not read %s. '
               'Extension "%s" has not been implemented' % (filename, ext))
        raise Exception(msg)
def run(self, layers):
    """Risk plugin for tsunami population

    Input
      layers: List of layers expected to contain
          H: Raster layer of tsunami inundation depth [m]
          P: Raster layer of population density on the same grid as H

    Returns
      Raster of population exposed to more than 1 m of inundation,
      with a per-threshold breakdown table in the 'caption' keyword
    """
    # Depths above which people are regarded affected [m];
    # the last entry (1.0 m) defines the returned impact map
    thresholds = [0.2, 0.3, 0.5, 0.8, 1.0]

    # Identify hazard and exposure layers
    inundation = layers[0]  # Tsunami inundation [m]
    population = layers[1]  # Population density

    # Extract data as numeric arrays
    D = inundation.get_data(nan=0.0)  # Depth
    P = population.get_data(nan=0.0, scaling=True)  # Population density

    # Calculate impact as population exposed to depths > 1m
    I_map = numpy.where(D > thresholds[-1], P, 0)

    # Generate text with result for this study
    number_of_people_affected = numpy.nansum(I_map.flat)

    # Do breakdown

    # Create report (labels are in Indonesian).
    # FIX: the header row previously ended in a stray '</th>' where
    # '</tr>' was intended, producing malformed HTML.
    caption = ('<table border="0" width="320px">'
               ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
               ' <tr></tr>' % ('Ambang batas', 'Jumlah orang terdampak'))
    counts = []
    for i, threshold in enumerate(thresholds):
        I = numpy.where(D > threshold, P, 0)
        counts.append(numpy.nansum(I.flat))

        caption += ' <tr><td>%s m</td><td>%i</td></tr>' % (threshold,
                                                           counts[i])
    caption += '</table>'

    # Create raster object and return
    R = Raster(I_map,
               projection=inundation.get_projection(),
               geotransform=inundation.get_geotransform(),
               name='People affected by more than 1m of inundation',
               keywords={'caption': caption})
    return R
def run(layers, a=0.97429, b=11.037):
    """Risk plugin for earthquake fatalities

    Input
      layers: List of layers expected to contain
          H: Raster layer of MMI ground shaking
          P: Raster layer of population data on the same grid as H
      a, b: Coefficients of the log-linear fatality model

    Returns
      Raster layer of estimated fatalities with a 'caption' keyword
    """
    # Hazard first in the list, exposure second
    intensity, population = layers[0], layers[1]

    # Numeric grids; NaN is treated as zero
    shaking = intensity.get_data(nan=0)
    people = population.get_data(nan=0)

    # Log-linear fatality model in MMI
    fatality_map = 10 ** (a * shaking - b) * people

    # Aggregate statistics for the report
    count = numpy.nansum(fatality_map.flat)
    total = numpy.nansum(people.flat)

    # Create report (labels are in Indonesian)
    caption = ('<table border="0" width="320px">'
               ' <tr><td>%s:</td><td>%i</td></tr>'
               ' <tr><td>%s:</td><td>%i</td></tr>'
               '</table>' % ('Jumlah Penduduk', int(total),
                             'Perkiraan Orang Meninggal', int(count)))

    # Package result as a raster layer on the population grid
    return Raster(fatality_map,
                  projection=population.get_projection(),
                  geotransform=population.get_geotransform(),
                  name='Estimated fatalities',
                  keywords={'caption': caption})
def run(layers):
    """Calculate population exposed to different levels of ground shaking

    Input
      layers: List of layers expected to contain
          H: Raster layer of MMI ground shaking
          P: Raster layer of population density

    Returns
      Raster layer of estimated fatalities; keywords carry the MMI
      class list and per-class exposed population counts
    """
    # Identify input layers
    intensity, population = layers[0], layers[1]

    # Numeric grids; NaN is treated as zero
    H = intensity.get_data(nan=0)
    P = population.get_data(nan=0)

    # MMI classes considered (1-10)
    mmi_classes = range(1, 11)

    # Keyword string of the classes themselves (strip enclosing [])
    mmi_str = str(mmi_classes)[1:-1]

    # Count exposed population per MMI class, collecting the pieces
    # and joining them once instead of repeated string concatenation
    pieces = []
    for level in mmi_classes:
        # Cells whose shaking value rounds to this class
        in_class = (H >= level - 0.5) * (H < level + 0.5)

        exposed = round(numpy.nansum(P[in_class]))
        if numpy.isnan(exposed):
            exposed = 0

        pieces.append('%i ' % exposed)
    count_str = ''.join(pieces)

    # Calculate fatality map (FIXME (Ole): Need to replaced by USGS model)
    a = 0.97429
    b = 11.037
    F = 10 ** (a * H - b) * P

    # Totals for the report
    count = numpy.nansum(F.flat)
    total = numpy.nansum(P.flat)

    # Create report (labels are in Indonesian)
    caption = ('<table border="0" width="320px">'
               ' <tr><td>%s:</td><td>%i</td></tr>'
               ' <tr><td>%s:</td><td>%i</td></tr>'
               '</table>' % ('Jumlah Penduduk', int(total),
                             'Perkiraan Orang Meninggal', int(count)))

    # Package result as a raster layer on the population grid
    return Raster(F,
                  projection=population.get_projection(),
                  geotransform=population.get_geotransform(),
                  name='Estimated fatalities',
                  keywords={'caption': caption,
                            'mmi-classes': mmi_str,
                            'affected-population': count_str})
def run(layers, teta=14.05, beta=0.17, zeta=2.15):
    """Risk plugin for earthquake fatalities

    Input
      layers: List of two raster layers on the same grid:
          H: Numerical array of hazard data (ground shaking)
          E: Numerical array of exposure data (population)
      teta, beta: Model coefficients
      zeta: Kept for interface compatibility; not used in this formula

    Algorithm and coefficients are from:

    An Empirical Model for Global Earthquake Fatality Estimation.
    Kishor Jaiswal and David Wald.
    Earthquake Spectra, Volume 26, No. 4, pages 1017-1037, November 2010.

    teta=14.05, beta=0.17, zeta=2.1  # Coefficients for Indonesia.

    Returns
      Raster layer of estimated fatalities; the 'caption' keyword
      carries exposure and fatality report tables
    """
    # Identify input layers
    intensity = layers[0]
    population = layers[1]

    # Extract data; NaN is treated as zero
    H = intensity.get_data(nan=0)  # Ground Shaking
    P = population.get_data(nan=0)  # Population Density

    # NOTE(review): removed leftover debugging code here that printed
    # resolutions and statistics and pickled H and P to hard-coded
    # paths under /home/nielso - it would crash on any other machine.

    # Calculate population affected by each MMI level
    mmi_range = range(2, 10)
    number_of_people_affected = {}
    for mmi in mmi_range:
        mask = numpy.logical_and(mmi - 0.5 < H,
                                 H <= mmi + 0.5)
        I = numpy.where(mask, P, 0)

        # Generate text with result for this study
        number_of_people_affected[mmi] = numpy.nansum(I.flat)

    # Calculate impact according to equation (1) in the Kishor and Wald 2010
    logHazard = 1 / beta * scipy.log(H / teta)

    # Convert array to standard floats as expected by cdf.
    # (Replaces a quadratic nested list comprehension with a single
    # vectorized dtype conversion - identical result for 2-D grids.)
    arrayout = numpy.array(logHazard, dtype=float)
    F = scipy.stats.norm.cdf(arrayout * P)

    # Stats
    fatalities = numpy.nansum(F)

    # Generate text with result for this study
    caption = generate_exposure_table(mmi_range,
                                      number_of_people_affected)
    caption += generate_fatality_table(fatalities)

    # Create new layer and return
    R = Raster(F,
               projection=population.get_projection(),
               geotransform=population.get_geotransform(),
               keywords={'caption': caption},
               name='Estimated fatalities')
    return R
def run(layers):
    """Risk plugin for flood impact on population

    Input
      layers: List of layers expected to contain
          H: Raster layer of flood depth
          P: Raster layer of population data on the same grid as H

    Returns
      Raster of population exposed to depths greater than 0.1 m, with
      a per-threshold breakdown table in the 'caption' keyword
    """
    # Depth above which people are regarded affected [m]
    threshold = 0.1
    thresholds = [0.1, 0.2, 0.3, 0.5, 0.8, 1.0]

    # Identify hazard and exposure layers
    inundation = layers[0]  # Flood inundation [m]
    population = layers[1]  # Population density [people/100000 m^2]

    # Extract data as numeric arrays
    D = inundation.get_data(nan=0.0)  # Depth

    # Calculate impact as population exposed to depths > threshold
    if population.get_resolution(native=True, isotropic=True) < 0.0005:
        # Keep this for backwards compatibility just a little while
        # This uses the original custom population set and
        # serves as a reference
        P = population.get_data(nan=0.0)  # Population density
        pixel_area = 2500
        I = numpy.where(D > threshold, P, 0) / 100000.0 * pixel_area
    else:
        # This is the new generic way of scaling (issue #168 and #172)
        P = population.get_data(nan=0.0, scaling=True)
        I = numpy.where(D > threshold, P, 0)

    # Create report.
    # FIX: a throw-away caption string was previously computed here and
    # immediately overwritten (dead code, removed); the header row also
    # ended in a stray '</th>' where '</tr>' was intended.
    caption = ('<table border="0" width="320px">'
               ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
               ' <tr></tr>' % ('Min flood levels', 'People affected'))
    counts = []
    for i, threshold in enumerate(thresholds):
        I_tmp = numpy.where(D > threshold, P, 0)
        counts.append(numpy.nansum(I_tmp.flat))

        caption += ' <tr><td>%s m</td><td>%i</td></tr>' % (threshold,
                                                           counts[i])
    caption += '</table>'

    # Create raster object and return
    R = Raster(I,
               projection=inundation.get_projection(),
               geotransform=inundation.get_geotransform(),
               name='People affected',
               keywords={'caption': caption})
    return R
def test_rasters_and_arrays(self):
    """Consistency of rasters and associated arrays
    """
    # Create test data
    lon_ul = 100  # Longitude of upper left corner
    lat_ul = 10   # Latitude of upper left corner
    numlon = 8    # Number of longitudes
    numlat = 5    # Number of latitudes
    dlon = 1
    dlat = -1

    # Define array where latitudes are rows and longitude columns
    A1 = numpy.zeros((numlat, numlon))

    # Establish coordinates for lower left corner
    lat_ll = lat_ul - numlat
    lon_ll = lon_ul

    # Define pixel centers along each direction
    lon = numpy.linspace(lon_ll + 0.5, lon_ll + numlon - 0.5, numlon)
    lat = numpy.linspace(lat_ll + 0.5, lat_ll + numlat - 0.5, numlat)

    # Define raster with latitudes going bottom-up (south to north).
    # Longitudes go left-right (west to east)
    for i in range(numlat):
        for j in range(numlon):
            A1[numlat - 1 - i, j] = linear_function(lon[j], lat[i])

    # Spot check the four corners against the generating function
    # Upper left corner
    assert A1[0, 0] == 105.25
    assert A1[0, 0] == linear_function(lon[0], lat[4])

    # Lower left corner
    assert A1[4, 0] == 103.25
    assert A1[4, 0] == linear_function(lon[0], lat[0])

    # Upper right corner
    assert A1[0, 7] == 112.25
    assert A1[0, 7] == linear_function(lon[7], lat[4])

    # Lower right corner
    assert A1[4, 7] == 110.25
    assert A1[4, 7] == linear_function(lon[7], lat[0])

    # Generate raster object and write
    projection = ('GEOGCS["WGS 84",'
                  'DATUM["WGS_1984",'
                  'SPHEROID["WGS 84",6378137,298.2572235630016,'
                  'AUTHORITY["EPSG","7030"]],'
                  'AUTHORITY["EPSG","6326"]],'
                  'PRIMEM["Greenwich",0],'
                  'UNIT["degree",0.0174532925199433],'
                  'AUTHORITY["EPSG","4326"]]')
    geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)
    R1 = Raster(A1, projection, geotransform)

    msg = ('Dimensions of raster array do not match those of '
           'raster object')
    assert numlat == R1.rows, msg
    assert numlon == R1.columns, msg

    # Write back to new (tif) file
    out_filename = unique_filename(suffix='.tif')
    R1.write_to_file(out_filename)

    # Read again and check consistency
    R2 = read_layer(out_filename)

    msg = ('Dimensions of written raster array do not match those '
           'of input raster file\n')
    msg += ('    Dimensions of input file '
            '%s: (%s, %s)\n' % (R1.filename, numlat, numlon))
    msg += ('    Dimensions of output file %s: '
            '(%s, %s)' % (R2.filename, R2.rows, R2.columns))
    assert numlat == R2.rows, msg
    assert numlon == R2.columns, msg

    A2 = R2.get_data()

    assert numpy.allclose(numpy.min(A1), numpy.min(A2))
    assert numpy.allclose(numpy.max(A1), numpy.max(A2))

    msg = 'Array values of written raster array were not as expected'
    assert numpy.allclose(A1, A2), msg

    msg = 'Geotransforms were different'
    assert R1.get_geotransform() == R2.get_geotransform(), msg

    p1 = R1.get_projection(proj4=True)
    p2 = R2.get_projection(proj4=True)
    msg = 'Projections were different: %s != %s' % (p1, p2)
    # FIX: this previously asserted p1 == p1, which is always true and
    # never actually compared the two projections
    assert p1 == p2, msg

    # Exercise projection __eq__ method
    assert R1.projection == R2.projection

    # Check that equality raises exception when type is wrong
    try:
        R1.projection == 234
    except TypeError:
        pass
    else:
        msg = 'Should have raised TypeError'
        raise Exception(msg)