def getRegions(self, lat, lon, depth):
    """Get information about the tectonic region of a given hypocenter.

    Args:
        lat (float): Earthquake hypocentral latitude.
        lon (float): Earthquake hypocentral longitude.
        depth (float): Earthquake hypocentral depth.

    Returns:
        Series: Pandas Series object containing labels:
            - TectonicRegion: Subduction, Active, Stable, or Volcanic.
            - DistanceToStable: Distance in km to nearest stable region.
            - DistanceToActive: Distance in km to nearest active region.
            - DistanceToSubduction: Distance in km to nearest subduction
              region.
            - DistanceToVolcanic: Distance in km to nearest volcanic
              region.
            - Oceanic: Boolean indicating if epicenter is in the ocean.
            - DistanceToOceanic: Distance in km to nearest oceanic region.
            - DistanceToContinental: Distance in km to nearest continental
              region.
    """
    # sample the tectonic and oceanic grids in a small window
    # centered on the epicenter
    gd = GeoDict.createDictFromCenter(lon, lat, DX, DY, XSPAN, YSPAN)
    tec_grid = read(self._tectonic_grid, samplegeodict=gd)
    region_dict = get_dist_to_type(lon, lat, tec_grid, TECTONIC_REGIONS)

    ocean_grid = read(self._oceanic_grid, samplegeodict=gd)
    ocean_dict = get_dist_to_type(lon, lat, ocean_grid, OCEANIC_REGIONS)

    # a distance of zero to a region type means the epicenter
    # is inside that region
    if region_dict['DistanceToActive'] == 0:
        region_dict['TectonicRegion'] = 'Active'
    elif region_dict['DistanceToStable'] == 0:
        region_dict['TectonicRegion'] = 'Stable'
    elif region_dict['DistanceToSubduction'] == 0:
        region_dict['TectonicRegion'] = 'Subduction'
    else:
        region_dict['TectonicRegion'] = 'Volcanic'

    region_dict['DistanceToOceanic'] = ocean_dict['DistanceToOceanic']
    region_dict['DistanceToContinental'] = \
        ocean_dict['DistanceToContinental']
    region_dict['Oceanic'] = ocean_dict['DistanceToOceanic'] == 0

    regions = pd.Series(region_dict,
                        index=['TectonicRegion',
                               'DistanceToStable',
                               'DistanceToActive',
                               'DistanceToSubduction',
                               'DistanceToVolcanic',
                               'Oceanic',
                               'DistanceToOceanic',
                               'DistanceToContinental'])
    return regions

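# A self-contained sketch (not part of the original module) of the
# zero-distance classification rule used in getRegions() above: the first
# of Active/Stable/Subduction found at zero distance wins, and Volcanic is
# the fallback. The default distances below are made up for illustration.
def _classify_region_sketch(region_dict=None):
    if region_dict is None:
        region_dict = {'DistanceToActive': 0.0,
                       'DistanceToStable': 212.0,
                       'DistanceToSubduction': 415.0}
    for label in ('Active', 'Stable', 'Subduction'):
        if region_dict['DistanceTo%s' % label] == 0:
            return label
    return 'Volcanic'
# _classify_region_sketch() returns 'Active' with the defaults above.
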
def test_read_whole():
    files = {'ESRI Float': 'samplegrid_flt.flt',
             'NetCDF 3': 'samplegrid_cdf.cdf'}
    # where is this script?
    homedir = os.path.dirname(os.path.abspath(__file__))
    for ftype, fname in files.items():
        datafile = os.path.join(homedir, 'data', fname)
        grid = read(datafile)
        assert grid._geodict.xmin == 5.0
        print('Successful read of %s' % ftype)

def _create(cls, geodict, defaultVs30, vs30File, padding, resample):
    if vs30File is not None:
        fgeodict = get_file_geodict(vs30File)
        if not resample:
            if not padding:
                # we want something that is within and aligned
                geodict = fgeodict.getBoundsWithin(geodict)
            else:
                # we want something that is just aligned, since we're
                # padding edges
                geodict = fgeodict.getAligned(geodict)
        vs30grid = read(vs30File,
                        samplegeodict=geodict,
                        resample=resample,
                        method='linear',
                        doPadding=padding,
                        padValue=defaultVs30)
    return vs30grid

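# Hypothetical usage sketch for _create() above; the owning class name
# `Sites` and the Vs30 file path are assumptions, not from the original
# code. Design note: without resampling and padding, the request is
# shrunk to the largest file-aligned window inside it (getBoundsWithin),
# so no pad values are ever needed; with padding, the request is only
# snapped to the file grid (getAligned) and edges are filled with
# defaultVs30.
#
# fgeodict = get_file_geodict('global_vs30.grd')
# request = GeoDict.createDictFromBox(-120.0, -115.0, 32.0, 36.0,
#                                     fgeodict.dx, fgeodict.dy)
# vs30grid = Sites._create(request, defaultVs30=760.0,
#                          vs30File='global_vs30.grd',
#                          padding=True, resample=True)
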
def test_read_meridian():
    # where is this script?
    homedir = os.path.dirname(os.path.abspath(__file__))
    datafile = os.path.join(homedir, 'data', 'globalgrid_cdf.cdf')
    # sample window straddling the 180 meridian
    sdict = {'xmin': 180, 'xmax': -120,
             'ymin': 0, 'ymax': 30,
             'nx': 2, 'ny': 2,
             'dx': 60, 'dy': 30}
    sampledict = GeoDict(sdict)
    grid = read(datafile, samplegeodict=sampledict)
    assert np.nansum(grid._data) == 50.0

def test_read_subset_no_resample():
    # where is this script?
    homedir = os.path.dirname(os.path.abspath(__file__))
    datafile = os.path.join(homedir, 'data', 'samplegrid_cdf.cdf')
    sdict = {'xmin': 6, 'xmax': 7,
             'ymin': 5, 'ymax': 6,
             'nx': 2, 'ny': 2,
             'dx': 1, 'dy': 1}
    sampledict = GeoDict(sdict)
    grid = read(datafile, samplegeodict=sampledict)
    tdata = np.array([[11, 12],
                      [16, 17]])
    np.testing.assert_almost_equal(grid._data, tdata)

def _load(vs30File, samplegeodict=None, resample=False, method='linear',
          doPadding=False, padValue=np.nan):
    try:
        vs30grid = read(vs30File,
                        samplegeodict=samplegeodict,
                        resample=resample,
                        method=method,
                        doPadding=doPadding,
                        padValue=padValue)
    except Exception as msg1:
        msg = 'Load failure of %s - error message: "%s"' % (
            vs30File, str(msg1))
        raise ShakeLibException(msg)
    if vs30grid.getData().dtype != np.float64:
        vs30grid.setData(vs30grid.getData().astype(np.float64))
    return vs30grid

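# Hypothetical usage sketch for _load() above; the file path, the
# sampledict variable, and the 760 m/s pad value are illustrative. Any
# read() failure is re-raised as ShakeLibException with the underlying
# message attached, and the returned grid is coerced to float64.
#
# vs30grid = _load('/data/vs30/global_vs30.grd',
#                  samplegeodict=sampledict,
#                  resample=True, method='linear',
#                  doPadding=True, padValue=760.0)
# assert vs30grid.getData().dtype == np.float64
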
def test_read_subset_with_padding():
    # where is this script?
    homedir = os.path.dirname(os.path.abspath(__file__))
    datafile = os.path.join(homedir, 'data', 'samplegrid_cdf.cdf')
    # sample window extending half a cell past the grid edge
    sdict = {'xmin': 4.5, 'xmax': 5.5,
             'ymin': 7.5, 'ymax': 8.5,
             'nx': 2, 'ny': 2,
             'dx': 1, 'dy': 1}
    sampledict = GeoDict(sdict)
    grid = read(datafile, samplegeodict=sampledict,
                resample=False, doPadding=True)
    assert grid._data.shape == (3, 3)
    assert grid._data[1, 1] == 0

def test_resample():
    # where is this script?
    homedir = os.path.dirname(os.path.abspath(__file__))
    datafile = os.path.join(homedir, 'data', 'samplegrid_cdf.cdf')
    # resample to a grid offset half a cell from the source cells
    sdict = {'xmin': 6.5, 'xmax': 7.5,
             'ymin': 5.5, 'ymax': 6.5,
             'nx': 2, 'ny': 2,
             'dx': 1, 'dy': 1}
    sampledict = GeoDict(sdict)
    grid = read(datafile, samplegeodict=sampledict, resample=True)
    tdata = np.array([[9, 10],
                      [14, 15]])
    np.testing.assert_almost_equal(grid._data, tdata)

def test_read_subset_with_resample_and_padding():
    # where is this script?
    homedir = os.path.dirname(os.path.abspath(__file__))
    datafile = os.path.join(homedir, 'data', 'samplegrid_cdf.cdf')
    sdict = {'xmin': 4.5, 'xmax': 5.5,
             'ymin': 7.5, 'ymax': 8.5,
             'nx': 2, 'ny': 2,
             'dx': 1, 'dy': 1}
    sampledict = GeoDict(sdict)
    grid = read(datafile, samplegeodict=sampledict,
                resample=True, doPadding=True)
    atest = np.array([[np.nan, np.nan],
                      [np.nan, 3.]])
    np.testing.assert_almost_equal(grid._data, atest)

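# A minimal, self-contained sketch of the padding behavior exercised by
# the two tests above, assuming the same convention as doPadding=True:
# rows/columns of padValue are added around the source data so that
# sample windows extending past the grid edge can still be filled (and,
# when resampling, NaN pad values bleed into edge interpolations, which
# is why the previous test expects NaNs).
def _padding_sketch():
    data = np.array([[0., 1.],
                     [2., 3.]])
    padded = np.pad(data, pad_width=1, constant_values=np.nan)
    assert padded.shape == (4, 4)  # NaN ring around the original 2x2
    assert np.isnan(padded[0, 0]) and padded[1, 1] == 0.0
    return padded
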
def read_user_file_test(fname, xmin, xmax, ymin, ymax):
    gd = get_file_geodict(fname)
    sample = GeoDict.createDictFromBox(xmin, xmax, ymin, ymax, gd.dx, gd.dy)
    t1 = time.time()
    grid = read(fname, samplegeodict=sample)
    t2 = time.time()
    nrows, ncols = grid._data.shape
    npixels = nrows * ncols
    print('%.2f seconds to read %i pixels using h5py' % (t2 - t1, npixels))

    # note: hard-coded bounds for the rasterio comparison
    west, east, south, north = (-105.00416666665, -102.98750000804999,
                                34.98750000805, 37.00416666665)
    src = rasterio.open(fname, 'r')
    window = src.window(west, south, east, north)
    t1 = time.time()
    data = src.read(window=window)
    t2 = time.time()
    print('%.2f seconds to read %i pixels using rasterio' %
          (t2 - t1, npixels))
    ratio = grid._data.sum() / data.sum()
    print('Ratio of h5py data to rasterio data is %.4f' % ratio)
    src.close()

def draw_contour(shakegrid, popgrid, oceanfile, oceangridfile, cityfile,
                 basename, borderfile=None, is_scenario=False):
    """Create a contour map showing MMI contours over greyscale population.

    :param shakegrid:
      ShakeGrid object.
    :param popgrid:
      Grid2D object containing population data.
    :param oceanfile:
      String path to file containing ocean vector data in a format
      compatible with fiona.
    :param oceangridfile:
      String path to file containing ocean grid data.
    :param cityfile:
      String path to file containing GeoNames cities data.
    :param basename:
      String path containing desired output PDF base name, i.e.,
      /home/pager/exposure. ".pdf" and ".png" files will be made.
    :param borderfile:
      String path to file containing country border vector data, or None.
    :param is_scenario:
      Boolean indicating whether this is a scenario event; if True, a
      "SCENARIO" watermark is drawn across the map.
    :returns:
      Tuple containing:
        - Name of PDF file created.
        - Name of PNG file created.
        - Cities object containing the cities that were rendered on the
          contour map.
    """
    gd = shakegrid.getGeoDict()

    # Retrieve the epicenter - this will get used on the map
    center_lat = shakegrid.getEventDict()['lat']
    center_lon = shakegrid.getEventDict()['lon']

    # load the ocean grid file (has 1s in ocean, 0s over land)
    # having this file saves us almost 30 seconds!
    oceangrid = read(oceangridfile,
                     samplegeodict=gd,
                     resample=True,
                     doPadding=True)

    # load the cities data, limit to cities within shakemap bounds
    allcities = Cities.fromDefault()
    cities = allcities.limitByBounds((gd.xmin, gd.xmax, gd.ymin, gd.ymax))

    # define the map
    # first cope with the 180 meridian
    height = (gd.ymax - gd.ymin) * DEG2KM
    if gd.xmin < gd.xmax:
        width = (gd.xmax - gd.xmin) * \
            np.cos(np.radians(center_lat)) * DEG2KM
        xmin, xmax, ymin, ymax = (gd.xmin, gd.xmax, gd.ymin, gd.ymax)
    else:
        xmin, xmax, ymin, ymax = (gd.xmin, gd.xmax, gd.ymin, gd.ymax)
        xmax += 360
        width = ((gd.xmax + 360) - gd.xmin) * \
            np.cos(np.radians(center_lat)) * DEG2KM

    aspect = width / height

    # if the aspect is not 1, then trim bounds in x or y direction
    # as appropriate
    if width > height:
        dw = (width - height) / 2.0  # this is width in km
        xmin = xmin + dw / (np.cos(np.radians(center_lat)) * DEG2KM)
        xmax = xmax - dw / (np.cos(np.radians(center_lat)) * DEG2KM)
        width = (xmax - xmin) * np.cos(np.radians(center_lat)) * DEG2KM
    if height > width:
        dh = (height - width) / 2.0  # this is height in km
        ymin = ymin + dh / DEG2KM
        ymax = ymax - dh / DEG2KM
        height = (ymax - ymin) * DEG2KM

    aspect = width / height
    figheight = FIGWIDTH / aspect
    bbox = (xmin, ymin, xmax, ymax)
    bounds = (xmin, xmax, ymin, ymax)
    figsize = (FIGWIDTH, figheight)

    # Create the MercatorMap object, which holds a separate but identical
    # axes object used to determine collisions between city labels.
    mmap = MercatorMap(bounds, figsize, cities, padding=0.5)
    fig = mmap.figure
    ax = mmap.axes
    # this needs to be done here so that city label collision
    # detection will work
    fig.canvas.draw()

    geoproj = mmap.geoproj
    proj = mmap.proj

    # project our population grid to the map projection
    projstr = proj.proj4_init
    popgrid_proj = popgrid.project(projstr)
    popdata = popgrid_proj.getData()
    newgd = popgrid_proj.getGeoDict()

    # Use our GMT-inspired palette class to create population
    # and MMI colormaps
    popmap = ColorPalette.fromPreset('pop')
    mmimap = ColorPalette.fromPreset('mmi')

    # set the image extent to that of the data
    img_extent = (newgd.xmin, newgd.xmax, newgd.ymin, newgd.ymax)
    plt.imshow(popdata,
               origin='upper',
               extent=img_extent,
               cmap=popmap.cmap,
               vmin=popmap.vmin,
               vmax=popmap.vmax,
               zorder=POP_ZORDER,
               interpolation='nearest')

    # draw 10m res coastlines
    ax.coastlines(resolution="10m", zorder=COAST_ZORDER)

    states_provinces = cfeature.NaturalEarthFeature(
        category='cultural',
        name='admin_1_states_provinces_lines',
        scale='50m',
        facecolor='none')
    ax.add_feature(states_provinces, edgecolor='black',
                   zorder=COAST_ZORDER)

    # draw country borders using natural earth data set
    if borderfile is not None:
        borders = ShapelyFeature(
            Reader(borderfile).geometries(), ccrs.PlateCarree())
        ax.add_feature(borders, zorder=COAST_ZORDER,
                       edgecolor='black', linewidth=2, facecolor='none')

    # clip the ocean data to the shakemap
    bbox = (gd.xmin, gd.ymin, gd.xmax, gd.ymax)
    oceanshapes = _clip_bounds(bbox, oceanfile)
    ax.add_feature(ShapelyFeature(oceanshapes, crs=geoproj),
                   facecolor=WATERCOLOR, zorder=OCEAN_ZORDER)

    # Here we're going to project the MMI data to our mercator map,
    # then smooth and contour that projected grid.

    # smooth the MMI data for contouring, then project
    mmi = shakegrid.getLayer('mmi').getData()
    smoothed_mmi = gaussian_filter(mmi, FILTER_SMOOTH)
    newgd = shakegrid.getGeoDict().copy()
    smooth_grid = Grid2D(data=smoothed_mmi, geodict=newgd)
    smooth_grid_merc = smooth_grid.project(projstr)
    newgd2 = smooth_grid_merc.getGeoDict()

    # project the ocean grid
    oceangrid_merc = oceangrid.project(projstr)

    # create masked arrays using the ocean grid
    data_xmin, data_xmax = newgd2.xmin, newgd2.xmax
    data_ymin, data_ymax = newgd2.ymin, newgd2.ymax
    smooth_data = smooth_grid_merc.getData()
    landmask = np.ma.masked_where(oceangrid_merc._data == 0.0,
                                  smooth_data)
    oceanmask = np.ma.masked_where(oceangrid_merc._data == 1.0,
                                   smooth_data)

    # contour the data
    contourx = np.linspace(data_xmin, data_xmax, newgd2.nx)
    contoury = np.linspace(data_ymin, data_ymax, newgd2.ny)
    ax.contour(
        contourx, contoury, np.flipud(oceanmask),
        linewidths=3.0, linestyles='solid', zorder=1000,
        cmap=mmimap.cmap, vmin=mmimap.vmin, vmax=mmimap.vmax,
        levels=np.arange(0.5, 10.5, 1.0),
    )
    ax.contour(
        contourx, contoury, np.flipud(landmask),
        linewidths=2.0, linestyles='dashed', zorder=OCEANC_ZORDER,
        cmap=mmimap.cmap, vmin=mmimap.vmin, vmax=mmimap.vmax,
        levels=np.arange(0.5, 10.5, 1.0),
    )

    # the idea here is to plot invisible MMI contours at integer levels
    # and then label them. clabel method won't allow text to appear,
    # which in this case is kind of ok, because it allows us an
    # easy way to draw MMI labels as roman numerals.
    cs_land = plt.contour(
        contourx, contoury, np.flipud(oceanmask),
        linewidths=0.0, levels=np.arange(0, 11),
        alpha=0.0, zorder=CLABEL_ZORDER,
    )
    clabel_text = ax.clabel(cs_land, cs_land.cvalues,
                            colors='k', fmt='%.0f', fontsize=40)
    for clabel in clabel_text:
        x, y = clabel.get_position()
        label_str = clabel.get_text()
        roman_label = MMI_LABELS[label_str]
        th = plt.text(x, y, roman_label, zorder=CLABEL_ZORDER,
                      ha='center', va='center',
                      color='black', weight='normal', size=16)
        th.set_path_effects([
            path_effects.Stroke(linewidth=2.0, foreground='white'),
            path_effects.Normal()
        ])

    cs_ocean = plt.contour(
        contourx, contoury, np.flipud(landmask),
        linewidths=0.0, levels=np.arange(0, 11),
        zorder=CLABEL_ZORDER,
    )
    clabel_text = ax.clabel(cs_ocean, cs_ocean.cvalues,
                            colors='k', fmt='%.0f', fontsize=40)
    for clabel in clabel_text:
        x, y = clabel.get_position()
        label_str = clabel.get_text()
        roman_label = MMI_LABELS[label_str]
        th = plt.text(x, y, roman_label,
                      ha='center', va='center',
                      color='black', weight='normal', size=16)
        th.set_path_effects([
            path_effects.Stroke(linewidth=2.0, foreground='white'),
            path_effects.Normal()
        ])

    # draw meridians and parallels using Cartopy's functions for that
    gl = ax.gridlines(draw_labels=True,
                      linewidth=2, color=(0.9, 0.9, 0.9),
                      alpha=0.5, linestyle='-',
                      zorder=GRID_ZORDER)
    gl.xlabels_top = False
    gl.xlabels_bottom = False
    gl.ylabels_left = False
    gl.ylabels_right = False
    gl.xlines = True

    # let's floor/ceil the edges to nearest half a degree
    gxmin = np.floor(xmin * 2) / 2
    gxmax = np.ceil(xmax * 2) / 2
    gymin = np.floor(ymin * 2) / 2
    gymax = np.ceil(ymax * 2) / 2

    xlocs = np.linspace(gxmin, gxmax + 0.5, num=5)
    ylocs = np.linspace(gymin, gymax + 0.5, num=5)

    gl.xlocator = mticker.FixedLocator(xlocs)
    gl.ylocator = mticker.FixedLocator(ylocs)
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlabel_style = {'size': 15, 'color': 'black'}
    gl.ylabel_style = {'size': 15, 'color': 'black'}

    # TODO - figure out x/y axes data coordinates
    # corresponding to 10% from left and 10% from top
    # use geoproj and proj
    dleft = 0.01
    dtop = 0.97
    proj_str = proj.proj4_init
    merc_to_dd = pyproj.Proj(proj_str)

    # use built-in transforms to get from axes units to data units
    display_to_data = ax.transData.inverted()
    axes_to_display = ax.transAxes

    # these are x,y coordinates in projected space
    yleft, t1 = display_to_data.transform(
        axes_to_display.transform((dleft, 0.5)))
    t2, xtop = display_to_data.transform(
        axes_to_display.transform((0.5, dtop)))

    # these are coordinates in lon,lat space
    yleft_dd, t1_dd = merc_to_dd(yleft, t1, inverse=True)
    t2_dd, xtop_dd = merc_to_dd(t2, xtop, inverse=True)

    # drawing our own tick labels INSIDE the plot, as
    # Cartopy doesn't seem to support this.
    yrange = ymax - ymin
    xrange = xmax - xmin
    ddlabelsize = 12
    for xloc in gl.xlocator.locs:
        outside = xloc < xmin or xloc > xmax
        # don't draw labels when we're too close to either edge
        near_edge = (xloc - xmin) < (xrange * 0.1) or \
            (xmax - xloc) < (xrange * 0.1)
        if outside or near_edge:
            continue
        # label west/east longitudes to match the N/S handling below
        if xloc < 0:
            xtext = r'$%.1f^\circ$W' % (abs(xloc))
        else:
            xtext = r'$%.1f^\circ$E' % (abs(xloc))
        ax.text(xloc, xtop_dd, xtext,
                fontsize=ddlabelsize, zorder=GRID_ZORDER, ha='center',
                fontname=DEFAULT_FONT,
                transform=ccrs.Geodetic())

    for yloc in gl.ylocator.locs:
        outside = yloc < gd.ymin or yloc > gd.ymax
        # don't draw labels when we're too close to either edge
        near_edge = (yloc - gd.ymin) < (yrange * 0.1) or \
            (gd.ymax - yloc) < (yrange * 0.1)
        if outside or near_edge:
            continue
        if yloc < 0:
            ytext = r'$%.1f^\circ$S' % (abs(yloc))
        else:
            ytext = r'$%.1f^\circ$N' % (abs(yloc))
        ax.text(yleft_dd, yloc, ytext,
                fontsize=ddlabelsize, zorder=GRID_ZORDER, va='center',
                fontname=DEFAULT_FONT,
                transform=ccrs.Geodetic())

    # draw cities
    mapcities = mmap.drawCities(shadow=True, zorder=CITIES_ZORDER)

    # draw the figure border thickly
    # TODO - figure out how to draw map border
    # bwidth = 3
    # ax.spines['top'].set_visible(True)
    # ax.spines['left'].set_visible(True)
    # ax.spines['bottom'].set_visible(True)
    # ax.spines['right'].set_visible(True)
    # ax.spines['top'].set_linewidth(bwidth)
    # ax.spines['right'].set_linewidth(bwidth)
    # ax.spines['bottom'].set_linewidth(bwidth)
    # ax.spines['left'].set_linewidth(bwidth)

    # Get the corner of the map with the lowest population
    corner_rect, filled_corner = _get_open_corner(popgrid, ax)
    clat2 = round_to_nearest(center_lat, 1.0)
    clon2 = round_to_nearest(center_lon, 1.0)

    # draw a little globe in the corner showing in small-scale
    # where the earthquake is located.
    proj = ccrs.Orthographic(central_latitude=clat2,
                             central_longitude=clon2)
    ax2 = fig.add_axes(corner_rect, projection=proj)
    ax2.add_feature(cfeature.OCEAN, zorder=0,
                    facecolor=WATERCOLOR, edgecolor=WATERCOLOR)
    ax2.add_feature(cfeature.LAND, zorder=0, edgecolor='black')
    ax2.plot([clon2], [clat2], 'w*', linewidth=1, markersize=16,
             markeredgecolor='k', markerfacecolor='r')
    ax2.gridlines()
    ax2.set_global()
    ax2.outline_patch.set_edgecolor('black')
    ax2.outline_patch.set_linewidth(2)

    # Draw the map scale in the unoccupied lower corner.
    corner = 'lr'
    if filled_corner == 'lr':
        corner = 'll'
    draw_scale(ax, corner, pady=0.05, padx=0.05)

    # Draw the epicenter as a black star
    plt.sca(ax)
    plt.plot(center_lon, center_lat, 'k*', markersize=16,
             zorder=EPICENTER_ZORDER, transform=geoproj)

    if is_scenario:
        plt.text(center_lon, center_lat, 'SCENARIO', fontsize=64,
                 zorder=WATERMARK_ZORDER, transform=geoproj,
                 alpha=0.2, color='red', horizontalalignment='center')

    # create pdf and png output file names
    pdf_file = basename + '.pdf'
    png_file = basename + '.png'

    # save to pdf
    plt.savefig(pdf_file)
    plt.savefig(png_file)

    return (pdf_file, png_file, mapcities)

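# Self-contained sketch (not from the original module) of the
# aspect-trimming logic in draw_contour() above: the longer dimension is
# trimmed symmetrically until the map window is square in kilometers. The
# deg2km constant and default bounds below are illustrative values.
def _aspect_trim_sketch(xmin=-120.0, xmax=-114.0, ymin=32.0, ymax=36.0,
                        center_lat=34.0, deg2km=111.19):
    coslat = np.cos(np.radians(center_lat))
    width = (xmax - xmin) * coslat * deg2km
    height = (ymax - ymin) * deg2km
    if width > height:
        dw = (width - height) / 2.0  # km to trim from each side
        xmin += dw / (coslat * deg2km)
        xmax -= dw / (coslat * deg2km)
    elif height > width:
        dh = (height - width) / 2.0
        ymin += dh / deg2km
        ymax -= dh / deg2km
    # the window is now square in km
    width = (xmax - xmin) * coslat * deg2km
    height = (ymax - ymin) * deg2km
    assert abs(width - height) < 1e-6
    return (xmin, xmax, ymin, ymax)
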
def execute(self):
    """
    Raises:
        NotADirectoryError: When the event data directory does not exist.
        FileNotFoundError: When the shake_result HDF file does not exist.
    """
    install_path, data_path = get_config_paths()
    datadir = os.path.join(data_path, self._eventid, 'current', 'products')
    if not os.path.isdir(datadir):
        raise NotADirectoryError('%s is not a valid directory.' % datadir)
    datafile = os.path.join(datadir, 'shake_result.hdf')
    if not os.path.isfile(datafile):
        raise FileNotFoundError('%s does not exist.' % datafile)

    # Open the ShakeMapOutputContainer and extract the data
    container = ShakeMapOutputContainer.load(datafile)
    if container.getDataType() != 'grid':
        raise NotImplementedError('mapping module can only operate on '
                                  'gridded data, not sets of points')

    # get the path to the products.conf file, load the config
    config_file = os.path.join(install_path, 'config', 'products.conf')
    spec_file = get_configspec('products')
    validator = get_custom_validator()
    config = ConfigObj(config_file, configspec=spec_file)
    results = config.validate(validator)
    check_extra_values(config, self.logger)
    if not isinstance(results, bool) or not results:
        config_error(config, results)

    # create contour files
    self.logger.debug('Mapping...')

    # get the filter size from the products.conf
    filter_size = config['products']['contour']['filter_size']

    # get the operator setting from config
    operator = config['products']['mapping']['operator']

    # get all of the pieces needed for the mapping functions
    layers = config['products']['mapping']['layers']
    if 'topography' in layers and layers['topography'] != '':
        topofile = layers['topography']
    else:
        topofile = None
    if 'roads' in layers and layers['roads'] != '':
        roadfile = layers['roads']
    else:
        roadfile = None
    if 'faults' in layers and layers['faults'] != '':
        faultfile = layers['faults']
    else:
        faultfile = None

    # Get the number of parallel workers
    max_workers = config['products']['mapping']['max_workers']

    # Reading HDF5 files currently takes a long time, due to poor
    # programming in MapIO. To save us some time until that issue is
    # resolved, we'll coarsely subset the topo grid once here and pass
    # it into both mapping functions

    # get the bounds of the map
    info = container.getMetadata()
    xmin = info['output']['map_information']['min']['longitude']
    xmax = info['output']['map_information']['max']['longitude']
    ymin = info['output']['map_information']['min']['latitude']
    ymax = info['output']['map_information']['max']['latitude']
    dy = float(
        info['output']['map_information']['grid_spacing']['latitude'])
    dx = float(
        info['output']['map_information']['grid_spacing']['longitude'])
    padx = 5 * dx
    pady = 5 * dy
    sxmin = float(xmin) - padx
    sxmax = float(xmax) + padx
    symin = float(ymin) - pady
    symax = float(ymax) + pady

    sampledict = GeoDict.createDictFromBox(sxmin, sxmax, symin, symax,
                                           dx, dy)
    if topofile:
        topogrid = read(topofile,
                        samplegeodict=sampledict,
                        resample=False)
    else:
        tdata = np.full([sampledict.ny, sampledict.nx], 0.0)
        topogrid = Grid2D(data=tdata, geodict=sampledict)

    model_config = container.getConfig()

    imtlist = container.getIMTs()

    textfile = os.path.join(
        get_data_path(), 'mapping',
        'map_strings.' + config['products']['mapping']['language'])
    text_dict = get_text_strings(textfile)
    if config['products']['mapping']['fontfamily'] != '':
        matplotlib.rcParams['font.family'] = \
            config['products']['mapping']['fontfamily']
        matplotlib.rcParams['axes.unicode_minus'] = False

    allcities = Cities.fromDefault()
    states_provs = None
    countries = None
    oceans = None
    lakes = None
    extent = (float(xmin), float(ymin), float(xmax), float(ymax))
    if 'CALLED_FROM_PYTEST' not in os.environ:
        states_provs = cfeature.NaturalEarthFeature(
            category='cultural',
            name='admin_1_states_provinces_lines',
            scale='10m',
            facecolor='none')
        # skip state/province lines if the map would get too cluttered;
        # the variable is reused to hold the geometry list, so re-create
        # the feature if we decide to keep it
        states_provs = list(states_provs.intersecting_geometries(extent))
        if len(states_provs) > 300:
            states_provs = None
        else:
            states_provs = cfeature.NaturalEarthFeature(
                category='cultural',
                name='admin_1_states_provinces_lines',
                scale='10m',
                facecolor='none')

        countries = cfeature.NaturalEarthFeature(
            category='cultural',
            name='admin_0_countries',
            scale='10m',
            facecolor='none')

        oceans = cfeature.NaturalEarthFeature(
            category='physical',
            name='ocean',
            scale='10m',
            facecolor=WATERCOLOR)

        lakes = cfeature.NaturalEarthFeature(
            category='physical',
            name='lakes',
            scale='10m',
            facecolor=WATERCOLOR)

    if faultfile is not None:
        faults = ShapelyFeature(Reader(faultfile).geometries(),
                                ccrs.PlateCarree(), facecolor='none')
    else:
        faults = None

    if roadfile is not None:
        roads = ShapelyFeature(Reader(roadfile).geometries(),
                               ccrs.PlateCarree(), facecolor='none')
        # skip roads if the map would get too cluttered; as above,
        # re-create the feature after counting intersections
        if len(list(roads.intersecting_geometries(extent))) > 200:
            roads = None
        else:
            roads = ShapelyFeature(Reader(roadfile).geometries(),
                                   ccrs.PlateCarree(), facecolor='none')
    else:
        roads = None

    alist = []
    for imtype in imtlist:
        component, imtype = imtype.split('/')
        comp = container.getComponents(imtype)[0]
        d = {
            'imtype': imtype,
            'topogrid': topogrid,
            'allcities': allcities,
            'states_provinces': states_provs,
            'countries': countries,
            'oceans': oceans,
            'lakes': lakes,
            'roads': roads,
            'faults': faults,
            'datadir': datadir,
            'operator': operator,
            'filter_size': filter_size,
            'info': info,
            'component': comp,
            'imtdict': container.getIMTGrids(imtype, comp),
            'ruptdict': copy.deepcopy(container.getRuptureDict()),
            'stationdict': container.getStationDict(),
            'config': model_config,
            'tdict': text_dict
        }
        alist.append(d)
        if imtype == 'MMI':
            g = copy.deepcopy(d)
            g['imtype'] = 'thumbnail'
            alist.append(g)
            h = copy.deepcopy(d)
            h['imtype'] = 'overlay'
            alist.append(h)
            self.contents.addFile('intensityMap', 'Intensity Map',
                                  'Map of macroseismic intensity.',
                                  'intensity.jpg', 'image/jpeg')
            self.contents.addFile('intensityMap', 'Intensity Map',
                                  'Map of macroseismic intensity.',
                                  'intensity.pdf', 'application/pdf')
            self.contents.addFile('intensityThumbnail',
                                  'Intensity Thumbnail',
                                  'Thumbnail of intensity map.',
                                  'pin-thumbnail.png', 'image/png')
            self.contents.addFile(
                'intensityOverlay', 'Intensity Overlay and World File',
                'Macroseismic intensity rendered as a '
                'PNG overlay and associated world file',
                'intensity_overlay.png', 'image/png')
            self.contents.addFile(
                'intensityOverlay', 'Intensity Overlay and World File',
                'Macroseismic intensity rendered as a '
                'PNG overlay and associated world file',
                'intensity_overlay.pngw', 'text/plain')
        else:
            fileimt = oq_to_file(imtype)
            self.contents.addFile(fileimt + 'Map',
                                  fileimt.upper() + ' Map',
                                  'Map of ' + imtype + '.',
                                  fileimt + '.jpg', 'image/jpeg')
            self.contents.addFile(fileimt + 'Map',
                                  fileimt.upper() + ' Map',
                                  'Map of ' + imtype + '.',
                                  fileimt + '.pdf', 'application/pdf')

    if max_workers > 0:
        with cf.ProcessPoolExecutor(max_workers=max_workers) as ex:
            results = ex.map(make_map, alist)
            list(results)
    else:
        for adict in alist:
            make_map(adict)

    container.close()

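# Minimal, self-contained sketch of the max_workers dispatch at the end
# of execute(): a process pool when parallelism is requested, a plain
# loop otherwise. `_work` stands in for make_map; the dicts are dummies.
def _work(adict):
    return adict['imtype']

def _dispatch_sketch(max_workers=0):
    alist = [{'imtype': 'MMI'}, {'imtype': 'PGA'}]
    if max_workers > 0:
        with cf.ProcessPoolExecutor(max_workers=max_workers) as ex:
            results = list(ex.map(_work, alist))
    else:
        results = [_work(adict) for adict in alist]
    return results  # ['MMI', 'PGA']
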
def getLosses(self, shakefile):
    """Calculate number of fatalities using semi-empirical approach.

    :param shakefile:
      Path to a ShakeMap grid.xml file.
    :returns:
      Tuple of:
        1) Total number of fatalities
        2) Dictionary of residential fatalities per building type, per
           country.
        3) Dictionary of non-residential fatalities per building type,
           per country.
    """
    # get shakemap geodict
    shakedict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
    # get population geodict
    popdict = get_file_geodict(self._popfile)
    # get country code geodict
    isodict = get_file_geodict(self._isofile)
    # get urban grid geodict
    urbdict = get_file_geodict(self._urbanfile)

    # load all of the grids we need
    if popdict == shakedict == isodict == urbdict:
        # special case, probably for testing...
        shakegrid = ShakeGrid.load(shakefile, adjust='res')
        popgrid = read(self._popfile)
        isogrid = read(self._isofile)
        urbgrid = read(self._urbanfile)
    else:
        sampledict = popdict.getBoundsWithin(shakedict)
        shakegrid = ShakeGrid.load(shakefile,
                                   samplegeodict=sampledict,
                                   resample=True,
                                   method='linear',
                                   adjust='res')
        popgrid = read(self._popfile,
                       samplegeodict=sampledict,
                       resample=False)
        isogrid = read(self._isofile,
                       samplegeodict=sampledict,
                       resample=True,
                       method='nearest',
                       doPadding=True,
                       padValue=0)
        urbgrid = read(self._urbanfile,
                       samplegeodict=sampledict,
                       resample=True,
                       method='nearest',
                       doPadding=True,
                       padValue=RURAL)

    # determine the local apparent time of day (based on longitude)
    edict = shakegrid.getEventDict()
    etime = edict['event_timestamp']
    elon = edict['lon']
    time_of_day, event_year, event_hour = get_time_of_day(etime, elon)

    # round off our MMI data to nearest 0.5 (5.5 should stay 5.5, 5.4
    # should become 5.5, 5.24 should become 5.0, etc.)
    # TODO: Someday, make this more general to include perhaps grids of
    # all IMT values, or at least the ones we have collapse data for.
    mmidata = np.round(shakegrid.getLayer('mmi').getData() / 0.5) * 0.5

    # get arrays from our other grids
    popdata = popgrid.getData()
    isodata = isogrid.getData()
    urbdata = urbgrid.getData()

    # modify the population values for growth rate by country
    ucodes = np.unique(isodata[~np.isnan(isodata)])
    for ccode in ucodes:
        cidx = (isodata == ccode)
        popdata[cidx] = self._popgrowth.adjustPopulation(
            popdata[cidx], ccode, self._popyear, event_year)

    # create a dictionary containing indoor populations by building
    # type (in cells where MMI >= 6)
    # popbystruct = get_indoor_pop(mmidata, popdata, urbdata, isodata,
    #                              time_of_day)

    # find all mmi values greater than 9, set them to 9
    mmidata[mmidata > 9.0] = 9.0

    # dictionary containers for sums of fatalities (res/nonres) by
    # building type
    res_fatal_by_ccode = {}
    nonres_fatal_by_ccode = {}

    # fatality sum
    ntotal = 0

    # loop over countries
    ucodes = np.unique(isodata[~np.isnan(isodata)])
    for ucode in ucodes:
        if ucode == 0:
            continue
        res_fatal_by_btype = {}
        nonres_fatal_by_btype = {}

        cdict = self._country.getCountry(int(ucode))
        ccode = cdict['ISO2']

        # get the workforce Series data for the current country
        wforce = self.getWorkforce(ccode)
        if wforce is None:
            logging.info('No workforce data for %s. Skipping.'
                         % (cdict['Name']))
            continue

        # loop over MMI values 6-9
        for mmi in np.arange(6, 9.5, 0.5):
            c1 = (mmidata == mmi)
            c2 = (isodata == ucode)
            if ucode > 900 and ucode != CALIFORNIA_US_CCODE:
                ucode = US_CCODE
            for dclass in [URBAN, RURAL]:
                c3 = (urbdata == dclass)

                # get the population data in the cells that match MMI,
                # country, and density class
                popcells = popdata[c1 & c2 & c3]

                # get the population distribution across residential,
                # non-residential, and outdoor.
                res, nonres, outside = pop_dist(
                    popcells, wforce, time_of_day, dclass)

                # get the inventory for urban residential
                resrow, nresrow = self.getInventories(ccode, dclass)

                # TODO - figure out why this is happening, make the
                # following lines not necessary
                if 'Unnamed: 0' in resrow:
                    resrow = resrow.drop('Unnamed: 0')
                if 'Unnamed: 0' in nresrow:
                    nresrow = nresrow.drop('Unnamed: 0')

                # now multiply the residential/non-residential
                # population through the inventory data
                numres = len(resrow)
                numnonres = len(nresrow)
                resmat = np.reshape(
                    resrow.values, (numres, 1)).astype(np.float32)
                nresmat = np.reshape(
                    nresrow.values, (numnonres, 1)).astype(np.float32)
                popres = np.tile(res, (numres, 1))
                popnonres = np.tile(nonres, (numnonres, 1))
                popresbuilding = (popres * resmat)
                popnonresbuilding = (popnonres * nresmat)

                # now we have the residential and non-residential
                # population distributed through the building types for
                # each cell that matches MMI, country, and density
                # criteria. popresbuilding rows are building types,
                # columns are population cells.

                # next, we get the collapse rates for these buildings
                # and multiply them by the population by building.
                collapse_res = self.getCollapse(ccode, mmi, resrow)
                collapse_nonres = self.getCollapse(ccode, mmi, nresrow)
                resrates = np.reshape(
                    collapse_res.values.astype(np.float32),
                    (numres, 1))
                nonresrates = np.reshape(
                    collapse_nonres.values.astype(np.float32),
                    (numnonres, 1))
                rescollapse = popresbuilding * resrates
                nonrescollapse = popnonresbuilding * nonresrates

                # get the fatality rates given collapse by building
                # type and multiply through the result of
                # collapse*population per building
                resfatalcol = self.getFatalityRates(
                    ccode, time_of_day, resrow)
                nonresfatalcol = self.getFatalityRates(
                    ccode, time_of_day, nresrow)
                resfatal = np.reshape(
                    resfatalcol.values.astype(np.float32),
                    (numres, 1))
                nonresfatal = np.reshape(
                    nonresfatalcol.values.astype(np.float32),
                    (numnonres, 1))
                resfat = rescollapse * resfatal
                nonresfat = nonrescollapse * nonresfatal

                # zero out the cells where fatalities are less than 1
                # or nan
                try:
                    if len(resfat) and len(resfat[0]):
                        resfat[np.ma.masked_less(resfat, 1).mask] = 0.0
                except Exception:
                    resfat[np.isnan(resfat)] = 0.0
                try:
                    if len(nonresfat) and len(nonresfat[0]):
                        nonresfat[np.ma.masked_less(
                            nonresfat, 1).mask] = 0.0
                except Exception:
                    nonresfat[np.isnan(nonresfat)] = 0.0

                # sum the fatalities per building through all cells
                resfatbybuilding = np.nansum(resfat, axis=1)
                nonresfatbybuilding = np.nansum(nonresfat, axis=1)
                resfdict = dict(
                    zip(resrow.index, resfatbybuilding.tolist()))
                nonresfdict = dict(
                    zip(nresrow.index, nonresfatbybuilding.tolist()))
                res_fatal_by_btype = add_dicts(
                    res_fatal_by_btype, resfdict)
                nonres_fatal_by_btype = add_dicts(
                    nonres_fatal_by_btype, nonresfdict)

        # add the fatalities by building type to the dictionary
        # containing fatalities by country
        res_fatal_by_ccode[ccode] = res_fatal_by_btype.copy()
        nonres_fatal_by_ccode[ccode] = nonres_fatal_by_btype.copy()

        # increment the total number of fatalities
        ntotal += int(sum(res_fatal_by_btype.values()) +
                      sum(nonres_fatal_by_btype.values()))

    return (ntotal, res_fatal_by_ccode, nonres_fatal_by_ccode)

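# Hypothetical usage sketch for getLosses() above. The class name
# SemiEmpiricalFatality and its fromDefault() constructor are assumptions
# about the surrounding PAGER code, not shown in this module; 'grid.xml'
# is an illustrative ShakeMap path.
#
# semi = SemiEmpiricalFatality.fromDefault()
# ntotal, res_fatal, nonres_fatal = semi.getLosses('grid.xml')
# print('Total fatalities: %i' % ntotal)
# for ccode, by_btype in res_fatal.items():
#     print(ccode, sum(by_btype.values()))
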
def calcExposure(self, shakefile):
    """Calculate population exposure to shaking, per country, plus total
    exposure across all countries.

    :param shakefile:
      Path to ShakeMap grid.xml file.
    :returns:
      Dictionary containing country code (ISO2) keys, and values of
      10-element arrays representing population exposure to MMI 1-10.
      Dictionary will contain an additional key 'TotalExposure', with
      value of exposure across all countries. Dictionary will also
      contain a field "maximum_border_mmi" which indicates the maximum
      MMI value along any edge of the ShakeMap.
    """
    # get shakemap geodict
    shakedict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
    # get population geodict
    popdict = get_file_geodict(self._popfile)
    # get country code geodict
    isodict = get_file_geodict(self._isofile)

    # special case for very high latitude events that may be outside
    # the bounds of our population data...
    if not popdict.intersects(shakedict):
        expdict = {'UK': np.zeros((10,)),
                   'TotalExposure': np.zeros((10,))}
        return expdict

    if popdict == shakedict == isodict:
        # special case, probably for testing...
        self._shakegrid = ShakeGrid.load(shakefile, adjust='res')
        self._popgrid = read(self._popfile)
        self._isogrid = read(self._isofile)
    else:
        sampledict = popdict.getBoundsWithin(shakedict)
        self._shakegrid = ShakeGrid.load(shakefile,
                                         samplegeodict=sampledict,
                                         resample=True,
                                         method='linear',
                                         adjust='res')
        self._popgrid = read(self._popfile,
                             samplegeodict=sampledict,
                             resample=False,
                             doPadding=True,
                             padValue=np.nan)
        self._isogrid = read(self._isofile,
                             samplegeodict=sampledict,
                             resample=True,
                             method='nearest',
                             doPadding=True,
                             padValue=0)

    mmidata = self._shakegrid.getLayer('mmi').getData()
    popdata = self._popgrid.getData()
    isodata = self._isogrid.getData()

    eventyear = self._shakegrid.getEventDict()['event_timestamp'].year

    # in order to avoid crazy far-future scenarios where PAGER models
    # are probably invalid, check to see if the time gap between the
    # date of population data collection and event year reaches either
    # of a couple of different thresholds.
    if eventyear > self._popyear:
        tdiff = (eventyear - self._popyear)
        if tdiff > SCENARIO_WARNING and tdiff < SCENARIO_ERROR:
            msg = ('The input ShakeMap event year is more than %i years '
                   'from the population date. PAGER results for events '
                   'this far in the future may not be valid.'
                   % SCENARIO_WARNING)
            warnings.warn(msg)
        if tdiff > SCENARIO_ERROR:
            msg = ('The input ShakeMap event year is more than %i years '
                   'from the population date. PAGER results for events '
                   'this far in the future are not valid. Stopping.'
                   % SCENARIO_ERROR)
            raise PagerException(msg)

    ucodes = np.unique(isodata[~np.isnan(isodata)])
    for ccode in ucodes:
        cidx = (isodata == ccode)
        popdata[cidx] = self._popgrowth.adjustPopulation(
            popdata[cidx], ccode, self._popyear, eventyear)

    exposure_dict = calc_exposure(mmidata, popdata, isodata)
    newdict = {}

    # Get rolled up exposures
    total = np.zeros((10,), dtype=np.uint32)
    for isocode, value in exposure_dict.items():
        cdict = self._country.getCountry(int(isocode))
        if cdict is None:
            ccode = 'UK'
        else:
            ccode = cdict['ISO2']
        newdict[ccode] = value
        total += value
    newdict['TotalExposure'] = total

    # get the maximum MMI value along any of the four map edges
    nrows, ncols = mmidata.shape
    top = mmidata[0, 0:ncols].max()
    bottom = mmidata[nrows - 1, 0:ncols].max()
    left = mmidata[0:nrows, 0].max()
    right = mmidata[0:nrows, ncols - 1].max()
    newdict['maximum_border_mmi'] = np.array(
        [top, bottom, left, right]).max()

    return newdict

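# Hypothetical usage sketch for calcExposure() above. The class name
# `Exposure` and its constructor arguments are assumptions; the file
# paths are illustrative.
#
# exp = Exposure(popfile='lspop2018.flt', popyear=2018,
#                isofile='isogrid.bil')
# expdict = exp.calcExposure('grid.xml')
# print(expdict['TotalExposure'])       # 10 bins, exposure at MMI 1-10
# print(expdict['maximum_border_mmi'])  # max MMI on any map edge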