def do_gridxml(evid, datapath, oc):
    check_failures(evid, datapath, GridXMLModule)
    mod = GridXMLModule(evid)
    mod.execute()
    mod.writeContents()
    #
    # Test that the grid.xml grids actually match what's in
    # shake_results.hdf
    #
    imts = oc.getIMTs()
    gxml = os.path.join(datapath, evid, 'current', 'products', 'grid.xml')
    g2d = ShakeGrid.load(gxml)
    layers = g2d.getData()
    for imt in imts:
        component, imt = imt.split('/')
        comp = oc.getComponents(imt)
        cdata = oc.getIMTGrids(imt, comp[0])['mean']
        #
        # Do the same conversion to the container data as is
        # done to the file data
        #
        digits = oc.getIMTGrids(imt, comp[0])['mean_metadata']['digits']
        vfunc = rounder(digits)
        if imt == 'MMI':
            cdata = vfunc(cdata)
        elif imt == 'PGV':
            cdata = vfunc(np.exp(cdata))
        else:
            cdata = vfunc(100 * np.exp(cdata))
        lname = _oq_to_gridxml(imt).lower()
        layer = layers[lname]
        gdata = layer.getData()
        assert np.allclose(gdata, cdata)
    #
    # Do the uncertainty grids
    #
    uxml = os.path.join(datapath, evid, 'current', 'products',
                        'uncertainty.xml')
    u2d = ShakeGrid.load(uxml)
    ulayers = u2d.getData()
    for imt in imts:
        component, imt = imt.split('/')
        comp = oc.getComponents(imt)
        cdata = oc.getIMTGrids(imt, comp[0])['std']
        #
        # The stddevs just get rounded
        #
        digits = oc.getIMTGrids(imt, comp[0])['std_metadata']['digits']
        vfunc = rounder(digits)
        cdata = vfunc(cdata)
        lname = 'std' + _oq_to_gridxml(imt).lower()
        layer = ulayers[lname]
        gdata = layer.getData()
        assert np.allclose(gdata, cdata)
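
# The test above leans on two helpers from the surrounding codebase,
# rounder() and _oq_to_gridxml(). Minimal sketches of plausible
# implementations are given below for context only; the real shakemap
# versions may differ, so treat these names and behaviors as assumptions.
import numpy as np


def rounder(digits):
    """Return a function that rounds an array to `digits` decimal places."""
    return lambda arr: np.round(arr, digits)


def _oq_to_gridxml(imt):
    """Map an OpenQuake IMT name (e.g. 'SA(3.0)') to a grid.xml layer name
    (e.g. 'PSA30'); plain IMTs like 'PGA' pass through unchanged."""
    if imt.startswith('SA('):
        period = float(imt[3:-1])
        return 'PSA%02i' % round(period * 10)
    return imt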
def test():
    shakefile = os.path.join(homedir, 'data', 'northridge.xml')
    t1 = datetime.datetime.now()
    sgrid = ShakeGrid.load(shakefile, adjust='res')
    t2 = datetime.datetime.now()
    origin = {}
    origin['id'] = sgrid._eventDict['event_id']
    origin['source'] = sgrid._eventDict['event_network']
    origin['time'] = sgrid._eventDict['event_timestamp']
    origin['lat'] = sgrid._eventDict['lat']
    origin['lon'] = sgrid._eventDict['lon']
    origin['depth'] = sgrid._eventDict['depth']
    origin['magnitude'] = sgrid._eventDict['magnitude']
    header = {}
    header['type'] = 'shakemap'
    header['version'] = sgrid._shakeDict['shakemap_version']
    header['process_time'] = sgrid._shakeDict['process_timestamp']
    header['code_version'] = sgrid._shakeDict['code_version']
    header['originator'] = sgrid._shakeDict['shakemap_originator']
    header['product_id'] = sgrid._shakeDict['shakemap_id']
    header['map_status'] = sgrid._shakeDict['map_status']
    header['event_type'] = sgrid._shakeDict['shakemap_event_type']
    layers = collections.OrderedDict()
    for (layername, layerdata) in sgrid.getData().items():
        layers[layername] = layerdata.getData()
    tdict = {'name': 'fred',
             'family': {'wife': 'wilma', 'daughter': 'pebbles'}}
    mgrid = MultiHazardGrid(layers, sgrid.getGeoDict(), origin, header,
                            metadata={'flintstones': tdict})
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir, 'test.hdf')
    try:
        mgrid.save(testfile)
        t3 = datetime.datetime.now()
        mgrid2 = MultiHazardGrid.load(testfile)
        t4 = datetime.datetime.now()
        xmlmb = os.path.getsize(shakefile) / float(1e6)
        hdfmb = os.path.getsize(testfile) / float(1e6)
        xmltime = (t2 - t1).seconds + (t2 - t1).microseconds / float(1e6)
        hdftime = (t4 - t3).seconds + (t4 - t3).microseconds / float(1e6)
        print('Input XML file size: %.2f MB (loading time %.3f seconds)'
              % (xmlmb, xmltime))
        print('Output HDF file size: %.2f MB (loading time %.3f seconds)'
              % (hdfmb, hdftime))
    except DataSetException as obj:
        pass
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
def test():
    homedir = os.path.dirname(os.path.abspath(__file__))  # where is this script?
    cityfile = os.path.join(homedir, '..', 'data', 'cities1000.txt')
    shakefile1 = os.path.join(homedir, '..', 'data', 'eventdata',
                              'northridge', 'northridge_grid.xml')
    shakefile2 = os.path.join(homedir, '..', 'data', 'eventdata',
                              'lomaprieta', 'lomaprieta_grid.xml')
    shakefiles = [shakefile1, shakefile2]
    lengths = [11, 11]
    first_city = ['Santa Clarita', 'Lexington Hills']
    last_city = ['Bakersfield', 'Fresno']
    ic = 0
    cities = Cities.loadFromGeoNames(cityfile)
    for shakefile in shakefiles:
        shakemap = ShakeGrid.load(shakefile, adjust='res')
        # get the top ten (by population) nearby cities
        clat = shakemap.getEventDict()['lat']
        clon = shakemap.getEventDict()['lon']
        nearcities = cities.limitByRadius(clat, clon, 100)
        nearcities.sortByColumns('pop', ascending=False)
        nearcities = Cities(nearcities._dataframe.iloc[0:10])
        mmigrid = shakemap.getLayer('mmi')
        pc = PagerCities(cities, mmigrid)
        rows = pc.getCityTable(nearcities)
        print('Testing that number of cities retrieved is consistent...')
        assert len(rows) == lengths[ic]
        assert rows.iloc[0]['name'] == first_city[ic]
        assert rows.iloc[-1]['name'] == last_city[ic]
        print('Passed.')
        ic += 1
def get_bounds(shakefile, parameter='pga', threshold=2.0):
    """
    Get the boundaries of the shakemap that include all areas with shaking
    above the defined threshold.

    Args:
        shakefile (str): Path to shakemap file.
        parameter (str): Either 'pga' or 'pgv'.
        threshold (float): Minimum value of parameter of interest, in units
            of %g for pga and cm/s for pgv. The default value of 2%g is
            based on the minimum pga threshold ever observed to have
            triggered landslides by Jibson and Harp (2016).

    Returns:
        dict: A dictionary with keys 'xmin', 'xmax', 'ymin', and 'ymax'
        that defines the boundaries in geographic coordinates.
    """
    shakemap = ShakeGrid.load(shakefile, adjust='res')
    if parameter == 'pga':
        vals = shakemap.getLayer('pga')
    elif parameter == 'pgv':
        vals = shakemap.getLayer('pgv')
    else:
        raise Exception('parameter not valid')
    xmin, xmax, ymin, ymax = vals.getBounds()
    lons = np.linspace(xmin, xmax, vals.getGeoDict().nx)
    lats = np.linspace(ymax, ymin, vals.getGeoDict().ny)
    row, col = np.where(vals.getData() > float(threshold))
    lonmin = lons[col].min()
    lonmax = lons[col].max()
    latmin = lats[row].min()
    latmax = lats[row].max()

    # dummy fillers, only really care about bounds
    boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100}
    # clamp the threshold-exceedance bounds to the shakemap extent
    boundaries1['xmin'] = max(xmin, lonmin)
    boundaries1['xmax'] = min(xmax, lonmax)
    boundaries1['ymin'] = max(ymin, latmin)
    boundaries1['ymax'] = min(ymax, latmax)

    return boundaries1
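
# Example use of get_bounds(); the grid.xml path is a placeholder. The
# returned dictionary can seed a GeoDict for trimming other input layers
# down to the strongly shaken area.
bounds = get_bounds('northridge_grid.xml', parameter='pga', threshold=2.0)
print('Shaking above 2%%g within lon [%.3f, %.3f], lat [%.3f, %.3f]'
      % (bounds['xmin'], bounds['xmax'], bounds['ymin'], bounds['ymax']))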
def sampleFromShakeMap(shakefile, layer, xypoints):
    """Sample a ShakeMap grid file at each of a set of XY (decimal degrees)
    points.

    :param shakefile: Path to the ShakeMap grid.xml file at which to sample
        data.
    :param layer: Name of the ShakeMap layer to sample (e.g., 'pga').
    :param xypoints: 2D numpy array of XY points, decimal degrees.
    :returns: 1D numpy array of grid values at each of input XY points.
    """
    shakegrid = ShakeGrid.load(shakefile, fixFileGeoDict='corner')
    return sampleFromMultiGrid(shakegrid, layer, xypoints)
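
# sampleFromMultiGrid() is referenced above but not shown. A minimal sketch
# of the sampling it implies is given here, assuming the multi-layer grid
# exposes getLayer() and that each layer supports point lookups via
# getValue(lat, lon) -- a plausible but unverified reading of the mapio API.
import numpy as np


def sampleFromMultiGrid(multigrid, layer, xypoints):
    """Sample one named layer of a multi-layer grid at an Nx2 array of
    (x, y) points, returning a 1D array of values."""
    grid = multigrid.getLayer(layer)
    return np.array([grid.getValue(y, x) for x, y in xypoints])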
def test_read():
    xmlfile = os.path.join(homedir, 'data', 'northridge.xml')
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir, 'test.xml')
    try:
        shakegrid = ShakeGrid.load(xmlfile, adjust='res')
        t1 = time.time()
        shakegrid.save(testfile)
        t2 = time.time()
        print('Saving shakemap took %.2f seconds' % (t2 - t1))
    except Exception as error:
        print('Failed to read grid.xml format file "%s". Error "%s".'
              % (xmlfile, str(error)))
        assert 0 == 1
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
def realizations(total_real, my_reals, radius, variables, grid_arr, mu_arr,
                 sigma_arr, list_sizes_grid, list_sizes_mu, shakegrid, voi,
                 comm, dir, output_dir):
    '''
    Function realizations uses output from the main function in loop.py to
    compute realizations of the spatially variable random field.

    :param total_real: integer, total number of realizations assigned to
        each core
    :param my_reals: numpy array, which realizations each core is computing
    :param radius: float, radius of influence
    :param variables: dict, output from initialize function in setup.py
    :param grid_arr: numpy array of all grid array values, combined into one
        large array; these are the indices that each grid point depends on
    :param mu_arr: numpy array of all mu arrays, combined into one large
        array; Sig12.T*Sig11inv
    :param sigma_arr: numpy array of R values
    :param list_sizes_grid: numpy array, the number of elements of grid_arr
        belonging to each grid point
    :param list_sizes_mu: numpy array, the number of elements of mu_arr
        belonging to each grid point
    :param shakegrid: shakegrid object
    :param voi: string, intensity measure
    :param comm: mpi communicator
    :param dir: string, path to inputs folder
    :param output_dir: string, path to output folder

    Outputs are saved to a file. If multiple grid.xml files are used, the
    epsilon matrices will be saved to file. Otherwise realizations of the
    spatially variable ShakeMap will be saved.
    '''
    num_realizations = np.size(my_reals)
    if num_realizations == 0:
        return
    my_rank = comm.Get_rank()
    size = comm.Get_size()

    # Determine if multiple grid.xml files are to be used.
    multiple_maps = 0
    isd = True
    while isd == True:
        isd = os.path.isdir(os.path.join(dir, '%i' % (multiple_maps + 1)))
        if isd == True:
            multiple_maps += 1

    # Set data file names. If multiple maps are used, store epsilon
    if multiple_maps > 0:
        write_correlation = True
        filename = os.path.join(output_dir,
                                'Epsilon_%s_%i.hdf5' % (voi, my_rank))
    else:
        write_correlation = False
        filename = os.path.join(output_dir,
                                'SVSM_%s_%i.hdf5' % (voi, my_rank))

    shakemap = shakegrid.getLayer(voi)
    N = variables['N']
    M = variables['M']
    event_attr = shakegrid.getEventDict()
    grid_attr = shakegrid.getGeoDict()

    # Set up dictionaries to store data
    uncertaintydata, data, data_new, sm_dict = {}, {}, {}, {}
    uncertaintydata['map0'] = variables['uncertaintydata']
    stationlist = os.path.join(dir, 'stationlist.xml')
    stationdata = readStation(stationlist)
    data['map0'] = variables['data']
    sm_dict['map0'] = shakemap

    # If there are multiple maps, store the other maps' data
    for i in range(1, multiple_maps + 1):
        folder = '%i/' % i
        sm_grid = ShakeGrid.load(os.path.join(dir, folder, 'grid.xml'),
                                 adjust='res')
        sm_dict['map%i' % i] = sm_grid.getLayer(voi)
        event_attr = sm_grid.getEventDict()
        unc_grid = ShakeGrid.load(os.path.join(dir, folder,
                                               'uncertainty.xml'),
                                  adjust='res')
        stationlist = os.path.join(dir, folder, 'stationlist.xml')
        stationdata = readStation(stationlist)
        voi_list = []
        voi_list.append(voi)
        variables = initialize(sm_grid, unc_grid, stationdata, dir, voi_list)
        uncertaintydata["map{0}".format(i)] = variables['uncertaintydata']
        data["map{0}".format(i)] = variables['data']

    list_size_mu = np.reshape(list_sizes_mu, [M * N, 1])
    list_size_grid = np.reshape(list_sizes_grid, [M * N, 1])
    sigma_arr = np.reshape(sigma_arr, [M * N, 1])

    # Set header information for file. Change if necessary
    f = h5py.File(filename, 'w')
    f.attrs['Conventions'] = 'COARDS, CF-1.5'
    f.attrs['title'] = 'filename'
    f.attrs['history'] = 'Created with python MultiHazardGrid.save(%s)' % filename
    f.attrs['GMT_version'] = 'NA'
    xvar = np.linspace(grid_attr.xmin, grid_attr.xmax, grid_attr.nx)
    yvar = np.linspace(grid_attr.ymin, grid_attr.ymax, grid_attr.ny)
    x = f.create_dataset('x', data=xvar, compression='gzip',
                         shape=xvar.shape, dtype=str(xvar.dtype))
    x.attrs['CLASS'] = 'DIMENSION_SCALE'
    x.attrs['NAME'] = 'x'
    x.attrs['_Netcdf4Dimid'] = 0  # no idea what this is
    x.attrs['long_name'] = 'x'
    x.attrs['actual_range'] = np.array((xvar[0], xvar[-1]))
    y = f.create_dataset('y', data=yvar, compression='gzip',
                         shape=yvar.shape, dtype=str(yvar.dtype))
    y.attrs['CLASS'] = 'DIMENSION_SCALE'
    y.attrs['NAME'] = 'y'
    y.attrs['_Netcdf4Dimid'] = 1  # no idea what this is
    y.attrs['long_name'] = 'y'
    y.attrs['actual_range'] = np.array((yvar[0], yvar[-1]))

    # Compute realizations of the field, COR
    for j in range(0, num_realizations):
        X = np.zeros([M * N, 1])
        for i in range(0, M * N):
            st_g = np.sum(list_size_grid[0:i])
            st_m = np.sum(list_size_mu[0:i])
            end_g = st_g + list_size_grid[i]
            end_m = st_m + list_size_mu[i]
            rand_arr = np.random.randn()
            nzeros = list_size_mu[i] - list_size_grid[i]
            x = np.append(np.zeros(nzeros),
                          X[np.array(grid_arr[st_g:end_g], dtype='i')])
            mu = np.dot(mu_arr[st_m:end_m], x)
            X[i] = mu + rand_arr * sigma_arr[i]
        COR = np.reshape(X, [M, N])
        layerkey = 'realization_%i' % j
        # Write data to file
        if write_correlation == True:
            dset = f.create_dataset(layerkey, data=COR, compression='gzip')
            dset.attrs['long_name'] = layerkey
        else:
            for i in range(0, multiple_maps + 1):
                xx = 'map%i' % i
                X = np.multiply(COR, uncertaintydata[xx][voi])
                DATA_NEW = data[xx][voi] * np.exp(X)
                dset = f.create_dataset(layerkey, data=DATA_NEW,
                                        compression='gzip')
                dset.attrs['long_name'] = layerkey
        if np.mod(j + 1, 25) == 0:
            print('Done with', j + 1, 'of', num_realizations, 'iterations.')
    f.close()
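
# A toy, self-contained illustration of the sequential-simulation update
# used in the realization loop above (X[i] = mu + eps * sigma, with mu
# conditioned on previously simulated points via Sig12.T*Sig11inv, and
# sigma the conditional standard deviation R). This 1-D example with an
# exponential correlation model is only a sketch of the idea, not the
# production code.
import numpy as np


def sequential_sim_1d(n=50, corr_len=5.0, seed=0):
    rng = np.random.default_rng(seed)
    x = np.arange(n, dtype=float)
    # Full correlation matrix for an exponential model
    C = np.exp(-np.abs(x[:, None] - x[None, :]) / corr_len)
    X = np.zeros(n)
    for i in range(n):
        if i == 0:
            mu, sigma2 = 0.0, 1.0
        else:
            c12 = C[i, :i]                      # Sig12
            c11inv = np.linalg.inv(C[:i, :i])   # Sig11inv
            w = c12 @ c11inv                    # Sig12.T * Sig11inv
            mu = w @ X[:i]                      # conditional mean
            sigma2 = 1.0 - w @ c12              # conditional variance (R**2)
        X[i] = mu + rng.standard_normal() * np.sqrt(sigma2)
    return X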
def godt2008(shakefile, config, uncertfile=None, saveinputs=False,
             displmodel=None, bounds=None, slopediv=100.,
             codiv=10., numstd=None, trimfile=None):
    """
    This function runs the Godt and others (2008) global method for a given
    ShakeMap. The Factor of Safety is calculated using infinite slope
    analysis assuming dry conditions. The method uses a threshold Newmark
    displacement and estimates areal coverage by doing the calculations for
    each slope quantile.

    Args:
        shakefile (str): Path to shakemap xml file.
        config (ConfigObj): ConfigObj of config file containing inputs
            required for running the model.
        uncertfile (str): Path to shakemap uncertainty xml file (optional).
        saveinputs (bool): Whether or not to return the model input layers,
            False (default) returns only the model output (one layer).
        displmodel (str): Newmark displacement regression model to use

            * ``'J_PGA'`` (default) -- PGA-based model, equation 6 from
              Jibson (2007).
            * ``'J_PGA_M'`` -- PGA and M-based model, equation 7 from
              Jibson (2007).
            * ``'RS_PGA_M'`` -- PGA and M-based model from Rathje and
              Saygili (2009).
            * ``'RS_PGA_PGV'`` -- PGA and PGV-based model, equation 6 from
              Saygili and Rathje (2008).

        bounds (dict): Optional dictionary with keys 'xmin', 'xmax', 'ymin',
            'ymax' that defines a subset of the shakemap area to compute.
        slopediv (float): Divide slope by this number to get slope in
            degrees (Verdin datasets need to be divided by 100).
        codiv (float): Divide cohesion input layer by this number (for the
            Godt method, divide by 10 because that is how it was
            calibrated).
        numstd (float): Number of (+/-) standard deviations to use if
            uncertainty is computed (uncertfile must be supplied).
        trimfile (str): Shapefile of earth's land masses to trim offshore
            areas of the model.

    Returns:
        dict: Dictionary containing output and input layers (if
        saveinputs=True):

        .. code-block:: python

            {
                'grid': mapio grid2D object,
                'label': 'label for colorbar and top line of subtitle',
                'type': 'output or input to model',
                'description': {'name': 'short reference of model',
                                'longref': 'full model reference',
                                'units': 'units of output',
                                'shakemap': 'information about shakemap used',
                                'event_id': 'shakemap event id',
                                'parameters': 'dictionary of model parameters used'}
            }

    Raises:
        NameError: when unable to parse the config correctly (probably a
            formatting issue in the configfile) or when unable to find the
            shakefile (Shakemap filepath) -- these cause program to end.
    """
    # TODO:
    #  - Add 'all' -- averages Dn from all four equations, add term to
    #    convert PGA and PGV to Ia and use other equations, add Ambraseys
    #    and Menu (1988) option.
    # Empty refs
    slopesref = 'unknown'
    slopelref = 'unknown'
    cohesionlref = 'unknown'
    cohesionsref = 'unknown'
    frictionsref = 'unknown'
    frictionlref = 'unknown'
    modellref = 'unknown'
    modelsref = 'unknown'

    # See if trimfile exists
    if trimfile is not None:
        if not os.path.exists(trimfile):
            print('trimfile defined does not exist: %s\n'
                  'Ocean will not be trimmed' % trimfile)
            trimfile = None
        elif os.path.splitext(trimfile)[1] != '.shp':
            # elif, so we don't call splitext on a trimfile that was
            # already set to None above
            print('trimfile must be a shapefile, ocean will not be trimmed')
            trimfile = None

    # Parse config
    try:
        # May want to add error handling so if refs aren't given, just
        # includes unknown
        slopefilepath = config['godt_2008']['layers']['slope']['filepath']
        slopeunits = config['godt_2008']['layers']['slope']['units']
        cohesionfile = config['godt_2008']['layers']['cohesion']['file']
        cohesionunits = config['godt_2008']['layers']['cohesion']['units']
        frictionfile = config['godt_2008']['layers']['friction']['file']
        frictionunits = config['godt_2008']['layers']['friction']['units']
        thick = float(config['godt_2008']['parameters']['thick'])
        uwt = float(config['godt_2008']['parameters']['uwt'])
        nodata_cohesion = \
            float(config['godt_2008']['parameters']['nodata_cohesion'])
        nodata_friction = \
            float(config['godt_2008']['parameters']['nodata_friction'])
        dnthresh = float(config['godt_2008']['parameters']['dnthresh'])
        fsthresh = float(config['godt_2008']['parameters']['fsthresh'])
        acthresh = float(config['godt_2008']['parameters']['acthresh'])
        try:
            slopemin = float(config['godt_2008']['parameters']['slopemin'])
        except:
            slopemin = 0.01
            print('No slopemin found in config file, using 0.01 deg '
                  'for slope minimum')
    except Exception as e:
        raise NameError('Could not parse configfile, %s' % e)

    if displmodel is None:
        try:
            displmodel = config['godt_2008']['parameters']['displmodel']
        except:
            print('No regression model specified, using default of J_PGA_M')
            displmodel = 'J_PGA_M'

    # TODO: Add error catching on units, make sure they are what they
    # should be for this model
    try:  # Try to fetch source information from config
        modelsref = config['godt_2008']['shortref']
        modellref = config['godt_2008']['longref']
        slopesref = config['godt_2008']['layers']['slope']['shortref']
        slopelref = config['godt_2008']['layers']['slope']['longref']
        cohesionsref = config['godt_2008']['layers']['cohesion']['shortref']
        cohesionlref = config['godt_2008']['layers']['cohesion']['longref']
        frictionsref = config['godt_2008']['layers']['friction']['shortref']
        frictionlref = config['godt_2008']['layers']['friction']['longref']
    except:
        print('Was not able to retrieve all references from config file. '
              'Continuing')

    # Figure out how/if need to cut anything
    geodict = ShakeGrid.getFileGeoDict(shakefile)  # , adjust='res')
    if bounds is not None:  # Make sure bounds are within ShakeMap Grid
        if geodict.xmin < geodict.xmax:  # only if signs are not opposite
            if (geodict.xmin > bounds['xmin'] or
                    geodict.xmax < bounds['xmax'] or
                    geodict.ymin > bounds['ymin'] or
                    geodict.ymax < bounds['ymax']):
                print('Specified bounds are outside shakemap area, using '
                      'ShakeMap bounds instead.')
                bounds = None

    if bounds is not None:
        tempgdict = GeoDict.createDictFromBox(bounds['xmin'], bounds['xmax'],
                                              bounds['ymin'], bounds['ymax'],
                                              geodict.dx, geodict.dy,
                                              inside=False)
        # If Shakemap geodict crosses the 180/-180 line, fix the geodict
        # so things don't break
        if geodict.xmin > geodict.xmax:
            if tempgdict.xmin < 0:
                geodict._xmin -= 360.
            else:
                geodict._xmax += 360.
        geodict = geodict.getBoundsWithin(tempgdict)

    basegeodict, firstcol = GDALGrid.getFileGeoDict(
        os.path.join(slopefilepath, 'slope_min.bil'))
    if basegeodict == geodict:
        sampledict = geodict
    else:
        sampledict = basegeodict.getBoundsWithin(geodict)

    # Do we need to subdivide baselayer?
    if 'divfactor' in config['godt_2008'].keys():
        divfactor = float(config['godt_2008']['divfactor'])
        if divfactor != 1.:
            # adjust sampledict so everything will be resampled (cut one
            # cell off each edge so it will be inside bounds)
            newxmin = sampledict.xmin - sampledict.dx/2. + \
                sampledict.dx/(2.*divfactor) + sampledict.dx
            newymin = sampledict.ymin - sampledict.dy/2. + \
                sampledict.dy/(2.*divfactor) + sampledict.dy
            newxmax = sampledict.xmax + sampledict.dx/2. - \
                sampledict.dx/(2.*divfactor) - sampledict.dx
            newymax = sampledict.ymax + sampledict.dy/2. - \
                sampledict.dy/(2.*divfactor) - sampledict.dy
            newdx = sampledict.dx / divfactor
            newdy = sampledict.dy / divfactor
            sampledict = GeoDict.createDictFromBox(newxmin, newxmax,
                                                   newymin, newymax,
                                                   newdx, newdy, inside=True)

    tmpdir = tempfile.mkdtemp()

    # Load in ShakeMap and get new geodictionary
    temp = ShakeGrid.load(shakefile)  # , adjust='res')
    junkfile = os.path.join(tmpdir, 'temp.bil')
    GDALGrid.copyFromGrid(temp.getLayer('pga')).save(junkfile)
    pga = quickcut(junkfile, sampledict, precise=True, method='bilinear')
    os.remove(junkfile)
    GDALGrid.copyFromGrid(temp.getLayer('pgv')).save(junkfile)
    pgv = quickcut(junkfile, sampledict, precise=True, method='bilinear')
    os.remove(junkfile)

    # Update geodictionary
    sampledict = pga.getGeoDict()

    t2 = temp.getEventDict()
    M = t2['magnitude']
    event_id = t2['event_id']
    shakedict = temp.getShakeDict()
    del(temp)

    # read in uncertainty if present
    if uncertfile is not None:
        try:
            temp = ShakeGrid.load(uncertfile)  # , adjust='res')
            GDALGrid.copyFromGrid(temp.getLayer('stdpga')).save(junkfile)
            uncertpga = quickcut(junkfile, sampledict, precise=True,
                                 method='bilinear', override=True)
            os.remove(junkfile)
            GDALGrid.copyFromGrid(temp.getLayer('stdpgv')).save(junkfile)
            uncertpgv = quickcut(junkfile, sampledict, precise=True,
                                 method='bilinear', override=True)
            os.remove(junkfile)
        except:
            print('Could not read uncertainty file, ignoring uncertainties')
            uncertfile = None
        if numstd is None:
            numstd = 1.

    # Read in all the slope files, divide all by 100 to get slope in
    # degrees (because input files are multiplied by 100.)
    slopes = []
    quantiles = ['slope_min.bil', 'slope10.bil', 'slope30.bil',
                 'slope50.bil', 'slope70.bil', 'slope90.bil',
                 'slope_max.bil']
    for quant in quantiles:
        tmpslp = quickcut(os.path.join(slopefilepath, quant), sampledict)
        tgd = tmpslp.getGeoDict()
        if tgd != sampledict:
            raise Exception('Input layers are not aligned to same geodict')
        else:
            slopes.append(tmpslp.getData() / slopediv)

    slopestack = np.dstack(slopes)

    # Change any zero slopes to a very small number to avoid dividing by
    # zero later
    slopestack[slopestack == 0] = 1e-8

    # Read in the cohesion and friction files and duplicate layers so they
    # are same shape as slope structure
    tempco = quickcut(cohesionfile, sampledict, method='near')
    tempco = tempco.getData()[:, :, np.newaxis] / codiv
    cohesion = np.repeat(tempco, 7, axis=2)
    cohesion[cohesion == -999.9] = nodata_cohesion
    cohesion = np.nan_to_num(cohesion)
    cohesion[cohesion == 0] = nodata_cohesion

    tempfric = quickcut(frictionfile, sampledict, method='near')
    tempfric = tempfric.getData().astype(float)[:, :, np.newaxis]
    friction = np.repeat(tempfric, 7, axis=2)
    friction[friction == -9999] = nodata_friction
    friction = np.nan_to_num(friction)
    friction[friction == 0] = nodata_friction

    # Do the calculations using Jibson (2007) PGA only model for Dn
    FS = (cohesion / (uwt * thick * np.sin(slopestack * (np.pi / 180.))) +
          np.tan(friction * (np.pi / 180.)) /
          np.tan(slopestack * (np.pi / 180.)))
    FS[FS < fsthresh] = fsthresh

    # Compute critical acceleration, in g
    # This gives ac in g; equations that multiply by g give ac in m/s2
    Ac = (FS - 1) * np.sin(slopestack * (np.pi / 180.)).astype(float)
    Ac[Ac < acthresh] = acthresh

    # Get PGA in g (PGA is %g in ShakeMap, convert to g)
    PGA = np.repeat(pga.getData()[:, :, np.newaxis] / 100., 7,
                    axis=2).astype(float)
    if 'PGV' in displmodel:  # Load in PGV also, in cm/sec
        PGV = np.repeat(pgv.getData()[:, :, np.newaxis], 7,
                        axis=2).astype(float)
    else:
        PGV = None

    if uncertfile is not None:
        stdpga = np.repeat(uncertpga.getData()[:, :, np.newaxis], 7,
                           axis=2).astype(float)
        stdpgv = np.repeat(uncertpgv.getData()[:, :, np.newaxis], 7,
                           axis=2).astype(float)
        # estimate PGA +/- numstd standard deviations
        PGAmin = np.exp(np.log(PGA * 100) - numstd * stdpga) / 100
        PGAmax = np.exp(np.log(PGA * 100) + numstd * stdpga) / 100
        if 'PGV' in displmodel:
            PGVmin = np.exp(np.log(PGV) - numstd * stdpgv)
            PGVmax = np.exp(np.log(PGV) + numstd * stdpgv)
        else:
            PGVmin = None
            PGVmax = None

    # Ignore errors so still runs when Ac > PGA, just leaves nan instead
    # of crashing.
    np.seterr(invalid='ignore')

    Dn, logDnstd, logtype = NMdisp(Ac, PGA, model=displmodel, M=M, PGV=PGV)
    if uncertfile is not None:
        Dnmin, logDnstdmin, logtype = NMdisp(Ac, PGAmin, model=displmodel,
                                             M=M, PGV=PGVmin)
        Dnmax, logDnstdmax, logtype = NMdisp(Ac, PGAmax, model=displmodel,
                                             M=M, PGV=PGVmax)

    PROB = Dn.copy()
    PROB[PROB < dnthresh] = 0.
    PROB[PROB >= dnthresh] = 1.
    PROB = np.sum(PROB, axis=2)
    if uncertfile is not None:
        PROBmin = Dnmin.copy()
        PROBmin[PROBmin <= dnthresh] = 0.
        PROBmin[PROBmin > dnthresh] = 1.
        PROBmin = np.sum(PROBmin, axis=2)
        PROBmax = Dnmax.copy()
        PROBmax[PROBmax <= dnthresh] = 0.
        PROBmax[PROBmax > dnthresh] = 1.
        PROBmax = np.sum(PROBmax, axis=2)

    # Map the count of exceeding slope quantiles (1-7) to areal coverage
    PROB[PROB == 1.] = 0.01
    PROB[PROB == 2.] = 0.10
    PROB[PROB == 3.] = 0.30
    PROB[PROB == 4.] = 0.50
    PROB[PROB == 5.] = 0.70
    PROB[PROB == 6.] = 0.90
    PROB[PROB == 7.] = 0.99

    if uncertfile is not None:
        PROBmin[PROBmin == 1.] = 0.01
        PROBmin[PROBmin == 2.] = 0.10
        PROBmin[PROBmin == 3.] = 0.30
        PROBmin[PROBmin == 4.] = 0.50
        PROBmin[PROBmin == 5.] = 0.70
        PROBmin[PROBmin == 6.] = 0.90
        PROBmin[PROBmin == 7.] = 0.99
        PROBmax[PROBmax == 1.] = 0.01
        PROBmax[PROBmax == 2.] = 0.10
        PROBmax[PROBmax == 3.] = 0.30
        PROBmax[PROBmax == 4.] = 0.50
        PROBmax[PROBmax == 5.] = 0.70
        PROBmax[PROBmax == 6.] = 0.90
        PROBmax[PROBmax == 7.] = 0.99

    if slopemin is not None:
        PROB[slopestack[:, :, 6] <= slopemin] = 0.
        # uncert too
        if uncertfile is not None:
            PROBmin[slopestack[:, :, 6] <= slopemin] = 0.
            PROBmax[slopestack[:, :, 6] <= slopemin] = 0.

    # Turn output and inputs into grids and put in maplayers dictionary
    maplayers = collections.OrderedDict()

    shakedetail = '%s_ver%s' % (shakedict['shakemap_id'],
                                shakedict['shakemap_version'])
    description = {'name': modelsref,
                   'longref': modellref,
                   'units': 'Proportion of Area Affected',
                   'shakemap': shakedetail,
                   'event_id': event_id,
                   'parameters': {'displmodel': displmodel,
                                  'thickness_m': thick,
                                  'unitwt_kNm3': uwt,
                                  'dnthresh_cm': dnthresh,
                                  'acthresh_g': acthresh,
                                  'fsthresh': fsthresh,
                                  'modeltype': 'Landslide'}}
    PROBgrid = GDALGrid(PROB, sampledict)
    if trimfile is not None:
        PROBgrid = trim_ocean(PROBgrid, trimfile)

    maplayers['model'] = {'grid': PROBgrid,
                          'label': 'Landslide - Proportion of Area Affected',
                          'type': 'output',
                          'description': description}

    if uncertfile is not None:
        PROBmingrid = GDALGrid(PROBmin, sampledict)
        PROBmaxgrid = GDALGrid(PROBmax, sampledict)
        if trimfile is not None:
            PROBmingrid = trim_ocean(PROBmingrid, trimfile)
            PROBmaxgrid = trim_ocean(PROBmaxgrid, trimfile)
        maplayers['modelmin'] = {'grid': PROBmingrid,
                                 'label': 'Landslide Probability-%1.2fstd' % numstd,
                                 'type': 'output',
                                 'description': description}
        maplayers['modelmax'] = {'grid': PROBmaxgrid,
                                 'label': 'Landslide Probability+%1.2fstd' % numstd,
                                 'type': 'output',
                                 'description': description}

    if saveinputs is True:
        maplayers['pga'] = {'grid': GDALGrid(PGA[:, :, 0], sampledict),
                            'label': 'PGA (g)',
                            'type': 'input',
                            'description': {'units': 'g',
                                            'shakemap': shakedetail}}
        if 'PGV' in displmodel:
            maplayers['pgv'] = {'grid': GDALGrid(PGV[:, :, 0], sampledict),
                                'label': 'PGV (cm/s)',
                                'type': 'input',
                                'description': {'units': 'cm/s',
                                                'shakemap': shakedetail}}
        maplayers['minFS'] = {'grid': GDALGrid(np.min(FS, axis=2), sampledict),
                              'label': 'Min Factor of Safety',
                              'type': 'input',
                              'description': {'units': 'unitless'}}
        maplayers['max slope'] = {'grid': GDALGrid(slopestack[:, :, -1], sampledict),
                                  'label': r'Maximum slope ($^\circ$)',
                                  'type': 'input',
                                  'description': {'units': 'degrees',
                                                  'name': slopesref,
                                                  'longref': slopelref}}
        maplayers['cohesion'] = {'grid': GDALGrid(cohesion[:, :, 0], sampledict),
                                 'label': 'Cohesion (kPa)',
                                 'type': 'input',
                                 'description': {'units': 'kPa (adjusted)',
                                                 'name': cohesionsref,
                                                 'longref': cohesionlref}}
        maplayers['friction angle'] = {'grid': GDALGrid(friction[:, :, 0], sampledict),
                                       'label': r'Friction angle ($^\circ$)',
                                       'type': 'input',
                                       'description': {'units': 'degrees',
                                                       'name': frictionsref,
                                                       'longref': frictionlref}}
        if uncertfile is not None:
            maplayers['pgamin'] = {'grid': GDALGrid(PGAmin[:, :, 0], sampledict),
                                   'label': 'PGA - %1.2fstd (g)' % numstd,
                                   'type': 'input',
                                   'description': {'units': 'g',
                                                   'shakemap': shakedetail}}
            maplayers['pgamax'] = {'grid': GDALGrid(PGAmax[:, :, 0], sampledict),
                                   'label': 'PGA + %1.2fstd (g)' % numstd,
                                   'type': 'input',
                                   'description': {'units': 'g',
                                                   'shakemap': shakedetail}}
        if 'PGV' in displmodel:
            if uncertfile is not None:
                maplayers['pgvmin'] = {'grid': GDALGrid(PGVmin[:, :, 0], sampledict),
                                       'label': 'PGV - %1.2fstd (cm/s)' % numstd,
                                       'type': 'input',
                                       'description': {'units': 'cm/s',
                                                       'shakemap': shakedetail}}
                maplayers['pgvmax'] = {'grid': GDALGrid(PGVmax[:, :, 0], sampledict),
                                       'label': 'PGV + %1.2fstd (cm/s)' % numstd,
                                       'type': 'input',
                                       'description': {'units': 'cm/s',
                                                       'shakemap': shakedetail}}

    shutil.rmtree(tmpdir)

    return maplayers
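
# Hypothetical driver for godt2008(). The .ini path, grid paths, and config
# layout below are placeholders inferred from the keys the function reads;
# they are not a documented interface.
from configobj import ConfigObj

config = ConfigObj('godt_2008.ini')  # must contain a [godt_2008] section
maplayers = godt2008('grid.xml', config, uncertfile='uncertainty.xml',
                     saveinputs=True)
prob_grid = maplayers['model']['grid']  # GDALGrid of proportion of area affected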
def holzer_liq(shakefile, config, uncertfile=None, saveinputs=False,
               modeltype=None, displmodel=None, probtype=None, bounds=None):
    """
    Method for computing the probability of liquefaction using the Holzer
    method, with the Wills et al. (2015) Vs30 map of California to define
    the susceptibility classes and the Fan et al. global water table model.
    """
    layers = config['holzer_liq_cal']['layers']
    vs30_file = layers['vs30']['file']
    wtd_file = layers['watertable']['file']
    shkgdict = ShakeGrid.getFileGeoDict(shakefile)
    fgeodict = GMTGrid.getFileGeoDict(vs30_file)[0]

    #-------------------------------------------------------------------------
    # Loading info
    #-------------------------------------------------------------------------
    shakemap = ShakeGrid.load(shakefile, fgeodict, resample=True,
                              method='linear', doPadding=True)
    PGA = shakemap.getLayer('pga').getData() / 100  # convert to g
    griddict, eventdict, specdict, fields, uncertainties = \
        getHeaderData(shakefile)
    mag = eventdict['magnitude']

    #-------------------------------------------------------------------------
    # Logistic function parameters from Vs30
    #-------------------------------------------------------------------------
    vs30_grid = GMTGrid.load(vs30_file)
    vs30 = vs30_grid.getData()
    a0 = np.zeros_like(vs30)
    b0 = np.zeros_like(vs30)
    c0 = np.zeros_like(vs30)
    a1 = np.zeros_like(vs30)
    b1 = np.zeros_like(vs30)
    c1 = np.zeros_like(vs30)
    for k, v in config['holzer_liq_cal']['parameters'].items():
        ind = np.where(vs30 == float(v[0]))
        a0[ind] = v[1]
        b0[ind] = v[2]
        c0[ind] = v[3]
        a1[ind] = v[4]
        b1[ind] = v[5]
        c1[ind] = v[6]

    #-------------------------------------------------------------------------
    # Water table
    #-------------------------------------------------------------------------
    wtd_grid = GMTGrid.load(wtd_file, fgeodict, resample=True,
                            method=layers['watertable']['interpolation'],
                            doPadding=True)
    tmp = wtd_grid._data
    tmp = np.nan_to_num(tmp)

    # Compute water weights
    w0, w1 = get_water_weights(tmp)

    #-------------------------------------------------------------------------
    # Compute probability of liquefaction
    #-------------------------------------------------------------------------
    prob0 = get_prob(PGA, a0, b0, c0, mag)
    prob1 = get_prob(PGA, a1, b1, c1, mag)
    prob = prob0 * w0 + prob1 * w1

    #-------------------------------------------------------------------------
    # Turn output and inputs into grids and put in maplayers dictionary
    #-------------------------------------------------------------------------
    maplayers = collections.OrderedDict()

    temp = shakemap.getShakeDict()
    shakedetail = '%s_ver%s' % (temp['shakemap_id'], temp['shakemap_version'])
    modelsref = config['holzer_liq_cal']['shortref']
    modellref = config['holzer_liq_cal']['longref']
    modeltype = 'Holzer/Wills'
    maplayers['model'] = {
        'grid': GDALGrid(prob, fgeodict),
        'label': 'Probability',
        'type': 'output',
        'description': {
            'name': modelsref,
            'longref': modellref,
            'units': 'coverage',
            'shakemap': shakedetail,
            'parameters': {'modeltype': modeltype}
        }
    }
    if saveinputs is True:
        maplayers['pga'] = {
            'grid': GDALGrid(PGA, fgeodict),
            'label': 'PGA (g)',
            'type': 'input',
            'description': {'units': 'g', 'shakemap': shakedetail}
        }
        maplayers['vs30'] = {
            'grid': GDALGrid(vs30, fgeodict),
            'label': 'Vs30 (m/s)',
            'type': 'input',
            'description': {'units': 'm/s'}
        }
        maplayers['wtd'] = {
            'grid': GDALGrid(wtd_grid._data, fgeodict),
            'label': 'wtd (m)',
            'type': 'input',
            'description': {'units': 'm'}
        }
    return maplayers
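
# Hypothetical call of holzer_liq(); the config file path is a placeholder
# and must provide the [holzer_liq_cal] section with vs30/watertable layer
# paths and the per-Vs30-class logistic parameters consumed above.
from configobj import ConfigObj

config = ConfigObj('holzer_liq_cal.ini')
maplayers = holzer_liq('grid.xml', config, saveinputs=True)
print(maplayers['model']['label'])  # 'Probability'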
def draw_contour(shakefile, popfile, oceanfile, oceangridfile, cityfile,
                 basename, borderfile=None, is_scenario=False):
    """Create a contour map showing population (greyscale) underneath
    contoured MMI.

    :param shakefile: String path to ShakeMap grid.xml file.
    :param popfile: String path to GDALGrid-compliant file containing
        population data.
    :param oceanfile: String path to file containing ocean vector data in a
        format compatible with fiona.
    :param oceangridfile: String path to file containing ocean grid data.
    :param cityfile: String path to file containing GeoNames cities data.
    :param basename: String path containing desired output PDF base name,
        i.e., /home/pager/exposure. ".pdf" and ".png" files will be made.
    :param borderfile: String path to file containing country border vector
        data (optional).
    :param is_scenario: Boolean indicating whether to watermark the map as
        a scenario.
    :returns: Tuple containing:
        - Name of PDF file created.
        - Name of PNG file created.
        - Cities object containing the cities that were rendered on the
          contour map.
    """
    # load the shakemap - for the time being, we're interpolating the
    # population data to the shakemap, which would be important
    # if we were doing math with the pop values. We're not, so I think
    # it's ok.
    shakegrid = ShakeGrid.load(shakefile, adjust='res')
    gd = shakegrid.getGeoDict()

    # Retrieve the epicenter - this will get used on the map
    clat = shakegrid.getEventDict()['lat']
    clon = shakegrid.getEventDict()['lon']

    # Load the population data, sample to shakemap
    popgrid = GDALGrid.load(popfile, samplegeodict=gd, resample=True)

    # load the ocean grid file (has 1s in ocean, 0s over land)
    # having this file saves us almost 30 seconds!
    oceangrid = GDALGrid.load(oceangridfile, samplegeodict=gd, resample=True)

    # load the cities data, limit to cities within shakemap bounds
    allcities = Cities.fromDefault()
    cities = allcities.limitByBounds((gd.xmin, gd.xmax, gd.ymin, gd.ymax))

    # define the map
    # first cope with the 180 meridian
    height = (gd.ymax - gd.ymin) * 111.191
    if gd.xmin < gd.xmax:
        width = (gd.xmax - gd.xmin) * np.cos(np.radians(clat)) * 111.191
        xmin, xmax, ymin, ymax = (gd.xmin, gd.xmax, gd.ymin, gd.ymax)
    else:
        xmin, xmax, ymin, ymax = (gd.xmin, gd.xmax, gd.ymin, gd.ymax)
        xmax += 360
        width = ((gd.xmax + 360) - gd.xmin) * \
            np.cos(np.radians(clat)) * 111.191

    aspect = width / height

    # if the aspect is not 1, then trim bounds in x or y direction
    # as appropriate
    if width > height:
        dw = (width - height) / 2.0  # this is width in km
        xmin = xmin + dw / (np.cos(np.radians(clat)) * 111.191)
        xmax = xmax - dw / (np.cos(np.radians(clat)) * 111.191)
        width = (xmax - xmin) * np.cos(np.radians(clat)) * 111.191
    if height > width:
        dh = (height - width) / 2.0  # this is height in km
        ymin = ymin + dh / 111.191
        ymax = ymax - dh / 111.191
        height = (ymax - ymin) * 111.191

    aspect = width / height
    figheight = FIGWIDTH / aspect
    bbox = (xmin, ymin, xmax, ymax)
    bounds = (xmin, xmax, ymin, ymax)
    figsize = (FIGWIDTH, figheight)

    # Create the MercatorMap object, which holds a separate but identical
    # axes object used to determine collisions between city labels.
    mmap = MercatorMap(bounds, figsize, cities, padding=0.5)
    fig = mmap.figure
    ax = mmap.axes
    # this needs to be done here so that city label collision detection
    # will work
    fig.canvas.draw()

    clon = xmin + (xmax - xmin) / 2
    clat = ymin + (ymax - ymin) / 2
    geoproj = mmap.geoproj
    proj = mmap.proj

    # project our population grid to the map projection
    projstr = proj.proj4_init
    popgrid_proj = popgrid.project(projstr)
    popdata = popgrid_proj.getData()
    newgd = popgrid_proj.getGeoDict()

    # Use our GMT-inspired palette class to create population and MMI
    # colormaps
    popmap = ColorPalette.fromPreset('pop')
    mmimap = ColorPalette.fromPreset('mmi')

    # set the image extent to that of the data
    img_extent = (newgd.xmin, newgd.xmax, newgd.ymin, newgd.ymax)
    plt.imshow(popdata, origin='upper', extent=img_extent,
               cmap=popmap.cmap, vmin=popmap.vmin, vmax=popmap.vmax,
               zorder=POP_ZORDER, interpolation='nearest')

    # draw 10m res coastlines
    ax.coastlines(resolution="10m", zorder=COAST_ZORDER)

    # draw country borders using natural earth data set
    if borderfile is not None:
        borders = ShapelyFeature(Reader(borderfile).geometries(),
                                 ccrs.PlateCarree())
        ax.add_feature(borders, zorder=COAST_ZORDER, edgecolor='black',
                       linewidth=2, facecolor='none')

    # clip the ocean data to the shakemap
    bbox = (gd.xmin, gd.ymin, gd.xmax, gd.ymax)
    oceanshapes = _clip_bounds(bbox, oceanfile)
    ax.add_feature(ShapelyFeature(oceanshapes, crs=geoproj),
                   facecolor=WATERCOLOR, zorder=OCEAN_ZORDER)

    # It turns out that when presented with a map that crosses the 180
    # meridian, the matplotlib/cartopy contouring routine thinks that the
    # 180 meridian is a map boundary and only plots one side of the
    # contour. Contouring the geographic MMI data and then projecting the
    # resulting contour vectors does the trick. Sigh.

    # define contour grid spacing
    contoury = np.linspace(ymin, ymax, gd.ny)
    contourx = np.linspace(xmin, xmax, gd.nx)

    # smooth the MMI data for contouring
    mmi = shakegrid.getLayer('mmi').getData()
    smoothed_mmi = gaussian_filter(mmi, FILTER_SMOOTH)

    # create masked arrays of the ocean grid
    landmask = np.ma.masked_where(oceangrid._data == 0.0, smoothed_mmi)
    oceanmask = np.ma.masked_where(oceangrid._data == 1.0, smoothed_mmi)

    # contour the data
    land_contour = plt.contour(contourx, contoury, np.flipud(oceanmask),
                               linewidths=3.0, linestyles='solid',
                               zorder=LANDC_ZORDER, cmap=mmimap.cmap,
                               vmin=mmimap.vmin, vmax=mmimap.vmax,
                               levels=np.arange(0.5, 10.5, 1.0),
                               transform=geoproj)
    ocean_contour = plt.contour(contourx, contoury, np.flipud(landmask),
                                linewidths=2.0, linestyles='dashed',
                                zorder=OCEANC_ZORDER, cmap=mmimap.cmap,
                                vmin=mmimap.vmin, vmax=mmimap.vmax,
                                levels=np.arange(0.5, 10.5, 1.0),
                                transform=geoproj)

    # the idea here is to plot invisible MMI contours at integer levels
    # and then label them. clabel won't allow the text to appear, which in
    # this case is kind of ok, because it allows us an easy way to draw
    # MMI labels as roman numerals.
    cs_land = plt.contour(contourx, contoury, np.flipud(oceanmask),
                          linewidths=0.0, levels=np.arange(0, 11),
                          zorder=CLABEL_ZORDER, transform=geoproj)
    clabel_text = ax.clabel(cs_land, np.arange(0, 11), colors='k',
                            zorder=CLABEL_ZORDER, fmt='%.0f', fontsize=40)
    for clabel in clabel_text:
        x, y = clabel.get_position()
        label_str = clabel.get_text()
        roman_label = MMI_LABELS[label_str]
        th = plt.text(x, y, roman_label, zorder=CLABEL_ZORDER,
                      ha='center', va='center', color='black',
                      weight='normal', size=16)
        th.set_path_effects([
            path_effects.Stroke(linewidth=2.0, foreground='white'),
            path_effects.Normal()
        ])

    cs_ocean = plt.contour(contourx, contoury, np.flipud(landmask),
                           linewidths=0.0, levels=np.arange(0, 11),
                           zorder=CLABEL_ZORDER, transform=geoproj)
    clabel_text = ax.clabel(cs_ocean, np.arange(0, 11), colors='k',
                            zorder=CLABEL_ZORDER, fmt='%.0f', fontsize=40)
    for clabel in clabel_text:
        x, y = clabel.get_position()
        label_str = clabel.get_text()
        roman_label = MMI_LABELS[label_str]
        th = plt.text(x, y, roman_label, zorder=CLABEL_ZORDER,
                      ha='center', va='center', color='black',
                      weight='normal', size=16)
        th.set_path_effects([
            path_effects.Stroke(linewidth=2.0, foreground='white'),
            path_effects.Normal()
        ])

    # draw meridians and parallels using Cartopy's functions for that
    gl = ax.gridlines(draw_labels=True, linewidth=2, color=(0.9, 0.9, 0.9),
                      alpha=0.5, linestyle='-', zorder=GRID_ZORDER)
    gl.xlabels_top = False
    gl.xlabels_bottom = False
    gl.ylabels_left = False
    gl.ylabels_right = False
    gl.xlines = True
    step = 1

    # let's floor/ceil the edges to nearest half a degree
    gxmin = np.floor(xmin * 2) / 2
    gxmax = np.ceil(xmax * 2) / 2
    gymin = np.floor(ymin * 2) / 2
    gymax = np.ceil(ymax * 2) / 2

    xlocs = np.linspace(gxmin, gxmax + 0.5, num=5)
    ylocs = np.linspace(gymin, gymax + 0.5, num=5)

    gl.xlocator = mticker.FixedLocator(xlocs)
    gl.ylocator = mticker.FixedLocator(ylocs)
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlabel_style = {'size': 15, 'color': 'black'}
    gl.ylabel_style = {'size': 15, 'color': 'black'}

    # TODO - figure out x/y axes data coordinates corresponding to 10%
    # from left and 10% from top; use geoproj and proj
    dleft = 0.01
    dtop = 0.97
    proj_str = proj.proj4_init
    merc_to_dd = pyproj.Proj(proj_str)

    # use built-in transforms to get from axes units to data units
    display_to_data = ax.transData.inverted()
    axes_to_display = ax.transAxes

    # these are x,y coordinates in projected space
    yleft, t1 = display_to_data.transform(
        axes_to_display.transform((dleft, 0.5)))
    t2, xtop = display_to_data.transform(
        axes_to_display.transform((0.5, dtop)))

    # these are coordinates in lon,lat space
    yleft_dd, t1_dd = merc_to_dd(yleft, t1, inverse=True)
    t2_dd, xtop_dd = merc_to_dd(t2, xtop, inverse=True)

    # drawing our own tick labels INSIDE the plot, as Cartopy doesn't seem
    # to support this.
    yrange = ymax - ymin
    xrange = xmax - xmin
    for xloc in gl.xlocator.locs:
        outside = xloc < xmin or xloc > xmax
        # don't draw labels when we're too close to either edge
        near_edge = (xloc - xmin) < (xrange * 0.1) or \
            (xmax - xloc) < (xrange * 0.1)
        if outside or near_edge:
            continue
        xtext = r'$%.1f^\circ$W' % (abs(xloc))
        ax.text(xloc, xtop_dd, xtext, fontsize=14, zorder=GRID_ZORDER,
                ha='center', fontname=DEFAULT_FONT,
                transform=ccrs.Geodetic())

    for yloc in gl.ylocator.locs:
        outside = yloc < gd.ymin or yloc > gd.ymax
        # don't draw labels when we're too close to either edge
        near_edge = (yloc - gd.ymin) < (yrange * 0.1) or \
            (gd.ymax - yloc) < (yrange * 0.1)
        if outside or near_edge:
            continue
        if yloc < 0:
            ytext = r'$%.1f^\circ$S' % (abs(yloc))
        else:
            ytext = r'$%.1f^\circ$N' % (abs(yloc))
        thing = ax.text(yleft_dd, yloc, ytext, fontsize=14,
                        zorder=GRID_ZORDER, va='center',
                        fontname=DEFAULT_FONT, transform=ccrs.Geodetic())

    # draw cities
    mapcities = mmap.drawCities(shadow=True, zorder=CITIES_ZORDER)

    # draw the figure border thickly
    # TODO - figure out how to draw map border
    # bwidth = 3
    # ax.spines['top'].set_visible(True)
    # ax.spines['left'].set_visible(True)
    # ax.spines['bottom'].set_visible(True)
    # ax.spines['right'].set_visible(True)
    # ax.spines['top'].set_linewidth(bwidth)
    # ax.spines['right'].set_linewidth(bwidth)
    # ax.spines['bottom'].set_linewidth(bwidth)
    # ax.spines['left'].set_linewidth(bwidth)

    # Get the corner of the map with the lowest population
    corner_rect, filled_corner = _get_open_corner(popgrid, ax)
    clat2 = round_to_nearest(clat, 1.0)
    clon2 = round_to_nearest(clon, 1.0)

    # draw a little globe in the corner showing in small-scale where the
    # earthquake is located.
    proj = ccrs.Orthographic(central_latitude=clat2,
                             central_longitude=clon2)
    ax2 = fig.add_axes(corner_rect, projection=proj)
    ax2.add_feature(cartopy.feature.OCEAN, zorder=0, facecolor=WATERCOLOR,
                    edgecolor=WATERCOLOR)
    ax2.add_feature(cartopy.feature.LAND, zorder=0, edgecolor='black')
    ax2.plot([clon2], [clat2], 'w*', linewidth=1, markersize=16,
             markeredgecolor='k', markerfacecolor='r')
    gh = ax2.gridlines()
    ax2.set_global()
    ax2.outline_patch.set_edgecolor('black')
    ax2.outline_patch.set_linewidth(2)

    # Draw the map scale in the unoccupied lower corner.
    corner = 'lr'
    if filled_corner == 'lr':
        corner = 'll'
    draw_scale(ax, corner, pady=0.05, padx=0.05)

    # Draw the epicenter as a black star
    plt.sca(ax)
    plt.plot(clon, clat, 'k*', markersize=16, zorder=EPICENTER_ZORDER,
             transform=geoproj)

    if is_scenario:
        plt.text(clon, clat, 'SCENARIO', fontsize=64,
                 zorder=WATERMARK_ZORDER, transform=geoproj,
                 alpha=0.2, color='red', horizontalalignment='center')

    # create pdf and png output file names
    pdf_file = basename + '.pdf'
    png_file = basename + '.png'

    # save to pdf
    plt.savefig(pdf_file)
    plt.savefig(png_file)

    return (pdf_file, png_file, mapcities)
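
# Hypothetical invocation of draw_contour(); every path below is a
# placeholder for the corresponding data file described in the docstring.
pdf_file, png_file, mapcities = draw_contour(
    'grid.xml', 'population.flt', 'ocean.json', 'ocean_grid.flt',
    'cities1000.txt', '/home/pager/exposure')
print('Wrote %s and %s' % (pdf_file, png_file))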
def computeHagg(grid2D, proj='moll', probthresh=0.0, shakefile=None,
                shakethreshtype='pga', shakethresh=0.0):
    """
    Computes the Aggregate Hazard (Hagg), which is equal to the
    probability * area of grid cell. For models that compute areal
    coverage, this is equivalent to the total predicted area affected
    in km2.

    Args:
        grid2D: grid2D object of model output.
        proj: projection to use to obtain equal area, 'moll' mollweide, or
            'laea' lambert equal area.
        probthresh: Probability threshold, any values less than this will
            not be included in aggregate hazard estimation.
        shakefile: Optional, path to shakemap file to use for ground motion
            threshold.
        shakethreshtype: Optional, type of ground motion to use for
            shakethresh, 'pga', 'pgv', or 'mmi'.
        shakethresh: Optional, float or list of shaking thresholds in %g
            for pga, cm/s for pgv, float for mmi.

    Returns:
        Aggregate hazard (float) if no shakethresh or only one shakethresh
        was defined; otherwise, a list of floats of aggregate hazard for
        all shakethresh values.
    """
    Hagg = []
    bounds = grid2D.getBounds()
    lat0 = np.mean((bounds[2], bounds[3]))
    lon0 = np.mean((bounds[0], bounds[1]))
    projs = ('+proj=%s +lat_0=%f +lon_0=%f +x_0=0 +y_0=0 +ellps=WGS84 '
             '+units=km +no_defs' % (proj, lat0, lon0))
    geodict = grid2D.getGeoDict()

    if shakefile is not None:
        if type(shakethresh) != list and type(shakethresh) != np.ndarray:
            shakethresh = [shakethresh]
        for shaket in shakethresh:
            if shaket < 0.:
                raise Exception('shaking threshold must be equal or '
                                'greater than zero')
        tmpdir = tempfile.mkdtemp()
        # resample shakemap to grid2D
        temp = ShakeGrid.load(shakefile)
        junkfile = os.path.join(tmpdir, 'temp.bil')
        GDALGrid.copyFromGrid(temp.getLayer(shakethreshtype)).save(junkfile)
        shk = quickcut(junkfile, geodict, precise=True, method='bilinear')
        shutil.rmtree(tmpdir)
        if shk.getGeoDict() != geodict:
            raise Exception('shakemap was not resampled to exactly the '
                            'same geodict as the model')

    if probthresh < 0.:
        raise Exception('probability threshold must be equal or greater '
                        'than zero')

    grid = grid2D.project(projection=projs, method='bilinear')
    geodictRS = grid.getGeoDict()
    cell_area_km2 = geodictRS.dx * geodictRS.dy
    model = grid.getData()
    model[np.isnan(model)] = -1.

    if shakefile is not None:
        for shaket in shakethresh:
            modcop = model.copy()
            shkgrid = shk.project(projection=projs)
            shkdat = shkgrid.getData()
            # use -1 to avoid nan errors and warnings, will always be
            # thrown out because default is 0.
            shkdat[np.isnan(shkdat)] = -1.
            modcop[shkdat < shaket] = -1.
            Hagg.append(np.sum(modcop[modcop >= probthresh] * cell_area_km2))
    else:
        Hagg.append(np.sum(model[model >= probthresh] * cell_area_km2))

    if len(Hagg) == 1:
        Hagg = Hagg[0]
    return Hagg
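
# A small numeric check of the aggregate-hazard definition used above:
# Hagg = sum(prob * cell_area) over cells at or above probthresh. With
# three 1 km2 cells of probabilities [0.2, 0.5, 0.05] and probthresh=0.1,
# Hagg = (0.2 + 0.5) * 1.0 = 0.7 km2.
import numpy as np

probs = np.array([0.2, 0.5, 0.05])
cell_area_km2 = 1.0
probthresh = 0.1
hagg = np.sum(probs[probs >= probthresh] * cell_area_km2)
assert np.isclose(hagg, 0.7)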
def get_exposures(grid, pop_file, shakefile=None, shakethreshtype=None,
                  shakethresh=None, probthresh=None):
    """
    Get exposure-based statistics.

    Args:
        grid: Model grid.
        pop_file (str): Path to the landscan population grid.
        shakefile (str): Optional, path to shakemap file to use for ground
            motion threshold.
        shakethreshtype (str): Optional, type of ground motion to use for
            shakethresh, 'pga', 'pgv', or 'mmi'.
        shakethresh: Optional, list of shaking thresholds in %g for pga,
            cm/s for pgv, float for mmi.
        probthresh: Optional, None or float, exclude any cells with
            probabilities less than or equal to this value.

    Returns:
        dict: Dictionary with keys named exp_pop_# where # is the
        shakethresh.
    """
    # If probthresh is defined, zero out any areas less than or equal to
    # probthresh before proceeding
    if probthresh is not None:
        origdata = grid.getData()
        moddat = origdata.copy()
        moddat[moddat <= probthresh] = 0.0
        moddat[np.isnan(origdata)] = float('nan')
    else:
        moddat = grid.getData()

    mdict = grid.getGeoDict()

    # Cut out area from population file
    popcut = quickcut(pop_file, mdict, precise=False,
                      extrasamp=2., method='nearest')
    popdat = popcut.getData()
    pdict = popcut.getGeoDict()

    # Pad grid with nans to beyond extent of pdict
    pad_dict = {}
    pad_dict['padleft'] = int(
        np.abs(np.ceil((mdict.xmin - pdict.xmin) / mdict.dx)))
    pad_dict['padright'] = int(
        np.abs(np.ceil((pdict.xmax - mdict.xmax) / mdict.dx)))
    pad_dict['padbottom'] = int(
        np.abs(np.ceil((mdict.ymin - pdict.ymin) / mdict.dy)))
    pad_dict['padtop'] = int(
        np.abs(np.ceil((pdict.ymax - mdict.ymax) / mdict.dy)))

    padgrid, mdict2 = Grid2D.padGrid(moddat, mdict, pad_dict)  # pads with inf
    padgrid[np.isinf(padgrid)] = float('nan')  # change to pad with nan
    padgrid = Grid2D(data=padgrid, geodict=mdict2)  # Turn into grid2d object

    # Resample model grid so as to be the nearest integer multiple of
    # popdict
    factor = np.round(pdict.dx / mdict2.dx)

    # Create geodictionary that is a factor of X higher res but otherwise
    # identical
    ndict = GeoDict.createDictFromBox(pdict.xmin, pdict.xmax,
                                      pdict.ymin, pdict.ymax,
                                      pdict.dx / factor, pdict.dy / factor)

    # Resample
    grid2 = padgrid.interpolate2(ndict, method='linear')

    # Get proportion of each cell that has values (to account properly
    # for any nans)
    prop = block_reduce(~np.isnan(grid2.getData().copy()),
                        block_size=(int(factor), int(factor)),
                        cval=float('nan'), func=np.sum) / (factor**2.)

    # Now block reduce to same geodict as popfile
    modresamp = block_reduce(grid2.getData().copy(),
                             block_size=(int(factor), int(factor)),
                             cval=float('nan'), func=np.nanmean)

    exp_pop = {}
    if shakefile is not None:
        # Resample shakefile to population grid
        # , doPadding=True, padValue=0.)
        shakemap = ShakeGrid.load(shakefile, resample=False)
        shakemap = shakemap.getLayer(shakethreshtype)
        shakemap = shakemap.interpolate2(pdict)
        shkdat = shakemap.getData()
        for shaket in shakethresh:
            threshmult = shkdat > shaket
            threshmult = threshmult.astype(float)
            exp_pop['exp_pop_%1.2fg' % (shaket / 100.,)] = np.nansum(
                popdat * prop * modresamp * threshmult)
    else:
        exp_pop['exp_pop_0.00g'] = np.nansum(popdat * prop * modresamp)

    return exp_pop
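
# Toy demonstration of the proportion trick above: block_reduce with np.sum
# over the boolean ~isnan mask, divided by factor**2, yields the fraction
# of valid (non-nan) fine cells inside each coarse cell.
import numpy as np
from skimage.measure import block_reduce

fine = np.array([[1., np.nan], [1., 1.]])
factor = 2
prop = block_reduce(~np.isnan(fine), block_size=(factor, factor),
                    func=np.sum) / factor**2
assert prop[0, 0] == 0.75  # 3 of the 4 fine cells are valid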
def computeHagg(grid2D, proj='moll', probthresh=0., shakefile=None,
                shakethreshtype='pga', shakethresh=0., stdgrid2D=None,
                stdtype='full', maxP=1., sill1=None, range1=None):
    """
    Computes the Aggregate Hazard (Hagg), which is equal to the
    probability * area of grid cell. For models that compute areal
    coverage, this is equivalent to the total predicted area affected
    in km2.

    Args:
        grid2D: grid2D object of model output.
        proj: projection to use to obtain equal area, 'moll' mollweide, or
            'laea' lambert equal area.
        probthresh: Probability threshold, any values less than this will
            not be included in aggregate hazard estimation.
        shakefile: Optional, path to shakemap file to use for ground motion
            threshold.
        shakethreshtype: Optional, type of ground motion to use for
            shakethresh, 'pga', 'pgv', or 'mmi'.
        shakethresh: Optional, float shaking threshold in %g for pga,
            cm/s for pgv, float for mmi.
        stdgrid2D: grid2D object of model standard deviations (optional).
        stdtype (str): assumption of spatial correlation used to compute
            the stdev of the statistics, 'max', 'min', 'mean' of max and
            min, or 'full' (default), which estimates the range of
            correlation and accounts for covariance. Will return 'mean'
            if range and sill cannot be estimated.
        maxP (float): the maximum possible probability of the model.
        sill1 (float): If known, the sill of the variogram of grid2D; will
            be estimated if None and stdtype='full'.
        range1 (float): If known, the range of the variogram of grid2D;
            will be estimated if None and stdtype='full'.

    Returns:
        dict: Dictionary with keys:
            hagg_#g where # is the shakethresh
            hagg_std_# if stdgrid2D is supplied (stdev of hagg)
            hlim_#, the maximum exposure value possible with the applied
                thresholds and given maxP value
            cell_area_km2, grid cell area
            p_hagg_#, beta distribution shape factor p (sometimes called
                alpha)
            q_hagg_#, beta distribution shape factor q (sometimes called
                beta)
    """
    bounds = grid2D.getBounds()
    lat0 = np.mean((bounds[2], bounds[3]))
    lon0 = np.mean((bounds[0], bounds[1]))
    projs = ('+proj=%s +lat_0=%f +lon_0=%f +x_0=0 +y_0=0 +ellps=WGS84 '
             '+units=km +no_defs' % (proj, lat0, lon0))
    geodict = grid2D.getGeoDict()

    if shakefile is not None:
        if shakethresh < 0.:
            raise Exception('shaking threshold must be equal or greater '
                            'than zero')
        # resample shakemap to grid2D
        temp = ShakeGrid.load(shakefile)
        shk = temp.getLayer(shakethreshtype)
        shk = shk.interpolate2(geodict)
        if shk.getGeoDict() != geodict:
            raise Exception('shakemap was not resampled to exactly the '
                            'same geodict as the model')

    if probthresh < 0.:
        raise Exception('probability threshold must be equal or greater '
                        'than zero')

    grid = grid2D.project(projection=projs, method='bilinear')
    geodictRS = grid.getGeoDict()
    cell_area_km2 = geodictRS.dx * geodictRS.dy
    model = grid.getData().copy()

    Hagg = {}
    if shakefile is not None:
        shkgrid = shk.project(projection=projs)
        shkdat = shkgrid.getData()
        model[shkdat < shakethresh] = float('nan')
    else:
        shakethresh = 0.
        shkdat = None

    mu = np.nansum(model[model >= probthresh] * cell_area_km2)
    Hagg['hagg_%1.2fg' % (shakethresh/100.,)] = mu
    Hagg['cell_area_km2'] = cell_area_km2
    N = np.nansum([model >= probthresh])
    # Hagg['N_%1.2fg' % (shakethresh/100.,)] = N
    hlim = cell_area_km2 * N * maxP
    Hagg['hlim_%1.2fg' % (shakethresh/100.,)] = hlim

    if stdgrid2D is not None:
        stdgrid = GDALGrid.copyFromGrid(stdgrid2D)  # Make a copy
        stdgrid = stdgrid.project(projection=projs, method='bilinear')
        std = stdgrid.getData().copy()
        if np.nanmax(std) > 0. and np.nanmax(model) >= probthresh:
            totalmin = cell_area_km2 * \
                np.sqrt(np.nansum((std[model >= probthresh])**2.))
            totalmax = np.nansum(std[model >= probthresh] * cell_area_km2)
            if stdtype == 'full':
                if sill1 is None or range1 is None:
                    range1, sill1 = semivario(grid.getData().copy(),
                                              probthresh,
                                              shakethresh=shakethresh,
                                              shakegrid=shkdat)
                if range1 is None:
                    # Use mean
                    Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = \
                        (totalmax + totalmin)/2.
                else:
                    # Zero out std at cells where the model probability was
                    # below the threshold because we aren't including those
                    # cells in Hagg
                    stdz = std.copy()
                    stdz[model < probthresh] = 0.
                    svar1 = svar(stdz, range1, sill1, scale=cell_area_km2)
                    Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = \
                        np.sqrt(svar1)
                    # Hagg['hagg_range_%1.2fg' % (shakethresh/100.,)] = range1
                    # Hagg['hagg_sill_%1.2fg' % (shakethresh/100.,)] = sill1
            elif stdtype == 'max':
                Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = totalmax
            elif stdtype == 'min':
                Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = totalmin
            else:
                Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = \
                    (totalmax + totalmin)/2.

            var = Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)]**2.
            # Beta distribution shape factors
            Hagg['p_hagg_%1.2fg' % (shakethresh/100.,)] = \
                (mu/hlim)*((hlim*mu-mu**2)/var-1)
            Hagg['q_hagg_%1.2fg' % (shakethresh/100.,)] = \
                (1-mu/hlim)*((hlim*mu-mu**2)/var-1)
        else:
            print('No model values above threshold, skipping uncertainty '
                  'and filling with zeros')
            Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = 0.
            Hagg['p_hagg_%1.2fg' % (shakethresh/100.,)] = 0.
            Hagg['q_hagg_%1.2fg' % (shakethresh/100.,)] = 0.
    else:
        print('No uncertainty provided, filling with zeros')
        Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = 0.
        Hagg['p_hagg_%1.2fg' % (shakethresh/100.,)] = 0.
        Hagg['q_hagg_%1.2fg' % (shakethresh/100.,)] = 0.

    return Hagg
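
# Sanity check of the beta shape factors computed above. For X ~ Beta(p, q)
# scaled to [0, hlim], the mean is hlim*p/(p+q) and the variance is
# hlim**2 * p*q / ((p+q)**2 * (p+q+1)); the p/q expressions used in
# computeHagg() invert those relations, as this toy calculation confirms.
mu, var, hlim = 40.0, 25.0, 200.0
p = (mu/hlim)*((hlim*mu - mu**2)/var - 1)
q = (1 - mu/hlim)*((hlim*mu - mu**2)/var - 1)
mean_back = hlim*p/(p + q)
var_back = hlim**2 * p*q / ((p + q)**2 * (p + q + 1))
assert abs(mean_back - mu) < 1e-9 and abs(var_back - var) < 1e-9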
def _test_intensity(): datadir = os.path.abspath( os.path.join(homedir, '..', 'data', 'eventdata', 'northridge')) shakefile = os.path.join(datadir, 'northridge_grid.xml') topofile = os.path.join(datadir, 'northridge_topo.grd') faultfile = os.path.join(datadir, 'northridge_fault.txt') cityfile = os.path.join(datadir, 'northridge_cities.txt') coastfile = os.path.join(datadir, 'northridge_coastline.json') countryfile = os.path.join(datadir, 'northridge_countries.json') statefile = os.path.join(datadir, 'northridge_states.json') lakefile = os.path.join(datadir, 'northridge_lakes.json') oceanfile = os.path.join(datadir, 'northridge_ocean.json') stationfile = os.path.join(datadir, 'northridge_stations.db') roadfile = os.path.join(datadir, 'northridge_roads.json') tancptfile = os.path.join(shakedir, 'shakemap', 'mapping', 'tan.cpt') shakecptfile = os.path.join(shakedir, 'shakemap', 'mapping', 'shakecpt.cpt') layerdict = { 'coast': coastfile, 'ocean': oceanfile, 'lake': lakefile, 'country': countryfile, 'roads': roadfile, 'state': statefile } tancolormap = GMTColorMap.loadFromCPT(tancptfile) shakecolormap = GMTColorMap.loadFromCPT(shakecptfile) cities = BasemapCities.loadFromCSV(cityfile) shakemap = ShakeGrid.load(shakefile, adjust='res') stations = StationList(stationfile) fault = Fault.readFaultFile(faultfile) edict = shakemap.getEventDict() eventdict = { 'lat': edict['lat'], 'lon': edict['lon'], 'depth': edict['depth'], 'mag': edict['magnitude'], 'time': edict['event_timestamp'] } source = Source(eventdict, fault) maker = MapMaker(shakemap, topofile, stations, fault, layerdict, source, cities) # draw intensity map outfolder = os.path.expanduser('~') maker.setIntensityLayer('mmi') maker.setIntensityGMTColorMap(shakecolormap) intensity_map = maker.drawIntensityMap(outfolder) print('Intensity map saved as: %s' % intensity_map) # draw contour maps maker.setContourGMTColorMap(tancolormap) # Draw pgv contours maker.setContourLayer('pgv') contour_pgv_map = maker.drawContourMap(outfolder) print('PGV contour map saved as: %s' % contour_pgv_map) # Draw pga contours maker.setContourLayer('pga') contour_pga_map = maker.drawContourMap(outfolder) print('PGA contour map saved as: %s' % contour_pga_map) # Draw psa0.3 contours maker.setContourLayer('psa03') contour_psa03_map = maker.drawContourMap(outfolder) print('PSA0.3 contour map saved as: %s' % contour_psa03_map) # Draw psa1.0 contours maker.setContourLayer('psa10') contour_psa10_map = maker.drawContourMap(outfolder) print('PSA1.0 contour map saved as: %s' % contour_psa10_map) # Draw psa3.0 contours maker.setContourLayer('psa30') contour_psa30_map = maker.drawContourMap(outfolder) print('PSA3.0 contour map saved as: %s' % contour_psa30_map)
def __init__(self, shakefile, config, uncertfile=None, saveinputs=False, slopefile=None, slopediv=1., bounds=None, numstd=1): """Set up the logistic model # ADD BOUNDS TO THIS MODEL :param config: configobj (config .ini file read in using configobj) defining the model and its inputs. Only one model should be described in each config file. :type config: dictionary :param shakefile: Full file path to shakemap.xml file for the event of interest :type shakefile: string :param uncertfile: Full file path to xml file of shakemap uncertainties :type uncertfile: string :param saveinputs: if True, saves all the input layers as Grid2D objects in addition to the model if false, it will just output the model :type saveinputs: boolean :param slopefile: optional file path to slopefile that will be resampled to the other input files for applying thresholds OVERWRITES VALUE IN CONFIG :type slopefile: string :param slopediv: number to divide slope by to get to degrees (usually will be default of 1.) :type slopediv: float :param numstd: number of +/- standard deviations to use if uncertainty is computed (uncertfile is not None) """ mnames = getLogisticModelNames(config) if len(mnames) == 0: raise Exception( 'No config file found or problem with config file format') if len(mnames) > 1: raise Exception( 'Config file contains more than one model which is no longer allowed,\ update your config file to the newer format') self.model = mnames[0] self.config = config cmodel = config[self.model] self.modeltype = cmodel['gfetype'] self.coeffs = validateCoefficients(cmodel) self.layers = validateLayers( cmodel) # key = layer name, value = file name self.terms, timeField = validateTerms(cmodel, self.coeffs, self.layers) self.interpolations = validateInterpolations(cmodel, self.layers) self.units = validateUnits(cmodel, self.layers) self.gmused = [ value for term, value in cmodel['terms'].items() if 'pga' in value.lower() or 'pgv' in value.lower() or 'mmi' in value.lower() ] self.modelrefs, self.longrefs, self.shortrefs = validateRefs(cmodel) self.numstd = numstd if cmodel['baselayer'] not in list(self.layers.keys()): raise Exception( 'You must specify a base layer corresponding to one of the files in the layer section.' 
) self.saveinputs = saveinputs if slopefile is None: try: self.slopefile = cmodel['slopefile'] except: print( 'Could not find slopefile term in config, no slope thresholds will be applied\n' ) self.slopefile = None else: self.slopefile = slopefile self.slopediv = slopediv #get the geodict for the shakemap geodict = ShakeGrid.getFileGeoDict(shakefile, adjust='res') griddict, eventdict, specdict, fields, uncertainties = getHeaderData( shakefile) #YEAR = eventdict['event_timestamp'].year MONTH = MONTHS[(eventdict['event_timestamp'].month) - 1] #DAY = eventdict['event_timestamp'].day #HOUR = eventdict['event_timestamp'].hour #now find the layer that is our base layer and get the largest bounds we can guarantee not to exceed shakemap bounds basefile = self.layers[cmodel['baselayer']] ftype = getFileType(basefile) if ftype == 'esri': basegeodict, firstcol = GDALGrid.getFileGeoDict(basefile) sampledict = basegeodict.getBoundsWithin(geodict) elif ftype == 'gmt': basegeodict, firstcol = GMTGrid.getFileGeoDict(basefile) sampledict = basegeodict.getBoundsWithin(geodict) else: raise Exception( 'All predictor variable grids must be a valid GMT or ESRI file type' ) #now load the shakemap, resampling and padding if necessary if ShakeGrid.getFileGeoDict(shakefile, adjust='res') == sampledict: self.shakemap = ShakeGrid.load(shakefile, adjust='res') flag = 1 else: self.shakemap = ShakeGrid.load(shakefile, samplegeodict=sampledict, resample=True, doPadding=True, adjust='res') flag = 0 # take uncertainties into account if uncertfile is not None: try: if flag == 1: self.uncert = ShakeGrid.load(uncertfile, adjust='res') else: self.uncert = ShakeGrid.load(uncertfile, samplegeodict=sampledict, resample=True, doPadding=True, adjust='res') except: print( 'Could not read uncertainty file, ignoring uncertainties') self.uncert = None else: self.uncert = None #load the predictor layers into a dictionary self.layerdict = {} # key = layer name, value = grid object for layername, layerfile in self.layers.items(): if isinstance(layerfile, list): for lfile in layerfile: if timeField == 'MONTH': if lfile.find(MONTH) > -1: layerfile = lfile ftype = getFileType(layerfile) interp = self.interpolations[layername] if ftype == 'gmt': if GMTGrid.getFileGeoDict( layerfile)[0] == sampledict: lyr = GMTGrid.load(layerfile) else: lyr = GMTGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True) elif ftype == 'esri': if GDALGrid.getFileGeoDict( layerfile)[0] == sampledict: lyr = GDALGrid.load(layerfile) else: lyr = GDALGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True) else: msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % ( layername, layerfile) raise Exception(msg) self.layerdict[layername] = lyr else: #first, figure out what kind of file we have (or is it a directory?) ftype = getFileType(layerfile) interp = self.interpolations[layername] if ftype == 'gmt': if GMTGrid.getFileGeoDict(layerfile)[0] == sampledict: lyr = GMTGrid.load(layerfile) else: lyr = GMTGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True) elif ftype == 'esri': if GDALGrid.getFileGeoDict(layerfile)[0] == sampledict: lyr = GDALGrid.load(layerfile) else: lyr = GDALGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True) else: msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' 
% ( layername, layerfile) raise Exception(msg) self.layerdict[layername] = lyr shapes = {} for layername, layer in self.layerdict.items(): shapes[layername] = layer.getData().shape self.nuggets = [str(self.coeffs['b0'])] ckeys = list(self.terms.keys()) ckeys.sort() for key in ckeys: term = self.terms[key] coeff = self.coeffs[key] self.nuggets.append('(%g * %s)' % (coeff, term)) self.equation = ' + '.join(self.nuggets) if self.uncert is not None: self.nugmin = copy.copy(self.nuggets) self.nugmax = copy.copy(self.nuggets) # Find the term with the shakemap input and replace for these nuggets for k, nug in enumerate(self.nuggets): if "self.shakemap.getLayer('pga').getData()" in nug: self.nugmin[k] = self.nugmin[k].replace( "self.shakemap.getLayer('pga').getData()", "(np.exp(np.log(self.shakemap.getLayer('pga').getData())\ - self.numstd * self.uncert.getLayer('stdpga').getData()))" ) self.nugmax[k] = self.nugmax[k].replace( "self.shakemap.getLayer('pga').getData()", "(np.exp(np.log(self.shakemap.getLayer('pga').getData())\ + self.numstd * self.uncert.getLayer('stdpga').getData()))" ) elif "self.shakemap.getLayer('pgv').getData()" in nug: self.nugmin[k] = self.nugmin[k].replace( "self.shakemap.getLayer('pgv').getData()", "(np.exp(np.log(self.shakemap.getLayer('pgv').getData())\ - self.numstd * self.uncert.getLayer('stdpgv').getData()))" ) self.nugmax[k] = self.nugmax[k].replace( "self.shakemap.getLayer('pgv').getData()", "(np.exp(np.log(self.shakemap.getLayer('pgv').getData())\ + self.numstd * self.uncert.getLayer('stdpgv').getData()))" ) elif "self.shakemap.getLayer('mmi').getData()" in nug: self.nugmin[k] = self.nugmin[k].replace( "self.shakemap.getLayer('mmi').getData()", "(np.exp(np.log(self.shakemap.getLayer('mmi').getData())\ - self.numstd * self.uncert.getLayer('stdmmi').getData()))" ) self.nugmax[k] = self.nugmax[k].replace( "self.shakemap.getLayer('mmi').getData()", "(np.exp(np.log(self.shakemap.getLayer('mmi').getData())\ + self.numstd * self.uncert.getLayer('stdmmi').getData()))" ) self.equationmin = ' + '.join(self.nugmin) self.equationmax = ' + '.join(self.nugmax) else: self.equationmin = None self.equationmax = None self.geodict = self.shakemap.getGeoDict() try: self.slopemin = float(config[self.model]['slopemin']) self.slopemax = float(config[self.model]['slopemax']) except: print( 'could not find slopemin and/or slopemax in config, no limits will be applied' ) self.slopemin = 0. self.slopemax = 90.
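# ---------------------------------------------------------------------------
# A hedged construction sketch for the (older) __init__ above; it assumes
# these methods belong to a class named LogisticModel, as the
# getLogisticModelNames() helper suggests, and that 'config.ini' and
# 'grid.xml' (both hypothetical paths) describe exactly one model.
def _example_logistic_model_v1():
    from configobj import ConfigObj
    config = ConfigObj('config.ini')     # hypothetical model config file
    lm = LogisticModel('grid.xml', config, uncertfile=None,
                       saveinputs=False, slopediv=1., numstd=1)
    print(lm.equation)                   # the assembled logistic equation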
def __init__(self, shakefile, config, uncertfile=None, saveinputs=False, slopefile=None, bounds=None, slopemod=None, trimfile=None): """ Sets up the logistic model Args: shakefile (str): Path to shakemap grid.xml file for the event. config: configobj object defining the model and its inputs. Only one model should be described in each config file. uncertfile (str): Path to uncertainty.xml file. saveinputs (bool): Save input layers as Grid2D objects in addition to the model? If false (the default), it will just output the model. slopefile (str): Optional path to slopefile that will be resampled to the other input files for applying thresholds. OVERWRITES VALUE IN CONFIG. bounds (dict): Default of None uses ShakeMap boundaries, otherwise a dictionary of boundaries to cut to like .. code-block:: python bounds = { 'xmin': lonmin, 'xmax': lonmax, 'ymin': latmin, 'ymax': latmax } slopemod (str): How slope input should be modified to be in degrees: e.g., ``np.arctan(slope) * 180. / np.pi`` or ``slope/100.`` (note that this may be in the config file already). trimfile (str): shapefile of earth's landmasses to use to cut offshore areas. """ mnames = getLogisticModelNames(config) if len(mnames) == 0: raise Exception('No config file found or problem with config ' 'file format') if len(mnames) > 1: raise Exception('Config file contains more than one model which ' 'is no longer allowed, update your config file ' 'to the newer format') self.model = mnames[0] self.config = config cmodel = config[self.model] self.modeltype = cmodel['gfetype'] self.coeffs = validateCoefficients(cmodel) # key = layer name, value = file name self.layers = validateLayers(cmodel) self.terms, timeField = validateTerms(cmodel, self.coeffs, self.layers) self.interpolations = validateInterpolations(cmodel, self.layers) self.units = validateUnits(cmodel, self.layers) self.gmused = [ value for term, value in cmodel['terms'].items() if 'pga' in value.lower() or 'pgv' in value.lower() or 'mmi' in value.lower() ] self.modelrefs, self.longrefs, self.shortrefs = validateRefs(cmodel) #self.numstd = numstd self.clips = validateClips(cmodel, self.layers, self.gmused) self.notes = '' if cmodel['baselayer'] not in list(self.layers.keys()): raise Exception('You must specify a base layer corresponding to ' 'one of the files in the layer section.') self.saveinputs = saveinputs if slopefile is None: try: self.slopefile = cmodel['slopefile'] except: # print('Slopefile not specified in config, no slope ' # 'thresholds will be applied\n') self.slopefile = None else: self.slopefile = slopefile if slopemod is None: try: self.slopemod = cmodel['slopemod'] except: self.slopemod = None # See if trimfile exists if trimfile is not None: if not os.path.exists(trimfile): print('trimfile defined does not exist: %s\nOcean will not be ' 'trimmed' % trimfile) self.trimfile = None elif os.path.splitext(trimfile)[1] != '.shp': print('trimfile must be a shapefile, ocean will not be ' 'trimmed') self.trimfile = None else: self.trimfile = trimfile else: self.trimfile = None # Get month of event griddict, eventdict, specdict, fields, uncertainties = \ getHeaderData(shakefile) MONTH = MONTHS[(eventdict['event_timestamp'].month) - 1] # Figure out how/if need to cut anything geodict = ShakeGrid.getFileGeoDict(shakefile, adjust='res') if bounds is not None: # Make sure bounds are within ShakeMap Grid if geodict.xmin < geodict.xmax: # only if signs are not opposite if (geodict.xmin > bounds['xmin'] or geodict.xmax < bounds['xmax'] or geodict.ymin > bounds['ymin'] or 
geodict.ymax < bounds['ymax']): print('Specified bounds are outside shakemap area, using ' 'ShakeMap bounds instead.') bounds = None if bounds is not None: tempgdict = GeoDict.createDictFromBox(bounds['xmin'], bounds['xmax'], bounds['ymin'], bounds['ymax'], geodict.dx, geodict.dy, inside=False) # If Shakemap geodict crosses 180/-180 line, fix geodict so things don't break if geodict.xmin > geodict.xmax: if tempgdict.xmin < 0: geodict._xmin -= 360. else: geodict._xmax += 360. gdict = geodict.getBoundsWithin(tempgdict) else: gdict = geodict # Now find the layer that is our base layer and get the largest bounds # we can guarantee not to exceed shakemap bounds basefile = self.layers[cmodel['baselayer']] ftype = getFileType(basefile) if ftype == 'esri': basegeodict, firstcol = GDALGrid.getFileGeoDict(basefile) if basegeodict == gdict: sampledict = gdict else: sampledict = basegeodict.getBoundsWithin(gdict) elif ftype == 'gmt': basegeodict, firstcol = GMTGrid.getFileGeoDict(basefile) if basegeodict == gdict: sampledict = gdict else: sampledict = basegeodict.getBoundsWithin(gdict) else: raise Exception('All predictor variable grids must be a valid ' 'GMT or ESRI file type.') # Do we need to subdivide baselayer? if 'divfactor' in self.config[self.model].keys(): divfactor = float(self.config[self.model]['divfactor']) if divfactor != 1.: # adjust sampledict so everything will be resampled newxmin = sampledict.xmin - sampledict.dx / \ 2. + sampledict.dx/(2.*divfactor) newymin = sampledict.ymin - sampledict.dy / \ 2. + sampledict.dy/(2.*divfactor) newxmax = sampledict.xmax + sampledict.dx / \ 2. - sampledict.dx/(2.*divfactor) newymax = sampledict.ymax + sampledict.dy / \ 2. - sampledict.dy/(2.*divfactor) newdx = sampledict.dx / divfactor newdy = sampledict.dy / divfactor sampledict = GeoDict.createDictFromBox(newxmin, newxmax, newymin, newymax, newdx, newdy, inside=True) # Find slope thresholds, if applicable self.slopemin = 'none' self.slopemax = 'none' if self.slopefile is not None: try: self.slopemin = float(config[self.model]['slopemin']) self.slopemax = float(config[self.model]['slopemax']) except: print('Could not find slopemin and/or slopemax in config, ' 'limits. 
No slope thresholds will be applied.') self.slopemin = 'none' self.slopemax = 'none' # Make temporary directory for hdf5 pytables file storage self.tempdir = tempfile.mkdtemp() # now load the shakemap, resampling and padding if necessary temp = ShakeGrid.load(shakefile) # , adjust='res') self.shakedict = temp.getShakeDict() self.eventdict = temp.getEventDict() self.shakemap = {} # Read both PGA and PGV in, may need them for thresholds for gm in ['pga', 'pgv']: junkfile = os.path.join(self.tempdir, 'temp.bil') GDALGrid.copyFromGrid(temp.getLayer(gm)).save(junkfile) if gm in self.interpolations.keys(): intermeth = self.interpolations[gm] else: intermeth = 'bilinear' junkgrid = quickcut(junkfile, sampledict, precise=True, method=intermeth) if gm in self.clips: junkgrid.setData( np.clip(junkgrid.getData(), self.clips[gm][0], self.clips[gm][1])) self.shakemap[gm] = TempHdf( junkgrid, os.path.join(self.tempdir, '%s.hdf5' % gm)) os.remove(junkfile) del (temp) # get updated geodict sampledict = junkgrid.getGeoDict() # take uncertainties into account, if available if uncertfile is not None: self.uncert = {} try: # Only read in the ones that will be needed temp = ShakeGrid.load(uncertfile) already = [] for gm in self.gmused: if 'pgv' in gm: gmsimp = 'pgv' elif 'pga' in gm: gmsimp = 'pga' elif 'mmi' in gm: gmsimp = 'mmi' if gmsimp in already: continue junkfile = os.path.join(self.tempdir, 'temp.bil') GDALGrid.copyFromGrid(temp.getLayer('std%s' % gmsimp)).save(junkfile) if gmsimp in self.interpolations.keys(): intermeth = self.interpolations[gmsimp] else: intermeth = 'bilinear' junkgrid = quickcut(junkfile, sampledict, precise=True, method=intermeth) if gmsimp in self.clips: junkgrid.setData( np.clip(junkgrid.getData(), self.clips[gmsimp][0], self.clips[gmsimp][1])) self.uncert['std' + gmsimp] = TempHdf( junkgrid, os.path.join(self.tempdir, 'std%s.hdf5' % gmsimp)) already.append(gmsimp) os.remove(junkfile) del (temp) except: print('Could not read uncertainty file, ignoring ' 'uncertainties') self.uncert = None else: self.uncert = None # Load the predictor layers, save as hdf5 temporary files, put file # locations into a dictionary. 
# Will be replaced in the next section if a slopefile was defined self.nonzero = None # key = layer name, value = grid object self.layerdict = {} didslope = False for layername, layerfile in self.layers.items(): start = timer() if isinstance(layerfile, list): for lfile in layerfile: if timeField == 'MONTH': if lfile.find(MONTH) > -1: layerfile = lfile ftype = getFileType(layerfile) interp = self.interpolations[layername] temp = quickcut(layerfile, sampledict, precise=True, method=interp) if layername in self.clips: temp.setData( np.clip(temp.getData(), self.clips[layername][0], self.clips[layername][1])) self.layerdict[layername] = TempHdf( temp, os.path.join(self.tempdir, '%s.hdf5' % layername)) del (temp) else: interp = self.interpolations[layername] temp = quickcut(layerfile, sampledict, precise=True, method=interp) if layername in self.clips: temp.setData( np.clip(temp.getData(), self.clips[layername][0], self.clips[layername][1])) if layername == 'rock': # Convert unconsolidated sediments to a more reasonable coefficient sub1 = temp.getData() # Change to mixed sed rock coeff sub1[sub1 <= -3.21] = -1.36 temp.setData(sub1) self.notes += 'unconsolidated sediment coefficient changed\ to -1.36 (weaker) from -3.22 to better reflect that this \ unit is not actually strong\n' self.layerdict[layername] = TempHdf( temp, os.path.join(self.tempdir, '%s.hdf5' % layername)) td = temp.getGeoDict() if td != sampledict: raise Exception( 'Geodictionaries of resampled files do not match') if layerfile == self.slopefile: flag = 0 if self.slopemin == 'none' and self.slopemax == 'none': flag = 1 if self.slopemod is None: slope1 = temp.getData().astype(float) slope = 0 else: try: slope = temp.getData().astype(float) slope1 = eval(self.slopemod) except: print('slopemod provided not valid, continuing ' 'without slope thresholds.') flag = 1 if flag == 0: nonzero = np.array([(slope1 > self.slopemin) & (slope1 <= self.slopemax)]) self.nonzero = nonzero[0, :, :] del (slope1) del (slope) else: # Still remove areas where the slope equals exactly # 0.0 to remove offshore liq areas. nonzero = np.array([slope1 != 0.0]) self.nonzero = nonzero[0, :, :] del (slope1) didslope = True del (temp) print('Loading %s layer: %1.1f sec' % (layername, timer() - start)) if didslope is False and self.slopefile is not None: # Slope didn't get read in yet temp = quickcut(self.slopefile, sampledict, precise=True, method='bilinear') flag = 0 if self.slopemin == 'none' and self.slopemax == 'none': flag = 1 if self.slopemod is None: slope1 = temp.getData().astype(float) slope = 0 else: try: slope = temp.getData().astype(float) slope1 = eval(self.slopemod) except: print('slopemod provided not valid, continuing without ' 'slope thresholds') flag = 1 if flag == 0: nonzero = np.array([ (slope1 > self.slopemin) & (slope1 <= self.slopemax) ]) self.nonzero = nonzero[0, :, :] del (slope1) del (slope) else: # Still remove areas where the slope equals exactly # 0.0 to remove offshore liq areas. nonzero = np.array([slope1 != 0.0]) self.nonzero = nonzero[0, :, :] del (slope1) self.nuggets = [str(self.coeffs['b0'])] ckeys = list(self.terms.keys()) ckeys.sort() for key in ckeys: term = self.terms[key] coeff = self.coeffs[key] self.nuggets.append('(%g * %s)' % (coeff, term)) self.equation = ' + '.join(self.nuggets) self.geodict = sampledict
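# ---------------------------------------------------------------------------
# A hedged construction sketch for the newer __init__ above (same
# LogisticModel class-name assumption as before); the bounds values and
# file paths are hypothetical.
def _example_logistic_model_v2():
    from configobj import ConfigObj
    config = ConfigObj('config.ini')
    bounds = {'xmin': -119., 'xmax': -117., 'ymin': 33., 'ymax': 35.}
    lm = LogisticModel('grid.xml', config, uncertfile='uncertainty.xml',
                       bounds=bounds, trimfile=None)
    print(lm.equation)
    print(lm.geodict)                    # sampling geodict used for layers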
def _test_intensity(): datadir = os.path.abspath(os.path.join( homedir, '..', 'data', 'eventdata', 'northridge')) shakefile = os.path.join(datadir, 'northridge_grid.xml') topofile = os.path.join(datadir, 'northridge_topo.grd') faultfile = os.path.join(datadir, 'northridge_fault.txt') cityfile = os.path.join(datadir, 'northridge_cities.txt') coastfile = os.path.join(datadir, 'northridge_coastline.json') countryfile = os.path.join(datadir, 'northridge_countries.json') statefile = os.path.join(datadir, 'northridge_states.json') lakefile = os.path.join(datadir, 'northridge_lakes.json') oceanfile = os.path.join(datadir, 'northridge_ocean.json') stationfile = os.path.join(datadir, 'northridge_stations.db') roadfile = os.path.join(datadir, 'northridge_roads.json') tancptfile = os.path.join(shakedir, 'shakemap', 'mapping', 'tan.cpt') shakecptfile = os.path.join( shakedir, 'shakemap', 'mapping', 'shakecpt.cpt') layerdict = {'coast': coastfile, 'ocean': oceanfile, 'lake': lakefile, 'country': countryfile, 'roads': roadfile, 'state': statefile} tancolormap = ColorPalette.fromPreset('shaketopo') shakecolormap = ColorPalette.fromPreset('mmi') cities = BasemapCities.loadFromCSV(cityfile) shakemap = ShakeGrid.load(shakefile, adjust='res') stations = StationList(stationfile) fault = Fault.readFaultFile(faultfile) edict = shakemap.getEventDict() eventdict = {'lat': edict['lat'], 'lon': edict['lon'], 'depth': edict['depth'], 'mag': edict['magnitude'], 'time': edict['event_timestamp']} source = Source(eventdict, fault) maker = MapMaker(shakemap, topofile, stations, fault, layerdict, source, cities) # draw intensity map outfolder = os.path.expanduser('~') maker.setIntensityLayer('mmi') maker.setIntensityGMTColorMap(shakecolormap) intensity_map = maker.drawIntensityMap(outfolder) print('Intensity map saved as: %s' % intensity_map) # draw contour maps maker.setContourGMTColorMap(tancolormap) # Draw pgv contours maker.setContourLayer('pgv') contour_pgv_map = maker.drawContourMap(outfolder) print('PGV contour map saved as: %s' % contour_pgv_map) # Draw pga contours maker.setContourLayer('pga') contour_pga_map = maker.drawContourMap(outfolder) print('PGA contour map saved as: %s' % contour_pga_map) # Draw psa0.3 contours maker.setContourLayer('psa03') contour_psa03_map = maker.drawContourMap(outfolder) print('PSA0.3 contour map saved as: %s' % contour_psa03_map) # Draw psa1.0 contours maker.setContourLayer('psa10') contour_psa10_map = maker.drawContourMap(outfolder) print('PSA1.0 contour map saved as: %s' % contour_psa10_map) # Draw psa3.0 contours maker.setContourLayer('psa30') contour_psa30_map = maker.drawContourMap(outfolder) print('PSA3.0 contour map saved as: %s' % contour_psa30_map)
def HAZUS(shakefile, config, uncertfile=None, saveinputs=False,
          modeltype='coverage', regressionmodel='J_PGA',
          probtype='jibson2000', bounds=None):
    """
    Runs the HAZUS landslide procedure (FEMA, 2003, Chapter 4) using the
    susceptibility categories (I-X) defined by the HAZUS manual.

    :param shakefile: URL or complete file path to the location of the
      Shakemap to use as input
    :type shakefile: string
    :param config: Model configuration file object containing locations of
      input files and other input values
      config = ConfigObj(configfilepath)
    :type config: ConfigObj
    :param uncertfile: Path to uncertainty.xml for the shakefile (ground
      motion uncertainty is not yet implemented for this model)
    :type uncertfile: string
    :param saveinputs: Whether or not to return the model input layers,
      False (default) returns only the model output (one layer)
    :type saveinputs: boolean
    :param modeltype:
      'coverage' - if critical acceleration is exceeded by pga, this gives
        the estimated areal coverage of landsliding for that cell
      'dn_hazus' - Outputs Newmark displacement using HAZUS methods without
        relating to probability of failure
      'dn_prob' - Estimates Newmark displacement using HAZUS methods and
        relates to probability of failure using param probtype
      'ac_classic_dn' - Uses the critical acceleration defined by HAZUS
        methodology and the regression model defined by the regressionmodel
        param to get Newmark displacement without relating to probability
        of failure
      'ac_classic_prob' - Uses the critical acceleration defined by HAZUS
        methodology and the regression model defined by the regressionmodel
        param to get Newmark displacement and probability defined by the
        probtype method
    :type modeltype: string
    :param regressionmodel: Newmark displacement regression model to use
      'J_PGA' (default) - PGA-based model from Jibson (2007) - equation 6
      'J_PGA_M' - PGA and M-based model from Jibson (2007) - equation 7
      'RS_PGA_M' - PGA and M-based model from Rathje and Saygili (2009)
      'RS_PGA_PGV' - PGA and PGV-based model from Saygili and Rathje
        (2008) - equation 6
    :type regressionmodel: string
    :param probtype: Method used to estimate probability. Entering
      'jibson2000' uses equation 5 from Jibson et al. (2000) to estimate
      probability from Newmark displacement. 'threshold' uses a specified
      threshold of Newmark displacement (defined in config file) and
      assumes anything greater than this threshold fails
    :type probtype: string
    :param bounds: Boundaries to compute over if different from ShakeMap
      boundaries, as a dictionary with keys 'xmin', 'xmax', 'ymin', 'ymax'
    :returns maplayers: Dictionary containing output and input layers (if
      saveinputs=True) along with metadata formatted like
      maplayers['layer name'] = {
          'grid': mapio grid2D object,
          'label': 'label for colorbar and top line of subtitle',
          'type': 'output or input to model',
          'description': 'detailed description of layer for subtitle,
                          potentially including source information'}
    :type maplayers: OrderedDict
    """
    # Empty refs
    suslref = 'unknown'
    sussref = 'unknown'
    modellref = 'unknown'
    modelsref = 'unknown'

    # Parse config and read in files
    sus = None
    susdat = None

    if uncertfile is not None:
        print('ground motion uncertainty option not implemented yet')

    # Read in susceptibility file
    #try:
    susfile = config['mechanistic_models']['hazus']['layers'][
        'susceptibility']['file']
    shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
    susdict = GDALGrid.getFileGeoDict(susfile)
    if bounds is not None:  # Make sure bounds are within ShakeMap Grid
        if (shkgdict.xmin > bounds['xmin'] or
                shkgdict.xmax < bounds['xmax'] or
                shkgdict.ymin > bounds['ymin'] or
                shkgdict.ymax < bounds['ymax']):
            print('Specified bounds are outside shakemap area, using '
                  'ShakeMap bounds instead')
            bounds = None
    if bounds is not None:
        tempgdict1 = GeoDict({'xmin': bounds['xmin'],
                              'ymin': bounds['ymin'],
                              'xmax': bounds['xmax'],
                              'ymax': bounds['ymax'],
                              'dx': 100., 'dy': 100.,
                              'nx': 100., 'ny': 100.},
                             adjust='res')
        tempgdict = susdict.getBoundsWithin(tempgdict1)
    else:
        tempgdict = susdict.getBoundsWithin(shkgdict)
    sus = GDALGrid.load(susfile, samplegeodict=tempgdict, resample=False)
    gdict = sus.getGeoDict()
    susdat = sus.getData()
    #except Exception as e:
    #    raise IOError('Unable to read in susceptibility category file '
    #                  'specified in config, %s' % e)

    try:  # Try to fetch source information from config
        modelsref = config['mechanistic_models']['hazus']['shortref']
        modellref = config['mechanistic_models']['hazus']['longref']
        sussref = config['mechanistic_models']['hazus']['layers'][
            'susceptibility']['shortref']
        suslref = config['mechanistic_models']['hazus']['layers'][
            'susceptibility']['longref']
    except:
        print('Was not able to retrieve all references from config file. '
              'Continuing')

    try:
        dnthresh = float(
            config['mechanistic_models']['hazus']['values']['dnthresh'])
    except:
        if probtype == 'threshold':
            dnthresh = 5.
            print('Unable to find dnthresh in config, using 5cm')

    # Load in shakemap, resample to susceptibility file
    shakemap = ShakeGrid.load(shakefile, adjust='res')
    PGA = shakemap.getLayer('pga').subdivide(gdict).getData().astype(
        float)/100.  # in units of g
    PGV = shakemap.getLayer('pgv').subdivide(gdict).getData().astype(
        float)  # cm/sec
    M = shakemap.getEventDict()['magnitude']

    # Get critical accelerations in g; susceptibility categories outside
    # I-X (1-10) get a critical acceleration too high to ever fail
    Ac = np.empty(np.shape(susdat))
    Ac[(susdat < 1) | (susdat > 10)] = 9999.
    Ac[susdat == 1] = 0.6
    Ac[susdat == 2] = 0.5
    Ac[susdat == 3] = 0.4
    Ac[susdat == 4] = 0.35
    Ac[susdat == 5] = 0.3
    Ac[susdat == 6] = 0.25
    Ac[susdat == 7] = 0.2
    Ac[susdat == 8] = 0.15
    Ac[susdat == 9] = 0.1
    Ac[susdat == 10] = 0.05

    # can delete sus and susdat now, if we don't need to output them, to
    # free up memory
    if saveinputs is False:
        del susdat, sus

    if modeltype == 'coverage':
        areal = np.zeros(np.shape(PGA))
        # This seems to be slow for large matrices
        areal[(PGA >= Ac) & (Ac == 0.6)] = 0.01
        areal[(PGA >= Ac) & (Ac == 0.5)] = 0.02
        areal[(PGA >= Ac) & (Ac == 0.4)] = 0.03
        areal[(PGA >= Ac) & (Ac == 0.35)] = 0.05
        areal[(PGA >= Ac) & (Ac == 0.3)] = 0.08
        areal[(PGA >= Ac) & (Ac == 0.25)] = 0.1
        areal[(PGA >= Ac) & (Ac == 0.2)] = 0.15
        areal[(PGA >= Ac) & (Ac == 0.15)] = 0.2
        areal[(PGA >= Ac) & (Ac == 0.1)] = 0.25
        areal[(PGA >= Ac) & (Ac == 0.05)] = 0.3
        # (An equivalent element-wise loop over the cells is about twice
        # as slow as the masked assignments above.)
    elif modeltype == 'dn_hazus' or modeltype == 'dn_prob':
        ed_low, ed_high = est_disp(Ac, PGA)
        # Get mean estimated displacements
        ed_mean = np.mean((np.dstack((ed_low, ed_high))), axis=2)
        dn = ed_mean * numcycles(M) * PGA
    else:  # Calculate Newmark displacement using a regression model
        if regressionmodel == 'J_PGA':
            dn = J_PGA(Ac, PGA)
        elif regressionmodel == 'J_PGA_M':
            dn = J_PGA_M(Ac, PGA, M)
        elif regressionmodel == 'RS_PGA_M':
            dn = RS_PGA_M(Ac, PGA, M)
        elif regressionmodel == 'RS_PGA_PGV':
            dn = RS_PGA_PGV(Ac, PGA, PGV)
        else:
            print('Unrecognized model, using J_PGA\n')
            dn = J_PGA(Ac, PGA)

    # Calculate probability from dn, if necessary for selected model
    if modeltype == 'ac_classic_prob' or modeltype == 'dn_prob':
        if probtype.lower() == 'jibson2000':
            PROB = 0.335*(1-np.exp(-0.048*dn**1.565))
            dnthresh = None
        elif probtype.lower() == 'threshold':
            PROB = dn.copy()
            PROB[PROB <= dnthresh] = 0
            PROB[PROB > dnthresh] = 1
        else:
            # Default to jibson2000 if probtype is not recognized
            print('Invalid probtype, assuming jibson2000')
            PROB = 0.335*(1-np.exp(-0.048*dn**1.565))
            dnthresh = None

    # Turn output and inputs into grids and put in maplayers dictionary
    maplayers = collections.OrderedDict()
    temp = shakemap.getShakeDict()
    shakedetail = '%s_ver%s' % (temp['shakemap_id'],
                                temp['shakemap_version'])
    if modeltype == 'coverage':
        maplayers['model'] = {
            'grid': GDALGrid(areal, gdict),
            'label': 'Areal coverage', 'type': 'output',
            'description': {'name': modelsref, 'longref': modellref,
                            'units': 'coverage', 'shakemap': shakedetail,
                            'parameters': {'modeltype': modeltype}}}
    elif modeltype == 'dn_hazus':
        maplayers['model'] = {
            'grid': GDALGrid(dn, gdict),
            'label': 'Dn (cm)', 'type': 'output',
            'description': {'name': modelsref, 'longref': modellref,
                            'units': 'displacement',
                            'shakemap': shakedetail,
                            'parameters': {
                                'regressionmodel': regressionmodel,
                                'modeltype': modeltype}}}
    elif modeltype == 'ac_classic_dn':
        maplayers['model'] = {
            'grid': GDALGrid(dn, gdict),
            'label': 'Dn (cm)', 'type': 'output',
            'description': {'name': modelsref, 'longref': modellref,
                            'units': 'displacement',
                            'shakemap': shakedetail,
                            'parameters': {
                                'regressionmodel': regressionmodel,
                                'modeltype': modeltype}}}
    elif modeltype == 'dn_prob':
        maplayers['model'] = {
            'grid': GDALGrid(PROB, gdict),
            'label': 'Landslide Probability', 'type': 'output',
            'description': {'name': modelsref, 'longref': modellref,
                            'units': 'probability',
                            'shakemap': shakedetail,
                            'parameters': {
                                'regressionmodel': regressionmodel,
                                'dnthresh_cm': dnthresh,
                                'modeltype': modeltype,
                                'probtype': probtype}}}
    elif modeltype == 'ac_classic_prob':
        maplayers['model'] = {
            'grid': GDALGrid(PROB, gdict),
            'label': 'Landslide Probability', 'type': 'output',
            'description': {'name': modelsref, 'longref': modellref,
                            'units': 'probability',
                            'shakemap': shakedetail,
                            'parameters': {
                                'regressionmodel': regressionmodel,
                                'dnthresh_cm': dnthresh,
                                'modeltype': modeltype,
                                'probtype': probtype}}}

    if saveinputs is True:
        maplayers['suscat'] = {
            'grid': sus,
            'label': 'Susceptibility Category', 'type': 'input',
            'description': {'name': sussref, 'longref': suslref,
                            'units': 'Category'}}
        maplayers['Ac'] = {
            'grid': GDALGrid(Ac, gdict),
            'label': 'Ac (g)', 'type': 'output',
            'description': {'units': 'g', 'shakemap': shakedetail}}
        maplayers['pga'] = {
            'grid': GDALGrid(PGA, gdict),
            'label': 'PGA (g)', 'type': 'input',
            'description': {'units': 'g', 'shakemap': shakedetail}}
        if 'pgv' in regressionmodel.lower():
            maplayers['pgv'] = {
                'grid': GDALGrid(PGV, gdict),
                'label': 'PGV (cm/s)', 'type': 'input',
                'description': {'units': 'cm/s',
                                'shakemap': shakedetail}}
        if 'dn' not in modeltype.lower() and modeltype != 'coverage':
            maplayers['dn'] = {
                'grid': GDALGrid(dn, gdict),
                'label': 'Dn (cm)', 'type': 'output',
                'description': {'units': 'displacement',
                                'shakemap': shakedetail,
                                'parameters': {
                                    'regressionmodel': regressionmodel,
                                    'modeltype': modeltype}}}
    return maplayers
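# ---------------------------------------------------------------------------
# A hedged usage sketch for HAZUS() above (not from the original source);
# 'hazus_config.ini' and 'grid.xml' are hypothetical paths, and the config
# must contain the ['mechanistic_models']['hazus'] section parsed above.
def _example_hazus():
    from configobj import ConfigObj
    config = ConfigObj('hazus_config.ini')
    maplayers = HAZUS('grid.xml', config, modeltype='coverage',
                      saveinputs=False)
    print(maplayers['model']['label'])   # 'Areal coverage'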
def godt2008(shakefile, config, uncertfile=None, saveinputs=False, regressionmodel='J_PGA', bounds=None, slopediv=100., codiv=10.): """ This function runs the Godt et al. (2008) global method for a given ShakeMap. The Factor of Safety is calculated using infinite slope analysis assumuing dry conditions. The method uses threshold newmark displacement and estimates areal coverage by doing the calculations for each slope quantile TO DO - add 'all' - averages Dn from all four equations, add term to convert PGA and PGV to Ia and use other equations, add Ambraseys and Menu (1988) option :param shakefile: url or filepath to shakemap xml file :type shakefile: string :param config: ConfigObj of config file containing inputs required for running the model :type config: ConfigObj :param saveinputs: Whether or not to return the model input layers, False (defeault) returns only the model output (one layer) :type saveinputs: boolean :param regressionmodel: Newmark displacement regression model to use 'J_PGA' (default) - PGA-based model from Jibson (2007) - equation 6 'J_PGA_M' - PGA and M-based model from Jibson (2007) - equation 7 'RS_PGA_M' - PGA and M-based model from from Rathje and Saygili (2009) 'RS_PGA_PGV' - PGA and PGV-based model from Saygili and Rathje (2008) - equation 6 :type regressionmodel: string :param probtype: Method used to estimate probability. Entering 'jibson2000' uses equation 5 from Jibson et al. (2000) to estimate probability from Newmark displacement. 'threshold' uses a specified threshold of Newmark displacement (defined in config file) and assumes anything greather than this threshold fails :type probtype: string :param slopediv: Divide slope by this number to get slope in degrees (Verdin datasets need to be divided by 100) :type slopediv: float :param codiv: Divide cohesion by this number to get reasonable numbers (For Godt method, need to divide by 10 because that is how it was calibrated, but values are reasonable without multiplying for regular analysis) :type codiv: float :returns maplayers: Dictionary containing output and input layers (if saveinputs=True) along with metadata formatted like maplayers['layer name']={'grid': mapio grid2D object, 'label': 'label for colorbar and top line of subtitle', 'type': 'output or input to model', 'description': 'detailed description of layer for subtitle, potentially including source information'} :type maplayers: OrderedDict :raises NameError: when unable to parse the config correctly (probably a formatting issue in the configfile) or when unable to find the shakefile (Shakemap URL or filepath) - these cause program to end """ # Empty refs slopesref = 'unknown' slopelref = 'unknown' cohesionlref = 'unknown' cohesionsref = 'unknown' frictionsref = 'unknown' frictionlref = 'unknown' modellref = 'unknown' modelsref = 'unknown' if uncertfile is not None: print('ground motion uncertainty option not implemented yet') # Parse config try: # May want to add error handling so if refs aren't given, just includes unknown slopefilepath = config['mechanistic_models']['godt_2008']['layers']['slope']['filepath'] slopeunits = config['mechanistic_models']['godt_2008']['layers']['slope']['units'] cohesionfile = config['mechanistic_models']['godt_2008']['layers']['cohesion']['file'] cohesionunits = config['mechanistic_models']['godt_2008']['layers']['cohesion']['units'] frictionfile = config['mechanistic_models']['godt_2008']['layers']['friction']['file'] frictionunits = config['mechanistic_models']['godt_2008']['layers']['friction']['units'] thick = 
float(config['mechanistic_models']['godt_2008']['parameters']['thick']) uwt = float(config['mechanistic_models']['godt_2008']['parameters']['uwt']) nodata_cohesion = float(config['mechanistic_models']['godt_2008']['parameters']['nodata_cohesion']) nodata_friction = float(config['mechanistic_models']['godt_2008']['parameters']['nodata_friction']) dnthresh = float(config['mechanistic_models']['godt_2008']['parameters']['dnthresh']) fsthresh = float(config['mechanistic_models']['godt_2008']['parameters']['fsthresh']) acthresh = float(config['mechanistic_models']['godt_2008']['parameters']['acthresh']) except Exception as e: raise NameError('Could not parse configfile, %s' % e) return # TO DO, ADD ERROR CATCHING ON UNITS, MAKE SURE THEY ARE WHAT THEY SHOULD BE FOR THIS MODEL try: # Try to fetch source information from config modelsref = config['mechanistic_models']['godt_2008']['shortref'] modellref = config['mechanistic_models']['godt_2008']['longref'] slopesref = config['mechanistic_models']['godt_2008']['layers']['slope']['shortref'] slopelref = config['mechanistic_models']['godt_2008']['layers']['slope']['longref'] cohesionsref = config['mechanistic_models']['godt_2008']['layers']['cohesion']['shortref'] cohesionlref = config['mechanistic_models']['godt_2008']['layers']['cohesion']['longref'] frictionsref = config['mechanistic_models']['godt_2008']['layers']['friction']['shortref'] frictionlref = config['mechanistic_models']['godt_2008']['layers']['friction']['longref'] except: print('Was not able to retrieve all references from config file. Continuing') # Load in shakefile if not os.path.isfile(shakefile): if isURL(shakefile): shakefile = getGridURL(shakefile) # returns a file object else: raise NameError('Could not find "%s" as a file or a valid url' % (shakefile)) return shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res') if bounds is not None: # Make sure bounds are within ShakeMap Grid if shkgdict.xmin > bounds['xmin'] or shkgdict.xmax < bounds['xmax'] or shkgdict.ymin > bounds['ymin'] or shkgdict.ymax < bounds['ymax']: print('Specified bounds are outside shakemap area, using ShakeMap bounds instead') bounds = None if bounds is not None: tempgdict = GeoDict({'xmin': bounds['xmin'], 'ymin': bounds['ymin'], 'xmax': bounds['xmax'], 'ymax': bounds['ymax'], 'dx': shkgdict.dx, 'dy': shkgdict.dy, 'nx': shkgdict.nx, 'ny': shkgdict.ny}, adjust='res') gdict = shkgdict.getBoundsWithin(tempgdict) shakemap = ShakeGrid.load(shakefile, samplegeodict=gdict, adjust='bounds') else: shakemap = ShakeGrid.load(shakefile, adjust='res') shkgdict = shakemap.getGeoDict() # Get updated geodict M = shakemap.getEventDict()['magnitude'] # Read in all the slope files, divide all by 100 to get to slope in degrees (because input files are multiplied by 100.) 
    slopes = []
    for fname in ('slope_min.bil', 'slope10.bil', 'slope30.bil',
                  'slope50.bil', 'slope70.bil', 'slope90.bil',
                  'slope_max.bil'):
        slopes.append(GDALGrid.load(os.path.join(slopefilepath, fname),
                                    samplegeodict=shkgdict, resample=True,
                                    method='linear').getData()/slopediv)
    slopestack = np.dstack(slopes)

    # Change any zero slopes to a very small number to avoid dividing by
    # zero later
    slopestack[slopestack == 0] = 1e-8

    # Read in the cohesion and friction files and duplicate layers so they
    # are the same shape as the slope structure
    cohesion = np.repeat(
        GDALGrid.load(cohesionfile, samplegeodict=shakemap.getGeoDict(),
                      resample=True,
                      method='nearest').getData()[:, :, np.newaxis]/codiv,
        7, axis=2)
    cohesion[cohesion == -999.9] = nodata_cohesion
    cohesion[cohesion == 0] = nodata_cohesion
    friction = np.repeat(
        GDALGrid.load(frictionfile, samplegeodict=shakemap.getGeoDict(),
                      resample=True, method='nearest').getData().astype(
                          float)[:, :, np.newaxis],
        7, axis=2)
    friction[friction == -9999] = nodata_friction
    friction[friction == 0] = nodata_friction

    # Factor of safety from infinite slope analysis, dry conditions
    FS = cohesion/(uwt*thick*np.sin(slopestack*(np.pi/180.))) + \
        np.tan(friction*(np.pi/180.))/np.tan(slopestack*(np.pi/180.))
    FS[FS < fsthresh] = fsthresh

    # Compute critical acceleration, in g; this gives ac in g, equations
    # that multiply by g give ac in m/s2
    Ac = (FS-1)*np.sin(slopestack*(np.pi/180.)).astype(float)
    Ac[Ac < acthresh] = acthresh

    # Get PGA in g (PGA is %g in ShakeMap, convert to g)
    PGA = np.repeat(
        shakemap.getLayer('pga').getData()[:, :, np.newaxis]/100.,
        7, axis=2).astype(float)
    if 'PGV' in regressionmodel:  # Load in PGV also, in cm/sec
        PGV = np.repeat(
            shakemap.getLayer('pgv').getData()[:, :, np.newaxis],
            7, axis=2).astype(float)

    # Ignore errors so it still runs when Ac > PGA, just leaves nan
    # instead of crashing
    np.seterr(invalid='ignore')

    if regressionmodel == 'J_PGA':
        Dn = J_PGA(Ac, PGA)
    elif regressionmodel == 'J_PGA_M':
        Dn = J_PGA_M(Ac, PGA, M)
    elif regressionmodel == 'RS_PGA_M':
        Dn = RS_PGA_M(Ac, PGA, M)
    elif regressionmodel == 'RS_PGA_PGV':
        Dn = RS_PGA_PGV(Ac, PGA, PGV)
    else:
        # Default to J_PGA if the model name is not recognized
        print('Unrecognized regression model, using J_PGA\n')
        Dn = J_PGA(Ac, PGA)

    PROB = Dn.copy()
    PROB[PROB < dnthresh] = 0.
    PROB[PROB >= dnthresh] = 1.
    PROB = np.sum(PROB, axis=2)
    # Convert the count of exceeding slope quantiles to areal coverage
    PROB[PROB == 1.] = 0.01
    PROB[PROB == 2.] = 0.10
    PROB[PROB == 3.] = 0.30
    PROB[PROB == 4.] = 0.50
    PROB[PROB == 5.] = 0.70
    PROB[PROB == 6.] = 0.90
    PROB[PROB == 7.] = 0.99

    # Turn output and inputs into grids and put in maplayers dictionary
    maplayers = collections.OrderedDict()
    temp = shakemap.getShakeDict()
    shakedetail = '%s_ver%s' % (temp['shakemap_id'],
                                temp['shakemap_version'])
    description = {'name': modelsref, 'longref': modellref,
                   'units': 'coverage', 'shakemap': shakedetail,
                   'parameters': {'regressionmodel': regressionmodel,
                                  'thickness_m': thick,
                                  'unitwt_kNm3': uwt,
                                  'dnthresh_cm': dnthresh,
                                  'acthresh_g': acthresh,
                                  'fsthresh': fsthresh}}
    maplayers['model'] = {'grid': GDALGrid(PROB, shakemap.getGeoDict()),
                          'label': 'Areal coverage', 'type': 'output',
                          'description': description}
    if saveinputs is True:
        maplayers['pga'] = {
            'grid': GDALGrid(PGA[:, :, 0], shakemap.getGeoDict()),
            'label': 'PGA (g)', 'type': 'input',
            'description': {'units': 'g', 'shakemap': shakedetail}}
        if 'PGV' in regressionmodel:
            maplayers['pgv'] = {
                'grid': GDALGrid(PGV[:, :, 0], shakemap.getGeoDict()),
                'label': 'PGV (cm/s)', 'type': 'input',
                'description': {'units': 'cm/s',
                                'shakemap': shakedetail}}
        maplayers['minFS'] = {
            'grid': GDALGrid(np.min(FS, axis=2), shakemap.getGeoDict()),
            'label': 'Min Factor of Safety', 'type': 'input',
            'description': {'units': 'unitless'}}
        maplayers['max slope'] = {
            'grid': GDALGrid(slopestack[:, :, -1], shakemap.getGeoDict()),
            'label': r'Maximum slope ($^\circ$)', 'type': 'input',
            'description': {'units': 'degrees', 'name': slopesref,
                            'longref': slopelref}}
        maplayers['cohesion'] = {
            'grid': GDALGrid(cohesion[:, :, 0], shakemap.getGeoDict()),
            'label': 'Cohesion (kPa)', 'type': 'input',
            'description': {'units': 'kPa (adjusted)',
                            'name': cohesionsref,
                            'longref': cohesionlref}}
        maplayers['friction angle'] = {
            'grid': GDALGrid(friction[:, :, 0], shakemap.getGeoDict()),
            'label': r'Friction angle ($^\circ$)', 'type': 'input',
            'description': {'units': 'degrees', 'name': frictionsref,
                            'longref': frictionlref}}
    return maplayers
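# ---------------------------------------------------------------------------
# A hedged usage sketch for godt2008() above (not from the original
# source); file names are hypothetical. slopediv=100. matches the Verdin
# slope files mentioned in the docstring, and codiv=10. the Godt
# calibration.
def _example_godt2008():
    from configobj import ConfigObj
    config = ConfigObj('godt_config.ini')
    maplayers = godt2008('grid.xml', config, regressionmodel='J_PGA_M',
                         slopediv=100., codiv=10., saveinputs=True)
    print(sorted(maplayers.keys()))      # 'model' plus the input layers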
def classic(shakefile, config, uncertfile=None, saveinputs=False,
            regressionmodel='J_PGA', probtype='jibson2000', slopediv=1.,
            codiv=1., bounds=None):
    """
    This function uses the Newmark method to estimate the probability of
    failure at each grid cell. The Factor of Safety and critical
    accelerations are calculated following Jibson et al. (2000), and the
    Newmark displacement is estimated using PGA, PGV, and/or magnitude
    (depending on the equation used) from the ShakeMap, with regression
    equations from Jibson (2007), Rathje and Saygili (2008), and Saygili
    and Rathje (2009).

    :param shakefile: URL or complete file path to the location of the
      Shakemap to use as input
    :type shakefile: string
    :param config: Model configuration file object containing locations of
      input files and other input values
      config = ConfigObj(configfilepath)
    :type config: ConfigObj
    :param uncertfile: complete file path to the location of the
      uncertainty.xml for the shakefile; if this is not None, the model is
      also computed for +-std in addition to the best estimate
    :param saveinputs: Whether or not to return the model input layers,
      False (default) returns only the model output (one layer)
    :type saveinputs: boolean
    :param regressionmodel: Newmark displacement regression model to use
      'J_PGA' (default) - PGA-based model from Jibson (2007) - equation 6
      'J_PGA_M' - PGA and M-based model from Jibson (2007) - equation 7
      'RS_PGA_M' - PGA and M-based model from Rathje and Saygili (2009)
      'RS_PGA_PGV' - PGA and PGV-based model from Saygili and Rathje
        (2008) - equation 6
    :type regressionmodel: string
    :param probtype: Method used to estimate probability. Entering
      'jibson2000' uses equation 5 from Jibson et al. (2000) to estimate
      probability from Newmark displacement. 'threshold' uses a specified
      threshold of Newmark displacement (defined in config file) and
      assumes anything greater than this threshold fails
    :type probtype: string
    :param slopediv: Divide slope by this number to get slope in degrees
      (Verdin datasets need to be divided by 100)
    :type slopediv: float
    :param codiv: Divide cohesion by this number to get reasonable numbers
      (For the Godt method, need to divide by 10 because that is how it
      was calibrated, but values are reasonable without dividing for
      regular analysis)
    :type codiv: float
    :returns maplayers: Dictionary containing output and input layers (if
      saveinputs=True) along with metadata formatted like
      maplayers['layer name'] = {
          'grid': mapio grid2D object,
          'label': 'label for colorbar and top line of subtitle',
          'type': 'output or input to model',
          'description': 'detailed description of layer for subtitle,
                          potentially including source information'}
    :type maplayers: OrderedDict
    :raises NameError: when unable to parse the config correctly (probably
      a formatting issue in the configfile) or when unable to find the
      shakefile (Shakemap URL or filepath) - these cause the program to end

    Note: if probtype does not match a predefined probability type, the
    model defaults to 'jibson2000'.
    """
    # Empty refs
    slopesref = 'unknown'
    slopelref = 'unknown'
    cohesionlref = 'unknown'
    cohesionsref = 'unknown'
    frictionsref = 'unknown'
    frictionlref = 'unknown'
    modellref = 'unknown'
    modelsref = 'unknown'

    # Parse config - should make it so it uses defaults if any are
    # missing...
try: slopefile = config['mechanistic_models']['classic_newmark']['layers']['slope']['file'] slopeunits = config['mechanistic_models']['classic_newmark']['layers']['slope']['units'] cohesionfile = config['mechanistic_models']['classic_newmark']['layers']['cohesion']['file'] cohesionunits = config['mechanistic_models']['classic_newmark']['layers']['cohesion']['units'] frictionfile = config['mechanistic_models']['classic_newmark']['layers']['friction']['file'] frictionunits = config['mechanistic_models']['classic_newmark']['layers']['friction']['units'] thick = float(config['mechanistic_models']['classic_newmark']['parameters']['thick']) uwt = float(config['mechanistic_models']['classic_newmark']['parameters']['uwt']) nodata_cohesion = float(config['mechanistic_models']['classic_newmark']['parameters']['nodata_cohesion']) nodata_friction = float(config['mechanistic_models']['classic_newmark']['parameters']['nodata_friction']) try: dnthresh = float(config['mechanistic_models']['classic_newmark']['parameters']['dnthresh']) except: if probtype == 'threshold': dnthresh = 5. print('Unable to find dnthresh in config, using 5cm') else: dnthresh = None fsthresh = float(config['mechanistic_models']['classic_newmark']['parameters']['fsthresh']) acthresh = float(config['mechanistic_models']['classic_newmark']['parameters']['acthresh']) slopethresh = float(config['mechanistic_models']['classic_newmark']['parameters']['slopethresh']) try: m = float(config['mechanistic_models']['classic_newmark']['parameters']['m']) except: print('no constant saturated thickness specified, m=0 if no watertable file is found') m = 0. except Exception as e: raise NameError('Could not parse configfile, %s' % e) return try: # Try to fetch source information from config modelsref = config['mechanistic_models']['classic_newmark']['shortref'] modellref = config['mechanistic_models']['classic_newmark']['longref'] slopesref = config['mechanistic_models']['classic_newmark']['layers']['slope']['shortref'] slopelref = config['mechanistic_models']['classic_newmark']['layers']['slope']['longref'] cohesionsref = config['mechanistic_models']['classic_newmark']['layers']['cohesion']['shortref'] cohesionlref = config['mechanistic_models']['classic_newmark']['layers']['cohesion']['longref'] frictionsref = config['mechanistic_models']['classic_newmark']['layers']['friction']['shortref'] frictionlref = config['mechanistic_models']['classic_newmark']['layers']['friction']['longref'] except: print('Was not able to retrieve all references from config file. 
Continuing') # Cut and resample all files shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res') slpdict = GDALGrid.getFileGeoDict(slopefile) if bounds is not None: # Make sure bounds are within ShakeMap Grid if shkgdict.xmin > bounds['xmin'] or shkgdict.xmax < bounds['xmax'] or shkgdict.ymin > bounds['ymin'] or shkgdict.ymax < bounds['ymax']: print('Specified bounds are outside shakemap area, using ShakeMap bounds instead') bounds = None if bounds is not None: tempgdict = GeoDict({'xmin': bounds['xmin'], 'ymin': bounds['ymin'], 'xmax': bounds['xmax'], 'ymax': bounds['ymax'], 'dx': 100., 'dy': 100., 'nx': 100., 'ny': 100.}, adjust='res') gdict = slpdict.getBoundsWithin(tempgdict) else: # Get boundaries from shakemap if not specified gdict = slpdict.getBoundsWithin(shkgdict)
# Load in slope file slopegrid = GDALGrid.load(slopefile, samplegeodict=gdict, resample=False) gdict = slopegrid.getGeoDict() # Get this again just in case it changed slope = slopegrid.getData().astype(float)/slopediv # Adjust slope to degrees, if needed # Change any zero slopes to a very small number to avoid dividing by zero later slope[slope == 0] = 1e-8 # Load in shakefile if not os.path.isfile(shakefile): if isURL(shakefile): shakefile = getGridURL(shakefile) # returns a file object else: raise NameError('Could not find "%s" as a file or a valid url' % (shakefile))
# Load in shakemap, resample to slope file (this will be important when we go to higher res) shakemap = ShakeGrid.load(shakefile, samplegeodict=gdict, resample=True, method='linear', adjust='res') M = shakemap.getEventDict()['magnitude'] # Read in uncertainty layer, if present if uncertfile is not None: try: uncert = ShakeGrid.load(uncertfile, samplegeodict=gdict, resample=True, method='linear', adjust='res') except Exception: print('Could not read uncertainty file, ignoring uncertainties') uncertfile = None # Read in the cohesion and friction files, resampled to slope grid cohesion = GDALGrid.load(cohesionfile, samplegeodict=gdict, resample=True, method='nearest').getData().astype(float)/codiv cohesion[np.isnan(cohesion)] = nodata_cohesion friction = GDALGrid.load(frictionfile, samplegeodict=gdict, resample=True, method='nearest').getData().astype(float) friction[np.isnan(friction)] = nodata_friction
# See if there is a water table depth file and read it in if there is try: waterfile = config['mechanistic_models']['classic_newmark']['layers']['watertable']['file'] watertable = GDALGrid.load(waterfile, samplegeodict=gdict, resample=True, method='linear').getData() # Needs to be in meters! uwtw = float(config['mechanistic_models']['classic_newmark']['parameters']['uwtw']) try: watersref = config['mechanistic_models']['classic_newmark']['layers']['watertable']['shortref'] waterlref = config['mechanistic_models']['classic_newmark']['layers']['watertable']['longref'] except Exception: print('Was not able to retrieve water table references from config file. Continuing') except Exception: print('Water table file not specified or readable, assuming constant saturated thickness proportion of %0.1f' % m) watertable = None try: uwtw = float(config['mechanistic_models']['classic_newmark']['parameters']['uwtw']) except Exception: print('Could not read soil wet unit weight, using 18.8 kN/m3') uwtw = 18.8
# Factor of safety if watertable is not None: watertable[watertable > thick] = thick m = (thick - watertable)/thick FS = cohesion/(uwt*thick*np.sin(slope*(np.pi/180.))) + np.tan(friction*(np.pi/180.))/np.tan(slope*(np.pi/180.)) - (m*uwtw*np.tan(friction*(np.pi/180.)))/(uwt*np.tan(slope*(np.pi/180.))) FS[FS < fsthresh] = fsthresh # Compute critical acceleration, in g Ac = (FS-1.)*np.sin(slope*(np.pi/180.)) # This gives Ac in g; equations that multiply by g give Ac in m/s2 Ac[Ac < acthresh] = acthresh Ac[slope < slopethresh] = float('nan') # Get PGA in g (PGA is %g in ShakeMap, convert to g) PGA = shakemap.getLayer('pga').getData().astype(float)/100. PGV = shakemap.getLayer('pgv').getData().astype(float) if uncertfile is not None: stdpga = uncert.getLayer('stdpga') stdpgv = uncert.getLayer('stdpgv') # Estimate PGA +- 1std PGAmin = np.exp(np.log(PGA*100.) - stdpga.getData())/100. PGAmax = np.exp(np.log(PGA*100.) + stdpga.getData())/100. PGVmin = np.exp(np.log(PGV) - stdpgv.getData()) PGVmax = np.exp(np.log(PGV) + stdpgv.getData())
np.seterr(invalid='ignore') # Ignore errors so this still runs when Ac > PGA, just leaves nan instead of crashing if regressionmodel == 'J_PGA': Dn = J_PGA(Ac, PGA) if uncertfile is not None: Dnmin = J_PGA(Ac, PGAmin) Dnmax = J_PGA(Ac, PGAmax) elif regressionmodel == 'J_PGA_M': Dn = J_PGA_M(Ac, PGA, M) if uncertfile is not None: Dnmin = J_PGA_M(Ac, PGAmin, M) Dnmax = J_PGA_M(Ac, PGAmax, M) elif regressionmodel == 'RS_PGA_M': Dn = RS_PGA_M(Ac, PGA, M) if uncertfile is not None: Dnmin = RS_PGA_M(Ac, PGAmin, M) Dnmax = RS_PGA_M(Ac, PGAmax, M) elif regressionmodel == 'RS_PGA_PGV': Dn = RS_PGA_PGV(Ac, PGA, PGV) if uncertfile is not None: Dnmin = RS_PGA_PGV(Ac, PGAmin, PGVmin) Dnmax = RS_PGA_PGV(Ac, PGAmax, PGVmax) else: print('Unrecognized regression model, aborting') return
units = 'probability' label = 'Landslide Probability' if probtype.lower() == 'jibson2000': PROB = 0.335*(1-np.exp(-0.048*Dn**1.565)) dnthresh = None if uncertfile is not None: PROBmin = 0.335*(1-np.exp(-0.048*Dnmin**1.565)) PROBmax = 0.335*(1-np.exp(-0.048*Dnmax**1.565)) elif probtype.lower() == 'threshold': PROB = Dn.copy() PROB[PROB <= dnthresh] = 0 PROB[PROB > dnthresh] = 1 units = 'prediction' label = 'Predicted Landslides' if uncertfile is not None: PROBmin = Dnmin.copy() PROBmin[PROBmin <= dnthresh] = 0 PROBmin[PROBmin > dnthresh] = 1 PROBmax = Dnmax.copy() PROBmax[PROBmax <= dnthresh] = 0 PROBmax[PROBmax > dnthresh] = 1 else: print('Invalid probtype, assuming jibson2000') PROB = 0.335*(1-np.exp(-0.048*Dn**1.565)) dnthresh = None if uncertfile is not None: PROBmin = 0.335*(1-np.exp(-0.048*Dnmin**1.565)) PROBmax = 0.335*(1-np.exp(-0.048*Dnmax**1.565))
# Turn outputs and inputs into grids and put them in the maplayers dictionary maplayers = collections.OrderedDict() temp = shakemap.getShakeDict() shakedetail = '%s_ver%s' % (temp['shakemap_id'], temp['shakemap_version']) if watertable is not None: des = 'variable' else: des = m description = {'name': modelsref, 'longref': modellref, 'units': units, 'shakemap': shakedetail, 'parameters': {'regressionmodel': regressionmodel, 'thickness_m': thick, 'unitwt_kNm3': uwt, 'dnthresh_cm': dnthresh, 'acthresh_g': acthresh, 'fsthresh': fsthresh, 'slopethresh': slopethresh, 'sat_proportion': des}}
maplayers['model'] = {'grid': GDALGrid(PROB, gdict), 'label': label, 'type': 'output', 'description': description} if uncertfile is not None: maplayers['modelmin'] = {'grid': GDALGrid(PROBmin, gdict), 'label': label+' -1std', 'type': 'output', 'description': description} maplayers['modelmax'] = {'grid': GDALGrid(PROBmax, gdict), 'label': label+' +1std', 'type': 'output', 'description': description} if saveinputs is True: maplayers['pga'] = {'grid': GDALGrid(PGA, gdict), 'label': 'PGA (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}} maplayers['FS'] = {'grid': GDALGrid(FS, gdict), 'label': 'Factor of Safety', 'type': 'input', 'description': {'units': 'unitless'}} maplayers['Ac'] = {'grid': GDALGrid(Ac, gdict), 'label': 'Critical acceleration (g)', 'type': 'input'} maplayers['Dn'] = {'grid': GDALGrid(Dn, gdict), 'label': 'Newmark Displacement (cm)', 'type': 'input'} maplayers['slope'] = {'grid': GDALGrid(slope, gdict), 'label': r'Max slope ($^\circ$)', 'type': 'input', 'description': {'units': 'degrees', 'name': slopesref, 'longref': slopelref}} maplayers['cohesion'] = {'grid': GDALGrid(cohesion, gdict), 'label': 'Cohesion (kPa)', 'type': 'input', 'description': {'units': 'kPa (adjusted)', 'name': cohesionsref, 'longref': cohesionlref}} maplayers['friction angle'] = {'grid': GDALGrid(friction, gdict), 'label': r'Friction angle ($^\circ$)', 'type': 'input', 'description': {'units': 'degrees', 'name': frictionsref, 'longref': frictionlref}} if uncertfile is not None: maplayers['pgamin'] = {'grid': GDALGrid(PGAmin, gdict), 'label': 'PGA - 1std (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}} maplayers['pgamax'] = {'grid': GDALGrid(PGAmax, gdict), 'label': 'PGA + 1std (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}} if 'PGV' in regressionmodel: maplayers['pgv'] = {'grid': GDALGrid(PGV, gdict), 'label': 'PGV (cm/s)', 'type': 'input', 'description': {'units': 'cm/s', 'shakemap': shakedetail}} if uncertfile is not None: maplayers['pgvmin'] = {'grid': GDALGrid(PGVmin, gdict), 'label': 'PGV - 1std (cm/s)', 'type': 'input', 'description': {'units': 'cm/s', 'shakemap': shakedetail}} maplayers['pgvmax'] = {'grid': GDALGrid(PGVmax, gdict), 'label': 'PGV + 1std (cm/s)', 'type': 'input', 'description': {'units': 'cm/s', 'shakemap': shakedetail}} if watertable is not None: maplayers['sat thick prop'] = {'grid': GDALGrid(m, gdict), 'label': 'Saturated thickness proportion [0,1]', 'type': 'input', 'description': {'units': 'proportion', 'name': watersref, 'longref': waterlref}} return maplayers
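# The infinite-slope factor-of-safety and critical-acceleration arithmetic
# above is easy to sanity-check in isolation. A minimal sketch with
# illustrative scalar values (not from any real event; cohesion, uwt, thick,
# etc. stand in for the function parameters of the same names):
import numpy as np

cohesion = 10.0   # kPa
friction = 30.0   # degrees
slope = 25.0      # degrees
thick = 2.4       # m, failure slab thickness
uwt = 15.7        # kN/m3, dry unit weight
uwtw = 18.8       # kN/m3, wet unit weight
m = 0.1           # saturated thickness proportion [0, 1]
rad = np.pi/180.
FS = cohesion/(uwt*thick*np.sin(slope*rad)) + \
    np.tan(friction*rad)/np.tan(slope*rad) - \
    (m*uwtw*np.tan(friction*rad))/(uwt*np.tan(slope*rad))
Ac = (FS - 1.)*np.sin(slope*rad)  # critical acceleration in g, positive only when FS > 1
print('FS = %.3f, Ac = %.3f g' % (FS, Ac))  # FS = 1.718, Ac = 0.303 g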
def computePexp(grid, pop_file, shakefile=None, shakethreshtype='pga', shakethresh=0., probthresh=0., stdgrid2D=None, stdtype='full', maxP=1., sill1=None, range1=None): """ Get exposure-based statistics. Args: grid: Model grid. pop_file (str): Path to the landscan population grid. shakefile (str): Optional, path to shakemap file to use for ground motion threshold. shakethreshtype (str): Optional, type of ground motion to use for shakethresh, 'pga', 'pgv', or 'mmi'. shakethresh: Float, shaking threshold in %g for pga, cm/s for pgv, float for mmi. probthresh: Float, exclude any cells with probabilities less than or equal to this value stdgrid2D: grid2D object of model standard deviations (optional) stdtype (str): assumption of spatial correlation used to compute the stdev of the statistics, 'max', 'min', 'mean' of max and min, or 'full' (default) which estimates the range of correlation and accounts for covariance. Will return 'mean' if range and sill cannot be estimated. maxP (float): the maximum possible probability of the model sill1 (float): If known, the sill of the variogram of grid2D, will be estimated if None and stdtype='full' range1 (float): If known, the range of the variogram of grid2D, will be estimated if None and stdtype='full' Returns: dict: Dictionary with keys named exp_pop_# where # is the shakethresh, exp_std_# if stdgrid2D is supplied (stdev of exp_pop), elim_# (the maximum exposure value possible with the applied thresholds and given maxP value), p_exp_# (beta distribution shape factor p, sometimes called alpha), and q_exp_# (beta distribution shape factor q, sometimes called beta). """
model = grid.getData().copy() mdict = grid.getGeoDict() # Figure out difference in resolution of popfile and model grid ptemp, J = GDALGrid.getFileGeoDict(pop_file) factor = ptemp.dx/mdict.dx # Cut out area from population file popcut1 = quickcut(pop_file, mdict, precise=False, extrasamp=2., method='nearest') # Adjust for factor to prepare for upsampling to avoid creating new people popcut1.setData(popcut1.getData()/factor**2) # Upsample to mdict popcut = popcut1.interpolate2(mdict, method='nearest') popdat = popcut.getData() exp_pop = {} if shakefile is not None: if shakethresh < 0.: raise Exception('shaking threshold must be greater than or equal to zero') # resample shakemap to grid2D temp = ShakeGrid.load(shakefile) shk = temp.getLayer(shakethreshtype) shk = shk.interpolate2(mdict) if shk.getGeoDict() != mdict: raise Exception('shakemap was not resampled to exactly the same geodict as the model') shkdat = shk.getData() model[shkdat < shakethresh] = float('nan') else: shakethresh = 0. shkdat = None
mu = np.nansum(model[model >= probthresh] * popdat[model >= probthresh]) exp_pop['exp_pop_%1.2fg' % (shakethresh/100.,)] = mu elim = np.nansum(popdat[model >= probthresh])*maxP exp_pop['elim_%1.2fg' % (shakethresh/100.,)] = elim if stdgrid2D is not None: std = stdgrid2D.getData().copy() if np.nanmax(std) > 0. and np.nanmax(model) >= probthresh: totalmin = np.sqrt(np.nansum((popdat[model >= probthresh]*std[model >= probthresh])**2.)) totalmax = np.nansum(std[model >= probthresh] * popdat[model >= probthresh]) if stdtype == 'full': if sill1 is None or range1 is None: modelfresh = grid.getData().copy() range1, sill1 = semivario(modelfresh, probthresh, shakethresh=shakethresh, shakegrid=shkdat) if range1 is None: # Use mean exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = (totalmax+totalmin)/2. else: # Zero out std at cells where the model probability was below the threshold because those cells are not included in the exposure sum stdz = std.copy() stdz[model < probthresh] = 0. svar1 = svar(stdz, range1, sill1, scale=popdat) exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = np.sqrt(svar1) elif stdtype == 'max': exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = totalmax elif stdtype == 'min': exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = totalmin else: exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = (totalmax+totalmin)/2. # Beta distribution shape factors var = exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)]**2. exp_pop['p_exp_%1.2fg' % (shakethresh/100.,)] = (mu/elim)*((elim*mu-mu**2)/var-1) exp_pop['q_exp_%1.2fg' % (shakethresh/100.,)] = (1-mu/elim)*((elim*mu-mu**2)/var-1) else: print('no std values above zero, filling with zeros') exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = 0. exp_pop['p_exp_%1.2fg' % (shakethresh/100.,)] = 0. exp_pop['q_exp_%1.2fg' % (shakethresh/100.,)] = 0. else: exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = 0. exp_pop['p_exp_%1.2fg' % (shakethresh/100.,)] = 0. exp_pop['q_exp_%1.2fg' % (shakethresh/100.,)] = 0. return exp_pop
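# The p_exp/q_exp values above are method-of-moments shape factors for a
# beta distribution rescaled to [0, elim]; a quick standalone check that the
# implied mean and variance reproduce the inputs (numbers are illustrative):
import numpy as np
from scipy.stats import beta

mu = 1200.     # expected exposed population
elim = 50000.  # maximum possible exposure given thresholds and maxP
var = 400.**2  # variance of the exposure estimate
p = (mu/elim)*((elim*mu - mu**2)/var - 1)
q = (1 - mu/elim)*((elim*mu - mu**2)/var - 1)
assert np.isclose(beta.mean(p, q, loc=0, scale=elim), mu)
assert np.isclose(beta.var(p, q, loc=0, scale=elim), var)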
def getDataFrames(sampleparams,shakeparams,predictors,outparams): """ Return Pandas training and testing data frames containing sampled data from hazard coverage, ShakeMap, and predictor data sets. :param sampleparams: Dictionary with at least these values: - coverage: Name of hazard coverage shapefile (decimal degrees). Required. - dx: Float, desired sample resolution; can be overridden by nmax, below (meters). Required. - cb: Desired class balance, i.e., fraction of sampled points that should be from hazard polygons. Optional for polygons, required for points. - nmax: Maximum number of possible yes/no sample points (usually set to avoid memory issues). Optional. - nsamp: Number of total hazard and no-hazard sample points to collect. Required. - touch_center: Boolean (0 or 1) indicating whether polygons must touch the center of the cell in order for that cell to count as a "yes" sample point. - testpercent: Percentage of sampled points to be used for testing; (100 - testpercent) will be used for training. Optional, defaults to 0. - extent: xmin,xmax,ymin,ymax OR convex. Geographic extent within which to sample data. Four numbers are interpreted as a bounding box; the word convex will be interpreted to mean a convex hull. Default (not specified) will mean the bounding box of the hazard coverage. Optional. - h1: Minimum buffer size for sampling non-hazard points when input coverage takes the form of points. Optional for polygons, required for points. - h2: Maximum buffer size for sampling non-hazard points when input coverage takes the form of points. Optional for polygons, required for points. :param shakeparams: Dictionary with at least these values: - shakemap: Name of shakemap file to use for sampling hazard values. Required. - shakemap_uncertainty: Name of shakemap uncertainty file to use for sampling hazard uncertainty values. Optional. :param predictors: Dictionary with at least these values: - layername: Path to ESRI shapefile, or grid in GMT or ESRI format which represents predictor data. Required. - layername_sampling: 'nearest' or 'linear', optional for grids, not used for shapefiles. - layername_attribute: Name of attribute in shapefile which should be sampled at hazard/non-hazard points. Required for shapefiles. :param outparams: Dictionary with at least these values: - folder: Name of folder where all output (data frames, plots) will be written. Will be created if it does not exist. Required. - basename: The name that will be included in all output file names (i.e., northridge_train.csv). Required. :returns: Tuple of (training,testing) Pandas data frames.
""" coverage = sampleparams['coverage'] f = fiona.collection(coverage,'r') cbounds = f.bounds f.close() dx = sampleparams['dx'] cb = sampleparams['cb'] nmax = sampleparams['nmax'] nsamp = sampleparams['nsamp'] touch_center = sampleparams['touch_center'] testpercent = sampleparams['testpercent'] extent = sampleparams['extent'] h1 = sampleparams['h1'] h2 = sampleparams['h2'] yestest,yestrain,notest,notrain,xvar,yvar,pshapes,proj = sampleFromFile(coverage,predictors,dx=dx,nmax=nmax,testPercent=testpercent, touch_center=touch_center,classBalance=cb,extent=extent, Nsamp=nsamp,h1=h1,h2=h2) traincolumns = OrderedDict() testcolumns = OrderedDict() if (100-testpercent) > 0: traincolumns['lat'] = np.concatenate((yestrain[:,1],notrain[:,1])) traincolumns['lon'] = np.concatenate((yestrain[:,0],notrain[:,0])) traincolumns['coverage'] = np.concatenate((np.ones_like(yestrain[:,1]),np.zeros_like(notrain[:,1]))) if testpercent > 0: testcolumns['lat'] = np.concatenate((yestest[:,1],notest[:,1])) testcolumns['lon'] = np.concatenate((yestest[:,0],notest[:,0])) testcolumns['coverage'] = np.concatenate((np.ones_like(yestest[:,1]),np.zeros_like(notest[:,1]))) for predname,predfile in predictors.items(): if not os.path.isfile(predfile): continue ftype = getFileType(predfile) if ftype == 'shapefile': attribute = predictors[predname+'_attribute'] shapes = subsetShapes(predfile,cbounds) yes_test_samples = sampleShapes(shapes,yestest,attribute) no_test_samples = sampleShapes(shapes,notest,attribute) yes_train_samples = sampleShapes(shapes,yestrain,attribute) no_train_samples = sampleShapes(shapes,notrain,attribute) testcolumns[predname] = np.squeeze(np.concatenate((yes_test_samples,no_test_samples))) traincolumns[predname] = np.squeeze(np.concatenate((yes_train_samples,no_train_samples))) elif ftype == 'grid': method = 'nearest' if predname+'_sampling' in predictors: method = predictors[predname+'_sampling'] if testpercent > 0: yes_test_samples = sampleGridFile(predfile,yestest,method=method) no_test_samples = sampleGridFile(predfile,notest,method=method) testcolumns[predname] = np.squeeze(np.concatenate((yes_test_samples,no_test_samples))) if (100-testpercent) > 0: yes_train_samples = sampleGridFile(predfile,yestrain,method=method) no_train_samples = sampleGridFile(predfile,notrain,method=method) traincolumns[predname] = np.squeeze(np.concatenate((yes_train_samples,no_train_samples))) else: continue #attribute or sampling method key #sample the shakemap layers = ['mmi','pga','pgv','psa03','psa10','psa30'] shakegrid = ShakeGrid.load(shakeparams['shakemap'],adjust='res') for layer in layers: yes_test_samples = sampleFromMultiGrid(shakegrid,layer,yestest) no_test_samples = sampleFromMultiGrid(shakegrid,layer,notest) yes_train_samples = sampleFromMultiGrid(shakegrid,layer,yestrain) no_train_samples = sampleFromMultiGrid(shakegrid,layer,notrain) if testpercent > 0: testcolumns[layer] = np.squeeze(np.concatenate((yes_test_samples,no_test_samples))) if (100-testpercent) > 0: traincolumns[layer] = np.squeeze(np.concatenate((yes_train_samples,no_train_samples))) dftest = pd.DataFrame(testcolumns) dftrain = pd.DataFrame(traincolumns) return (dftrain,dftest)
def __init__(self, config, shakefile, model, uncertfile=None): """Set up the logistic model :param config: configobj (config .ini file read in using configobj) defining the model and its inputs :type config: dictionary :param shakefile: Full file path to shakemap.xml file for the event of interest :type shakefile: string :param model: Name of model defined in config that should be run for the event of interest :type model: string :param uncertfile: Optional, full file path to the shakemap uncertainty.xml file for the event of interest :type uncertfile: string """ if model not in getLogisticModelNames(config): raise Exception('Could not find a model called "%s" in config %s.' % (model, config)) # do everything here short of calculations - parse config, assemble eqn strings, load data. self.model = model cmodel = config['logistic_models'][model] self.modeltype = cmodel['gfetype'] self.coeffs = validateCoefficients(cmodel) self.layers = validateLayers(cmodel) # key = layer name, value = file name self.terms, timeField = validateTerms(cmodel, self.coeffs, self.layers) self.interpolations = validateInterpolations(cmodel, self.layers) self.units = validateUnits(cmodel, self.layers) self.gmused = [value for term, value in cmodel['terms'].items() if 'pga' in value.lower() or 'pgv' in value.lower() or 'mmi' in value.lower()] self.modelrefs, self.longrefs, self.shortrefs = validateRefs(cmodel) if 'baselayer' not in cmodel: raise Exception('You must specify a base layer file in config.') if cmodel['baselayer'] not in list(self.layers.keys()): raise Exception('You must specify a base layer corresponding to one of the files in the layer section.') # get the geodict for the shakemap geodict = ShakeGrid.getFileGeoDict(shakefile, adjust='res') griddict, eventdict, specdict, fields, uncertainties = getHeaderData(shakefile) #YEAR = eventdict['event_timestamp'].year MONTH = MONTHS[(eventdict['event_timestamp'].month)-1] #DAY = eventdict['event_timestamp'].day #HOUR = eventdict['event_timestamp'].hour # now find the layer that is our base layer and get the largest bounds we can guarantee not to exceed shakemap bounds basefile = self.layers[cmodel['baselayer']] ftype = getFileType(basefile) if ftype == 'esri': basegeodict, firstcol = GDALGrid.getFileGeoDict(basefile) sampledict = basegeodict.getBoundsWithin(geodict) elif ftype == 'gmt': basegeodict, firstcol = GMTGrid.getFileGeoDict(basefile) sampledict = basegeodict.getBoundsWithin(geodict) else: raise Exception('All predictor variable grids must be a valid GMT or ESRI file type') # now load the shakemap, resampling and padding if necessary self.shakemap = ShakeGrid.load(shakefile, samplegeodict=sampledict, resample=True, doPadding=True, adjust='res') # take uncertainties into account if uncertfile is not None: try: self.uncert = ShakeGrid.load(uncertfile, samplegeodict=sampledict, resample=True, doPadding=True, adjust='res') except Exception: print('Could not read uncertainty file, ignoring uncertainties') self.uncert = None else: self.uncert = None
# load the predictor layers into a dictionary self.layerdict = {} # key = layer name, value = grid object for layername, layerfile in self.layers.items(): if isinstance(layerfile, list): for lfile in layerfile: if timeField == 'MONTH': if lfile.find(MONTH) > -1: layerfile = lfile ftype = getFileType(layerfile) interp = self.interpolations[layername] if ftype == 'gmt': lyr = GMTGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True) elif ftype == 'esri': lyr = GDALGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True) else: msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % (layername, layerfile) raise Exception(msg) self.layerdict[layername] = lyr else: # first, figure out what kind of file we have (or is it a directory?) ftype = getFileType(layerfile) interp = self.interpolations[layername] if ftype == 'gmt': lyr = GMTGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True) elif ftype == 'esri': lyr = GDALGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True) else: msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % (layername, layerfile) raise Exception(msg) self.layerdict[layername] = lyr
shapes = {} for layername, layer in self.layerdict.items(): shapes[layername] = layer.getData().shape self.nuggets = [str(self.coeffs['b0'])] ckeys = list(self.terms.keys()) ckeys.sort() for key in ckeys: term = self.terms[key] coeff = self.coeffs[key] self.nuggets.append('(%g * %s)' % (coeff, term)) self.equation = ' + '.join(self.nuggets) if self.uncert is not None: self.nugmin = copy.copy(self.nuggets) self.nugmax = copy.copy(self.nuggets) # Find the terms with the shakemap input and replace them for these nuggets for k, nug in enumerate(self.nuggets): if "self.shakemap.getLayer('pga').getData()" in nug: self.nugmin[k] = self.nugmin[k].replace("self.shakemap.getLayer('pga').getData()", "(np.exp(np.log(self.shakemap.getLayer('pga').getData()) - self.uncert.getLayer('stdpga').getData()))") self.nugmax[k] = self.nugmax[k].replace("self.shakemap.getLayer('pga').getData()", "(np.exp(np.log(self.shakemap.getLayer('pga').getData()) + self.uncert.getLayer('stdpga').getData()))") elif "self.shakemap.getLayer('pgv').getData()" in nug: self.nugmin[k] = self.nugmin[k].replace("self.shakemap.getLayer('pgv').getData()", "(np.exp(np.log(self.shakemap.getLayer('pgv').getData()) - self.uncert.getLayer('stdpgv').getData()))") self.nugmax[k] = self.nugmax[k].replace("self.shakemap.getLayer('pgv').getData()", "(np.exp(np.log(self.shakemap.getLayer('pgv').getData()) + self.uncert.getLayer('stdpgv').getData()))") elif "self.shakemap.getLayer('mmi').getData()" in nug: self.nugmin[k] = self.nugmin[k].replace("self.shakemap.getLayer('mmi').getData()", "(np.exp(np.log(self.shakemap.getLayer('mmi').getData()) - self.uncert.getLayer('stdmmi').getData()))") self.nugmax[k] = self.nugmax[k].replace("self.shakemap.getLayer('mmi').getData()", "(np.exp(np.log(self.shakemap.getLayer('mmi').getData()) + self.uncert.getLayer('stdmmi').getData()))") self.equationmin = ' + '.join(self.nugmin) self.equationmax = ' + '.join(self.nugmax) else: self.equationmin = None self.equationmax = None self.geodict = self.shakemap.getGeoDict() try: self.slopemin = float(config['logistic_models'][model]['slopemin']) self.slopemax = float(config['logistic_models'][model]['slopemax']) except Exception: print('Could not find slopemin and/or slopemax in config, no limits will be applied') self.slopemin = 0. self.slopemax = 90.
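# Toy reconstruction of the nugget/equation assembly above. The class stores
# self.equation as a string of numpy expressions; presumably the calculate
# step eval()s it and pushes the result through a logistic link (assumption,
# since calculate() is not shown here). Coefficient and term values below
# are illustrative only:
import numpy as np

coeffs = {'b0': -3.649, 'b1': 0.0133, 'b2': 0.0364}
terms = {'b1': 'pga', 'b2': 'slope'}   # stand-ins for the config term strings
nuggets = [str(coeffs['b0'])]
for key in sorted(terms):
    nuggets.append('(%g * %s)' % (coeffs[key], terms[key]))
equation = ' + '.join(nuggets)   # '-3.649 + (0.0133 * pga) + (0.0364 * slope)'
pga = np.array([10., 50., 100.])     # %g
slope = np.array([5., 20., 35.])     # degrees
prob = 1./(1. + np.exp(-eval(equation)))
print(equation)
print(prob)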
def create_info(event_dir, lsmodels, lqmodels, eventsource='', eventsourcecode='', point=True): """Create info.json for ground failure product. Args: event_dir (str): Directory containing ground failure results. lsmodels (list): List of dictionaries of model summary info compiled by the hazdev function. If not specified, code will search for the hdf5 files for the preferred model and will create this dictionary and will apply default colorbars and bins. lqmodels (list): Same as above for liquefaction. point (bool): if True, event is a point source and warning should be displayed Returns: list: Filenames created (info.json) for this event. """ filenames = [] # Find the shakemap grid.xml file with open(os.path.join(event_dir, 'shakefile.txt'), 'r') as f: shakefile = f.read() files = os.listdir(event_dir) # Defaults in case no preferred model is present lsext = None lqext = None ls_alert = None lq_alert = None
# Get all info from dictionaries of preferred events, add in extent # and filename for lsm in lsmodels: # Add extent and filename for preferred model if lsm['preferred']: filesnippet = lsm['id'] # Read in extents flnm = '%s_extent.json' % filesnippet ls_extent_file = [f2 for f2 in files if flnm in f2] if len(ls_extent_file) == 1: ls_file = os.path.join(event_dir, ls_extent_file[0]) with open(ls_file) as f: ls_extent = json.load(f) else: raise OSError("Landslide extent not found.") lsm['extent'] = ls_extent # lsm['filename'] = flnm lsext = lsm['zoomext'] # Get zoom extent ls_alert = lsm['alert'] rmkeys = ['bin_edges', 'bin_colors', 'zoomext'] else: # Remove any alert keys rmkeys = ['bin_edges', 'bin_colors', 'zoomext', 'population_alert', 'alert', 'hazard_alert'] for key in rmkeys: if key in lsm: lsm.pop(key)
for lqm in lqmodels: if lqm['preferred']: filesnippet = lqm['id'] # Read in extents flnm = '%s_extent.json' % filesnippet lq_extent_file = [f2 for f2 in files if flnm in f2] if len(lq_extent_file) == 1: lq_file = os.path.join(event_dir, lq_extent_file[0]) with open(lq_file) as f: lq_extent = json.load(f) else: raise OSError("Liquefaction extent not found.") lqm['extent'] = lq_extent # lqm['filename'] = flnm lqext = lqm['zoomext'] # Get zoom extent lq_alert = lqm['alert'] rmkeys = ['bin_edges', 'bin_colors', 'zoomext'] else: # Remove any alert keys rmkeys = ['bin_edges', 'bin_colors', 'zoomext', 'population_alert', 'alert', 'hazard_alert'] for key in rmkeys: if key in lqm: lqm.pop(key)
# Try to get event info shake_grid = ShakeGrid.load(shakefile, adjust='res') event_dict = shake_grid.getEventDict() sm_dict = shake_grid.getShakeDict() base_url = 'https://earthquake.usgs.gov/earthquakes/eventpage/' # Is this a point source? # point = is_grid_point_source(shake_grid) # Temporarily hard code this until we can get a better solution via # new grid.xml attributes. #point = True net = eventsource code = eventsourcecode time = event_dict['event_timestamp'].strftime('%Y-%m-%dT%H:%M:%SZ') event_url = '%s%s%s#executive' % (base_url, net, code) # Get extents that work for both unless one is green and the other isn't if lq_alert == 'green' and ls_alert != 'green' and ls_alert is not None: xmin = lsext['xmin'] xmax = lsext['xmax'] ymin = lsext['ymin'] ymax = lsext['ymax'] elif lq_alert != 'green' and ls_alert == 'green' and lq_alert is not None: xmin = lqext['xmin'] xmax = lqext['xmax'] ymin = lqext['ymin'] ymax = lqext['ymax'] else: xmin = np.min((lqext['xmin'], lsext['xmin'])) xmax = np.max((lqext['xmax'], lsext['xmax'])) ymin = np.min((lqext['ymin'], lsext['ymin'])) ymax = np.max((lqext['ymax'], lsext['ymax'])) # Should we display the warning about point source? rupture_warning = False if point and event_dict['magnitude'] > 6.5: rupture_warning = True # Create info.json for website rendering and metadata purposes info_dict = { 'Summary': { 'code': code, 'net': net, 'magnitude': event_dict['magnitude'], 'depth': event_dict['depth'], 'time': time, 'lat': event_dict['lat'], 'lon': event_dict['lon'], 'event_url': event_url, 'shakemap_version': sm_dict['shakemap_version'], 'rupture_warning': rupture_warning, 'point_source': point, 'zoom_extent': [xmin, xmax, ymin, ymax] }, 'Landslides': lsmodels, 'Liquefaction': lqmodels } info_file = os.path.join(event_dir, 'info.json') with open(info_file, 'w') as f: json.dump(info_dict, f) # allow_nan=False filenames.append(info_file) return filenames
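# Minimal standalone sketch of the zoom-extent rule used above: defer to the
# non-green model's extent when exactly one alert is green, otherwise take
# the union of the two extents (values below are illustrative):
def merge_extents(lsext, lqext, ls_alert, lq_alert):
    if lq_alert == 'green' and ls_alert != 'green' and ls_alert is not None:
        ext = lsext
    elif lq_alert != 'green' and ls_alert == 'green' and lq_alert is not None:
        ext = lqext
    else:
        ext = {'xmin': min(lqext['xmin'], lsext['xmin']),
               'xmax': max(lqext['xmax'], lsext['xmax']),
               'ymin': min(lqext['ymin'], lsext['ymin']),
               'ymax': max(lqext['ymax'], lsext['ymax'])}
    return [ext['xmin'], ext['xmax'], ext['ymin'], ext['ymax']]

lsext = {'xmin': -119., 'xmax': -117., 'ymin': 33.5, 'ymax': 35.}
lqext = {'xmin': -118.5, 'xmax': -116.5, 'ymin': 34., 'ymax': 35.5}
print(merge_extents(lsext, lqext, 'yellow', 'green'))  # landslide extent wins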
def _test_intensity(): datadir = os.path.abspath(os.path.join(homedir, "..", "data", "eventdata", "northridge")) shakefile = os.path.join(datadir, "northridge_grid.xml") topofile = os.path.join(datadir, "northridge_topo.grd") rupturefile = os.path.join(datadir, "northridge_fault.txt") cityfile = os.path.join(datadir, "northridge_cities.txt") coastfile = os.path.join(datadir, "northridge_coastline.json") countryfile = os.path.join(datadir, "northridge_countries.json") statefile = os.path.join(datadir, "northridge_states.json") lakefile = os.path.join(datadir, "northridge_lakes.json") oceanfile = os.path.join(datadir, "northridge_ocean.json") stationfile = os.path.join(datadir, "northridge_stations.db") roadfile = os.path.join(datadir, "northridge_roads.json") tancptfile = os.path.join(shakedir, "shakemap", "mapping", "tan.cpt") shakecptfile = os.path.join(shakedir, "shakemap", "mapping", "shakecpt.cpt") layerdict = { "coast": coastfile, "ocean": oceanfile, "lake": lakefile, "country": countryfile, "roads": roadfile, "state": statefile, } tancolormap = ColorPalette.fromPreset("shaketopo") shakecolormap = ColorPalette.fromPreset("mmi") cities = BasemapCities.loadFromCSV(cityfile) shakemap = ShakeGrid.load(shakefile, adjust="res") stations = StationList(stationfile) rupture = QuadRupture.readRuptureFile(rupturefile) edict = shakemap.getEventDict() eventdict = { "lat": edict["lat"], "lon": edict["lon"], "depth": edict["depth"], "mag": edict["magnitude"], "time": edict["event_timestamp"], } source = Source(eventdict, rupture) maker = MapMaker(shakemap, topofile, stations, rupture, layerdict, source, cities) # draw intensity map outfolder = os.path.expanduser("~") maker.setIntensityLayer("mmi") maker.setIntensityGMTColorMap(shakecolormap) intensity_map = maker.drawIntensityMap(outfolder) print("Intensity map saved as: %s" % intensity_map) # draw contour maps maker.setContourGMTColorMap(tancolormap) # Draw pgv contours maker.setContourLayer("pgv") contour_pgv_map = maker.drawContourMap(outfolder) print("PGV contour map saved as: %s" % contour_pgv_map) # Draw pga contours maker.setContourLayer("pga") contour_pga_map = maker.drawContourMap(outfolder) print("PGA contour map saved as: %s" % contour_pga_map) # Draw psa0.3 contours maker.setContourLayer("psa03") contour_psa03_map = maker.drawContourMap(outfolder) print("PSA0.3 contour map saved as: %s" % contour_psa03_map) # Draw psa1.0 contours maker.setContourLayer("psa10") contour_psa10_map = maker.drawContourMap(outfolder) print("PSA1.0 contour map saved as: %s" % contour_psa10_map) # Draw psa3.0 contours maker.setContourLayer("psa30") contour_psa30_map = maker.drawContourMap(outfolder) print("PSA3.0 contour map saved as: %s" % contour_psa30_map)
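# The five contour-drawing blocks above differ only in layer name and label,
# so they could be collapsed into a loop; a sketch assuming the same MapMaker
# instance and outfolder as in the test:
for layer, label in [('pgv', 'PGV'), ('pga', 'PGA'), ('psa03', 'PSA0.3'),
                     ('psa10', 'PSA1.0'), ('psa30', 'PSA3.0')]:
    maker.setContourLayer(layer)
    contour_map = maker.drawContourMap(outfolder)
    print('%s contour map saved as: %s' % (label, contour_map))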
def computeParea(grid2D, proj='moll', probthresh=0.0, shakefile=None, shakethreshtype='pga', shakethresh=0.0): """ Alternative to aggregate hazard (Hagg); Parea is the sum of the areas of grid cells that exceed a given probability. Args: grid2D: grid2D object of model output. proj: projection to use to obtain equal area, 'moll' mollweide, or 'laea' lambert equal area. probthresh: Optional, float or list of probability thresholds. shakefile: Optional, path to shakemap file to use for ground motion threshold. shakethreshtype: Optional, type of ground motion to use for shakethresh, 'pga', 'pgv', or 'mmi'. shakethresh: Optional, float shaking threshold in %g for pga, cm/s for pgv, float for mmi. Returns: Parea (float) if no or only one probthresh defined, otherwise, a list of floats of Parea corresponding to all specified probthresh values. """ if not isinstance(probthresh, (list, np.ndarray)): probthresh = [probthresh] Parea = [] bounds = grid2D.getBounds() lat0 = np.mean((bounds[2], bounds[3])) lon0 = np.mean((bounds[0], bounds[1])) projs = ('+proj=%s +lat_0=%f +lon_0=%f +x_0=0 +y_0=0 +ellps=WGS84 +units=km +no_defs' % (proj, lat0, lon0)) geodict = grid2D.getGeoDict() if shakefile is not None: if shakethresh < 0.: raise Exception('shaking threshold must be greater than or equal to zero') tmpdir = tempfile.mkdtemp() # resample shakemap to grid2D temp = ShakeGrid.load(shakefile) junkfile = os.path.join(tmpdir, 'temp.bil') GDALGrid.copyFromGrid(temp.getLayer(shakethreshtype)).save(junkfile) shk = quickcut(junkfile, geodict, precise=True, method='bilinear') shutil.rmtree(tmpdir) if shk.getGeoDict() != geodict: raise Exception('shakemap was not resampled to exactly the same geodict as the model') grid = grid2D.project(projection=projs) geodictRS = grid.getGeoDict() cell_area_km2 = geodictRS.dx * geodictRS.dy model = grid.getData() model[np.isnan(model)] = -1. for probt in probthresh: if probt < 0.: raise Exception('probability threshold must be greater than or equal to zero') modcop = model.copy() if shakefile is not None: shkgrid = shk.project(projection=projs) shkdat = shkgrid.getData() # use -1 to avoid nan errors and warnings, will always be thrown out because default probthresh is 0 and must be positive. shkdat[np.isnan(shkdat)] = -1. modcop[shkdat < shakethresh] = -1. one_mat = np.ones_like(modcop) Parea.append(np.sum(one_mat[modcop >= probt] * cell_area_km2)) if len(Parea) == 1: Parea = Parea[0] return Parea
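# Standalone toy illustration of the core Parea computation: count the cells
# at or above the probability threshold and multiply by the equal-area cell
# size (here assumed to be 1 km x 1 km after projection):
import numpy as np

cell_area_km2 = 1.0 * 1.0
model = np.array([[0.0, 0.2, 0.6],
                  [0.1, 0.7, 0.9],
                  [np.nan, 0.3, 0.5]])
model[np.isnan(model)] = -1.   # same nan handling as above
probthresh = 0.5
Parea = np.sum(np.ones_like(model)[model >= probthresh] * cell_area_km2)
print(Parea)   # 4.0 -- four cells at or above 0.5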
def test_save(): tdir = tempfile.mkdtemp() testfile = os.path.join(tdir, 'test.xml') try: print('Testing save/read functionality for shakemap grids...') pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4) pgv = np.arange(1, 17, dtype=np.float32).reshape(4, 4) mmi = np.arange(2, 18, dtype=np.float32).reshape(4, 4) geodict = GeoDict({ 'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0, 'dy': 1.0, 'ny': 4, 'nx': 4 }) layers = OrderedDict() layers['pga'] = pga layers['pgv'] = pgv layers['mmi'] = mmi shakeDict = { 'event_id': 'usabcd1234', 'shakemap_id': 'usabcd1234', 'shakemap_version': 1, 'code_version': '4.0', 'process_timestamp': datetime.utcnow(), 'shakemap_originator': 'us', 'map_status': 'RELEASED', 'shakemap_event_type': 'ACTUAL' } eventDict = { 'event_id': 'usabcd1234', 'magnitude': 7.6, 'depth': 1.4, 'lat': 2.0, 'lon': 2.0, 'event_timestamp': datetime.utcnow(), 'event_network': 'us', 'event_description': 'sample event' } uncDict = {'pga': (0.0, 0), 'pgv': (0.0, 0), 'mmi': (0.0, 0)} shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict) shake.save(testfile, version=3) shake2 = ShakeGrid.load(testfile) for layer in ['pga', 'pgv', 'mmi']: tdata = shake2.getLayer(layer).getData() np.testing.assert_almost_equal(tdata, layers[layer]) print('Passed save/read functionality for shakemap grids.')
print('Testing getFileGeoDict method...') fgeodict = ShakeGrid.getFileGeoDict(testfile) print('Passed getFileGeoDict method.')
print('Testing loading with bounds (no resampling or padding)...') sampledict = GeoDict({ 'xmin': -0.5, 'xmax': 3.5, 'ymin': -0.5, 'ymax': 3.5, 'dx': 1.0, 'dy': 1.0, 'ny': 5, 'nx': 5 }) shake3 = ShakeGrid.load(testfile, samplegeodict=sampledict, resample=False, doPadding=False, padValue=np.nan) tdata = shake3.getLayer('pga').getData() np.testing.assert_almost_equal(tdata, layers['pga']) print('Passed loading with bounds (no resampling or padding)...')
print('Testing loading shakemap with padding, no resampling...') newdict = GeoDict({ 'xmin': -0.5, 'xmax': 4.5, 'ymin': -0.5, 'ymax': 4.5, 'dx': 1.0, 'dy': 1.0, 'ny': 6, 'nx': 6 }) shake4 = ShakeGrid.load(testfile, samplegeodict=newdict, resample=False, doPadding=True, padValue=np.nan) output = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, 1.0, 2.0, 3.0, np.nan], [np.nan, 4.0, 5.0, 6.0, 7.0, np.nan], [np.nan, 8.0, 9.0, 10.0, 11.0, np.nan], [np.nan, 12.0, 13.0, 14.0, 15.0, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]]) tdata = shake4.getLayer('pga').getData() np.testing.assert_almost_equal(tdata, output) print('Passed loading shakemap with padding, no resampling...')
# make a bigger grid pga = np.arange(0, 36, dtype=np.float32).reshape(6, 6) pgv = np.arange(1, 37, dtype=np.float32).reshape(6, 6) mmi = np.arange(2, 38, dtype=np.float32).reshape(6, 6) layers = OrderedDict() layers['pga'] = pga layers['pgv'] = pgv layers['mmi'] = mmi geodict = GeoDict({ 'xmin': 0.5, 'xmax': 5.5, 'ymin': 0.5, 'ymax': 5.5, 'dx': 1.0, 'dy': 1.0, 'ny': 6, 'nx': 6 }) shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict) shake.save(testfile, version=3) print('Testing resampling, no padding...') littledict = GeoDict({ 'xmin': 2.0, 'xmax': 4.0, 'ymin': 2.0, 'ymax': 4.0, 'dx': 1.0, 'dy': 1.0, 'ny': 3, 'nx': 3 }) shake5 = ShakeGrid.load(testfile, samplegeodict=littledict, resample=True, doPadding=False, padValue=np.nan) output = np.array([[10.5, 11.5, 12.5], [16.5, 17.5, 18.5], [22.5, 23.5, 24.5]]) tdata = shake5.getLayer('pga').getData() np.testing.assert_almost_equal(tdata, output) print('Passed resampling, no padding...')
print('Testing resampling and padding...') pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4) pgv = np.arange(1, 17, dtype=np.float32).reshape(4, 4) mmi = np.arange(2, 18, dtype=np.float32).reshape(4, 4) geodict = GeoDict({ 'xmin': 0.5, 'ymax': 3.5, 'ymin': 0.5, 'xmax': 3.5, 'dx': 1.0, 'dy': 1.0, 'ny': 4, 'nx': 4 }) layers = OrderedDict() layers['pga'] = pga layers['pgv'] = pgv layers['mmi'] = mmi shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict) shake.save(testfile, version=3) bigdict = GeoDict({ 'xmin': 0.0, 'xmax': 4.0, 'ymin': 0.0, 'ymax': 4.0, 'dx': 1.0, 'dy': 1.0, 'ny': 5, 'nx': 5 }) shake6 = ShakeGrid.load(testfile, samplegeodict=bigdict, resample=True, doPadding=True, padValue=np.nan) tdata = shake6.getLayer('pga').getData() output = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 2.5, 3.5, 4.5, np.nan], [np.nan, 6.5, 7.5, 8.5, np.nan], [np.nan, 10.5, 11.5, 12.5, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan]]) np.testing.assert_almost_equal(tdata, output) print('Passed resampling and padding...') except Exception as error: print('Failed to read grid.xml format file "%s". Error "%s".' % (testfile, str(error))) assert 0 == 1 finally: if os.path.isdir(tdir): shutil.rmtree(tdir)
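# Hand-check of the expected resampled value in the test above: the 6x6
# arange grid has cell centers at 0.5..5.5, so sampling at (x=2.0, y=4.0)
# lands midway between columns 1-2 and rows 1-2, and the bilinear value is
# the mean of the four surrounding cells:
import numpy as np

pga = np.arange(0, 36, dtype=np.float32).reshape(6, 6)
corners = pga[1:3, 1:3]   # [[7, 8], [13, 14]]
assert np.isclose(corners.mean(), 10.5)   # matches output[0][0] in the test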
def create_info(event_dir, lsmodels=None, lqmodels=None, eventsource='', eventsourcecode='', point=True): """Create info.json for ground failure product. Args: event_dir (str): Directory containing ground failure results. lsmodels (list): List of dictionaries of model summary info compiled by the hazdev function. If not specified, code will search for the hdf5 files for the preferred model and will create this dictionary and will apply default colorbars and bins. lqmodels (list): Same as above for liquefaction. point (bool): if True, event is a point source and warning should be displayed Returns: list: Filenames created (info.json) for this event. """ filenames = [] # Find the shakemap grid.xml file with open(os.path.join(event_dir, 'shakefile.txt'), 'r') as f: shakefile = f.read() files = os.listdir(event_dir)
if lsmodels is None and lqmodels is None: # Read in the "preferred" model for landslides and liquefaction ls_mod_file = [f2 for f2 in files if 'jessee_2017.hdf5' in f2] if len(ls_mod_file) == 1: ls_file = os.path.join(event_dir, ls_mod_file[0]) ls_mod = loadlayers(ls_file) # get extents lsext = get_zoomextent(ls_mod['model']['grid']) else: raise OSError("Preferred landslide model result not found.") lq_mod_file = [f2 for f2 in files if 'zhu_2017_general.hdf5' in f2] if len(lq_mod_file) == 1: lq_file = os.path.join(event_dir, lq_mod_file[0]) lq_mod = loadlayers(lq_file) # get extents lqext = get_zoomextent(lq_mod['model']['grid']) else: raise OSError("Preferred liquefaction model result not found.") # Read in extents ls_extent_file = [ f2 for f2 in files if 'jessee_2017_extent.json' in f2 ] if len(ls_extent_file) == 1: ls_file = os.path.join(event_dir, ls_extent_file[0]) with open(ls_file) as f: jessee_extent = json.load(f) else: raise OSError("Landslide extent not found.") lq_extent_file = [ f2 for f2 in files if 'zhu_2017_general_extent.json' in f2 ] if len(lq_extent_file) == 1: lq_file = os.path.join(event_dir, lq_extent_file[0]) with open(lq_file) as f: zhu_extent = json.load(f) else: raise OSError("Liquefaction extent not found.")
# Read in default paths to get location of the population grid default_file = os.path.join(os.path.expanduser('~'), '.gfail_defaults') defaults = ConfigObj(default_file) pop_file = defaults['popfile'] # Landslide alert statistics ls_stats = computeStats(ls_mod['model']['grid'], probthresh=None, shakefile=shakefile, shakethresh=10.0, shakethreshtype='pga', statprobthresh=None, pop_file=pop_file) # Liquefaction alert statistics lq_stats = computeStats(lq_mod['model']['grid'], probthresh=None, shakefile=shakefile, shakethresh=10.0, shakethreshtype='pga', statprobthresh=None, pop_file=pop_file) # Get alert levels ls_haz_level = ls_stats['hagg_0.10g'] lq_haz_level = lq_stats['hagg_0.10g'] ls_pop_level = ls_stats['exp_pop_0.10g'] lq_pop_level = lq_stats['exp_pop_0.10g'] # If hazard alert level is less than 0.1, zero it out # (due to rounding to 2 sig digits later, this can give # overly precise results, e.g., 0.000012 if we don't clip, # but this doesn't happen with pop alerts because they are # integers) if ls_haz_level < 0.1: ls_haz_level = 0.0 if lq_haz_level < 0.1: lq_haz_level = 0.0 # Convert levels into categories alert_info = get_alert(ls_haz_level, lq_haz_level, ls_pop_level, lq_pop_level) # Unpack info (I think we are now assuming that the statements will be # constructed on the website and so we don't need them here) ls_haz_alert, ls_pop_alert, lq_haz_alert, lq_pop_alert, \ ls_alert, lq_alert = alert_info
if lsmodels is None: lsmodels = [{ 'id': 'nowicki_jessee_2017', 'title': 'Nowicki Jessee and others (2017)', 'overlay': 'jessee_2017.png', 'extent': jessee_extent, 'units': "Proportion of area affected", 'preferred': True, 'alert': ls_alert, 'hazard_alert': { 'color': ls_haz_alert, 'value': set_num_precision(ls_haz_level, 2, 'float'), 'parameter': 'Aggregate Hazard', 'units': 'km^2' }, 'population_alert': { 'color': ls_pop_alert, 'value': set_num_precision(ls_pop_level, 2, 'int'), 'parameter': 'Population exposure', 'units': 'people' }, 'probability': { 'max': float("%.2f" % ls_stats['Max']), 'std': float("%.2f" % ls_stats['Std']), 'hagg0.1g': float("%.2f" % ls_stats['hagg_0.10g']), 'popexp0.1g': float("%.2f" % ls_stats['exp_pop_0.10g']) } }] if lqmodels is None: lqmodels = [{ 'id': 'zhu_2017', 'title': 'Zhu and others (2017)', 'overlay': 'zhu_2017.png', 'extent': zhu_extent, 'units': "Proportion of area affected", 'preferred': True, 'alert': lq_alert, 'hazard_alert': { 'color': lq_haz_alert, 'value': set_num_precision(lq_haz_level, 2, 'float'), 'parameter': 'Aggregate Hazard', 'units': 'km^2' }, 'population_alert': { 'color': lq_pop_alert, 'value': set_num_precision(lq_pop_level, 2, 'int'), 'parameter': 'Population exposure', 'units': 'people' }, 'probability': { 'max': float("%.2f" % lq_stats['Max']), 'std': float("%.2f" % lq_stats['Std']), 'hagg0.1g': float("%.2f" % lq_stats['hagg_0.10g']), 'popexp0.1g': float("%.2f" % lq_stats['exp_pop_0.10g']) } }]
else: # Get all info from dictionaries of preferred events, add in extent # and filename for lsm in lsmodels: # Add extent and filename for preferred model if lsm['preferred']: filesnippet = lsm['id'] # Read in extents flnm = '%s_extent.json' % filesnippet ls_extent_file = [f2 for f2 in files if flnm in f2] if len(ls_extent_file) == 1: ls_file = os.path.join(event_dir, ls_extent_file[0]) with open(ls_file) as f: ls_extent = json.load(f) else: raise OSError("Landslide extent not found.") lsm['extent'] = ls_extent # lsm['filename'] = flnm lsext = lsm['zoomext'] # Get zoom extent ls_alert = lsm['alert'] rmkeys = ['bin_edges', 'bin_colors', 'zoomext'] else: # Remove any alert keys rmkeys = [ 'bin_edges', 'bin_colors', 'zoomext', 'population_alert', 'alert', 'hazard_alert' ] for key in rmkeys: if key in lsm: lsm.pop(key) for lqm in lqmodels: if lqm['preferred']: filesnippet = lqm['id'] # Read in extents flnm = '%s_extent.json' % filesnippet lq_extent_file = [f2 for f2 in files if flnm in f2] if len(lq_extent_file) == 1: lq_file = os.path.join(event_dir, lq_extent_file[0]) with open(lq_file) as f: lq_extent = json.load(f) else: raise OSError("Liquefaction extent not found.") lqm['extent'] = lq_extent # lqm['filename'] = flnm lqext = lqm['zoomext'] # Get zoom extent lq_alert = lqm['alert'] rmkeys = ['bin_edges', 'bin_colors', 'zoomext'] else: # Remove any alert keys rmkeys = [ 'bin_edges', 'bin_colors', 'zoomext', 'population_alert', 'alert', 'hazard_alert' ] for key in rmkeys: if key in lqm: lqm.pop(key)
# Try to get event info shake_grid = ShakeGrid.load(shakefile, adjust='res') event_dict = shake_grid.getEventDict() sm_dict = shake_grid.getShakeDict() base_url = 'https://earthquake.usgs.gov/earthquakes/eventpage/' # Is this a point source? # point = is_grid_point_source(shake_grid) # Temporarily hard code this until we can get a better solution via # new grid.xml attributes. #point = True net = eventsource code = eventsourcecode time = event_dict['event_timestamp'].strftime('%Y-%m-%dT%H:%M:%SZ') event_url = '%s%s%s#executive' % (base_url, net, code) # Get extents that work for both unless one is green and the other isn't if lq_alert == 'green' and ls_alert != 'green' and ls_alert is not None: xmin = lsext['xmin'] xmax = lsext['xmax'] ymin = lsext['ymin'] ymax = lsext['ymax'] elif lq_alert != 'green' and ls_alert == 'green' and lq_alert is not None: xmin = lqext['xmin'] xmax = lqext['xmax'] ymin = lqext['ymin'] ymax = lqext['ymax'] else: xmin = np.min((lqext['xmin'], lsext['xmin'])) xmax = np.max((lqext['xmax'], lsext['xmax'])) ymin = np.min((lqext['ymin'], lsext['ymin'])) ymax = np.max((lqext['ymax'], lsext['ymax'])) # Should we display the warning about point source? rupture_warning = False if point and event_dict['magnitude'] > 6.5: rupture_warning = True # Create info.json for website rendering and metadata purposes info_dict = { 'Summary': { 'code': code, 'net': net, 'magnitude': event_dict['magnitude'], 'depth': event_dict['depth'], 'time': time, 'lat': event_dict['lat'], 'lon': event_dict['lon'], 'event_url': event_url, 'shakemap_version': sm_dict['shakemap_version'], 'rupture_warning': rupture_warning, 'point_source': point, 'zoom_extent': [xmin, xmax, ymin, ymax] }, 'Landslides': lsmodels, 'Liquefaction': lqmodels } info_file = os.path.join(event_dir, 'info.json') with open(info_file, 'w') as f: json.dump(info_dict, f) # allow_nan=False filenames.append(info_file) return filenames
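# Sketch of the hazard-alert clipping described above: values under 0.1 km^2
# are zeroed before rounding to two significant digits, so the product never
# reports spuriously precise tiny areas. round_sig below is a hypothetical
# stand-in for the project's set_num_precision helper:
import math

def round_sig(x, sig=2):
    if x == 0:
        return 0.0
    return round(x, -int(math.floor(math.log10(abs(x)))) + (sig - 1))

for haz_level in (0.000012, 0.09, 0.1234, 57.89):
    clipped = 0.0 if haz_level < 0.1 else haz_level
    print(haz_level, '->', round_sig(clipped))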
def test_map(): tdir = tempfile.mkdtemp() try: mapname = os.path.join(tdir, 'test_map.pdf') # where is this script? homedir = os.path.dirname(os.path.abspath(__file__)) datadir = os.path.join(homedir, '..', 'data', 'nyc') shakefile = os.path.join(datadir, 'm5.8.nyc.grid.xml') shakegrid = ShakeGrid.load(shakefile) countyfile = os.path.join(datadir, 'county_results.txt') tractfile = os.path.join(datadir, 'tract_results.txt') damage_occupancy = os.path.join(datadir, 'building_damage_occup.txt') hazinfo = HazusInfo(tdir, tractfile, countyfile, damage_occupancy) green, yellow, red = hazinfo.createTaggingTables() green_table = ('\\begin{tabularx}{3.4cm}{lr}\n& \\\\\n\\' 'multicolumn{2}{c}{\\textbf{INSPECTED}} ' '\\\\\n\\textbf{Occupancy} & \\textbf{\\#' ' of tags} \\\\\nResidential & 35k \\\\\nCommercial' ' & 4k \\\\\nIndustrial & 900 \\\\\nEducation & 158 ' '\\\\\nAgriculture & 94 \\\\\nGovernment & 77 \\\\\n' '\\end{tabularx}') yellow_table = ('\\begin{tabularx}{3.4cm}{lr}\n& \\\\\n\\multicolumn' '{2}{c}{\\textbf{RESTRICTED USE}} \\\\\n\\textbf' '{Occupancy} & \\textbf{\\# of tags} \\\\\n' 'Residential & 1k \\\\\nCommercial & 161 \\\\\n' 'Industrial & 30 \\\\\nEducation & 5 \\\\\n' 'Agriculture & 2 \\\\\nGovernment & 2 \\\\\n\\' 'end{tabularx}') red_table = ('\\begin{tabularx}{3.4cm}{lr}\n& \\\\\n\\multicolumn' '{2}{c}{\\textbf{UNSAFE}} \\\\\n\\textbf{Occupancy} ' '& \\textbf{\\# of tags} \\\\\nResidential & 101 ' '\\\\\nCommercial & 12 \\\\\nIndustrial & 1 \\\\\n' 'Education & 0 \\\\\nAgriculture & 0 \\\\\n' 'Government & 0 \\\\\n\\end{tabularx}') assert green == green_table assert yellow == yellow_table assert red == red_table print('Green:') print(green) print() print('Yellow:') print(yellow) print() print('Red:') print(red) loss = hazinfo.createEconTable() loss_table = ('\\begin{tabularx}{\\barwidth}{lc*{1}{>{\\raggedleft' '\\arraybackslash}X}}\n\\hline\n\\textbf{County} & ' '\\textbf{State} & \\textbf{Total (\\textdollar M)} ' '\\\\\n\\hline\n\\truncate{4cm}{Kings} & NY & 3,183 ' '\\\\\n\\truncate{4cm}{Richmond} & NY & 493 \\\\\n\\' 'truncate{4cm}{New York} & NY & 272 \\\\\n\\' 'truncate{4cm}{Queens} & NY & 202 \\\\\n\\truncate' '{4cm}{Hudson} & NJ & 132 \\\\\n\\truncate{4cm}{Union} ' '& NJ & 93 \\\\\n\\truncate{4cm}{Monmouth} & NJ & 74 ' '\\\\\n\\multicolumn{2}{l}{\\textbf{Total (19 counties)}} ' '& \\multicolumn{1}{>{\\raggedleft}X}{\\textbf{4,643}} ' '\\\\\n\\hline\n\\end{tabularx}') assert loss == loss_table print() print('Econ Losses:') print(loss) injury = hazinfo.createInjuryTable() injury_table = ('\\begin{tabularx}{\\barwidth}{lc*{2}{>{\\raggedleft' '\\arraybackslash}X}}\n\\hline\n\\textbf{County} & ' '\\textbf{State} & \\textbf{Population} & ' '\\textbf{Total NFI} \\\\\n\\hline\n\\truncate{2.4cm}' '{Kings} & NY & 2,505k & 302 \\\\\n\\truncate{2.4cm}' '{Richmond} & NY & 469k & 36 \\\\\n\\truncate{2.4cm}' '{New York} & NY & 1,586k & 15 \\\\\n\\truncate{2.4cm}' '{Queens} & NY & 2,231k & 32 \\\\\n\\truncate{2.4cm}' '{Hudson} & NJ & 634k & 12 \\\\\n\\truncate{2.4cm}' '{Union} & NJ & 536k & 10 \\\\\n\\truncate{2.4cm}' '{Monmouth} & NJ & 630k & 7 \\\\\n\\multicolumn' '{2}{l}{\\textbf{Total (19 counties)}} & ' '\\multicolumn{1}{>{\\raggedleft}X}{\\textbf' '{18,517k}} & \\multicolumn{1}{>{\\raggedleft}' 'X}{\\textbf{447}} \\\\\n\\hline\n\\end{tabularx}') assert injury == injury_table print() print('Injuries:') print(injury) shelter = hazinfo.createShelterTable() shelter_table = ('\\begin{tabularx}{\\barwidth}{lc*{3}{>{\\raggedleft' '\\arraybackslash}X}}\n\\hline\n\\' ' & & 
\\textbf{Total}' ' & \\textbf{Displ} & \\textbf{Total} \\\\\n\\' ' & & \\textbf{House}' ' & \\textbf{House} & \\textbf{People} \\\\\n\\' 'textbf{County} & \\textbf{State} & \\textbf{holds}' ' & \\textbf{holds} & \\\\\n\\hline\n\\truncate' '{2.4cm}{Kings} & NY & 917k & 2k & 2k \\\\\n\\' 'truncate{2.4cm}{Richmond} & NY & 166k & 162 & 106' ' \\\\\n\\truncate{2.4cm}{New York} & NY & 764k & ' '102 & 52 \\\\\n\\truncate{2.4cm}{Queens} & NY & ' '780k & 146 & 106 \\\\\n\\truncate{2.4cm}{Hudson}' ' & NJ & 246k & 72 & 46 \\\\\n\\truncate{2.4cm}' '{Union} & NJ & 188k & 44 & 36 \\\\\n\\' 'truncate{2.4cm}{Monmouth} & NJ & 234k & ' '18 & 10 \\\\\n\\multicolumn{2}{l}{\\textbf' '{Total (19 counties)}} & \\multicolumn{1}' '{>{\\raggedleft}X}{\\textbf{6,794k}} & ' '\\multicolumn{1}{>{\\raggedleft}X}{\\textbf{3k}}' ' & \\multicolumn{1}{>{\\raggedleft}X}{\\textbf{2k}}' ' \\\\\n\\hline\n\\end{tabularx}') assert shelter == shelter_table print() print('Shelter Needs:') print(shelter) debris = hazinfo.createDebrisTable() debris_table = ('\\begin{tabularx}{\\barwidth}{l*{1}{>{\\raggedleft' '\\arraybackslash}X}}\n\\hline\n\\ ' '& \\textbf{Tons} \\\\\n\\textbf{Category} & ' '\\textbf{(millions)} \\\\\n\\hline\nBrick / Wood ' '& 0.449 \\\\\nReinforced Concrete / Steel & 0.149' ' \\\\\n\\textbf{Total} & \\textbf{0.598} \\\\\n&' ' \\\\\n& \\\\\n\\textbf{Truck Loads (@25 tons/' 'truck)} & \\textbf{23,908} \\\\\n\\end{tabularx}') assert debris == debris_table print() print('Debris:') print(debris) model_config = {} model_config['states'] = os.path.join(datadir, 'nyc_states.shp') model_config['counties'] = os.path.join(datadir, 'nyc_counties.shp') model_config['tracts'] = os.path.join(datadir, 'nyc_tracts.shp') model_config['ocean_vectors'] = os.path.join(datadir, 'nyc_oceans.shp') hazinfo.drawHazusMap(shakegrid, mapname, model_config) assert os.path.isfile(mapname) finally: shutil.rmtree(tdir)
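# The truck-load figure in the debris table above is tonnage divided by 25
# tons per truck. Note that 0.598e6 / 25 = 23,920, not the 23,908 shown, so
# the generator presumably divides the unrounded tonnage (~597,700 tons)
# before display (assumption; the exact intermediate value is not in this test):
total_mtons = 0.449 + 0.149
print('%.3f M tons -> %d truck loads from the rounded total' %
      (total_mtons, int(total_mtons * 1e6 / 25)))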
def __init__(self,config,shakefile,model): if model not in getLogisticModelNames(config): raise Exception('Could not find a model called "%s" in config %s.' % (model,config)) #do everything here short of calculations - parse config, assemble eqn strings, load data. self.model = model cmodel = config['logistic_models'][model] self.coeffs = validateCoefficients(cmodel) self.layers = validateLayers(cmodel)#key = layer name, value = file name self.terms,timeField = validateTerms(cmodel,self.coeffs,self.layers) self.interpolations = validateInterpolations(cmodel,self.layers) self.units = validateUnits(cmodel,self.layers) if 'baselayer' not in cmodel: raise Exception('You must specify a base layer file in config.') if cmodel['baselayer'] not in list(self.layers.keys()): raise Exception('You must specify a base layer corresponding to one of the files in the layer section.') #get the geodict for the shakemap geodict = ShakeGrid.getFileGeoDict(shakefile,adjust='res') griddict,eventdict,specdict,fields,uncertainties = getHeaderData(shakefile) YEAR = eventdict['event_timestamp'].year MONTH = MONTHS[(eventdict['event_timestamp'].month)-1] DAY = eventdict['event_timestamp'].day HOUR = eventdict['event_timestamp'].hour #now find the layer that is our base layer and get the largest bounds we can guarantee not to exceed shakemap bounds basefile = self.layers[cmodel['baselayer']] ftype = getFileType(basefile) if ftype == 'esri': basegeodict = GDALGrid.getFileGeoDict(basefile) sampledict = basegeodict.getBoundsWithin(geodict) elif ftype == 'gmt': basegeodict = GMTGrid.getFileGeoDict(basefile) sampledict = basegeodict.getBoundsWithin(geodict) else: raise Exception('All predictor variable grids must be a valid GMT or ESRI file type') #now load the shakemap, resampling and padding if necessary self.shakemap = ShakeGrid.load(shakefile,samplegeodict=sampledict,resample=True,doPadding=True,adjust='res') #load the predictor layers into a dictionary self.layerdict = {} #key = layer name, value = grid object for layername,layerfile in self.layers.items(): if isinstance(layerfile,list): for lfile in layerfile: if timeField == 'MONTH': if lfile.find(MONTH) > -1: layerfile = lfile ftype = getFileType(layerfile) interp = self.interpolations[layername] if ftype == 'gmt': lyr = GMTGrid.load(layerfile,sampledict,resample=True,method=interp,doPadding=True) elif ftype == 'esri': lyr = GDALGrid.load(layerfile,sampledict,resample=True,method=interp,doPadding=True) else: msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % (layername,layerfile) raise Exception(msg) self.layerdict[layername] = lyr else: #first, figure out what kind of file we have (or is it a directory?) ftype = getFileType(layerfile) interp = self.interpolations[layername] if ftype == 'gmt': lyr = GMTGrid.load(layerfile,sampledict,resample=True,method=interp,doPadding=True) elif ftype == 'esri': lyr = GDALGrid.load(layerfile,sampledict,resample=True,method=interp,doPadding=True) else: msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % (layername,layerfile) raise Exception(msg) self.layerdict[layername] = lyr shapes = {} for layername,layer in self.layerdict.items(): shapes[layername] = layer.getData().shape self.nuggets = [str(self.coeffs['b0'])] ckeys = list(self.terms.keys()) ckeys.sort() for key in ckeys: term = self.terms[key] coeff = self.coeffs[key] self.nuggets.append('(%g * %s)' % (coeff, term)) self.equation = ' + '.join(self.nuggets) self.geodict = self.shakemap.getGeoDict()
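# Sketch of the month-keyed layer selection used in both __init__ versions
# above: when a config layer lists one file per month, the file whose name
# contains the event month is chosen. The MONTHS definition and file names
# below are assumed/hypothetical:
import datetime

MONTHS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
event_time = datetime.datetime(1994, 1, 17, 12, 30)
MONTH = MONTHS[event_time.month - 1]
layerfiles = ['precip_Jan.grd', 'precip_Feb.grd', 'precip_Mar.grd']
chosen = [lf for lf in layerfiles if lf.find(MONTH) > -1][0]
print(chosen)   # precip_Jan.grd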
def modelMap(grids, shakefile=None, suptitle=None, inventory_shapefile=None, plotorder=None, maskthreshes=None, colormaps=None, boundaries=None, zthresh=0, scaletype='continuous', lims=None, logscale=False, ALPHA=0.7, maproads=True, mapcities=True, isScenario=False, roadfolder=None, topofile=None, cityfile=None, oceanfile=None, roadcolor='#6E6E6E', watercolor='#B8EEFF', countrycolor='#177F10', outputdir=None, savepdf=True, savepng=True, showplots=False, roadref='unknown', cityref='unknown', oceanref='unknown', printparam=False, ds=True, dstype='mean', upsample=False): """ This function creates maps of mapio grid layers (e.g. liquefaction or landslide models with their input layers). All grids must use the same bounds. TO DO: change so that all input layers do not have to have the same bounds, test plotting multiple probability layers, and add option so that if PDF and PNG aren't output, the plot opens on screen using plt.show(). :param grids: Dictionary of N layers and metadata formatted like: maplayers['layer name']={ 'grid': mapio grid2D object, 'label': 'label for colorbar and top line of subtitle', 'type': 'output or input to model', 'description': 'detailed description of layer for subtitle'}. Layer names must be unique. :type grids: Dictionary or Ordered dictionary - import collections; grids = collections.OrderedDict() :param shakefile: optional ShakeMap file (url or full file path) to extract information for labels and folder names :type shakefile: string :param suptitle: This will be displayed at the top of the plots and in the figure names :type suptitle: string :param plotorder: List of keys describing the order to plot the grids, if None and grids is an ordered dictionary, it will use the order of the dictionary, otherwise it will choose an order which may be somewhat random but it will always put a probability grid first :type plotorder: list :param maskthreshes: N x 1 array or list of lower thresholds for masking corresponding to order in plotorder or order of OrderedDict if plotorder is None. If grids is not an ordered dict and plotorder is not specified, this will not work right. If None (default), nothing will be masked :param colormaps: List of strings of matplotlib colormaps (e.g. cm.autumn_r) corresponding to plotorder or order of dictionary if plotorder is None. The list can contain both strings and None, e.g. colormaps = ['cm.autumn', None, None, 'cm.jet'], and Nones will default to the default colormap :param boundaries: None to show entire study area, 'zoom' to zoom in on the area of action (only works if there is a probability layer) using zthresh as a threshold, or a dictionary defining lats and lons in the form of boundaries.xmin = minlon, boundaries.xmax = maxlon, boundaries.ymin = minlat, boundaries.ymax = maxlat :param zthresh: threshold for computing zooming bounds, only used if boundaries = 'zoom' :type zthresh: float :param scaletype: Type of scale for plotting, 'continuous' or 'binned' - will be reflected in colorbar :type scaletype: string :param lims: None or Nx1 list of tuples or numpy arrays corresponding to plotorder defining the limits for saturating the colorbar (vmin, vmax) if scaletype is continuous, or the bins to use (clev) if scaletype is binned. The list can contain tuples, arrays, and Nones, e.g. lims = [(0., 10.), None, (0.1, 1.5), np.linspace(0., 1.5, 15)].
When None is specified, the program will estimate the limits; when an array is specified but the scale type is continuous, vmin will be set to min(array) and vmax will be set to max(array) :param logscale: None or Nx1 list of Trues and Falses corresponding to plotorder defining whether to use a linear or log scale (log10) for plotting the layer. This will be reflected in the labels :param ALPHA: Transparency for mapping, if there is a hillshade that will plot below each layer, it is recommended to set this to at least 0.7 :type ALPHA: float :param maproads: Whether to show roads or not, default True, but requires that roadfolder is specified and valid to work :type maproads: boolean :param mapcities: Whether to show cities or not, default True, but requires that cityfile is specified and valid to work :type mapcities: boolean :param isScenario: Whether this is a scenario (True) or a real event (False) (default False) :type isScenario: boolean :param roadfolder: Full file path to folder containing road shapefiles :type roadfolder: string :param topofile: Full file path to topography grid (GDAL compatible) - this is only needed to make a hillshade if a premade hillshade is not specified :type topofile: string :param cityfile: Full file path to Pager file containing city & population information :type cityfile: string :param roadcolor: Color to use for roads, if plotted, default #6E6E6E :type roadcolor: Hex color or other matplotlib compatible way of defining color :param watercolor: Color to use for oceans, lakes, and rivers, default #B8EEFF :type watercolor: Hex color or other matplotlib compatible way of defining color :param countrycolor: Color for country borders, default #177F10 :type countrycolor: Hex color or other matplotlib compatible way of defining color :param outputdir: File path for outputting figures, if edict is defined, a subfolder based on the event id will be created in this folder. If None, will use current directory :param savepdf: True to save pdf figure, False to not :param savepng: True to save png figure, False to not :param ds: True to allow downsampling for display (necessary when arrays are quite large), False to not allow :param dstype: What function to use in downsampling, options are 'min', 'max', 'median', or 'mean' :param upsample: True to upsample the layer to the DEM resolution for better looking hillshades :returns: * PDF and/or PNG of map * Downsampled and trimmed version of input grids.
If no modification was needed for plotting, this will be identical to grids but without the metadata """ if suptitle is None: suptitle = ' ' plt.ioff() defaultcolormap = cm.jet if shakefile is not None: edict = ShakeGrid.load(shakefile, adjust='res').getEventDict() temp = ShakeGrid.load(shakefile, adjust='res').getShakeDict() edict['eventid'] = temp['shakemap_id'] edict['version'] = temp['shakemap_version'] else: edict = None # Get output file location if outputdir is None: print('No output location given, using current directory for outputs\n') outputdir = os.getcwd() if edict is not None: outfolder = os.path.join(outputdir, edict['event_id']) else: outfolder = outputdir if not os.path.isdir(outfolder): os.makedirs(outfolder) # Get plotting order, if not specified if plotorder is None: plotorder = list(grids.keys()) # Get boundaries to use for all plots cut = True if boundaries is None: cut = False keytemp = list(grids.keys()) boundaries = grids[keytemp[0]]['grid'].getGeoDict() elif boundaries == 'zoom': # Find probability layer (will just take the maximum bounds if there is # more than one) keytemp = list(grids.keys()) key1 = [key for key in keytemp if 'model' in key.lower()] if len(key1) == 0: print('Could not find model layer to use for zoom, using default boundaries') keytemp = list(grids.keys()) boundaries = grids[keytemp[0]]['grid'].getGeoDict() else: lonmax = -1.e10 lonmin = 1.e10 latmax = -1.e10 latmin = 1.e10 for key in key1: # get lat lons of areas affected and add, if no areas affected, # switch to shakemap boundaries temp = grids[key]['grid'] xmin, xmax, ymin, ymax = temp.getBounds() lons = np.linspace(xmin, xmax, temp.getGeoDict().nx) lats = np.linspace(ymax, ymin, temp.getGeoDict().ny) # backwards so it plots right row, col = np.where(temp.getData() > float(zthresh)) lonmin = lons[col].min() lonmax = lons[col].max() latmin = lats[row].min() latmax = lats[row].max() # llons, llats = np.meshgrid(lons, lats) # make meshgrid # llons1 = llons[temp.getData() > float(zthresh)] # llats1 = llats[temp.getData() > float(zthresh)] # if llons1.min() < lonmin: # lonmin = llons1.min() # if llons1.max() > lonmax: # lonmax = llons1.max() # if llats1.min() < latmin: # latmin = llats1.min() # if llats1.max() > latmax: # latmax = llats1.max() boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100} # dummy fillers, only really care about bounds if xmin < lonmin-0.15*(lonmax-lonmin): boundaries1['xmin'] = lonmin-0.1*(lonmax-lonmin) else: boundaries1['xmin'] = xmin if xmax > lonmax+0.15*(lonmax-lonmin): boundaries1['xmax'] = lonmax+0.1*(lonmax-lonmin) else: boundaries1['xmax'] = xmax if ymin < latmin-0.15*(latmax-latmin): boundaries1['ymin'] = latmin-0.1*(latmax-latmin) else: boundaries1['ymin'] = ymin if ymax > latmax+0.15*(latmax-latmin): boundaries1['ymax'] = latmax+0.1*(latmax-latmin) else: boundaries1['ymax'] = ymax boundaries = GeoDict(boundaries1, adjust='res') else: # SEE IF BOUNDARIES ARE SAME AS BOUNDARIES OF LAYERS keytemp = list(grids.keys()) tempgdict = grids[keytemp[0]]['grid'].getGeoDict() if np.abs(tempgdict.xmin-boundaries['xmin']) < 0.05 and \ np.abs(tempgdict.ymin-boundaries['ymin']) < 0.05 and \ np.abs(tempgdict.xmax-boundaries['xmax']) < 0.05 and \ np.abs(tempgdict.ymax - boundaries['ymax']) < 0.05: print('Input boundaries are almost the same as specified boundaries, no cutting needed') boundaries = tempgdict cut = False else: try: if boundaries['xmin'] > boundaries['xmax'] or \ boundaries['ymin'] > boundaries['ymax']: print('Input boundaries are not usable, using default 
boundaries') keytemp = list(grids.keys()) boundaries = grids[keytemp[0]]['grid'].getGeoDict() cut = False else: # Build dummy GeoDict boundaries = GeoDict({'xmin': boundaries['xmin'], 'xmax': boundaries['xmax'], 'ymin': boundaries['ymin'], 'ymax': boundaries['ymax'], 'dx': 100., 'dy': 100., 'ny': 100., 'nx': 100.}, adjust='res') except: print('Input boundaries are not usable, using default boundaries') keytemp = list(grids.keys()) boundaries = grids[keytemp[0]]['grid'].getGeoDict() cut = False # Pull out bounds for various uses bxmin, bxmax, bymin, bymax = boundaries.xmin, boundaries.xmax, boundaries.ymin, boundaries.ymax # Determine if need a single panel or multi-panel plot and if multi-panel, # how many and how it will be arranged fig = plt.figure() numpanels = len(grids) if numpanels == 1: rowpan = 1 colpan = 1 # create the figure and axes instances. fig.set_figwidth(5) elif numpanels == 2 or numpanels == 4: rowpan = np.ceil(numpanels/2.) colpan = 2 fig.set_figwidth(13) else: rowpan = np.ceil(numpanels/3.) colpan = 3 fig.set_figwidth(15) if rowpan == 1: fig.set_figheight(rowpan*6.0) else: fig.set_figheight(rowpan*5.3) # Need to update naming to reflect the shakemap version once can get # getHeaderData to work, add edict['version'] back into title, maybe # shakemap id also? fontsizemain = 14. fontsizesub = 12. fontsizesmallest = 10. if rowpan == 1.: fontsizemain = 12. fontsizesub = 10. fontsizesmallest = 8. if edict is not None: if isScenario: title = edict['event_description'] else: timestr = edict['event_timestamp'].strftime('%b %d %Y') title = 'M%.1f %s v%i - %s' % (edict['magnitude'], timestr, edict['version'], edict['event_description']) plt.suptitle(title+'\n'+suptitle, fontsize=fontsizemain) else: plt.suptitle(suptitle, fontsize=fontsizemain) clear_color = [0, 0, 0, 0.0] # Cut all of them and release extra memory xbuff = (bxmax-bxmin)/10. ybuff = (bymax-bymin)/10. 
cutxmin = bxmin-xbuff cutymin = bymin-ybuff cutxmax = bxmax+xbuff cutymax = bymax+ybuff if cut is True: newgrids = collections.OrderedDict() for k, layer in enumerate(plotorder): templayer = grids[layer]['grid'] try: newgrids[layer] = {'grid': templayer.cut(cutxmin, cutxmax, cutymin, cutymax, align=True)} except Exception as e: print(('Cutting failed, %s, continuing with full layers' % e)) newgrids = grids continue del templayer gc.collect() else: newgrids = grids tempgdict = newgrids[list(grids.keys())[0]]['grid'].getGeoDict() # Upsample layers to same as topofile if desired for better looking hillshades if upsample is True and topofile is not None: try: topodict = GDALGrid.getFileGeoDict(topofile) if topodict.dx >= tempgdict.dx or topodict.dy >= tempgdict.dy: print('Upsampling not possible, resolution of results already smaller than DEM') pass else: tempgdict1 = GeoDict({'xmin': tempgdict.xmin-xbuff, 'ymin': tempgdict.ymin-ybuff, 'xmax': tempgdict.xmax+xbuff, 'ymax': tempgdict.ymax+ybuff, 'dx': topodict.dx, 'dy': topodict.dy, 'nx': topodict.nx, 'ny': topodict.ny}, adjust='res') tempgdict2 = tempgdict1.getBoundsWithin(tempgdict) for k, layer in enumerate(plotorder): newgrids[layer]['grid'] = newgrids[layer]['grid'].subdivide(tempgdict2) except: print('Upsampling failed, continuing') # Downsample all of them for plotting, if needed, and replace them in # grids (to save memory) tempgrid = newgrids[list(grids.keys())[0]]['grid'] xsize = tempgrid.getGeoDict().nx ysize = tempgrid.getGeoDict().ny inchesx, inchesy = fig.get_size_inches() divx = int(np.round(xsize/(500.*inchesx))) divy = int(np.round(ysize/(500.*inchesy))) xmin, xmax, ymin, ymax = tempgrid.getBounds() gdict = tempgrid.getGeoDict() # Will be replaced if downsampled del tempgrid gc.collect() if divx <= 1: divx = 1 if divy <= 1: divy = 1 if (divx > 1. or divy > 1.) 
and ds:
        if dstype == 'max':
            func = np.nanmax
        elif dstype == 'min':
            func = np.nanmin
        elif dstype in ('med', 'median'):  # accept both spellings
            func = np.nanmedian
        else:
            func = np.nanmean
        for k, layer in enumerate(plotorder):
            layergrid = newgrids[layer]['grid']
            dat = block_reduce(layergrid.getData().copy(),
                               block_size=(divy, divx),
                               cval=float('nan'), func=func)
            if k == 0:
                lons = block_reduce(np.linspace(xmin, xmax,
                                                layergrid.getGeoDict().nx),
                                    block_size=(divx,), func=np.mean,
                                    cval=float('nan'))
                if math.isnan(lons[-1]):
                    lons[-1] = lons[-2] + (lons[1]-lons[0])
                lats = block_reduce(np.linspace(ymax, ymin,
                                                layergrid.getGeoDict().ny),
                                    block_size=(divy,), func=np.mean,
                                    cval=float('nan'))
                if math.isnan(lats[-1]):
                    lats[-1] = lats[-2] + (lats[1]-lats[0])
                gdict = GeoDict({'xmin': lons.min(), 'xmax': lons.max(),
                                 'ymin': lats.min(), 'ymax': lats.max(),
                                 'dx': np.abs(lons[1]-lons[0]),
                                 'dy': np.abs(lats[1]-lats[0]),
                                 'nx': len(lons), 'ny': len(lats)},
                                adjust='res')
            newgrids[layer]['grid'] = Grid2D(dat, gdict)
            del layergrid, dat
    else:
        lons = np.linspace(xmin, xmax, xsize)
        lats = np.linspace(ymax, ymin, ysize)  # backwards so it plots right side up

    # make meshgrid
    llons1, llats1 = np.meshgrid(lons, lats)

    # See if there is an oceanfile for masking
    bbox = PolygonSH(((cutxmin, cutymin), (cutxmin, cutymax),
                      (cutxmax, cutymax), (cutxmax, cutymin)))
    if oceanfile is not None:
        try:
            f = fiona.open(oceanfile)
            oc = next(f)
            f.close()  # was f.close (no call), which never closed the file
            shapes = shape(oc['geometry'])
            # make boundaries into a shape
            ocean = shapes.intersection(bbox)
        except:
            print('Not able to read specified ocean file, will use default ocean masking')
            oceanfile = None
    if inventory_shapefile is not None:
        try:
            f = fiona.open(inventory_shapefile)
            invshp = list(f.items(bbox=(bxmin, bymin, bxmax, bymax)))
            f.close()
            inventory = [shape(inv[1]['geometry']) for inv in invshp]
        except:
            print('unable to read inventory shapefile specified, will not plot inventory')
            inventory_shapefile = None

    # Find cities that will be plotted
    if mapcities is True and cityfile is not None:
        try:
            mycity = BasemapCities.loadFromGeoNames(cityfile=cityfile)
            bcities = mycity.limitByBounds((bxmin, bxmax, bymin, bymax))
            # bcities = bcities.limitByPopulation(40000)
            bcities = bcities.limitByGrid(nx=4, ny=4, cities_per_grid=2)
        except:
            print('Could not read in cityfile, not plotting cities')
            mapcities = False
            cityfile = None

    # Load in topofile
    if topofile is not None:
        try:
            topomap = GDALGrid.load(topofile, resample=True, method='linear',
                                    samplegeodict=gdict)
        except:
            topomap = GMTGrid.load(topofile, resample=True, method='linear',
                                   samplegeodict=gdict)
        topodata = topomap.getData().copy()
        # mask oceans if don't have ocean shapefile
        if oceanfile is None:
            topodata = maskoceans(llons1, llats1, topodata, resolution='h',
                                  grid=1.25, inlands=True)
    else:
        print('no hillshade is possible\n')
        topomap = None
        topodata = None

    # Load in roads, if needed
    if maproads is True and roadfolder is not None:
        try:
            roadslist = []
            for folder in os.listdir(roadfolder):
                road1 = os.path.join(roadfolder, folder)
                shpfiles = glob.glob(os.path.join(road1, '*.shp'))
                if len(shpfiles):
                    shpfile = shpfiles[0]
                    f = fiona.open(shpfile)
                    shapes = list(f.items(bbox=(bxmin, bymin, bxmax, bymax)))
                    for shapeid, shapedict in shapes:
                        roadslist.append(shapedict)
                    f.close()
        except:
            print('Not able to plot roads')
            roadslist = None

    val = 1
    for k, layer in enumerate(plotorder):
        layergrid = newgrids[layer]['grid']
        if 'label' in list(grids[layer].keys()):
            label1 = grids[layer]['label']
        else:
            label1 = layer
        try:
            sref = grids[layer]['description']['name']
        except:
            sref = None
        ax = 
fig.add_subplot(rowpan, colpan, val) val += 1 clat = bymin + (bymax-bymin)/2.0 clon = bxmin + (bxmax-bxmin)/2.0 # setup of basemap ('lcc' = lambert conformal conic). # use major and minor sphere radii from WGS84 ellipsoid. m = Basemap(llcrnrlon=bxmin, llcrnrlat=bymin, urcrnrlon=bxmax, urcrnrlat=bymax, rsphere=(6378137.00, 6356752.3142), resolution='l', area_thresh=1000., projection='lcc', lat_1=clat, lon_0=clon, ax=ax) x1, y1 = m(llons1, llats1) # get projection coordinates axsize = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) if k == 0: wid, ht = axsize.width, axsize.height if colormaps is not None and \ len(colormaps) == len(newgrids) and \ colormaps[k] is not None: palette = colormaps[k] else: # Find preferred default color map for each type of layer if 'prob' in layer.lower() or 'pga' in layer.lower() or \ 'pgv' in layer.lower() or 'cohesion' in layer.lower() or \ 'friction' in layer.lower() or 'fs' in layer.lower(): palette = cm.jet elif 'slope' in layer.lower(): palette = cm.gnuplot2 elif 'precip' in layer.lower(): palette = cm2.s3pcpn else: palette = defaultcolormap if topodata is not None: if k == 0: ptopo = m.transform_scalar( np.flipud(topodata), lons+0.5*gdict.dx, lats[::-1]-0.5*gdict.dy, np.round(300.*wid), np.round(300.*ht), returnxy=False, checkbounds=False, order=1, masked=False) #use lightsource class to make our shaded topography ls = LightSource(azdeg=135, altdeg=45) ls1 = LightSource(azdeg=120, altdeg=45) ls2 = LightSource(azdeg=225, altdeg=45) intensity1 = ls1.hillshade(ptopo, fraction=0.25, vert_exag=1.) intensity2 = ls2.hillshade(ptopo, fraction=0.25, vert_exag=1.) intensity = intensity1*0.5 + intensity2*0.5 #hillshm_im = m.transform_scalar(np.flipud(hillshm), lons, lats[::-1], np.round(300.*wid), np.round(300.*ht), returnxy=False, checkbounds=False, order=0, masked=False) #m.imshow(hillshm_im, cmap='Greys', vmin=0., vmax=3., zorder=1, interpolation='none') # vmax = 3 to soften colors to light gray #m.pcolormesh(x1, y1, hillshm, cmap='Greys', linewidth=0., rasterized=True, vmin=0., vmax=3., edgecolors='none', zorder=1); # plt.draw() # Get the data dat = layergrid.getData().copy() # mask out anything below any specified thresholds # Might need to move this up to before downsampling...might give illusion of no hazard in places where there is some that just got averaged out if maskthreshes is not None and len(maskthreshes) == len(newgrids): if maskthreshes[k] is not None: dat[dat <= maskthreshes[k]] = float('NaN') dat = np.ma.array(dat, mask=np.isnan(dat)) if logscale is not False and len(logscale) == len(newgrids): if logscale[k] is True: dat = np.log10(dat) label1 = r'$log_{10}$(' + label1 + ')' if scaletype.lower() == 'binned': # Find order of range to know how to scale order = np.round(np.log(np.nanmax(dat) - np.nanmin(dat))) if order < 1.: scal = 10**-order else: scal = 1. 
if lims is None or len(lims) != len(newgrids): clev = (np.linspace(np.floor(scal*np.nanmin(dat)), np.ceil(scal*np.nanmax(dat)), 10))/scal else: if lims[k] is None: clev = (np.linspace(np.floor(scal*np.nanmin(dat)), np.ceil(scal*np.nanmax(dat)), 10))/scal else: clev = lims[k] # Adjust to colorbar levels dat[dat < clev[0]] = clev[0] for j, level in enumerate(clev[:-1]): dat[(dat >= clev[j]) & (dat < clev[j+1])] = clev[j] # So colorbar saturates at top dat[dat > clev[-1]] = clev[-1] #panelhandle = m.contourf(x1, y1, datm, clev, cmap=palette, linewidth=0., alpha=ALPHA, rasterized=True) vmin = clev[0] vmax = clev[-1] else: if lims is not None and len(lims) == len(newgrids): if lims[k] is None: vmin = np.nanmin(dat) vmax = np.nanmax(dat) else: vmin = lims[k][0] vmax = lims[k][-1] else: vmin = np.nanmin(dat) vmax = np.nanmax(dat) # Mask out cells overlying oceans or block with a shapefile if available if oceanfile is None: dat = maskoceans(llons1, llats1, dat, resolution='h', grid=1.25, inlands=True) else: #patches = [] if type(ocean) is PolygonSH: ocean = [ocean] for oc in ocean: patch = getProjectedPatch(oc, m, edgecolor="#006280", facecolor=watercolor, lw=0.5, zorder=4.) #x, y = m(oc.exterior.xy[0], oc.exterior.xy[1]) #xy = zip(x, y) #patch = Polygon(xy, facecolor=watercolor, edgecolor="#006280", lw=0.5, zorder=4.) ##patches.append(Polygon(xy, facecolor=watercolor, edgecolor=watercolor, zorder=500.)) ax.add_patch(patch) ##ax.add_collection(PatchCollection(patches)) if inventory_shapefile is not None: for in1 in inventory: if 'point' in str(type(in1)): x, y = in1.xy x = x[0] y = y[0] m.scatter(x, y, c='m', s=50, latlon=True, marker='^', zorder=100001) else: x, y = m(in1.exterior.xy[0], in1.exterior.xy[1]) xy = list(zip(x, y)) patch = Polygon(xy, facecolor='none', edgecolor='k', lw=0.5, zorder=10.) #patches.append(Polygon(xy, facecolor=watercolor, edgecolor=watercolor, zorder=500.)) ax.add_patch(patch) palette.set_bad(clear_color, alpha=0.0) # Plot it up dat_im = m.transform_scalar( np.flipud(dat), lons+0.5*gdict.dx, lats[::-1]-0.5*gdict.dy, np.round(300.*wid), np.round(300.*ht), returnxy=False, checkbounds=False, order=0, masked=True) if topodata is not None: # Drape over hillshade #turn data into an RGBA image cmap = palette #adjust data so scaled between vmin and vmax and between 0 and 1 dat1 = dat_im.copy() dat1[dat1 < vmin] = vmin dat1[dat1 > vmax] = vmax dat1 = (dat1 - vmin)/(vmax-vmin) rgba_img = cmap(dat1) maskvals = np.dstack((dat1.mask, dat1.mask, dat1.mask)) rgb = np.squeeze(rgba_img[:, :, 0:3]) rgb[maskvals] = 1. 
draped_hsv = ls.blend_hsv(rgb, np.expand_dims(intensity, 2)) m.imshow(draped_hsv, zorder=3., interpolation='none') # This is just a dummy layer that will be deleted to make the # colorbar look right panelhandle = m.imshow(dat_im, cmap=palette, zorder=0., vmin=vmin, vmax=vmax) else: panelhandle = m.imshow(dat_im, cmap=palette, zorder=3., vmin=vmin, vmax=vmax, interpolation='none') #panelhandle = m.pcolormesh(x1, y1, dat, linewidth=0., cmap=palette, vmin=vmin, vmax=vmax, alpha=ALPHA, rasterized=True, zorder=2.); #panelhandle.set_edgecolors('face') # add colorbar cbfmt = '%1.1f' if vmax is not None and vmin is not None: if (vmax - vmin) < 1.: cbfmt = '%1.2f' elif vmax > 5.: # (vmax - vmin) > len(clev): cbfmt = '%1.0f' #norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) if scaletype.lower() == 'binned': cbar = fig.colorbar(panelhandle, spacing='proportional', ticks=clev, boundaries=clev, fraction=0.036, pad=0.04, format=cbfmt, extend='both') #cbar1 = ColorbarBase(cbar.ax, cmap=palette, norm=norm, spacing='proportional', ticks=clev, boundaries=clev, fraction=0.036, pad=0.04, format=cbfmt, extend='both', extendfrac='auto') else: cbar = fig.colorbar(panelhandle, fraction=0.036, pad=0.04, extend='both', format=cbfmt) #cbar1 = ColorbarBase(cbar.ax, cmap=palette, norm=norm, fraction=0.036, pad=0.04, extend='both', extendfrac='auto', format=cbfmt) if topodata is not None: panelhandle.remove() cbar.set_label(label1, fontsize=10) cbar.ax.tick_params(labelsize=8) parallels = m.drawparallels(getMapLines(bymin, bymax, 3), labels=[1, 0, 0, 0], linewidth=0.5, labelstyle='+/-', fontsize=9, xoffset=-0.8, color='gray', zorder=100.) m.drawmeridians(getMapLines(bxmin, bxmax, 3), labels=[0, 0, 0, 1], linewidth=0.5, labelstyle='+/-', fontsize=9, color='gray', zorder=100.) for par in parallels: try: parallels[par][1][0].set_rotation(90) except: pass #draw roads on the map, if they were provided to us if maproads is True and roadslist is not None: try: for road in roadslist: try: xy = list(road['geometry']['coordinates']) roadx, roady = list(zip(*xy)) mapx, mapy = m(roadx, roady) m.plot(mapx, mapy, roadcolor, lw=0.5, zorder=9) except: continue except Exception as e: print(('Failed to plot roads, %s' % e)) #add city names to map if mapcities is True and cityfile is not None: try: fontname = 'Arial' fontsize = 8 if k == 0: # Only need to choose cities first time and then apply to rest fcities = bcities.limitByMapCollision( m, fontname=fontname, fontsize=fontsize) ctlats, ctlons, names = fcities.getCities() cxis, cyis = m(ctlons, ctlats) for ctlat, ctlon, cxi, cyi, name in zip(ctlats, ctlons, cxis, cyis, names): m.scatter(ctlon, ctlat, c='k', latlon=True, marker='.', zorder=100000) ax.text(cxi, cyi, name, fontname=fontname, fontsize=fontsize, zorder=100000) except Exception as e: print('Failed to plot cities, %s' % e) #draw star at epicenter plt.sca(ax) if edict is not None: elat, elon = edict['lat'], edict['lon'] ex, ey = m(elon, elat) plt.plot(ex, ey, '*', markeredgecolor='k', mfc='None', mew=1.0, ms=15, zorder=10000.) 
m.drawmapboundary(fill_color=watercolor) m.fillcontinents(color=clear_color, lake_color=watercolor) m.drawrivers(color=watercolor) ##m.drawcoastlines() #draw country boundaries m.drawcountries(color=countrycolor, linewidth=1.0) #add map scale m.drawmapscale((bxmax+bxmin)/2., (bymin+(bymax-bymin)/9.), clon, clat, np.round((((bxmax-bxmin)*111)/5)/10.)*10, barstyle='fancy', zorder=10) # Add border autoAxis = ax.axis() rec = Rectangle((autoAxis[0]-0.7, autoAxis[2]-0.2), (autoAxis[1]-autoAxis[0])+1, (autoAxis[3]-autoAxis[2])+0.4, fill=False, lw=1, zorder=1e8) rec = ax.add_patch(rec) rec.set_clip_on(False) plt.draw() if sref is not None: label2 = '%s\nsource: %s' % (label1, sref) # '%s\n' % label1 + r'{\fontsize{10pt}{3em}\selectfont{}%s}' % sref # else: label2 = label1 plt.title(label2, axes=ax, fontsize=fontsizesub) #draw scenario watermark, if scenario if isScenario: plt.sca(ax) cx, cy = m(clon, clat) plt.text(cx, cy, 'SCENARIO', rotation=45, alpha=0.10, size=72, ha='center', va='center', color='red') #if ds: # Could add this to print "downsampled" on map # plt.text() if k == 1 and rowpan == 1: # adjust single level plot axsize = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) ht2 = axsize.height fig.set_figheight(ht2*1.6) else: plt.tight_layout() # Make room for suptitle - tight layout doesn't account for it plt.subplots_adjust(top=0.92) if printparam is True: try: fig = plt.gcf() dictionary = grids['model']['description']['parameters'] paramstring = 'Model parameters: ' halfway = np.ceil(len(dictionary)/2.) for i, key in enumerate(dictionary): if i == halfway and colpan == 1: paramstring += '\n' paramstring += ('%s = %s; ' % (key, dictionary[key])) print(paramstring) fig.text(0.01, 0.015, paramstring, fontsize=fontsizesmallest) plt.draw() except: print('Could not display model parameters') if edict is not None: eventid = edict['eventid'] else: eventid = '' time1 = datetime.datetime.utcnow().strftime('%d%b%Y_%H%M') outfile = os.path.join(outfolder, '%s_%s_%s.pdf' % (eventid, suptitle, time1)) pngfile = os.path.join(outfolder, '%s_%s_%s.png' % (eventid, suptitle, time1)) if savepdf is True: print('Saving map output to %s' % outfile) plt.savefig(outfile, dpi=300) if savepng is True: print('Saving map output to %s' % pngfile) plt.savefig(pngfile) if showplots is True: plt.show() else: plt.close(fig) return newgrids
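# A minimal sketch of calling modelMap with a single probability layer. The
# GeoDict values, the random data, and the output directory are hypothetical
# placeholders; roads and cities are disabled because no supporting files are
# assumed to exist.
def _modelmap_example():
    import collections
    import numpy as np
    from mapio.grid2d import Grid2D
    from mapio.geodict import GeoDict

    gdict = GeoDict({'xmin': -118.0, 'xmax': -117.0,
                     'ymin': 33.0, 'ymax': 34.0,
                     'dx': 0.01, 'dy': 0.01, 'nx': 101, 'ny': 101})
    grids = collections.OrderedDict()
    grids['model'] = {'grid': Grid2D(np.random.rand(101, 101), gdict),
                      'label': 'Probability',
                      'type': 'output',
                      'description': {'name': 'demo model'}}
    return modelMap(grids, suptitle='Demo', maproads=False, mapcities=False,
                    outputdir='/tmp/demo_maps', savepdf=False, savepng=True)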
def kritikos_fuzzygamma(shakefile, config, bounds=None):
    """
    Runs the Kritikos procedure with the fuzzy gamma overlay method.
    """
    cmodel = config['statistical_models']['kritikos_2015']
    gamma = cmodel['gamma_value']

    # This section reads in items from the config file

    # Read in layer files and get data
    layers = cmodel['layers']
    try:
        slope_file = layers['slope']
        dff_file = layers['dff']
        dfs_file = layers['dfs']
        elev_file = layers['elev']
    except:
        print('Unable to retrieve grid data.')

    try:
        # Load in divisors
        div = cmodel['divisor']
        MMI_div = div['MMI']
        slope_div = div['slope']
        dff_div = div['dff']
        dfs_div = div['dfs']
        slope_pos_div = div['slope_pos']
    except:
        print('Unable to retrieve divisors.')

    try:
        # Load in powers
        power = cmodel['power']
        MMI_power = power['MMI']
        slope_power = power['slope']
        dff_power = power['dff']
        dfs_power = power['dfs']
        slope_pos_power = power['slope_pos']
    except:
        print('Unable to retrieve powers.')

    # Cut and resample, create geodict
    try:
        shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
        slopedict, duplicated = GDALGrid.getFileGeoDict(slope_file)
        if bounds is not None:
            # Make sure bounds are within ShakeMap Grid
            if shkgdict.xmin > bounds['xmin'] or \
                    shkgdict.xmax < bounds['xmax'] or \
                    shkgdict.ymin > bounds['ymin'] or \
                    shkgdict.ymax < bounds['ymax']:
                print('Specified bounds are outside shakemap area, using '
                      'ShakeMap bounds instead')
                bounds = None
        if bounds is not None:
            tempgdict = GeoDict({'xmin': bounds['xmin'],
                                 'ymin': bounds['ymin'],
                                 'xmax': bounds['xmax'],
                                 'ymax': bounds['ymax'],
                                 'dx': 100., 'dy': 100.,
                                 'nx': 100., 'ny': 100.}, adjust='res')
            gdict = slopedict.getBoundsWithin(tempgdict)
        else:
            # Get boundaries from shakemap if not specified
            gdict = slopedict.getBoundsWithin(shkgdict)
    except:
        raise NameError('Unable to create base geodict.')

    # Load in data -- Still need to make DFF and DFS layers
    try:
        # Load in slope data
        slopegrid = GDALGrid.load(slope_file, samplegeodict=gdict,
                                  resample=False)
        slope_data = slopegrid.getData().astype(float)
        # Load in MMI
        shakemap = ShakeGrid.load(shakefile, samplegeodict=gdict,
                                  resample=True, method='linear',
                                  adjust='res')
        MMI_data = shakemap.getLayer('mmi').getData().astype(float)
        # Load in DFF -- STILL NEED THIS FILE
        dffgrid = GDALGrid.load(dff_file, samplegeodict=gdict, resample=False)
        dff_data = dffgrid.getData().astype(float)
        # Load in DFS -- STILL NEED THIS FILE
        dfsgrid = GDALGrid.load(dfs_file, samplegeodict=gdict, resample=False)
        dfs_data = dfsgrid.getData().astype(float)
        # Load in elevation
        elev_grid = GDALGrid.load(elev_file, samplegeodict=gdict,
                                  resample=False)
        DEM = elev_grid.getData().astype(float)
    except:
        print('Data could not be retrieved.')

    # Read in classifications
    try:
        mmi_class = cmodel['classification']['MMI']
        slope_class = cmodel['classification']['slope']
        dff_class = cmodel['classification']['dff']
        dfs_class = cmodel['classification']['dfs']
        slope_pos_class = cmodel['classification']['slope_pos']
    except:
        print('Could not recover classifications from config.')

    try:
        slope_pos_data = create_slopePos(slope_data, DEM, cmodel)
    except:
        print('Could not create slope position grid.')

    # Split classification strings into bin edges and reclassify each layer.
    # The original per-element comparisons do not work on arrays, so boolean
    # masks are used instead; entries without a '-' are treated as
    # open-ended terminal classes.

    # MMI classifications
    try:
        mmi_classes = mmi_class.split(',')
        for i in mmi_classes:
            if i.find('-') != -1:
                j = i.split('-')
                ind = (MMI_data >= int(j[0])) & (MMI_data < int(j[1]))
                MMI_data[ind] = int(j[0])
    except:
        print('Could not categorize MMI values')

    # Slope classifications
    try:
        slope_classes = slope_class.split(',')
        k = 1
        for i in slope_classes:
            if i.find('-') != -1:
                j = i.split('-')
                ind = (slope_data >= int(j[0])) & (slope_data < int(j[1]))
                slope_data[ind] = k
                k += 1
            else:
                slope_data[slope_data >= int(i)] = 11
    except:
        print('Could not recategorize slope values.')

    # DFF classifications
    try:
        dff_classes = dff_class.split(',')
        k = 1
        for i in dff_classes:
            if i.find('-') != -1:
                j = i.split('-')
                ind = (dff_data >= int(j[0])) & (dff_data < int(j[1]))
                dff_data[ind] = k
                k += 1
            else:
                dff_data[dff_data >= int(i)] = 7
    except:
        print('Could not recategorize DFF values.')

    # DFS classifications
    try:
        dfs_classes = dfs_class.split(',')
        k = 1
        for i in dfs_classes:
            if i.find('-') != -1:
                j = i.split('-')
                ind = (dfs_data >= int(j[0])) & (dfs_data < int(j[1]))
                dfs_data[ind] = k
                k += 1
            else:
                dfs_data[dfs_data >= int(i)] = 6
    except:
        print('Could not recategorize DFS values.')

    # Slope position classification
    try:
        slope_pos_classes = slope_pos_class.split(',')
        k = 1
        for i in slope_pos_classes:
            slope_pos_data[slope_pos_data == i] = k
            k += 1
    except:
        print('Could not recategorize slope position values.')

    # This section runs all the calculations

    # Run each layer through the membership function
    # mu = 1 / (1 + (x / divisor) ** power)
    # (np.power is used here; the original called np.exp with two arguments,
    # which is not a power function)
    try:
        layers = []
        slope = 1. / (1. + np.power(slope_data / slope_div, slope_power))
        MMI = 1. / (1. + np.power(MMI_data / MMI_div, MMI_power))
        dff = 1. / (1. + np.power(dff_data / dff_div, dff_power))
        dfs = 1. / (1. + np.power(dfs_data / dfs_div, dfs_power))
        slope_pos = 1. / (1. + np.power(slope_pos_data / slope_pos_div,
                                        slope_pos_power))
        # Add to layers list (to be used in further calculations)
        layers.extend([slope, MMI, dff, dfs, slope_pos])
    except:
        print('Layer calculations failed.')

    # Apply final calculations operator (Kritikos paper equation 4);
    # stacking makes the products elementwise across layers.
    # Original author's note: this section hasn't been run.
    try:
        stack = np.stack(layers)
        a = np.prod(stack, axis=0)
        b = np.prod(1. - stack, axis=0)
        mu_x = np.power(a, 1. - gamma) * np.power(1. - b, gamma)
        return mu_x  # return added; the original computed mu_x but never returned it
    except:
        print('Unable to calculate final product.')
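# A small numeric sketch of the fuzzy-gamma arithmetic above (Kritikos et al.
# equation 4). The divisors and powers here are invented for illustration,
# not the calibrated values from the config file.
def _fuzzy_gamma_example(gamma=0.9):
    import numpy as np
    slope_data = np.array([[5., 15.], [25., 35.]])  # synthetic slopes (deg)
    mmi_data = np.array([[6., 7.], [8., 9.]])       # synthetic MMI
    # Membership function: mu = 1 / (1 + (x / divisor) ** power)
    slope = 1. / (1. + np.power(slope_data / 20., -2.))
    mmi = 1. / (1. + np.power(mmi_data / 7.5, -10.))
    stack = np.stack([slope, mmi])
    a = np.prod(stack, axis=0)        # fuzzy algebraic product
    b = np.prod(1. - stack, axis=0)   # complement product (1 - fuzzy sum)
    return np.power(a, 1. - gamma) * np.power(1. - b, gamma)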
def slhrf_liq(shakefile, config, uncertfile=None, saveinputs=False, modeltype=None, displmodel=None, probtype=None, bounds=None): """ Method for computing the probability of liquefaction using the SLHRF, primarily relying on the Wills et al. (2015) Vs30 map of California and Hydrosheds distance to rivers. """ layers = config['slhrf_liq_cal']['layers'] vs30_file = layers['vs30']['file'] elev_file = layers['elev']['file'] dc_file = layers['dc']['file'] dr_file = layers['dr']['file'] fgeodict = GMTGrid.getFileGeoDict(vs30_file)[0] #--------------------------------------------------------------------------- # Read in data layers #--------------------------------------------------------------------------- shakemap = ShakeGrid.load(shakefile, fgeodict, resample=True, method='linear', doPadding=True) PGA = shakemap.getLayer('pga').getData()/100 # convert to g griddict,eventdict,specdict,fields,uncertainties = getHeaderData(shakefile) mag = eventdict['magnitude'] vs30_grid = GMTGrid.load(vs30_file) vs30 = vs30_grid.getData() elev = GDALGrid.load(elev_file, fgeodict, resample=True, method=layers['elev']['interpolation'], doPadding = True).getData() dc = GDALGrid.load(dc_file, fgeodict, resample=True, method=layers['dc']['interpolation'], doPadding = True).getData() dr = GDALGrid.load(dr_file, fgeodict, resample=True, method=layers['dr']['interpolation'], doPadding = True).getData() dw = np.minimum(dr, dc) #--------------------------------------------------------------------------- # Evaluate the different factors #--------------------------------------------------------------------------- Fgeo = np.zeros_like(vs30) for k,v in config['slhrf_liq_cal']['parameters'].items(): ind = np.where(vs30 == float(v[0])) Fgeo[ind] = float(v[1]) Fz = z_factor(elev) Fmag = mag_factor(mag) Fpga = pga_factor(PGA) Fdw = dw_factor(dw) Fnehrp = nehrp_factor(vs30) #--------------------------------------------------------------------------- # Combine factors #--------------------------------------------------------------------------- SLHRF = Fz * Fmag * Fpga * Fdw * Fgeo * Fnehrp # Transform into a 'probability' prob = 0.4 * (1 - np.exp(-0.2 * SLHRF**2) ) #--------------------------------------------------------------------------- # Turn output and inputs into into grids and put in maplayers dictionary #--------------------------------------------------------------------------- maplayers = collections.OrderedDict() temp = shakemap.getShakeDict() shakedetail = '%s_ver%s' % (temp['shakemap_id'], temp['shakemap_version']) modelsref = config['slhrf_liq_cal']['shortref'] modellref = config['slhrf_liq_cal']['longref'] modeltype = 'SLHRF/Wills' maplayers['model'] = {'grid': GDALGrid(prob, fgeodict), 'label': 'Probability', 'type': 'output', 'description': {'name': modelsref, 'longref': modellref, 'units': 'coverage', 'shakemap': shakedetail, 'parameters': {'modeltype': modeltype} } } if saveinputs is True: maplayers['slhrf'] = {'grid': GDALGrid(SLHRF, fgeodict), 'label': 'SLHRF', 'type': 'input', 'description': {'units': 'none'}} maplayers['pga'] = {'grid': GDALGrid(PGA, fgeodict), 'label': 'PGA (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}} maplayers['vs30'] = {'grid': GDALGrid(vs30, fgeodict), 'label': 'Vs30 (m/s)', 'type': 'input', 'description': {'units': 'm/s'}} maplayers['dw'] = {'grid': GDALGrid(dw, fgeodict), 'label': 'dw (km)', 'type': 'input', 'description': {'units': 'km'}} maplayers['elev'] = {'grid': GDALGrid(elev, fgeodict), 'label': 'elev (m)', 'type': 'input', 'description': 
{'units': 'm'}} maplayers['FPGA'] = {'grid': GDALGrid(Fpga, fgeodict), 'label': 'Fpga', 'type': 'input', 'description': {'units': 'none'}} maplayers['FDW'] = {'grid': GDALGrid(Fdw, fgeodict), 'label': 'Fdw', 'type': 'input', 'description': {'units': 'none'}} maplayers['FGEO'] = {'grid': GDALGrid(Fgeo, fgeodict), 'label': 'Fgeo', 'type': 'input', 'description': {'units': 'none'}} maplayers['FZ'] = {'grid': GDALGrid(Fz, fgeodict), 'label': 'Fz', 'type': 'input', 'description': {'units': 'none'}} maplayers['FNEHRP'] = {'grid': GDALGrid(Fnehrp, fgeodict), 'label': 'Fnehrp', 'type': 'input', 'description': {'units': 'none'}} return maplayers
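# A quick numeric check (sketch) of the SLHRF-to-probability transform used
# above, prob = 0.4 * (1 - exp(-0.2 * SLHRF**2)): it is ~0 when SLHRF is 0
# and saturates at the 0.4 ceiling for large SLHRF values.
def _slhrf_transform_check():
    import numpy as np
    slhrf = np.array([0., 0.5, 1., 2., 5.])
    prob = 0.4 * (1. - np.exp(-0.2 * slhrf**2))
    # e.g. slhrf = 5 gives prob ~= 0.397, near the 0.4 ceiling
    return prob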
def draw_contour(shakefile, popfile, oceanfile, cityfile, outfilename, make_png=False): """Create a contour map showing population (greyscale) underneath contoured MMI. :param shakefile: String path to ShakeMap grid.xml file. :param popfile: String path to GDALGrid-compliant file containing population data. :param oceanfile: String path to file containing ocean vector data in a format compatible with fiona. :param cityfile: String path to file containing GeoNames cities data. :param outfilename: String path containing desired output PDF filename. :param make_png: Boolean indicating whether a PNG version of the file should also be created in the same output folder as the PDF. :returns: Tuple containing: - Name of PNG file created, or None if PNG output not specified. - CartopyCities object containing the cities that were rendered on the contour map. """ #load the shakemap - for the time being, we're interpolating the #population data to the shakemap, which would be important #if we were doing math with the pop values. We're not, so I think it's ok. shakegrid = ShakeGrid.load(shakefile, adjust='res') gd = shakegrid.getGeoDict() #retrieve the epicenter - this will get used on the map clat = shakegrid.getEventDict()['lat'] clon = shakegrid.getEventDict()['lon'] #load the population data, sample to shakemap popgrid = GDALGrid.load(popfile, samplegeodict=gd, resample=True) popdata = popgrid.getData() #smooth the MMI data for contouring mmi = shakegrid.getLayer('mmi').getData() smoothed_mmi = gaussian_filter(mmi, FILTER_SMOOTH) #clip the ocean data to the shakemap bbox = (gd.xmin, gd.ymin, gd.xmax, gd.ymax) oceanshapes = _clip_bounds(bbox, oceanfile) #load the cities data, limit to cities within shakemap bounds allcities = CartopyCities.fromDefault() cities = allcities.limitByBounds((gd.xmin, gd.xmax, gd.ymin, gd.ymax)) # Define ocean/land masks to do the contours, since we want different contour line styles over land and water. oceangrid = Grid2D.rasterizeFromGeometry(oceanshapes, gd, burnValue=1.0, fillValue=0.0, mustContainCenter=False, attribute=None) oceanmask = np.ma.masked_where(oceangrid == 1.0, smoothed_mmi) landmask = np.ma.masked_where(oceangrid == 0.0, smoothed_mmi) # Use our GMT-inspired palette class to create population and MMI colormaps popmap = ColorPalette.fromPreset('pop') mmimap = ColorPalette.fromPreset('mmi') #use the ShakeMap to determine the aspect ratio of the map aspect = (gd.xmax - gd.xmin) / (gd.ymax - gd.ymin) figheight = FIGWIDTH / aspect fig = plt.figure(figsize=(FIGWIDTH, figheight)) # set up axes object with PlateCaree (non) projection. 
ax = plt.axes([0.02, 0.02, 0.95, 0.95], projection=ccrs.PlateCarree()) #set the image extent to that of the data img_extent = (gd.xmin, gd.xmax, gd.ymin, gd.ymax) plt.imshow(popdata, origin='upper', extent=img_extent, cmap=popmap.cmap, vmin=popmap.vmin, vmax=popmap.vmax, zorder=9, interpolation='none') #define arrays of latitude and longitude we will use to plot MMI contours lat = np.linspace(gd.ymin, gd.ymax, gd.ny) lon = np.linspace(gd.xmin, gd.xmax, gd.nx) #contour the masked land/ocean MMI data at half-integer levels plt.contour(lon, lat, landmask, linewidths=3.0, linestyles='solid', zorder=10, cmap=mmimap.cmap, vmin=mmimap.vmin, vmax=mmimap.vmax, levels=np.arange(0.5, 10.5, 1.0)) plt.contour(lon, lat, oceanmask, linewidths=2.0, linestyles='dashed', zorder=13, cmap=mmimap.cmap, vmin=mmimap.vmin, vmax=mmimap.vmax, levels=np.arange(0.5, 10.5, 1.0)) #the idea here is to plot invisible MMI contours at integer levels and then label them. #labeling part does not currently work. cs = plt.contour(lon, lat, landmask, linewidths=0.0, levels=np.arange(0, 11), zorder=10) #clabel is not actually drawing anything, but it is blotting out a portion of the contour line. ?? ax.clabel(cs, np.arange(0, 11), colors='k', zorder=25) #set the extent of the map to our data ax.set_extent([lon.min(), lon.max(), lat.min(), lat.max()]) #draw the ocean data if isinstance(oceanshapes[0], mPolygon): for shape in oceanshapes[0]: ocean_patch = PolygonPatch(shape, zorder=10, facecolor=WATERCOLOR, edgecolor=WATERCOLOR) ax.add_patch(ocean_patch) else: ocean_patch = PolygonPatch(oceanshapes[0], zorder=10, facecolor=WATERCOLOR, edgecolor=WATERCOLOR) ax.add_patch(ocean_patch) # add coastlines with desired scale of resolution ax.coastlines('10m', zorder=11) #draw meridians and parallels using Cartopy's functions for that gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2, color=(0.9, 0.9, 0.9), alpha=0.5, linestyle='-', zorder=20) gl.xlabels_top = False gl.xlabels_bottom = False gl.ylabels_left = False gl.ylabels_right = False gl.xlines = True xlocs = np.arange(np.floor(gd.xmin - 1), np.ceil(gd.xmax + 1)) ylocs = np.arange(np.floor(gd.ymin - 1), np.ceil(gd.ymax + 1)) gl.xlocator = mticker.FixedLocator(xlocs) gl.ylocator = mticker.FixedLocator(ylocs) gl.xformatter = LONGITUDE_FORMATTER gl.yformatter = LATITUDE_FORMATTER gl.xlabel_style = {'size': 15, 'color': 'black'} gl.ylabel_style = {'size': 15, 'color': 'black'} #drawing our own tick labels INSIDE the plot, as Cartopy doesn't seem to support this. 
yrange = gd.ymax - gd.ymin xrange = gd.xmax - gd.xmin for xloc in gl.xlocator.locs: outside = xloc < gd.xmin or xloc > gd.xmax #don't draw labels when we're too close to either edge near_edge = (xloc - gd.xmin) < (xrange * 0.1) or (gd.xmax - xloc) < ( xrange * 0.1) if outside or near_edge: continue if xloc < 0: xtext = r'$%s^\circ$W' % str(abs(int(xloc))) else: xtext = r'$%s^\circ$E' % str(int(xloc)) ax.text(xloc, gd.ymax - (yrange / 35), xtext, fontsize=14, zorder=20, ha='center', fontname='Bitstream Vera Sans') for yloc in gl.ylocator.locs: outside = yloc < gd.ymin or yloc > gd.ymax #don't draw labels when we're too close to either edge near_edge = (yloc - gd.ymin) < (yrange * 0.1) or (gd.ymax - yloc) < ( yrange * 0.1) if outside or near_edge: continue if yloc < 0: ytext = r'$%s^\circ$S' % str(abs(int(yloc))) else: ytext = r'$%s^\circ$N' % str(int(yloc)) thing = ax.text(gd.xmin + (xrange / 100), yloc, ytext, fontsize=14, zorder=20, va='center', fontname='Bitstream Vera Sans') #Limit the number of cities we show - we may not want to use the population size #filter in the global case, but the map collision filter is a little sketchy right now. mapcities = cities.limitByPopulation(25000) mapcities = mapcities.limitByGrid() mapcities = mapcities.limitByMapCollision(ax, shadow=True) mapcities.renderToMap(ax, shadow=True, fontsize=12, zorder=11) #Get the corner of the map with the lowest population corner_rect, filled_corner = _get_open_corner(popgrid, ax) clat = round_to_nearest(clat, 1.0) clon = round_to_nearest(clon, 1.0) #draw a little globe in the corner showing in small-scale where the earthquake is located. proj = ccrs.Orthographic(central_latitude=clat, central_longitude=clon) ax2 = fig.add_axes(corner_rect, projection=proj) ax2.add_feature(cartopy.feature.OCEAN, zorder=0, facecolor=WATERCOLOR, edgecolor=WATERCOLOR) ax2.add_feature(cartopy.feature.LAND, zorder=0, edgecolor='black') ax2.plot([clon], [clat], 'w*', linewidth=1, markersize=16, markeredgecolor='k', markerfacecolor='r') gh = ax2.gridlines() ax2.set_global() ax2.outline_patch.set_edgecolor('black') ax2.outline_patch.set_linewidth(2) #Draw the map scale in the unoccupied lower corner. corner = 'lr' if filled_corner == 'lr': corner = 'll' draw_scale(ax, corner, pady=0.05, padx=0.05) plt.savefig(outfilename) pngfile = None if make_png: fpath, fname = os.path.split(outfilename) fbase, t = os.path.splitext(fname) pngfile = os.path.join(fpath, fbase + '.png') plt.savefig(pngfile) return (pngfile, mapcities)
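# A projection-free sketch of the land/ocean masking trick used above:
# rasterize the ocean to a 0/1 grid, then contour two masked copies of the
# same data so land gets solid lines and ocean gets dashed lines. The data
# here are synthetic.
def _masked_contour_example():
    import numpy as np
    import matplotlib.pyplot as plt
    data = np.random.rand(50, 50) * 10.
    ocean = np.zeros_like(data)
    ocean[:, :20] = 1.0  # pretend the left side is ocean
    landmask = np.ma.masked_where(ocean == 1.0, data)
    oceanmask = np.ma.masked_where(ocean == 0.0, data)
    levels = np.arange(0.5, 10.5, 1.0)
    plt.contour(landmask, linestyles='solid', levels=levels)
    plt.contour(oceanmask, linestyles='dashed', levels=levels)
    plt.close()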
def main(args):
    events = search(starttime=args.start,
                    endtime=args.end,
                    minlatitude=args.latmin,
                    maxlatitude=args.latmax,
                    minlongitude=args.lonmin,
                    maxlongitude=args.lonmax,
                    producttype='shakemap',
                    maxmagnitude=args.magRange[1],
                    minmagnitude=args.magRange[0])
    print('%i events found containing ShakeMaps.' % len(events))

    # Create the GeoDict to which the ShakeMaps will be resampled
    stack_dict = GeoDict.createDictFromBox(args.lonmin, args.lonmax,
                                           args.latmin, args.latmax,
                                           args.resolution, args.resolution)
    nrows, ncols = stack_dict.ny, stack_dict.nx
    imts = {}
    layer_names = {}
    event_info = {}
    layer_count = {}
    ic = 0
    for event in events:
        tnow = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
        if ic % 10 == 0:
            print('%s: Attempting to fetch ShakeMap for %s (%i of %i)' %
                  (tnow, event.id, ic, len(events)))
        ic += 1
        event_info[event.id] = event.toDict()
        try:
            detail = event.getDetailEvent()
        except Exception as e:
            fmt = 'Could not retrieve detail data for event %s, error "%s". Skipping.'
            print(fmt % (event.id, str(e)))
            continue
        if not detail.hasProduct('shakemap'):
            print('Event %s appears not to have a ShakeMap after all... skipping.'
                  % detail.id)
            continue  # was missing; without it the next line would raise
        shakemap = detail.getProducts('shakemap')[0]
        try:
            f, gridfile = tempfile.mkstemp()
            os.close(f)
            shakemap.getContent('grid.xml', gridfile)
            shakegrid = ShakeGrid.load(gridfile, samplegeodict=stack_dict,
                                       resample=True, doPadding=True)
            imtlist = list(shakegrid.getLayerNames())
            # remove the things that are not ground motions
            kill_list = ['stdpga', 'urat', 'svel']
            for layer in kill_list:
                if layer in imtlist:
                    imtlist.remove(layer)
            for imt in imtlist:
                imtdata = shakegrid.getLayer(imt).getData()
                if imt not in imts:
                    imts[imt] = np.zeros((nrows, ncols, len(events)))
                    layer_count[imt] = 0
                    idx = 0
                    layer_names[imt] = [event.id]
                else:
                    idx = layer_count[imt] + 1
                    layer_names[imt].append(event.id)
                    layer_count[imt] = layer_count[imt] + 1
                imts[imt][:, :, idx] = imtdata
        except Exception as e:
            print('Error fetching ShakeMap grid from %s - "%s". Skipping.' %
                  (event.id, str(e)))
        finally:
            os.remove(gridfile)

    # make sure all imts have valid grids in each vertical layer:
    # trim off any layers that don't have any data in them.
    for imtname, imtcube in imts.items():
        height_diff = len(events) - (layer_count[imtname] + 1)
        if height_diff:
            # layer_count holds the index of the last filled layer, so keep
            # layers 0..layer_count inclusive (the original slice dropped
            # the last filled layer)
            imts[imtname] = imtcube[:, :, 0:layer_count[imtname] + 1]

    # now create an HDF file, and stuff our data and metadata into it
    stack_file = GridHDFContainer.create(args.outputfile)
    stack_file.setDictionary('layer_names', layer_names)
    stack_file.setDictionary('event', event_info)
    metadata = stack_dict.asDict()
    for imtname, imtcube in imts.items():
        stack_file.setArray(imtname, imtcube, metadata=metadata,
                            compression=True)
    stack_file.close()
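# The stacking/trimming bookkeeping from main() in isolation (sketch):
# layer_count stores the index of the last filled vertical layer per IMT,
# so the slice that keeps only filled layers is [:, :, 0:layer_count + 1].
def _trim_demo():
    import numpy as np
    n_events = 5
    cube = np.zeros((2, 2, n_events))
    layer_count = 2  # layers 0, 1 and 2 were filled
    if n_events - (layer_count + 1):
        cube = cube[:, :, 0:layer_count + 1]
    assert cube.shape[2] == 3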
def getDataFrames(sampleparams, shakeparams, predictors, outparams): """Return Pandas training and testing data frames containing sampled data from hazard coverage, ShakeMap, and predictor data sets. :param sampleparams: Dictionary with at least these values: * coverage: Name of hazard coverage shapefile (decimal degrees). Required. * dx: Float desired sample resolution, and can be overridden by nmax, below (meters). Required. * cb: Desired class balance, i.e., fraction of sampled points that should be from hazard polygons. Optional for polygons, Required for points. * nmax: Maximum number of possible yes/no sample points (usually set to avoid memory issues). Optional. * nsamp: Number of total hazard and no-hazard sample points to collect. Required. * touch_center: Boolean (0 or 1) indicating whether polygons must touch the center of the cell in order for that cell to count as a "yes" sample point. * testpercent: Fraction of sampled points to be used for testing (1-testpercent) will be used for training. Optional, defaults to 0 * extent: xmin,xmax,ymin,ymax OR convex #geographic extent within which to sample data. Four numbers are interpreted as bounding box, the word convex will be interpreted to mean a convex hull. Default (not specified) will mean the bounding box of the hazard coverage. Optional. * h1: Minimum buffer size for sampling non-hazard points when input coverage takes the form of points. Optional for polygons, required for points. * h2: Maximum buffer size for sampling non-hazard points when input coverage takes the form of points. Optional for polygons, required for points. :param shakeparams: Dictionary with at least these values: * shakemap: Name of shakemap file to use for sampling hazard values. Required. * shakemap_uncertainty: Name of shakemap uncertainty file to use for sampling hazard uncertainty values. Optional. :param predictors: Dictionary with at least these values: * layername: Path to ESRI shapefile, or grid in GMT or ESRI format which represents predictor data. Required. * layername_sampling: 'nearest' or 'linear', optional for grids, not used for shapefiles. * layername_attribute: Name of attribute in shapefile which should be sampled at hazard/non-hazard points. Required for points. :param outparams: Dictionary with at least these values: * folder: Name of folder where all output (data frames, plots) will be written. Will be created if does not exist. Required. * basename: The name that will be included in all output file names (i.e., northridge_train.csv). Required. :returns: Tuple of (training,testing) Pandas data frames. 
""" coverage = sampleparams['coverage'] f = fiona.collection(coverage, 'r') cbounds = f.bounds f.close() dx = sampleparams['dx'] cb = sampleparams['cb'] nmax = sampleparams['nmax'] nsamp = sampleparams['nsamp'] touch_center = sampleparams['touch_center'] testpercent = sampleparams['testpercent'] extent = sampleparams['extent'] h1 = sampleparams['h1'] h2 = sampleparams['h2'] yestest, yestrain, notest, notrain, xvar, yvar, pshapes, proj = sampleFromFile( coverage, dx=dx, nmax=nmax, testPercent=testpercent, touch_center=touch_center, classBalance=cb, extent=extent, Nsamp=nsamp, h1=h1, h2=h2) traincolumns = OrderedDict() testcolumns = OrderedDict() if (100 - testpercent) > 0: traincolumns['lat'] = np.concatenate((yestrain[:, 1], notrain[:, 1])) traincolumns['lon'] = np.concatenate((yestrain[:, 0], notrain[:, 0])) traincolumns['coverage'] = np.concatenate( (np.ones_like(yestrain[:, 1]), np.zeros_like(notrain[:, 1]))) if testpercent > 0: testcolumns['lat'] = np.concatenate((yestest[:, 1], notest[:, 1])) testcolumns['lon'] = np.concatenate((yestest[:, 0], notest[:, 0])) testcolumns['coverage'] = np.concatenate( (np.ones_like(yestest[:, 1]), np.zeros_like(notest[:, 1]))) for predname, predfile in predictors.items(): ftype = getFileType(predfile) if ftype == 'shapefile': attribute = predictors[predname + '_attribute'] shapes = subsetShapes(predfile, cbounds) yes_test_samples = sampleShapes(shapes, yestest, attribute) no_test_samples = sampleShapes(shapes, notest, attribute) yes_train_samples = sampleShapes(shapes, yestrain, attribute) no_train_samples = sampleShapes(shapes, notrain, attribute) testcolumns[predname] = np.squeeze( np.concatenate((yes_test_samples, no_test_samples))) traincolumns[predname] = np.squeeze( np.concatenate((yes_train_samples, no_train_samples))) elif ftype == 'grid': method = 'nearest' if predname + '_sampling' in predictors: method = predictors[predname + '_sampling'] if testpercent > 0: yes_test_samples = sampleGridFile(predfile, yestest, method=method) no_test_samples = sampleGridFile(predfile, notest, method=method) testcolumns[predname] = np.squeeze( np.concatenate((yes_test_samples, no_test_samples))) if (100 - testpercent) > 0: yes_train_samples = sampleGridFile(predfile, yestrain, method=method) no_train_samples = sampleGridFile(predfile, notrain, method=method) traincolumns[predname] = np.squeeze( np.concatenate((yes_train_samples, no_train_samples))) else: continue # attribute or sampling method key #sample the shakemap layers = ['mmi', 'pga', 'pgv', 'psa03', 'psa10', 'psa30'] shakegrid = ShakeGrid.load(shakeparams['shakemap'], adjust='res') for layer in layers: yes_test_samples = sampleFromMultiGrid(shakegrid, layer, yestest) no_test_samples = sampleFromMultiGrid(shakegrid, layer, notest) yes_train_samples = sampleFromMultiGrid(shakegrid, layer, yestrain) no_train_samples = sampleFromMultiGrid(shakegrid, layer, notrain) if testpercent > 0: testcolumns[layer] = np.squeeze( np.concatenate((yes_test_samples, no_test_samples))) if (100 - testpercent) > 0: traincolumns[layer] = np.squeeze( np.concatenate((yes_train_samples, no_train_samples))) dftest = pd.DataFrame(testcolumns) dftrain = pd.DataFrame(traincolumns) return (dftrain, dftest)
def run_method(direc, voi, num_realizations, radius, corr_model, vscorr,
               output_dir):
    """
    Parallel code for computing the spatial correlation for a ShakeMap,
    adding it to a ShakeMap grid, and computing multiple realizations. The
    file may be run using:
        mpiexec -n # python test.py path imt distance_measure N
    where # is the desired number of processors. The parameters are listed
    below:

    :param direc: string, path to directory containing grid, stationlist,
        uncertainty, and event xmls and fault.txt
    :param voi: list of strings, intensity measures to use, e.g.
        ['pga', 'pgv', 'psa03']
    :param num_realizations: integer, number of realizations to compute
    :param radius: integer, radius of influence
    :param corr_model: string, specifies the correlation model
    :param vscorr: boolean, specifies whether vs30 values are correlated
    :param output_dir: path to directory where output is stored
    """
    start_time = time.time()

    # Start MPI
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    my_rank = comm.Get_rank()

    # Get shakemap, uncertainty grid, and stationdata
    shakegrid = ShakeGrid.load(os.path.join(direc, 'grid.xml'), adjust='res')
    unc_grid = ShakeGrid.load(os.path.join(direc, 'uncertainty.xml'),
                              adjust='res')
    stationlist = os.path.join(direc, 'stationlist.xml')
    stationdata = readStation(stationlist)

    # Initialize the grid
    # In this step we use the ShakeMap outputs to determine the grid points,
    # grid spacing, site collections, station data, and other initial values
    variables = initialize(shakegrid, unc_grid, stationdata, direc, voi)

    if my_rank == 0:
        print(variables['K'], 'stations', variables['M']*variables['N'],
              'data points')
    initialization_time = time.time() - start_time
    if my_rank == 0:
        print('Initialization time', initialization_time)
        sys.stdout.flush()

    # Compute the grid, mu, and sigma arrays
    # In this step, we use the correlation model to compute the covariance
    # matrices for each point on the ShakeMap grid. This computation is done
    # in parallel
    out = main(variables, radius, voi, corr_model, vscorr)
    main_time = time.time() - start_time - initialization_time
    if my_rank == 0:
        print('Main time', main_time)

    # Compute realizations of the random field
    # After computing the covariance matrices for each point, we can compute
    # realizations of the random fields. If multiple cores are used, each
    # core needs the data for every point on the grid.
    for ii in range(0, np.size(voi)):
        if num_realizations == 1:
            # Master will compute this single realization
            if my_rank == 0:
                data = realizations(1, 1, radius, variables,
                                    out['grid_arr'],
                                    out['mu_arr'][voi[ii]],
                                    out['sigma_arr'][voi[ii]],
                                    out['list_sizes_grid'],
                                    out['list_sizes_mu'],
                                    shakegrid, voi[ii], comm, direc,
                                    output_dir)
        else:
            # Master broadcasts the arrays to the other cores
            if my_rank == 0:
                grid_arr = out['grid_arr']
                mu_arr = out['mu_arr'][voi[ii]]
                sigma_arr = out['sigma_arr'][voi[ii]]
                list_sizes_grid = out['list_sizes_grid']
                list_sizes_mu = out['list_sizes_mu']
            else:
                grid_arr = None
                mu_arr = None
                sigma_arr = None
                list_sizes_grid = None
                list_sizes_mu = None
            grid_arr = comm.bcast(grid_arr, root=0)
            mu_arr = comm.bcast(mu_arr, root=0)
            sigma_arr = comm.bcast(sigma_arr, root=0)
            list_sizes_grid = comm.bcast(list_sizes_grid, root=0)
            list_sizes_mu = comm.bcast(list_sizes_mu, root=0)
            # Each core does a set of realizations
            my_reals = np.arange(my_rank, num_realizations, size)
            data = realizations(num_realizations, my_reals, radius,
                                variables, grid_arr, mu_arr, sigma_arr,
                                list_sizes_grid, list_sizes_mu, shakegrid,
                                voi[ii], comm, direc, output_dir)
            # (the single-realization call above previously passed an
            # undefined 'method' argument; both calls now share one signature)

    realization_time = time.time() - start_time - initialization_time - main_time
    if my_rank == 0:
        print('Realization time', realization_time)
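# The broadcast/striding pattern from run_method in isolation (sketch,
# mpi4py): rank 0 owns the arrays, broadcasts them, and each rank takes a
# round-robin subset of the realizations. Run with e.g.
# "mpiexec -n 4 python this_script.py".
def _bcast_demo(num_realizations=10):
    import numpy as np
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    my_rank = comm.Get_rank()
    data = np.arange(4.) if my_rank == 0 else None
    data = comm.bcast(data, root=0)  # every rank now holds the array
    my_reals = np.arange(my_rank, num_realizations, size)
    print('rank %i handles realizations %s' % (my_rank, my_reals))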
def calcExposure(self,shakefile): """Calculate population exposure to shaking, per country, plus total exposure across all countries. :param shakefile: Path to ShakeMap grid.xml file. :returns: Dictionary containing country code (ISO2) keys, and values of 10 element arrays representing population exposure to MMI 1-10. Dictionary will contain an additional key 'TotalExposure', with value of exposure across all countries. Dictionary will also contain a field "maximum_border_mmi" which indicates the maximum MMI value along any edge of the ShakeMap. """ #get shakemap geodict shakedict = ShakeGrid.getFileGeoDict(shakefile,adjust='res') #get population geodict popdict,t = self._pop_class.getFileGeoDict(self._popfile) #get country code geodict isodict,t = self._iso_class.getFileGeoDict(self._isofile) #special case for very high latitude events that may be outside the bounds #of our population data... if not popdict.intersects(shakedict): expdict = {'UK':np.zeros((10,)),'TotalExposure':np.zeros((10,))} return expdict if popdict == shakedict == isodict: #special case, probably for testing... self._shakegrid = ShakeGrid.load(shakefile,adjust='res') self._popgrid = self._pop_class.load(self._popfile) self._isogrid = self._iso_class.load(self._isofile) else: sampledict = popdict.getBoundsWithin(shakedict) self._shakegrid = ShakeGrid.load(shakefile,samplegeodict=sampledict,resample=True, method='linear',adjust='res') self._popgrid = self._pop_class.load(self._popfile,samplegeodict=sampledict, resample=False,doPadding=True,padValue=np.nan) self._isogrid = self._iso_class.load(self._isofile,samplegeodict=sampledict, resample=True,method='nearest',doPadding=True,padValue=0) mmidata = self._shakegrid.getLayer('mmi').getData() popdata = self._popgrid.getData() isodata = self._isogrid.getData() eventyear = self._shakegrid.getEventDict()['event_timestamp'].year #in order to avoid crazy far-future scenarios where PAGER models are probably invalid, #check to see if the time gap between the date of population data collection and event year #reaches either of a couple of different thresholds. if eventyear > self._popyear: tdiff = (eventyear - self._popyear) if tdiff > SCENARIO_WARNING and tdiff < SCENARIO_ERROR: msg = '''The input ShakeMap event year is more than %i years from the population date. PAGER results for events this far in the future may not be valid.''' % SCENARIO_WARNING warnings.warn(msg) if tdiff > SCENARIO_ERROR: msg = '''The input ShakeMap event year is more than %i years from the population date. PAGER results for events this far in the future are not valid. Stopping.''' % SCENARIO_ERROR raise PagerException(msg) ucodes = np.unique(isodata) for ccode in ucodes: cidx = (isodata == ccode) popdata[cidx] = self._popgrowth.adjustPopulation(popdata[cidx],ccode,self._popyear,eventyear) exposure_dict = calc_exposure(mmidata,popdata,isodata) newdict = {} #Get rolled up exposures total = np.zeros((10,),dtype=np.uint32) for isocode,value in exposure_dict.items(): cdict = self._country.getCountry(int(isocode)) if cdict is None: ccode = 'UK' else: ccode = cdict['ISO2'] newdict[ccode] = value total += value newdict['TotalExposure'] = total #get the maximum MMI value along any of the four map edges nrows,ncols = mmidata.shape top = mmidata[0,0:ncols].max() bottom = mmidata[nrows-1,0:ncols].max() left = mmidata[0:nrows,0].max() right = mmidata[0:nrows,ncols-1].max() newdict['maximum_border_mmi'] = np.array([top,bottom,left,right]).max() return newdict
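# A sketch of the per-country binning that calc_exposure presumably performs
# (the real implementation is imported elsewhere, so the signature and the
# rounding rule here are assumptions): sum population over cells whose
# rounded MMI falls in each intensity bin 1-10, per country code.
def _exposure_sketch(mmidata, popdata, isodata):
    import numpy as np
    expdict = {}
    mmibin = np.clip(np.round(mmidata), 1, 10)
    for ccode in np.unique(isodata):
        cidx = (isodata == ccode)
        exposure = np.zeros((10,), dtype=np.uint32)
        for mmi in range(1, 11):
            exposure[mmi - 1] = np.nansum(popdata[cidx & (mmibin == mmi)])
        expdict[ccode] = exposure
    return expdict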
def test_save():
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir, 'test.xml')
    try:
        print('Testing save/read functionality for shakemap grids...')
        pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4)
        pgv = np.arange(1, 17, dtype=np.float32).reshape(4, 4)
        mmi = np.arange(2, 18, dtype=np.float32).reshape(4, 4)
        geodict = GeoDict({'xmin': 0.5, 'xmax': 3.5,
                           'ymin': 0.5, 'ymax': 3.5,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 4, 'nx': 4})
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        shakeDict = {'event_id': 'usabcd1234',
                     'shakemap_id': 'usabcd1234',
                     'shakemap_version': 1,
                     'code_version': '4.0',
                     'process_timestamp': datetime.utcnow(),
                     'shakemap_originator': 'us',
                     'map_status': 'RELEASED',
                     'shakemap_event_type': 'ACTUAL'}
        eventDict = {'event_id': 'usabcd1234',
                     'magnitude': 7.6,
                     'depth': 1.4,
                     'lat': 2.0,
                     'lon': 2.0,
                     'event_timestamp': datetime.utcnow(),
                     'event_network': 'us',
                     'event_description': 'sample event'}
        uncDict = {'pga': (0.0, 0),
                   'pgv': (0.0, 0),
                   'mmi': (0.0, 0)}
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)

        print('Testing save/read functionality...')
        shake.save(testfile, version=3)
        shake2 = ShakeGrid.load(testfile)
        for layer in ['pga', 'pgv', 'mmi']:
            tdata = shake2.getLayer(layer).getData()
            np.testing.assert_almost_equal(tdata, layers[layer])
        print('Passed save/read functionality for shakemap grids.')

        print('Testing getFileGeoDict method...')
        fgeodict = ShakeGrid.getFileGeoDict(testfile)
        print('Passed getFileGeoDict method.')

        print('Testing loading with bounds (no resampling or padding)...')
        sampledict = GeoDict({'xmin': -0.5, 'xmax': 3.5,
                              'ymin': -0.5, 'ymax': 3.5,
                              'dx': 1.0, 'dy': 1.0,
                              'ny': 5, 'nx': 5})
        shake3 = ShakeGrid.load(testfile, samplegeodict=sampledict,
                                resample=False, doPadding=False,
                                padValue=np.nan)
        tdata = shake3.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, layers['pga'])
        print('Passed loading with bounds (no resampling or padding)...')

        print('Testing loading shakemap with padding, no resampling...')
        newdict = GeoDict({'xmin': -0.5, 'xmax': 4.5,
                           'ymin': -0.5, 'ymax': 4.5,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 6, 'nx': 6})
        shake4 = ShakeGrid.load(testfile, samplegeodict=newdict,
                                resample=False, doPadding=True,
                                padValue=np.nan)
        output = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
                           [np.nan, 0.0, 1.0, 2.0, 3.0, np.nan],
                           [np.nan, 4.0, 5.0, 6.0, 7.0, np.nan],
                           [np.nan, 8.0, 9.0, 10.0, 11.0, np.nan],
                           [np.nan, 12.0, 13.0, 14.0, 15.0, np.nan],
                           [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
        tdata = shake4.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, output)
        print('Passed loading shakemap with padding, no resampling...')

        # make a bigger grid
        pga = np.arange(0, 36, dtype=np.float32).reshape(6, 6)
        pgv = np.arange(1, 37, dtype=np.float32).reshape(6, 6)
        mmi = np.arange(2, 38, dtype=np.float32).reshape(6, 6)
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        geodict = GeoDict({'xmin': 0.5, 'xmax': 5.5,
                           'ymin': 0.5, 'ymax': 5.5,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 6, 'nx': 6})
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)
        shake.save(testfile, version=3)

        print('Testing resampling, no padding...')
        littledict = GeoDict({'xmin': 2.0, 'xmax': 4.0,
                              'ymin': 2.0, 'ymax': 4.0,
                              'dx': 1.0, 'dy': 1.0,
                              'ny': 3, 'nx': 3})
        shake5 = ShakeGrid.load(testfile, samplegeodict=littledict,
                                resample=True, doPadding=False,
                                padValue=np.nan)
        output = np.array([[10.5, 11.5, 12.5],
                           [16.5, 17.5, 18.5],
                           [22.5, 23.5, 24.5]])
        tdata = shake5.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, output)
        print('Passed resampling, no padding...')

        print('Testing resampling and padding...')
        pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4)
        pgv = np.arange(1, 17, dtype=np.float32).reshape(4, 4)
        mmi = np.arange(2, 18, dtype=np.float32).reshape(4, 4)
        geodict = GeoDict({'xmin': 0.5, 'xmax': 3.5,
                           'ymin': 0.5, 'ymax': 3.5,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 4, 'nx': 4})
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)
        shake.save(testfile, version=3)
        bigdict = GeoDict({'xmin': 0.0, 'xmax': 4.0,
                           'ymin': 0.0, 'ymax': 4.0,
                           'dx': 1.0, 'dy': 1.0,
                           'ny': 5, 'nx': 5})
        shake6 = ShakeGrid.load(testfile, samplegeodict=bigdict,
                                resample=True, doPadding=True,
                                padValue=np.nan)
        tdata = shake6.getLayer('pga').getData()
        output = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
                           [np.nan, 2.5, 3.5, 4.5, np.nan],
                           [np.nan, 6.5, 7.5, 8.5, np.nan],
                           [np.nan, 10.5, 11.5, 12.5, np.nan],
                           [np.nan, np.nan, np.nan, np.nan, np.nan]])
        np.testing.assert_almost_equal(tdata, output)
        print('Passed resampling and padding...')
    except Exception as error:
        print('Failed to read grid.xml format file "%s". Error "%s".'
              % (testfile, str(error)))
        raise
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
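# Why the interior values in the final expected array are 2.5, 3.5, ...:
# the target grid's cell centers sit half a cell off from the source grid's,
# so 'linear' (bilinear) resampling reduces to averaging each 2x2
# neighborhood of the original grid. A minimal numpy-only check of that
# claim, independent of ShakeGrid:
import numpy as np

pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4)
block_mean = (pga[:-1, :-1] + pga[:-1, 1:] + pga[1:, :-1] + pga[1:, 1:]) / 4.0
print(block_mean)  # [[2.5 3.5 4.5], [6.5 7.5 8.5], [10.5 11.5 12.5]]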
def quickcut(filename, gdict, tempname=None, extrasamp=5., method='bilinear',
             precise=True, cleanup=True, verbose=False, override=False):
    """
    Use gdal to trim a large global file down quickly so mapio can read it
    efficiently. (gdal cannot read ShakeMap .xml files; save them as .bil
    first.)

    Args:
        filename (str): File path to original input file (raster).
        gdict (geodict): Geodictionary to cut around and align with.
        tempname (str): File path to desired location of clipped part of
            filename.
        extrasamp (float): Number of extra cells to cut around each edge of
            geodict to provide a resampling buffer for future steps.
        method (str): If resampling is necessary, the method to use.
        precise (bool): If True, resample to the gdict as closely as
            possible; if False, just roughly cut around the area of interest
            without changing the resolution.
        cleanup (bool): If True, delete tempname after reading it back in.
        verbose (bool): If True, print more details.
        override (bool): If True and the extent of filename is not fully
            contained by gdict, read in the entire file (only used for
            ShakeMaps).

    Returns:
        New Grid2D layer.

    Note:
        This function uses the subprocess approach because ``gdal.Translate``
        does not block until the output file is created, which causes
        problems in the subsequent steps.
    """
    if gdict.xmax < gdict.xmin:
        raise Exception('quickcut: your geodict xmax is smaller than xmin')

    try:
        filegdict = GDALGrid.getFileGeoDict(filename)
    except Exception:
        try:
            filegdict = GMTGrid.getFileGeoDict(filename)
        except Exception:
            raise Exception('Cannot get geodict for %s' % filename)

    if tempname is None:
        tempdir = tempfile.mkdtemp()
        tempname = os.path.join(tempdir, 'junk.tif')
        deltemp = True
    else:
        tempdir = None
        deltemp = False

    filegdict = filegdict[0]

    # Get the right methods for mapio (method) and gdal (method2); note
    # this must be an elif chain so a remapped method is not overwritten
    # by the final else clause.
    if method == 'linear':
        method2 = 'bilinear'
    elif method == 'nearest':
        method2 = 'near'
    elif method == 'bilinear':
        method = 'linear'
        method2 = 'bilinear'
    elif method == 'near':
        method = 'nearest'
        method2 = 'near'
    else:
        method2 = method

    if filegdict != gdict:
        # First cut without resampling
        tempgdict = GeoDict.createDictFromBox(gdict.xmin, gdict.xmax,
                                              gdict.ymin, gdict.ymax,
                                              filegdict.dx, filegdict.dy,
                                              inside=True)
        try:
            egdict = filegdict.getBoundsWithin(tempgdict)
            ulx = egdict.xmin - extrasamp * egdict.dx
            uly = egdict.ymax + extrasamp * egdict.dy
            lrx = egdict.xmax + (extrasamp + 1) * egdict.dx
            lry = egdict.ymin - (extrasamp + 1) * egdict.dy
            cmd = ('gdal_translate -a_srs EPSG:4326 -of GTiff '
                   '-projwin %1.8f %1.8f %1.8f %1.8f -r %s %s %s'
                   % (ulx, uly, lrx, lry, method2, filename, tempname))
        except Exception as e:
            if override:
                # When a ShakeMap is being loaded, sometimes the bounds
                # won't align because it is already cut to the area, so
                # just load the whole file
                cmd = ('gdal_translate -a_srs EPSG:4326 -of GTiff -r %s %s %s'
                       % (method2, filename, tempname))
            else:
                raise Exception('Failed to cut layer: %s' % e)

        rc, so, se = get_command_output(cmd)
        if not rc:
            raise Exception(se.decode())
        else:
            if verbose:
                print(so.decode())

        newgrid2d = GDALGrid.load(tempname)
        if precise:
            # Resample to exact geodictionary
            newgrid2d = newgrid2d.interpolate2(gdict, method=method)
        if cleanup:
            os.remove(tempname)
        if deltemp:
            shutil.rmtree(tempdir)
    else:
        ftype = GMTGrid.getFileType(filename)
        if ftype != 'unknown':
            newgrid2d = GMTGrid.load(filename)
        elif filename.endswith('.xml'):
            newgrid2d = ShakeGrid.load(filename)
        else:
            newgrid2d = GDALGrid.load(filename)

    return newgrid2d
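# Usage sketch for quickcut() (the raster path is a hypothetical
# placeholder; the GeoDict is built with mapio's createDictFromBox, which
# quickcut itself uses above):
#
# from mapio.geodict import GeoDict
# gdict = GeoDict.createDictFromBox(-122.0, -120.0, 36.0, 38.0,
#                                   0.008333, 0.008333)
# topo = quickcut('/data/global_topo.tif', gdict, method='bilinear',
#                 precise=True, cleanup=True)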
def getLosses(self, shakefile):
    """Calculate the number of fatalities using the semi-empirical approach.

    :param shakefile:
      Path to a ShakeMap grid.xml file.
    :returns:
      Tuple of:
        1) Total number of fatalities.
        2) Dictionary of residential fatalities per building type, per
           country.
        3) Dictionary of non-residential fatalities per building type, per
           country.
    """
    # get shakemap geodict
    shakedict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
    # get population geodict
    popdict = get_file_geodict(self._popfile)
    # get country code geodict
    isodict = get_file_geodict(self._isofile)
    # get urban grid geodict
    urbdict = get_file_geodict(self._urbanfile)

    # load all of the grids we need
    if popdict == shakedict == isodict == urbdict:
        # special case, probably for testing...
        shakegrid = ShakeGrid.load(shakefile, adjust='res')
        popgrid = read(self._popfile)
        isogrid = read(self._isofile)
        urbgrid = read(self._urbanfile)
    else:
        sampledict = popdict.getBoundsWithin(shakedict)
        shakegrid = ShakeGrid.load(shakefile,
                                   samplegeodict=sampledict,
                                   resample=True,
                                   method='linear',
                                   adjust='res')
        popgrid = read(self._popfile,
                       samplegeodict=sampledict,
                       resample=False)
        isogrid = read(self._isofile,
                       samplegeodict=sampledict,
                       resample=True,
                       method='nearest',
                       doPadding=True,
                       padValue=0)
        urbgrid = read(self._urbanfile,
                       samplegeodict=sampledict,
                       resample=True,
                       method='nearest',
                       doPadding=True,
                       padValue=RURAL)

    # determine the local apparent time of day (based on longitude)
    edict = shakegrid.getEventDict()
    etime = edict['event_timestamp']
    elon = edict['lon']
    time_of_day, event_year, event_hour = get_time_of_day(etime, elon)

    # round our MMI data to the nearest 0.5 (5.5 stays 5.5, 5.4 becomes 5.5,
    # 5.24 becomes 5.0, etc.)
    # TODO: Someday, make this more general to include perhaps grids of all
    # IMT values, or at least the ones we have collapse data for.
    mmidata = np.round(shakegrid.getLayer('mmi').getData() / 0.5) * 0.5

    # get arrays from our other grids
    popdata = popgrid.getData()
    isodata = isogrid.getData()
    urbdata = urbgrid.getData()

    # modify the population values for growth rate by country
    ucodes = np.unique(isodata[~np.isnan(isodata)])
    for ccode in ucodes:
        cidx = (isodata == ccode)
        popdata[cidx] = self._popgrowth.adjustPopulation(
            popdata[cidx], ccode, self._popyear, event_year)

    # create a dictionary containing indoor populations by building type
    # (in cells where MMI >= 6)
    # popbystruct = get_indoor_pop(mmidata, popdata, urbdata, isodata,
    #                              time_of_day)

    # find all mmi values greater than 9, set them to 9
    mmidata[mmidata > 9.0] = 9.0

    # dictionary containers for sums of fatalities (res/nonres) by
    # building type
    res_fatal_by_ccode = {}
    nonres_fatal_by_ccode = {}

    # fatality sum
    ntotal = 0

    # loop over countries
    ucodes = np.unique(isodata[~np.isnan(isodata)])
    for ucode in ucodes:
        if ucode == 0:
            continue
        res_fatal_by_btype = {}
        nonres_fatal_by_btype = {}

        cdict = self._country.getCountry(int(ucode))
        ccode = cdict['ISO2']
        # get the workforce Series data for the current country
        wforce = self.getWorkforce(ccode)
        if wforce is None:
            logging.info('No workforce data for %s. Skipping.'
                         % (cdict['Name']))
            continue

        # loop over MMI values 6-9
        for mmi in np.arange(6, 9.5, 0.5):
            c1 = (mmidata == mmi)
            c2 = (isodata == ucode)
            if ucode > 900 and ucode != CALIFORNIA_US_CCODE:
                ucode = US_CCODE
            for dclass in [URBAN, RURAL]:
                c3 = (urbdata == dclass)
                # get the population in the cells that match the MMI value,
                # the country, and the density class (an AND of the three
                # conditions)
                popcells = popdata[c1 & c2 & c3]
                # get the population distribution across residential,
                # non-residential, and outdoor.
                res, nonres, outside = pop_dist(
                    popcells, wforce, time_of_day, dclass)
                # get the inventory for urban residential
                resrow, nresrow = self.getInventories(ccode, dclass)
                # TODO - figure out why this is happening, make the
                # following lines unnecessary
                if 'Unnamed: 0' in resrow:
                    resrow = resrow.drop('Unnamed: 0')
                if 'Unnamed: 0' in nresrow:
                    nresrow = nresrow.drop('Unnamed: 0')
                # now multiply the residential/non-residential population
                # through the inventory data
                numres = len(resrow)
                numnonres = len(nresrow)
                resmat = np.reshape(
                    resrow.values, (numres, 1)).astype(np.float32)
                nresmat = np.reshape(
                    nresrow.values, (numnonres, 1)).astype(np.float32)
                popres = np.tile(res, (numres, 1))
                popnonres = np.tile(nonres, (numnonres, 1))
                popresbuilding = (popres * resmat)
                popnonresbuilding = (popnonres * nresmat)

                # now we have the residential and non-residential population
                # distributed through the building types for each cell that
                # matches the MMI, country, and density criteria.
                # popresbuilding rows are building types, columns are
                # population cells.

                # next, we get the collapse rates for these buildings
                # and multiply them by the population by building.
                collapse_res = self.getCollapse(ccode, mmi, resrow)
                collapse_nonres = self.getCollapse(ccode, mmi, nresrow)
                resrates = np.reshape(
                    collapse_res.values.astype(np.float32), (numres, 1))
                nonresrates = np.reshape(
                    collapse_nonres.values.astype(np.float32),
                    (numnonres, 1))
                rescollapse = popresbuilding * resrates
                nonrescollapse = popnonresbuilding * nonresrates

                # get the fatality rates given collapse by building type and
                # multiply through the result of collapse*population per
                # building
                resfatalcol = self.getFatalityRates(
                    ccode, time_of_day, resrow)
                nonresfatalcol = self.getFatalityRates(
                    ccode, time_of_day, nresrow)
                resfatal = np.reshape(
                    resfatalcol.values.astype(np.float32), (numres, 1))
                nonresfatal = np.reshape(
                    nonresfatalcol.values.astype(np.float32),
                    (numnonres, 1))
                resfat = rescollapse * resfatal
                nonresfat = nonrescollapse * nonresfatal

                # zero out the cells where fatalities are less than 1 or nan
                try:
                    if len(resfat) and len(resfat[0]):
                        resfat[np.ma.masked_less(resfat, 1).mask] = 0.0
                except Exception:
                    resfat[np.isnan(resfat)] = 0.0
                try:
                    if len(nonresfat) and len(nonresfat[0]):
                        nonresfat[np.ma.masked_less(
                            nonresfat, 1).mask] = 0.0
                except Exception:
                    nonresfat[np.isnan(nonresfat)] = 0.0

                # sum the fatalities per building through all cells
                resfatbybuilding = np.nansum(resfat, axis=1)
                nonresfatbybuilding = np.nansum(nonresfat, axis=1)
                resfdict = dict(
                    zip(resrow.index, resfatbybuilding.tolist()))
                nonresfdict = dict(
                    zip(nresrow.index, nonresfatbybuilding.tolist()))
                res_fatal_by_btype = add_dicts(
                    res_fatal_by_btype, resfdict)
                nonres_fatal_by_btype = add_dicts(
                    nonres_fatal_by_btype, nonresfdict)

        # add the fatalities by building type to the dictionary containing
        # fatalities by country
        res_fatal_by_ccode[ccode] = res_fatal_by_btype.copy()
        nonres_fatal_by_ccode[ccode] = nonres_fatal_by_btype.copy()

        # increment the total number of fatalities
        ntotal += int(sum(res_fatal_by_btype.values()) +
                      sum(nonres_fatal_by_btype.values()))

    return (ntotal, res_fatal_by_ccode, nonres_fatal_by_ccode)
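# Minimal numpy illustration of the per-building-type algebra in the inner
# loop above, with made-up numbers: rows are building types, columns are
# grid cells, and the final nansum collapses fatalities over cells.
import numpy as np

res = np.array([100., 50., 10.])        # indoor residential pop, 3 cells
resmat = np.array([[0.7], [0.3]])       # inventory fractions, 2 building types
popresbuilding = np.tile(res, (2, 1)) * resmat  # pop per type per cell
resrates = np.array([[0.002], [0.01]])  # collapse rates at this MMI
resfatal = np.array([[0.1], [0.2]])     # fatality rates given collapse
resfat = popresbuilding * resrates * resfatal
print(np.nansum(resfat, axis=1))        # fatalities per building type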
def hazus_liq(shakefile, config, uncertfile=None, saveinputs=False,
              modeltype=None, displmodel=None, probtype=None, bounds=None):
    """
    Compute the probability of liquefaction using the Hazus method, with the
    Wills et al. (2015) Vs30 map of California defining the susceptibility
    classes and the Fan et al. global model providing the water table depth.
    """
    layers = config['hazus_liq_cal']['layers']
    vs30_file = layers['vs30']['file']
    wtd_file = layers['watertable']['file']
    shkgdict = ShakeGrid.getFileGeoDict(shakefile)
    fgeodict = GMTGrid.getFileGeoDict(vs30_file)[0]

    #---------------------------------------------------------------------------
    # Loading
    #---------------------------------------------------------------------------
    shakemap = ShakeGrid.load(shakefile, fgeodict, resample=True,
                              method='linear', doPadding=True)
    PGA = shakemap.getLayer('pga').getData() / 100  # convert to g
    griddict, eventdict, specdict, fields, uncertainties = getHeaderData(
        shakefile)
    mag = eventdict['magnitude']

    # Correction factor for moment magnitudes other than M=7.5
    k_m = 0.0027 * mag**3 - 0.0267 * mag**2 - 0.2055 * mag + 2.9188

    #---------------------------------------------------------------------------
    # Susceptibility from Vs30
    #---------------------------------------------------------------------------
    vs30_grid = GMTGrid.load(vs30_file)
    vs30 = vs30_grid.getData()
    p_ml = np.zeros_like(vs30)
    a = np.zeros_like(vs30)
    b = np.zeros_like(vs30)
    for k, v in config['hazus_liq_cal']['parameters'].items():
        ind = np.where(vs30 == float(v[0]))
        if v[1] == "VH":
            p_ml[ind] = 0.25
            a[ind] = 9.09
            b[ind] = -0.82
        if v[1] == "H":
            p_ml[ind] = 0.2
            a[ind] = 7.67
            b[ind] = -0.92
        if v[1] == "M":
            p_ml[ind] = 0.1
            a[ind] = 6.67
            b[ind] = -1.0
        if v[1] == "L":
            p_ml[ind] = 0.05
            a[ind] = 5.57
            b[ind] = -1.18
        if v[1] == "VL":
            p_ml[ind] = 0.02
            a[ind] = 4.16
            b[ind] = -1.08

    # Conditional liquefaction probability for a given susceptibility
    # category at a specified PGA
    p_liq_pga = a * PGA + b
    p_liq_pga = p_liq_pga.clip(min=0, max=1)

    #---------------------------------------------------------------------------
    # Water table
    #---------------------------------------------------------------------------
    wtd_grid = GMTGrid.load(wtd_file, fgeodict, resample=True,
                            method=layers['watertable']['interpolation'],
                            doPadding=True)
    tmp = wtd_grid._data
    tmp = np.nan_to_num(tmp)

    # Convert to feet
    wt_ft = tmp * 3.28084

    # Correction factor for groundwater depths other than five feet
    k_w = 0.022 * wt_ft + 0.93

    #---------------------------------------------------------------------------
    # Combine to get conditional liquefaction probability
    #---------------------------------------------------------------------------
    p_liq_sc = p_liq_pga * p_ml / k_m / k_w

    #---------------------------------------------------------------------------
    # Turn output and inputs into grids and put in maplayers dictionary
    #---------------------------------------------------------------------------
    maplayers = collections.OrderedDict()

    temp = shakemap.getShakeDict()
    shakedetail = '%s_ver%s' % (temp['shakemap_id'], temp['shakemap_version'])
    modelsref = config['hazus_liq_cal']['shortref']
    modellref = config['hazus_liq_cal']['longref']
    modeltype = 'Hazus/Wills'
    maplayers['model'] = {
        'grid': GDALGrid(p_liq_sc, fgeodict),
        'label': 'Probability',
        'type': 'output',
        'description': {
            'name': modelsref,
            'longref': modellref,
            'units': 'coverage',
            'shakemap': shakedetail,
            'parameters': {'modeltype': modeltype}
        }
    }

    if saveinputs is True:
        maplayers['pga'] = {
            'grid': GDALGrid(PGA, fgeodict),
            'label': 'PGA (g)',
            'type': 'input',
            'description': {'units': 'g', 'shakemap': shakedetail}
        }
        maplayers['vs30'] = {
            'grid': GDALGrid(vs30, fgeodict),
            'label': 'Vs30 (m/s)',
            'type': 'input',
            'description': {'units': 'm/s'}
        }
        maplayers['wtd'] = {
            'grid': GDALGrid(wtd_grid._data, fgeodict),
            'label': 'wtd (m)',
            'type': 'input',
            'description': {'units': 'm'}
        }
    return maplayers
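# Worked example of the two Hazus correction factors above (numbers are
# computed from the formulas in the function; the scenario values are
# chosen arbitrarily): a M6.5 event with a 3 m (~9.8 ft) water table, for a
# "High" susceptibility cell (p_ml = 0.2, a = 7.67, b = -0.92) at PGA = 0.3 g.
import numpy as np

mag = 6.5
k_m = 0.0027 * mag**3 - 0.0267 * mag**2 - 0.2055 * mag + 2.9188  # ~1.20
wt_ft = 3.0 * 3.28084
k_w = 0.022 * wt_ft + 0.93                                       # ~1.15
p_liq_pga = float(np.clip(7.67 * 0.3 - 0.92, 0, 1))              # clips to 1.0
p_liq = p_liq_pga * 0.2 / k_m / k_w                              # ~0.146
print(k_m, k_w, p_liq)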
def kritikos_fuzzygamma(shakefile, config, bounds=None):
    """
    Runs the Kritikos procedure with fuzzy gamma overlay.
    """
    cmodel = config['statistic_models']['kritikos_2014']
    gamma = cmodel['gamma_value']

    # Read in layer files and get data
    layers = cmodel['layers']
    try:
        # Slope
        slopefile = layers['slope']
        # Distance from fault (DFF)
        dff_file = layers['dff']
        # Distance from stream (DFS)
        dfs_file = layers['dfs']
        # Slope position
        slope_pos_file = layers['slope_pos']
    except Exception:
        print('Unable to retrieve grid data.')

    try:
        div = cmodel['divisor']
        # Load in divisors
        MMI_div = div['MMI']
        slope_div = div['slope']
        dff_div = div['dff']
        dfs_div = div['dfs']
        slope_pos_div = div['slope_pos']
    except Exception:
        print('Unable to retrieve divisors.')

    try:
        power = cmodel['power']
        # Load in powers
        MMI_power = power['MMI']
        slope_power = power['slope']
        dff_power = power['dff']
        dfs_power = power['dfs']
        slope_pos_power = power['slope_pos']
    except Exception:
        print('Unable to retrieve powers.')

    # Cut and resample all files
    try:
        shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
        slopedict = GDALGrid.getFileGeoDict(slopefile)
        if bounds is not None:
            # Make sure bounds are within the ShakeMap grid
            if (shkgdict.xmin > bounds['xmin'] or
                    shkgdict.xmax < bounds['xmax'] or
                    shkgdict.ymin > bounds['ymin'] or
                    shkgdict.ymax < bounds['ymax']):
                print('Specified bounds are outside shakemap area, using '
                      'ShakeMap bounds instead')
                bounds = None
        if bounds is not None:
            tempgdict = GeoDict({'xmin': bounds['xmin'],
                                 'ymin': bounds['ymin'],
                                 'xmax': bounds['xmax'],
                                 'ymax': bounds['ymax'],
                                 'dx': 100., 'dy': 100.,
                                 'nx': 100, 'ny': 100},
                                adjust='res')
            gdict = slopedict.getBoundsWithin(tempgdict)
        else:
            # Get boundaries from the shakemap if not specified
            gdict = slopedict.getBoundsWithin(shkgdict)
    except Exception:
        print('Unable to create base geodict.')

    # Load in data
    try:
        # Load in slope data
        slopegrid = GDALGrid.load(slopefile, samplegeodict=gdict,
                                  resample=False)
        slope_data = slopegrid.getData().astype(float)
        # Load in MMI
        shakemap = ShakeGrid.load(shakefile, samplegeodict=gdict,
                                  resample=True, method='linear',
                                  adjust='res')
        MMI_data = shakemap.getLayer('mmi').getData().astype(float)
        # Load in DFF
        dffgrid = GDALGrid.load(dff_file, samplegeodict=gdict,
                                resample=False)
        dff_data = dffgrid.getData().astype(float)
        # Load in DFS
        dfsgrid = GDALGrid.load(dfs_file, samplegeodict=gdict,
                                resample=False)
        dfs_data = dfsgrid.getData().astype(float)
        # Load in slope position
        slope_pos_grid = GDALGrid.load(slope_pos_file, samplegeodict=gdict,
                                       resample=False)
        slope_pos_data = slope_pos_grid.getData().astype(float)
    except Exception:
        print('Data could not be retrieved.')

    # Expected [[[classification]]] section of the config file
    # (each range is reclassified as 1, 2, 3, etc.):
    #   MMI = 5, 6, 7, 8, 9
    #   slope = 0-4, 5-9, 10-14, 15-19, 20-24, 25-29, 30-34, 35-39,
    #           40-44, 45-49, 50+
    #   dff = 0-4, 5-9, 10-19, 20-29, 30-39, 40-49, 50+
    #   dfs = 0-0.49, 0.5-0.99, 1.0-1.49, 1.5-1.99, 2.0-2.49, 2.5+
    #   slope_pos = 'Flat', 'Valley', 'Mid-Slope', 'Ridge'
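# One possible implementation of the reclassification implied by the
# classification table above (a sketch only; the original function is
# truncated before this step, so the bin edges and the np.digitize approach
# are assumptions, not the author's code). Standalone demo with dummy
# slope values:
import numpy as np

slope_data = np.array([[3., 7., 52.], [12., 27., 44.]])  # dummy slopes (deg)
slope_bins = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]
slope_classes = np.digitize(slope_data, slope_bins) + 1  # classes 1..11
print(slope_classes)  # [[ 1  2 11] [ 3  6  9]]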