Example #1
def test_modify():
    print('Testing ShakeGrid setLayer() method...')
    geodict = GeoDict({'xmin':0.5,'xmax':6.5,'ymin':1.5,'ymax':6.5,'dx':1.0,'dy':1.0,'ny':6,'nx':7})
    data = np.arange(14,56).reshape(6,7)
    layers = OrderedDict()
    layers['pga'] = data
    shakeDict = {'event_id':'usabcd1234',
                 'shakemap_id':'usabcd1234',
                 'shakemap_version':1,
                 'code_version':'4.0',
                 'process_timestamp':datetime.utcnow(),
                 'shakemap_originator':'us',
                 'map_status':'RELEASED',
                 'shakemap_event_type':'ACTUAL'}
    eventDict = {'event_id':'usabcd1234',
                 'magnitude':7.6,
                 'depth':1.4,
                 'lat':2.0,
                 'lon':2.0,
                 'event_timestamp':datetime.utcnow(),
                 'event_network':'us',
                 'event_description':'sample event'}
    uncDict = {'pga':(0.0,0)}
    shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
    rdata = np.random.rand(data.shape[0],data.shape[1])
    shake.setLayer('pga',rdata)
    newdata = shake.getLayer('pga').getData()
    np.testing.assert_almost_equal(rdata,newdata)
Example #2
def test_interpolate():
    print('Testing ShakeGrid interpolate() method...')
    geodict = GeoDict({'xmin':0.5,'xmax':6.5,'ymin':1.5,'ymax':6.5,'dx':1.0,'dy':1.0,'ny':6,'nx':7})
    data = np.arange(14,56).reshape(6,7)
    layers = OrderedDict()
    layers['pga'] = data
    shakeDict = {'event_id':'usabcd1234',
                 'shakemap_id':'usabcd1234',
                 'shakemap_version':1,
                 'code_version':'4.0',
                 'process_timestamp':datetime.utcnow(),
                 'shakemap_originator':'us',
                 'map_status':'RELEASED',
                 'shakemap_event_type':'ACTUAL'}
    eventDict = {'event_id':'usabcd1234',
                 'magnitude':7.6,
                 'depth':1.4,
                 'lat':2.0,
                 'lon':2.0,
                 'event_timestamp':datetime.utcnow(),
                 'event_network':'us',
                 'event_description':'sample event'}
    uncDict = {'pga':(0.0,0)}
    shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
    sampledict = GeoDict({'xmin':3.0,'xmax':4.0,
                          'ymin':3.0,'ymax':4.0,
                          'dx':1.0,'dy':1.0,
                          'ny':2,'nx':2})
    shake2 = shake.interpolateToGrid(sampledict,method='linear')
    output = np.array([[34.,35.],[41.,42.]])
    np.testing.assert_almost_equal(output,shake2.getLayer('pga').getData())
    print('Passed test of ShakeGrid interpolate() method.')
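A quick note on where the expected values come from: with dx=dy=1.0, the sample
points (e.g. x=3.0, y=4.0) fall exactly halfway between the source grid nodes,
so each linearly interpolated value is the mean of the four surrounding cells.
A standalone check (only numpy needed):

import numpy as np

data = np.arange(14, 56).reshape(6, 7)
# x runs 0.5..6.5 across columns; y runs 6.5 (row 0) down to 1.5 (row 5).
# The sample point (3.0, 4.0) lies between columns 2-3 and rows 2-3:
print((data[2, 2] + data[2, 3] + data[3, 2] + data[3, 3]) / 4.0)  # 34.0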
Example #3
def do_gridxml(evid, datapath, oc):

    check_failures(evid, datapath, GridXMLModule)

    mod = GridXMLModule(evid)
    mod.execute()
    mod.writeContents()

    #
    # Test that the grid.xml grids actually match what's in
    # shake_results.hdf
    #
    imts = oc.getIMTs()

    gxml = os.path.join(datapath, evid, 'current', 'products', 'grid.xml')
    g2d = ShakeGrid.load(gxml)
    layers = g2d.getData()
    for imt in imts:
        component, imt = imt.split('/')
        comp = oc.getComponents(imt)
        cdata = oc.getIMTGrids(imt, comp[0])['mean']
        #
        # Do the same conversion to the container data as is
        # done to the file data
        #
        digits = oc.getIMTGrids(imt, comp[0])['mean_metadata']['digits']
        vfunc = rounder(digits)
        if imt == 'MMI':
            cdata = vfunc(cdata)
        elif imt == 'PGV':
            cdata = vfunc(np.exp(cdata))
        else:
            cdata = vfunc(100 * np.exp(cdata))
        lname = _oq_to_gridxml(imt).lower()
        layer = layers[lname]
        gdata = layer.getData()
        assert np.allclose(gdata, cdata)

    #
    # Do the uncertainty grids
    #
    uxml = os.path.join(datapath, evid, 'current', 'products',
                        'uncertainty.xml')
    u2d = ShakeGrid.load(uxml)
    ulayers = u2d.getData()

    for imt in imts:
        component, imt = imt.split('/')
        comp = oc.getComponents(imt)
        cdata = oc.getIMTGrids(imt, comp[0])['std']
        #
        # The stddevs just get rounded
        #
        digits = oc.getIMTGrids(imt, comp[0])['std_metadata']['digits']
        vfunc = rounder(digits)
        cdata = vfunc(cdata)
        lname = 'std' + _oq_to_gridxml(imt).lower()
        layer = ulayers[lname]
        gdata = layer.getData()
        assert np.allclose(gdata, cdata)
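For context, the conversion above mirrors the grid.xml conventions: MMI is
written as-is, PGV in cm/s, and the remaining IMTs (PGA, SA) in %g, while the
HDF container stores natural-log values. A minimal sketch of that conversion,
assuming rounder(digits) is just a vectorized round-to-N-digits helper (the
real helper is defined elsewhere in this codebase):

import numpy as np

def rounder(digits):
    # hypothetical stand-in for the project's rounder()
    return lambda arr: np.round(arr, digits)

log_pga = np.array([-2.0, -1.5])     # natural-log g, as stored in the container
vfunc = rounder(2)
print(vfunc(100 * np.exp(log_pga)))  # %g values as written to grid.xml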
Example #4
def test_interpolate():
    print('Testing ShakeGrid interpolate() method...')
    geodict = GeoDict({
        'xmin': 0.5,
        'xmax': 6.5,
        'ymin': 1.5,
        'ymax': 6.5,
        'dx': 1.0,
        'dy': 1.0,
        'ny': 6,
        'nx': 7
    })
    data = np.arange(14, 56).reshape(6, 7)
    layers = OrderedDict()
    layers['pga'] = data
    shakeDict = {
        'event_id': 'usabcd1234',
        'shakemap_id': 'usabcd1234',
        'shakemap_version': 1,
        'code_version': '4.0',
        'process_timestamp': datetime.utcnow(),
        'shakemap_originator': 'us',
        'map_status': 'RELEASED',
        'shakemap_event_type': 'ACTUAL'
    }
    eventDict = {
        'event_id': 'usabcd1234',
        'magnitude': 7.6,
        'depth': 1.4,
        'lat': 2.0,
        'lon': 2.0,
        'event_timestamp': datetime.utcnow(),
        'event_network': 'us',
        'event_description': 'sample event'
    }
    uncDict = {'pga': (0.0, 0)}
    shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)
    sampledict = GeoDict({
        'xmin': 3.0,
        'xmax': 4.0,
        'ymin': 3.0,
        'ymax': 4.0,
        'dx': 1.0,
        'dy': 1.0,
        'ny': 2,
        'nx': 2
    })
    shake2 = shake.interpolateToGrid(sampledict, method='linear')
    output = np.array([[34., 35.], [41., 42.]])
    np.testing.assert_almost_equal(output, shake2.getLayer('pga').getData())
    print('Passed test of ShakeGrid interpolate() method.')
Example #5
def test():
    shakefile = os.path.join(homedir, 'data', 'northridge.xml')
    t1 = datetime.datetime.now()
    sgrid = ShakeGrid.load(shakefile, adjust='res')
    t2 = datetime.datetime.now()
    origin = {}
    origin['id'] = sgrid._eventDict['event_id']
    origin['source'] = sgrid._eventDict['event_network']
    origin['time'] = sgrid._eventDict['event_timestamp']
    origin['lat'] = sgrid._eventDict['lat']
    origin['lon'] = sgrid._eventDict['lon']
    origin['depth'] = sgrid._eventDict['depth']
    origin['magnitude'] = sgrid._eventDict['magnitude']

    header = {}
    header['type'] = 'shakemap'
    header['version'] = sgrid._shakeDict['shakemap_version']
    header['process_time'] = sgrid._shakeDict['process_timestamp']
    header['code_version'] = sgrid._shakeDict['code_version']
    header['originator'] = sgrid._shakeDict['shakemap_originator']
    header['product_id'] = sgrid._shakeDict['shakemap_id']
    header['map_status'] = sgrid._shakeDict['map_status']
    header['event_type'] = sgrid._shakeDict['shakemap_event_type']

    layers = collections.OrderedDict()
    for (layername, layerdata) in sgrid.getData().items():
        layers[layername] = layerdata.getData()

    tdict = {
        'name': 'fred',
        'family': {
            'wife': 'wilma',
            'daughter': 'pebbles'
        }
    }
    mgrid = MultiHazardGrid(layers,
                            sgrid.getGeoDict(),
                            origin,
                            header,
                            metadata={'flintstones': tdict})
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir, 'test.hdf')
    try:
        mgrid.save(testfile)
        t3 = datetime.datetime.now()
        mgrid2 = MultiHazardGrid.load(testfile)
        t4 = datetime.datetime.now()
        xmlmb = os.path.getsize(shakefile) / float(1e6)
        hdfmb = os.path.getsize(testfile) / float(1e6)
        xmltime = (t2 - t1).seconds + (t2 - t1).microseconds / float(1e6)
        hdftime = (t4 - t3).seconds + (t4 - t3).microseconds / float(1e6)
        print('Input XML file size: %.2f MB (loading time %.3f seconds)' %
              (xmlmb, xmltime))
        print('Output HDF file size: %.2f MB (loading time %.3f seconds)' %
              (hdfmb, hdftime))
    except DataSetException as obj:
        pass
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
Example #6
def test():
    homedir = os.path.dirname(
        os.path.abspath(__file__))  # where is this script?
    cityfile = os.path.join(homedir, '..', 'data', 'cities1000.txt')
    shakefile1 = os.path.join(homedir, '..', 'data', 'eventdata', 'northridge',
                              'northridge_grid.xml')
    shakefile2 = os.path.join(homedir, '..', 'data', 'eventdata', 'lomaprieta',
                              'lomaprieta_grid.xml')
    shakefiles = [shakefile1, shakefile2]
    lengths = [11, 11]
    first_city = ['Santa Clarita', 'Lexington Hills']
    last_city = ['Bakersfield', 'Fresno']
    ic = 0
    cities = Cities.loadFromGeoNames(cityfile)
    for shakefile in shakefiles:
        shakemap = ShakeGrid.load(shakefile, adjust='res')

        # get the top ten (by population) nearby cities
        clat = shakemap.getEventDict()['lat']
        clon = shakemap.getEventDict()['lon']
        nearcities = cities.limitByRadius(clat, clon, 100)
        nearcities.sortByColumns('pop', ascending=False)
        nearcities = Cities(nearcities._dataframe.iloc[0:10])
        mmigrid = shakemap.getLayer('mmi')
        pc = PagerCities(cities, mmigrid)
        rows = pc.getCityTable(nearcities)
        print('Testing that number of cities retrieved is consistent...')
        assert len(rows) == lengths[ic]
        assert rows.iloc[0]['name'] == first_city[ic]
        assert rows.iloc[-1]['name'] == last_city[ic]
        print('Passed.')
        ic += 1
Example #7
def get_bounds(shakefile, parameter='pga', threshold=2.0):
    """
    Get the boundaries of the shakemap that include all areas with shaking
    above the defined threshold.

    Args:
        shakefile (str): Path to shakemap file.
        parameter (str): Either 'pga' or 'pgv'.
        threshold (float): Minimum value of the parameter of interest, in
            units of %g for pga and cm/s for pgv. The default of 2 %g is
            based on the minimum pga threshold observed to have triggered
            landslides (Jibson and Harp, 2016).

    Returns:
        dict: A dictionary with keys 'xmin', 'xmax', 'ymin', and 'ymax' that
        defines the boundaries in geographic coordinates.
    """
    shakemap = ShakeGrid.load(shakefile, adjust='res')
    if parameter == 'pga':
        vals = shakemap.getLayer('pga')
    elif parameter == 'pgv':
        vals = shakemap.getLayer('pgv')
    else:
        raise Exception("parameter must be 'pga' or 'pgv'")
    xmin, xmax, ymin, ymax = vals.getBounds()
    lons = np.linspace(xmin, xmax, vals.getGeoDict().nx)
    lats = np.linspace(ymax, ymin, vals.getGeoDict().ny)
    row, col = np.where(vals.getData() > float(threshold))
    lonmin = lons[col].min()
    lonmax = lons[col].max()
    latmin = lats[row].min()
    latmax = lats[row].max()

    # dummy fillers, only really care about bounds
    boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100}

    if xmin < lonmin:
        boundaries1['xmin'] = lonmin
    else:
        boundaries1['xmin'] = xmin
    if xmax > lonmax:
        boundaries1['xmax'] = lonmax
    else:
        boundaries1['xmax'] = xmax
    if ymin < latmin:
        boundaries1['ymin'] = latmin
    else:
        boundaries1['ymin'] = ymin
    if ymax > latmax:
        boundaries1['ymax'] = latmax
    else:
        boundaries1['ymax'] = ymax

    return boundaries1
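A hypothetical usage sketch (the file name and threshold are invented): fetch
the bounding box of all cells shaking at 2 %g or more.

bounds = get_bounds('grid.xml', parameter='pga', threshold=2.0)
print(bounds['xmin'], bounds['xmax'], bounds['ymin'], bounds['ymax'])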
Example #8
def test_modify():
    print('Testing ShakeGrid setLayer() method...')
    geodict = GeoDict({
        'xmin': 0.5,
        'xmax': 6.5,
        'ymin': 1.5,
        'ymax': 6.5,
        'dx': 1.0,
        'dy': 1.0,
        'ny': 6,
        'nx': 7
    })
    data = np.arange(14, 56).reshape(6, 7)
    layers = OrderedDict()
    layers['pga'] = data
    shakeDict = {
        'event_id': 'usabcd1234',
        'shakemap_id': 'usabcd1234',
        'shakemap_version': 1,
        'code_version': '4.0',
        'process_timestamp': datetime.utcnow(),
        'shakemap_originator': 'us',
        'map_status': 'RELEASED',
        'shakemap_event_type': 'ACTUAL'
    }
    eventDict = {
        'event_id': 'usabcd1234',
        'magnitude': 7.6,
        'depth': 1.4,
        'lat': 2.0,
        'lon': 2.0,
        'event_timestamp': datetime.utcnow(),
        'event_network': 'us',
        'event_description': 'sample event'
    }
    uncDict = {'pga': (0.0, 0)}
    shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)
    rdata = np.random.rand(data.shape[0], data.shape[1])
    shake.setLayer('pga', rdata)
    newdata = shake.getLayer('pga').getData()
    np.testing.assert_almost_equal(rdata, newdata)
Example #9
def sampleFromShakeMap(shakefile,layer,xypoints):
    """
    Sample ShakeMap grid file at each of a set of XY (decimal degrees) points.
    :param shakefile:
      Path to ShakeMap grid.xml file to sample.
    :param layer:
      Name of ShakeMap layer to sample (e.g. 'pga').
    :param xypoints:
      2D numpy array of XY points, decimal degrees.
    :returns:
      1D numpy array of grid values at each of input XY points.
    """
    shakegrid = ShakeGrid.load(shakefile,fixFileGeoDict='corner')
    return sampleFromMultiGrid(shakegrid, layer, xypoints)
Example #10
def sampleFromShakeMap(shakefile, layer, xypoints):
    """Sample ShakeMap grid file at each of a set of XY (decimal degrees) points.

    :param shakefile:
      Path to ShakeMap grid.xml file to sample.
    :param layer:
      Name of ShakeMap layer to sample (e.g. 'pga').
    :param xypoints:
      2D numpy array of XY points, decimal degrees.
    :returns:
      1D numpy array of grid values at each of input XY points.
    """

    shakegrid = ShakeGrid.load(shakefile, fixFileGeoDict='corner')
    return sampleFromMultiGrid(shakegrid, layer, xypoints)
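A hypothetical call, assuming a local grid.xml and a small lon/lat array
(sampleFromMultiGrid is provided elsewhere in the same sampling module):

import numpy as np

xypoints = np.array([[-118.2, 34.1], [-118.0, 34.3]])  # lon, lat pairs
pga_values = sampleFromShakeMap('grid.xml', 'pga', xypoints)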
Example #11
def test():
    shakefile = os.path.join(homedir,'data','northridge.xml')
    t1 = datetime.datetime.now()
    sgrid = ShakeGrid.load(shakefile,adjust='res')
    t2 = datetime.datetime.now()
    origin = {}
    origin['id'] = sgrid._eventDict['event_id']
    origin['source'] = sgrid._eventDict['event_network']
    origin['time'] = sgrid._eventDict['event_timestamp']
    origin['lat'] = sgrid._eventDict['lat']
    origin['lon'] = sgrid._eventDict['lon']
    origin['depth'] = sgrid._eventDict['depth']
    origin['magnitude'] = sgrid._eventDict['magnitude']

    header = {}
    header['type'] = 'shakemap'
    header['version'] = sgrid._shakeDict['shakemap_version']
    header['process_time'] = sgrid._shakeDict['process_timestamp']
    header['code_version'] = sgrid._shakeDict['code_version']
    header['originator'] = sgrid._shakeDict['shakemap_originator']
    header['product_id'] = sgrid._shakeDict['shakemap_id']
    header['map_status'] = sgrid._shakeDict['map_status']
    header['event_type'] = sgrid._shakeDict['shakemap_event_type']

    layers = collections.OrderedDict()
    for (layername,layerdata) in sgrid.getData().items():
        layers[layername] = layerdata.getData()

    tdict = {'name':'fred','family':{'wife':'wilma','daughter':'pebbles'}}
    mgrid = MultiHazardGrid(layers,sgrid.getGeoDict(),origin,header,metadata={'flintstones':tdict})
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir,'test.hdf')
    try:
        mgrid.save(testfile)
        t3 = datetime.datetime.now()
        mgrid2 = MultiHazardGrid.load(testfile)
        t4 = datetime.datetime.now()
        xmlmb = os.path.getsize(shakefile)/float(1e6)
        hdfmb = os.path.getsize(testfile)/float(1e6)
        xmltime = (t2-t1).seconds + (t2-t1).microseconds/float(1e6)
        hdftime = (t4-t3).seconds + (t4-t3).microseconds/float(1e6)
        print('Input XML file size: %.2f MB (loading time %.3f seconds)' % (xmlmb,xmltime))
        print('Output HDF file size: %.2f MB (loading time %.3f seconds)' % (hdfmb,hdftime))
    except DataSetException as obj:
        pass
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
Example #12
def test_read():
    xmlfile = os.path.join(homedir,'data','northridge.xml')
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir,'test.xml')
    try:
        shakegrid = ShakeGrid.load(xmlfile,adjust='res')
        t1 = time.time()
        shakegrid.save(testfile)
        t2 = time.time()
        print('Saving shakemap took %.2f seconds' % (t2-t1))
    except Exception as error:
        print('Failed to read grid.xml format file "%s". Error "%s".' % (xmlfile,str(error)))
        assert 0 == 1
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
Example #13
def test_read():
    xmlfile = os.path.join(homedir, 'data', 'northridge.xml')
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir, 'test.xml')
    try:
        shakegrid = ShakeGrid.load(xmlfile, adjust='res')
        t1 = time.time()
        shakegrid.save(testfile)
        t2 = time.time()
        print('Saving shakemap took %.2f seconds' % (t2 - t1))
    except Exception as error:
        print('Failed to read grid.xml format file "%s". Error "%s".' %
              (xmlfile, str(error)))
        assert 0 == 1
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
Example #14
def basic_test():

    mmidata = np.array([[7, 8, 8, 8, 7], [8, 9, 9, 9, 8], [8, 9, 10, 9, 8],
                        [8, 9, 9, 8, 8], [7, 8, 8, 6, 5]],
                       dtype=np.float32)
    popdata = np.ones_like(mmidata) * 1e7
    isodata = np.array(
        [[4, 4, 4, 4, 4], [4, 4, 4, 4, 4], [4, 4, 156, 156, 156],
         [156, 156, 156, 156, 156], [156, 156, 156, 156, 156]],
        dtype=np.int32)

    shakefile = get_temp_file_name()
    popfile = get_temp_file_name()
    isofile = get_temp_file_name()
    geodict = GeoDict({
        'xmin': 0.5,
        'xmax': 4.5,
        'ymin': 0.5,
        'ymax': 4.5,
        'dx': 1.0,
        'dy': 1.0,
        'nx': 5,
        'ny': 5
    })
    layers = OrderedDict([
        ('mmi', mmidata),
    ])
    event_dict = {
        'event_id': 'us12345678',
        'magnitude': 7.8,
        'depth': 10.0,
        'lat': 34.123,
        'lon': -118.123,
        'event_timestamp': datetime.utcnow(),
        'event_description': 'foo',
        'event_network': 'us'
    }
    shake_dict = {
        'event_id': 'us12345678',
        'shakemap_id': 'us12345678',
        'shakemap_version': 1,
        'code_version': '4.5',
        'process_timestamp': datetime.utcnow(),
        'shakemap_originator': 'us',
        'map_status': 'RELEASED',
        'shakemap_event_type': 'ACTUAL'
    }
    unc_dict = {'mmi': (1, 1)}
    shakegrid = ShakeGrid(layers, geodict, event_dict, shake_dict, unc_dict)
    shakegrid.save(shakefile)
    popgrid = Grid2D(popdata, geodict.copy())
    isogrid = Grid2D(isodata, geodict.copy())
    write(popgrid, popfile, 'netcdf')
    write(isogrid, isofile, 'netcdf')

    ratedict = {
        4: {
            'start': [2010, 2012, 2014, 2016],
            'end': [2012, 2014, 2016, 2018],
            'rate': [0.01, 0.02, 0.03, 0.04]
        },
        156: {
            'start': [2010, 2012, 2014, 2016],
            'end': [2012, 2014, 2016, 2018],
            'rate': [0.02, 0.03, 0.04, 0.05]
        }
    }

    popgrowth = PopulationGrowth(ratedict)
    popyear = datetime.utcnow().year
    exposure = Exposure(popfile, popyear, isofile, popgrowth=popgrowth)
    expdict = exposure.calcExposure(shakefile)

    modeldict = [
        LognormalModel('AF', 11.613073, 0.180683, 1.0),
        LognormalModel('CN', 10.328811, 0.100058, 1.0)
    ]
    fatmodel = EmpiricalLoss(modeldict)

    # for the purposes of this test, let's override the rates
    # for Afghanistan and China with simpler numbers.
    fatmodel.overrideModel(
        'AF',
        np.array([0, 0, 0, 0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 0],
                 dtype=np.float32))
    fatmodel.overrideModel(
        'CN',
        np.array([0, 0, 0, 0, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 0],
                 dtype=np.float32))

    print('Testing very basic fatality calculation...')
    fatdict = fatmodel.getLosses(expdict)
    # Strictly speaking, the Afghanistan fatalities should be 462,000,
    # but floating point precision dictates otherwise.
    testdict = {'CN': 46111, 'AF': 461999, 'TotalFatalities': 508110}
    for key, value in fatdict.items():
        assert value == testdict[key]
    print('Passed very basic fatality calculation...')

    print('Testing grid fatality calculations...')
    mmidata = exposure.getShakeGrid().getLayer('mmi').getData()
    popdata = exposure.getPopulationGrid().getData()
    isodata = exposure.getCountryGrid().getData()
    fatgrid = fatmodel.getLossGrid(mmidata, popdata, isodata)

    assert np.nansum(fatgrid) == 508111
    print('Passed grid fatality calculations...')

    # Testing modifying rates and stuffing them back in...
    chile = LognormalModel('CL', 19.786773, 0.259531, 0.0)
    rates = chile.getLossRates(np.arange(5, 10))
    modrates = rates * 2  # does this make the event twice as deadly?

    # roughly the exposures from 2015-9-16 CL event
    expo_pop = np.array(
        [0, 0, 0, 1047000, 7314000, 1789000, 699000, 158000, 0, 0])
    mmirange = np.arange(5, 10)
    chile_deaths = chile.getLosses(expo_pop[4:9], mmirange)
    chile_double_deaths = chile.getLosses(expo_pop[4:9],
                                          mmirange,
                                          rates=modrates)
    print('Chile model fatalities: %f' % chile_deaths)
    print('Chile model x2 fatalities: %f' % chile_double_deaths)
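For orientation, the empirical model above boils down to a dot product per
country: fatalities are the population exposed in each MMI bin times that
country's rate at that MMI. A minimal sketch (not the EmpiricalLoss
internals) using the overridden Afghanistan rates from this test, where bin
i holds the rate for MMI i+1:

import numpy as np

rates = np.array([0, 0, 0, 0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 0])
exposure = np.zeros(10)
exposure[8] = 4.62e7  # hypothetical: 46.2M people exposed at MMI 9
print(np.dot(exposure, rates))  # 462000.0 fatalities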
Example #15
def realizations(total_real, my_reals, radius, variables, grid_arr, mu_arr, sigma_arr, list_sizes_grid, list_sizes_mu,
                 shakegrid, voi, comm, dir, output_dir):    
    '''
    Function realizations uses output from the main function in loop.py to compute realizations of the spatially variable random field.
    :param total_real:
        integer, total number of realizations assigned to each core
    :param my_reals:
        numpy array, which realizations each core is computing
    :param radius:
        float, radius of influence
    :param variables:
        dict, output from initialize function in setup.py
    :param grid_arr:
        numpy array of all grid array values, note that these are all combined into one large array
        these are indices that each grid point depends on
    :param mu_arr:
        numpy array of all mu arrays, note that these are all combined into one large array
        Sig12.T*Sig11inv
    :param sigma_arr:
        numpy array of R values
    :param list_sizes_grid:
        numpy array, the number of elements of grid_arr belonging to each grid point
    :param list_sizes_mu:
        numpy array, the number of elements of mu_arr belonging to each grid point
    :param shakegrid:
        shakegrid object
    :param voi:
        string, intensity measure
    :param comm:
        mpi communicator
    :param dir:
        string, path to inputs folder
    :param output_dir:
        string, path to the folder where output files are written
    Outputs are saved to a file. If multiple grid.xml files are used, the
    epsilon matrices will be saved to file. Otherwise realizations of the
    spatially variable ShakeMap will be saved.
    '''
    num_realizations = np.size(my_reals)
    if num_realizations == 0:
        return
    my_rank = comm.Get_rank()
    size = comm.Get_size()

    # Determine if multiple grid.xml files are to be used.
    multiple_maps = 0
    isd = True
    while isd:
        isd = os.path.isdir(os.path.join(dir, '%i' % (multiple_maps + 1)))
        if isd:
            multiple_maps += 1
            
    # Set data file names. If multiple realizations are used, store epsilon
    if multiple_maps > 0:
        write_correlation = True
        filename = os.path.join(output_dir,'Epsilon_%s_%i.hdf5'%(voi,my_rank))
    else:
        write_correlation = False
        filename = os.path.join(output_dir,'SVSM_%s_%i.hdf5'%(voi, my_rank))

    shakemap = shakegrid.getLayer(voi)
    N = variables['N']
    M = variables['M']

    event_attr = shakegrid.getEventDict()
    grid_attr =  shakegrid.getGeoDict()

    # Set up dictionaries to store data
    uncertaintydata, data, sm_dict = {}, {}, {}
    uncertaintydata['map0'] = variables['uncertaintydata']
    stationlist = os.path.join(dir,'stationlist.xml')
    stationdata = readStation(stationlist)
    data['map0'] = variables['data']
    sm_dict['map0'] = shakemap

    # If there are multiple maps, store other maps data
    for i in range(1, multiple_maps+1):
        folder = '%i/'%i
        sm_grid = ShakeGrid.load(os.path.join(dir,folder,'grid.xml'), adjust = 'res')
        sm_dict['map%i'%i] = sm_grid.getLayer(voi)
        event_attr = sm_grid.getEventDict()
        unc_grid = ShakeGrid.load(os.path.join(dir,folder,'uncertainty.xml'), adjust = 'res')
        stationlist = os.path.join(dir,folder,'stationlist.xml')
        stationdata = readStation(stationlist)
        voi_list = []
        voi_list.append(voi)
        variables = initialize(sm_grid, unc_grid, stationdata, dir, voi_list)
        uncertaintydata["map{0}".format(i)] = variables['uncertaintydata']
        data["map{0}".format(i)] = variables['data']

    list_size_mu = np.reshape(list_sizes_mu, [M*N,1])
    list_size_grid = np.reshape(list_sizes_grid, [M*N,1])
    sigma_arr = np.reshape(sigma_arr, [M*N,1])

    # Set header information for the file. Change if necessary.
    f = h5py.File(filename, 'w')
    f.attrs['Conventions'] = 'COARDS, CF-1.5'
    f.attrs['title'] = 'filename'
    f.attrs['history'] = 'Created with python MultiHazardGrid.save(%s)' % filename
    f.attrs['GMT_version'] = 'NA'

    xvar = np.linspace(grid_attr.xmin,grid_attr.xmax,grid_attr.nx)
    yvar = np.linspace(grid_attr.ymin,grid_attr.ymax,grid_attr.ny)
    x = f.create_dataset('x',data=xvar,compression='gzip',shape=xvar.shape,dtype=str(xvar.dtype))
    x.attrs['CLASS'] = 'DIMENSION_SCALE'
    x.attrs['NAME'] = 'x'
    x.attrs['_Netcdf4Dimid'] = 0 #no idea what this is
    x.attrs['long_name'] = 'x'
    x.attrs['actual_range'] = np.array((xvar[0],xvar[-1]))
    
    y = f.create_dataset('y',data=yvar,compression='gzip',shape=yvar.shape,dtype=str(yvar.dtype))
    y.attrs['CLASS'] = 'DIMENSION_SCALE'
    y.attrs['NAME'] = 'y'
    y.attrs['_Netcdf4Dimid'] = 1 #no idea what this is
    y.attrs['long_name'] = 'y'
    y.attrs['actual_range'] = np.array((yvar[0],yvar[-1]))

    # Compute realizations of the field, COR
    for j in range(0, num_realizations):
        X = np.zeros([M*N,1])
        for i in range(0,M*N):
            st_g = np.sum(list_size_grid[0:i])
            st_m = np.sum(list_size_mu[0:i])
            end_g = st_g + list_size_grid[i]
            end_m = st_m + list_size_mu[i]
            rand_arr = np.random.randn()
            nzeros = list_size_mu[i] - list_size_grid[i]
            x = np.append(np.zeros(nzeros), X[np.array(grid_arr[st_g:end_g], dtype = 'i')])            
            mu = np.dot(mu_arr[st_m:end_m], x)
            X[i] = mu + rand_arr * sigma_arr[i]

        COR = np.reshape(X, [M,N])
        layerkey = 'realization_%i'%j

        # Write data to file
        if write_correlation:
            dset = f.create_dataset(layerkey,data=COR,compression='gzip')
            dset.attrs['long_name'] = layerkey

        else:
            for i in range(0, multiple_maps+1):
                xx = 'map%i'%i
                X = np.multiply(COR, uncertaintydata[xx][voi])
                DATA_NEW = data[xx][voi]*np.exp(X)
                dset = f.create_dataset(layerkey,data=DATA_NEW,compression='gzip')
                dset.attrs['long_name'] = layerkey

        if np.mod(j+1, 25) == 0:
            print('Done with', j+1, 'of', num_realizations, 'iterations.')

    f.close()
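The inner loop above is a sequential Gaussian simulation: each point draws
X[i] = mu_i + sigma_i * eps, where mu_i is the conditional mean built from
already-simulated neighbors (the Sig12.T*Sig11inv weights in mu_arr) and
sigma_i is the conditional standard deviation. A stripped-down sketch of the
recurrence with made-up single-neighbor weights:

import numpy as np

X = np.zeros(3)
X[0] = np.random.randn()          # first point: unconditional draw
weights = np.array([0.6])         # hypothetical Sig12.T*Sig11inv row
sigma = 0.8                       # hypothetical conditional std dev
X[1] = weights @ X[:1] + sigma * np.random.randn()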
Example #16
def model_test_simple():
    A = 4  # ccode for Afghanistan
    J = 392  # ccode for Japan
    R = 1  # rural code
    U = 2  # urban code
    #create a 5x5 population data set with 1000 people in each cell
    popdata = np.ones((5, 5)) * 1000.0
    # create a mixed grid of Afghanistan and Japan (they have very different
    # inventory, collapse, and fatality rates)
    isodata = np.array([[A, A, A, A, A], [A, A, A, A, A], [A, A, A, J, J],
                        [J, J, J, J, J], [J, J, J, J, J]],
                       dtype=np.int16)
    #make a mix of urban and rural cells
    urbdata = np.array([[R, R, R, R, R], [R, U, U, U, R], [R, U, U, U, U],
                        [U, U, U, R, R], [R, R, R, R, R]],
                       dtype=np.int16)
    mmidata = np.array([[6, 7, 8, 9, 6], [7, 8, 9, 6, 7], [8, 9, 6, 6, 7],
                        [8, 9, 6, 7, 8], [9, 6, 7, 8, 9]],
                       dtype=np.float32)
    homedir = os.path.dirname(
        os.path.abspath(__file__))  #where is this script?
    invfile = os.path.join(homedir, '..', 'data', 'semi_inventory.hdf')
    colfile = os.path.join(homedir, '..', 'data', 'semi_collapse_mmi.hdf')
    fatfile = os.path.join(homedir, '..', 'data', 'semi_casualty.hdf')
    workfile = os.path.join(homedir, '..', 'data', 'semi_workforce.hdf')
    growthfile = os.path.join(homedir, '..', 'data',
                              'WPP2015_POP_F02_POPULATION_GROWTH_RATE.xls')
    geodict = GeoDict({
        'xmin': 0.5,
        'xmax': 4.5,
        'ymin': 0.5,
        'ymax': 4.5,
        'dx': 1.0,
        'dy': 1.0,
        'nx': 5,
        'ny': 5
    })

    popgrid = GMTGrid(popdata, geodict)
    isogrid = GMTGrid(isodata, geodict)
    urbgrid = GMTGrid(urbdata, geodict)
    popyear = 2016
    layers = {'mmi': mmidata}
    eventdict = {
        'event_id': '1234',
        'magnitude': 7.5,
        'lat': 34.2,
        'lon': 118.2,
        'depth': 10.0,
        'event_timestamp': datetime(2016, 1, 1, 0, 0, 0),
        'event_description': 'test data',
        'event_network': 'us'
    }
    shakedict = {
        'event_id': '1234',
        'shakemap_id': '1234',
        'shakemap_version': 1,
        'code_version': '1.0',
        'process_timestamp': datetime.utcnow(),
        'shakemap_originator': 'us',
        'map_status': 'RELEASED',
        'shakemap_event_type': 'SCENARIO'
    }
    uncdict = {'mmi': (1.0, 1)}
    mmigrid = ShakeGrid(layers, geodict, eventdict, shakedict, uncdict)

    popfile = isofile = urbfile = shakefile = ''
    try:
        #make some temporary files
        f, popfile = tempfile.mkstemp()
        os.close(f)
        f, isofile = tempfile.mkstemp()
        os.close(f)
        f, urbfile = tempfile.mkstemp()
        os.close(f)
        f, shakefile = tempfile.mkstemp()
        os.close(f)

        popgrid.save(popfile)
        isogrid.save(isofile)
        urbgrid.save(urbfile)
        mmigrid.save(shakefile)

        semi = SemiEmpiricalFatality.fromDefault()
        losses, resfat, nonresfat = semi.getLosses(shakefile)
        assert losses == 85
        print('Semi-empirical model calculations appear to be done correctly.')
    except Exception:
        print(
            'There is an error attempting to do semi-empirical loss calculations.'
        )
    finally:
        files = [popfile, isofile, urbfile, shakefile]
        for fname in files:
            if os.path.isfile(fname):
                os.remove(fname)
Example #17
    def __init__(self, config, shakefile, model):
        if model not in getLogisticModelNames(config):
            raise Exception('Could not find a model called "%s" in config %s.' % (model,config))
        #do everything here short of calculations - parse config, assemble eqn strings, load data.
        self.model = model
        cmodel = config['logistic_models'][model]
        self.coeffs = validateCoefficients(cmodel)
        self.layers = validateLayers(cmodel)#key = layer name, value = file name
        self.terms,timeField = validateTerms(cmodel,self.coeffs,self.layers)
        self.interpolations = validateInterpolations(cmodel,self.layers)
        self.units = validateUnits(cmodel,self.layers)

        if 'baselayer' not in cmodel:
            raise Exception('You must specify a base layer file in config.')
        if cmodel['baselayer'] not in list(self.layers.keys()):
            raise Exception('You must specify a base layer corresponding to one of the files in the layer section.')

        #get the geodict for the shakemap
        geodict = ShakeGrid.getFileGeoDict(shakefile,adjust='res')
        griddict,eventdict,specdict,fields,uncertainties = getHeaderData(shakefile)
        YEAR = eventdict['event_timestamp'].year
        MONTH = MONTHS[(eventdict['event_timestamp'].month)-1]
        DAY = eventdict['event_timestamp'].day
        HOUR = eventdict['event_timestamp'].hour

        #now find the layer that is our base layer and get the largest bounds we can guaranteed not to exceed shakemap bounds
        basefile = self.layers[cmodel['baselayer']]
        ftype = getFileType(basefile)
        if ftype == 'esri':
            basegeodict = GDALGrid.getFileGeoDict(basefile)
            sampledict = basegeodict.getBoundsWithin(geodict)
        elif ftype == 'gmt':
            basegeodict = GMTGrid.getFileGeoDict(basefile)
            sampledict = basegeodict.getBoundsWithin(geodict)
        else:
            raise Exception('All predictor variable grids must be a valid GMT or ESRI file type')

        #now load the shakemap, resampling and padding if necessary
        self.shakemap = ShakeGrid.load(shakefile,samplegeodict=sampledict,resample=True,doPadding=True,adjust='res')

        #load the predictor layers into a dictionary
        self.layerdict = {} #key = layer name, value = grid object
        for layername,layerfile in self.layers.items():
            if isinstance(layerfile,list):
                for lfile in layerfile:
                    if timeField == 'MONTH':
                        if lfile.find(MONTH) > -1:
                            layerfile = lfile
                            ftype = getFileType(layerfile)
                            interp = self.interpolations[layername]
                            if ftype == 'gmt':
                                lyr = GMTGrid.load(layerfile,sampledict,resample=True,method=interp,doPadding=True)
                            elif ftype == 'esri':
                                lyr = GDALGrid.load(layerfile,sampledict,resample=True,method=interp,doPadding=True)
                            else:
                                msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % (layername,layerfile)
                                raise Exception(msg)
                            self.layerdict[layername] = lyr
            else:
                #first, figure out what kind of file we have (or is it a directory?)
                ftype = getFileType(layerfile)
                interp = self.interpolations[layername]
                if ftype == 'gmt':
                    lyr = GMTGrid.load(layerfile,sampledict,resample=True,method=interp,doPadding=True)
                elif ftype == 'esri':
                    lyr = GDALGrid.load(layerfile,sampledict,resample=True,method=interp,doPadding=True)
                else:
                    msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % (layername,layerfile)
                    raise Exception(msg)
                self.layerdict[layername] = lyr

        shapes = {}
        for layername, layer in self.layerdict.items():
            shapes[layername] = layer.getData().shape

        self.nuggets = [str(self.coeffs['b0'])]
        ckeys = list(self.terms.keys())
        ckeys.sort()
        for key in ckeys:
            term = self.terms[key]
            coeff = self.coeffs[key]
            self.nuggets.append('(%g * %s)' % (coeff, term))

        self.equation = ' + '.join(self.nuggets)
        self.geodict = self.shakemap.getGeoDict()
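The equation string assembled above is the linear predictor of a logistic
regression; elsewhere in this class (not shown here) it is evaluated against
the loaded grids and pushed through the logistic function. A sketch of that
last step, with an invented linear-predictor array:

import numpy as np

linear_predictor = np.array([-2.0, 0.0, 2.0])  # hypothetical eval() result
probability = 1.0 / (1.0 + np.exp(-linear_predictor))
print(probability)  # ~[0.12, 0.50, 0.88]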
Example #18
def create_info(event_dir,
                lsmodels=None,
                lqmodels=None,
                eventsource='',
                eventsourcecode='',
                point=True):
    """Create info.json for ground failure product.

    Args:
        event_dir (str): Directory containing ground failure results.
        lsmodels (list): List of dictionaries of model summary info compiled
            by the hazdev function. If not specified, the code will search
            for the hdf5 files of the preferred model, create this
            dictionary, and apply default colorbars and bins.
        lqmodels (list): Same as above for liquefaction.
        eventsource (str): Event network source (e.g. 'us').
        eventsourcecode (str): Event source code.
        point (bool): If True, event is a point source and a warning should
            be displayed.

    Returns:
        list: Filenames created (currently just info.json for this event).
    """
    filenames = []
    # Find the shakemap grid.xml file
    with open(os.path.join(event_dir, 'shakefile.txt'), 'r') as f:
        shakefile = f.read()

    files = os.listdir(event_dir)

    if lsmodels is None and lqmodels is None:

        # Read in the "preferred" model for landslides and liquefaction
        ls_mod_file = [f2 for f2 in files if 'jessee_2017.hdf5' in f2]
        if len(ls_mod_file) == 1:
            ls_file = os.path.join(event_dir, ls_mod_file[0])
            ls_mod = loadlayers(ls_file)
            # get extents
            lsext = get_zoomextent(ls_mod['model']['grid'])
        else:
            raise OSError("Preferred landslide model result not found.")
        lq_mod_file = [f2 for f2 in files if 'zhu_2017_general.hdf5' in f2]
        if len(lq_mod_file) == 1:
            lq_file = os.path.join(event_dir, lq_mod_file[0])
            lq_mod = loadlayers(lq_file)
            # get extents
            lqext = get_zoomextent(lq_mod['model']['grid'])
        else:
            raise OSError("Preferred liquefaction model result not found.")

        # Read in extents
        ls_extent_file = [
            f2 for f2 in files if 'jessee_2017_extent.json' in f2
        ]
        if len(ls_extent_file) == 1:
            ls_file = os.path.join(event_dir, ls_extent_file[0])
            with open(ls_file) as f:
                jessee_extent = json.load(f)
        else:
            raise OSError("Landslide extent not found.")
        lq_extent_file = [
            f2 for f2 in files if 'zhu_2017_general_extent.json' in f2
        ]
        if len(lq_extent_file) == 1:
            lq_file = os.path.join(event_dir, lq_extent_file[0])
            with open(lq_file) as f:
                zhu_extent = json.load(f)
        else:
            raise OSError("Liquefaction extent not found.")

        # Read in default paths to get location of the population grid
        default_file = os.path.join(os.path.expanduser('~'), '.gfail_defaults')
        defaults = ConfigObj(default_file)
        pop_file = defaults['popfile']

        # Landslide alert statistics
        ls_stats = computeStats(ls_mod['model']['grid'],
                                probthresh=None,
                                shakefile=shakefile,
                                shakethresh=10.0,
                                shakethreshtype='pga',
                                statprobthresh=None,
                                pop_file=pop_file)

        # Liquefaction alert statistics
        lq_stats = computeStats(lq_mod['model']['grid'],
                                probthresh=None,
                                shakefile=shakefile,
                                shakethresh=10.0,
                                shakethreshtype='pga',
                                statprobthresh=None,
                                pop_file=pop_file)

        # Get alert levels
        ls_haz_level = ls_stats['hagg_0.10g']
        lq_haz_level = lq_stats['hagg_0.10g']
        ls_pop_level = ls_stats['exp_pop_0.10g']
        lq_pop_level = lq_stats['exp_pop_0.10g']

        # If hazard alert level is less than 0.1, zero it out
        # (due to rounding to 2 sig digits later, this can give
        #  overly precise results, e.g., 0.000012 if we don't clip,
        #  but this doesn't happen with pop alerts because they are
        #  integers)
        if ls_haz_level < 0.1:
            ls_haz_level = 0.0
        if lq_haz_level < 0.1:
            lq_haz_level = 0.0

        # Convert levels into categories
        alert_info = get_alert(ls_haz_level, lq_haz_level, ls_pop_level,
                               lq_pop_level)
        # Unpack info (I think we are now assuming that the statements will be
        # constructed on the website and so we don't need them here)
        ls_haz_alert, ls_pop_alert, lq_haz_alert, lq_pop_alert, \
            ls_alert, lq_alert = alert_info

        if lsmodels is None:
            lsmodels = [{
                'id': 'nowicki_jessee_2017',
                'title': 'Nowicki Jessee and others (2017)',
                'overlay': 'jessee_2017.png',
                'extent': jessee_extent,
                'units': "Proportion of area affected",
                'preferred': True,
                'alert': ls_alert,
                'hazard_alert': {
                    'color': ls_haz_alert,
                    'value': set_num_precision(ls_haz_level, 2, 'float'),
                    'parameter': 'Aggregate Hazard',
                    'units': 'km^2'
                },
                'population_alert': {
                    'color': ls_pop_alert,
                    'value': set_num_precision(ls_pop_level, 2, 'int'),
                    'parameter': 'Population exposure',
                    'units': 'people'
                },
                'probability': {
                    'max': float("%.2f" % ls_stats['Max']),
                    'std': float("%.2f" % ls_stats['Std']),
                    'hagg0.1g': float("%.2f" % ls_stats['hagg_0.10g']),
                    'popexp0.1g': float("%.2f" % ls_stats['exp_pop_0.10g'])
                }
            }]
        if lqmodels is None:
            lqmodels = [{
                'id': 'zhu_2017',
                'title': 'Zhu and others (2017)',
                'overlay': 'zhu_2017.png',
                'extent': zhu_extent,
                'units': "Proportion of area affected",
                'preferred': True,
                'alert': lq_alert,
                'hazard_alert': {
                    'color': lq_haz_alert,
                    'value': set_num_precision(lq_haz_level, 2, 'float'),
                    'parameter': 'Aggregate Hazard',
                    'units': 'km^2'
                },
                'population_alert': {
                    'color': lq_pop_alert,
                    'value': set_num_precision(lq_pop_level, 2, 'int'),
                    'parameter': 'Population exposure',
                    'units': 'people'
                },
                'probability': {
                    'max': float("%.2f" % lq_stats['Max']),
                    'std': float("%.2f" % lq_stats['Std']),
                    'hagg0.1g': float("%.2f" % lq_stats['hagg_0.10g']),
                    'popexp0.1g': float("%.2f" % lq_stats['exp_pop_0.10g'])
                }
            }]
    else:
        # Get all info from dictionaries of preferred events, add in extent
        # and filename
        for lsm in lsmodels:
            # Add extent and filename for preferred model
            if lsm['preferred']:
                filesnippet = lsm['id']
                # Read in extents
                flnm = '%s_extent.json' % filesnippet
                ls_extent_file = [f for f in files if flnm in f]
                if len(ls_extent_file) == 1:
                    ls_file = os.path.join(event_dir, ls_extent_file[0])
                    with open(ls_file) as f:
                        ls_extent = json.load(f)
                else:
                    raise OSError("Landslide extent not found.")
                lsm['extent'] = ls_extent
                # lsm['filename'] = flnm
                lsext = lsm['zoomext']  # Get zoom extent
                ls_alert = lsm['alert']
                rmkeys = ['bin_edges', 'bin_colors', 'zoomext']
            else:
                # Remove any alert keys
                rmkeys = [
                    'bin_edges', 'bin_colors', 'zoomext', 'population_alert',
                    'alert', 'hazard_alert'
                ]
            for key in rmkeys:
                if key in lsm:
                    lsm.pop(key)

        for lqm in lqmodels:
            if lqm['preferred']:
                filesnippet = lqm['id']
                # Read in extents
                flnm = '%s_extent.json' % filesnippet
                lq_extent_file = [f2 for f2 in files if flnm in f2]
                if len(lq_extent_file) == 1:
                    lq_file = os.path.join(event_dir, lq_extent_file[0])
                    with open(lq_file) as f:
                        lq_extent = json.load(f)
                else:
                    raise OSError("Liquefaction extent not found.")
                lqm['extent'] = lq_extent
                # lqm['filename'] = flnm
                lqext = lqm['zoomext']  # Get zoom extent
                lq_alert = lqm['alert']
                rmkeys = ['bin_edges', 'bin_colors', 'zoomext']
            else:
                # Remove any alert keys
                rmkeys = [
                    'bin_edges', 'bin_colors', 'zoomext', 'population_alert',
                    'alert', 'hazard_alert'
                ]
            for key in rmkeys:
                if key in lqm:
                    lqm.pop(key)

    # Try to get event info
    shake_grid = ShakeGrid.load(shakefile, adjust='res')
    event_dict = shake_grid.getEventDict()
    sm_dict = shake_grid.getShakeDict()
    base_url = 'https://earthquake.usgs.gov/earthquakes/eventpage/'

    # Is this a point source?
    # point = is_grid_point_source(shake_grid)
    # Temporarily hard code this until we can get a better solution via
    # new grid.xml attributes.
    #point = True

    net = eventsource
    code = eventsourcecode
    time = event_dict['event_timestamp'].strftime('%Y-%m-%dT%H:%M:%SZ')

    event_url = '%s%s%s#executive' % (base_url, net, code)

    # Get extents that work for both unless one is green and the other isn't
    if lq_alert == 'green' and ls_alert != 'green' and ls_alert is not None:
        xmin = lsext['xmin']
        xmax = lsext['xmax']
        ymin = lsext['ymin']
        ymax = lsext['ymax']
    elif lq_alert != 'green' and ls_alert == 'green' and lq_alert is not None:
        xmin = lqext['xmin']
        xmax = lqext['xmax']
        ymin = lqext['ymin']
        ymax = lqext['ymax']
    else:
        xmin = np.min((lqext['xmin'], lsext['xmin']))
        xmax = np.max((lqext['xmax'], lsext['xmax']))
        ymin = np.min((lqext['ymin'], lsext['ymin']))
        ymax = np.max((lqext['ymax'], lsext['ymax']))

    # Should we display the warning about point source?
    rupture_warning = False
    if point and event_dict['magnitude'] > 6.5:
        rupture_warning = True

    # Create info.json for website rendering and metadata purposes
    info_dict = {
        'Summary': {
            'code': code,
            'net': net,
            'magnitude': event_dict['magnitude'],
            'depth': event_dict['depth'],
            'time': time,
            'lat': event_dict['lat'],
            'lon': event_dict['lon'],
            'event_url': event_url,
            'shakemap_version': sm_dict['shakemap_version'],
            'rupture_warning': rupture_warning,
            'point_source': point,
            'zoom_extent': [xmin, xmax, ymin, ymax]
        },
        'Landslides': lsmodels,
        'Liquefaction': lqmodels
    }

    info_file = os.path.join(event_dir, 'info.json')
    with open(info_file, 'w') as f:
        json.dump(info_dict, f)  # allow_nan=False)
    filenames.append(info_file)
    return filenames
Example #19
def computeParea(grid2D,
                 proj='moll',
                 probthresh=0.0,
                 shakefile=None,
                 shakethreshtype='pga',
                 shakethresh=0.0):
    """
    Alternative to Aggregate Hazard (Hagg), which is equal to the sum of the
    areas of the grid cells that exceed a given probability.

    Args:
        grid2D: grid2D object of model output.
        proj: projection to use to obtain equal area, 'moll'  mollweide, or
            'laea' lambert equal area.
        probthresh: Optional, Float or list of probability thresholds.
        shakefile: Optional, path to shakemap file to use for ground motion
            threshold.
        shakethreshtype: Optional, Type of ground motion to use for
            shakethresh, 'pga', 'pgv', or 'mmi'.
        shakethresh: Optional, Float of shaking thresholds in %g for
            pga, cm/s for pgv, float for mmi.

    Returns:
        Parea (float) if no or only one probthresh defined,
        otherwise, a list of floats of Parea corresponding to all
        specified probthresh values.
    """
    if not isinstance(probthresh, (list, np.ndarray)):
        probthresh = [probthresh]

    Parea = []
    bounds = grid2D.getBounds()
    lat0 = np.mean((bounds[2], bounds[3]))
    lon0 = np.mean((bounds[0], bounds[1]))
    projs = ('+proj=%s +lat_0=%f +lon_0=%f +x_0=0 +y_0=0 +ellps=WGS84 '
             '+units=km +no_defs' % (proj, lat0, lon0))
    geodict = grid2D.getGeoDict()

    if shakefile is not None:
        if shakethresh < 0.:
            raise Exception('shaking threshold must be equal or greater '
                            'than zero')
        tmpdir = tempfile.mkdtemp()
        # resample shakemap to grid2D
        temp = ShakeGrid.load(shakefile)
        junkfile = os.path.join(tmpdir, 'temp.bil')
        GDALGrid.copyFromGrid(temp.getLayer(shakethreshtype)).save(junkfile)
        shk = quickcut(junkfile, geodict, precise=True, method='bilinear')
        shutil.rmtree(tmpdir)
        if shk.getGeoDict() != geodict:
            raise Exception('shakemap was not resampled to exactly the same '
                            'geodict as the model')

    grid = grid2D.project(projection=projs)
    geodictRS = grid.getGeoDict()
    cell_area_km2 = geodictRS.dx * geodictRS.dy
    model = grid.getData()
    model[np.isnan(model)] = -1.
    for probt in probthresh:
        if probt < 0.:
            raise Exception('probability threshold must be equal or greater '
                            'than zero')
        modcop = model.copy()
        if shakefile is not None:
            shkgrid = shk.project(projection=projs)
            shkdat = shkgrid.getData()
            # use -1 to avoid nan errors and warnings, will always be thrown
            # out because default probthresh is 0 and must be positive.
            shkdat[np.isnan(shkdat)] = -1.
            modcop[shkdat < shakethresh] = -1.
        one_mat = np.ones_like(modcop)
        Parea.append(np.sum(one_mat[modcop >= probt] * cell_area_km2))

    if len(Parea) == 1:
        Parea = Parea[0]
    return Parea
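A hypothetical usage sketch (the grid, path, and thresholds are invented):
compute the area exceeding two probability levels, restricted to cells
shaking at 10 %g or more, where model_grid is assumed to be a Grid2D of
model output.

pareas = computeParea(model_grid, probthresh=[0.1, 0.2],
                      shakefile='grid.xml', shakethreshtype='pga',
                      shakethresh=10.0)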
Example #20
    def __init__(self, config, shakefile, model, uncertfile=None):
        """Set up the logistic model

        :param config: configobj (config .ini file read in using configobj) defining the model and its inputs
        :type config: dictionary
        :param shakefile: Full file path to shakemap.xml file for the event of interest
        :type shakefile: string
        :param model: Name of model defined in config that should be run for the event of interest
        :type model: string
        :param uncertfile: Full file path to the uncertainty.xml file for the event of interest (optional)
        :type uncertfile: string

        """
        if model not in getLogisticModelNames(config):
            raise Exception('Could not find a model called "%s" in config %s.' % (model, config))
        #do everything here short of calculations - parse config, assemble eqn strings, load data.

        self.model = model
        cmodel = config['logistic_models'][model]
        self.modeltype = cmodel['gfetype']
        self.coeffs = validateCoefficients(cmodel)
        self.layers = validateLayers(cmodel)  # key = layer name, value = file name
        self.terms, timeField = validateTerms(cmodel, self.coeffs, self.layers)
        self.interpolations = validateInterpolations(cmodel, self.layers)
        self.units = validateUnits(cmodel, self.layers)
        self.gmused = [value for term, value in cmodel['terms'].items() if 'pga' in value.lower() or 'pgv' in
                       value.lower() or 'mmi' in value.lower()]
        self.modelrefs, self.longrefs, self.shortrefs = validateRefs(cmodel)
        if 'baselayer' not in cmodel:
            raise Exception('You must specify a base layer file in config.')
        if cmodel['baselayer'] not in list(self.layers.keys()):
            raise Exception('You must specify a base layer corresponding to one of the files in the layer section.')

        #get the geodict for the shakemap
        geodict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
        griddict, eventdict, specdict, fields, uncertainties = getHeaderData(shakefile)
        #YEAR = eventdict['event_timestamp'].year
        MONTH = MONTHS[(eventdict['event_timestamp'].month)-1]
        #DAY = eventdict['event_timestamp'].day
        #HOUR = eventdict['event_timestamp'].hour

        #now find the layer that is our base layer and get the largest bounds we can guarantee not to exceed shakemap bounds
        basefile = self.layers[cmodel['baselayer']]
        ftype = getFileType(basefile)
        if ftype == 'esri':
            basegeodict, firstcol = GDALGrid.getFileGeoDict(basefile)
            sampledict = basegeodict.getBoundsWithin(geodict)
        elif ftype == 'gmt':
            basegeodict, firstcol = GMTGrid.getFileGeoDict(basefile)
            sampledict = basegeodict.getBoundsWithin(geodict)
        else:
            raise Exception('All predictor variable grids must be a valid GMT or ESRI file type')

        #now load the shakemap, resampling and padding if necessary
        self.shakemap = ShakeGrid.load(shakefile, samplegeodict=sampledict, resample=True, doPadding=True, adjust='res')

        # take uncertainties into account
        if uncertfile is not None:
            try:
                self.uncert = ShakeGrid.load(uncertfile, samplegeodict=sampledict, resample=True, doPadding=True,
                                             adjust='res')
            except Exception:
                print('Could not read uncertainty file, ignoring uncertainties')
                self.uncert = None
        else:
            self.uncert = None

        #load the predictor layers into a dictionary
        self.layerdict = {}  # key = layer name, value = grid object
        for layername, layerfile in self.layers.items():
            if isinstance(layerfile, list):
                for lfile in layerfile:
                    if timeField == 'MONTH':
                        if lfile.find(MONTH) > -1:
                            layerfile = lfile
                            ftype = getFileType(layerfile)
                            interp = self.interpolations[layername]
                            if ftype == 'gmt':
                                lyr = GMTGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True)
                            elif ftype == 'esri':
                                lyr = GDALGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True)
                            else:
                                msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % (layername, layerfile)
                                raise Exception(msg)
                            self.layerdict[layername] = lyr
            else:
                #first, figure out what kind of file we have (or is it a directory?)
                ftype = getFileType(layerfile)
                interp = self.interpolations[layername]
                if ftype == 'gmt':
                    lyr = GMTGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True)
                elif ftype == 'esri':
                    lyr = GDALGrid.load(layerfile, sampledict, resample=True, method=interp, doPadding=True)
                else:
                    msg = 'Layer %s (file %s) does not appear to be a valid GMT or ESRI file.' % (layername, layerfile)
                    raise Exception(msg)
                self.layerdict[layername] = lyr

        shapes = {}
        for layername, layer in self.layerdict.items():
            shapes[layername] = layer.getData().shape

        self.nuggets = [str(self.coeffs['b0'])]

        ckeys = list(self.terms.keys())
        ckeys.sort()
        for key in ckeys:
            term = self.terms[key]
            coeff = self.coeffs[key]
            self.nuggets.append('(%g * %s)' % (coeff, term))

        self.equation = ' + '.join(self.nuggets)

        if self.uncert is not None:
            self.nugmin = copy.copy(self.nuggets)
            self.nugmax = copy.copy(self.nuggets)
            # Find the term with the shakemap input and replace for these nuggets
            for k, nug in enumerate(self.nuggets):
                if "self.shakemap.getLayer('pga').getData()" in nug:
                    self.nugmin[k] = self.nugmin[k].replace("self.shakemap.getLayer('pga').getData()", "(np.exp(np.log(self.shakemap.getLayer('pga').getData()) - self.uncert.getLayer('stdpga').getData()))")
                    self.nugmax[k] = self.nugmax[k].replace("self.shakemap.getLayer('pga').getData()", "(np.exp(np.log(self.shakemap.getLayer('pga').getData()) + self.uncert.getLayer('stdpga').getData()))")
                elif "self.layerdict['pgv'].getData()" in nug:
                    self.nugmin[k] = self.nugmin[k].replace("self.shakemap.getLayer('pgv').getData()", "(np.exp(np.log(self.shakemap.getLayer('pgv').getData()) - self.uncert.getLayer('stdpgv').getData()))")
                    self.nugmax[k] = self.nugmax[k].replace("self.shakemap.getLayer('pgv').getData()", "(np.exp(np.log(self.shakemap.getLayer('pgv').getData()) + self.uncert.getLayer('stdpgv').getData()))")
                elif "self.layerdict['mmi'].getData()" in nug:
                    self.nugmin[k] = self.nugmin[k].replace("self.shakemap.getLayer('mmi').getData()", "(np.exp(np.log(self.shakemap.getLayer('mmi').getData()) - self.uncert.getLayer('stdmmi').getData()))")
                    self.nugmax[k] = self.nugmax[k].replace("self.shakemap.getLayer('mmi').getData()", "(np.exp(np.log(self.shakemap.getLayer('mmi').getData()) + self.uncert.getLayer('stdmmi').getData()))")
            self.equationmin = ' + '.join(self.nugmin)
            self.equationmax = ' + '.join(self.nugmax)
        else:
            self.equationmin = None
            self.equationmax = None

        self.geodict = self.shakemap.getGeoDict()

        try:
            self.slopemin = float(config['logistic_models'][model]['slopemin'])
            self.slopemax = float(config['logistic_models'][model]['slopemax'])
        except (KeyError, ValueError):
            print('Could not find slopemin and/or slopemax in config; no slope limits will be applied')
            self.slopemin = 0.
            self.slopemax = 90.
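The nugget mechanism above assembles the regression equation as a plain Python expression string ('b0 + (b1 * term1) + ...') that is evaluated later against the loaded grids. A minimal sketch of the idea with hypothetical coefficients, terms, and a fake slope layer (none of these names come from a real model config):

import numpy as np

coeffs = {'b0': 3.5, 'b1': -0.25}                       # hypothetical
terms = {'b1': "np.log(layers['slope'])"}               # hypothetical
layers = {'slope': np.array([[10., 20.], [30., 40.]])}  # fake data layer

nuggets = [str(coeffs['b0'])]
for key in sorted(terms.keys()):
    nuggets.append('(%g * %s)' % (coeffs[key], terms[key]))
equation = ' + '.join(nuggets)  # "3.5 + (-0.25 * np.log(layers['slope']))"

x = eval(equation)              # evaluate the assembled expression string
prob = 1. / (1. + np.exp(-x))   # logistic transform applied downstream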
Example #21
def test_save():
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir,'test.xml')
    try:
        print('Testing save/read functionality for shakemap grids...')
        pga = np.arange(0,16,dtype=np.float32).reshape(4,4)
        pgv = np.arange(1,17,dtype=np.float32).reshape(4,4)
        mmi = np.arange(2,18,dtype=np.float32).reshape(4,4)
        geodict = GeoDict({'xmin':0.5,'xmax':3.5,
                           'ymin':0.5,'ymax':3.5,
                           'dx':1.0,'dy':1.0,
                           'ny':4,'nx':4})
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        shakeDict = {'event_id':'usabcd1234',
                     'shakemap_id':'usabcd1234',
                     'shakemap_version':1,
                     'code_version':'4.0',
                     'process_timestamp':datetime.utcnow(),
                     'shakemap_originator':'us',
                     'map_status':'RELEASED',
                     'shakemap_event_type':'ACTUAL'}
        eventDict = {'event_id':'usabcd1234',
                     'magnitude':7.6,
                     'depth':1.4,
                     'lat':2.0,
                     'lon':2.0,
                     'event_timestamp':datetime.utcnow(),
                     'event_network':'us',
                     'event_description':'sample event'}
        uncDict = {'pga':(0.0,0),
                   'pgv':(0.0,0),
                   'mmi':(0.0,0)}
        shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
        
        print('Testing save/read functionality...')
        shake.save(testfile,version=3)
        shake2 = ShakeGrid.load(testfile)
        for layer in ['pga','pgv','mmi']:
            tdata = shake2.getLayer(layer).getData()
            np.testing.assert_almost_equal(tdata,layers[layer])

        print('Passed save/read functionality for shakemap grids.')

        print('Testing getFileGeoDict method...')
        fgeodict = ShakeGrid.getFileGeoDict(testfile)
        print('Passed getFileGeoDict method.')
        
        print('Testing loading with bounds (no resampling or padding)...')
        sampledict = GeoDict({'xmin':-0.5,'xmax':3.5,
                              'ymin':-0.5,'ymax':3.5,
                              'dx':1.0,'dy':1.0,
                              'ny':5,'nx':5})
        shake3 = ShakeGrid.load(testfile,samplegeodict=sampledict,
                                resample=False,doPadding=False,padValue=np.nan)
        tdata = shake3.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata,layers['pga'])

        print('Passed loading with bounds (no resampling or padding)...')

        print('Testing loading shakemap with padding, no resampling...')
        newdict = GeoDict({'xmin':-0.5,'xmax':4.5,
                           'ymin':-0.5,'ymax':4.5,
                           'dx':1.0,'dy':1.0,
                           'ny':6,'nx':6})
        shake4 = ShakeGrid.load(testfile,samplegeodict=newdict,
                                resample=False,doPadding=True,padValue=np.nan)
        output = np.array([[np.nan,np.nan,np.nan,np.nan,np.nan,np.nan],
                           [np.nan,0.0,1.0,2.0,3.0,np.nan],
                           [np.nan,4.0,5.0,6.0,7.0,np.nan],
                           [np.nan,8.0,9.0,10.0,11.0,np.nan],
                           [np.nan,12.0,13.0,14.0,15.0,np.nan],
                           [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan]])
        tdata = shake4.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata,output)
        print('Passed loading shakemap with padding, no resampling...')

        #make a bigger grid
        pga = np.arange(0,36,dtype=np.float32).reshape(6,6)
        pgv = np.arange(1,37,dtype=np.float32).reshape(6,6)
        mmi = np.arange(2,38,dtype=np.float32).reshape(6,6)
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        geodict = GeoDict({'xmin':0.5,'xmax':5.5,
                           'ymin':0.5,'ymax':5.5,
                           'dx':1.0,'dy':1.0,
                           'ny':6,'nx':6})
        shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
        shake.save(testfile,version=3)

        print('Testing resampling, no padding...')
        littledict = GeoDict({'xmin':2.0,'xmax':4.0,
                              'ymin':2.0,'ymax':4.0,
                              'dx':1.0,'dy':1.0,
                              'ny':3,'nx':3})
        shake5 = ShakeGrid.load(testfile,samplegeodict=littledict,resample=True,doPadding=False,padValue=np.nan)
        output = np.array([[10.5,11.5,12.5],
                           [16.5,17.5,18.5],
                           [22.5,23.5,24.5]])
        tdata = shake5.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata,output)
        print('Passed resampling, no padding...')

        print('Testing resampling and padding...')
        pga = np.arange(0,16,dtype=np.float32).reshape(4,4)
        pgv = np.arange(1,17,dtype=np.float32).reshape(4,4)
        mmi = np.arange(2,18,dtype=np.float32).reshape(4,4)
        geodict = GeoDict({'xmin':0.5,'ymax':3.5,
                           'ymin':0.5,'xmax':3.5,
                           'dx':1.0,'dy':1.0,
                           'ny':4,'nx':4})
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
        shake.save(testfile,version=3)
        bigdict = GeoDict({'xmin':0.0,'xmax':4.0,
                           'ymin':0.0,'ymax':4.0,
                           'dx':1.0,'dy':1.0,
                           'ny':5,'nx':5})
        shake6 = ShakeGrid.load(testfile,samplegeodict=bigdict,resample=True,doPadding=True,padValue=np.nan)
        tdata = shake6.getLayer('pga').getData()
        output = np.array([[np.nan,np.nan,np.nan,np.nan,np.nan],
                           [np.nan,2.5,3.5,4.5,np.nan],
                           [np.nan,6.5,7.5,8.5,np.nan],
                           [np.nan,10.5,11.5,12.5,np.nan],
                           [np.nan,np.nan,np.nan,np.nan,np.nan]])
        np.testing.assert_almost_equal(tdata,output)
        print('Passed resampling and padding...')
    except Exception as error:
        print('Failed to read grid.xml format file "%s". Error "%s".' % (testfile, str(error)))
        assert 0 == 1
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
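The expected values in the resampling test above follow directly from bilinear interpolation at cell centers: each sample point of littledict falls midway between four cells of the 6x6 grid, so the result is their average. A quick check of the first expected value (10.5) with plain numpy:

import numpy as np

pga = np.arange(0, 36, dtype=np.float32).reshape(6, 6)
# The sample point (x=2.0, y=4.0) lies midway between columns at x=1.5/2.5
# and rows at y=4.5/3.5, i.e., the cells holding 7, 8, 13, and 14.
print(pga[1:3, 1:3].mean())  # 10.5, matching the expected output above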
Example #22
def make_test_semi_model(ccode, timeofday, density, popvalue, mmi):
    """Run the semi-empirical model for a single value of input.  Intended for testing purposes.

    :param ccode:
      Two letter ISO country code ('US', 'JP', etc.) to be used to extract inventory, collapse rates, etc.
    :param timeofday:
      One of 'day', 'night', or 'transit' - used to determine residential/non-residential population distribution and casualty rates.
    :param density:
      One of semimodel.URBAN (2) or semimodel.RURAL (1).
    :param popvalue:
      Scalar population value to multiply by inventory, collapse, and fatality rates.
    :param mmi:
      MMI value used to extract collapse rates in given country code.
    :returns:
      Tuple of:
        1) Total number of fatalities
        2) Dictionary of residential fatalities per building type, per country.
        3) Dictionary of non-residential fatalities per building type, per country.
    """
    country = Country()
    cdict = country.getCountry(ccode)
    ucode = cdict['ISON']
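    # NOTE: this first geodict is superseded by the 2x2 geodict defined below.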
    geodict = GeoDict({
        'xmin': 0.5,
        'xmax': 4.5,
        'ymin': 0.5,
        'ymax': 4.5,
        'dx': 1.0,
        'dy': 1.0,
        'nx': 5,
        'ny': 5
    })
    if timeofday == 'day':
        etime = datetime(2016, 1, 1, 12, 0, 0)  # noon
    elif timeofday == 'transit':
        etime = datetime(2016, 1, 1, 18, 0, 0)  # 6 pm
    else:
        etime = datetime(2016, 1, 1, 0, 0, 0)  # midnight
    eventdict = {
        'event_id': '1234',
        'magnitude': 7.5,
        'lat': 0.0,
        'lon': 0.0,
        'depth': 10.0,
        'event_timestamp': etime,
        'event_description': 'test data',
        'event_network': 'us'
    }
    shakedict = {
        'event_id': '1234',
        'shakemap_id': '1234',
        'shakemap_version': 1,
        'code_version': '1.0',
        'process_timestamp': datetime.utcnow(),
        'shakemap_originator': 'us',
        'map_status': 'RELEASED',
        'shakemap_event_type': 'SCENARIO'
    }
    uncdict = {'mmi': (1.0, 1)}
    popdata = np.ones((2, 2), dtype=np.float32) * (popvalue) / 4
    isodata = np.ones((2, 2), dtype=np.int16) * ucode
    urbdata = np.ones((2, 2), dtype=np.int16) * density
    mmidata = np.ones((2, 2), dtype=np.float32) * mmi
    geodict = GeoDict({
        'xmin': 0.5,
        'xmax': 1.5,
        'ymin': 0.5,
        'ymax': 1.5,
        'dx': 1.0,
        'dy': 1.0,
        'nx': 2,
        'ny': 2
    })
    popgrid = GMTGrid(popdata, geodict)
    isogrid = GMTGrid(isodata, geodict)
    urbgrid = GMTGrid(urbdata, geodict)
    popyear = 2016
    layers = {'mmi': mmidata}
    mmigrid = ShakeGrid(layers, geodict, eventdict, shakedict, uncdict)
    popfile = isofile = urbfile = shakefile = ''
    popsum = None
    newresfat = None
    newnonresfat = None
    try:
        #make some temporary files
        f, popfile = tempfile.mkstemp()
        os.close(f)
        f, isofile = tempfile.mkstemp()
        os.close(f)
        f, urbfile = tempfile.mkstemp()
        os.close(f)
        f, shakefile = tempfile.mkstemp()
        os.close(f)

        popgrid.save(popfile)
        isogrid.save(isofile)
        urbgrid.save(urbfile)
        mmigrid.save(shakefile)

        semi = SemiEmpiricalFatality.fromDefault()
        semi.setGlobalFiles(popfile, popyear, urbfile, isofile)
        t, resfat, nonresfat = semi.getLosses(shakefile)
        popsum = 0
        newresfat = {ccode: {}}
        newnonresfat = {ccode: {}}
        for key, value in resfat[ccode].items():
            if value < 1:
                value = np.floor(value)
            newresfat[ccode][key] = value / 4.0
            popsum += value / 4.0
        for key, value in nonresfat[ccode].items():
            newnonresfat[ccode][key] = value / 4.0
            if value < 1:
                value = np.floor(value)
            popsum += value / 4.0
        popsum = int(popsum)
    finally:
        files = [popfile, isofile, urbfile, shakefile]
        for fname in files:
            if os.path.isfile(fname):
                os.remove(fname)
    return (popsum, newresfat, newnonresfat)
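A hypothetical invocation of the helper above (country code and values chosen for illustration; density=2 corresponds to semimodel.URBAN per the docstring, and the call requires the default semi-empirical model data to be installed):

popsum, resfat, nonresfat = make_test_semi_model('JP', 'day', 2, 1000.0, 8.0)
print('Total fatalities: %i' % popsum)
print('Residential fatalities by building type:', resfat['JP'])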
Example #23
def getDataFrames(sampleparams, shakeparams, predictors, outparams):
    """Return Pandas training and testing data frames containing sampled data from hazard coverage, ShakeMap, and predictor data sets.

    :param sampleparams:
        Dictionary with at least these values:
            * coverage: Name of hazard coverage shapefile (decimal degrees). Required.
            * dx: Float desired sample resolution, and can be overridden by nmax, below (meters).  Required.
            * cb: Desired class balance, i.e., fraction of sampled points that should be from hazard polygons. Optional for polygons, required for points.
            * nmax: Maximum number of possible yes/no sample points (usually set to avoid memory issues). Optional.
            * nsamp: Number of total hazard and no-hazard sample points to collect.  Required.
            * touch_center: Boolean (0 or 1) indicating whether polygons must touch the center of the cell in order for that cell to count as a "yes" sample point.
            * testpercent: Percentage of sampled points to be used for testing; (100 - testpercent) percent will be used for training. Optional, defaults to 0.
            * extent: xmin,xmax,ymin,ymax OR convex. Geographic extent within which to sample data; four numbers are interpreted as a bounding box, and the word convex is interpreted to mean the convex hull of the coverage. Default (not specified) is the bounding box of the hazard coverage. Optional.
            * h1: Minimum buffer size for sampling non-hazard points when input coverage takes the form of points. Optional for polygons, required for points.
            * h2: Maximum buffer size for sampling non-hazard points when input coverage takes the form of points. Optional for polygons, required for points.
    :param shakeparams:
        Dictionary with at least these values:
            * shakemap: Name of shakemap file to use for sampling hazard values. Required.
            * shakemap_uncertainty: Name of shakemap uncertainty file to use for sampling hazard uncertainty values. Optional.
    :param predictors:
        Dictionary with at least these values:
            * layername: Path to ESRI shapefile, or grid in GMT or ESRI format which represents predictor data. Required.
            * layername_sampling: 'nearest' or 'linear', optional for grids, not used for shapefiles.
            * layername_attribute: Name of attribute in shapefile which should be sampled at hazard/non-hazard points.  Required for points.
    :param outparams:
        Dictionary with at least these values:
            * folder: Name of folder where all output (data frames, plots) will be written. Will be created if it does not exist. Required.
            * basename: The name that will be included in all output file names (i.e., northridge_train.csv). Required.
    :returns:
      Tuple of (training,testing) Pandas data frames.
    """
    coverage = sampleparams['coverage']
    f = fiona.collection(coverage, 'r')
    cbounds = f.bounds
    f.close()
    dx = sampleparams['dx']
    cb = sampleparams['cb']
    nmax = sampleparams['nmax']
    nsamp = sampleparams['nsamp']
    touch_center = sampleparams['touch_center']
    testpercent = sampleparams['testpercent']
    extent = sampleparams['extent']
    h1 = sampleparams['h1']
    h2 = sampleparams['h2']

    yestest, yestrain, notest, notrain, xvar, yvar, pshapes, proj = sampleFromFile(
        coverage,
        dx=dx,
        nmax=nmax,
        testPercent=testpercent,
        touch_center=touch_center,
        classBalance=cb,
        extent=extent,
        Nsamp=nsamp,
        h1=h1,
        h2=h2)

    traincolumns = OrderedDict()
    testcolumns = OrderedDict()

    if (100 - testpercent) > 0:
        traincolumns['lat'] = np.concatenate((yestrain[:, 1], notrain[:, 1]))
        traincolumns['lon'] = np.concatenate((yestrain[:, 0], notrain[:, 0]))
        traincolumns['coverage'] = np.concatenate(
            (np.ones_like(yestrain[:, 1]), np.zeros_like(notrain[:, 1])))

    if testpercent > 0:
        testcolumns['lat'] = np.concatenate((yestest[:, 1], notest[:, 1]))
        testcolumns['lon'] = np.concatenate((yestest[:, 0], notest[:, 0]))
        testcolumns['coverage'] = np.concatenate(
            (np.ones_like(yestest[:, 1]), np.zeros_like(notest[:, 1])))

    for predname, predfile in predictors.items():
        ftype = getFileType(predfile)
        if ftype == 'shapefile':
            attribute = predictors[predname + '_attribute']
            shapes = subsetShapes(predfile, cbounds)
            yes_test_samples = sampleShapes(shapes, yestest, attribute)
            no_test_samples = sampleShapes(shapes, notest, attribute)
            yes_train_samples = sampleShapes(shapes, yestrain, attribute)
            no_train_samples = sampleShapes(shapes, notrain, attribute)
            testcolumns[predname] = np.squeeze(
                np.concatenate((yes_test_samples, no_test_samples)))
            traincolumns[predname] = np.squeeze(
                np.concatenate((yes_train_samples, no_train_samples)))
        elif ftype == 'grid':
            method = 'nearest'
            if predname + '_sampling' in predictors:
                method = predictors[predname + '_sampling']

            if testpercent > 0:
                yes_test_samples = sampleGridFile(predfile,
                                                  yestest,
                                                  method=method)
                no_test_samples = sampleGridFile(predfile,
                                                 notest,
                                                 method=method)
                testcolumns[predname] = np.squeeze(
                    np.concatenate((yes_test_samples, no_test_samples)))

            if (100 - testpercent) > 0:
                yes_train_samples = sampleGridFile(predfile,
                                                   yestrain,
                                                   method=method)
                no_train_samples = sampleGridFile(predfile,
                                                  notrain,
                                                  method=method)
                traincolumns[predname] = np.squeeze(
                    np.concatenate((yes_train_samples, no_train_samples)))
        else:
            continue  # attribute or sampling method key

    #sample the shakemap
    layers = ['mmi', 'pga', 'pgv', 'psa03', 'psa10', 'psa30']
    shakegrid = ShakeGrid.load(shakeparams['shakemap'], adjust='res')
    for layer in layers:
        yes_test_samples = sampleFromMultiGrid(shakegrid, layer, yestest)
        no_test_samples = sampleFromMultiGrid(shakegrid, layer, notest)
        yes_train_samples = sampleFromMultiGrid(shakegrid, layer, yestrain)
        no_train_samples = sampleFromMultiGrid(shakegrid, layer, notrain)
        if testpercent > 0:
            testcolumns[layer] = np.squeeze(
                np.concatenate((yes_test_samples, no_test_samples)))
        if (100 - testpercent) > 0:
            traincolumns[layer] = np.squeeze(
                np.concatenate((yes_train_samples, no_train_samples)))

    dftest = pd.DataFrame(testcolumns)
    dftrain = pd.DataFrame(traincolumns)

    return (dftrain, dftest)
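A hypothetical set of parameter dictionaries for the function above; the keys mirror the docstring, and every path and value here is illustrative:

sampleparams = {'coverage': '/data/hazard_polygons.shp',  # hypothetical path
                'dx': 100.0, 'cb': 0.5, 'nmax': 100000, 'nsamp': 5000,
                'touch_center': 1, 'testpercent': 20, 'extent': None,
                'h1': None, 'h2': None}
shakeparams = {'shakemap': '/data/grid.xml'}
predictors = {'slope': '/data/slope.grd',       # sampled with the method below
              'slope_sampling': 'linear'}       # skipped by the predictor loop
outparams = {'folder': '/data/output', 'basename': 'northridge'}

dftrain, dftest = getDataFrames(sampleparams, shakeparams, predictors, outparams)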
Example #24
def computePexp(grid, pop_file, shakefile=None, shakethreshtype='pga',
                shakethresh=0., probthresh=0., stdgrid2D=None,
                stdtype='full', maxP=1., sill1=None, range1=None):
    """
    Get exposure-based statistics.

    Args:
        grid: Model grid.
        pop_file (str):  Path to the landscan population grid.
        shakefile (str): Optional, path to shakemap file to use for ground
            motion threshold.
        shakethreshtype(str): Optional, Type of ground motion to use for
            shakethresh, 'pga', 'pgv', or 'mmi'.
        shakethresh: Float, shaking threshold in %g for
            pga, cm/s for pgv, float for mmi.
        probthresh: Float, exclude any cells with
            probabilities less than or equal to this value
        stdgrid2D: grid2D object of model standard deviations (optional)
        stdtype (str): assumption of spatial correlation used to compute
            the stdev of the statistics, 'max', 'min', 'mean' of max and min,
            or 'full' (default) which estimates the range of correlation and
            accounts for covariance. Will return 'mean' if
            range and sill cannot be estimated.
        maxP (float): the maximum possible probability of the model
        sill1 (float): If known, the sill of the variogram of grid2D, will be
            estimated if None and stdtype='full'
        range1 (float): If known, the range of the variogram of grid2D, will
            be estimated if None and stdtype='full'

    Returns:
        dict: Dictionary with keys named exp_pop_# where # is the shakethresh
            and exp_std_# if stdgrid2D is supplied (stdev of exp_pop)
            and elim_#, the maximum exposure value possible with the
            applied thresholds and given maxP value
            p_exp_# beta distribution shape factor p (sometimes called alpha)
            q_exp_# beta distribution shape factor q (sometimes called beta)
    """

    model = grid.getData().copy()
    mdict = grid.getGeoDict()

    # Figure out difference in resolution of popfile to shakefile
    ptemp, J = GDALGrid.getFileGeoDict(pop_file)
    factor = ptemp.dx/mdict.dx

    # Cut out area from population file
    popcut1 = quickcut(pop_file, mdict, precise=False, extrasamp=2., method='nearest')
    #tot1 = np.sum(popcut1.getData())
    # Adjust for factor to prepare for upsampling to avoid creating new people
    popcut1.setData(popcut1.getData()/factor**2)

    # Upsample to mdict
    popcut = popcut1.interpolate2(mdict, method='nearest')
    popdat = popcut.getData()
    exp_pop = {}

    if shakefile is not None:
        if shakethresh < 0.:
            raise Exception('shaking threshold must be greater than or '
                            'equal to zero')
        # resample shakemap to grid2D
        temp = ShakeGrid.load(shakefile)
        shk = temp.getLayer(shakethreshtype)
        shk = shk.interpolate2(mdict)
        if shk.getGeoDict() != mdict:
            raise Exception('shakemap was not resampled to exactly the same '
                            'geodict as the model')
        shkdat = shk.getData()
        model[shkdat < shakethresh] = float('nan')
    else:
        shakethresh = 0.
        shkdat = None

    mu = np.nansum(model[model >= probthresh] * popdat[model >= probthresh])
    exp_pop['exp_pop_%1.2fg' % (shakethresh/100.,)] = mu
    #N = np.nansum([model >= probthresh])
    #exp_pop['N_%1.2fg' % (shakethresh/100.,)] = N
    elim = np.nansum(popdat[model >= probthresh])*maxP
    exp_pop['elim_%1.2fg' % (shakethresh/100.,)] = elim

    if stdgrid2D is not None:
        std = stdgrid2D.getData().copy()
        if np.nanmax(std) > 0. and np.nanmax(model) >= probthresh:
            totalmin = np.sqrt(np.nansum((popdat[model >= probthresh]*std[model >= probthresh])**2.))
            totalmax = np.nansum(std[model >= probthresh] * popdat[model >= probthresh])
            if stdtype == 'full':
                if sill1 is None or range1 is None:
                    modelfresh = grid.getData().copy()
                    range1, sill1 = semivario(modelfresh, probthresh,
                                              shakethresh=shakethresh,
                                              shakegrid=shkdat)
                if range1 is None:
                    # Use mean
                    exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = (totalmax+totalmin)/2.
                else:
                    # Zero out std at cells where the model probability was below
                    # the threshold because we aren't including those cells in Hagg
                    stdz = std.copy()
                    stdz[model < probthresh] = 0.
                    svar1 = svar(stdz, range1, sill1, scale=popdat)
                    exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = np.sqrt(svar1)
                    #exp_pop['exp_range_%1.2fg' % (shakethresh/100.,)] = range1
                    #exp_pop['exp_sill_%1.2fg' % (shakethresh/100.,)] = sill1

            elif stdtype == 'max':
                exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = totalmax
            elif stdtype == 'min':
                exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = totalmin
            else:
                exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = (totalmax+totalmin)/2.
            # Beta distribution shape factors
            var = exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)]**2.
            exp_pop['p_exp_%1.2fg' % (shakethresh/100.,)] = (mu/elim)*((elim*mu-mu**2)/var-1)
            exp_pop['q_exp_%1.2fg' % (shakethresh/100.,)] = (1-mu/elim)*((elim*mu-mu**2)/var-1)
        else:
            print('no std values above zero, filling with zeros')
            exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = 0.
            exp_pop['p_exp_%1.2fg' % (shakethresh/100.,)] = 0.
            exp_pop['q_exp_%1.2fg' % (shakethresh/100.,)] = 0.
    else:
        exp_pop['exp_std_%1.2fg' % (shakethresh/100.,)] = 0.
        exp_pop['p_exp_%1.2fg' % (shakethresh/100.,)] = 0.
        exp_pop['q_exp_%1.2fg' % (shakethresh/100.,)] = 0.

    return exp_pop
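The p/q values above are the method-of-moments shape factors of a beta distribution for exposure scaled by its maximum elim: with mean mu and variance var, both shape factors share the factor (elim*mu - mu**2)/var - 1, and the beta mean p/(p+q) recovers mu/elim. A quick numeric check with hypothetical statistics:

mu, elim, var = 120.0, 1000.0, 900.0    # hypothetical exposure stats
k = (elim * mu - mu**2) / var - 1       # common factor in both shape params
p = (mu / elim) * k
q = (1 - mu / elim) * k
assert abs(p / (p + q) - mu / elim) < 1e-12  # beta mean equals scaled mean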
Example #25
    def execute(self):
        """Create grid.xml and uncertainty.xml files.

        Raises:
            NotADirectoryError: When the event data directory does not exist.
            FileNotFoundError: When the shake_result HDF file does not
                exist.
        """
        logger = logging.getLogger(__name__)
        install_path, data_path = get_config_paths()
        datadir = os.path.join(data_path, self._eventid, 'current', 'products')
        if not os.path.isdir(datadir):
            raise NotADirectoryError('%s is not a valid directory.' % datadir)
        datafile = os.path.join(datadir, 'shake_result.hdf')
        if not os.path.isfile(datafile):
            raise FileNotFoundError('%s does not exist.' % datafile)

        # Open the ShakeMapOutputContainer and extract the data
        container = ShakeMapOutputContainer.load(datafile)

        # get all of the grid layers and the geodict
        if container.getDataType() != 'grid':
            raise NotImplementedError('gridxml module can only function on '
                                      'gridded data, not sets of points')
        gridnames = container.getIMTs(COMPONENT)
        layers = {}
        field_keys = {}
        xml_types = ['grid', 'uncertainty']
        for xml_type in xml_types:
            for gridname in gridnames:
                imt_field = _oq_to_gridxml(gridname)
                imtdict = container.getIMTGrids(gridname, COMPONENT)
                if xml_type == 'grid':
                    grid = imtdict['mean']
                    metadata = imtdict['mean_metadata']
                elif xml_type == 'uncertainty':
                    grid = imtdict['std']
                    metadata = imtdict['std_metadata']

                units = metadata['units']
                digits = metadata['digits']
                grid_data = grid.getData()
                # convert from HDF units to legacy grid.xml units
                if units == 'ln(cm/s)':
                    grid_data = np.exp(grid_data)
                    units = 'cm/s'
                elif units == 'ln(g)':
                    grid_data = np.exp(grid_data) * 100
                    units = '%g'
                else:
                    pass
                layers[imt_field] = grid_data

                field_keys[imt_field] = (units, digits)
            geodict = grid.getGeoDict()

            config = container.getConfig()

            # event dictionary
            info_data = container.getString('info.json')
            info = json.loads(info_data)
            event_info = info['input']['event_information']
            event_dict = {}
            event_dict['event_id'] = event_info['event_id']
            event_dict['magnitude'] = float(event_info['magnitude'])
            event_dict['depth'] = float(event_info['depth'])
            event_dict['lat'] = float(event_info['latitude'])
            event_dict['lon'] = float(event_info['longitude'])
            event_dict['event_timestamp'] = datetime.strptime(
                event_info['origin_time'], TIMEFMT)
            event_dict['event_description'] = event_info['location']
            # TODO the following is SUPER-SKETCHY - we need to save the event
            # network info!!!
            event_dict['event_network'] = event_dict['event_id'][0:2]

            # shake dictionary
            shake_dict = {}
            shake_dict['event_id'] = event_dict['event_id']
            shake_dict['shakemap_id'] = event_dict['event_id']
            # TODO - where are we supposed to get shakemap version
            shake_dict['shakemap_version'] = 1
            shake_dict['code_version'] = shakemap.__version__
            shake_dict['process_timestamp'] = datetime.utcnow()
            shake_dict['shakemap_originator'] = config['system']['source_network']
            shake_dict['map_status'] = config['system']['map_status']
            # TODO - we need a source for this!!!
            shake_dict['shakemap_event_type'] = 'ACTUAL'

            shake_grid = ShakeGrid(
                layers, geodict, event_dict,
                shake_dict, {}, field_keys=field_keys)
            fname = os.path.join(datadir, '%s.xml' % xml_type)
            logger.info('Saving IMT grids to %s' % fname)
            shake_grid.save(fname)  # TODO - set grid version number
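The unit handling in execute() mirrors the legacy grid.xml conventions: the HDF container stores natural-log units, while grid.xml expects linear units (%g for accelerations, cm/s for velocities). A minimal sketch of the two conversions and a round-trip check:

import numpy as np

ln_pga = np.array([-2.0, -1.0])     # ln(g), as stored in shake_result.hdf
pga_pctg = np.exp(ln_pga) * 100     # -> %g, as written to grid.xml

ln_pgv = np.array([1.0, 2.0])       # ln(cm/s)
pgv_cms = np.exp(ln_pgv)            # -> cm/s

assert np.allclose(np.log(pga_pctg / 100), ln_pga)  # inverse conversion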
Example #26
def _test_intensity():

    datadir = os.path.abspath(os.path.join(homedir, "..", "data", "eventdata", "northridge"))
    shakefile = os.path.join(datadir, "northridge_grid.xml")
    topofile = os.path.join(datadir, "northridge_topo.grd")
    rupturefile = os.path.join(datadir, "northridge_fault.txt")
    cityfile = os.path.join(datadir, "northridge_cities.txt")
    coastfile = os.path.join(datadir, "northridge_coastline.json")
    countryfile = os.path.join(datadir, "northridge_countries.json")
    statefile = os.path.join(datadir, "northridge_states.json")
    lakefile = os.path.join(datadir, "northridge_lakes.json")
    oceanfile = os.path.join(datadir, "northridge_ocean.json")
    stationfile = os.path.join(datadir, "northridge_stations.db")
    roadfile = os.path.join(datadir, "northridge_roads.json")
    tancptfile = os.path.join(shakedir, "shakemap", "mapping", "tan.cpt")
    shakecptfile = os.path.join(shakedir, "shakemap", "mapping", "shakecpt.cpt")

    layerdict = {
        "coast": coastfile,
        "ocean": oceanfile,
        "lake": lakefile,
        "country": countryfile,
        "roads": roadfile,
        "state": statefile,
    }

    tancolormap = ColorPalette.fromPreset("shaketopo")
    shakecolormap = ColorPalette.fromPreset("mmi")
    cities = BasemapCities.loadFromCSV(cityfile)
    shakemap = ShakeGrid.load(shakefile, adjust="res")
    stations = StationList(stationfile)
    rupture = QuadRupture.readRuptureFile(rupturefile)
    edict = shakemap.getEventDict()
    eventdict = {
        "lat": edict["lat"],
        "lon": edict["lon"],
        "depth": edict["depth"],
        "mag": edict["magnitude"],
        "time": edict["event_timestamp"],
    }
    source = Source(eventdict, rupture)
    maker = MapMaker(shakemap, topofile, stations, rupture, layerdict, source, cities)

    # draw intensity map
    outfolder = os.path.expanduser("~")
    maker.setIntensityLayer("mmi")
    maker.setIntensityGMTColorMap(shakecolormap)
    intensity_map = maker.drawIntensityMap(outfolder)
    print("Intensity map saved as: %s" % intensity_map)

    # draw contour maps
    maker.setContourGMTColorMap(tancolormap)

    # Draw pgv contours
    maker.setContourLayer("pgv")
    contour_pgv_map = maker.drawContourMap(outfolder)
    print("PGV contour map saved as: %s" % contour_pgv_map)

    # Draw pga contours
    maker.setContourLayer("pga")
    contour_pga_map = maker.drawContourMap(outfolder)
    print("PGA contour map saved as: %s" % contour_pga_map)

    # Draw psa0.3 contours
    maker.setContourLayer("psa03")
    contour_psa03_map = maker.drawContourMap(outfolder)
    print("PSA0.3 contour map saved as: %s" % contour_psa03_map)

    # Draw psa1.0 contours
    maker.setContourLayer("psa10")
    contour_psa10_map = maker.drawContourMap(outfolder)
    print("PSA1.0 contour map saved as: %s" % contour_psa10_map)

    # Draw psa3.0 contours
    maker.setContourLayer("psa30")
    contour_psa30_map = maker.drawContourMap(outfolder)
    print("PSA3.0 contour map saved as: %s" % contour_psa30_map)
Example #27
def get_exposures(grid,
                  pop_file,
                  shakefile=None,
                  shakethreshtype=None,
                  shakethresh=None,
                  probthresh=None):
    """
    Get exposure-based statistics.

    Args:
        grid: Model grid.
        pop_file (str):  Path to the landscan population grid.
        shakefile (str): Optional, path to shakemap file to use for ground
            motion threshold.
        shakethreshtype(str): Optional, Type of ground motion to use for
            shakethresh, 'pga', 'pgv', or 'mmi'.
        shakethresh: Optional, Float or list of shaking thresholds in %g for
            pga, cm/s for pgv, float for mmi.
        probthresh: Optional, None or float, exclude any cells with probabilities
            less than or equal to this value

    Returns:
        dict: Dictionary with keys named exp_pop_# where # is the shakethresh
    """

    # If probthresh defined, zero out any areas less than or equal to probthresh
    # before proceeding

    if probthresh is not None:
        origdata = grid.getData()
        moddat = origdata.copy()
        moddat[moddat <= probthresh] = 0.0
        moddat[np.isnan(origdata)] = float('nan')
    else:
        moddat = grid.getData()

    mdict = grid.getGeoDict()

    # Cut out area from population file
    popcut = quickcut(pop_file,
                      mdict,
                      precise=False,
                      extrasamp=2.,
                      method='nearest')
    popdat = popcut.getData()
    pdict = popcut.getGeoDict()

    # Pad grid with nans to beyond extent of pdict
    pad_dict = {}
    pad_dict['padleft'] = int(
        np.abs(np.ceil((mdict.xmin - pdict.xmin) / mdict.dx)))
    pad_dict['padright'] = int(
        np.abs(np.ceil((pdict.xmax - mdict.xmax) / mdict.dx)))
    pad_dict['padbottom'] = int(
        np.abs(np.ceil((mdict.ymin - pdict.ymin) / mdict.dy)))
    pad_dict['padtop'] = int(
        np.abs(np.ceil((pdict.ymax - mdict.ymax) / mdict.dy)))
    padgrid, mdict2 = Grid2D.padGrid(moddat, mdict, pad_dict)  # pads with inf
    padgrid[np.isinf(padgrid)] = float('nan')  # change to pad with nan
    padgrid = Grid2D(data=padgrid, geodict=mdict2)  # Turn into grid2d object

    # Resample model grid so as to be the nearest integer multiple of popdict
    factor = np.round(pdict.dx / mdict2.dx)

    # Create geodictionary that is a factor of X higher res but otherwise
    # identical
    ndict = GeoDict.createDictFromBox(pdict.xmin, pdict.xmax, pdict.ymin,
                                      pdict.ymax, pdict.dx / factor,
                                      pdict.dy / factor)

    # Resample
    grid2 = padgrid.interpolate2(ndict, method='linear')

    # Get proportion of each cell that has values (to account properly
    # for any nans)
    prop = block_reduce(~np.isnan(grid2.getData().copy()),
                        block_size=(int(factor), int(factor)),
                        cval=float('nan'),
                        func=np.sum) / (factor**2.)

    # Now block reduce to same geodict as popfile
    modresamp = block_reduce(grid2.getData().copy(),
                             block_size=(int(factor), int(factor)),
                             cval=float('nan'),
                             func=np.nanmean)

    exp_pop = {}
    if shakefile is not None:
        # Resample shakefile to population grid
        # , doPadding=True, padValue=0.)
        shakemap = ShakeGrid.load(shakefile, resample=False)
        shakemap = shakemap.getLayer(shakethreshtype)
        shakemap = shakemap.interpolate2(pdict)
        shkdat = shakemap.getData()
        for shaket in shakethresh:
            threshmult = shkdat > shaket
            threshmult = threshmult.astype(float)
            exp_pop['exp_pop_%1.2fg' % (shaket / 100., )] = np.nansum(
                popdat * prop * modresamp * threshmult)

    else:
        exp_pop['exp_pop_0.00g'] = np.nansum(popdat * prop * modresamp)

    return exp_pop
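The resampling trick above upsamples the model to an integer multiple of the population grid and then uses block_reduce twice: once on a validity mask to get the fraction of each coarse cell covered by finite data, and once with np.nanmean to aggregate the values. A minimal sketch, assuming block_reduce is skimage.measure.block_reduce as used above:

import numpy as np
from skimage.measure import block_reduce

fine = np.array([[1.0, 2.0, np.nan, 4.0],
                 [3.0, 4.0, np.nan, 6.0]])
factor = 2

# Fraction of each 2x2 block holding finite values
prop = block_reduce(~np.isnan(fine), block_size=(factor, factor),
                    func=np.sum) / factor**2      # [[1.0, 0.5]]

# Mean of the finite values in each block
coarse = block_reduce(fine, block_size=(factor, factor),
                      func=np.nanmean)            # [[2.5, 5.0]]

print(coarse * prop)  # [[2.5, 2.5]] -- values weighted by valid fraction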
Example #28
    def getLosses(self, shakefile):
        """Calculate number of fatalities using semi-empirical approach.

        :param shakefile:
          Path to a ShakeMap grid.xml file.
        :returns:
          Tuple of:
            1) Total number of fatalities
            2) Dictionary of residential fatalities per building type, per country.
            3) Dictionary of non-residential fatalities per building type, per country.
        """
        # get shakemap geodict
        shakedict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
        # get population geodict
        popdict = get_file_geodict(self._popfile)

        # get country code geodict
        isodict = get_file_geodict(self._isofile)

        # get urban grid geodict
        urbdict = get_file_geodict(self._urbanfile)

        # load all of the grids we need
        if popdict == shakedict == isodict == urbdict:
            # special case, probably for testing...
            shakegrid = ShakeGrid.load(shakefile, adjust='res')
            popgrid = read(self._popfile)
            isogrid = read(self._isofile)
            urbgrid = read(self._urbanfile)
        else:
            sampledict = popdict.getBoundsWithin(shakedict)
            shakegrid = ShakeGrid.load(shakefile,
                                       samplegeodict=sampledict,
                                       resample=True,
                                       method='linear',
                                       adjust='res')
            popgrid = read(self._popfile,
                           samplegeodict=sampledict,
                           resample=False)
            isogrid = read(self._isofile,
                           samplegeodict=sampledict,
                           resample=True,
                           method='nearest',
                           doPadding=True,
                           padValue=0)
            urbgrid = read(self._urbanfile,
                           samplegeodict=sampledict,
                           resample=True,
                           method='nearest',
                           doPadding=True,
                           padValue=RURAL)

        # determine the local apparent time of day (based on longitude)
        edict = shakegrid.getEventDict()
        etime = edict['event_timestamp']
        elon = edict['lon']
        time_of_day, event_year, event_hour = get_time_of_day(etime, elon)

        # round off our MMI data to nearest 0.5 (5.5 should stay 5.5, 5.4
        # should become 5.5, 5.24 should become 5.0, etc.)
        # TODO:  Someday, make this more general to include perhaps grids of all IMT values, or
        # at least the ones we have collapse data for.
        mmidata = np.round(shakegrid.getLayer('mmi').getData() / 0.5) * 0.5

        # get arrays from our other grids
        popdata = popgrid.getData()
        isodata = isogrid.getData()
        urbdata = urbgrid.getData()

        # modify the population values for growth rate by country
        ucodes = np.unique(isodata[~np.isnan(isodata)])
        for ccode in ucodes:
            cidx = (isodata == ccode)
            popdata[cidx] = self._popgrowth.adjustPopulation(
                popdata[cidx], ccode, self._popyear, event_year)

        # create a dictionary containing indoor populations by building type (in cells where MMI >= 6)
        #popbystruct = get_indoor_pop(mmidata,popdata,urbdata,isodata,time_of_day)

        # find all mmi values greater than 9, set them to 9
        mmidata[mmidata > 9.0] = 9.0

        # dictionary containers for sums of fatalities (res/nonres) by building type
        res_fatal_by_ccode = {}
        nonres_fatal_by_ccode = {}

        # fatality sum
        ntotal = 0

        # loop over countries
        ucodes = np.unique(isodata[~np.isnan(isodata)])
        for ucode in ucodes:
            if ucode == 0:
                continue
            res_fatal_by_btype = {}
            nonres_fatal_by_btype = {}

            cdict = self._country.getCountry(int(ucode))
            ccode = cdict['ISO2']
            # get the workforce Series data for the current country
            wforce = self.getWorkforce(ccode)
            if wforce is None:
                logging.info('No workforce data for %s.  Skipping.' %
                             (cdict['Name']))
                continue

            # loop over MMI values 6-9
            for mmi in np.arange(6, 9.5, 0.5):
                c1 = (mmidata == mmi)
                c2 = (isodata == ucode)
                if ucode > 900 and ucode != CALIFORNIA_US_CCODE:
                    ucode = US_CCODE
                for dclass in [URBAN, RURAL]:
                    c3 = (urbdata == dclass)

                    # get the population data in those cells at MMI, in country, and density class
                    # I think I want an AND condition here
                    popcells = popdata[c1 & c2 & c3]

                    # get the population distribution across residential, non-residential, and outdoor.
                    res, nonres, outside = pop_dist(
                        popcells, wforce, time_of_day, dclass)

                    # get the inventory for urban residential
                    resrow, nresrow = self.getInventories(ccode, dclass)

                    # TODO - figure out why this is happening, make the following lines
                    # not necessary
                    if 'Unnamed: 0' in resrow:
                        resrow = resrow.drop('Unnamed: 0')
                    if 'Unnamed: 0' in nresrow:
                        nresrow = nresrow.drop('Unnamed: 0')
                    # now multiply the residential/non-residential population through the inventory data
                    numres = len(resrow)
                    numnonres = len(nresrow)
                    resmat = np.reshape(
                        resrow.values, (numres, 1)).astype(np.float32)
                    nresmat = np.reshape(
                        nresrow.values, (numnonres, 1)).astype(np.float32)
                    popres = np.tile(res, (numres, 1))
                    popnonres = np.tile(nonres, (numnonres, 1))
                    popresbuilding = (popres * resmat)
                    popnonresbuilding = (popnonres * nresmat)

                    # now we have the residential and non-residental population
                    # distributed through the building types for each cell that matches
                    # MMI,country, and density criteria.
                    # popresbuilding rows are building types, columns are population cells

                    # next, we get the collapse rates for these buildings
                    # and multiply them by the population by building.
                    collapse_res = self.getCollapse(ccode, mmi, resrow)
                    collapse_nonres = self.getCollapse(ccode, mmi, nresrow)
                    resrates = np.reshape(
                        collapse_res.values.astype(np.float32), (numres, 1))
                    nonresrates = np.reshape(
                        collapse_nonres.values.astype(np.float32), (numnonres, 1))
                    rescollapse = popresbuilding * resrates
                    nonrescollapse = popnonresbuilding * nonresrates

                    # get the fatality rates given collapse by building type and
                    # multiply through the result of collapse*population per building
                    resfatalcol = self.getFatalityRates(
                        ccode, time_of_day, resrow)
                    nonresfatalcol = self.getFatalityRates(
                        ccode, time_of_day, nresrow)
                    resfatal = np.reshape(
                        resfatalcol.values.astype(np.float32), (numres, 1))
                    nonresfatal = np.reshape(
                        nonresfatalcol.values.astype(np.float32), (numnonres, 1))
                    resfat = rescollapse * resfatal
                    nonresfat = nonrescollapse * nonresfatal

                    # zero out the cells where fatalities are less than 1 or nan
                    try:
                        if len(resfat) and len(resfat[0]):
                            resfat[np.ma.masked_less(resfat, 1).mask] = 0.0
                    except Exception:
                        resfat[np.isnan(resfat)] = 0.0
                    try:
                        if len(nonresfat) and len(nonresfat[0]):
                            nonresfat[np.ma.masked_less(
                                nonresfat, 1).mask] = 0.0
                    except Exception:
                        nonresfat[np.isnan(nonresfat)] = 0.0

                    # sum the fatalities per building through all cells
                    resfatbybuilding = np.nansum(resfat, axis=1)
                    nonresfatbybuilding = np.nansum(nonresfat, axis=1)
                    resfdict = dict(
                        zip(resrow.index, resfatbybuilding.tolist()))
                    nonresfdict = dict(
                        zip(nresrow.index, nonresfatbybuilding.tolist()))
                    res_fatal_by_btype = add_dicts(
                        res_fatal_by_btype, resfdict)
                    nonres_fatal_by_btype = add_dicts(
                        nonres_fatal_by_btype, nonresfdict)

            # add the fatalities by building type to the dictionary containing fatalities by country
            res_fatal_by_ccode[ccode] = res_fatal_by_btype.copy()
            nonres_fatal_by_ccode[ccode] = nonres_fatal_by_btype.copy()

            # increment the total number of fatalities
            ntotal += int(sum(res_fatal_by_btype.values())
                          + sum(nonres_fatal_by_btype.values()))

        return (ntotal, res_fatal_by_ccode, nonres_fatal_by_ccode)
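The MMI rounding step near the top of getLosses() uses the divide-round-multiply idiom to snap values to the nearest 0.5; a quick check of the cases cited in the comment:

import numpy as np

mmi = np.array([5.5, 5.4, 5.24])
print(np.round(mmi / 0.5) * 0.5)  # [5.5 5.5 5. ] -- matches the comment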
Example #29
def HAZUS(shakefile, config, uncertfile=None, saveinputs=False, modeltype='coverage', regressionmodel='J_PGA', probtype='jibson2000', bounds=None):
    """
    Runs the HAZUS landslide procedure (FEMA, 2003, Chapter 4) using susceptibility categories (I-X) defined by the HAZUS manual

    :param shakefile: URL or complete file path to the location of the Shakemap to use as input
    :type shakefile: string:
    :param config: Model configuration file object containing locations of input files and other input values config = ConfigObj(configfilepath)
    :type config: ConfigObj
    :param saveinputs: Whether or not to return the model input layers, False (default) returns only the model output (one layer)
    :type saveinputs: boolean
    :param modeltype: 'coverage' if critical acceleration is exceeded by pga, this gives the estimated areal coverage of landsliding for that cell
        'dn_hazus' - Outputs Newmark displacement using HAZUS methods without relating to probability of failure
        'dn_prob' - Estimates Newmark displacement using HAZUS methods and relates to probability of failure using param probtype
        'ac_classic_dn' - Uses the critical acceleration defined by HAZUS methodology and uses regression model defined by regressionmodel param to get Newmark displacement without relating to probability of failure
        'ac_classic_prob' - Uses the critical acceleration defined by HAZUS methodology and uses regression model defined by regressionmodel param to get Newmark displacement and probability defined by probtype method
    :type modeltype: string
    :param regressionmodel:
        Newmark displacement regression model to use
        'J_PGA' (default) - PGA-based model from Jibson (2007) - equation 6
        'J_PGA_M' - PGA and M-based model from Jibson (2007) - equation 7
        'RS_PGA_M' - PGA and M-based model from Rathje and Saygili (2009)
        'RS_PGA_PGV' - PGA and PGV-based model from Saygili and Rathje (2008) - equation 6
    :type regressionmodel: string
    :param probtype: Method used to estimate probability. Entering 'jibson2000' uses equation 5 from Jibson et al. (2000) to estimate probability from Newmark displacement. 'threshold' uses a specified threshold of Newmark displacement (defined in config file) and assumes anything greater than this threshold fails
    :type probtype: string
    :param bounds: Boundaries to compute over if different from ShakeMap boundaries as dictionary with keys 'xmin', 'xmax', 'ymin', 'ymax'

    :returns maplayers:  Dictionary containing output and input layers (if saveinputs=True) along with metadata formatted like maplayers['layer name']={'grid': mapio grid2D object, 'label': 'label for colorbar and top line of subtitle', 'type': 'output or input to model', 'description': 'detailed description of layer for subtitle, potentially including source information'}
    :type maplayers: OrderedDict
    """

    # Empty refs
    suslref = 'unknown'
    sussref = 'unknown'
    modellref = 'unknown'
    modelsref = 'unknown'

    # Parse config and read in files
    sus = None
    susdat = None

    if uncertfile is not None:
        print('ground motion uncertainty option not implemented yet')

    # Read in susceptibility file
    #try:
    susfile = config['mechanistic_models']['hazus']['layers']['susceptibility']['file']
    shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
    susdict = GDALGrid.getFileGeoDict(susfile)
    if bounds is not None:  # Make sure bounds are within ShakeMap Grid
        if shkgdict.xmin > bounds['xmin'] or shkgdict.xmax < bounds['xmax'] or shkgdict.ymin > bounds['ymin'] or shkgdict.ymax < bounds['ymax']:
            print('Specified bounds are outside shakemap area, using ShakeMap bounds instead')
            bounds = None
    if bounds is not None:
        tempgdict1 = GeoDict({'xmin': bounds['xmin'], 'ymin': bounds['ymin'], 'xmax': bounds['xmax'], 'ymax': bounds['ymax'], 'dx': 100., 'dy': 100., 'nx': 100., 'ny': 100.}, adjust='res')
        tempgdict = susdict.getBoundsWithin(tempgdict1)
    else:
        tempgdict = susdict.getBoundsWithin(shkgdict)
    sus = GDALGrid.load(susfile, samplegeodict=tempgdict, resample=False)
    gdict = sus.getGeoDict()
    susdat = sus.getData()
    #except Exception as e:
    #    raise IOError('Unable to read in susceptibility category file specified in config, %s,' % e)
    #    return

    try:  # Try to fetch source information from config
        modelsref = config['mechanistic_models']['hazus']['shortref']
        modellref = config['mechanistic_models']['hazus']['longref']
        sussref = config['mechanistic_models']['hazus']['layers']['susceptibility']['shortref']
        suslref = config['mechanistic_models']['hazus']['layers']['susceptibility']['longref']
    except KeyError:
        print('Was not able to retrieve all references from config file. Continuing.')

    dnthresh = None
    try:
        dnthresh = float(config['mechanistic_models']['hazus']['values']['dnthresh'])
    except (KeyError, ValueError):
        if probtype == 'threshold':
            dnthresh = 5.
            print('Unable to find dnthresh in config, using 5 cm')

    # Load in shakemap, resample to susceptibility file
    shakemap = ShakeGrid.load(shakefile, adjust='res')

    PGA = shakemap.getLayer('pga').subdivide(gdict).getData().astype(float)/100.  # in units of g
    PGV = shakemap.getLayer('pgv').subdivide(gdict).getData().astype(float)  # cm/sec
    M = shakemap.getEventDict()['magnitude']

    # Get critical accelerations in g
    Ac = np.empty(np.shape(susdat))
    Ac[(susdat < 1) | (susdat > 10)] = 9999.
    Ac[susdat == 1] = 0.6
    Ac[susdat == 2] = 0.5
    Ac[susdat == 3] = 0.4
    Ac[susdat == 4] = 0.35
    Ac[susdat == 5] = 0.3
    Ac[susdat == 6] = 0.25
    Ac[susdat == 7] = 0.2
    Ac[susdat == 8] = 0.15
    Ac[susdat == 9] = 0.1
    Ac[susdat == 10] = 0.05

    # Can delete sus and susdat now, if we don't need to output them, to free up memory
    if saveinputs is False:
        del susdat, sus

    if modeltype == 'coverage':
        areal = np.zeros(np.shape(PGA))
        # This seems to be slow for large matrices
        areal[(PGA >= Ac) & (Ac == 0.6)] = 0.01
        areal[(PGA >= Ac) & (Ac == 0.5)] = 0.02
        areal[(PGA >= Ac) & (Ac == 0.4)] = 0.03
        areal[(PGA >= Ac) & (Ac == 0.35)] = 0.05
        areal[(PGA >= Ac) & (Ac == 0.3)] = 0.08
        areal[(PGA >= Ac) & (Ac == 0.25)] = 0.1
        areal[(PGA >= Ac) & (Ac == 0.2)] = 0.15
        areal[(PGA >= Ac) & (Ac == 0.15)] = 0.2
        areal[(PGA >= Ac) & (Ac == 0.1)] = 0.25
        areal[(PGA >= Ac) & (Ac == 0.05)] = 0.3
        # # But this way is even slower, takes 2x as long
        # numrows, numcols = np.shape(areal)
        # for j in np.arange(numrows):
        #     for k in np.arange(numcols):
        #         acval = Ac[j, k]
        #         if PGA[j, k] >= acval:
        #             if acval == 0.6:
        #                 areal[j, k] = 0.01
        #             elif acval == 0.5:
        #                 areal[j, k] = 0.02
        #             elif acval == 0.4:
        #                 areal[j, k] = 0.03
        #             elif acval == 0.35:
        #                 areal[j, k] = 0.05
        #             elif acval == 0.3:
        #                 areal[j, k] = 0.08
        #             elif acval == 0.25:
        #                 areal[j, k] = 0.1
        #             elif acval == 0.2:
        #                 areal[j, k] = 0.15
        #             elif acval == 0.15:
        #                 areal[j, k] = 0.2
        #             elif acval == 0.1:
        #                 areal[j, k] = 0.25
        #             elif acval == 0.05:
        #                 areal[j, k] = 0.3

    elif modeltype == 'dn_hazus' or modeltype == 'dn_prob':
        ed_low, ed_high = est_disp(Ac, PGA)
        ed_mean = np.mean((np.dstack((ed_low, ed_high))), axis=2)  # Get mean estimated displacements
        dn = ed_mean * numcycles(M) * PGA
    else:  # Calculate newmark displacement using a regression model
        if regressionmodel == 'J_PGA':
            dn = J_PGA(Ac, PGA)
        elif regressionmodel == 'J_PGA_M':
            dn = J_PGA_M(Ac, PGA, M)
        elif regressionmodel == 'RS_PGA_M':
            dn = RS_PGA_M(Ac, PGA, M)
        elif regressionmodel == 'RS_PGA_PGV':
            dn = RS_PGA_PGV(Ac, PGA, PGV)
        else:
            print('Unrecognized model, using J_PGA\n')
            dn = J_PGA(Ac, PGA)

    # Calculate probability from dn, if necessary for selected model
    if modeltype == 'ac_classic_prob' or modeltype == 'dn_prob':
        if probtype.lower() == 'jibson2000':
            PROB = 0.335*(1-np.exp(-0.048*dn**1.565))
            dnthresh = None
        elif probtype.lower() == 'threshold':
            PROB = dn.copy()
            PROB[PROB <= dnthresh] = 0
            PROB[PROB > dnthresh] = 1
        else:
            print('Invalid probtype, assuming jibson2000')
            PROB = 0.335*(1-np.exp(-0.048*dn**1.565))
            dnthresh = None

    # Turn output and inputs into into grids and put in maplayers dictionary
    maplayers = collections.OrderedDict()

    temp = shakemap.getShakeDict()
    shakedetail = '%s_ver%s' % (temp['shakemap_id'], temp['shakemap_version'])

    if modeltype == 'coverage':
        maplayers['model'] = {'grid': GDALGrid(areal, gdict), 'label': 'Areal coverage', 'type': 'output',
                              'description': {'name': modelsref, 'longref': modellref, 'units': 'coverage',
                                              'shakemap': shakedetail, 'parameters': {'modeltype': modeltype}}}
    elif modeltype in ('dn_hazus', 'ac_classic_dn'):
        maplayers['model'] = {'grid': GDALGrid(dn, gdict), 'label': 'Dn (cm)', 'type': 'output',
                              'description': {'name': modelsref, 'longref': modellref, 'units': 'displacement',
                                              'shakemap': shakedetail,
                                              'parameters': {'regressionmodel': regressionmodel, 'modeltype': modeltype}}}
    elif modeltype in ('dn_prob', 'ac_classic_prob'):
        maplayers['model'] = {'grid': GDALGrid(PROB, gdict), 'label': 'Landslide Probability', 'type': 'output',
                              'description': {'name': modelsref, 'longref': modellref, 'units': 'probability',
                                              'shakemap': shakedetail,
                                              'parameters': {'regressionmodel': regressionmodel, 'dnthresh_cm': dnthresh,
                                                             'modeltype': modeltype, 'probtype': probtype}}}

    if saveinputs is True:
        maplayers['suscat'] = {'grid': sus, 'label': 'Susceptibility Category', 'type': 'input', 'description': {'name': sussref, 'longref': suslref, 'units': 'Category'}}
        maplayers['Ac'] = {'grid': GDALGrid(Ac, gdict), 'label': 'Ac (g)', 'type': 'output', 'description': {'units': 'g', 'shakemap': shakedetail}}
        maplayers['pga'] = {'grid': GDALGrid(PGA, gdict), 'label': 'PGA (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}}
        if 'pgv' in regressionmodel.lower():
            maplayers['pgv'] = {'grid': GDALGrid(PGV, gdict), 'label': 'PGV (cm/s)', 'type': 'input', 'description': {'units': 'cm/s', 'shakemap': shakedetail}}
        if 'dn' not in modeltype.lower() and modeltype != 'coverage':
            maplayers['dn'] = {'grid': GDALGrid(dn, gdict), 'label': 'Dn (cm)', 'type': 'output', 'description': {'units': 'displacement', 'shakemap': shakedetail, 'parameters': {'regressionmodel': regressionmodel, 'modeltype': modeltype}}}

    return maplayers
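The chained boolean masks in the 'coverage' branch above can be collapsed into a single table lookup. A minimal sketch, assuming Ac only contains the tabulated critical accelerations (the helper name is hypothetical):

import numpy as np

# Critical accelerations (sorted) and their paired areal coverage fractions,
# taken from the assignments in the 'coverage' branch above.
AC_VALS = np.array([0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6])
COV_VALS = np.array([0.3, 0.25, 0.2, 0.15, 0.1, 0.08, 0.05, 0.03, 0.02, 0.01])

def coverage_lookup(PGA, Ac):
    # Map each Ac value to its index in AC_VALS, then to a coverage fraction;
    # cells where PGA < Ac get zero coverage.
    idx = np.clip(np.searchsorted(AC_VALS, Ac), 0, len(AC_VALS) - 1)
    return np.where(PGA >= Ac, COV_VALS[idx], 0.0)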
Example #30
def makeTestData():
    # make test layers
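    # NOTE: this function relies on module-level globals defined elsewhere in the
    # test script: friction, slope, vs30, cti1, precip (looked up via eval), plus
    # geodict, units, terms, coefficients, pga, pgv and stdpga.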
    X = ['friction', 'slope', 'vs30', 'cti1', 'precip']
    config = OrderedDict()
    config.setdefault('logistic_models', {}).setdefault('test_model', {})
    config['logistic_models']['test_model'].setdefault('shortref', 'Name et al. year')
    config['logistic_models']['test_model'].setdefault('longref', 'full reference')
    config['logistic_models']['test_model'].setdefault('layers', {})
    config['logistic_models']['test_model'].setdefault('interpolations', {})
    config['logistic_models']['test_model'].setdefault('terms', {})
    config['logistic_models']['test_model'].setdefault('coefficients', {})['b0'] = 3.5

    for k, items in enumerate(X):
        coef = 'b%1d' % (k+1)
        # make a GDALGrid object
        testgrid = GDALGrid(eval(items), geodict)
        # Save the file
        if items == 'precip':
            try:
                os.mkdir('test_precip')
            except OSError:
                pass  # directory already exists
            filename = 'test_precip/prec_Jan.bil'  # Only make January for testing
        else:
            filename = 'test_%s.bil' % (items)
        testgrid.save(filename, format='EHdr')

        # add to test config file
        config['logistic_models']['test_model']['layers'].update({items: {'file': filename.split('/')[0],
                                                                          'units': units[k], 'longref': 'longref',
                                                                          'shortref': 'shortref'}})
        config['logistic_models']['test_model']['interpolations'].update({items: 'nearest'})
        config['logistic_models']['test_model']['terms'].update({coef: terms[k]})
        config['logistic_models']['test_model']['coefficients'].update({coef: coefficients[k]})

    config['logistic_models']['test_model']['gfetype'] = 'landslide'
    config['logistic_models']['test_model']['baselayer'] = 'slope'
    config['logistic_models']['test_model']['slopemin'] = 5.
    config['logistic_models']['test_model']['slopemax'] = 90.

    # Make test_shakegrid and test_uncert
    eventDict = OrderedDict([('event_id', 'test'),
                            ('lon', 0.5),
                            ('lat', 0.5),
                            ('event_timestamp', datetime(2000, 1, 5, 0, 30, 55)),
                            ('event_network', 'na'),
                            ('magnitude', 6.0),
                            ('event_description', 'Test event'),
                            ('depth', 5.0)])
    shakeDict = OrderedDict([('process_timestamp',
                            datetime(2000, 1, 6, 20, 38, 19)),
                            ('event_id', 'test'),
                            ('shakemap_version', 2),
                            ('code_version', '1 billion'),
                            ('shakemap_event_type', 'TEST'),
                            ('map_status', 'TEST'),
                            ('shakemap_id', 'test'),
                            ('shakemap_originator', 'na')])
    uncertaintyDict = {}

    layers1 = {'pga': pga, 'pgv': pgv}
    shakegrid = ShakeGrid(layers1, geodict, eventDict, shakeDict, uncertaintyDict)
    shakegrid.save('test_shakegrid.xml')

    layers2 = {'stdpga': stdpga}
    uncertgrid = ShakeGrid(layers2, geodict, eventDict, shakeDict, uncertaintyDict)
    uncertgrid.save('test_uncert.xml')

    C = ConfigObj(config)
    C.filename = 'test.ini'
    C.write()

    return config
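For reference, the config written above can be read back with ConfigObj. A minimal usage sketch, assuming makeTestData() has already been run in the working directory:

from configobj import ConfigObj

config = ConfigObj('test.ini')  # file written by makeTestData()
print(config['logistic_models']['test_model']['baselayer'])  # -> 'slope'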
Example #31
def draw_contour(shakefile,
                 popfile,
                 oceanfile,
                 oceangridfile,
                 cityfile,
                 basename,
                 borderfile=None,
                 is_scenario=False):
    """Create a contour map showing population (greyscale) underneath contoured MMI.

    :param shakefile:
      String path to ShakeMap grid.xml file.
    :param popfile:
      String path to GDALGrid-compliant file containing population data.
    :param oceanfile:
      String path to file containing ocean vector data in a format compatible with fiona.
    :param oceangridfile:
      String path to file containing ocean grid data (1s over ocean, 0s over land).
    :param cityfile:
      String path to file containing GeoNames cities data.
    :param basename:
      String path containing desired output base name, i.e., /home/pager/exposure.  ".pdf" and ".png" files will
      be made.
    :param borderfile:
      String path to file containing country border vector data, or None to skip drawing borders.
    :param is_scenario:
      Boolean indicating whether the event is a scenario; if True, a "SCENARIO" watermark is drawn over the map.
    :returns:
      Tuple containing:
        - Name of PDF file created.
        - Name of PNG file created.
        - Cities object containing the cities that were rendered on the contour map.
    """
    #load the shakemap - for the time being, we're interpolating the
    #population data to the shakemap, which would be important
    #if we were doing math with the pop values.  We're not, so I think it's ok.
    shakegrid = ShakeGrid.load(shakefile, adjust='res')
    gd = shakegrid.getGeoDict()

    #Retrieve the epicenter - this will get used on the map
    clat = shakegrid.getEventDict()['lat']
    clon = shakegrid.getEventDict()['lon']

    #Load the population data, sample to shakemap
    popgrid = GDALGrid.load(popfile, samplegeodict=gd, resample=True)

    #load the ocean grid file (has 1s in ocean, 0s over land)
    #having this file saves us almost 30 seconds!
    oceangrid = GDALGrid.load(oceangridfile, samplegeodict=gd, resample=True)

    #load the cities data, limit to cities within shakemap bounds
    allcities = Cities.fromDefault()
    cities = allcities.limitByBounds((gd.xmin, gd.xmax, gd.ymin, gd.ymax))

    #define the map
    #first cope with stupid 180 meridian
    height = (gd.ymax - gd.ymin) * 111.191
    if gd.xmin < gd.xmax:
        width = (gd.xmax - gd.xmin) * np.cos(np.radians(clat)) * 111.191
        xmin, xmax, ymin, ymax = (gd.xmin, gd.xmax, gd.ymin, gd.ymax)
    else:
        xmin, xmax, ymin, ymax = (gd.xmin, gd.xmax, gd.ymin, gd.ymax)
        xmax += 360
        width = (
            (gd.xmax + 360) - gd.xmin) * np.cos(np.radians(clat)) * 111.191

    aspect = width / height

    #if the aspect is not 1, then trim bounds in x or y direction as appropriate
    if width > height:
        dw = (width - height) / 2.0  #this is width in km
        xmin = xmin + dw / (np.cos(np.radians(clat)) * 111.191)
        xmax = xmax - dw / (np.cos(np.radians(clat)) * 111.191)
        width = (xmax - xmin) * np.cos(np.radians(clat)) * 111.191
    if height > width:
        dh = (height - width) / 2.0  #this is height in km
        ymin = ymin + dh / 111.191
        ymax = ymax - dh / 111.191
        height = (ymax - ymin) * 111.191
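    # e.g., a 300 km wide by 200 km tall extent gets 50 km trimmed from each side
    # in x, leaving a square 200 km x 200 km map.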

    aspect = width / height
    figheight = FIGWIDTH / aspect
    bbox = (xmin, ymin, xmax, ymax)
    bounds = (xmin, xmax, ymin, ymax)
    figsize = (FIGWIDTH, figheight)

    #Create the MercatorMap object, which holds a separate but identical
    #axes object used to determine collisions between city labels.
    mmap = MercatorMap(bounds, figsize, cities, padding=0.5)
    fig = mmap.figure
    ax = mmap.axes
    #this needs to be done here so that city label collision detection will work
    fig.canvas.draw()

    clon = xmin + (xmax - xmin) / 2
    clat = ymin + (ymax - ymin) / 2
    geoproj = mmap.geoproj
    proj = mmap.proj

    #project our population grid to the map projection
    projstr = proj.proj4_init
    popgrid_proj = popgrid.project(projstr)
    popdata = popgrid_proj.getData()
    newgd = popgrid_proj.getGeoDict()

    # Use our GMT-inspired palette class to create population and MMI colormaps
    popmap = ColorPalette.fromPreset('pop')
    mmimap = ColorPalette.fromPreset('mmi')

    #set the image extent to that of the data
    img_extent = (newgd.xmin, newgd.xmax, newgd.ymin, newgd.ymax)
    plt.imshow(popdata,
               origin='upper',
               extent=img_extent,
               cmap=popmap.cmap,
               vmin=popmap.vmin,
               vmax=popmap.vmax,
               zorder=POP_ZORDER,
               interpolation='nearest')

    #draw 10m res coastlines
    ax.coastlines(resolution="10m", zorder=COAST_ZORDER)

    #draw country borders using natural earth data set
    if borderfile is not None:
        borders = ShapelyFeature(
            Reader(borderfile).geometries(), ccrs.PlateCarree())
        ax.add_feature(borders,
                       zorder=COAST_ZORDER,
                       edgecolor='black',
                       linewidth=2,
                       facecolor='none')

    #clip the ocean data to the shakemap
    bbox = (gd.xmin, gd.ymin, gd.xmax, gd.ymax)
    oceanshapes = _clip_bounds(bbox, oceanfile)

    ax.add_feature(ShapelyFeature(oceanshapes, crs=geoproj),
                   facecolor=WATERCOLOR,
                   zorder=OCEAN_ZORDER)

    #It turns out that when presented with a map that crosses the 180 meridian,
    #the matplotlib/cartopy contouring routine thinks that the 180 meridian is a map boundary
    #and only plots one side of the contour.  Contouring the geographic MMI data and then
    #projecting the resulting contour vectors does the trick.  Sigh.

    #define contour grid spacing
    contoury = np.linspace(ymin, ymax, gd.ny)
    contourx = np.linspace(xmin, xmax, gd.nx)

    #smooth the MMI data for contouring
    mmi = shakegrid.getLayer('mmi').getData()
    smoothed_mmi = gaussian_filter(mmi, FILTER_SMOOTH)

    #create masked arrays of the ocean grid
    landmask = np.ma.masked_where(oceangrid._data == 0.0, smoothed_mmi)
    oceanmask = np.ma.masked_where(oceangrid._data == 1.0, smoothed_mmi)

    #contour the data
    land_contour = plt.contour(contourx,
                               contoury,
                               np.flipud(oceanmask),
                               linewidths=3.0,
                               linestyles='solid',
                               zorder=LANDC_ZORDER,
                               cmap=mmimap.cmap,
                               vmin=mmimap.vmin,
                               vmax=mmimap.vmax,
                               levels=np.arange(0.5, 10.5, 1.0),
                               transform=geoproj)

    ocean_contour = plt.contour(contourx,
                                contoury,
                                np.flipud(landmask),
                                linewidths=2.0,
                                linestyles='dashed',
                                zorder=OCEANC_ZORDER,
                                cmap=mmimap.cmap,
                                vmin=mmimap.vmin,
                                vmax=mmimap.vmax,
                                levels=np.arange(0.5, 10.5, 1.0),
                                transform=geoproj)

    #the idea here is to plot invisible MMI contours at integer levels and then label them.
    #clabel method won't allow text to appear, which is this case is kind of ok, because
    #it allows us an easy way to draw MMI labels as roman numerals.
    cs_land = plt.contour(contourx,
                          contoury,
                          np.flipud(oceanmask),
                          linewidths=0.0,
                          levels=np.arange(0, 11),
                          zorder=CLABEL_ZORDER,
                          transform=geoproj)

    clabel_text = ax.clabel(cs_land,
                            np.arange(0, 11),
                            colors='k',
                            zorder=CLABEL_ZORDER,
                            fmt='%.0f',
                            fontsize=40)
    for clabel in clabel_text:
        x, y = clabel.get_position()
        label_str = clabel.get_text()
        roman_label = MMI_LABELS[label_str]
        th = plt.text(x,
                      y,
                      roman_label,
                      zorder=CLABEL_ZORDER,
                      ha='center',
                      va='center',
                      color='black',
                      weight='normal',
                      size=16)
        th.set_path_effects([
            path_effects.Stroke(linewidth=2.0, foreground='white'),
            path_effects.Normal()
        ])

    cs_ocean = plt.contour(contourx,
                           contoury,
                           np.flipud(landmask),
                           linewidths=0.0,
                           levels=np.arange(0, 11),
                           zorder=CLABEL_ZORDER,
                           transform=geoproj)

    clabel_text = ax.clabel(cs_ocean,
                            np.arange(0, 11),
                            colors='k',
                            zorder=CLABEL_ZORDER,
                            fmt='%.0f',
                            fontsize=40)
    for clabel in clabel_text:
        x, y = clabel.get_position()
        label_str = clabel.get_text()
        roman_label = MMI_LABELS[label_str]
        th = plt.text(x,
                      y,
                      roman_label,
                      zorder=CLABEL_ZORDER,
                      ha='center',
                      va='center',
                      color='black',
                      weight='normal',
                      size=16)
        th.set_path_effects([
            path_effects.Stroke(linewidth=2.0, foreground='white'),
            path_effects.Normal()
        ])

    #draw meridians and parallels using Cartopy's functions for that
    gl = ax.gridlines(draw_labels=True,
                      linewidth=2,
                      color=(0.9, 0.9, 0.9),
                      alpha=0.5,
                      linestyle='-',
                      zorder=GRID_ZORDER)
    gl.xlabels_top = False
    gl.xlabels_bottom = False
    gl.ylabels_left = False
    gl.ylabels_right = False
    gl.xlines = True
    step = 1

    #let's floor/ceil the edges to nearest half a degree
    gxmin = np.floor(xmin * 2) / 2
    gxmax = np.ceil(xmax * 2) / 2
    gymin = np.floor(ymin * 2) / 2
    gymax = np.ceil(ymax * 2) / 2

    xlocs = np.linspace(gxmin, gxmax + 0.5, num=5)
    ylocs = np.linspace(gymin, gymax + 0.5, num=5)

    gl.xlocator = mticker.FixedLocator(xlocs)
    gl.ylocator = mticker.FixedLocator(ylocs)
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlabel_style = {'size': 15, 'color': 'black'}
    gl.ylabel_style = {'size': 15, 'color': 'black'}

    #TODO - figure out x/y axes data coordinates corresponding to 10% from left
    #and 10% from top
    #use geoproj and proj
    dleft = 0.01
    dtop = 0.97
    proj_str = proj.proj4_init
    merc_to_dd = pyproj.Proj(proj_str)

    #use built-in transforms to get from axes units to data units
    display_to_data = ax.transData.inverted()
    axes_to_display = ax.transAxes

    #these are x,y coordinates in projected space
    yleft, t1 = display_to_data.transform(
        axes_to_display.transform((dleft, 0.5)))
    t2, xtop = display_to_data.transform(axes_to_display.transform(
        (0.5, dtop)))

    #these are coordinates in lon,lat space
    yleft_dd, t1_dd = merc_to_dd(yleft, t1, inverse=True)
    t2_dd, xtop_dd = merc_to_dd(t2, xtop, inverse=True)

    #drawing our own tick labels INSIDE the plot, as Cartopy doesn't seem to support this.
    yrange = ymax - ymin
    xrange = xmax - xmin
    for xloc in gl.xlocator.locs:
        outside = xloc < xmin or xloc > xmax
        #don't draw labels when we're too close to either edge
        near_edge = (xloc - xmin) < (xrange * 0.1) or (xmax - xloc) < (xrange *
                                                                       0.1)
        if outside or near_edge:
            continue
        if xloc < 0:
            xtext = r'$%.1f^\circ$W' % (abs(xloc))
        else:
            xtext = r'$%.1f^\circ$E' % (xloc)
        ax.text(xloc,
                xtop_dd,
                xtext,
                fontsize=14,
                zorder=GRID_ZORDER,
                ha='center',
                fontname=DEFAULT_FONT,
                transform=ccrs.Geodetic())

    for yloc in gl.ylocator.locs:
        outside = yloc < gd.ymin or yloc > gd.ymax
        #don't draw labels when we're too close to either edge
        near_edge = (yloc - gd.ymin) < (yrange * 0.1) or (gd.ymax - yloc) < (
            yrange * 0.1)
        if outside or near_edge:
            continue
        if yloc < 0:
            ytext = r'$%.1f^\circ$S' % (abs(yloc))
        else:
            ytext = r'$%.1f^\circ$N' % (abs(yloc))
        thing = ax.text(yleft_dd,
                        yloc,
                        ytext,
                        fontsize=14,
                        zorder=GRID_ZORDER,
                        va='center',
                        fontname=DEFAULT_FONT,
                        transform=ccrs.Geodetic())

    #draw cities
    mapcities = mmap.drawCities(shadow=True, zorder=CITIES_ZORDER)

    #draw the figure border thickly
    #TODO - figure out how to draw map border
    # bwidth = 3
    # ax.spines['top'].set_visible(True)
    # ax.spines['left'].set_visible(True)
    # ax.spines['bottom'].set_visible(True)
    # ax.spines['right'].set_visible(True)
    # ax.spines['top'].set_linewidth(bwidth)
    # ax.spines['right'].set_linewidth(bwidth)
    # ax.spines['bottom'].set_linewidth(bwidth)
    # ax.spines['left'].set_linewidth(bwidth)

    #Get the corner of the map with the lowest population
    corner_rect, filled_corner = _get_open_corner(popgrid, ax)
    clat2 = round_to_nearest(clat, 1.0)
    clon2 = round_to_nearest(clon, 1.0)

    #draw a little globe in the corner showing in small-scale where the earthquake is located.
    proj = ccrs.Orthographic(central_latitude=clat2, central_longitude=clon2)
    ax2 = fig.add_axes(corner_rect, projection=proj)
    ax2.add_feature(cartopy.feature.OCEAN,
                    zorder=0,
                    facecolor=WATERCOLOR,
                    edgecolor=WATERCOLOR)
    ax2.add_feature(cartopy.feature.LAND, zorder=0, edgecolor='black')
    ax2.plot([clon2], [clat2],
             'w*',
             linewidth=1,
             markersize=16,
             markeredgecolor='k',
             markerfacecolor='r')
    gh = ax2.gridlines()
    ax2.set_global()
    ax2.outline_patch.set_edgecolor('black')
    ax2.outline_patch.set_linewidth(2)

    #Draw the map scale in the unoccupied lower corner.
    corner = 'lr'
    if filled_corner == 'lr':
        corner = 'll'
    draw_scale(ax, corner, pady=0.05, padx=0.05)

    #Draw the epicenter as a black star
    plt.sca(ax)
    plt.plot(clon,
             clat,
             'k*',
             markersize=16,
             zorder=EPICENTER_ZORDER,
             transform=geoproj)

    if is_scenario:
        plt.text(clon,
                 clat,
                 'SCENARIO',
                 fontsize=64,
                 zorder=WATERMARK_ZORDER,
                 transform=geoproj,
                 alpha=0.2,
                 color='red',
                 horizontalalignment='center')

    #create pdf and png output file names
    pdf_file = basename + '.pdf'
    png_file = basename + '.png'

    #save to pdf
    plt.savefig(pdf_file)
    plt.savefig(png_file)

    return (pdf_file, png_file, mapcities)
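A minimal invocation sketch; every path below is a hypothetical placeholder, not a file shipped with this code:

pdf_file, png_file, mapcities = draw_contour(
    'grid.xml',        # ShakeMap grid
    'population.flt',  # GDALGrid-compliant population raster
    'ocean.json',      # ocean vector data (fiona-readable)
    'ocean_grid.flt',  # ocean raster (1 = ocean, 0 = land)
    'cities.csv',      # GeoNames cities file
    '/tmp/exposure')   # output base name -> /tmp/exposure.pdf and .png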
Example #32
    def calcExposure(self,shakefile):
        """Calculate population exposure to shaking, per country, plus total exposure across all countries.

        :param shakefile:
          Path to ShakeMap grid.xml file.
        :returns:
          Dictionary containing country code (ISO2) keys, and values of
          10 element arrays representing population exposure to MMI 1-10.
          Dictionary will contain an additional key 'TotalExposure', with value of exposure across all countries.
          Dictionary will also contain a field "maximum_border_mmi" which indicates the maximum MMI value along
          any edge of the ShakeMap.
        """
        #get shakemap geodict
        shakedict = ShakeGrid.getFileGeoDict(shakefile,adjust='res')
            
        #get population geodict
        popdict,t = self._pop_class.getFileGeoDict(self._popfile)

        #get country code geodict
        isodict,t = self._iso_class.getFileGeoDict(self._isofile)

        #special case for very high latitude events that may be outside the bounds
        #of our population data...
        if not popdict.intersects(shakedict):
            expdict = {'UK':np.zeros((10,)),'TotalExposure':np.zeros((10,))}
            return expdict
        
        if popdict == shakedict == isodict:
            #special case, probably for testing...
            self._shakegrid = ShakeGrid.load(shakefile,adjust='res')
            self._popgrid = self._pop_class.load(self._popfile)
            self._isogrid = self._iso_class.load(self._isofile)
        else:
            sampledict = popdict.getBoundsWithin(shakedict)
            self._shakegrid = ShakeGrid.load(shakefile,samplegeodict=sampledict,resample=True,
                                             method='linear',adjust='res')
            self._popgrid = self._pop_class.load(self._popfile,samplegeodict=sampledict,
                                                 resample=False,doPadding=True,padValue=np.nan)
            self._isogrid = self._iso_class.load(self._isofile,samplegeodict=sampledict,
                                                 resample=True,method='nearest',doPadding=True,padValue=0)

        mmidata = self._shakegrid.getLayer('mmi').getData()
        popdata = self._popgrid.getData()
        isodata = self._isogrid.getData()

        eventyear = self._shakegrid.getEventDict()['event_timestamp'].year

        #in order to avoid crazy far-future scenarios where PAGER models are probably invalid,
        #check to see if the time gap between the date of population data collection and event year
        #reaches either of a couple of different thresholds.
        if eventyear > self._popyear:
            tdiff = (eventyear - self._popyear)
            if tdiff > SCENARIO_WARNING and tdiff < SCENARIO_ERROR:
                msg = '''The input ShakeMap event year is more than %i years from the population date.
                PAGER results for events this far in the future may not be valid.''' % SCENARIO_WARNING
                warnings.warn(msg)
            if tdiff > SCENARIO_ERROR:
                msg = '''The input ShakeMap event year is more than %i years from the population date.
                PAGER results for events this far in the future are not valid. Stopping.''' % SCENARIO_ERROR
                raise PagerException(msg)
        
        ucodes = np.unique(isodata)
        for ccode in ucodes:
            cidx = (isodata == ccode)
            popdata[cidx] = self._popgrowth.adjustPopulation(popdata[cidx],ccode,self._popyear,eventyear)
        
        exposure_dict = calc_exposure(mmidata,popdata,isodata)
        newdict = {}
        #Get rolled up exposures
        total = np.zeros((10,),dtype=np.uint32)
        for isocode,value in exposure_dict.items():
            cdict = self._country.getCountry(int(isocode))
            if cdict is None:
                ccode = 'UK'
            else:
                ccode = cdict['ISO2']
            newdict[ccode] = value
            total += value

        newdict['TotalExposure'] = total

        #get the maximum MMI value along any of the four map edges
        nrows,ncols = mmidata.shape
        top = mmidata[0,0:ncols].max()
        bottom = mmidata[nrows-1,0:ncols].max()
        left = mmidata[0:nrows,0].max()
        right = mmidata[0:nrows,ncols-1].max()
        newdict['maximum_border_mmi'] = np.array([top,bottom,left,right]).max()
        
        return newdict
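A hedged usage sketch; the owning class name and its constructor signature are assumptions, since they are not shown in this listing:

# Hypothetical usage of calcExposure(); 'Exposure' and its constructor are assumptions.
# exp = Exposure(popfile='pop.flt', popyear=2015, isofile='iso.flt')
# expdict = exp.calcExposure('grid.xml')
# expdict['TotalExposure']       # 10-element array: population exposed at MMI 1-10
# expdict['maximum_border_mmi']  # max MMI along any edge of the ShakeMap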
Example #33
def godt2008(shakefile, config, uncertfile=None, saveinputs=False, regressionmodel='J_PGA', bounds=None, slopediv=100., codiv=10.):
    """ This function runs the Godt et al. (2008) global method for a given ShakeMap. The Factor of Safety
    is calculated using infinite slope analysis assumuing dry conditions. The method uses threshold newmark
    displacement and estimates areal coverage by doing the calculations for each slope quantile
    TO DO - add 'all' - averages Dn from all four equations, add term to convert PGA and PGV to Ia and use other equations, add Ambraseys and Menu (1988) option

    :param shakefile: url or filepath to shakemap xml file
    :type shakefile: string
    :param config: ConfigObj of config file containing inputs required for running the model
    :type config: ConfigObj
    :param saveinputs: Whether or not to return the model input layers, False (default) returns only the model output (one layer)
    :type saveinputs: boolean
    :param regressionmodel:
        Newmark displacement regression model to use
        'J_PGA' (default) - PGA-based model from Jibson (2007) - equation 6
        'J_PGA_M' - PGA and M-based model from Jibson (2007) - equation 7
        'RS_PGA_M' - PGA and M-based model from Rathje and Saygili (2009)
        'RS_PGA_PGV' - PGA and PGV-based model from Saygili and Rathje (2008) - equation 6
    :type regressionmodel: string
    :param uncertfile: Complete file path to the location of the uncertainty.xml for the shakefile; the ground motion uncertainty option is not yet implemented for this method
    :type uncertfile: string
    :param bounds: Dictionary with keys 'xmin', 'xmax', 'ymin', 'ymax' restricting the model to a subset of the ShakeMap area, or None to use the full ShakeMap bounds
    :type bounds: dictionary
    :param slopediv: Divide slope by this number to get slope in degrees (Verdin datasets need to be divided by 100)
    :type slopediv: float
    :param codiv: Divide cohesion by this number to get reasonable numbers (For Godt method, need to divide by 10 because that is how it was calibrated, but values are reasonable without multiplying for regular analysis)
    :type codiv: float

    :returns maplayers:  Dictionary containing output and input layers (if saveinputs=True) along with metadata formatted like maplayers['layer name']={'grid': mapio grid2D object, 'label': 'label for colorbar and top line of subtitle', 'type': 'output or input to model', 'description': 'detailed description of layer for subtitle, potentially including source information'}
    :type maplayers: OrderedDict

    :raises NameError: when unable to parse the config correctly (probably a formatting issue in the config file) or when unable to find the shakefile (ShakeMap URL or filepath) - these cause the program to end
    """

    # Empty refs
    slopesref = 'unknown'
    slopelref = 'unknown'
    cohesionlref = 'unknown'
    cohesionsref = 'unknown'
    frictionsref = 'unknown'
    frictionlref = 'unknown'
    modellref = 'unknown'
    modelsref = 'unknown'

    if uncertfile is not None:
        print('ground motion uncertainty option not implemented yet')

    # Parse config
    try:  # May want to add error handling so if refs aren't given, just includes unknown
        slopefilepath = config['mechanistic_models']['godt_2008']['layers']['slope']['filepath']
        slopeunits = config['mechanistic_models']['godt_2008']['layers']['slope']['units']
        cohesionfile = config['mechanistic_models']['godt_2008']['layers']['cohesion']['file']
        cohesionunits = config['mechanistic_models']['godt_2008']['layers']['cohesion']['units']
        frictionfile = config['mechanistic_models']['godt_2008']['layers']['friction']['file']
        frictionunits = config['mechanistic_models']['godt_2008']['layers']['friction']['units']

        thick = float(config['mechanistic_models']['godt_2008']['parameters']['thick'])
        uwt = float(config['mechanistic_models']['godt_2008']['parameters']['uwt'])
        nodata_cohesion = float(config['mechanistic_models']['godt_2008']['parameters']['nodata_cohesion'])
        nodata_friction = float(config['mechanistic_models']['godt_2008']['parameters']['nodata_friction'])
        dnthresh = float(config['mechanistic_models']['godt_2008']['parameters']['dnthresh'])
        fsthresh = float(config['mechanistic_models']['godt_2008']['parameters']['fsthresh'])
        acthresh = float(config['mechanistic_models']['godt_2008']['parameters']['acthresh'])
    except Exception as e:
        raise NameError('Could not parse configfile, %s' % e)

    # TO DO, ADD ERROR CATCHING ON UNITS, MAKE SURE THEY ARE WHAT THEY SHOULD BE FOR THIS MODEL

    try:  # Try to fetch source information from config
        modelsref = config['mechanistic_models']['godt_2008']['shortref']
        modellref = config['mechanistic_models']['godt_2008']['longref']
        slopesref = config['mechanistic_models']['godt_2008']['layers']['slope']['shortref']
        slopelref = config['mechanistic_models']['godt_2008']['layers']['slope']['longref']
        cohesionsref = config['mechanistic_models']['godt_2008']['layers']['cohesion']['shortref']
        cohesionlref = config['mechanistic_models']['godt_2008']['layers']['cohesion']['longref']
        frictionsref = config['mechanistic_models']['godt_2008']['layers']['friction']['shortref']
        frictionlref = config['mechanistic_models']['godt_2008']['layers']['friction']['longref']
    except KeyError:
        print('Was not able to retrieve all references from config file. Continuing.')

    # Load in shakefile
    if not os.path.isfile(shakefile):
        if isURL(shakefile):
            shakefile = getGridURL(shakefile)  # returns a file object
        else:
            raise NameError('Could not find "%s" as a file or a valid url' % (shakefile))

    shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
    if bounds is not None:  # Make sure bounds are within ShakeMap Grid
        if shkgdict.xmin > bounds['xmin'] or shkgdict.xmax < bounds['xmax'] or shkgdict.ymin > bounds['ymin'] or shkgdict.ymax < bounds['ymax']:
            print('Specified bounds are outside shakemap area, using ShakeMap bounds instead')
            bounds = None
    if bounds is not None:
        tempgdict = GeoDict({'xmin': bounds['xmin'], 'ymin': bounds['ymin'], 'xmax': bounds['xmax'], 'ymax': bounds['ymax'], 'dx': shkgdict.dx, 'dy': shkgdict.dy, 'nx': shkgdict.nx, 'ny': shkgdict.ny}, adjust='res')
        gdict = shkgdict.getBoundsWithin(tempgdict)
        shakemap = ShakeGrid.load(shakefile, samplegeodict=gdict, adjust='bounds')
    else:
        shakemap = ShakeGrid.load(shakefile, adjust='res')
    shkgdict = shakemap.getGeoDict()  # Get updated geodict
    M = shakemap.getEventDict()['magnitude']

    # Read in all the slope files, dividing by slopediv to convert to degrees (Verdin input files store slope*100)
    slopes = []
    slopes.append(GDALGrid.load(os.path.join(slopefilepath, 'slope_min.bil'), samplegeodict=shkgdict, resample=True, method='linear').getData()/slopediv)
    slopes.append(GDALGrid.load(os.path.join(slopefilepath, 'slope10.bil'), samplegeodict=shkgdict, resample=True, method='linear').getData()/slopediv)
    slopes.append(GDALGrid.load(os.path.join(slopefilepath, 'slope30.bil'), samplegeodict=shkgdict, resample=True, method='linear').getData()/slopediv)
    slopes.append(GDALGrid.load(os.path.join(slopefilepath, 'slope50.bil'), samplegeodict=shkgdict, resample=True, method='linear').getData()/slopediv)
    slopes.append(GDALGrid.load(os.path.join(slopefilepath, 'slope70.bil'), samplegeodict=shkgdict, resample=True, method='linear').getData()/slopediv)
    slopes.append(GDALGrid.load(os.path.join(slopefilepath, 'slope90.bil'), samplegeodict=shkgdict, resample=True, method='linear').getData()/slopediv)
    slopes.append(GDALGrid.load(os.path.join(slopefilepath, 'slope_max.bil'), samplegeodict=shkgdict, resample=True, method='linear').getData()/slopediv)
    slopestack = np.dstack(slopes)

    # Change any zero slopes to a very small number to avoid dividing by zero later
    slopestack[slopestack == 0] = 1e-8

    # Read in the cohesion and friction files and duplicate layers so they are same shape as slope structure
    cohesion = np.repeat(GDALGrid.load(cohesionfile, samplegeodict=shakemap.getGeoDict(), resample=True, method='nearest').getData()[:, :, np.newaxis]/codiv, 7, axis=2)
    cohesion[cohesion == -999.9] = nodata_cohesion
    cohesion[cohesion == 0] = nodata_cohesion
    friction = np.repeat(GDALGrid.load(frictionfile, samplegeodict=shakemap.getGeoDict(), resample=True, method='nearest').getData().astype(float)[:, :, np.newaxis], 7, axis=2)
    friction[friction == -9999] = nodata_friction
    friction[friction == 0] = nodata_friction

    # Do the calculations using Jibson (2007) PGA only model for Dn
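    # Infinite-slope, dry-conditions factor of safety and critical acceleration:
    #   FS = c' / (gamma * t * sin(alpha)) + tan(phi') / tan(alpha)
    #   Ac = (FS - 1) * sin(alpha)   [in g]
    # with c' = cohesion, gamma = unit weight (uwt), t = slab thickness (thick),
    # alpha = slope angle and phi' = friction angle.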
    FS = cohesion/(uwt*thick*np.sin(slopestack*(np.pi/180.))) + np.tan(friction*(np.pi/180.))/np.tan(slopestack*(np.pi/180.))
    FS[FS < fsthresh] = fsthresh

    # Compute critical acceleration, in g
    Ac = (FS-1)*np.sin(slopestack*(np.pi/180.)).astype(float)  # This gives ac in g, equations that multiply by g give ac in m/s2
    Ac[Ac < acthresh] = acthresh

    # Get PGA in g (PGA is %g in ShakeMap, convert to g)
    PGA = np.repeat(shakemap.getLayer('pga').getData()[:, :, np.newaxis]/100., 7, axis=2).astype(float)

    if 'PGV' in regressionmodel:  # Load in PGV also, in cm/sec
        PGV = np.repeat(shakemap.getLayer('pgv').getData()[:, :, np.newaxis], 7, axis=2).astype(float)

    np.seterr(invalid='ignore')  # Ignore invalid-value warnings so the run continues when Ac > PGA; those cells become NaN instead of crashing

    if regressionmodel == 'J_PGA':
        Dn = J_PGA(Ac, PGA)
    elif regressionmodel == 'J_PGA_M':
        Dn = J_PGA_M(Ac, PGA, M)
    elif regressionmodel == 'RS_PGA_M':
        Dn = RS_PGA_M(Ac, PGA, M)
    elif regressionmodel == 'RS_PGA_PGV':
        Dn = RS_PGA_PGV(Ac, PGA, PGV)
    else:
        print('Unrecognized regression model, using J_PGA\n')
        Dn = J_PGA(Ac, PGA)

    PROB = Dn.copy()
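    # PROB currently holds Dn for each of the 7 slope quantiles; the thresholding
    # and sum below count how many quantiles exceed dnthresh, and that count (1-7)
    # is then mapped to an areal coverage fraction (1% to 99%).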
    PROB[PROB < dnthresh] = 0.
    PROB[PROB >= dnthresh] = 1.
    PROB = np.sum(PROB, axis=2)
    PROB[PROB == 1.] = 0.01
    PROB[PROB == 2.] = 0.10
    PROB[PROB == 3.] = 0.30
    PROB[PROB == 4.] = 0.50
    PROB[PROB == 5.] = 0.70
    PROB[PROB == 6.] = 0.90
    PROB[PROB == 7.] = 0.99

    # Turn output and inputs into grids and put in maplayers dictionary
    maplayers = collections.OrderedDict()

    temp = shakemap.getShakeDict()
    shakedetail = '%s_ver%s' % (temp['shakemap_id'], temp['shakemap_version'])

    description = {'name': modelsref, 'longref': modellref, 'units': 'coverage', 'shakemap': shakedetail, 'parameters': {'regressionmodel': regressionmodel, 'thickness_m': thick, 'unitwt_kNm3': uwt, 'dnthresh_cm': dnthresh, 'acthresh_g': acthresh, 'fsthresh': fsthresh}}

    maplayers['model'] = {'grid': GDALGrid(PROB, shakemap.getGeoDict()), 'label': 'Areal coverage', 'type': 'output', 'description': description}

    if saveinputs is True:
        maplayers['pga'] = {'grid': GDALGrid(PGA[:, :, 0], shakemap.getGeoDict()), 'label': 'PGA (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}}
        if 'PGV' in regressionmodel:
            maplayers['pgv'] = {'grid': GDALGrid(PGV[:, :, 0], shakemap.getGeoDict()), 'label': 'PGV (cm/s)', 'type': 'input', 'description': {'units': 'cm/s', 'shakemap': shakedetail}}
        maplayers['minFS'] = {'grid': GDALGrid(np.min(FS, axis=2), shakemap.getGeoDict()), 'label': 'Min Factor of Safety', 'type': 'input', 'description': {'units': 'unitless'}}
        maplayers['max slope'] = {'grid': GDALGrid(slopestack[:, :, -1], shakemap.getGeoDict()), 'label': r'Maximum slope ($^\circ$)', 'type': 'input', 'description': {'units': 'degrees', 'name': slopesref, 'longref': slopelref}}
        maplayers['cohesion'] = {'grid': GDALGrid(cohesion[:, :, 0], shakemap.getGeoDict()), 'label': 'Cohesion (kPa)', 'type': 'input', 'description': {'units': 'kPa (adjusted)', 'name': cohesionsref, 'longref': cohesionlref}}
        maplayers['friction angle'] = {'grid': GDALGrid(friction[:, :, 0], shakemap.getGeoDict()), 'label': r'Friction angle ($^\circ$)', 'type': 'input', 'description': {'units': 'degrees', 'name': frictionsref, 'longref': frictionlref}}

    return maplayers
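The J_PGA regression itself is not shown in this listing. A minimal sketch of what it presumably implements, Jibson (2007) equation 6; treat the coefficients as an assumption to verify against the source, not as the library's exact code:

import numpy as np

def J_PGA_sketch(Ac, PGA):
    # Jibson (2007), eq. 6:
    #   log10(Dn) = 0.215 + log10[(1 - Ac/PGA)**2.341 * (Ac/PGA)**-1.438]
    # with Dn in cm; valid where PGA > Ac.
    ratio = Ac / PGA
    logDn = 0.215 + np.log10((1. - ratio)**2.341 * ratio**-1.438)
    return 10.**logDn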
Example #34
def _test_intensity():

    datadir = os.path.abspath(os.path.join(
        homedir, '..', 'data', 'eventdata', 'northridge'))
    shakefile = os.path.join(datadir, 'northridge_grid.xml')
    topofile = os.path.join(datadir, 'northridge_topo.grd')
    faultfile = os.path.join(datadir, 'northridge_fault.txt')
    cityfile = os.path.join(datadir, 'northridge_cities.txt')
    coastfile = os.path.join(datadir, 'northridge_coastline.json')
    countryfile = os.path.join(datadir, 'northridge_countries.json')
    statefile = os.path.join(datadir, 'northridge_states.json')
    lakefile = os.path.join(datadir, 'northridge_lakes.json')
    oceanfile = os.path.join(datadir, 'northridge_ocean.json')
    stationfile = os.path.join(datadir, 'northridge_stations.db')
    roadfile = os.path.join(datadir, 'northridge_roads.json')
    tancptfile = os.path.join(shakedir, 'shakemap', 'mapping', 'tan.cpt')
    shakecptfile = os.path.join(
        shakedir, 'shakemap', 'mapping', 'shakecpt.cpt')

    layerdict = {'coast': coastfile,
                 'ocean': oceanfile,
                 'lake': lakefile,
                 'country': countryfile,
                 'roads': roadfile,
                 'state': statefile}

    tancolormap = ColorPalette.fromPreset('shaketopo')
    shakecolormap = ColorPalette.fromPreset('mmi')
    cities = BasemapCities.loadFromCSV(cityfile)
    shakemap = ShakeGrid.load(shakefile, adjust='res')
    stations = StationList(stationfile)
    fault = Fault.readFaultFile(faultfile)
    edict = shakemap.getEventDict()
    eventdict = {'lat': edict['lat'],
                 'lon': edict['lon'],
                 'depth': edict['depth'],
                 'mag': edict['magnitude'],
                 'time': edict['event_timestamp']}
    source = Source(eventdict, fault)
    maker = MapMaker(shakemap, topofile, stations,
                     fault, layerdict, source, cities)

    # draw intensity map
    outfolder = os.path.expanduser('~')
    maker.setIntensityLayer('mmi')
    maker.setIntensityGMTColorMap(shakecolormap)
    intensity_map = maker.drawIntensityMap(outfolder)
    print('Intensity map saved as: %s' % intensity_map)

    # draw contour maps
    maker.setContourGMTColorMap(tancolormap)

    # Draw pgv contours
    maker.setContourLayer('pgv')
    contour_pgv_map = maker.drawContourMap(outfolder)
    print('PGV contour map saved as: %s' % contour_pgv_map)

    # Draw pga contours
    maker.setContourLayer('pga')
    contour_pga_map = maker.drawContourMap(outfolder)
    print('PGA contour map saved as: %s' % contour_pga_map)

    # Draw psa0.3 contours
    maker.setContourLayer('psa03')
    contour_psa03_map = maker.drawContourMap(outfolder)
    print('PSA0.3 contour map saved as: %s' % contour_psa03_map)

    # Draw psa1.0 contours
    maker.setContourLayer('psa10')
    contour_psa10_map = maker.drawContourMap(outfolder)
    print('PSA1.0 contour map saved as: %s' % contour_psa10_map)

    # Draw psa3.0 contours
    maker.setContourLayer('psa30')
    contour_psa30_map = maker.drawContourMap(outfolder)
    print('PSA3.0 contour map saved as: %s' % contour_psa30_map)
Example #35
def draw_contour(shakefile,
                 popfile,
                 oceanfile,
                 cityfile,
                 outfilename,
                 make_png=False):
    """Create a contour map showing population (greyscale) underneath contoured MMI.

    :param shakefile:
      String path to ShakeMap grid.xml file.
    :param popfile:
      String path to GDALGrid-compliant file containing population data.
    :param oceanfile:
      String path to file containing ocean vector data in a format compatible with fiona.
    :param cityfile:
      String path to file containing GeoNames cities data.
    :param outfilename:
      String path containing desired output PDF filename.
    :param make_png:
      Boolean indicating whether a PNG version of the file should also be created in the
      same output folder as the PDF.
    :returns:
      Tuple containing: 
        - Name of PNG file created, or None if PNG output not specified.
        - CartopyCities object containing the cities that were rendered on the contour map.
    """
    #load the shakemap - for the time being, we're interpolating the
    #population data to the shakemap, which would be important
    #if we were doing math with the pop values.  We're not, so I think it's ok.
    shakegrid = ShakeGrid.load(shakefile, adjust='res')
    gd = shakegrid.getGeoDict()

    #retrieve the epicenter - this will get used on the map
    clat = shakegrid.getEventDict()['lat']
    clon = shakegrid.getEventDict()['lon']

    #load the population data, sample to shakemap
    popgrid = GDALGrid.load(popfile, samplegeodict=gd, resample=True)
    popdata = popgrid.getData()

    #smooth the MMI data for contouring
    mmi = shakegrid.getLayer('mmi').getData()
    smoothed_mmi = gaussian_filter(mmi, FILTER_SMOOTH)

    #clip the ocean data to the shakemap
    bbox = (gd.xmin, gd.ymin, gd.xmax, gd.ymax)
    oceanshapes = _clip_bounds(bbox, oceanfile)

    #load the cities data, limit to cities within shakemap bounds
    allcities = CartopyCities.fromDefault()
    cities = allcities.limitByBounds((gd.xmin, gd.xmax, gd.ymin, gd.ymax))

    # Define ocean/land masks to do the contours, since we want different contour line styles over land and water.
    oceangrid = Grid2D.rasterizeFromGeometry(oceanshapes,
                                             gd,
                                             burnValue=1.0,
                                             fillValue=0.0,
                                             mustContainCenter=False,
                                             attribute=None)
    oceanmask = np.ma.masked_where(oceangrid == 1.0, smoothed_mmi)
    landmask = np.ma.masked_where(oceangrid == 0.0, smoothed_mmi)

    # Use our GMT-inspired palette class to create population and MMI colormaps
    popmap = ColorPalette.fromPreset('pop')
    mmimap = ColorPalette.fromPreset('mmi')

    #use the ShakeMap to determine the aspect ratio of the map
    aspect = (gd.xmax - gd.xmin) / (gd.ymax - gd.ymin)
    figheight = FIGWIDTH / aspect
    fig = plt.figure(figsize=(FIGWIDTH, figheight))

    # set up axes object with PlateCaree (non) projection.
    ax = plt.axes([0.02, 0.02, 0.95, 0.95], projection=ccrs.PlateCarree())

    #set the image extent to that of the data
    img_extent = (gd.xmin, gd.xmax, gd.ymin, gd.ymax)
    plt.imshow(popdata,
               origin='upper',
               extent=img_extent,
               cmap=popmap.cmap,
               vmin=popmap.vmin,
               vmax=popmap.vmax,
               zorder=9,
               interpolation='none')

    #define arrays of latitude and longitude we will use to plot MMI contours
    lat = np.linspace(gd.ymin, gd.ymax, gd.ny)
    lon = np.linspace(gd.xmin, gd.xmax, gd.nx)

    #contour the masked land/ocean MMI data at half-integer levels
    plt.contour(lon,
                lat,
                landmask,
                linewidths=3.0,
                linestyles='solid',
                zorder=10,
                cmap=mmimap.cmap,
                vmin=mmimap.vmin,
                vmax=mmimap.vmax,
                levels=np.arange(0.5, 10.5, 1.0))

    plt.contour(lon,
                lat,
                oceanmask,
                linewidths=2.0,
                linestyles='dashed',
                zorder=13,
                cmap=mmimap.cmap,
                vmin=mmimap.vmin,
                vmax=mmimap.vmax,
                levels=np.arange(0.5, 10.5, 1.0))

    #the idea here is to plot invisible MMI contours at integer levels and then label them.
    #labeling part does not currently work.
    cs = plt.contour(lon,
                     lat,
                     landmask,
                     linewidths=0.0,
                     levels=np.arange(0, 11),
                     zorder=10)
    #clabel is not actually drawing anything, but it is blotting out a portion of the contour line.  ??
    ax.clabel(cs, np.arange(0, 11), colors='k', zorder=25)

    #set the extent of the map to our data
    ax.set_extent([lon.min(), lon.max(), lat.min(), lat.max()])

    #draw the ocean data
    if isinstance(oceanshapes[0], mPolygon):
        for shape in oceanshapes[0]:
            ocean_patch = PolygonPatch(shape,
                                       zorder=10,
                                       facecolor=WATERCOLOR,
                                       edgecolor=WATERCOLOR)
            ax.add_patch(ocean_patch)
    else:
        ocean_patch = PolygonPatch(oceanshapes[0],
                                   zorder=10,
                                   facecolor=WATERCOLOR,
                                   edgecolor=WATERCOLOR)
        ax.add_patch(ocean_patch)

    # add coastlines with desired scale of resolution
    ax.coastlines('10m', zorder=11)

    #draw meridians and parallels using Cartopy's functions for that
    gl = ax.gridlines(crs=ccrs.PlateCarree(),
                      draw_labels=True,
                      linewidth=2,
                      color=(0.9, 0.9, 0.9),
                      alpha=0.5,
                      linestyle='-',
                      zorder=20)
    gl.xlabels_top = False
    gl.xlabels_bottom = False
    gl.ylabels_left = False
    gl.ylabels_right = False
    gl.xlines = True
    xlocs = np.arange(np.floor(gd.xmin - 1), np.ceil(gd.xmax + 1))
    ylocs = np.arange(np.floor(gd.ymin - 1), np.ceil(gd.ymax + 1))
    gl.xlocator = mticker.FixedLocator(xlocs)
    gl.ylocator = mticker.FixedLocator(ylocs)
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlabel_style = {'size': 15, 'color': 'black'}
    gl.ylabel_style = {'size': 15, 'color': 'black'}

    #drawing our own tick labels INSIDE the plot, as Cartopy doesn't seem to support this.
    yrange = gd.ymax - gd.ymin
    xrange = gd.xmax - gd.xmin
    for xloc in gl.xlocator.locs:
        outside = xloc < gd.xmin or xloc > gd.xmax
        #don't draw labels when we're too close to either edge
        near_edge = (xloc - gd.xmin) < (xrange * 0.1) or (gd.xmax - xloc) < (
            xrange * 0.1)
        if outside or near_edge:
            continue
        if xloc < 0:
            xtext = r'$%s^\circ$W' % str(abs(int(xloc)))
        else:
            xtext = r'$%s^\circ$E' % str(int(xloc))
        ax.text(xloc,
                gd.ymax - (yrange / 35),
                xtext,
                fontsize=14,
                zorder=20,
                ha='center',
                fontname='Bitstream Vera Sans')

    for yloc in gl.ylocator.locs:
        outside = yloc < gd.ymin or yloc > gd.ymax
        #don't draw labels when we're too close to either edge
        near_edge = (yloc - gd.ymin) < (yrange * 0.1) or (gd.ymax - yloc) < (
            yrange * 0.1)
        if outside or near_edge:
            continue
        if yloc < 0:
            ytext = r'$%s^\circ$S' % str(abs(int(yloc)))
        else:
            ytext = r'$%s^\circ$N' % str(int(yloc))
        thing = ax.text(gd.xmin + (xrange / 100),
                        yloc,
                        ytext,
                        fontsize=14,
                        zorder=20,
                        va='center',
                        fontname='Bitstream Vera Sans')

    #Limit the number of cities we show - we may not want to use the population size
    #filter in the global case, but the map collision filter is a little sketchy right now.
    mapcities = cities.limitByPopulation(25000)
    mapcities = mapcities.limitByGrid()
    mapcities = mapcities.limitByMapCollision(ax, shadow=True)
    mapcities.renderToMap(ax, shadow=True, fontsize=12, zorder=11)

    #Get the corner of the map with the lowest population
    corner_rect, filled_corner = _get_open_corner(popgrid, ax)
    clat = round_to_nearest(clat, 1.0)
    clon = round_to_nearest(clon, 1.0)

    #draw a little globe in the corner showing in small-scale where the earthquake is located.
    proj = ccrs.Orthographic(central_latitude=clat, central_longitude=clon)
    ax2 = fig.add_axes(corner_rect, projection=proj)
    ax2.add_feature(cartopy.feature.OCEAN,
                    zorder=0,
                    facecolor=WATERCOLOR,
                    edgecolor=WATERCOLOR)
    ax2.add_feature(cartopy.feature.LAND, zorder=0, edgecolor='black')
    ax2.plot([clon], [clat],
             'w*',
             linewidth=1,
             markersize=16,
             markeredgecolor='k',
             markerfacecolor='r')
    gh = ax2.gridlines()
    ax2.set_global()
    ax2.outline_patch.set_edgecolor('black')
    ax2.outline_patch.set_linewidth(2)

    #Draw the map scale in the unoccupied lower corner.
    corner = 'lr'
    if filled_corner == 'lr':
        corner = 'll'
    draw_scale(ax, corner, pady=0.05, padx=0.05)

    plt.savefig(outfilename)

    pngfile = None
    if make_png:
        fpath, fname = os.path.split(outfilename)
        fbase, t = os.path.splitext(fname)
        pngfile = os.path.join(fpath, fbase + '.png')
        plt.savefig(pngfile)

    return (pngfile, mapcities)
Example #36
def classic(shakefile, config, uncertfile=None, saveinputs=False, regressionmodel='J_PGA', probtype='jibson2000', slopediv=1., codiv=1., bounds=None):
    """This function uses the Newmark method to estimate probability of failure at each grid cell.
    Factor of Safety and critical accelerations are calculated following Jibson et al. (2000) and the
    Newmark displacement is estimated using PGA, PGV, and/or Magnitude (depending on equation used)
    from Shakemap with regression equations from Jibson (2007), Rathje and Saygili (2008) and
    Saygili and Rathje (2009)

    :param shakefile: URL or complete file path to the location of the Shakemap to use as input
    :type shakefile: string:
    :param config: Model configuration file object containing locations of input files and other input values config = ConfigObj(configfilepath)
    :type config: ConfigObj
    :param uncertfile: complete file path to the location of the uncertainty.xml for the shakefile, if this is not None, it will compute the model for +-std in addition to the best estimate
    :param saveinputs: Whether or not to return the model input layers, False (default) returns only the model output (one layer)
    :type saveinputs: boolean
    :param regressionmodel:
        Newmark displacement regression model to use
        'J_PGA' (default) - PGA-based model from Jibson (2007) - equation 6
        'J_PGA_M' - PGA and M-based model from Jibson (2007) - equation 7
        'RS_PGA_M' - PGA and M-based model from Rathje and Saygili (2009)
        'RS_PGA_PGV' - PGA and PGV-based model from Saygili and Rathje (2008) - equation 6
    :type regressionmodel: string
    :param probtype: Method used to estimate probability. Entering 'jibson2000' uses equation 5 from Jibson et al. (2000) to estimate probability from Newmark displacement. 'threshold' uses a specified threshold of Newmark displacement (defined in config file) and assumes anything greater than this threshold fails
    :type probtype: string
    :param slopediv: Divide slope by this number to get slope in degrees (Verdin datasets need to be divided by 100)
    :type slopediv: float
    :param codiv: Divide cohesion by this number to get reasonable numbers (For Godt method, need to divide by 10 because that is how it was calibrated, but values are reasonable without multiplying for regular analysis)
    :type codiv: float

    :returns maplayers:  Dictionary containing output and input layers (if saveinputs=True) along with metadata formatted like maplayers['layer name']={'grid': mapio grid2D object, 'label': 'label for colorbar and top line of subtitle', 'type': 'output or input to model', 'description': 'detailed description of layer for subtitle, potentially including source information'}
    :type maplayers: OrderedDict

    :raises NameError: when unable to parse the config correctly (probably a formatting issue in the config file) or when unable to find the shakefile (ShakeMap URL or filepath) - these cause the program to end
    :raises NameError: when probtype does not match a predefined probability type, which causes the model to default to 'jibson2000'

    """
    # Empty refs
    slopesref = 'unknown'
    slopelref = 'unknown'
    cohesionlref = 'unknown'
    cohesionsref = 'unknown'
    frictionsref = 'unknown'
    frictionlref = 'unknown'
    modellref = 'unknown'
    modelsref = 'unknown'

    # Parse config - should make it so it uses defaults if any are missing...
    try:
        slopefile = config['mechanistic_models']['classic_newmark']['layers']['slope']['file']
        slopeunits = config['mechanistic_models']['classic_newmark']['layers']['slope']['units']
        cohesionfile = config['mechanistic_models']['classic_newmark']['layers']['cohesion']['file']
        cohesionunits = config['mechanistic_models']['classic_newmark']['layers']['cohesion']['units']
        frictionfile = config['mechanistic_models']['classic_newmark']['layers']['friction']['file']
        frictionunits = config['mechanistic_models']['classic_newmark']['layers']['friction']['units']

        thick = float(config['mechanistic_models']['classic_newmark']['parameters']['thick'])
        uwt = float(config['mechanistic_models']['classic_newmark']['parameters']['uwt'])
        nodata_cohesion = float(config['mechanistic_models']['classic_newmark']['parameters']['nodata_cohesion'])
        nodata_friction = float(config['mechanistic_models']['classic_newmark']['parameters']['nodata_friction'])
        try:
            dnthresh = float(config['mechanistic_models']['classic_newmark']['parameters']['dnthresh'])
        except KeyError:
            if probtype == 'threshold':
                dnthresh = 5.
                print('Unable to find dnthresh in config, using 5 cm')
            else:
                dnthresh = None
        fsthresh = float(config['mechanistic_models']['classic_newmark']['parameters']['fsthresh'])
        acthresh = float(config['mechanistic_models']['classic_newmark']['parameters']['acthresh'])
        slopethresh = float(config['mechanistic_models']['classic_newmark']['parameters']['slopethresh'])
        try:
            m = float(config['mechanistic_models']['classic_newmark']['parameters']['m'])
        except Exception:
            print('no constant saturated thickness specified, m=0 if no watertable file is found')
            m = 0.
    except Exception as e:
        raise NameError('Could not parse configfile, %s' % e)
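    # A minimal sketch of the ConfigObj layout implied by the lookups above
    # (illustrative placeholder paths and values, not from the original config):
    #   [mechanistic_models]
    #     [[classic_newmark]]
    #       [[[parameters]]]
    #         thick = 2.4
    #         uwt = 15.7
    #         nodata_cohesion = 1.0
    #         nodata_friction = 26.
    #         dnthresh = 5.
    #         fsthresh = 0.01
    #         acthresh = 0.0
    #         slopethresh = 5.
    #       [[[layers]]]
    #         [[[[slope]]]]
    #           file = /path/to/slope.bil
    #           units = degrees*100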

    try:  # Try to fetch source information from config
        modelsref = config['mechanistic_models']['classic_newmark']['shortref']
        modellref = config['mechanistic_models']['classic_newmark']['longref']
        slopesref = config['mechanistic_models']['classic_newmark']['layers']['slope']['shortref']
        slopelref = config['mechanistic_models']['classic_newmark']['layers']['slope']['longref']
        cohesionsref = config['mechanistic_models']['classic_newmark']['layers']['cohesion']['shortref']
        cohesionlref = config['mechanistic_models']['classic_newmark']['layers']['cohesion']['longref']
        frictionsref = config['mechanistic_models']['classic_newmark']['layers']['friction']['shortref']
        frictionlref = config['mechanistic_models']['classic_newmark']['layers']['friction']['longref']
    except Exception:
        print('Was not able to retrieve all references from config file. Continuing')

    # Cut and resample all files
    shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
    slpdict = GDALGrid.getFileGeoDict(slopefile)
    if bounds is not None:  # Make sure bounds are within ShakeMap Grid
        if shkgdict.xmin > bounds['xmin'] or shkgdict.xmax < bounds['xmax'] or shkgdict.ymin > bounds['ymin'] or shkgdict.ymax < bounds['ymax']:
            print('Specified bounds are outside shakemap area, using ShakeMap bounds instead')
            bounds = None
    if bounds is not None:
        tempgdict = GeoDict({'xmin': bounds['xmin'], 'ymin': bounds['ymin'], 'xmax': bounds['xmax'], 'ymax': bounds['ymax'], 'dx': 100., 'dy': 100., 'nx': 100, 'ny': 100}, adjust='res')
        gdict = slpdict.getBoundsWithin(tempgdict)
    else:  # Get boundaries from shakemap if not specified
        gdict = slpdict.getBoundsWithin(shkgdict)

    # Load in slope file
    slopegrid = GDALGrid.load(slopefile, samplegeodict=gdict, resample=False)
    gdict = slopegrid.getGeoDict()  # Get this again just in case it changed
    slope = slopegrid.getData().astype(float)/slopediv  # Adjust slope to degrees, if needed
    # Change any zero slopes to a very small number to avoid dividing by zero later
    slope[slope == 0] = 1e-8

    # Load in shakefile
    if not os.path.isfile(shakefile):
        if isURL(shakefile):
            shakefile = getGridURL(shakefile)  # downloads and returns a local file path
        else:
            raise NameError('Could not find "%s" as a file or a valid url' % shakefile)

    # Load in shakemap, resample to slope file (this will be important when go to higher res)
    shakemap = ShakeGrid.load(shakefile, samplegeodict=gdict, resample=True, method='linear', adjust='res')
    M = shakemap.getEventDict()['magnitude']
    # Read in uncertainty layer, if present
    if uncertfile is not None:
        try:
            uncert = ShakeGrid.load(uncertfile, samplegeodict=gdict, resample=True, method='linear', adjust='res')
        except Exception:
            print('Could not read uncertainty file, ignoring uncertainties')
            uncertfile = None

    # Read in the cohesion and friction files, resampled to slope grid
    cohesion = GDALGrid.load(cohesionfile, samplegeodict=gdict, resample=True, method='nearest').getData().astype(float)/codiv
    cohesion[np.isnan(cohesion)] = nodata_cohesion
    friction = GDALGrid.load(frictionfile, samplegeodict=gdict, resample=True, method='nearest').getData().astype(float)
    friction[np.isnan(friction)] = nodata_friction

    # See if there is a water table depth file and read it in if there is
    try:
        waterfile = config['mechanistic_models']['classic_newmark']['layers']['watertable']['file']
        watertable = GDALGrid.load(waterfile, samplegeodict=gdict, resample=True, method='linear').getData()  # Needs to be in meters!
        uwtw = float(config['mechanistic_models']['classic_newmark']['parameters']['uwtw'])
        try:
            watersref = config['mechanistic_models']['classic_newmark']['layers']['watertable']['shortref']
            waterlref = config['mechanistic_models']['classic_newmark']['layers']['watertable']['longref']
        except Exception:
            print('Was not able to retrieve water table references from config file. Continuing')

    except Exception:
        print(('Water table file not specified or readable, assuming constant saturated thickness proportion of %0.1f' % m))
        watertable = None
        try:
            uwtw = float(config['mechanistic_models']['classic_newmark']['parameters']['uwtw'])
        except Exception:
            print('Could not read soil wet unit weight, using 18.8 kN/m3')
            uwtw = 18.8

    # Factor of safety
    if watertable is not None:
        watertable[watertable > thick] = thick
        m = (thick - watertable)/thick
    FS = (cohesion/(uwt*thick*np.sin(slope*(np.pi/180.))) +
          np.tan(friction*(np.pi/180.))/np.tan(slope*(np.pi/180.)) -
          (m*uwtw*np.tan(friction*(np.pi/180.)))/(uwt*np.tan(slope*(np.pi/180.))))
    FS[FS < fsthresh] = fsthresh

    # Compute critical acceleration, in g
    Ac = (FS-1.)*np.sin(slope*(np.pi/180.))  # This gives ac in g, equations that multiply by g give ac in m/s2
    Ac[Ac < acthresh] = acthresh
    Ac[slope < slopethresh] = np.nan
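    # Worked scalar example of the two equations above (illustrative values,
    # not from the original source): for slope = 30 deg, cohesion = 10 kPa,
    # friction = 35 deg, thick = 2.4 m, uwt = 15.7 kN/m3 and m = 0 (dry),
    #   FS = 10/(15.7*2.4*sin(30)) + tan(35)/tan(30) ~ 0.53 + 1.21 = 1.74
    #   Ac = (1.74 - 1)*sin(30) ~ 0.37 g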

    # Get PGA in g (PGA is %g in ShakeMap, convert to g)
    PGA = shakemap.getLayer('pga').getData().astype(float)/100.
    PGV = shakemap.getLayer('pgv').getData().astype(float)
    if uncertfile is not None:
        stdpga = uncert.getLayer('stdpga')
        stdpgv = uncert.getLayer('stdpgv')
        # Estimate PGA +- 1std
        PGAmin = np.exp(np.log(PGA*100.) - stdpga.getData())/100.
        PGAmax = np.exp(np.log(PGA*100.) + stdpga.getData())/100.
        PGVmin = np.exp(np.log(PGV) - stdpgv.getData())
        PGVmax = np.exp(np.log(PGV) + stdpgv.getData())
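        # Illustrative check (assumed values, not from the original source):
        # for PGA = 0.2 g and stdpga = 0.5 (natural-log units),
        # PGAmin = exp(ln(20) - 0.5)/100 ~ 0.121 g and
        # PGAmax = exp(ln(20) + 0.5)/100 ~ 0.330 g.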

    np.seterr(invalid='ignore')  # Ignore errors so still runs when Ac > PGA, just leaves nan instead of crashing

    if regressionmodel == 'J_PGA':
        Dn = J_PGA(Ac, PGA)
        if uncertfile is not None:
            Dnmin = J_PGA(Ac, PGAmin)
            Dnmax = J_PGA(Ac, PGAmax)
    elif regressionmodel == 'J_PGA_M':
        Dn = J_PGA_M(Ac, PGA, M)
        if uncertfile is not None:
            Dnmin = J_PGA_M(Ac, PGAmin, M)
            Dnmax = J_PGA_M(Ac, PGAmax, M)

    elif regressionmodel == 'RS_PGA_M':
        Dn = RS_PGA_M(Ac, PGA, M)
        if uncertfile is not None:
            Dnmin = RS_PGA_M(Ac, PGAmin, M)
            Dnmax = RS_PGA_M(Ac, PGAmax, M)

    elif regressionmodel == 'RS_PGA_PGV':
        Dn = RS_PGA_PGV(Ac, PGA, PGV)
        if uncertfile is not None:
            Dnmin = RS_PGA_PGV(Ac, PGAmin, PGVmin)
            Dnmax = RS_PGA_PGV(Ac, PGAmax, PGVmax)
    else:
        print('Unrecognized regression model, aborting')
        return

    units = 'probability'
    label = 'Landslide Probability'
    if probtype.lower() == 'jibson2000':
        PROB = 0.335*(1-np.exp(-0.048*Dn**1.565))
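        # Illustrative value: Dn = 10 cm gives
        # PROB = 0.335*(1 - exp(-0.048*10**1.565)) ~ 0.28, close to the
        # 0.335 ceiling of the Jibson et al. (2000) curve.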
        dnthresh = None
        if uncertfile is not None:
            PROBmin = 0.335*(1-np.exp(-0.048*Dnmin**1.565))
            PROBmax = 0.335*(1-np.exp(-0.048*Dnmax**1.565))
    elif probtype.lower() == 'threshold':
        PROB = Dn.copy()
        PROB[PROB <= dnthresh] = 0
        PROB[PROB > dnthresh] = 1
        units = 'prediction'
        label = 'Predicted Landslides'
        if uncertfile is not None:
            PROBmin = Dnmin.copy()
            PROBmin[PROBmin <= dnthresh] = 0
            PROBmin[PROBmin > dnthresh] = 1
            PROBmax = Dnmax.copy()
            PROBmax[PROBmax <= dnthresh] = 0
            PROBmax[PROBmax > dnthresh] = 1
    else:
        print('Invalid probtype %s, defaulting to jibson2000' % probtype)
        PROB = 0.335*(1-np.exp(-0.048*Dn**1.565))
        dnthresh = None
        if uncertfile is not None:
            PROBmin = 0.335*(1-np.exp(-0.048*Dnmin**1.565))
            PROBmax = 0.335*(1-np.exp(-0.048*Dnmax**1.565))

    # Turn outputs and inputs into grids and put in maplayers dictionary
    maplayers = collections.OrderedDict()

    temp = shakemap.getShakeDict()
    shakedetail = '%s_ver%s' % (temp['shakemap_id'], temp['shakemap_version'])

    if watertable is not None:
        des = 'variable'
    else:
        des = m
    description = {'name': modelsref, 'longref': modellref, 'units': units, 'shakemap': shakedetail, 'parameters': {'regressionmodel': regressionmodel, 'thickness_m': thick, 'unitwt_kNm3': uwt, 'dnthresh_cm': dnthresh, 'acthresh_g': acthresh, 'fsthresh': fsthresh, 'slopethresh': slopethresh, 'sat_proportion': des}}

    maplayers['model'] = {'grid': GDALGrid(PROB, gdict), 'label': label, 'type': 'output', 'description': description}
    if uncertfile is not None:
        maplayers['modelmin'] = {'grid': GDALGrid(PROBmin, gdict), 'label': label+' -1std', 'type': 'output', 'description': description}
        maplayers['modelmax'] = {'grid': GDALGrid(PROBmax, gdict), 'label': label+' +1std', 'type': 'output', 'description': description}

    if saveinputs is True:
        maplayers['pga'] = {'grid': GDALGrid(PGA, gdict), 'label': 'PGA (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}}
        maplayers['FS'] = {'grid': GDALGrid(FS, gdict), 'label': 'Factor of Safety', 'type': 'input', 'description': {'units': 'unitless'}}
        maplayers['Ac'] = {'grid': GDALGrid(Ac, gdict), 'label': 'Critical acceleration (g)', 'type': 'input'}
        maplayers['Dn'] = {'grid': GDALGrid(Dn, gdict), 'label': 'Newmark Displacement (cm)', 'type': 'input'}
        maplayers['slope'] = {'grid': GDALGrid(slope, gdict), 'label': r'Max slope ($^\circ$)', 'type': 'input', 'description': {'units': 'degrees', 'name': slopesref, 'longref': slopelref}}
        maplayers['cohesion'] = {'grid': GDALGrid(cohesion, gdict), 'label': 'Cohesion (kPa)', 'type': 'input', 'description': {'units': 'kPa (adjusted)', 'name': cohesionsref, 'longref': cohesionlref}}
        maplayers['friction angle'] = {'grid': GDALGrid(friction, gdict), 'label': r'Friction angle ($^\circ$)', 'type': 'input', 'description': {'units': 'degrees', 'name': frictionsref, 'longref': frictionlref}}
        if uncertfile is not None:
            maplayers['pgamin'] = {'grid': GDALGrid(PGAmin, gdict), 'label': 'PGA - 1std (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}}
            maplayers['pgamax'] = {'grid': GDALGrid(PGAmax, gdict), 'label': 'PGA + 1std (g)', 'type': 'input', 'description': {'units': 'g', 'shakemap': shakedetail}}
        if 'PGV' in regressionmodel:
            maplayers['pgv'] = {'grid': GDALGrid(PGV, gdict), 'label': 'PGV (cm/s)', 'type': 'input', 'description': {'units': 'cm/s', 'shakemap': shakedetail}}
            if uncertfile is not None:
                maplayers['pgvmin'] = {'grid': GDALGrid(PGVmin, gdict), 'label': 'PGV - 1std (cm/s)', 'type': 'input', 'description': {'units': 'cm/s', 'shakemap': shakedetail}}
                maplayers['pgvmax'] = {'grid': GDALGrid(PGVmax, gdict), 'label': 'PGV + 1std (cm/s)', 'type': 'input', 'description': {'units': 'cm/s', 'shakemap': shakedetail}}
        if watertable is not None:
            maplayers['sat thick prop'] = {'grid': GDALGrid(m, gdict), 'label': 'Saturated thickness proportion [0,1]', 'type': 'input', 'description': {'units': 'unitless proportion', 'name': watersref, 'longref': waterlref}}

    return maplayers
Exemple #37
0
def godt2008(shakefile,
             config,
             uncertfile=None,
             saveinputs=False,
             displmodel=None,
             bounds=None,
             slopediv=100.,
             codiv=10.,
             numstd=None,
             trimfile=None):
    """
    This function runs the Godt and others (2008) global method for a given
    ShakeMap. The Factor of Safety is calculated using infinite slope analysis
    assuming dry conditions. The method uses a threshold Newmark displacement
    and estimates areal coverage by doing the calculations for each slope
    quantile.

    Args:
        shakefile (str): Path to shakemap xml file.
        config (ConfigObj): ConfigObj of config file containing inputs required
            for running the model
        uncertfile (str): Path to shakemap uncertainty xml file (optional).
        saveinputs (bool): Whether or not to return the model input layers,
            False (default) returns only the model output (one layer).
        displmodel (str): Newmark displacement regression model to use

            * ``'J_PGA'`` (default) -- PGA-based model, equation 6 from
              Jibson (2007).
            * ``'J_PGA_M'`` -- PGA and M-based model, equation 7 from
              Jibson (2007).
            * ``'RS_PGA_M'`` -- PGA and M-based model from Rathje and
              Saygili (2009).
            * ``'RS_PGA_PGV'`` -- PGA and PGV-based model, equation 6
              from Saygili and Rathje (2008).

        bounds (dict): Optional dictionary with keys 'xmin', 'xmax', 'ymin',
            'ymax' that defines a subset of the shakemap area to compute.
        slopediv (float): Divide slope by this number to get slope in degrees
            (Verdin datasets need to be divided by 100).
        codiv (float): Divide cohesion input layer by this number
            (For Godt method, need to divide by 10 because that is how it was
            calibrated).
        numstd (float): Number of (+/-) standard deviations to use if
            uncertainty is computed (uncertfile must be supplied).
        trimfile (str): Path to shapefile of Earth's land masses used to trim
            offshore areas from the model output.

    Returns:
        dict: Dictionary containing output and input layers (if
        saveinputs=True):

        .. code-block:: python

            {
                'grid': mapio grid2D object,
                'label': 'label for colorbar and top line of subtitle',
                'type': 'output or input to model',
                'description': {'name': 'short reference of model',
                                'longref': 'full model reference',
                                'units': 'units of output',
                                'shakemap': 'information about shakemap used',
                                'event_id': 'shakemap event id',
                                'parameters': 'dictionary of model parameters
                                               used'}
            }

    Raises:
         NameError: when unable to parse the config correctly (probably a
             formatting issue in the configfile) or when unable to find the
             shakefile (Shakemap filepath) -- these cause program to end.

    """
    # TODO:
    #    - Add 'all' -- averages Dn from all four equations, add term to
    #      convert PGA and PGV to Ia and use other equations, add Ambraseys and
    #      Menu (1988) option.

    # Empty refs
    slopesref = 'unknown'
    slopelref = 'unknown'
    cohesionlref = 'unknown'
    cohesionsref = 'unknown'
    frictionsref = 'unknown'
    frictionlref = 'unknown'
    modellref = 'unknown'
    modelsref = 'unknown'

    # See if trimfile exists
    if trimfile is not None:
        if not os.path.exists(trimfile):
            print('trimfile defined does not exist: %s\n'
                  'Ocean will not be trimmed' % trimfile)
            trimfile = None
        elif os.path.splitext(trimfile)[1] != '.shp':
            print('trimfile must be a shapefile, ocean will not be trimmed')
            trimfile = None

    # Parse config
    try:  # May want to add error handling so if refs aren't given, just
        # includes unknown
        slopefilepath = config['godt_2008']['layers']['slope']['filepath']
        slopeunits = config['godt_2008']['layers']['slope']['units']
        cohesionfile = config['godt_2008']['layers']['cohesion']['file']
        cohesionunits = config['godt_2008']['layers']['cohesion']['units']
        frictionfile = config['godt_2008']['layers']['friction']['file']
        frictionunits = config['godt_2008']['layers']['friction']['units']

        thick = float(config['godt_2008']['parameters']['thick'])
        uwt = float(config['godt_2008']['parameters']['uwt'])
        nodata_cohesion = \
            float(config['godt_2008']['parameters']['nodata_cohesion'])
        nodata_friction = \
            float(config['godt_2008']['parameters']['nodata_friction'])
        dnthresh = float(config['godt_2008']['parameters']['dnthresh'])
        fsthresh = float(config['godt_2008']['parameters']['fsthresh'])
        acthresh = float(config['godt_2008']['parameters']['acthresh'])
        try:
            slopemin = float(config['godt_2008']['parameters']['slopemin'])
        except Exception:
            slopemin = 0.01
            print('No slopemin found in config file, using 0.01 deg '
                  'for slope minimum')
    except Exception as e:
        raise NameError('Could not parse configfile, %s' % e)

    if displmodel is None:
        try:
            displmodel = config['godt_2008']['parameters']['displmodel']
        except Exception:
            print('No regression model specified, using default of J_PGA_M')
            displmodel = 'J_PGA_M'

    # TO DO: ADD ERROR CATCHING ON UNITS, MAKE SURE THEY ARE WHAT THEY SHOULD
    #        BE FOR THIS MODEL

    try:  # Try to fetch source information from config
        modelsref = config['godt_2008']['shortref']
        modellref = config['godt_2008']['longref']
        slopesref = config['godt_2008']['layers']['slope']['shortref']
        slopelref = config['godt_2008']['layers']['slope']['longref']
        cohesionsref = config['godt_2008']['layers']['cohesion']['shortref']
        cohesionlref = config['godt_2008']['layers']['cohesion']['longref']
        frictionsref = config['godt_2008']['layers']['friction']['shortref']
        frictionlref = config['godt_2008']['layers']['friction']['longref']
    except Exception:
        print('Was not able to retrieve all references from config file. '
              'Continuing')

    # Figure out how/if need to cut anything
    geodict = ShakeGrid.getFileGeoDict(shakefile)  # , adjust='res')
    if bounds is not None:  # Make sure bounds are within ShakeMap Grid
        if geodict.xmin < geodict.xmax:  # only if signs are not opposite
            if (geodict.xmin > bounds['xmin'] or geodict.xmax < bounds['xmax']
                    or geodict.ymin > bounds['ymin']
                    or geodict.ymax < bounds['ymax']):
                print('Specified bounds are outside shakemap area, using '
                      'ShakeMap bounds instead.')
                bounds = None

    if bounds is not None:
        tempgdict = GeoDict.createDictFromBox(bounds['xmin'],
                                              bounds['xmax'],
                                              bounds['ymin'],
                                              bounds['ymax'],
                                              geodict.dx,
                                              geodict.dy,
                                              inside=False)
        # If Shakemap geodict crosses 180/-180 line, fix geodict so things don't break
        if geodict.xmin > geodict.xmax:
            if tempgdict.xmin < 0:
                geodict._xmin -= 360.
            else:
                geodict._xmax += 360.
        geodict = geodict.getBoundsWithin(tempgdict)

    basegeodict, firstcol = GDALGrid.getFileGeoDict(
        os.path.join(slopefilepath, 'slope_min.bil'))
    if basegeodict == geodict:
        sampledict = geodict
    else:
        sampledict = basegeodict.getBoundsWithin(geodict)

    # Do we need to subdivide baselayer?
    if 'divfactor' in config['godt_2008'].keys():
        divfactor = float(config['godt_2008']['divfactor'])
        if divfactor != 1.:
            # adjust sampledict so everything will be resampled (cut one cell
            # of each edge so will be inside bounds)
            newxmin = sampledict.xmin - sampledict.dx/2. + \
                sampledict.dx/(2.*divfactor) + sampledict.dx
            newymin = sampledict.ymin - sampledict.dy/2. + \
                sampledict.dy/(2.*divfactor) + sampledict.dy
            newxmax = sampledict.xmax + sampledict.dx/2. - \
                sampledict.dx/(2.*divfactor) - sampledict.dx
            newymax = sampledict.ymax + sampledict.dy/2. - \
                sampledict.dy/(2.*divfactor) - sampledict.dy
            newdx = sampledict.dx / divfactor
            newdy = sampledict.dy / divfactor

            sampledict = GeoDict.createDictFromBox(newxmin,
                                                   newxmax,
                                                   newymin,
                                                   newymax,
                                                   newdx,
                                                   newdy,
                                                   inside=True)
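            # Illustrative numbers (not from the original source): for a
            # 30 arcsec base grid (dx = dy ~ 0.008333 deg) and divfactor = 4,
            # the resampled grid has dx = dy ~ 0.002083 deg (~7.5 arcsec).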

    tmpdir = tempfile.mkdtemp()

    # Load in ShakeMap and get new geodictionary
    temp = ShakeGrid.load(shakefile)  # , adjust='res')
    junkfile = os.path.join(tmpdir, 'temp.bil')
    GDALGrid.copyFromGrid(temp.getLayer('pga')).save(junkfile)
    pga = quickcut(junkfile, sampledict, precise=True, method='bilinear')
    os.remove(junkfile)
    GDALGrid.copyFromGrid(temp.getLayer('pgv')).save(junkfile)
    pgv = quickcut(junkfile, sampledict, precise=True, method='bilinear')
    os.remove(junkfile)
    # Update geodictionary
    sampledict = pga.getGeoDict()

    t2 = temp.getEventDict()
    M = t2['magnitude']
    event_id = t2['event_id']
    shakedict = temp.getShakeDict()
    del temp

    # read in uncertainty if present
    if uncertfile is not None:
        try:
            temp = ShakeGrid.load(uncertfile)  # , adjust='res')
            GDALGrid.copyFromGrid(temp.getLayer('stdpga')).save(junkfile)
            uncertpga = quickcut(junkfile,
                                 sampledict,
                                 precise=True,
                                 method='bilinear',
                                 override=True)
            os.remove(junkfile)
            GDALGrid.copyFromGrid(temp.getLayer('stdpgv')).save(junkfile)
            uncertpgv = quickcut(junkfile,
                                 sampledict,
                                 precise=True,
                                 method='bilinear',
                                 override=True)
            os.remove(junkfile)
        except:
            print('Could not read uncertainty file, ignoring uncertainties')
            uncertfile = None
        if numstd is None:
            numstd = 1.

    # Read in all the slope files and divide by slopediv (default 100) to get
    # slope in degrees (the input Verdin files store slope multiplied by 100)
    slopes = []
    quantiles = [
        'slope_min.bil', 'slope10.bil', 'slope30.bil', 'slope50.bil',
        'slope70.bil', 'slope90.bil', 'slope_max.bil'
    ]
    for quant in quantiles:
        tmpslp = quickcut(os.path.join(slopefilepath, quant), sampledict)
        tgd = tmpslp.getGeoDict()
        if tgd != sampledict:
            raise Exception('Input layers are not aligned to same geodict')
        else:
            slopes.append(tmpslp.getData() / slopediv)

    slopestack = np.dstack(slopes)

    # Change any zero slopes to a very small number to avoid dividing by
    # zero later
    slopestack[slopestack == 0] = 1e-8

    # Read in the cohesion and friction files and duplicate layers so they
    # are same shape as slope structure

    tempco = quickcut(cohesionfile, sampledict, method='near')
    tempco = tempco.getData()[:, :, np.newaxis] / codiv
    cohesion = np.repeat(tempco, 7, axis=2)
    cohesion[cohesion == -999.9] = nodata_cohesion
    cohesion = np.nan_to_num(cohesion)
    cohesion[cohesion == 0] = nodata_cohesion

    tempfric = quickcut(frictionfile, sampledict, method='near')
    tempfric = tempfric.getData().astype(float)[:, :, np.newaxis]
    friction = np.repeat(tempfric, 7, axis=2)
    friction[friction == -9999] = nodata_friction
    friction = np.nan_to_num(friction)
    friction[friction == 0] = nodata_friction

    # Do the calculations using Jibson (2007) PGA only model for Dn
    FS = (cohesion / (uwt * thick * np.sin(slopestack * (np.pi / 180.))) +
          np.tan(friction * (np.pi / 180.)) / np.tan(slopestack *
                                                     (np.pi / 180.)))
    FS[FS < fsthresh] = fsthresh

    # Compute critical acceleration, in g
    # This gives ac in g, equations that multiply by g give ac in m/s2
    Ac = (FS - 1) * np.sin(slopestack * (np.pi / 180.)).astype(float)
    Ac[Ac < acthresh] = acthresh

    # Get PGA in g (PGA is %g in ShakeMap, convert to g)
    PGA = np.repeat(pga.getData()[:, :, np.newaxis] / 100., 7,
                    axis=2).astype(float)
    if 'PGV' in displmodel:  # Load in PGV also, in cm/sec
        PGV = np.repeat(pgv.getData()[:, :, np.newaxis], 7,
                        axis=2).astype(float)
    else:
        PGV = None

    if uncertfile is not None:
        stdpga = np.repeat(uncertpga.getData()[:, :, np.newaxis], 7,
                           axis=2).astype(float)
        stdpgv = np.repeat(uncertpgv.getData()[:, :, np.newaxis], 7,
                           axis=2).astype(float)
        # estimate PGA +- 1std
        PGAmin = np.exp(np.log(PGA * 100) - numstd * stdpga) / 100
        PGAmax = np.exp(np.log(PGA * 100) + numstd * stdpga) / 100
        if 'PGV' in displmodel:
            PGVmin = np.exp(np.log(PGV) - numstd * stdpgv)
            PGVmax = np.exp(np.log(PGV) + numstd * stdpgv)
        else:
            PGVmin = None
            PGVmax = None

    # Ignore errors so still runs when Ac > PGA, just leaves nan instead
    # of crashing.
    np.seterr(invalid='ignore')

    Dn, logDnstd, logtype = NMdisp(Ac, PGA, model=displmodel, M=M, PGV=PGV)
    if uncertfile is not None:
        Dnmin, logDnstdmin, logtype = NMdisp(Ac,
                                             PGAmin,
                                             model=displmodel,
                                             M=M,
                                             PGV=PGVmin)
        Dnmax, logDnstdmax, logtype = NMdisp(Ac,
                                             PGAmax,
                                             model=displmodel,
                                             M=M,
                                             PGV=PGVmax)

    PROB = Dn.copy()
    PROB[PROB < dnthresh] = 0.
    PROB[PROB >= dnthresh] = 1.
    PROB = np.sum(PROB, axis=2)
    if uncertfile is not None:
        PROBmin = Dnmin.copy()
        PROBmin[PROBmin <= dnthresh] = 0.
        PROBmin[PROBmin > dnthresh] = 1.
        PROBmin = np.sum(PROBmin, axis=2)
        PROBmax = Dnmax.copy()
        PROBmax[PROBmax <= dnthresh] = 0.
        PROBmax[PROBmax > dnthresh] = 1.
        PROBmax = np.sum(PROBmax, axis=2)

    PROB[PROB == 1.] = 0.01
    PROB[PROB == 2.] = 0.10
    PROB[PROB == 3.] = 0.30
    PROB[PROB == 4.] = 0.50
    PROB[PROB == 5.] = 0.70
    PROB[PROB == 6.] = 0.90
    PROB[PROB == 7.] = 0.99
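    # Illustrative reading of the mapping above: if Dn >= dnthresh for 3 of
    # the 7 slope quantiles (min, 10th, 30th, 50th, 70th, 90th, max), the
    # summed count of 3 maps to an areal coverage of 0.30 for that cell.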

    if uncertfile is not None:
        PROBmin[PROBmin == 1.] = 0.01
        PROBmin[PROBmin == 2.] = 0.10
        PROBmin[PROBmin == 3.] = 0.30
        PROBmin[PROBmin == 4.] = 0.50
        PROBmin[PROBmin == 5.] = 0.70
        PROBmin[PROBmin == 6.] = 0.90
        PROBmin[PROBmin == 7.] = 0.99
        PROBmax[PROBmax == 1.] = 0.01
        PROBmax[PROBmax == 2.] = 0.10
        PROBmax[PROBmax == 3.] = 0.30
        PROBmax[PROBmax == 4.] = 0.50
        PROBmax[PROBmax == 5.] = 0.70
        PROBmax[PROBmax == 6.] = 0.90
        PROBmax[PROBmax == 7.] = 0.99

    if slopemin is not None:
        PROB[slopestack[:, :, 6] <= slopemin] = 0.
        # uncert too
        if uncertfile is not None:
            PROBmin[slopestack[:, :, 6] <= slopemin] = 0.
            PROBmax[slopestack[:, :, 6] <= slopemin] = 0.

    # Turn outputs and inputs into grids and put in maplayers dictionary
    maplayers = collections.OrderedDict()

    shakedetail = '%s_ver%s' % (shakedict['shakemap_id'],
                                shakedict['shakemap_version'])

    description = {
        'name': modelsref,
        'longref': modellref,
        'units': 'Proportion of Area Affected',
        'shakemap': shakedetail,
        'event_id': event_id,
        'parameters': {
            'displmodel': displmodel,
            'thickness_m': thick,
            'unitwt_kNm3': uwt,
            'dnthresh_cm': dnthresh,
            'acthresh_g': acthresh,
            'fsthresh': fsthresh,
            'modeltype': 'Landslide'
        }
    }
    PROBgrid = GDALGrid(PROB, sampledict)
    if trimfile is not None:
        PROBgrid = trim_ocean(PROBgrid, trimfile)

    maplayers['model'] = {
        'grid': PROBgrid,
        'label': 'Landslide - Proportion of Area Affected',
        'type': 'output',
        'description': description
    }

    if uncertfile is not None:
        PROBmingrid = GDALGrid(PROBmin, sampledict)
        PROBmaxgrid = GDALGrid(PROBmax, sampledict)
        if trimfile is not None:
            PROBmingrid = trim_ocean(PROBmingrid, trimfile)
            PROBmaxgrid = trim_ocean(PROBmaxgrid, trimfile)
        maplayers['modelmin'] = {
            'grid': PROBmingrid,
            'label': 'Landslide Probability-%1.2fstd' % numstd,
            'type': 'output',
            'description': description
        }
        maplayers['modelmax'] = {
            'grid': PROBmaxgrid,
            'label': 'Landslide Probability+%1.2fstd' % numstd,
            'type': 'output',
            'description': description
        }

    if saveinputs is True:
        maplayers['pga'] = {
            'grid': GDALGrid(PGA[:, :, 0], sampledict),
            'label': 'PGA (g)',
            'type': 'input',
            'description': {
                'units': 'g',
                'shakemap': shakedetail
            }
        }
        if 'PGV' in displmodel:
            maplayers['pgv'] = {
                'grid': GDALGrid(PGV[:, :, 0], sampledict),
                'label': 'PGV (cm/s)',
                'type': 'input',
                'description': {
                    'units': 'cm/s',
                    'shakemap': shakedetail
                }
            }
        maplayers['minFS'] = {
            'grid': GDALGrid(np.min(FS, axis=2), sampledict),
            'label': 'Min Factor of Safety',
            'type': 'input',
            'description': {
                'units': 'unitless'
            }
        }
        maplayers['max slope'] = {
            'grid': GDALGrid(slopestack[:, :, -1], sampledict),
            'label': r'Maximum slope ($^\circ$)',
            'type': 'input',
            'description': {
                'units': 'degrees',
                'name': slopesref,
                'longref': slopelref
            }
        }
        maplayers['cohesion'] = {
            'grid': GDALGrid(cohesion[:, :, 0], sampledict),
            'label': 'Cohesion (kPa)',
            'type': 'input',
            'description': {
                'units': 'kPa (adjusted)',
                'name': cohesionsref,
                'longref': cohesionlref
            }
        }
        maplayers['friction angle'] = {
            'grid': GDALGrid(friction[:, :, 0], sampledict),
            'label': r'Friction angle ($^\circ$)',
            'type': 'input',
            'description': {
                'units': 'degrees',
                'name': frictionsref,
                'longref': frictionlref
            }
        }
        if uncertfile is not None:
            maplayers['pgamin'] = {
                'grid': GDALGrid(PGAmin[:, :, 0], sampledict),
                'label': 'PGA - %1.2fstd (g)' % numstd,
                'type': 'input',
                'description': {
                    'units': 'g',
                    'shakemap': shakedetail
                }
            }
            maplayers['pgamax'] = {
                'grid': GDALGrid(PGAmax[:, :, 0], sampledict),
                'label': 'PGA + %1.2fstd (g)' % numstd,
                'type': 'input',
                'description': {
                    'units': 'g',
                    'shakemap': shakedetail
                }
            }
        if 'PGV' in displmodel:
            if uncertfile is not None:
                maplayers['pgvmin'] = {
                    'grid': GDALGrid(PGVmin[:, :, 0], sampledict),
                    'label': 'PGV - %1.2fstd (cm/s)' % numstd,
                    'type': 'input',
                    'description': {
                        'units': 'cm/s',
                        'shakemap': shakedetail
                    }
                }
                maplayers['pgvmax'] = {
                    'grid': GDALGrid(PGVmax[:, :, 0], sampledict),
                    'label': 'PGV + %1.2fstd (cm/s)' % numstd,
                    'type': 'input',
                    'description': {
                        'units': 'cm/s',
                        'shakemap': shakedetail
                    }
                }

    shutil.rmtree(tmpdir)

    return maplayers
Exemple #38
0
    def execute(self):
        """Create grid.xml and uncertainty.xml files.

        Raises:
            NotADirectoryError: When the event data directory does not exist.
            FileNotFoundError: When the shake_result HDF file does not
                exist.
        """
        logger = logging.getLogger(__name__)
        install_path, data_path = get_config_paths()
        datadir = os.path.join(data_path, self._eventid, 'current', 'products')
        if not os.path.isdir(datadir):
            raise NotADirectoryError('%s is not a valid directory.' % datadir)
        datafile = os.path.join(datadir, 'shake_result.hdf')
        if not os.path.isfile(datafile):
            raise FileNotFoundError('%s does not exist.' % datafile)

        # Open the ShakeMapOutputContainer and extract the data
        container = ShakeMapOutputContainer.load(datafile)

        # get all of the grid layers and the geodict
        if container.getDataType() != 'grid':
            raise NotImplementedError('gridxml module can only function on '
                                      'gridded data, not sets of points')
        gridnames = container.getIMTs(COMPONENT)
        xml_types = ['grid', 'uncertainty']
        for xml_type in xml_types:
            layers = OrderedDict()
            field_keys = OrderedDict()
            for gridname in gridnames:
                imt_field = _oq_to_gridxml(gridname)
                imtdict = container.getIMTGrids(gridname, COMPONENT)
                if xml_type == 'grid':
                    grid = imtdict['mean']
                    metadata = imtdict['mean_metadata']
                elif xml_type == 'uncertainty':
                    grid = imtdict['std']
                    metadata = imtdict['std_metadata']

                units = metadata['units']
                digits = metadata['digits']
                grid_data = grid.getData()
                # convert from HDF units to legacy grid.xml units
                if xml_type == 'grid':
                    if units == 'ln(cm/s)':
                        grid_data = np.exp(grid_data)
                        units = 'cm/s'
                    elif units == 'ln(g)':
                        grid_data = np.exp(grid_data) * 100
                        units = '%g'
                    else:
                        pass
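                # Illustrative conversion: a mean of ln(0.1) ~ -2.303 stored
                # in ln(g) becomes exp(-2.303)*100 ~ 10 %g in grid.xml.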

                if xml_type == 'grid':
                    layers[imt_field] = grid_data
                    field_keys[imt_field] = (units, digits)
                else:
                    layers['STD' + imt_field] = grid_data
                    field_keys['STD' + imt_field] = (units, digits)

            geodict = grid.getGeoDict()

            config = container.getConfig()

            # event dictionary
            info = container.getMetadata()
            event_info = info['input']['event_information']
            event_dict = {}
            event_dict['event_id'] = event_info['event_id']
            event_dict['magnitude'] = float(event_info['magnitude'])
            event_dict['depth'] = float(event_info['depth'])
            event_dict['lat'] = float(event_info['latitude'])
            event_dict['lon'] = float(event_info['longitude'])
            event_dict['event_timestamp'] = datetime.strptime(
                event_info['origin_time'], TIMEFMT)
            event_dict['event_description'] = event_info['location']
            event_dict['event_network'] = \
                info['input']['event_information']['eventsource']

            # shake dictionary
            shake_dict = {}
            shake_dict['event_id'] = event_dict['event_id']
            shake_dict['shakemap_id'] = event_dict['event_id']
            shake_dict['shakemap_version'] = \
                info['processing']['shakemap_versions']['map_version']
            shake_dict['code_version'] = shakemap.__version__
            ptime = info['processing']['shakemap_versions']['process_time']
            shake_dict['process_timestamp'] = datetime.strptime(ptime, TIMEFMT)
            shake_dict['shakemap_originator'] = \
                config['system']['source_network']
            shake_dict['map_status'] = config['system']['map_status']
            shake_dict['shakemap_event_type'] = 'ACTUAL'
            if event_dict['event_id'].endswith('_se'):
                shake_dict['shakemap_event_type'] = 'SCENARIO'

            shake_grid = ShakeGrid(
                layers, geodict, event_dict,
                shake_dict, {}, field_keys=field_keys)
            fname = os.path.join(datadir, '%s.xml' % xml_type)
            logger.debug('Saving IMT grids to %s' % fname)
            shake_grid.save(fname)  # TODO - set grid version number

        container.close()
Exemple #39
0
def test_save():
    tdir = tempfile.mkdtemp()
    testfile = os.path.join(tdir, 'test.xml')
    try:
        print('Testing save/read functionality for shakemap grids...')
        pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4)
        pgv = np.arange(1, 17, dtype=np.float32).reshape(4, 4)
        mmi = np.arange(2, 18, dtype=np.float32).reshape(4, 4)
        geodict = GeoDict({
            'xmin': 0.5,
            'xmax': 3.5,
            'ymin': 0.5,
            'ymax': 3.5,
            'dx': 1.0,
            'dy': 1.0,
            'ny': 4,
            'nx': 4
        })
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        shakeDict = {
            'event_id': 'usabcd1234',
            'shakemap_id': 'usabcd1234',
            'shakemap_version': 1,
            'code_version': '4.0',
            'process_timestamp': datetime.utcnow(),
            'shakemap_originator': 'us',
            'map_status': 'RELEASED',
            'shakemap_event_type': 'ACTUAL'
        }
        eventDict = {
            'event_id': 'usabcd1234',
            'magnitude': 7.6,
            'depth': 1.4,
            'lat': 2.0,
            'lon': 2.0,
            'event_timestamp': datetime.utcnow(),
            'event_network': 'us',
            'event_description': 'sample event'
        }
        uncDict = {'pga': (0.0, 0), 'pgv': (0.0, 0), 'mmi': (0.0, 0)}
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)

        print('Testing save/read functionality...')
        shake.save(testfile, version=3)
        shake2 = ShakeGrid.load(testfile)
        for layer in ['pga', 'pgv', 'mmi']:
            tdata = shake2.getLayer(layer).getData()
            np.testing.assert_almost_equal(tdata, layers[layer])

        print('Passed save/read functionality for shakemap grids.')

        print('Testing getFileGeoDict method...')
        fgeodict = ShakeGrid.getFileGeoDict(testfile)
        print('Passed getFileGeoDict method.')

        print('Testing loading with bounds (no resampling or padding)...')
        sampledict = GeoDict({
            'xmin': -0.5,
            'xmax': 3.5,
            'ymin': -0.5,
            'ymax': 3.5,
            'dx': 1.0,
            'dy': 1.0,
            'ny': 5,
            'nx': 5
        })
        shake3 = ShakeGrid.load(testfile,
                                samplegeodict=sampledict,
                                resample=False,
                                doPadding=False,
                                padValue=np.nan)
        tdata = shake3.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, layers['pga'])

        print('Passed loading with bounds (no resampling or padding)...')

        print('Testing loading shakemap with padding, no resampling...')
        newdict = GeoDict({
            'xmin': -0.5,
            'xmax': 4.5,
            'ymin': -0.5,
            'ymax': 4.5,
            'dx': 1.0,
            'dy': 1.0,
            'ny': 6,
            'nx': 6
        })
        shake4 = ShakeGrid.load(testfile,
                                samplegeodict=newdict,
                                resample=False,
                                doPadding=True,
                                padValue=np.nan)
        output = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
                           [np.nan, 0.0, 1.0, 2.0, 3.0, np.nan],
                           [np.nan, 4.0, 5.0, 6.0, 7.0, np.nan],
                           [np.nan, 8.0, 9.0, 10.0, 11.0, np.nan],
                           [np.nan, 12.0, 13.0, 14.0, 15.0, np.nan],
                           [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
        tdata = shake4.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, output)
        print('Passed loading shakemap with padding, no resampling...')

        # Make a bigger grid
        pga = np.arange(0, 36, dtype=np.float32).reshape(6, 6)
        pgv = np.arange(1, 37, dtype=np.float32).reshape(6, 6)
        mmi = np.arange(2, 38, dtype=np.float32).reshape(6, 6)
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        geodict = GeoDict({
            'xmin': 0.5,
            'xmax': 5.5,
            'ymin': 0.5,
            'ymax': 5.5,
            'dx': 1.0,
            'dy': 1.0,
            'ny': 6,
            'nx': 6
        })
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)
        shake.save(testfile, version=3)

        print('Testing resampling, no padding...')
        littledict = GeoDict({
            'xmin': 2.0,
            'xmax': 4.0,
            'ymin': 2.0,
            'ymax': 4.0,
            'dx': 1.0,
            'dy': 1.0,
            'ny': 3,
            'nx': 3
        })
        shake5 = ShakeGrid.load(testfile,
                                samplegeodict=littledict,
                                resample=True,
                                doPadding=False,
                                padValue=np.nan)
        output = np.array([[10.5, 11.5, 12.5], [16.5, 17.5, 18.5],
                           [22.5, 23.5, 24.5]])
        tdata = shake5.getLayer('pga').getData()
        np.testing.assert_almost_equal(tdata, output)
        print('Passed resampling, no padding...')

        print('Testing resampling and padding...')
        pga = np.arange(0, 16, dtype=np.float32).reshape(4, 4)
        pgv = np.arange(1, 17, dtype=np.float32).reshape(4, 4)
        mmi = np.arange(2, 18, dtype=np.float32).reshape(4, 4)
        geodict = GeoDict({
            'xmin': 0.5,
            'ymax': 3.5,
            'ymin': 0.5,
            'xmax': 3.5,
            'dx': 1.0,
            'dy': 1.0,
            'ny': 4,
            'nx': 4
        })
        layers = OrderedDict()
        layers['pga'] = pga
        layers['pgv'] = pgv
        layers['mmi'] = mmi
        shake = ShakeGrid(layers, geodict, eventDict, shakeDict, uncDict)
        shake.save(testfile, version=3)
        bigdict = GeoDict({
            'xmin': 0.0,
            'xmax': 4.0,
            'ymin': 0.0,
            'ymax': 4.0,
            'dx': 1.0,
            'dy': 1.0,
            'ny': 5,
            'nx': 5
        })
        shake6 = ShakeGrid.load(testfile,
                                samplegeodict=bigdict,
                                resample=True,
                                doPadding=True,
                                padValue=np.nan)
        tdata = shake6.getLayer('pga').getData()
        output = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
                           [np.nan, 2.5, 3.5, 4.5, np.nan],
                           [np.nan, 6.5, 7.5, 8.5, np.nan],
                           [np.nan, 10.5, 11.5, 12.5, np.nan],
                           [np.nan, np.nan, np.nan, np.nan, np.nan]])
        np.testing.assert_almost_equal(tdata, output)
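        # The 2.5 at output[1, 1], for example, is the bilinear average of
        # the four surrounding input cells: (0 + 1 + 4 + 5)/4.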
        print('Passed resampling and padding...')
    except Exception as error:
        print('Failed to read grid.xml format file "%s". Error "%s".' %
              (testfile, str(error)))
        assert 0 == 1
    finally:
        if os.path.isdir(tdir):
            shutil.rmtree(tdir)
Exemple #40
0
def computeHagg(grid2D, proj='moll', probthresh=0., shakefile=None,
                shakethreshtype='pga', shakethresh=0., stdgrid2D=None,
                stdtype='full', maxP=1., sill1=None, range1=None):
    """
    Computes the aggregate hazard (Hagg), which is equal to the sum over grid
    cells of probability * cell area. For models that compute areal coverage,
    this is equivalent to the total predicted area affected in km2.

    Args:
        grid2D: grid2D object of model output.
        proj: projection to use to obtain equal area, 'moll' (Mollweide) or
            'laea' (Lambert azimuthal equal area).
        probthresh: Probability threshold, any values less than this will not
            be included in aggregate hazard estimation.
        shakefile: Optional, path to shakemap file to use for ground motion
            threshold.
        shakethreshtype: Optional, Type of ground motion to use for
            shakethresh, 'pga', 'pgv', or 'mmi'.
        shakethresh: Optional, Float or list of shaking thresholds in %g for
            pga, cm/s for pgv, float for mmi.
        stdgrid2D: grid2D object of model standard deviations (optional)
        stdtype (str): assumption of spatial correlation used to compute
            the stdev of the statistics, 'max', 'min', 'mean' of max and min,
            or 'full' (default) which estimates the range of correlation and
            accounts for covariance. Will return 'mean' if
            range and sill cannot be estimated.
        maxP (float): the maximum possible probability of the model
        sill1 (float): If known, the sill of the variogram of grid2D, will be
            estimated if None and stdtype='full'
        range1 (float): If known, the range of the variogram of grid2D, will
            be estimated if None and stdtype='full'

    Returns:
        dict: Dictionary with keys:
            hagg_#g where # is the shakethresh
            hagg_std_#g if stdgrid2D is supplied (stdev of hagg)
            hlim_#g, the maximum hazard value possible with the
            applied thresholds and given maxP value
            cell_area_km2, the grid cell area
            p_hagg_#g, beta distribution shape factor p (sometimes called alpha)
            q_hagg_#g, beta distribution shape factor q (sometimes called beta)
    """
    bounds = grid2D.getBounds()
    lat0 = np.mean((bounds[2], bounds[3]))
    lon0 = np.mean((bounds[0], bounds[1]))
    projs = ('+proj=%s +lat_0=%f +lon_0=%f +x_0=0 +y_0=0 +ellps=WGS84 '
             '+units=km +no_defs' % (proj, lat0, lon0))
    geodict = grid2D.getGeoDict()

    if shakefile is not None:
        if shakethresh < 0.:
            raise Exception('shaking threshold must be greater than or '
                            'equal to zero')
        # resample shakemap to grid2D
        temp = ShakeGrid.load(shakefile)
        shk = temp.getLayer(shakethreshtype)
        shk = shk.interpolate2(geodict)
        if shk.getGeoDict() != geodict:
            raise Exception('shakemap was not resampled to exactly the same '
                            'geodict as the model')

    if probthresh < 0.:
        raise Exception('probability threshold must be greater than or '
                        'equal to zero')

    grid = grid2D.project(projection=projs, method='bilinear')
    geodictRS = grid.getGeoDict()
    
    cell_area_km2 = geodictRS.dx * geodictRS.dy
    
    model = grid.getData().copy()

    Hagg = {}

    if shakefile is not None:
        shkgrid = shk.project(projection=projs)
        shkdat = shkgrid.getData()
        model[shkdat < shakethresh] = np.nan
    else:
        shakethresh = 0.
        shkdat = None

    mu = np.nansum(model[model >= probthresh] * cell_area_km2)
    Hagg['hagg_%1.2fg' % (shakethresh/100.,)] = mu
    Hagg['cell_area_km2'] = cell_area_km2
    N = np.nansum(model >= probthresh)
    #Hagg['N_%1.2fg' % (shakethresh/100.,)] = N
    hlim = cell_area_km2*N*maxP
    Hagg['hlim_%1.2fg' % (shakethresh/100.,)] = hlim
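    # Illustrative numbers (not from the original source): three cells with
    # probabilities 0.2, 0.4 and 0.6, each 1 km2 in area, give
    # mu = 0.2 + 0.4 + 0.6 = 1.2 km2 and, with maxP = 1, hlim = 3*1*1 = 3 km2.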

    if stdgrid2D is not None:
        stdgrid = GDALGrid.copyFromGrid(stdgrid2D) # Make a copy
        stdgrid = stdgrid.project(projection=projs, method='bilinear')
        std = stdgrid.getData().copy()
        if np.nanmax(std) > 0. and np.nanmax(model) >= probthresh:
            totalmin = cell_area_km2 * np.sqrt(np.nansum((std[model >= probthresh])**2.))
            totalmax = np.nansum(std[model >= probthresh] * cell_area_km2)
            if stdtype == 'full':
                if sill1 is None or range1 is None:
                    range1, sill1 = semivario(grid.getData().copy(), probthresh,
                                              shakethresh=shakethresh,
                                              shakegrid=shkdat)
                if range1 is None:
                    # Use mean
                    Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = (totalmax+totalmin)/2.
                else:
                    # Zero out std at cells where the model probability was below
                    # the threshold because we aren't including those cells in Hagg
                    stdz = std.copy()
                    stdz[model < probthresh] = 0.
                    svar1 = svar(stdz, range1, sill1, scale=cell_area_km2)
                    Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = np.sqrt(svar1)
                    #Hagg['hagg_range_%1.2fg' % (shakethresh/100.,)] = range1
                    #Hagg['hagg_sill_%1.2fg' % (shakethresh/100.,)] = sill1 
            elif stdtype == 'max':
                Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = totalmax
            elif stdtype == 'min':
                Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = totalmin
            else:
                Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = (totalmax+totalmin)/2.

            var = Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)]**2.
            # Beta distribution shape factors
            Hagg['p_hagg_%1.2fg' % (shakethresh/100.,)] = (mu/hlim)*((hlim*mu-mu**2)/var-1)
            Hagg['q_hagg_%1.2fg' % (shakethresh/100.,)] = (1-mu/hlim)*((hlim*mu-mu**2)/var-1)
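            # Consistency check with the illustrative numbers above: mu = 1.2,
            # hlim = 3 and var = 0.09 give p ~ 9.2 and q ~ 13.8, so the beta
            # mean p/(p+q) = 0.4 matches mu/hlim as expected.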
        else:
            print('No model values above threshold, skipping uncertainty '
                  'and filling with zeros')
            Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = 0.
            Hagg['p_hagg_%1.2fg' % (shakethresh/100.,)] = 0.
            Hagg['q_hagg_%1.2fg' % (shakethresh/100.,)] = 0.
    else:
        print('No uncertainty provided, filling with zeros')
        Hagg['hagg_std_%1.2fg' % (shakethresh/100.,)] = 0.
        Hagg['p_hagg_%1.2fg' % (shakethresh/100.,)] = 0.
        Hagg['q_hagg_%1.2fg' % (shakethresh/100.,)] = 0.

    return Hagg
Exemple #41
0
def quickcut(filename,
             gdict,
             tempname=None,
             extrasamp=5.,
             method='bilinear',
             precise=True,
             cleanup=True,
             verbose=False,
             override=False):
    """
    Use gdal to trim a large global file down quickly so mapio can read it
    efficiently. (It cannot read ShakeMap .xml files; save those as .bil first.)

    Args:
        filename (str): File path to original input file (raster).
        gdict (geodict): Geodictionary to cut around and align with.
        tempname (str): File path to desired location of clipped part of
            filename.
        extrasamp (float): Number of extra cells to cut around each edge of
            the geodict to provide a resampling buffer for future steps.
        method (str): If resampling is necessary, method to use.
        precise (bool): If True, will resample to the gdict as closely as
            possible; if False it will just roughly cut around the area of
            interest without changing resolution.
        cleanup (bool): if True, delete tempname after reading it back in
        verbose (bool): if True, prints more details
        override (bool): if True, if filename extent is not fully contained by
            gdict, read in the entire file (only used for ShakeMaps)

    Returns: New grid2D layer

    Note: This function uses the subprocess approach because ``gdal.Translate``
        doesn't block until the file is created, which causes
        problems in the next steps.
    """
    if gdict.xmax < gdict.xmin:
        raise Exception('quickcut: your geodict xmax is smaller than xmin')

    try:
        filegdict = GDALGrid.getFileGeoDict(filename)
    except Exception:
        try:
            filegdict = GMTGrid.getFileGeoDict(filename)
        except Exception:
            raise Exception('Cannot get geodict for %s' % filename)

    if tempname is None:
        tempdir = tempfile.mkdtemp()
        tempname = os.path.join(tempdir, 'junk.tif')
        deltemp = True
    else:
        tempdir = None
        deltemp = False

    # if os.path.exists(tempname):
    #     os.remove(tempname)
    #     print('Temporary file already there, removing file')

    filegdict = filegdict[0]

    # Get the right methods for mapio (method) and gdal (method2)
    if method == 'linear':
        method2 = 'bilinear'
    elif method == 'nearest':
        method2 = 'near'
    elif method == 'bilinear':
        method = 'linear'
        method2 = 'bilinear'
    elif method == 'near':
        method = 'nearest'
        method2 = 'near'
    else:
        method2 = method

    if filegdict != gdict:
        # First cut without resampling
        tempgdict = GeoDict.createDictFromBox(gdict.xmin,
                                              gdict.xmax,
                                              gdict.ymin,
                                              gdict.ymax,
                                              filegdict.dx,
                                              filegdict.dy,
                                              inside=True)

        try:
            egdict = filegdict.getBoundsWithin(tempgdict)

            ulx = egdict.xmin - extrasamp * egdict.dx
            uly = egdict.ymax + extrasamp * egdict.dy
            lrx = egdict.xmax + (extrasamp + 1) * egdict.dx
            lry = egdict.ymin - (extrasamp + 1) * egdict.dy
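            # Illustrative numbers (not from the original source): with
            # extrasamp = 5 and dx = dy = 0.01 deg, the cut window extends
            # 0.05 deg beyond xmin/ymax and 0.06 deg beyond xmax/ymin.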

            cmd = ('gdal_translate -a_srs EPSG:4326 -of GTiff '
                   '-projwin %1.8f %1.8f %1.8f %1.8f -r %s %s %s'
                   % (ulx, uly, lrx, lry, method2, filename, tempname))
        except Exception as e:
            if override:
                # When ShakeMap is being loaded, sometimes they won't align
                # right because it's already cut to the area, so just load
                # the whole file
                cmd = 'gdal_translate -a_srs EPSG:4326 -of GTiff -r %s %s %s' \
                      % (method2, filename, tempname)
            else:
                raise Exception('Failed to cut layer: %s' % e)

        rc, so, se = get_command_output(cmd)
        if not rc:
            raise Exception(se.decode())
        else:
            if verbose:
                print(so.decode())

        newgrid2d = GDALGrid.load(tempname)
        if precise:
            # Resample to exact geodictionary
            newgrid2d = newgrid2d.interpolate2(gdict, method=method)
        if cleanup:
            os.remove(tempname)

        if deltemp:
            shutil.rmtree(tempdir)

    else:
        ftype = GMTGrid.getFileType(filename)
        if ftype != 'unknown':
            newgrid2d = GMTGrid.load(filename)
        elif filename.endswith('.xml'):
            newgrid2d = ShakeGrid.load(filename)
        else:
            newgrid2d = GDALGrid.load(filename)

    return newgrid2d
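
# A minimal usage sketch, assuming quickcut() as defined above; both file
# names below are placeholders, and ShakeGrid is the same mapio class used
# throughout these examples.
sdict = ShakeGrid.getFileGeoDict('grid.xml')  # target ShakeMap geodict
slope = quickcut('global_slope.grd', sdict, method='bilinear',
                 precise=True, cleanup=True)
print(slope.getGeoDict())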
Exemple #42
0
def run_method(direc, voi, num_realizations, radius, corr_model, vscorr, output_dir):
    """
    Parallel code for computing the spatial correlation for a ShakeMap,
    adding to a ShakeMap grid, and computing multiple realizations.
    File may be run using:
    mpiexec -n # python test.py path imt distance_measure N
    where # is the desired number of processors. Required command line parameters are listed below:
    :param direc:
        string, path to directory containing grid, stationlist, uncertainty, event xmls and fault.txt
    :param voi:
        string, intensity measures to use, i.e., 'pga pgv psa03'
    :param N:
        integer, number of realizations to compute
    :param radius:
        integer, radius of influence
    :param corr_model:
        string, specifies the correlation model
    :param vs_corr:
        boolean, specifies whether vs30 are correlated
    :param output_dir:
        path to directory where output is stored
    """
    start_time = time.time()
    
    # Start MPI
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    my_rank = comm.Get_rank()
    
    # Get shakemap, uncertainty grid, and stationdata
    shakegrid = ShakeGrid.load(os.path.join(direc, 'grid.xml'), adjust='res')
    unc_grid = ShakeGrid.load(os.path.join(direc, 'uncertainty.xml'),
                              adjust='res')
    stationlist = os.path.join(direc, 'stationlist.xml')
    stationdata = readStation(stationlist)
    
    # Initialize the grid
    # In this step we use the ShakeMap outputs to determine the grid points, grid spacing, site collections, 
    # station data, and other initial values
    variables = initialize(shakegrid, unc_grid, stationdata, direc, voi)
    if my_rank == 0:
        print(variables['K'], 'stations', variables['M']*variables['N'], 'data points')
    initialization_time = time.time() - start_time
    if my_rank == 0:
        print('Initialization time', initialization_time)
    sys.stdout.flush()

    # Compute the grid, mu, and sigma arrays
    # In this step, we use the correlation model to compute the covariance matrices for each point on the 
    # ShakeMap grid. This computation is done in parallel
    out = main(variables, radius, voi, corr_model, vscorr)
    main_time = time.time() - start_time - initialization_time
    if my_rank == 0:
        print('Main time', main_time)
    

    # Compute realizations of the random field
    # After computing the covariance matrices for each point, we can compute realizations of the random
    # fields. If multiple cores are used, each core needs the data for every point on the grid.
    for ii in range(0, np.size(voi)):
        if num_realizations == 1:
            # Master will compute this single realization
            if my_rank == 0:
                data = realizations(1, 1, radius, variables,
                                    out['grid_arr'], out['mu_arr'][voi[ii]],
                                    out['sigma_arr'][voi[ii]],
                                    out['list_sizes_grid'],
                                    out['list_sizes_mu'],
                                    shakegrid, voi[ii], comm, direc,
                                    output_dir)
        else:
            # Master broadcasts the arrays to the other cores
            if my_rank == 0:
                grid_arr = out['grid_arr']
                mu_arr = out['mu_arr'][voi[ii]]
                sigma_arr = out['sigma_arr'][voi[ii]]
                list_sizes_grid = out['list_sizes_grid']
                list_sizes_mu = out['list_sizes_mu']
            else:
                grid_arr = None
                mu_arr = None
                sigma_arr = None
                list_sizes_grid = None
                list_sizes_mu = None        

            grid_arr = comm.bcast(grid_arr, root=0)
            mu_arr = comm.bcast(mu_arr, root=0)
            sigma_arr = comm.bcast(sigma_arr, root=0)
            list_sizes_grid = comm.bcast(list_sizes_grid, root=0)
            list_sizes_mu = comm.bcast(list_sizes_mu, root=0)
            
            my_reals = np.arange(my_rank, num_realizations, size) 
            
            # Each core does a set of realizations
            data = realizations(num_realizations, my_reals, radius, variables,
                                grid_arr, mu_arr, sigma_arr, list_sizes_grid, list_sizes_mu,
                                shakegrid, voi[ii], comm, direc, output_dir)

    realization_time = time.time() - start_time - initialization_time - main_time
    if my_rank == 0:
        print('Realization time', realization_time)
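
# A minimal driver sketch, assuming run_method() as defined above; all
# paths, IMT names, and model choices below are placeholders. Every MPI
# rank executes this script, launched with e.g.:
#     mpiexec -n 4 python driver.py
if __name__ == '__main__':
    run_method(direc='/data/event/', voi=['pga', 'pgv'],
               num_realizations=100, radius=45,
               corr_model='JB2009', vscorr=True,
               output_dir='/data/event/realizations/')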
def run_gfail(args):
    """Runs ground failure.

    Args:
        args: dictionary or argument parser Namespace output by bin/gfail
            program.

    Returns:
        list: Names of created files.

    """
    # TODO: ADD CONFIG VALIDATION STEP THAT MAKES SURE ALL THE FILES EXIST
    filenames = []
    # If args is a dictionary, convert to a Namespace
    if isinstance(args, dict):
        args = Namespace(**args)

    if args.set_default_paths:
        set_default_paths(args)
        print('default paths set, continuing...\n')

    if args.list_default_paths:
        list_default_paths()
        return

    if args.reset_default_paths:
        reset_default_paths()
        return

    if args.make_webpage:
        # Turn on GIS and HDF5 flags
        gis = True
        hdf5 = True
        kmz = True
    else:
        gis = args.gis
        hdf5 = args.hdf5
        kmz = args.kmz

    # Figure out what models will be run
    if args.shakefile is not None:  # user intends to actually run some models
        shakefile = args.shakefile

        # make output location for things
        if args.output_filepath is None:
            outdir = os.getcwd()
        else:
            outdir = args.output_filepath

        if hdf5 or gis or kmz:
            if not os.path.exists(outdir):
                os.makedirs(outdir)

        # download if is url
        # cleanup = False
        if not os.path.isfile(shakefile):
            if isURL(shakefile):
                # getGridURL returns a named temporary file object
                shakefile = getGridURL(shakefile)
                # cleanup = True  # Be sure to delete it after
            else:
                raise NameError('Could not find "%s" as a file or a valid url'
                                % shakefile)
        eventid = getHeaderData(shakefile)[0]['event_id']

        # Get entire path so won't break if running gfail with relative path
        shakefile = os.path.abspath(shakefile)

        if args.extract_contents:
            outfolder = outdir
        else:  # Nest in a folder named by eventid
            outfolder = os.path.join(outdir, eventid)
            if not os.path.exists(outfolder):
                os.makedirs(outfolder)

        # Copy shake grid into output directory
        # --- this is based on advice from Mike: when running in production
        #     the shake grids are not archived, so if there's ever a
        #     question about how the calculation was done, the safest
        #     thing is to store a copy of the exact grid used here.
        shake_copy = os.path.join(outfolder, "grid.xml")
        shutil.copyfile(shakefile, shake_copy)

        if args.uncertfile is not None:
            uncertfile = os.path.abspath(args.uncertfile)
            unc_copy = os.path.join(outfolder, "uncertainty.xml")
            shutil.copyfile(uncertfile, unc_copy)
        else:
            uncertfile = None

        # Write shakefile to a file for use later
        shakename = os.path.join(outfolder, "shakefile.txt")
        with open(shakename, "wt") as shake_file:
            shake_file.write(shake_copy)
        filenames.append(shakename)

        # Check that shakemap bounds do not cross 180/-180 line

        if args.set_bounds is None:
            sd = ShakeGrid.getFileGeoDict(shakefile)
            if sd.xmin > sd.xmax:
                print('\nShakeMap crosses 180/-180 line, setting bounds so '
                      'only side with more land area is run')
                if sd.xmax + 180. > 180 - sd.xmin:
                    set_bounds = '%s, %s, %s, %s' % (
                        sd.ymin, sd.ymax, -180., sd.xmax)
                else:
                    set_bounds = '%s, %s, %s, %s' % (sd.ymin, sd.ymax, sd.xmin,
                                                     180.)
                print('Bounds applied: %s' % set_bounds)
            else:
                set_bounds = args.set_bounds
        else:
            set_bounds = args.set_bounds

        config = args.config

        if args.config_filepath is not None:
            # only add config_filepath if full filepath not given and file
            # ext is .ini
            if (not os.path.isabs(config) and
                    os.path.splitext(config)[-1] == '.ini'):
                config = os.path.join(args.config_filepath, config)

        if os.path.splitext(config)[-1] == '.ini':
            temp = ConfigObj(config)
            if len(temp) == 0:
                raise Exception(
                    'Could not find specified .ini file: %s' % config)
            if args.data_path is not None:
                temp = correct_config_filepaths(args.data_path, temp)
            configs = [temp]
            conffail = []
        else:
            # input is a list of config files
            f = open(config, 'r')
            configlist = f.readlines()
            configs = []
            conffail = []
            for conf in configlist:
                conf = conf.strip()
                if not os.path.isabs(conf):
                    # only add config_filepath if full filepath not given
                    conf = os.path.join(args.config_filepath, conf)
                try:
                    temp = ConfigObj(conf)
                    if temp:
                        if args.data_path is not None:
                            temp = correct_config_filepaths(
                                args.data_path, temp)
                        configs.append(temp)
                    else:
                        conffail.append(conf)
                except BaseException:
                    conffail.append(conf)

        print('\nRunning the following models:')

        for conf in configs:
            print('\t%s' % conf.keys()[0])
        if len(conffail) > 0:
            print('Could not find or read in the following config files:\n')
            for conf in conffail:
                print('\t%s' % conf)
            print('\nContinuing...\n')

        if set_bounds is not None:
            if 'zoom' in set_bounds:
                temp = set_bounds.split(',')
                print('Using %s threshold of %1.1f to cut model bounds'
                      % (temp[1].strip(), float(temp[2].strip())))
                bounds = get_bounds(shakefile, temp[1].strip(),
                                    float(temp[2].strip()))
            else:
                # Parse 'latmin, latmax, lonmin, lonmax' directly instead
                # of passing user input through eval()
                latmin, latmax, lonmin, lonmax = [
                    float(part) for part in set_bounds.split(',')]
                bounds = {'xmin': lonmin, 'xmax': lonmax,
                          'ymin': latmin, 'ymax': latmax}
            print('Applying bounds of lonmin %1.2f, lonmax %1.2f, '
                  'latmin %1.2f, latmax %1.2f'
                  % (bounds['xmin'], bounds['xmax'],
                     bounds['ymin'], bounds['ymax']))
        else:
            bounds = None

        if args.make_webpage:
            results = []

        # pre-read in ocean trimming file polygons so only do this step once
        if args.trimfile is not None:
            if not os.path.exists(args.trimfile):
                print('trimfile defined does not exist: %s\n'
                      'Ocean will not be trimmed.' % args.trimfile)
                trimfile = None
            elif os.path.splitext(args.trimfile)[1] != '.shp':
                print('trimfile must be a shapefile, '
                      'ocean will not be trimmed')
                trimfile = None
            else:
                trimfile = args.trimfile
        else:
            trimfile = None

        # Get finite fault ready, if it exists

        ffault = None
        point = True
        if args.finite_fault is not None:
            point = False
            try:
                if os.path.splitext(args.finite_fault)[-1] == '.txt':
                    ffault = text_to_json(args.finite_fault)
                elif os.path.splitext(args.finite_fault)[-1] == '.json':
                    ffault = args.finite_fault
                else:
                    print('Could not read in finite fault, will '
                          'try to download from comcat')
                    ffault = None
            except BaseException:
                print('Could not read in finite fault, will try to '
                      'download from comcat')
                ffault = None

        if ffault is None:
            # Try to get finite fault file, if it exists
            try:
                returned_ev = get_event_comcat(shakefile)
                if returned_ev is not None:
                    testjd, detail, temp = returned_ev
                    evinfo = testjd['input']['event_information']
                    if 'faultfiles' in evinfo:
                        ffilename = evinfo['faultfiles']
                        if len(ffilename) > 0:
                            # Download the file
                            with tempfile.NamedTemporaryFile(
                                    delete=False, mode='w') as f:
                                temp.getContent(ffilename, filename=f.name)
                                ffault = text_to_json(f.name)
                                os.remove(f.name)
                            point = False
                        else:
                            point = True
                else:
                    print('Unable to determine source type, unknown if finite'
                          ' fault or point source')
                    ffault = None
                    point = False

            except Exception as e:
                print(e)
                print('Unable to determine source type, unknown if finite'
                      ' fault or point source')
                ffault = None
                point = False

        # Loop over config files
        for conf in configs:
            modelname = conf.keys()[0]
            print('\nNow running %s:' % modelname)
            notcov, newbnds = check_input_extents(
                conf, shakefile=shakefile,
                bounds=bounds
            )
            if len(notcov) > 0:
                print('\nThe following input layers do not cover'
                      ' the area of interest:\n\t%s' % '\n\t'.join(notcov))
                if newbnds is None:
                    print('\nCannot make bounds that work. '
                          'Skipping to next model\n')
                    continue
                else:
                    pnt = '%s, %s, %s, %s' % (
                        newbnds['xmin'], newbnds['xmax'],
                        newbnds['ymin'], newbnds['ymax'])
                    print('Running model for new bounds that are fully covered'
                          ' by input layer: %s' % pnt)
                    bounds2 = newbnds
            else:
                bounds2 = bounds

            modelfunc = conf[modelname]['funcname']
            if modelfunc == 'LogisticModel':
                lm = LM.LogisticModel(shakefile, conf,
                                      uncertfile=uncertfile,
                                      saveinputs=args.save_inputs,
                                      bounds=bounds2,
                                      trimfile=trimfile)

                maplayers = lm.calculate()
            elif modelfunc == 'godt2008':
                maplayers = godt2008(shakefile, conf,
                                     uncertfile=uncertfile,
                                     saveinputs=args.save_inputs,
                                     bounds=bounds2,
                                     trimfile=trimfile)
            else:
                print('Unknown model function specified in config for %s '
                      'model, skipping to next config' % modelfunc)
                continue

            # time1 = datetime.datetime.utcnow().strftime('%d%b%Y_%H%M')
            # filename = ('%s_%s_%s' % (eventid, modelname, time1))

            if args.appendname is not None:
                filename = ('%s_%s_%s' % (eventid, modelname, args.appendname))
            else:
                filename = ('%s_%s' % (eventid, modelname))
            if hdf5:
                filenameh = filename + '.hdf5'
                if os.path.exists(filenameh):
                    os.remove(filenameh)
                savelayers(maplayers, os.path.join(outfolder, filenameh))
                filenames.append(filenameh)

            if gis or kmz:
                for key in maplayers:
                    # Rename 'std' key to 'beta_sigma'
                    if key == 'std':
                        key_label = 'beta_sigma'
                    else:
                        key_label = key
                    if gis:
                        filen = os.path.join(outfolder, '%s_%s.bil'
                                             % (filename, key_label))
                        fileh = os.path.join(outfolder, '%s_%s.hdr'
                                             % (filename, key_label))
                        fileg = os.path.join(outfolder, '%s_%s.tif'
                                             % (filename, key_label))

                        GDALGrid.copyFromGrid(
                            maplayers[key]['grid']).save(filen)
                        cflags = '-co COMPRESS=DEFLATE -co predictor=2'
                        srs = '-a_srs EPSG:4326'
                        cmd = 'gdal_translate %s %s -of GTiff %s %s' % (
                            srs, cflags, filen, fileg)
                        rc, so, se = get_command_output(cmd)
                        # Delete bil file and its header
                        os.remove(filen)
                        os.remove(fileh)
                        filenames.append(fileg)
                    if kmz and not key.startswith(('quantile', 'std')):
                        plotorder, logscale, lims, colormaps, maskthresh = \
                            parseConfigLayers(maplayers, conf, keys=['model'])
                        maxprob = np.nanmax(maplayers[key]['grid'].getData())
                        if key == 'model':
                            qdict = {
                                k: maplayers[k] for k in maplayers.keys()
                                if k.startswith('quantile')
                            }
                        else:
                            qdict = None
                        if maskthresh is None:
                            maskthresh = [0.]
                        if maxprob >= maskthresh[0]:
                            filen = os.path.join(outfolder, '%s_%s.kmz'
                                                 % (filename, key_label))
                            filek = create_kmz(maplayers[key], filen,
                                               mask=maskthresh[0],
                                               levels=lims[0],
                                               qdict=qdict)
                            filenames.append(filek)
                        else:
                            print('No unmasked pixels present, skipping kmz '
                                  'file creation')

            if args.make_webpage:
                # Compile into list of results for later
                results.append(maplayers)

                #  # Make binary output for ShakeCast
                #  filef = os.path.join(outfolder, '%s_model.flt'
                #                       % filename)
                #  # And get name of header
                #  filefh = os.path.join(outfolder, '%s_model.hdr'
                #                        % filename)
                #  # Make file
                #  write_floats(filef, maplayers['model']['grid'])
                #  filenames.append(filef)
                #  filenames.append(filefh)

        eventid = getHeaderData(shakefile)[0]['event_id']
        if not hasattr(args, 'eventsource'):
            args.eventsource = 'us'
        if not hasattr(args, 'eventsourcecode'):
            args.eventsourcecode = eventid

        if args.make_webpage:
            if len(results) == 0:
                raise Exception('No models were run. Cannot make webpages.')
            outputs = hazdev(
                results, configs,
                shakefile, outfolder=outfolder,
                pop_file=args.popfile,
                pager_alert=args.property_alertlevel,
                eventsource=args.eventsource,
                eventsourcecode=args.eventsourcecode,
                point=point, gf_version=args.gf_version,
                pdlcall=args.pdlcall)
            filenames = filenames + outputs

#        # create transparent png file
#        outputs = create_png(outdir)
#        filenames = filenames + outputs
#
#        # create info file
#        infofile = create_info(outdir)
#        filenames = filenames + infofile

        print('\nFiles created:\n')
        for filen in filenames:
            print('%s' % filen)

        return filenames
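
# A minimal invocation sketch, assuming run_gfail() as defined above.
# run_gfail() accepts a plain dict and converts it to a Namespace, but it
# reads many flags from that object, so every flag it touches must be
# present. All paths and file names below are placeholders.
gfail_args = {
    'set_default_paths': False, 'list_default_paths': False,
    'reset_default_paths': False, 'make_webpage': False,
    'gis': True, 'hdf5': True, 'kmz': False,
    'shakefile': 'grid.xml', 'output_filepath': '/tmp/gfail_output',
    'extract_contents': False, 'uncertfile': None, 'set_bounds': None,
    'config': 'godt_2008.ini', 'config_filepath': '/data/gf_configs',
    'data_path': '/data/gf_layers', 'trimfile': None,
    'finite_fault': None, 'save_inputs': False, 'appendname': None,
}
# Uncomment once a real ShakeMap grid and model config are in place:
# filenames = run_gfail(gfail_args)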
Exemple #44
0
def getDataFrames(sampleparams,shakeparams,predictors,outparams):
    """
    Return Pandas training and testing data frames containing sampled data from hazard coverage, ShakeMap, and predictor data sets.
    :param sampleparams:
      Dictionary with at least these values:
        - coverage: Name of hazard coverage shapefile (decimal degrees). Required.
        - dx: Float desired sample resolution in meters; can be overridden by nmax, below. Required.
        - cb: Desired class balance, i.e., fraction of sampled points that should be from hazard polygons. Optional for polygons, required for points.
        - nmax: Maximum number of possible yes/no sample points (usually set to avoid memory issues). Optional.
        - nsamp: Number of total hazard and no-hazard sample points to collect.  Required.
        - touch_center: Boolean (0 or 1) indicating whether polygons must touch the center of a cell for that cell to count as a "yes" sample point.
        - testpercent: Percent (0-100) of sampled points to be used for testing; (100 - testpercent) will be used for training. Optional, defaults to 0.
        - extent: xmin,xmax,ymin,ymax OR convex. Geographic extent within which to sample data; four numbers are interpreted as a bounding box, and the word convex is interpreted as a convex hull. Defaults to the bounding box of the hazard coverage. Optional.
        - h1: Minimum buffer size for sampling non-hazard points when the input coverage takes the form of points. Optional for polygons, required for points.
        - h2: Maximum buffer size for sampling non-hazard points when the input coverage takes the form of points. Optional for polygons, required for points.
    :param shakeparams:
      Dictionary with at least these values:
        - shakemap: Name of shakemap file to use for sampling hazard values. Required.
        - shakemap_uncertainty: Name of shakemap uncertainty file to use for sampling hazard uncertainty values. Optional.
    :param predictors:
      Dictionary with at least these values:
        - layername: Path to ESRI shapefile, or grid in GMT or ESRI format, which represents predictor data. Required.
        - layername_sampling: 'nearest' or 'linear', optional for grids, not used for shapefiles.
        - layername_attribute: Name of the attribute in the shapefile which should be sampled at hazard/non-hazard points. Required for shapefiles.
    :param outparams:
      Dictionary with at least these values:
        - folder: Name of folder where all output (data frames, plots) will be written. Will be created if it does not exist. Required.
        - basename: The name that will be included in all output file names (e.g., northridge_train.csv). Required.
    :returns:
      Tuple of (training,testing) Pandas data frames. 
    """
    coverage = sampleparams['coverage']
    f = fiona.collection(coverage,'r')
    cbounds = f.bounds
    f.close()
    dx = sampleparams['dx']
    cb = sampleparams['cb']
    nmax = sampleparams['nmax']
    nsamp = sampleparams['nsamp']
    touch_center = sampleparams['touch_center']
    testpercent = sampleparams['testpercent']
    extent = sampleparams['extent']
    h1 = sampleparams['h1']
    h2 = sampleparams['h2']

    yestest,yestrain,notest,notrain,xvar,yvar,pshapes,proj = sampleFromFile(coverage,predictors,dx=dx,nmax=nmax,testPercent=testpercent,
                                                                            touch_center=touch_center,classBalance=cb,extent=extent,
                                                                            Nsamp=nsamp,h1=h1,h2=h2)

    traincolumns = OrderedDict()
    testcolumns = OrderedDict()

    if (100-testpercent) > 0:
        traincolumns['lat'] = np.concatenate((yestrain[:,1],notrain[:,1]))
        traincolumns['lon'] = np.concatenate((yestrain[:,0],notrain[:,0]))
        traincolumns['coverage'] = np.concatenate((np.ones_like(yestrain[:,1]),np.zeros_like(notrain[:,1])))

    if testpercent > 0:
        testcolumns['lat'] = np.concatenate((yestest[:,1],notest[:,1]))
        testcolumns['lon'] = np.concatenate((yestest[:,0],notest[:,0]))
        testcolumns['coverage'] = np.concatenate((np.ones_like(yestest[:,1]),np.zeros_like(notest[:,1])))
    
    
    for predname,predfile in predictors.items():
        if not os.path.isfile(predfile):
            continue
        ftype = getFileType(predfile)
        if ftype == 'shapefile':
            attribute = predictors[predname+'_attribute']
            shapes = subsetShapes(predfile,cbounds)
            yes_test_samples = sampleShapes(shapes,yestest,attribute)
            no_test_samples = sampleShapes(shapes,notest,attribute)
            yes_train_samples = sampleShapes(shapes,yestrain,attribute)
            no_train_samples = sampleShapes(shapes,notrain,attribute)
            testcolumns[predname] = np.squeeze(np.concatenate((yes_test_samples,no_test_samples)))
            traincolumns[predname] = np.squeeze(np.concatenate((yes_train_samples,no_train_samples)))
        elif ftype == 'grid':
            method = 'nearest'
            if predname+'_sampling' in predictors:
                method = predictors[predname+'_sampling']

            if testpercent > 0:
                yes_test_samples = sampleGridFile(predfile,yestest,method=method)
                no_test_samples = sampleGridFile(predfile,notest,method=method)
                testcolumns[predname] = np.squeeze(np.concatenate((yes_test_samples,no_test_samples)))

            if (100-testpercent) > 0:
                yes_train_samples = sampleGridFile(predfile,yestrain,method=method)
                no_train_samples = sampleGridFile(predfile,notrain,method=method)
                traincolumns[predname] = np.squeeze(np.concatenate((yes_train_samples,no_train_samples)))
        else:
            continue  # attribute or sampling-method key, not a layer file

    # Sample the ShakeMap
    layers = ['mmi', 'pga', 'pgv', 'psa03', 'psa10', 'psa30']
    shakegrid = ShakeGrid.load(shakeparams['shakemap'], adjust='res')
    for layer in layers:
        yes_test_samples = sampleFromMultiGrid(shakegrid,layer,yestest)
        no_test_samples = sampleFromMultiGrid(shakegrid,layer,notest)
        yes_train_samples = sampleFromMultiGrid(shakegrid,layer,yestrain)
        no_train_samples = sampleFromMultiGrid(shakegrid,layer,notrain)
        if testpercent > 0:
            testcolumns[layer] = np.squeeze(np.concatenate((yes_test_samples,no_test_samples)))
        if (100-testpercent) > 0:
            traincolumns[layer] = np.squeeze(np.concatenate((yes_train_samples,no_train_samples)))
        
    dftest = pd.DataFrame(testcolumns)
    dftrain = pd.DataFrame(traincolumns)

    return (dftrain,dftest)
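
# A minimal parameter sketch, assuming getDataFrames() as defined above;
# the dictionary keys mirror the docstring, and every file name and value
# below is a placeholder.
sampleparams = {'coverage': 'inventory.shp', 'dx': 100.0, 'cb': 0.5,
                'nmax': 100000, 'nsamp': 5000, 'touch_center': 1,
                'testpercent': 30, 'extent': None, 'h1': 100.0, 'h2': 300.0}
shakeparams = {'shakemap': 'grid.xml'}
predictors = {'slope': 'global_slope.grd', 'slope_sampling': 'linear'}
outparams = {'folder': '/tmp/frames', 'basename': 'northridge'}
# Uncomment once the input files exist:
# dftrain, dftest = getDataFrames(sampleparams, shakeparams, predictors,
#                                 outparams)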
def check_input_extents(config, shakefile=None, bounds=None):
    """Make sure all input files exist and cover the extent desired

    Args:
        config: configObj of a single model
        shakefile: path to ShakeMap grid.xml file (used for bounds). If not
            provided, bounds must be provided
        bounds: dictionary of bounds with keys: 'xmin', 'xmax', 'ymin', 'ymax'

    Returns:
        tuple containing:
            notcovered: list of files that do not cover the entire area
                defined by bounds or shakefile
            newbounds: new dictionary of bounds of subarea of original
                bounds or shakefile extent that is covered by all input files
    """
    if shakefile is None and bounds is None:
        raise Exception('Must define either a shakemap file or bounds')
    modelname = config.keys()[0]
    # Make dummy geodict to use
    if bounds is None:
        evdict = ShakeGrid.getFileGeoDict(shakefile)
    else:
        evdict = GeoDict.createDictFromBox(
            bounds['xmin'], bounds['xmax'],
            bounds['ymin'], bounds['ymax'],
            0.00001, 0.00001, inside=False)

    # Check extents of all input layers
    notcovered = []
    notcovgdicts = []
    newbounds = None
    for item, value in config[modelname]['layers'].items():
        if 'file' in value.keys():
            filelook = value['file']
            if getFileType(filelook) == 'gmt':
                tmpgd, _ = GMTGrid.getFileGeoDict(filelook)
            else:
                tmpgd, _ = GDALGrid.getFileGeoDict(filelook)
            # See if tmpgd contains evdict
            contains = tmpgd.contains(evdict)
            if not contains:
                notcovered.append(filelook)
                notcovgdicts.append(tmpgd)
                # print(filelook)
    if len(notcovered) > 0:
        # Figure out what bounds COULD be run
        xmins = [gd.xmin for gd in notcovgdicts]
        xmaxs = [gd.xmax for gd in notcovgdicts]
        ymins = [gd.ymin for gd in notcovgdicts]
        ymaxs = [gd.ymax for gd in notcovgdicts]

        # Inset by a buffer of 0.05 degrees because mapio doesn't like it
        # when bounds are exactly the same for getBoundsWithin
        newbounds = dict(xmin=evdict.xmin + 0.05,
                         xmax=evdict.xmax - 0.05,
                         ymin=evdict.ymin + 0.05,
                         ymax=evdict.ymax - 0.05)
        # Which one is the problem?
        if evdict.xmin < np.max(xmins):
            newbounds['xmin'] = np.max(xmins) + 0.05
        if evdict.xmax > np.min(xmaxs):
            newbounds['xmax'] = np.min(xmaxs) - 0.05
        if evdict.ymin < np.max(ymins):
            newbounds['ymin'] = np.max(ymins) + 0.05
        if evdict.ymax > np.min(ymaxs):
            newbounds['ymax'] = np.min(ymaxs) - 0.05

        # See if this is a possible extent; the resulting geodict is
        # only used as a validity check
        try:
            GeoDict.createDictFromBox(
                newbounds['xmin'], newbounds['xmax'],
                newbounds['ymin'], newbounds['ymax'],
                0.00001, 0.00001, inside=False)
        except BaseException:
            print('Cannot make new bounds that will work')
            newbounds = None

    return notcovered, newbounds
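
# A minimal usage sketch, assuming check_input_extents() as defined above;
# the config file name and bounds below are placeholders.
from configobj import ConfigObj

conf = ConfigObj('godt_2008.ini')  # single-model config
notcov, newbnds = check_input_extents(
    conf, bounds={'xmin': -119.0, 'xmax': -117.0,
                  'ymin': 33.0, 'ymax': 35.0})
if notcov:
    print('Layers not covering the area: %s' % notcov)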
def create_info(event_dir, lsmodels, lqmodels,
                eventsource='', eventsourcecode='', point=True):
    """Create info.json for ground failure product.

    Args:
        event_dir (str): Directory containing ground failure results.
        lsmodels (list): List of dictionaries of model summary info compiled
            by the hazdev function. If not specified, code will search for
            the hdf5 files for the preferred model and will create this
            dictionary and will apply default colorbars and bins.
        lqmodels (list): Same as above for liquefaction.
        eventsource (str): Event source network code (e.g., 'us').
        eventsourcecode (str): Event code within the source network.
        point (bool): if True, event is a point source and warning should be
            displayed

    Returns:
        list: Names of created files (the info.json for this event).
    """
    filenames = []
    # Find the shakemap grid.xml file
    with open(os.path.join(event_dir, 'shakefile.txt'), 'r') as f:
        shakefile = f.read()

    files = os.listdir(event_dir)

    # Get all info from dictionaries of preferred events, add in extent
    # and filename
    ls_alert = lq_alert = None  # filled in when a preferred model is found
    lsext = lqext = None
    for lsm in lsmodels:
        # Add extent and filename for preferred model
        if lsm['preferred']:
            filesnippet = lsm['id']
            # Read in extents
            flnm = '%s_extent.json' % filesnippet
            ls_extent_file = [f for f in files if flnm in f]
            if len(ls_extent_file) == 1:
                ls_file = os.path.join(event_dir, ls_extent_file[0])
                with open(ls_file) as f:
                    ls_extent = json.load(f)
            else:
                raise OSError("Landslide extent not found.")
            lsm['extent'] = ls_extent
            # lsm['filename'] = flnm
            lsext = lsm['zoomext']  # Get zoom extent
            ls_alert = lsm['alert']
            rmkeys = ['bin_edges', 'bin_colors', 'zoomext']
        else:
            # Remove any alert keys
            rmkeys = ['bin_edges', 'bin_colors', 'zoomext',
                      'population_alert', 'alert', 'hazard_alert']
        for key in rmkeys:
            if key in lsm:
                lsm.pop(key)

    for lqm in lqmodels:
        if lqm['preferred']:
            filesnippet = lqm['id']
            # Read in extents
            flnm = '%s_extent.json' % filesnippet
            lq_extent_file = [f2 for f2 in files if flnm in f2]
            if len(lq_extent_file) == 1:
                lq_file = os.path.join(event_dir, lq_extent_file[0])
                with open(lq_file) as f:
                    lq_extent = json.load(f)
            else:
                raise OSError("Liquefaction extent not found.")
            lqm['extent'] = lq_extent
            # lqm['filename'] = flnm
            lqext = lqm['zoomext']  # Get zoom extent
            lq_alert = lqm['alert']
            rmkeys = ['bin_edges', 'bin_colors', 'zoomext']
        else:
            # Remove any alert keys
            rmkeys = ['bin_edges', 'bin_colors', 'zoomext',
                      'population_alert', 'alert', 'hazard_alert']
        for key in rmkeys:
            if key in lqm:
                lqm.pop(key)

    # Try to get event info
    shake_grid = ShakeGrid.load(shakefile, adjust='res')
    event_dict = shake_grid.getEventDict()
    sm_dict = shake_grid.getShakeDict()
    base_url = 'https://earthquake.usgs.gov/earthquakes/eventpage/'

    # Is this a point source?
    # point = is_grid_point_source(shake_grid)
    # Temporarily hard code this until we can get a better solution via
    # new grid.xml attributes.
    #point = True

    net = eventsource
    code = eventsourcecode
    time = event_dict['event_timestamp'].strftime('%Y-%m-%dT%H:%M:%SZ')

    event_url = '%s%s%s#executive' % (base_url, net, code)

    # Get extents that work for both unless one is green and the other isn't
    if lq_alert == 'green' and ls_alert != 'green' and ls_alert is not None:
        xmin = lsext['xmin']
        xmax = lsext['xmax']
        ymin = lsext['ymin']
        ymax = lsext['ymax']
    elif lq_alert != 'green' and ls_alert == 'green' and lq_alert is not None:
        xmin = lqext['xmin']
        xmax = lqext['xmax']
        ymin = lqext['ymin']
        ymax = lqext['ymax']
    else:
        xmin = np.min((lqext['xmin'], lsext['xmin']))
        xmax = np.max((lqext['xmax'], lsext['xmax']))
        ymin = np.min((lqext['ymin'], lsext['ymin']))
        ymax = np.max((lqext['ymax'], lsext['ymax']))

    # Should we display the warning about point source?
    rupture_warning = False
    if point and event_dict['magnitude'] > 6.5:
        rupture_warning = True

    # Create info.json for website rendering and metadata purposes
    info_dict = {
        'Summary': {
            'code': code,
            'net': net,
            'magnitude': event_dict['magnitude'],
            'depth': event_dict['depth'],
            'time': time,
            'lat': event_dict['lat'],
            'lon': event_dict['lon'],
            'event_url': event_url,
            'shakemap_version': sm_dict['shakemap_version'],
            'rupture_warning': rupture_warning,
            'point_source': point,
            'zoom_extent': [xmin, xmax, ymin, ymax]
        },
        'Landslides': lsmodels,
        'Liquefaction': lqmodels

    }

    info_file = os.path.join(event_dir, 'info.json')
    with open(info_file, 'w') as f:
        json.dump(info_dict, f)  # allow_nan=False
    filenames.append(info_file)
    return filenames
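
# A minimal invocation sketch, assuming create_info() as defined above;
# lsmodels and lqmodels are the model-summary dictionaries compiled
# upstream (e.g., by hazdev), and the directory and codes are placeholders.
# files = create_info('/data/us1000abcd', lsmodels, lqmodels,
#                     eventsource='us', eventsourcecode='1000abcd',
#                     point=False)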
Exemple #47
0
def modelMap(grids, shakefile=None, suptitle=None, inventory_shapefile=None,
             plotorder=None, maskthreshes=None, colormaps=None, boundaries=None,
             zthresh=0, scaletype='continuous', lims=None, logscale=False,
             ALPHA=0.7, maproads=True, mapcities=True, isScenario=False,
             roadfolder=None, topofile=None, cityfile=None, oceanfile=None,
             roadcolor='#6E6E6E', watercolor='#B8EEFF', countrycolor='#177F10',
             outputdir=None, savepdf=True, savepng=True, showplots=False,
             roadref='unknown', cityref='unknown', oceanref='unknown',
             printparam=False, ds=True, dstype='mean', upsample=False):
    """
    This function creates maps of mapio grid layers (e.g., liquefaction or
    landslide models with their input layers).
    All grids must use the same bounds.
    TO DO: change so that all input layers do not have to have the same
    bounds, test plotting multiple probability layers, and add an option so
    that if PDF and PNG aren't output, the plot opens on screen using
    plt.show()

    :param grids: Dictionary of N layers and metadata formatted like:
        maplayers['layer name']={
        'grid': mapio grid2D object,
        'label': 'label for colorbar and top line of subtitle',
        'type': 'output or input to model',
        'description': 'detailed description of layer for subtitle'}.
      Layer names must be unique.
    :type grids: Dictionary or Ordered dictionary - import collections;
      grids = collections.OrderedDict()
    :param shakefile: optional ShakeMap file (url or full file path) to
      extract information for labels and folder names
    :type shakefile: string
    :param suptitle: This will be displayed at the top of the plots and in the
      figure names
    :type suptitle: string
    :param plotorder: List of keys describing the order to plot the grids, if
      None and grids is an ordered dictionary, it will use the order of the
      dictionary, otherwise it will choose order which may be somewhat random
      but it will always put a probability grid first
    :type plotorder: list
    :param maskthreshes: N x 1 array or list of lower thresholds for masking
      corresponding to order in plotorder or order of OrderedDict if plotorder
      is None. If grids is not an ordered dict and plotorder is not specified,
      this will not work right. If None (default), nothing will be masked
    :param colormaps: List of strings of matplotlib colormaps (e.g. cm.autumn_r)
      corresponding to plotorder or order of dictionary if plotorder is None.
      The list can contain both strings and None e.g. colormaps = ['cm.autumn',
      None, None, 'cm.jet'] and None's will default to default colormap
    :param boundaries: None to show entire study area, 'zoom' to zoom in on the
      area of action (only works if there is a probability layer) using zthresh
      as a threshold, or a dictionary defining lats and lons in the form of
      boundaries.xmin = minlon, boundaries.xmax = maxlon, boundaries.ymin =
      min lat, boundaries.ymax = max lat
    :param zthresh: threshold for computing zooming bounds, only used if
      boundaries = 'zoom'
    :type zthresh: float
    :param scaletype: Type of scale for plotting, 'continuous' or 'binned' -
      will be reflected in colorbar
    :type scaletype: string
    :param lims: None or Nx1 list of tuples or numpy arrays corresponding to
      plotorder defining the limits for saturating the colorbar (vmin, vmax) if
      scaletype is continuous or the bins to use (clev) if scaletype if binned.
      The list can contain tuples, arrays, and Nones, e.g. lims = [(0., 10.),
      None, (0.1, 1.5), np.linspace(0., 1.5, 15)]. When None is specified, the
      program will estimate the limits, when an array is specified but the scale
      type is continuous, vmin will be set to min(array) and vmax will be set
      to max(array)
    :param logscale: None or Nx1 list of Trues and Falses corresponding to
      plotorder defining whether to use a linear or log scale (log10) for
      plotting the layer. This will be reflected in the labels
    :param ALPHA: Transparency for mapping, if there is a hillshade that will
      plot below each layer, it is recommended to set this to at least 0.7
    :type ALPHA: float
    :param maproads: Whether to show roads or not, default True, but requires
      that roadfile is specified and valid to work
    :type maproads: boolean
    :param mapcities: Whether to show cities or not, default True, but requires
      that cityfile is specified and valid to work
    :type mapcities: boolean
    :param isScenario: Whether this is a scenario (True) or a real event (False)
      (default False)
    :type isScenario: boolean
    :param roadfolder: Full file path to folder containing road shapefiles
    :type roadfolder: string
    :param topofile: Full file path to topography grid (GDAL compatible) - this
      is only needed to make a hillshade if a premade hillshade is not specified
    :type topofile: string
    :param cityfile: Full file path to Pager file containing city & population
      information
    :type cityfile: string
    :param roadcolor: Color to use for roads, if plotted, default #6E6E6E
    :type roadcolor: Hex color or other matplotlib compatible way of defining
      color
    :param watercolor: Color to use for oceans, lakes, and rivers, default
      #B8EEFF
    :type watercolor: Hex color or other matplotlib compatible way of defining
      color
    :param countrycolor: Color for country borders, default #177F10
    :type countrycolor: Hex color or other matplotlib compatible way of defining
      color
    :param outputdir: File path for outputting figures, if edict is defined, a
      subfolder based on the event id will be created in this folder. If None,
      will use current directory
    :param savepdf: True to save pdf figure, False to not
    :param savepng: True to save png figure, False to not
    :param ds: True to allow downsampling for display (necessary when arrays
      are quite large), False to not allow
    :param dstype: What function to use in downsampling, options are 'min',
      'max', 'median', or 'mean'
    :param upsample: True to upsample the layer to the DEM resolution for better
      looking hillshades

    :returns:
        * PDF and/or PNG of map
        * Downsampled and trimmed version of input grids. If no
          modification was needed for plotting, this will be identical to
          grids but without the metadata

    """

    if suptitle is None:
        suptitle = ' '

    plt.ioff()

    defaultcolormap = cm.jet

    if shakefile is not None:
        # Load the ShakeMap once and pull both dictionaries from it
        shkgrid = ShakeGrid.load(shakefile, adjust='res')
        edict = shkgrid.getEventDict()
        temp = shkgrid.getShakeDict()
        edict['eventid'] = temp['shakemap_id']
        edict['version'] = temp['shakemap_version']
    else:
        edict = None

    # Get output file location
    if outputdir is None:
        print('No output location given, using current directory for outputs\n')
        outputdir = os.getcwd()
    if edict is not None:
        outfolder = os.path.join(outputdir, edict['event_id'])
    else:
        outfolder = outputdir
    if not os.path.isdir(outfolder):
        os.makedirs(outfolder)

    # Get plotting order, if not specified
    if plotorder is None:
        plotorder = list(grids.keys())

    # Get boundaries to use for all plots
    cut = True
    if boundaries is None:
        cut = False
        keytemp = list(grids.keys())
        boundaries = grids[keytemp[0]]['grid'].getGeoDict()
    elif boundaries == 'zoom':
        # Find probability layer (will just take the maximum bounds if there is
        # more than one)
        keytemp = list(grids.keys())
        key1 = [key for key in keytemp if 'model' in key.lower()]
        if len(key1) == 0:
            print('Could not find model layer to use for zoom, using default boundaries')
            keytemp = list(grids.keys())
            boundaries = grids[keytemp[0]]['grid'].getGeoDict()
        else:
            lonmax = -1.e10
            lonmin = 1.e10
            latmax = -1.e10
            latmin = 1.e10
            for key in key1:
                # get lat/lons of areas affected and accumulate the union
                # of the affected bounds across all model layers
                temp = grids[key]['grid']
                xmin, xmax, ymin, ymax = temp.getBounds()
                lons = np.linspace(xmin, xmax, temp.getGeoDict().nx)
                lats = np.linspace(ymax, ymin, temp.getGeoDict().ny)  # backwards so it plots right
                row, col = np.where(temp.getData() > float(zthresh))
                lonmin = min(lonmin, lons[col].min())
                lonmax = max(lonmax, lons[col].max())
                latmin = min(latmin, lats[row].min())
                latmax = max(latmax, lats[row].max())
                # llons, llats = np.meshgrid(lons, lats)  # make meshgrid
                # llons1 = llons[temp.getData() > float(zthresh)]
                # llats1 = llats[temp.getData() > float(zthresh)]
                # if llons1.min() < lonmin:
                #     lonmin = llons1.min()
                # if llons1.max() > lonmax:
                #     lonmax = llons1.max()
                # if llats1.min() < latmin:
                #     latmin = llats1.min()
                # if llats1.max() > latmax:
                #     latmax = llats1.max()
            boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100}  # dummy fillers, only really care about bounds
            if xmin < lonmin-0.15*(lonmax-lonmin):
                boundaries1['xmin'] = lonmin-0.1*(lonmax-lonmin)
            else:
                boundaries1['xmin'] = xmin
            if xmax > lonmax+0.15*(lonmax-lonmin):
                boundaries1['xmax'] = lonmax+0.1*(lonmax-lonmin)
            else:
                boundaries1['xmax'] = xmax
            if ymin < latmin-0.15*(latmax-latmin):
                boundaries1['ymin'] = latmin-0.1*(latmax-latmin)
            else:
                boundaries1['ymin'] = ymin
            if ymax > latmax+0.15*(latmax-latmin):
                boundaries1['ymax'] = latmax+0.1*(latmax-latmin)
            else:
                boundaries1['ymax'] = ymax
            boundaries = GeoDict(boundaries1, adjust='res')
    else:
        # SEE IF BOUNDARIES ARE SAME AS BOUNDARIES OF LAYERS
        keytemp = list(grids.keys())
        tempgdict = grids[keytemp[0]]['grid'].getGeoDict()
        if np.abs(tempgdict.xmin-boundaries['xmin']) < 0.05 and \
           np.abs(tempgdict.ymin-boundaries['ymin']) < 0.05 and \
           np.abs(tempgdict.xmax-boundaries['xmax']) < 0.05 and \
           np.abs(tempgdict.ymax - boundaries['ymax']) < 0.05:
            print('Input boundaries are almost the same as specified boundaries, no cutting needed')
            boundaries = tempgdict
            cut = False
        else:
            try:
                if boundaries['xmin'] > boundaries['xmax'] or \
                   boundaries['ymin'] > boundaries['ymax']:
                    print('Input boundaries are not usable, using default boundaries')
                    keytemp = list(grids.keys())
                    boundaries = grids[keytemp[0]]['grid'].getGeoDict()
                    cut = False
                else:
                    # Build dummy GeoDict
                    boundaries = GeoDict({'xmin': boundaries['xmin'],
                                          'xmax': boundaries['xmax'],
                                          'ymin': boundaries['ymin'],
                                          'ymax': boundaries['ymax'],
                                          'dx': 100.,
                                          'dy': 100.,
                                          'ny': 100.,
                                          'nx': 100.},
                                         adjust='res')
            except Exception:
                print('Input boundaries are not usable, using default boundaries')
                keytemp = list(grids.keys())
                boundaries = grids[keytemp[0]]['grid'].getGeoDict()
                cut = False

    # Pull out bounds for various uses
    bxmin, bxmax, bymin, bymax = boundaries.xmin, boundaries.xmax, boundaries.ymin, boundaries.ymax

    # Determine if need a single panel or multi-panel plot and if multi-panel,
    # how many and how it will be arranged
    fig = plt.figure()
    numpanels = len(grids)
    if numpanels == 1:
        rowpan = 1
        colpan = 1
        # create the figure and axes instances.
        fig.set_figwidth(5)
    elif numpanels == 2 or numpanels == 4:
        rowpan = np.ceil(numpanels/2.)
        colpan = 2
        fig.set_figwidth(13)
    else:
        rowpan = np.ceil(numpanels/3.)
        colpan = 3
        fig.set_figwidth(15)
    if rowpan == 1:
        fig.set_figheight(rowpan*6.0)
    else:
        fig.set_figheight(rowpan*5.3)

    # Need to update naming to reflect the shakemap version once can get
    # getHeaderData to work, add edict['version'] back into title, maybe
    # shakemap id also?
    fontsizemain = 14.
    fontsizesub = 12.
    fontsizesmallest = 10.
    if rowpan == 1.:
        fontsizemain = 12.
        fontsizesub = 10.
        fontsizesmallest = 8.
    if edict is not None:
        if isScenario:
            title = edict['event_description']
        else:
            timestr = edict['event_timestamp'].strftime('%b %d %Y')
            title = 'M%.1f %s v%i - %s' % (edict['magnitude'], timestr, edict['version'], edict['event_description'])
        plt.suptitle(title+'\n'+suptitle, fontsize=fontsizemain)
    else:
        plt.suptitle(suptitle, fontsize=fontsizemain)

    clear_color = [0, 0, 0, 0.0]

    # Cut all of them and release extra memory

    xbuff = (bxmax-bxmin)/10.
    ybuff = (bymax-bymin)/10.
    cutxmin = bxmin-xbuff
    cutymin = bymin-ybuff
    cutxmax = bxmax+xbuff
    cutymax = bymax+ybuff
    if cut is True:
        newgrids = collections.OrderedDict()
        for k, layer in enumerate(plotorder):
            templayer = grids[layer]['grid']
            try:
                newgrids[layer] = {'grid': templayer.cut(cutxmin, cutxmax, cutymin, cutymax, align=True)}
            except Exception as e:
                print(('Cutting failed, %s, continuing with full layers' % e))
                newgrids = grids
                continue
        del templayer
        gc.collect()
    else:
        newgrids = grids
    tempgdict = newgrids[list(grids.keys())[0]]['grid'].getGeoDict()

    # Upsample layers to same as topofile if desired for better looking hillshades
    if upsample is True and topofile is not None:
        try:
            topodict = GDALGrid.getFileGeoDict(topofile)
            if topodict.dx >= tempgdict.dx or topodict.dy >= tempgdict.dy:
                print('Upsampling not possible, resolution of results already smaller than DEM')
                pass
            else:
                tempgdict1 = GeoDict({'xmin': tempgdict.xmin-xbuff,
                                      'ymin': tempgdict.ymin-ybuff,
                                      'xmax': tempgdict.xmax+xbuff,
                                      'ymax': tempgdict.ymax+ybuff,
                                      'dx': topodict.dx,
                                      'dy': topodict.dy,
                                      'nx': topodict.nx,
                                      'ny': topodict.ny},
                                     adjust='res')
                tempgdict2 = tempgdict1.getBoundsWithin(tempgdict)
                for k, layer in enumerate(plotorder):
                    newgrids[layer]['grid'] = newgrids[layer]['grid'].subdivide(tempgdict2)
        except Exception:
            print('Upsampling failed, continuing')

    # Downsample all of them for plotting, if needed, and replace them in
    # grids (to save memory)
    tempgrid = newgrids[list(grids.keys())[0]]['grid']
    xsize = tempgrid.getGeoDict().nx
    ysize = tempgrid.getGeoDict().ny
    inchesx, inchesy = fig.get_size_inches()
    divx = int(np.round(xsize/(500.*inchesx)))
    divy = int(np.round(ysize/(500.*inchesy)))
    xmin, xmax, ymin, ymax = tempgrid.getBounds()
    gdict = tempgrid.getGeoDict()  # Will be replaced if downsampled
    del tempgrid
    gc.collect()

    if divx <= 1:
        divx = 1
    if divy <= 1:
        divy = 1
    if (divx > 1. or divy > 1.) and ds:
        if dstype == 'max':
            func = np.nanmax
        elif dstype == 'min':
            func = np.nanmin
        elif dstype == 'med':
            func = np.nanmedian
        else:
            func = np.nanmean
        for k, layer in enumerate(plotorder):
            layergrid = newgrids[layer]['grid']
            dat = block_reduce(layergrid.getData().copy(),
                               block_size=(divy, divx),
                               cval=float('nan'),
                               func=func)
            if k == 0:
                lons = block_reduce(np.linspace(xmin, xmax, layergrid.getGeoDict().nx),
                                    block_size=(divx,),
                                    func=np.mean,
                                    cval=float('nan'))
                if math.isnan(lons[-1]):
                    lons[-1] = lons[-2] + (lons[1]-lons[0])
                lats = block_reduce(np.linspace(ymax, ymin, layergrid.getGeoDict().ny),
                                    block_size=(divy,),
                                    func=np.mean,
                                    cval=float('nan'))
                if math.isnan(lats[-1]):
                    lats[-1] = lats[-2] + (lats[1]-lats[0])
                gdict = GeoDict({'xmin': lons.min(),
                                 'xmax': lons.max(),
                                 'ymin': lats.min(),
                                 'ymax': lats.max(),
                                 'dx': np.abs(lons[1]-lons[0]),
                                 'dy': np.abs(lats[1]-lats[0]),
                                 'nx': len(lons),
                                 'ny': len(lats)},
                                adjust='res')
            newgrids[layer]['grid'] = Grid2D(dat, gdict)
        del layergrid, dat
    else:
        lons = np.linspace(xmin, xmax, xsize)
        lats = np.linspace(ymax, ymin, ysize)  # backwards so it plots right side up

    #make meshgrid
    llons1, llats1 = np.meshgrid(lons, lats)

    # See if there is an oceanfile for masking
    bbox = PolygonSH(((cutxmin, cutymin), (cutxmin, cutymax), (cutxmax, cutymax), (cutxmax, cutymin)))
    if oceanfile is not None:
        try:
            f = fiona.open(oceanfile)
            oc = next(f)
            f.close()
            shapes = shape(oc['geometry'])
            # make boundaries into a shape
            ocean = shapes.intersection(bbox)
        except Exception:
            print('Not able to read specified ocean file, will use default ocean masking')
            oceanfile = None
    if inventory_shapefile is not None:
        try:
            f = fiona.open(inventory_shapefile)
            invshp = list(f.items(bbox=(bxmin, bymin, bxmax, bymax)))
            f.close()
            inventory = [shape(inv[1]['geometry']) for inv in invshp]
        except Exception:
            print('unable to read inventory shapefile specified, will not plot inventory')
            inventory_shapefile = None

    # # Find cities that will be plotted
    if mapcities is True and cityfile is not None:
        try:
            mycity = BasemapCities.loadFromGeoNames(cityfile=cityfile)
            bcities = mycity.limitByBounds((bxmin, bxmax, bymin, bymax))
            #bcities = bcities.limitByPopulation(40000)
            bcities = bcities.limitByGrid(nx=4, ny=4, cities_per_grid=2)
        except Exception:
            print('Could not read in cityfile, not plotting cities')
            mapcities = False
            cityfile = None

    # Load in topofile
    if topofile is not None:
        try:
            topomap = GDALGrid.load(topofile, resample=True, method='linear', samplegeodict=gdict)
        except:
            topomap = GMTGrid.load(topofile, resample=True, method='linear', samplegeodict=gdict)
        topodata = topomap.getData().copy()
        # mask oceans if don't have ocean shapefile
        if oceanfile is None:
            topodata = maskoceans(llons1, llats1, topodata, resolution='h', grid=1.25, inlands=True)
    else:
        print('No topofile was provided, so no hillshade is possible\n')
        topomap = None
        topodata = None

    # Load in roads, if needed
    if maproads is True and roadfolder is not None:
        try:
            roadslist = []
            for folder in os.listdir(roadfolder):
                road1 = os.path.join(roadfolder, folder)
                shpfiles = glob.glob(os.path.join(road1, '*.shp'))
                if len(shpfiles):
                    shpfile = shpfiles[0]
                    f = fiona.open(shpfile)
                    shapes = list(f.items(bbox=(bxmin, bymin, bxmax, bymax)))
                    for shapeid, shapedict in shapes:
                        roadslist.append(shapedict)
                    f.close()
        except:
            print('Not able to plot roads')
            roadslist = None

    val = 1
    for k, layer in enumerate(plotorder):
        layergrid = newgrids[layer]['grid']
        if 'label' in list(grids[layer].keys()):
            label1 = grids[layer]['label']
        else:
            label1 = layer
        try:
            sref = grids[layer]['description']['name']
        except:
            sref = None
        ax = fig.add_subplot(rowpan, colpan, val)
        val += 1
        clat = bymin + (bymax-bymin)/2.0
        clon = bxmin + (bxmax-bxmin)/2.0
        # setup of basemap ('lcc' = lambert conformal conic).
        # use major and minor sphere radii from WGS84 ellipsoid.
        m = Basemap(llcrnrlon=bxmin, llcrnrlat=bymin, urcrnrlon=bxmax, urcrnrlat=bymax,
                    rsphere=(6378137.00, 6356752.3142),
                    resolution='l', area_thresh=1000., projection='lcc',
                    lat_1=clat, lon_0=clon, ax=ax)

        x1, y1 = m(llons1, llats1)  # get projection coordinates
        axsize = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
        if k == 0:
            wid, ht = axsize.width, axsize.height
        if colormaps is not None and \
           len(colormaps) == len(newgrids) and \
           colormaps[k] is not None:
            palette = colormaps[k]
        else:  # Find preferred default color map for each type of layer
            if 'prob' in layer.lower() or 'pga' in layer.lower() or \
               'pgv' in layer.lower() or 'cohesion' in layer.lower() or \
               'friction' in layer.lower() or 'fs' in layer.lower():
                palette = cm.jet
            elif 'slope' in layer.lower():
                palette = cm.gnuplot2
            elif 'precip' in layer.lower():
                palette = cm2.s3pcpn
            else:
                palette = defaultcolormap

        if topodata is not None:
            if k == 0:
                ptopo = m.transform_scalar(
                    np.flipud(topodata), lons+0.5*gdict.dx,
                    lats[::-1]-0.5*gdict.dy, np.round(300.*wid),
                    np.round(300.*ht), returnxy=False, checkbounds=False,
                    order=1, masked=False)
                #use lightsource class to make our shaded topography
                ls = LightSource(azdeg=135, altdeg=45)
                ls1 = LightSource(azdeg=120, altdeg=45)
                ls2 = LightSource(azdeg=225, altdeg=45)
                intensity1 = ls1.hillshade(ptopo, fraction=0.25, vert_exag=1.)
                intensity2 = ls2.hillshade(ptopo, fraction=0.25, vert_exag=1.)
                intensity = intensity1*0.5 + intensity2*0.5
                #hillshm_im = m.transform_scalar(np.flipud(hillshm), lons, lats[::-1], np.round(300.*wid), np.round(300.*ht), returnxy=False, checkbounds=False, order=0, masked=False)
            #m.imshow(hillshm_im, cmap='Greys', vmin=0., vmax=3., zorder=1, interpolation='none')  # vmax = 3 to soften colors to light gray
            #m.pcolormesh(x1, y1, hillshm, cmap='Greys', linewidth=0., rasterized=True, vmin=0., vmax=3., edgecolors='none', zorder=1);
            # plt.draw()

        # Get the data
        dat = layergrid.getData().copy()

        # Mask out anything below any specified thresholds. This might need
        # to move up before downsampling; otherwise averaging can create the
        # illusion of no hazard in places where some was averaged out.
        if maskthreshes is not None and len(maskthreshes) == len(newgrids):
            if maskthreshes[k] is not None:
                dat[dat <= maskthreshes[k]] = float('NaN')
                dat = np.ma.array(dat, mask=np.isnan(dat))

        if logscale is not False and len(logscale) == len(newgrids):
            if logscale[k] is True:
                dat = np.log10(dat)
                label1 = r'$log_{10}$(' + label1 + ')'

        if scaletype.lower() == 'binned':
            # Find order of magnitude of the range to know how to scale
            order = np.round(np.log10(np.nanmax(dat) - np.nanmin(dat)))
            if order < 1.:
                scal = 10**-order
            else:
                scal = 1.
            if lims is None or len(lims) != len(newgrids):
                clev = (np.linspace(np.floor(scal*np.nanmin(dat)), np.ceil(scal*np.nanmax(dat)), 10))/scal
            else:
                if lims[k] is None:
                    clev = (np.linspace(np.floor(scal*np.nanmin(dat)), np.ceil(scal*np.nanmax(dat)), 10))/scal
                else:
                    clev = lims[k]
            # Adjust to colorbar levels
            dat[dat < clev[0]] = clev[0]
            for j, level in enumerate(clev[:-1]):
                dat[(dat >= clev[j]) & (dat < clev[j+1])] = clev[j]
            # So colorbar saturates at top
            dat[dat > clev[-1]] = clev[-1]
            #panelhandle = m.contourf(x1, y1, datm, clev, cmap=palette, linewidth=0., alpha=ALPHA, rasterized=True)
            vmin = clev[0]
            vmax = clev[-1]
        else:
            if lims is not None and len(lims) == len(newgrids):
                if lims[k] is None:
                    vmin = np.nanmin(dat)
                    vmax = np.nanmax(dat)
                else:
                    vmin = lims[k][0]
                    vmax = lims[k][-1]
            else:
                vmin = np.nanmin(dat)
                vmax = np.nanmax(dat)

        # Mask out cells overlying oceans or block with a shapefile if available
        if oceanfile is None:
            dat = maskoceans(llons1, llats1, dat, resolution='h', grid=1.25, inlands=True)
        else:
            #patches = []
            if type(ocean) is PolygonSH:
                ocean = [ocean]
            for oc in ocean:
                patch = getProjectedPatch(oc, m, edgecolor="#006280", facecolor=watercolor, lw=0.5, zorder=4.)
                #x, y = m(oc.exterior.xy[0], oc.exterior.xy[1])
                #xy = zip(x, y)
                #patch = Polygon(xy, facecolor=watercolor, edgecolor="#006280", lw=0.5, zorder=4.)
                ##patches.append(Polygon(xy, facecolor=watercolor, edgecolor=watercolor, zorder=500.))
                ax.add_patch(patch)
            ##ax.add_collection(PatchCollection(patches))

        if inventory_shapefile is not None:
            for in1 in inventory:
                if 'point' in str(type(in1)):
                    x, y = in1.xy
                    x = x[0]
                    y = y[0]
                    m.scatter(x, y, c='m', s=50, latlon=True, marker='^',
                              zorder=100001)
                else:
                    x, y = m(in1.exterior.xy[0], in1.exterior.xy[1])
                    xy = list(zip(x, y))
                    patch = Polygon(xy, facecolor='none', edgecolor='k', lw=0.5, zorder=10.)
                    #patches.append(Polygon(xy, facecolor=watercolor, edgecolor=watercolor, zorder=500.))
                    ax.add_patch(patch)
        palette.set_bad(clear_color, alpha=0.0)
        # Plot it up
        dat_im = m.transform_scalar(
            np.flipud(dat), lons+0.5*gdict.dx, lats[::-1]-0.5*gdict.dy,
            np.round(300.*wid), np.round(300.*ht), returnxy=False,
            checkbounds=False, order=0, masked=True)
        if topodata is not None:  # Drape over hillshade
            #turn data into an RGBA image
            cmap = palette
            #adjust data so scaled between vmin and vmax and between 0 and 1
            dat1 = dat_im.copy()
            dat1[dat1 < vmin] = vmin
            dat1[dat1 > vmax] = vmax
            dat1 = (dat1 - vmin)/(vmax-vmin)
            rgba_img = cmap(dat1)
            maskvals = np.dstack((dat1.mask, dat1.mask, dat1.mask))
            rgb = np.squeeze(rgba_img[:, :, 0:3])
            rgb[maskvals] = 1.
            draped_hsv = ls.blend_hsv(rgb, np.expand_dims(intensity, 2))
            m.imshow(draped_hsv, zorder=3., interpolation='none')
            # This is just a dummy layer that will be deleted to make the
            # colorbar look right
            panelhandle = m.imshow(dat_im, cmap=palette, zorder=0.,
                                   vmin=vmin, vmax=vmax)
        else:
            panelhandle = m.imshow(dat_im, cmap=palette, zorder=3.,
                                   vmin=vmin, vmax=vmax, interpolation='none')
        #panelhandle = m.pcolormesh(x1, y1, dat, linewidth=0., cmap=palette, vmin=vmin, vmax=vmax, alpha=ALPHA, rasterized=True, zorder=2.);
        #panelhandle.set_edgecolors('face')
        # add colorbar
        cbfmt = '%1.1f'
        if vmax is not None and vmin is not None:
            if (vmax - vmin) < 1.:
                cbfmt = '%1.2f'
            elif vmax > 5.:  # (vmax - vmin) > len(clev):
                cbfmt = '%1.0f'

        #norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
        if scaletype.lower() == 'binned':
            cbar = fig.colorbar(panelhandle, spacing='proportional',
                                ticks=clev, boundaries=clev, fraction=0.036,
                                pad=0.04, format=cbfmt, extend='both')
            #cbar1 = ColorbarBase(cbar.ax, cmap=palette, norm=norm, spacing='proportional', ticks=clev, boundaries=clev, fraction=0.036, pad=0.04, format=cbfmt, extend='both', extendfrac='auto')

        else:
            cbar = fig.colorbar(panelhandle, fraction=0.036, pad=0.04,
                                extend='both', format=cbfmt)
            #cbar1 = ColorbarBase(cbar.ax, cmap=palette, norm=norm, fraction=0.036, pad=0.04, extend='both', extendfrac='auto', format=cbfmt)

        if topodata is not None:
            panelhandle.remove()

        cbar.set_label(label1, fontsize=10)
        cbar.ax.tick_params(labelsize=8)

        parallels = m.drawparallels(getMapLines(bymin, bymax, 3),
                                    labels=[1, 0, 0, 0], linewidth=0.5,
                                    labelstyle='+/-', fontsize=9, xoffset=-0.8,
                                    color='gray', zorder=100.)
        m.drawmeridians(getMapLines(bxmin, bxmax, 3), labels=[0, 0, 0, 1],
                        linewidth=0.5, labelstyle='+/-', fontsize=9,
                        color='gray', zorder=100.)
        for par in parallels:
            try:
                parallels[par][1][0].set_rotation(90)
            except:
                pass

        #draw roads on the map, if they were provided to us
        if maproads is True and roadslist is not None:
            try:
                for road in roadslist:
                    try:
                        xy = list(road['geometry']['coordinates'])
                        roadx, roady = list(zip(*xy))
                        mapx, mapy = m(roadx, roady)
                        m.plot(mapx, mapy, roadcolor, lw=0.5, zorder=9)
                    except:
                        continue
            except Exception as e:
                print(('Failed to plot roads, %s' % e))

        #add city names to map
        if mapcities is True and cityfile is not None:
            try:
                fontname = 'Arial'
                fontsize = 8
                if k == 0:  # Only need to choose cities first time and then apply to rest
                    fcities = bcities.limitByMapCollision(
                        m, fontname=fontname, fontsize=fontsize)
                    ctlats, ctlons, names = fcities.getCities()
                    cxis, cyis = m(ctlons, ctlats)
                for ctlat, ctlon, cxi, cyi, name in zip(ctlats, ctlons, cxis, cyis, names):
                    m.scatter(ctlon, ctlat, c='k', latlon=True, marker='.',
                              zorder=100000)
                    ax.text(cxi, cyi, name, fontname=fontname,
                            fontsize=fontsize, zorder=100000)
            except Exception as e:
                print('Failed to plot cities, %s' % e)

        #draw star at epicenter
        plt.sca(ax)
        if edict is not None:
            elat, elon = edict['lat'], edict['lon']
            ex, ey = m(elon, elat)
            plt.plot(ex, ey, '*', markeredgecolor='k', mfc='None', mew=1.0,
                     ms=15, zorder=10000.)

        m.drawmapboundary(fill_color=watercolor)

        m.fillcontinents(color=clear_color, lake_color=watercolor)
        m.drawrivers(color=watercolor)
        ##m.drawcoastlines()

        #draw country boundaries
        m.drawcountries(color=countrycolor, linewidth=1.0)

        #add map scale
        m.drawmapscale((bxmax+bxmin)/2., (bymin+(bymax-bymin)/9.), clon, clat,
                       np.round((((bxmax-bxmin)*111)/5)/10.)*10,
                       barstyle='fancy', zorder=10)

        # Add border
        autoAxis = ax.axis()
        rec = Rectangle((autoAxis[0]-0.7, autoAxis[2]-0.2),
                        (autoAxis[1]-autoAxis[0])+1,
                        (autoAxis[3]-autoAxis[2])+0.4,
                        fill=False, lw=1, zorder=1e8)
        rec = ax.add_patch(rec)
        rec.set_clip_on(False)

        plt.draw()

        if sref is not None:
            label2 = '%s\nsource: %s' % (label1, sref)  # '%s\n' % label1 + r'{\fontsize{10pt}{3em}\selectfont{}%s}' % sref  #
        else:
            label2 = label1
        plt.title(label2, axes=ax, fontsize=fontsizesub)

        #draw scenario watermark, if scenario
        if isScenario:
            plt.sca(ax)
            cx, cy = m(clon, clat)
            plt.text(cx, cy, 'SCENARIO', rotation=45, alpha=0.10, size=72, ha='center', va='center', color='red')

        #if ds: # Could add this to print "downsampled" on map
        #    plt.text()

        if k == 1 and rowpan == 1:
            # adjust single level plot
            axsize = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
            ht2 = axsize.height
            fig.set_figheight(ht2*1.6)
        else:
            plt.tight_layout()

        # Make room for suptitle - tight layout doesn't account for it
        plt.subplots_adjust(top=0.92)

    if printparam is True:
        try:
            fig = plt.gcf()
            dictionary = grids['model']['description']['parameters']
            paramstring = 'Model parameters: '
            halfway = np.ceil(len(dictionary)/2.)
            for i, key in enumerate(dictionary):
                if i == halfway and colpan == 1:
                    paramstring += '\n'
                paramstring += ('%s = %s; ' % (key, dictionary[key]))
            print(paramstring)
            fig.text(0.01, 0.015, paramstring, fontsize=fontsizesmallest)
            plt.draw()
        except:
            print('Could not display model parameters')

    if edict is not None:
        eventid = edict['eventid']
    else:
        eventid = ''

    time1 = datetime.datetime.utcnow().strftime('%d%b%Y_%H%M')
    outfile = os.path.join(outfolder, '%s_%s_%s.pdf' % (eventid, suptitle, time1))
    pngfile = os.path.join(outfolder, '%s_%s_%s.png' % (eventid, suptitle, time1))

    if savepdf is True:
        print('Saving map output to %s' % outfile)
        plt.savefig(outfile, dpi=300)
    if savepng is True:
        print('Saving map output to %s' % pngfile)
        plt.savefig(pngfile)
    if showplots is True:
        plt.show()
    else:
        plt.close(fig)

    return newgrids
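
The downsampling branch near the top of this example relies on skimage.measure.block_reduce. A minimal self-contained sketch of that call, assuming scikit-image and NumPy are available (the 4x4 array is illustrative only):

import numpy as np
from skimage.measure import block_reduce

arr = np.arange(16, dtype=float).reshape(4, 4)
# Collapse each 2x2 block to its nanmean, padding ragged edges with NaN,
# mirroring the dstype='mean' path in the example above
small = block_reduce(arr, block_size=(2, 2), func=np.nanmean, cval=float('nan'))
print(small)  # [[ 2.5  4.5] [10.5 12.5]]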
Exemple #48
0
def computeHagg(grid2D,
                proj='moll',
                probthresh=0.0,
                shakefile=None,
                shakethreshtype='pga',
                shakethresh=0.0):
    """
    Computes the Aggregate Hazard (Hagg), which is equal to the
    probability times the area of the grid cell. For models that compute
    areal coverage, this is equivalent to the total predicted area
    affected in km2.

    Args:
        grid2D: grid2D object of model output.
        proj: projection to use to obtain equal area, 'moll'  mollweide, or
            'laea' lambert equal area.
        probthresh: Probability threshold, any values less than this will not
            be included in aggregate hazard estimation.
        shakefile: Optional, path to shakemap file to use for ground motion
            threshold.
        shakethreshtype: Optional, Type of ground motion to use for
            shakethresh, 'pga', 'pgv', or 'mmi'.
        shakethresh: Optional, Float or list of shaking thresholds in %g for
            pga, cm/s for pgv, float for mmi.

    Returns:
        Aggregate hazard (float) if no shakethresh or only one shakethresh
        was defined; otherwise, a list of floats of aggregate hazard for
        all shakethresh values.
    """
    Hagg = []
    bounds = grid2D.getBounds()
    lat0 = np.mean((bounds[2], bounds[3]))
    lon0 = np.mean((bounds[0], bounds[1]))
    projs = ('+proj=%s +lat_0=%f +lon_0=%f +x_0=0 +y_0=0 +ellps=WGS84 '
             '+units=km +no_defs' % (proj, lat0, lon0))
    geodict = grid2D.getGeoDict()

    if shakefile is not None:
        if not isinstance(shakethresh, (list, np.ndarray)):
            shakethresh = [shakethresh]
        for shaket in shakethresh:
            if shaket < 0.:
                raise Exception('shaking threshold must be greater than or '
                                'equal to zero')
        tmpdir = tempfile.mkdtemp()
        # resample shakemap to grid2D
        temp = ShakeGrid.load(shakefile)
        junkfile = os.path.join(tmpdir, 'temp.bil')
        GDALGrid.copyFromGrid(temp.getLayer(shakethreshtype)).save(junkfile)
        shk = quickcut(junkfile, geodict, precise=True, method='bilinear')
        shutil.rmtree(tmpdir)
        if shk.getGeoDict() != geodict:
            raise Exception('shakemap was not resampled to exactly the same '
                            'geodict as the model')

    if probthresh < 0.:
        raise Exception('probability threshold must be greater than or '
                        'equal to zero')

    grid = grid2D.project(projection=projs, method='bilinear')
    geodictRS = grid.getGeoDict()
    cell_area_km2 = geodictRS.dx * geodictRS.dy
    model = grid.getData()
    model[np.isnan(model)] = -1.
    if shakefile is not None:
        for shaket in shakethresh:
            modcop = model.copy()
            shkgrid = shk.project(projection=projs)
            shkdat = shkgrid.getData()
            # use -1 to avoid nan errors and warnings, will always be thrown
            # out because default is 0.
            shkdat[np.isnan(shkdat)] = -1.
            modcop[shkdat < shaket] = -1.
            Hagg.append(np.sum(modcop[modcop >= probthresh] * cell_area_km2))
    else:
        Hagg.append(np.sum(model[model >= probthresh] * cell_area_km2))
    if len(Hagg) == 1:
        Hagg = Hagg[0]
    return Hagg
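
A hedged usage sketch of computeHagg; the grid and ShakeMap paths below are hypothetical placeholders, not files that ship with this example:

from mapio.gdal import GDALGrid

model_grid = GDALGrid.load('model_output.bil')  # hypothetical model output grid
hagg = computeHagg(model_grid, probthresh=0.1,
                   shakefile='grid.xml',        # hypothetical ShakeMap grid.xml
                   shakethreshtype='pga',
                   shakethresh=[10.0, 20.0])    # thresholds in %g
print(hagg)  # a list of two Hagg values in km2, one per threshold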
Exemple #49
0
def hazus_liq(shakefile,
              config,
              uncertfile=None,
              saveinputs=False,
              modeltype=None,
              displmodel=None,
              probtype=None,
              bounds=None):
    """
    Method for computing the probability of liquefaction using the Hazus
    approach, with the Wills et al. (2015) Vs30 map of California defining
    the susceptibility classes and the Fan et al. global water table model
    providing groundwater depth.
    """
    layers = config['hazus_liq_cal']['layers']
    vs30_file = layers['vs30']['file']
    wtd_file = layers['watertable']['file']
    shkgdict = ShakeGrid.getFileGeoDict(shakefile)
    fgeodict = GMTGrid.getFileGeoDict(vs30_file)[0]

    #---------------------------------------------------------------------------
    # Loading
    #---------------------------------------------------------------------------
    shakemap = ShakeGrid.load(shakefile,
                              fgeodict,
                              resample=True,
                              method='linear',
                              doPadding=True)
    PGA = shakemap.getLayer('pga').getData() / 100  # convert to g
    griddict, eventdict, specdict, fields, uncertainties = getHeaderData(
        shakefile)
    mag = eventdict['magnitude']

    # Correction factor for moment magnitudes other than M=7.5
    k_m = 0.0027 * mag**3 - 0.0267 * mag**2 - 0.2055 * mag + 2.9188

    #---------------------------------------------------------------------------
    # Susceptibility from Vs30
    #---------------------------------------------------------------------------
    vs30_grid = GMTGrid.load(vs30_file)

    vs30 = vs30_grid.getData()
    p_ml = np.zeros_like(vs30)
    a = np.zeros_like(vs30)
    b = np.zeros_like(vs30)
    for k, v in config['hazus_liq_cal']['parameters'].items():
        ind = np.where(vs30 == float(v[0]))
        if v[1] == "VH":
            p_ml[ind] = 0.25
            a[ind] = 9.09
            b[ind] = -0.82
        if v[1] == "H":
            p_ml[ind] = 0.2
            a[ind] = 7.67
            b[ind] = -0.92
        if v[1] == "M":
            p_ml[ind] = 0.1
            a[ind] = 6.67
            b[ind] = -1.0
        if v[1] == "L":
            p_ml[ind] = 0.05
            a[ind] = 5.57
            b[ind] = -1.18
        if v[1] == "VL":
            p_ml[ind] = 0.02
            a[ind] = 4.16
            b[ind] = -1.08

    # Conditional liquefaction probability for a given susceptibility category
    # at a specified PGA
    p_liq_pga = a * PGA + b
    p_liq_pga = p_liq_pga.clip(min=0, max=1)

    #---------------------------------------------------------------------------
    # Water table
    #---------------------------------------------------------------------------
    wtd_grid = GMTGrid.load(wtd_file,
                            fgeodict,
                            resample=True,
                            method=layers['watertable']['interpolation'],
                            doPadding=True)
    tmp = wtd_grid._data
    tmp = np.nan_to_num(tmp)

    # Convert to ft
    wt_ft = tmp * 3.28084

    # Correction factor for groundwater depths other than five feet
    k_w = 0.022 * wt_ft + 0.93

    #---------------------------------------------------------------------------
    # Combine to get conditional liquefaction probability
    #---------------------------------------------------------------------------
    p_liq_sc = p_liq_pga * p_ml / k_m / k_w

    #---------------------------------------------------------------------------
    # Turn output and inputs into into grids and put in maplayers dictionary
    #---------------------------------------------------------------------------
    maplayers = collections.OrderedDict()

    temp = shakemap.getShakeDict()
    shakedetail = '%s_ver%s' % (temp['shakemap_id'], temp['shakemap_version'])
    modelsref = config['hazus_liq_cal']['shortref']
    modellref = config['hazus_liq_cal']['longref']
    modeltype = 'Hazus/Wills'
    maplayers['model'] = {
        'grid': GDALGrid(p_liq_sc, fgeodict),
        'label': 'Probability',
        'type': 'output',
        'description': {
            'name': modelsref,
            'longref': modellref,
            'units': 'coverage',
            'shakemap': shakedetail,
            'parameters': {
                'modeltype': modeltype
            }
        }
    }

    if saveinputs is True:
        maplayers['pga'] = {
            'grid': GDALGrid(PGA, fgeodict),
            'label': 'PGA (g)',
            'type': 'input',
            'description': {
                'units': 'g',
                'shakemap': shakedetail
            }
        }
        maplayers['vs30'] = {
            'grid': GDALGrid(vs30, fgeodict),
            'label': 'Vs30 (m/s)',
            'type': 'input',
            'description': {
                'units': 'm/s'
            }
        }
        maplayers['wtd'] = {
            'grid': GDALGrid(wtd_grid._data, fgeodict),
            'label': 'wtd (m)',
            'type': 'input',
            'description': {
                'units': 'm'
            }
        }
    return maplayers
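
To make the Hazus combination above concrete, a worked numeric sketch with illustrative inputs (the 'H' susceptibility coefficients come from the mapping in the loop above):

import numpy as np

mag, pga, wt_m = 6.5, 0.3, 3.0     # magnitude, PGA in g, water table depth in m
a, b, p_ml = 7.67, -0.92, 0.2      # 'H' susceptibility class coefficients
k_m = 0.0027*mag**3 - 0.0267*mag**2 - 0.2055*mag + 2.9188  # magnitude correction
k_w = 0.022 * (wt_m * 3.28084) + 0.93  # groundwater correction, depth in feet
p_liq_pga = np.clip(a*pga + b, 0, 1)   # conditional probability at this PGA
p_liq = p_liq_pga * p_ml / k_m / k_w
print(round(p_liq, 4))  # approximately 0.1458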
Exemple #50
0
def kritikos_fuzzygamma(shakefile, config, bounds=None):
    """
    Runs kritikos procedure with fuzzy gamma
    """

    cmodel = config['statistic_models']['kritikos_2014']
    gamma = cmodel['gamma_value']

    ## Read in layer files and get data
    layers = cmodel['layers']
    try:
        # Slope
        slope_file = layers['slope']
        # DFF
        dff_file = layers['dff']
        # DFS
        dfs_file = layers['dfs']
        # Slope Position
        slope_pos_file = layers['slope_pos']
    except:
        print('Unable to retrieve grid data.')

    try:
        div = cmodel['divisor']
        # Load in divisors
        MMI_div = div['MMI']
        slope_div = div['slope']
        dff_div = div['dff']
        dfs_div = div['dfs']
        slope_pos_div = div['slope_pos']
    except:
        print('Unable to retrieve divisors.')

    try:
        power = cmodel['power']
        # Load in powers
        MMI_power = power['MMI']
        slope_power = power['slope']
        dff_power = power['dff']
        dfs_power = power['dfs']
        slope_pos_power = power['slope_pos']
    except:
        print('Unable to retrieve powers.')

    # Cut and resample all files
    try:
        shkgdict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
        slopedict = GDALGrid.getFileGeoDict(slope_file)
        if bounds is not None:  # Make sure bounds are within ShakeMap grid
            if shkgdict.xmin > bounds['xmin'] or shkgdict.xmax < bounds['xmax'] or \
               shkgdict.ymin > bounds['ymin'] or shkgdict.ymax < bounds['ymax']:
                print('Specified bounds are outside shakemap area, using ShakeMap bounds instead')
                bounds = None
        if bounds is not None:
            tempgdict = GeoDict({'xmin': bounds['xmin'], 'ymin': bounds['ymin'],
                                 'xmax': bounds['xmax'], 'ymax': bounds['ymax'],
                                 'dx': 100., 'dy': 100., 'nx': 100., 'ny': 100.},
                                adjust='res')
            gdict = slopedict.getBoundsWithin(tempgdict)
        else:  # Get boundaries from shakemap if not specified
            gdict = slopedict.getBoundsWithin(shkgdict)
    except:
        print('Unable to create base geodict.')

    # Load in data
    try:
        # Load in slope data
        slopegrid = GDALGrid.load(slope_file, samplegeodict=gdict, resample=False)
        slope_data = slopegrid.getData().astype(float)
        # Load in MMI
        shakemap = ShakeGrid.load(shakefile, samplegeodict=gdict, resample=True,
                                  method='linear', adjust='res')
        MMI_data = shakemap.getLayer('mmi').getData().astype(float)
        # Load in DFF
        dffgrid = GDALGrid.load(dff_file, samplegeodict=gdict, resample=False)
        dff_data = dffgrid.getData().astype(float)
        # Load in DFS
        dfsgrid = GDALGrid.load(dfs_file, samplegeodict=gdict, resample=False)
        dfs_data = dfsgrid.getData().astype(float)
        # Load in slope position
        slope_pos_grid = GDALGrid.load(slope_pos_file, samplegeodict=gdict,
                                       resample=False)
        slope_pos_data = slope_pos_grid.getData().astype(float)
    except:
        print('Data could not be retrieved.')

    # Classification spec from the model config (each layer is reclassified
    # as 1, 2, 3, ... in the order listed):
    #     [[[classification]]]
    #         MMI = 5, 6, 7, 8, 9
    #         slope = 0-4, 5-9, 10-14, 15-19, 20-24, 25-29, 30-34, 35-39, 40-44, 45-49, 50+
    #         dff = 0-4, 5-9, 10-19, 20-29, 30-39, 40-49, 50+
    #         dfs = 0-0.49, 0.5-0.99, 1.0-1.49, 1.5-1.99, 2.0-2.49, 2.5+
    #         slope_pos = 'Flat', 'Valley', 'Mid-Slope', 'Ridge'
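
The example is cut off before the fuzzy overlay itself. For orientation, a minimal sketch of the standard fuzzy gamma operator that Kritikos-style procedures use to combine layer memberships; the membership arrays are illustrative placeholders, not the model's actual layers:

import numpy as np

def fuzzy_gamma(memberships, gamma):
    # Standard fuzzy gamma overlay: a compromise between the fuzzy algebraic
    # sum and the fuzzy algebraic product, controlled by gamma in [0, 1]
    prod = np.prod(memberships, axis=0)
    fuzzy_sum = 1.0 - np.prod(1.0 - memberships, axis=0)
    return fuzzy_sum**gamma * prod**(1.0 - gamma)

mu = np.array([[0.8, 0.2], [0.6, 0.4], [0.9, 0.1]])  # three illustrative layers
print(fuzzy_gamma(mu, gamma=0.9))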