def lognormal_object_test():
    """Exercise LognormalModel: loss-rate calculation, fatality comparison,
    and sorting of models by expected fatalities."""
    # Reference (theta, beta) lognormal parameters per country code.
    model_dict = {
        'AF': (11.613073, 0.180683),
        'CN': (10.328811, 0.100058),
        'JP': (11.862534, 0.100779),
        'US': (46.155474, 0.434135),
        'IR': (9.318099, 0.100001)
    }
    model1 = LognormalModel('IR', 9.318099, 0.100001, 0.0)
    model2 = LognormalModel('US', 46.155474, 0.434135, 0.0)
    model3 = LognormalModel('IR', 10.986839, 0.128601, 0.0)

    print('Testing that fatality rates calculation is correct...')
    rates = model3.getLossRates(np.arange(5, 10))
    testrates = np.array([
        4.62833172e-10, 1.27558914e-06, 2.28027416e-04, 6.81282956e-03,
        6.04383822e-02
    ])
    np.testing.assert_almost_equal(rates, testrates)
    print('Fatality rates calculation is correct.')

    print('Testing model fatality comparison...')
    # model1 (IR) has a much smaller theta, so it is the deadlier model.
    assert model1 > model2
    print('Passed model fatality comparison.')

    print('More complete test of model fatality comparison...')
    mlist = []
    for key, values in model_dict.items():
        mlist.append(LognormalModel(key, values[0], values[1], 0.0))
    mlist.sort()
    names = [m.name for m in mlist]
    # BUG FIX: the original asserted `names != [...]`, which passes for ANY
    # ordering except the expected one and therefore never verifies the sort.
    # Assert the expected fatality ordering (least to most deadly) instead.
    assert names == ['JP', 'US', 'CN', 'IR', 'AF']
    print('Passed more complete test of model fatality comparison.')

    print('Sorted list of country models:')
    print('%5s %6s %6s %-6s %-14s' %
          ('Name', 'Theta', 'Beta', 'Area', 'Deaths'))
    for model in mlist:
        # Uniform exposure of 1M people at each MMI 5..9.
        exp_pop = np.array([1e6, 1e6, 1e6, 1e6, 1e6])
        mmirange = np.arange(5, 10)
        deaths = model.getLosses(exp_pop, mmirange)
        print('%5s %6.3f %6.3f %6.4f %14.4f' %
              (model.name, model.theta, model.beta, model.getArea(), deaths))
def basic_test():
    """End-to-end check: build synthetic grids, compute exposure, and verify
    empirical fatality calculations (aggregate, gridded, and rate-modified)."""
    # Synthetic 5x5 MMI, population, and country-code (ISO numeric) grids.
    mmi_values = np.array([[7, 8, 8, 8, 7],
                           [8, 9, 9, 9, 8],
                           [8, 9, 10, 9, 8],
                           [8, 9, 9, 8, 8],
                           [7, 8, 8, 6, 5]], dtype=np.float32)
    pop_values = np.ones_like(mmi_values) * 1e7
    iso_values = np.array([[4, 4, 4, 4, 4],
                           [4, 4, 4, 4, 4],
                           [4, 4, 156, 156, 156],
                           [156, 156, 156, 156, 156],
                           [156, 156, 156, 156, 156]], dtype=np.int32)

    shakefile = get_temp_file_name()
    popfile = get_temp_file_name()
    isofile = get_temp_file_name()

    geodict = GeoDict({'xmin': 0.5, 'xmax': 4.5,
                       'ymin': 0.5, 'ymax': 4.5,
                       'dx': 1.0, 'dy': 1.0,
                       'nx': 5, 'ny': 5})
    layers = OrderedDict([('mmi', mmi_values)])
    event_dict = {'event_id': 'us12345678',
                  'magnitude': 7.8,
                  'depth': 10.0,
                  'lat': 34.123,
                  'lon': -118.123,
                  'event_timestamp': datetime.utcnow(),
                  'event_description': 'foo',
                  'event_network': 'us'}
    shake_dict = {'event_id': 'us12345678',
                  'shakemap_id': 'us12345678',
                  'shakemap_version': 1,
                  'code_version': '4.5',
                  'process_timestamp': datetime.utcnow(),
                  'shakemap_originator': 'us',
                  'map_status': 'RELEASED',
                  'shakemap_event_type': 'ACTUAL'}
    unc_dict = {'mmi': (1, 1)}

    # Persist the three grids so Exposure can read them back from disk.
    shakegrid = ShakeGrid(layers, geodict, event_dict, shake_dict, unc_dict)
    shakegrid.save(shakefile)
    write(Grid2D(pop_values, geodict.copy()), popfile, 'netcdf')
    write(Grid2D(iso_values, geodict.copy()), isofile, 'netcdf')

    # Population growth rates keyed by ISO numeric country code.
    ratedict = {4: {'start': [2010, 2012, 2014, 2016],
                    'end': [2012, 2014, 2016, 2018],
                    'rate': [0.01, 0.02, 0.03, 0.04]},
                156: {'start': [2010, 2012, 2014, 2016],
                      'end': [2012, 2014, 2016, 2018],
                      'rate': [0.02, 0.03, 0.04, 0.05]}}
    popgrowth = PopulationGrowth(ratedict)
    popyear = datetime.utcnow().year
    exposure = Exposure(popfile, popyear, isofile, popgrowth=popgrowth)
    expdict = exposure.calcExposure(shakefile)

    country_models = [LognormalModel('AF', 11.613073, 0.180683, 1.0),
                      LognormalModel('CN', 10.328811, 0.100058, 1.0)]
    fatmodel = EmpiricalLoss(country_models)

    # For the purposes of this test, override the rates for Afghanistan and
    # China with simpler per-MMI numbers.
    fatmodel.overrideModel(
        'AF',
        np.array([0, 0, 0, 0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 0],
                 dtype=np.float32))
    fatmodel.overrideModel(
        'CN',
        np.array([0, 0, 0, 0, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 0],
                 dtype=np.float32))

    print('Testing very basic fatality calculation...')
    fatdict = fatmodel.getLosses(expdict)
    # strictly speaking, the afghanistant fatalities should be 462,000 but
    # floating point precision dictates otherwise.
    expected = {'CN': 46111, 'AF': 461999, 'TotalFatalities': 508110}
    for key, value in fatdict.items():
        assert value == expected[key]
    print('Passed very basic fatality calculation...')

    print('Testing grid fatality calculations...')
    mmi_layer = exposure.getShakeGrid().getLayer('mmi').getData()
    pop_layer = exposure.getPopulationGrid().getData()
    iso_layer = exposure.getCountryGrid().getData()
    fatgrid = fatmodel.getLossGrid(mmi_layer, pop_layer, iso_layer)
    assert np.nansum(fatgrid) == 508111
    print('Passed grid fatality calculations...')

    # Modify the loss rates and feed them back in: doubling the rates should
    # make the same exposure (roughly the 2015-09-16 Chile event) deadlier.
    chile = LognormalModel('CL', 19.786773, 0.259531, 0.0)
    base_rates = chile.getLossRates(np.arange(5, 10))
    doubled_rates = base_rates * 2  # does this make event twice as deadly?
    expo_pop = np.array(
        [0, 0, 0, 1047000, 7314000, 1789000, 699000, 158000, 0, 0])
    mmirange = np.arange(5, 10)
    chile_deaths = chile.getLosses(expo_pop[4:9], mmirange)
    chile_double_deaths = chile.getLosses(expo_pop[4:9], mmirange,
                                          rates=doubled_rates)
    print('Chile model fatalities: %f' % chile_deaths)
    print('Chile model x2 fatalities: %f' % chile_double_deaths)