Example #1
def get_impact_comments(fatdict, ecodict, econexposure, event_year, ccode):
    """Create comments for a given event, describing economic and human (fatality) impacts.

    :param fatdict:
      Dictionary containing country code keys and integer estimates of fatalities.
    :param ecodict:
      Dictionary containing country code keys and estimates of economic loss, in US dollars.
    :param econexposure:
      Dictionary containing country code (ISO2) keys, and values of
      10 element arrays representing population exposure to MMI 1-10.
      Dictionary will contain an additional key 'Total', with value of exposure across all countries.
    :param event_year:
      Year in which event occurred.
    :param ccode:
      Two letter country code of epicenter or 'UK' if not a country (usually ocean).
    :returns:
      A tuple of two strings which describe the economic and human impacts.  The most impactful
      of these will be the first string.  Under certain situations, the second comment could be blank.
    """
    # first, figure out what the alert levels are for each loss result

    fatmodel = EmpiricalLoss.fromDefaultFatality()
    ecomodel = EmpiricalLoss.fromDefaultEconomic()
    fatlevel = fatmodel.getAlertLevel(fatdict)
    ecolevel = ecomodel.getAlertLevel(ecodict)
    levels = {'green': 0, 'yellow': 1, 'orange': 2, 'red': 3}
    fat_higher = levels[fatlevel] > levels[ecolevel]
    eco_higher = levels[ecolevel] > levels[fatlevel]
    gdpcomment = get_gdp_comment(ecodict, ecomodel, econexposure, event_year,
                                 ccode)

    if fat_higher:
        if fatlevel == 'green':
            impact1 = GREEN_FAT_HIGH
        elif fatlevel == 'yellow':
            impact1 = YELLOW_FAT_HIGH
        elif fatlevel == 'orange':
            impact1 = ORANGE_FAT_HIGH
        elif fatlevel == 'red':
            impact1 = RED_FAT_HIGH

        if ecolevel == 'green':
            impact2 = GREEN_ECON_LOW
        elif ecolevel == 'yellow':
            impact2 = YELLOW_ECON_LOW
        elif ecolevel == 'orange':
            impact2 = ORANGE_ECON_LOW
        elif ecolevel == 'red':
            impact2 = RED_ECON_LOW
        impact2 = impact2.replace('[GDPCOMMENT]', gdpcomment)
    elif eco_higher:
        if ecolevel == 'green':
            impact1 = GREEN_ECON_HIGH
        elif ecolevel == 'yellow':
            impact1 = YELLOW_ECON_HIGH
        elif ecolevel == 'orange':
            impact1 = ORANGE_ECON_HIGH
        elif ecolevel == 'red':
            impact1 = RED_ECON_HIGH

        if fatlevel == 'green':
            impact2 = GREEN_FAT_LOW
        elif fatlevel == 'yellow':
            impact2 = YELLOW_FAT_LOW
        elif fatlevel == 'orange':
            impact2 = ORANGE_FAT_LOW
        elif fatlevel == 'red':
            impact2 = RED_FAT_LOW
        impact1 = impact1.replace('[GDPCOMMENT]', gdpcomment)
    else:
        if fatlevel == 'green':
            impact1 = GREEN_FAT_EQUAL
        elif fatlevel == 'yellow':
            impact1 = YELLOW_FAT_EQUAL
        elif fatlevel == 'orange':
            impact1 = ORANGE_FAT_EQUAL
        elif fatlevel == 'red':
            impact1 = RED_FAT_EQUAL

        if ecolevel == 'green':
            impact2 = GREEN_ECON_EQUAL
        elif ecolevel == 'yellow':
            impact2 = YELLOW_ECON_EQUAL
        elif ecolevel == 'orange':
            impact2 = ORANGE_ECON_EQUAL
        elif ecolevel == 'red':
            impact2 = RED_ECON_EQUAL
        impact2 = impact2.replace('[GDPCOMMENT]', gdpcomment)

    impact1 = impact1.replace('\n', ' ')
    impact2 = impact2.replace('\n', ' ')
    return (impact1, impact2)
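
# Usage sketch (an addition, not part of the original example). The loss
# dictionaries, year, and country code below are hypothetical, and the
# econexposure dict is assumed to be the output of
# EconExposure.calcExposure() (see the later examples).
#
# fatdict = fatmodel.getLosses(exposure)      # e.g. {'US': 40, 'TotalFatalities': 40}
# ecodict = ecomodel.getLosses(econexposure)  # e.g. {'US': 2.5e9, 'TotalDollars': 2.5e9}
# impact1, impact2 = get_impact_comments(fatdict, ecodict, econexposure,
#                                        2023, 'US')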
Example #2
def test():
    # where is this script?
    homedir = os.path.dirname(os.path.abspath(__file__))
    event = 'northridge'
    shakefile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                             '%s_grid.xml' % event)
    popfile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                           '%s_gpw.flt' % event)
    isofile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                           '%s_isogrid.bil' % event)
    shapefile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                             'City_BoundariesWGS84', 'City_Boundaries.shp')

    print('Test loading empirical fatality model from XML file...')
    fatmodel = EmpiricalLoss.fromDefaultFatality()
    print('Passed loading empirical fatality model from XML file.')

    print('Test getting alert level from various losses...')
    assert fatmodel.getAlertLevel({'TotalFatalities': 0}) == 'green'
    assert fatmodel.getAlertLevel({'TotalFatalities': 5}) == 'yellow'
    assert fatmodel.getAlertLevel({'TotalFatalities': 100}) == 'orange'
    assert fatmodel.getAlertLevel({'TotalFatalities': 1000}) == 'red'
    # roughly a thousand times Earth's population
    assert fatmodel.getAlertLevel({'TotalFatalities': 1e13}) == 'red'
    print('Passed getting alert level from various losses.')

    print('Test retrieving fatality model data from XML file...')
    model = fatmodel.getModel('af')
    testmodel = LognormalModel('dummy', 11.613073, 0.180683, 8.428822)
    assert model == testmodel
    print('Passed retrieving fatality model data from XML file.')
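
    # Added note: in the PAGER empirical model (Jaiswal and Wald, 2010), the
    # fatality rate at shaking intensity S follows a lognormal CDF,
    #     nu(S) = Phi((1/beta) * ln(S / theta)),
    # so the positional numbers above are assumed to be theta, beta, and the
    # country's zeta (uncertainty) term.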

    print('Testing with known exposures/fatalities for 1994 Northridge EQ...')
    exposure = {
        'xf':
        np.array([
            0, 0, 1506.0, 1946880.0, 6509154.0, 6690236.0, 3405381.0,
            1892446.0, 5182.0, 0
        ])
    }
    fatdict = fatmodel.getLosses(exposure)
    testdict = {'xf': 22}
    assert fatdict['xf'] == testdict['xf']
    print(
        'Passed testing with known exposures/fatalities for 1994 Northridge EQ.'
    )

    print(
        'Testing combining G values from all countries that contributed to losses...'
    )
    fatdict = {'CO': 2.38005147e-01, 'EC': 8.01285916e+02}
    zetf = fatmodel.getCombinedG(fatdict)
    assert zetf == 2.5
    print(
        'Passed combining G values from all countries that contributed to losses...'
    )

    print('Testing calculating probabilities for standard PAGER ranges...')
    expected = {'UK': 70511, 'TotalFatalities': 70511}
    G = 2.5
    probs = fatmodel.getProbabilities(expected, G)
    testprobs = {
        '0-1': 3.99586017993e-06,
        '1-10': 0.00019277654968408576,
        '10-100': 0.0041568251597835061,
        '100-1000': 0.039995273501147441,
        '1000-10000': 0.17297196910604343,
        '10000-100000': 0.3382545813262674,
        '100000-10000000': 0.44442457847445394
    }
    for key, value in probs.items():
        np.testing.assert_almost_equal(value, testprobs[key])
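
    # Added sanity check: the seven bins are exhaustive, so the probabilities
    # should sum to ~1.
    np.testing.assert_almost_equal(sum(probs.values()), 1.0)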
    print('Passed calculating probabilities for standard PAGER ranges.')

    print('Testing calculating total fatalities for Northridge...')
    expobject = Exposure(popfile, 2012, isofile)
    expdict = expobject.calcExposure(shakefile)
    fatdict = fatmodel.getLosses(expdict)
    testdict = {'XF': 18}
    assert fatdict['XF'] == testdict['XF']
    print('Passed calculating total fatalities for Northridge...')

    print('Testing creating a fatality grid...')
    mmidata = expobject.getShakeGrid().getLayer('mmi').getData()
    popdata = expobject.getPopulationGrid().getData()
    isodata = expobject.getCountryGrid().getData()
    fatgrid = fatmodel.getLossGrid(mmidata, popdata, isodata)
    print(np.nansum(fatgrid))
    print('Passed creating a fatality grid.')

    print('Testing assigning fatalities to polygons...')
    popdict = expobject.getPopulationGrid().getGeoDict()
    with fiona.open(shapefile, 'r') as f:
        shapes = list(f)
    fatshapes, totfat = fatmodel.getLossByShapes(mmidata, popdata, isodata,
                                                 shapes, popdict)
    fatalities = 12
    for shape in fatshapes:
        if shape['id'] == '312':  # Los Angeles
            cname = shape['properties']['CITY_NAME']
            lalosses = shape['properties']['fatalities']
            assert lalosses == fatalities
            assert cname == 'Los Angeles'
            break
    print('Passed assigning fatalities to polygons...')
Example #3
def basic_test():

    mmidata = np.array([[7, 8, 8, 8, 7], [8, 9, 9, 9, 8], [8, 9, 10, 9, 8],
                        [8, 9, 9, 8, 8], [7, 8, 8, 6, 5]],
                       dtype=np.float32)
    popdata = np.ones_like(mmidata) * 1e7
    isodata = np.array(
        [[4, 4, 4, 4, 4], [4, 4, 4, 4, 4], [4, 4, 156, 156, 156],
         [156, 156, 156, 156, 156], [156, 156, 156, 156, 156]],
        dtype=np.int32)

    shakefile = get_temp_file_name()
    popfile = get_temp_file_name()
    isofile = get_temp_file_name()
    geodict = GeoDict({
        'xmin': 0.5,
        'xmax': 4.5,
        'ymin': 0.5,
        'ymax': 4.5,
        'dx': 1.0,
        'dy': 1.0,
        'nx': 5,
        'ny': 5
    })
    layers = OrderedDict([
        ('mmi', mmidata),
    ])
    event_dict = {
        'event_id': 'us12345678',
        'magnitude': 7.8,
        'depth': 10.0,
        'lat': 34.123,
        'lon': -118.123,
        'event_timestamp': datetime.utcnow(),
        'event_description': 'foo',
        'event_network': 'us'
    }
    shake_dict = {
        'event_id': 'us12345678',
        'shakemap_id': 'us12345678',
        'shakemap_version': 1,
        'code_version': '4.5',
        'process_timestamp': datetime.utcnow(),
        'shakemap_originator': 'us',
        'map_status': 'RELEASED',
        'shakemap_event_type': 'ACTUAL'
    }
    unc_dict = {'mmi': (1, 1)}
    shakegrid = ShakeGrid(layers, geodict, event_dict, shake_dict, unc_dict)
    shakegrid.save(shakefile)
    popgrid = Grid2D(popdata, geodict.copy())
    isogrid = Grid2D(isodata, geodict.copy())
    write(popgrid, popfile, 'netcdf')
    write(isogrid, isofile, 'netcdf')

    ratedict = {
        4: {
            'start': [2010, 2012, 2014, 2016],
            'end': [2012, 2014, 2016, 2018],
            'rate': [0.01, 0.02, 0.03, 0.04]
        },
        156: {
            'start': [2010, 2012, 2014, 2016],
            'end': [2012, 2014, 2016, 2018],
            'rate': [0.02, 0.03, 0.04, 0.05]
        }
    }

    popgrowth = PopulationGrowth(ratedict)
    popyear = datetime.utcnow().year
    exposure = Exposure(popfile, popyear, isofile, popgrowth=popgrowth)
    expdict = exposure.calcExposure(shakefile)

    modeldict = [
        LognormalModel('AF', 11.613073, 0.180683, 1.0),
        LognormalModel('CN', 10.328811, 0.100058, 1.0)
    ]
    fatmodel = EmpiricalLoss(modeldict)

    # for the purposes of this test, let's override the rates
    # for Afghanistan and China with simpler numbers.
    fatmodel.overrideModel(
        'AF',
        np.array([0, 0, 0, 0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 0],
                 dtype=np.float32))
    fatmodel.overrideModel(
        'CN',
        np.array([0, 0, 0, 0, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 0],
                 dtype=np.float32))
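
    # Rough hand check of the Afghanistan number below (assuming no growth
    # adjustment, since the population year equals the event year): each cell
    # holds 1e7 people, and the override rates give 1e-4 deaths/person at
    # MMI 7, 1e-3 at MMI 8, and 1e-2 at MMI 9. The twelve AF cells contain
    # two 7s, six 8s, and four 9s, so the expected total is
    # (2 * 1e-4 + 6 * 1e-3 + 4 * 1e-2) * 1e7 = 462,000.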

    print('Testing very basic fatality calculation...')
    fatdict = fatmodel.getLosses(expdict)
    # strictly speaking, the Afghanistan fatalities should be 462,000,
    # but floating point precision dictates otherwise.
    testdict = {'CN': 46111, 'AF': 461999, 'TotalFatalities': 508110}
    for key, value in fatdict.items():
        assert value == testdict[key]
    print('Passed very basic fatality calculation...')

    print('Testing grid fatality calculations...')
    mmidata = exposure.getShakeGrid().getLayer('mmi').getData()
    popdata = exposure.getPopulationGrid().getData()
    isodata = exposure.getCountryGrid().getData()
    fatgrid = fatmodel.getLossGrid(mmidata, popdata, isodata)

    assert np.nansum(fatgrid) == 508111
    print('Passed grid fatality calculations...')

    # Testing modifying rates and stuffing them back in...
    chile = LognormalModel('CL', 19.786773, 0.259531, 0.0)
    rates = chile.getLossRates(np.arange(5, 10))
    modrates = rates * 2  # does doubling the rates make the event twice as deadly?

    # roughly the exposures from 2015-9-16 CL event
    expo_pop = np.array(
        [0, 0, 0, 1047000, 7314000, 1789000, 699000, 158000, 0, 0])
    mmirange = np.arange(5, 10)
    chile_deaths = chile.getLosses(expo_pop[4:9], mmirange)
    chile_double_deaths = chile.getLosses(expo_pop[4:9],
                                          mmirange,
                                          rates=modrates)
    print('Chile model fatalities: %f' % chile_deaths)
    print('Chile model x2 fatalities: %f' % chile_double_deaths)
Example #4
def test():
    homedir = os.path.dirname(os.path.abspath(
        __file__))  # where is this script?
    fatfile = os.path.join(homedir, '..', 'data', 'fatality.xml')
    ecofile = os.path.join(homedir, '..', 'data', 'economy.xml')
    cityfile = os.path.join(homedir, '..', 'data', 'cities1000.txt')
    event = 'northridge'
    shakefile = os.path.join(homedir, '..', 'data',
                             'eventdata', event, '%s_grid.xml' % event)
    popfile = os.path.join(homedir, '..', 'data',
                           'eventdata', event, '%s_gpw.flt' % event)
    isofile = os.path.join(homedir, '..', 'data',
                           'eventdata', event, '%s_isogrid.bil' % event)
    urbanfile = os.path.join(homedir, '..', 'data',
                             'eventdata', 'northridge', 'northridge_urban.bil')
    oceanfile = os.path.join(
        homedir, '..', 'data', 'eventdata', 'northridge', 'northridge_ocean.json')
    oceangridfile = os.path.join(
        homedir, '..', 'data', 'eventdata', 'northridge', 'northridge_ocean.bil')
    timezonefile = os.path.join(
        homedir, '..', 'data', 'eventdata', 'northridge', 'northridge_timezone.shp')

    invfile = os.path.join(homedir, '..', 'data', 'semi_inventory.hdf')
    colfile = os.path.join(homedir, '..', 'data', 'semi_collapse_mmi.hdf')
    casfile = os.path.join(homedir, '..', 'data', 'semi_casualty.hdf')
    workfile = os.path.join(homedir, '..', 'data', 'semi_workforce.hdf')

    tdir = tempfile.mkdtemp()
    basename = os.path.join(tdir, 'output')

    exp = Exposure(popfile, 2012, isofile)
    results = exp.calcExposure(shakefile)
    shakegrid = exp.getShakeGrid()
    popgrid = exp.getPopulationGrid()

    pdffile, pngfile, mapcities = draw_contour(
        shakegrid, popgrid, oceanfile, oceangridfile, cityfile, basename)
    shutil.rmtree(tdir)

    popyear = 2012

    shake_tuple = getHeaderData(shakefile)
    tsunami = shake_tuple[1]['magnitude'] >= TSUNAMI_MAG_THRESH

    semi = SemiEmpiricalFatality.fromDefault()
    semi.setGlobalFiles(popfile, popyear, urbanfile, isofile)
    semiloss, resfat, nonresfat = semi.getLosses(shakefile)

    popgrowth = PopulationGrowth.fromDefault()
    econexp = EconExposure(popfile, 2012, isofile)
    fatmodel = EmpiricalLoss.fromDefaultFatality()
    expobject = Exposure(popfile, 2012, isofile, popgrowth)

    expdict = expobject.calcExposure(shakefile)
    fatdict = fatmodel.getLosses(expdict)
    econexpdict = econexp.calcExposure(shakefile)
    ecomodel = EmpiricalLoss.fromDefaultEconomic()
    ecodict = ecomodel.getLosses(expdict)
    shakegrid = econexp.getShakeGrid()
    pagerversion = 1
    cities = Cities.loadFromGeoNames(cityfile)
    impact1 = '''Red alert level for economic losses. Extensive damage is probable 
    and the disaster is likely widespread. Estimated economic losses are less 
    than 1% of GDP of Italy. Past events with this alert level have required 
    a national or international level response.'''
    impact2 = '''Orange alert level for shaking-related fatalities. Significant 
    casualties are likely.'''
    structcomment = '''Overall, the population in this region resides in structures 
    that are a mix of vulnerable and earthquake resistant construction. The predominant 
    vulnerable building types are unreinforced brick with mud and mid-rise nonductile 
    concrete frame with infill construction.'''
    histeq = [1, 2, 3]
    struct_comment = '''Overall, the population in this region resides
    in structures that are resistant to earthquake
    shaking, though some vulnerable structures
    exist.'''
    secondary_comment = '''Recent earthquakes in this area have caused secondary hazards 
    such as landslides that might have contributed to losses.'''
    hist_comment = '''A magnitude 7.1 earthquake 240 km east of this event struck Reventador: Ecuador 
    on March 6, 1987 (UTC), with estimated population exposures of 14,000 at intensity VIII and 2,000 
    at intensity IX or greater, resulting in a reported 5,000 fatalities.'''.replace('\n', '')

    location = 'At the top of the world.'
    is_released = True

    doc = PagerData()
    eventcode = shakegrid.getEventDict()['event_id']
    versioncode = eventcode
    doc.setInputs(shakegrid, timezonefile, pagerversion,
                  versioncode, eventcode, tsunami, location, is_released)
    doc.setExposure(expdict, econexpdict)
    doc.setModelResults(fatmodel, ecomodel,
                        fatdict, ecodict,
                        semiloss, resfat, nonresfat)
    doc.setComments(impact1, impact2, struct_comment,
                    hist_comment, secondary_comment)
    doc.setMapInfo(cityfile, mapcities)
    doc.validate()

    # let's test the property methods
    tdoc(doc, shakegrid, impact1, impact2,
         expdict, struct_comment, hist_comment)

    # see if we can save this to a bunch of files then read them back in
    try:
        tdir = tempfile.mkdtemp()
        doc.saveToJSON(tdir)
        newdoc = PagerData()
        newdoc.loadFromJSON(tdir)
        tdoc(newdoc, shakegrid, impact1, impact2,
             expdict, struct_comment, hist_comment)

        # test the xml saving method
        xmlfile = doc.saveToLegacyXML(tdir)
    except Exception as e:
        raise AssertionError('Saving and reloading PagerData failed: %s' % str(e))
    finally:
        shutil.rmtree(tdir)
Example #5
def test():
    event = 'northridge'
    homedir = os.path.dirname(
        os.path.abspath(__file__))  # where is this script?
    shakefile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                             '%s_grid.xml' % event)
    popfile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                           '%s_gpw.flt' % event)
    isofile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                           '%s_isogrid.bil' % event)
    shapefile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                             'City_BoundariesWGS84', 'City_Boundaries.shp')

    print('Test loading economic exposure from inputs...')
    econexp = EconExposure(popfile, 2012, isofile)
    print('Passed loading economic exposure from inputs...')

    print('Test loading empirical economic model from XML file...')
    ecomodel = EmpiricalLoss.fromDefaultEconomic()
    print('Passed loading empirical economic model from XML file.')

    print('Testing calculating probabilities for standard PAGER ranges...')
    expected = {'UK': 6819.883892 * 1e6, 'TotalDollars': 6819.883892 * 1e6}
    G = 2.5
    probs = ecomodel.getProbabilities(expected, G)
    testprobs = {
        '0-1': 0.00020696841425738358,
        '1-10': 0.0043200811319132086,
        '10-100': 0.041085446477813294,
        '100-1000': 0.17564981840854255,
        '1000-10000': 0.33957681768639003,
        '10000-100000': 0.29777890303065313,
        '100000-10000000': 0.14138196485040311
    }
    for key, value in probs.items():
        np.testing.assert_almost_equal(value, testprobs[key])
    msg = 'Passed calculating probabilities for standard PAGER ranges.'
    print(msg)

    print('Test retrieving economic model data from XML file...')
    model = ecomodel.getModel('af')
    testmodel = LognormalModel('dummy',
                               9.013810,
                               0.100000,
                               4.113200,
                               alpha=15.065400)
    assert model == testmodel
    print('Passed retrieving economic model data from XML file.')
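
    # Added note: unlike the fatality model, the economic model carries an
    # extra 'alpha' parameter, assumed here to be the country-specific GDP
    # scaling factor used by the PAGER economic loss model.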

    print('Testing with known exposures/losses for 1994 Northridge EQ...')
    exposure = {
        'xf':
        np.array([
            0, 0, 556171936.807, 718990717350.0, 2.40385709638e+12,
            2.47073141687e+12, 1.2576210799e+12, 698888019337.0, 1913733716.16,
            0.0
        ])
    }
    expodict = ecomodel.getLosses(exposure)
    testdict = {'xf': 25945225582}
    assert expodict['xf'] == testdict['xf']
    msg = ('Passed testing with known exposures/losses for '
           '1994 Northridge EQ.')
    print(msg)

    print('Testing calculating total economic losses for Northridge...')
    expdict = econexp.calcExposure(shakefile)
    ecomodel = EmpiricalLoss.fromDefaultEconomic()
    lossdict = ecomodel.getLosses(expdict)
    testdict = {'XF': 23172277187}
    assert lossdict['XF'] == testdict['XF']
    print('Passed calculating total economic losses for Northridge...')

    print('Testing creating an economic loss grid...')
    mmidata = econexp.getShakeGrid().getLayer('mmi').getData()
    popdata = econexp.getEconPopulationGrid().getData()
    isodata = econexp.getCountryGrid().getData()
    ecogrid = ecomodel.getLossGrid(mmidata, popdata, isodata)
    ecosum = 23172275857.094917
    assert np.nansum(ecogrid) == ecosum
    print('Passed creating an economic loss grid.')

    print('Testing assigning economic losses to polygons...')
    popdict = econexp.getPopulationGrid().getGeoDict()
    with fiona.open(shapefile, 'r') as f:
        shapes = list(f)
    ecoshapes, toteco = ecomodel.getLossByShapes(mmidata, popdata, isodata,
                                                 shapes, popdict)
    ecoshapes = sorted(ecoshapes,
                       key=lambda shape: shape['properties']['dollars_lost'],
                       reverse=True)
    lalosses = 17323352577
    for shape in ecoshapes:
        if shape['id'] == '312':  # Los Angeles
            cname = shape['properties']['CITY_NAME']
            dollars = shape['properties']['dollars_lost']
            assert lalosses == dollars
            assert cname == 'Los Angeles'
    print('Passed assigning economic losses to polygons...')
Example #6
def drawImpactScale(lossdict, ranges, losstype, debug=False):
    """Draw a loss impact scale, showing the probabilities that estimated losses fall into one of many bins.

    :param lossdict:
      Dictionary containing either 'TotalFatalities' or 'TotalDollars', depending on losstype.
    :param ranges:
      Ordered dictionary of the probability that losses fall into each of the following ranges:
           '0-1' (green alert)
           '1-10' (yellow alert)
           '10-100' (yellow alert)
           '100-1000' (orange alert)
           '1000-10000' (red alert)
           '10000-100000' (red alert)
           '100000-10000000' (red alert)
    :param losstype:
      String, one of 'fatality' or 'economic'.
    :param debug:
      If True, leave the figure axes visible (useful for layout debugging).
    :returns:
      Matplotlib figure containing a plot showing the probabilities of loss falling into one of the bins listed above.
    :raises:
      PagerException if the input ranges dictionary is missing any of the required keys,
      or if ranges is not an OrderedDict.
    """
    req_keys = [
        '0-1', '1-10', '10-100', '100-1000', '1000-10000', '10000-100000',
        '100000-10000000'
    ]
    if not isinstance(ranges, OrderedDict):
        raise PagerException('Input ranges must be an OrderedDict instance.')
    for key in req_keys:
        if key not in ranges:
            raise PagerException('Input ranges dictionary must have keys: %s' %
                                 str(req_keys))

    height = WIDTH / ASPECT
    f = plt.figure(figsize=(WIDTH, height))
    renderer = _find_renderer(f)
    ax = plt.gca()
    plt.axis([0, 1, 0, 1])
    if not debug:
        plt.axis('off')
    # reserve the left edge of the figure for the "sponge ball", a colored
    # circle indicating the most likely alert level.
    starting_left_edge = 11 / 63
    bottom_edge = 7 / 23
    bottom_bar_height = 3 / 23
    bar_width = 7 / 63
    barcolors = [GREEN, YELLOW, YELLOW, ORANGE, RED, RED, RED]
    ticklabels = [1, 10, 100, 1000, 10000, 100000]
    wfactor = 0
    ticklens = [0.03, 0.09, 0.03, 0.09, 0.03, 0.09]

    text_widths = []
    inv = ax.transData.inverted()
    for ticklabel in ticklabels:
        t = plt.text(0.5,
                     0.5,
                     format(ticklabel, ",d"),
                     weight='normal',
                     size=12)
        dxmin, dymin, dwidth, dheight = t.get_window_extent(
            renderer=renderer).bounds
        dxmax = dxmin + dwidth
        dymax = dymin + dheight
        dataxmin, dataymin = inv.transform((dxmin, dymin))
        dataxmax, dataymax = inv.transform((dxmax, dymax))
        text_widths.append((format(ticklabel, ",d"), dataxmax - dataxmin))
        t.remove()

    # draw the bottom bars indicating where the alert levels are
    for barcolor in barcolors:
        left_edge = starting_left_edge + bar_width * wfactor
        rect = Rectangle((left_edge, bottom_edge),
                         bar_width,
                         bottom_bar_height,
                         fc=barcolor,
                         ec='k')
        ax.add_patch(rect)
        if wfactor < len(barcolors) - 1:
            ticklen = ticklens[wfactor]
            ticklabel = text_widths[wfactor][0]
            twidth = text_widths[wfactor][1]
            plt.plot([left_edge + bar_width, left_edge + bar_width],
                     [bottom_edge - ticklen, bottom_edge], 'k')
            plt.text(left_edge + (bar_width) - (twidth / 2.0),
                     bottom_edge - (ticklen + 0.07),
                     ticklabel,
                     weight='normal',
                     size=12)
        wfactor += 1

    # now draw the top bars
    bottom_edge_bar_top = 10.5 / 23
    total_height = (23 - 10.5) / 23
    wfactor = 0
    fdict = {'weight': 'normal', 'size': 12}
    for rkey, pvalue in ranges.items():
        if pvalue < 0.03:
            wfactor += 1
            continue
        barcolor = barcolors[wfactor]
        left_edge = starting_left_edge + bar_width * wfactor
        bar_height = (pvalue * total_height)
        lw = 1
        zorder = 1
        bottom_value, top_value = [int(v) for v in rkey.split('-')]
        if losstype == 'fatality':
            expected = lossdict['TotalFatalities']
        else:
            expected = lossdict['TotalDollars'] / 1e6
        if expected >= bottom_value and expected < top_value:
            lw = 3
            zorder = 100
        rect = Rectangle((left_edge, bottom_edge_bar_top),
                         bar_width,
                         bar_height,
                         fc=barcolor,
                         ec='k',
                         lw=lw)
        rect.set_zorder(zorder)
        ax.add_patch(rect)
        ptext = '%i%%' % np.round(pvalue * 100)
        plt.text(left_edge + bar_width / 2.7,
                 bottom_edge_bar_top + bar_height + 0.02,
                 ptext,
                 fontdict=fdict)
        wfactor += 1

    # now draw the sponge ball on the left
    cx = 0.105
    cy = 0.6
    # because the axes aspect ratio is not equal, a circle specified in axes
    # coordinates would be drawn as an ellipse; compensate for that here.
    x0, y0 = ax.transAxes.transform((0, 0))  # lower left in pixels
    x1, y1 = ax.transAxes.transform((1, 1))  # upper right in pixels
    dx = x1 - x0
    dy = y1 - y0
    maxd = max(dx, dy)
    width = .11 * maxd / dx
    height = .11 * maxd / dy

    # choose the spongeball color based on the expected total losses from lossdict
    sponge_dict = {
        'green': GREEN,
        'yellow': YELLOW,
        'orange': ORANGE,
        'red': RED
    }

    if losstype == 'fatality':
        lossmodel = EmpiricalLoss.fromDefaultFatality()
    else:
        lossmodel = EmpiricalLoss.fromDefaultEconomic()
    alert_level = lossmodel.getAlertLevel(lossdict)

    spongecolor = sponge_dict[alert_level]

    spongeball = Ellipse((cx, cy), width, height, fc=spongecolor, ec='k', lw=2)
    ax.add_patch(spongeball)
    font = {'style': 'italic'}

    # draw units at bottom
    if losstype == 'fatality':
        plt.text(0.5, 0.07, 'Fatalities', fontdict=font)
    if losstype == 'economic':
        plt.text(0.45, 0.07, 'USD (Millions)', fontdict=font)

    return f
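
# Usage sketch (an addition, not part of the original example): build the bin
# probabilities with EmpiricalLoss.getProbabilities() as in the earlier tests,
# wrap them in an OrderedDict with the required keys, and pass them in.
#
# from collections import OrderedDict
# fatmodel = EmpiricalLoss.fromDefaultFatality()
# lossdict = {'TotalFatalities': 70511}
# probs = fatmodel.getProbabilities(lossdict, 2.5)
# keys = ['0-1', '1-10', '10-100', '100-1000', '1000-10000',
#         '10000-100000', '100000-10000000']
# ranges = OrderedDict((k, probs[k]) for k in keys)
# fig = drawImpactScale(lossdict, ranges, 'fatality')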
Example #7
def test():
    homedir = os.path.dirname(os.path.abspath(__file__))  # where is this script?
    fatfile = os.path.join(homedir, '..', 'data', 'fatality.xml')
    ecofile = os.path.join(homedir, '..', 'data', 'economy.xml')
    cityfile = os.path.join(homedir, '..', 'data', 'cities1000.txt')
    event = 'northridge'
    shakefile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                             '%s_grid.xml' % event)
    popfile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                           '%s_gpw.flt' % event)
    isofile = os.path.join(homedir, '..', 'data', 'eventdata', event,
                           '%s_isogrid.bil' % event)
    urbanfile = os.path.join(homedir, '..', 'data', 'eventdata', 'northridge',
                             'northridge_urban.bil')
    oceanfile = os.path.join(homedir, '..', 'data', 'eventdata', 'northridge',
                             'northridge_ocean.json')

    invfile = os.path.join(homedir, '..', 'data', 'semi_inventory.hdf')
    colfile = os.path.join(homedir, '..', 'data', 'semi_collapse_mmi.hdf')
    casfile = os.path.join(homedir, '..', 'data', 'semi_casualty.hdf')
    workfile = os.path.join(homedir, '..', 'data', 'semi_workforce.hdf')

    tdir = tempfile.mkdtemp()
    outfile = os.path.join(tdir, 'output.pdf')
    pngfile, mapcities = draw_contour(shakefile, popfile, oceanfile, cityfile,
                                      outfile, make_png=True)
    shutil.rmtree(tdir)
    
    popyear = 2012

    semi = SemiEmpiricalFatality.fromDefault()
    semi.setGlobalFiles(popfile, popyear, urbanfile, isofile)
    semiloss, resfat, nonresfat = semi.getLosses(shakefile)

    popgrowth = PopulationGrowth.fromDefault()
    econexp = EconExposure(popfile, 2012, isofile)
    fatmodel = EmpiricalLoss.fromDefaultFatality()
    expobject = Exposure(popfile, 2012, isofile, popgrowth)

    expdict = expobject.calcExposure(shakefile)
    fatdict = fatmodel.getLosses(expdict)
    econexpdict = econexp.calcExposure(shakefile)
    ecomodel = EmpiricalLoss.fromDefaultEconomic()
    ecodict = ecomodel.getLosses(expdict)
    shakegrid = econexp.getShakeGrid()
    pagerversion = 1
    cities = Cities.loadFromGeoNames(cityfile)
    impact1 = '''Red alert level for economic losses. Extensive damage is probable 
    and the disaster is likely widespread. Estimated economic losses are less 
    than 1% of GDP of Italy. Past events with this alert level have required 
    a national or international level response.'''
    impact2 = '''Orange alert level for shaking-related fatalities. Significant 
    casualties are likely.'''
    structcomment = '''Overall, the population in this region resides in structures 
    that are a mix of vulnerable and earthquake resistant construction. The predominant 
    vulnerable building types are unreinforced brick with mud and mid-rise nonductile 
    concrete frame with infill construction.'''
    histeq = [1, 2, 3]
    struct_comment = '''Overall, the population in this region resides
    in structures that are resistant to earthquake
    shaking, though some vulnerable structures
    exist.'''
    secondary_comment = '''Recent earthquakes in this area have caused secondary hazards 
    such as landslides that might have contributed to losses.'''
    hist_comment = '''A magnitude 7.1 earthquake 240 km east of this event struck Reventador: Ecuador 
    on March 6, 1987 (UTC), with estimated population exposures of 14,000 at intensity VIII and 2,000 
    at intensity IX or greater, resulting in a reported 5,000 fatalities.'''.replace('\n', '')
    doc = PagerData()
    doc.setInputs(shakegrid, pagerversion, shakegrid.getEventDict()['event_id'])
    doc.setExposure(expdict, econexpdict)
    doc.setModelResults(fatmodel, ecomodel,
                        fatdict, ecodict,
                        semiloss, resfat, nonresfat)
    doc.setComments(impact1, impact2, struct_comment, hist_comment,
                    secondary_comment)
    doc.setMapInfo(cityfile, mapcities)
    doc.validate()

    eventinfo = doc.getEventInfo()
    assert eventinfo['mag'] == shakegrid.getEventDict()['magnitude']
    
    imp1, imp2 = doc.getImpactComments()
    assert imp1 == impact1 and imp2 == impact2

    version = doc.getSoftwareVersion()
    elapsed = doc.getElapsed()

    exp = doc.getTotalExposure()
    assert np.isclose(np.array(exp), expdict['TotalExposure']).all()

    hist_table = doc.getHistoricalTable()
    assert hist_table[0]['EventID'] == '199206281505'

    scomm = doc.getStructureComment()
    assert scomm == struct_comment
    
    hcomm = doc.getHistoricalComment()
    assert hcomm == hist_comment

    citytable = doc.getCityTable()
    assert citytable.iloc[0]['name'] == 'Santa Clarita'

    summary = doc.getSummaryAlert()
    assert summary == 'yellow'
Example #8
def main(pargs, config):
    # get the users home directory
    homedir = os.path.expanduser("~")

    # handle cancel messages
    if pargs.cancel:
        # we presume that pargs.gridfile in this context is an event ID.
        msg = _cancel(pargs.gridfile, config)
        print(msg)
        return True

    # what kind of thing is gridfile?
    is_file = os.path.isfile(pargs.gridfile)
    is_url, url_gridfile = _is_url(pargs.gridfile)
    is_pdl, pdl_gridfile = _check_pdl(pargs.gridfile, config)
    if is_file:
        gridfile = pargs.gridfile
    elif is_url:
        gridfile = url_gridfile
    elif is_pdl:
        gridfile = pdl_gridfile
    else:
        print("ShakeMap Grid file %s does not exist." % pargs.gridfile)
        return False

    pager_folder = os.path.join(homedir, config["output_folder"])
    pager_archive = os.path.join(homedir, config["archive_folder"])

    admin = PagerAdmin(pager_folder, pager_archive)

    # stdout will now be logged as INFO, stderr will be logged as WARNING
    mail_host = config["mail_hosts"][0]
    mail_from = config["mail_from"]
    developers = config["developers"]
    logfile = os.path.join(pager_folder, "pager.log")
    plog = PagerLogger(logfile, developers, mail_from, mail_host, debug=pargs.debug)
    logger = plog.getLogger()

    try:
        eid = None
        pager_version = None
        # get all the basic event information and print it, if requested
        shake_tuple = getHeaderData(gridfile)
        eid = shake_tuple[1]["event_id"].lower()
        etime = shake_tuple[1]["event_timestamp"]
        if not len(eid):
            eid = shake_tuple[0]["event_id"].lower()
        network = shake_tuple[1]["event_network"].lower()
        if network == "":
            network = "us"
        if not eid.startswith(network):
            eid = network + eid
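
        # e.g., an event_id of '2023abcd' from network 'us' becomes 'us2023abcd'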

        # Create a ComCatInfo object to hopefully tell us a number of things about this event
        try:
            ccinfo = ComCatInfo(eid)
            location = ccinfo.getLocation()
            tsunami = ccinfo.getTsunami()
            authid, allids = ccinfo.getAssociatedIds()
            authsource, othersources = ccinfo.getAssociatedSources()
        except Exception:  # fail over to what we can determine locally
            location = shake_tuple[1]["event_description"]
            tsunami = shake_tuple[1]["magnitude"] >= TSUNAMI_MAG_THRESH
            authid = eid
            authsource = network
            allids = []

        # location field can be empty (None), which breaks a bunch of things
        if location is None:
            location = ""

        # Check to see if user wanted to override default tsunami criteria
        if pargs.tsunami != "auto":
            if pargs.tsunami == "on":
                tsunami = True
            else:
                tsunami = False

        # check to see if this event is a scenario
        is_scenario = False
        shakemap_type = shake_tuple[0]["shakemap_event_type"]
        if shakemap_type == "SCENARIO":
            is_scenario = True

        # if event is NOT a scenario and event time is in the future,
        # flag the event as a scenario and yell about it.
        if etime > datetime.datetime.utcnow():
            is_scenario = True
            logger.warning(
                "Event origin time is in the future! Flagging this as a scenario."
            )

        if is_scenario:
            if re.search("scenario", location.lower()) is None:
                location = "Scenario " + location

        # create the event directory (if it does not exist), and start logging there
        logger.info("Creating event directory")
        event_folder = admin.createEventFolder(authid, etime)

        # Stop processing if there is a "stop" file in the event folder
        stopfile = os.path.join(event_folder, "stop")
        if os.path.isfile(stopfile):
            fmt = '"stop" file found in %s.  Stopping processing, returning with 1.'
            logger.info(fmt % (event_folder))
            return True

        pager_version = get_pager_version(event_folder)
        version_folder = os.path.join(event_folder, "version.%03d" % pager_version)
        os.makedirs(version_folder)
        event_logfile = os.path.join(version_folder, "event.log")

        # this will turn off the global rotating log file
        # and switch to the one in the version folder.
        plog.setVersionHandler(event_logfile)

        # Copy the grid.xml file to the version folder
        # sometimes (usually when testing) the input grid isn't called grid.xml; rename it here.
        version_grid = os.path.join(version_folder, "grid.xml")
        shutil.copyfile(gridfile, version_grid)

        # Check to see if the tsunami flag has been previously set
        tsunami_toggle = {"on": 1, "off": 0}
        tsunami_file = os.path.join(event_folder, "tsunami")
        if os.path.isfile(tsunami_file):
            with open(tsunami_file, 'rt') as fh:
                tsunami = tsunami_toggle[fh.read().strip()]

        # get the rest of the event info
        etime = shake_tuple[1]["event_timestamp"]
        elat = shake_tuple[1]["lat"]
        elon = shake_tuple[1]["lon"]
        emag = shake_tuple[1]["magnitude"]

        # get the year of the event
        event_year = shake_tuple[1]["event_timestamp"].year

        # find the population data collected most closely to the event_year
        pop_year, popfile = _get_pop_year(
            event_year, config["model_data"]["population_data"]
        )
        logger.info("Population year: %i Population file: %s\n" % (pop_year, popfile))

        # Get exposure results
        logger.info("Calculating population exposure.")
        isofile = config["model_data"]["country_grid"]
        expomodel = Exposure(popfile, pop_year, isofile)
        exposure = expomodel.calcExposure(gridfile)

        # incidentally grab the country code of the epicenter
        numcode = expomodel._isogrid.getValue(elat, elon)
        if np.isnan(numcode):
            cdict = None
        else:
            cdict = Country().getCountry(int(numcode))
        if cdict is None:
            ccode = "UK"
        else:
            ccode = cdict["ISO2"]

        logger.info("Country code at epicenter is %s" % ccode)

        # get fatality results, if requested
        logger.info("Calculating empirical fatalities.")
        fatmodel = EmpiricalLoss.fromDefaultFatality()
        fatdict = fatmodel.getLosses(exposure)

        # get economic results, if requested
        logger.info("Calculating economic exposure.")
        econexpmodel = EconExposure(popfile, pop_year, isofile)
        ecomodel = EmpiricalLoss.fromDefaultEconomic()
        econexposure = econexpmodel.calcExposure(gridfile)
        ecodict = ecomodel.getLosses(econexposure)
        shakegrid = econexpmodel.getShakeGrid()

        # Get semi-empirical losses
        logger.info("Calculating semi-empirical fatalities.")
        urbanfile = config["model_data"]["urban_rural_grid"]
        if not os.path.isfile(urbanfile):
            raise PagerException("Urban-rural grid file %s does not exist." % urbanfile)

        semi = SemiEmpiricalFatality.fromDefault()
        semi.setGlobalFiles(popfile, pop_year, urbanfile, isofile)
        semiloss, resfat, nonresfat = semi.getLosses(gridfile)

        # get all of the other components of PAGER
        logger.info("Getting all comments.")
        # get the fatality and economic comments
        impact1, impact2 = get_impact_comments(
            fatdict, ecodict, econexposure, event_year, ccode
        )
        # get comment describing vulnerable structures in the region.
        struct_comment = get_structure_comment(resfat, nonresfat, semi)
        # get the comment describing historic secondary hazards
        secondary_comment = get_secondary_comment(elat, elon, emag)
        # get the comment describing historical comments in the region
        historical_comment = get_historical_comment(elat, elon, emag, exposure, fatdict)

        # generate the probability plots
        logger.info("Drawing probability plots.")
        fat_probs_file, eco_probs_file = _draw_probs(
            fatmodel, fatdict, ecomodel, ecodict, version_folder
        )

        # generate the exposure map
        exposure_base = os.path.join(version_folder, "exposure")
        logger.info("Generating exposure map...")
        oceanfile = config["model_data"]["ocean_vectors"]
        oceangrid = config["model_data"]["ocean_grid"]
        cityfile = config["model_data"]["city_file"]
        borderfile = config["model_data"]["border_vectors"]
        shake_grid = expomodel.getShakeGrid()
        pop_grid = expomodel.getPopulationGrid()
        pdf_file, png_file, mapcities = draw_contour(
            shake_grid,
            pop_grid,
            oceanfile,
            oceangrid,
            cityfile,
            exposure_base,
            borderfile,
            is_scenario=is_scenario,
        )
        logger.info("Generated exposure map %s" % pdf_file)

        # figure out whether this event has been "released".
        is_released = _get_release_status(
            pargs,
            config,
            fatmodel,
            fatdict,
            ecomodel,
            ecodict,
            shake_tuple,
            event_folder,
        )

        # Create a data object to encapsulate everything we know about the PAGER
        # results, and then serialize that to disk in the form of a number of JSON files.
        logger.info("Making PAGER Data object.")
        doc = PagerData()
        timezone_file = config["model_data"]["timezones_file"]
        elapsed = pargs.elapsed
        doc.setInputs(
            shakegrid,
            timezone_file,
            pager_version,
            shakegrid.getEventDict()["event_id"],
            authid,
            tsunami,
            location,
            is_released,
            elapsed=elapsed,
        )
        logger.info("Setting inputs.")
        doc.setExposure(exposure, econexposure)
        logger.info("Setting exposure.")
        doc.setModelResults(
            fatmodel, ecomodel, fatdict, ecodict, semiloss, resfat, nonresfat
        )
        logger.info("Setting comments.")
        doc.setComments(
            impact1, impact2, struct_comment, historical_comment, secondary_comment
        )
        logger.info("Setting map info.")
        doc.setMapInfo(cityfile, mapcities)
        logger.info("Validating.")
        doc.validate()

        # if we have determined that the event is a scenario (origin time is in the future)
        # and the shakemap is not flagged as such, set the shakemap type in the
        # pagerdata object to be 'SCENARIO'.
        if is_scenario:
            doc.setToScenario()

        json_folder = os.path.join(version_folder, "json")
        os.makedirs(json_folder)
        logger.info("Saving output to JSON.")
        doc.saveToJSON(json_folder)
        logger.info("Saving output to XML.")
        doc.saveToLegacyXML(version_folder)

        logger.info("Creating onePAGER pdf...")
        onepager_pdf, error = create_onepager(doc, version_folder)
        if onepager_pdf is None:
            raise PagerException("Could not create onePAGER output: \n%s" % error)

        # copy the contents.xml file to the version folder
        contentsfile = get_data_path("contents.xml")
        if contentsfile is None:
            raise PagerException("Could not find contents.xml file.")
        shutil.copy(contentsfile, version_folder)

        # send pdf as attachment to internal team of PAGER users
        if not is_released and not is_scenario:
            message_pager(config, onepager_pdf, doc)

        # run transfer, as appropriate and as specified by config
        # the PAGER product eventsource and eventsourcecode should
        # match the input ShakeMap settings for these properties.
        # This can possibly cause confusion if a regional ShakeMap is
        # trumped with one from NEIC, but this should happen less often
        # than an NEIC origin being made authoritative over a regional one.
        eventsource = network
        eventsourcecode = eid
        res, msg = transfer(
            config,
            doc,
            eventsourcecode,
            eventsource,
            version_folder,
            is_scenario=is_scenario,
        )
        logger.info(msg)
        if not res:
            logger.critical('Error transferring PAGER content. "%s"' % msg)

        print("Created onePAGER pdf %s" % onepager_pdf)
        logger.info("Created onePAGER pdf %s" % onepager_pdf)

        logger.info("Done.")
        return True
    except Exception as e:
        f = io.StringIO()
        traceback.print_exc(file=f)
        msg = "%s\n %s" % (str(e), f.getvalue())
        hostname = socket.gethostname()
        msg = msg + "\n" + "Error occurred on %s\n" % (hostname)
        if gridfile is not None:
            msg = msg + "\n" + "Error on file: %s\n" % (gridfile)
        if eid is not None:
            msg = msg + "\n" + "Error on event: %s\n" % (eid)
        if pager_version is not None:
            msg = msg + "\n" + "Error on version: %i\n" % (pager_version)
        f.close()
        logger.critical(msg)
        logger.info("Sent error to email")
        return False
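
# A sketch of the config structure this script assumes. All keys below are
# inferred from the lookups above; values and file names are hypothetical.
#
# output_folder: pager_output
# archive_folder: pager_archive
# mail_hosts: [smtp.example.org]
# mail_from: pager@example.org
# developers: [dev1@example.org]
# model_data:
#   population_data: (entries consumed by _get_pop_year; structure not shown here)
#   country_grid: /data/isogrid.bil
#   urban_rural_grid: /data/urban_rural.bil
#   ocean_vectors: /data/oceans.json
#   ocean_grid: /data/oceans.bil
#   city_file: /data/cities1000.txt
#   border_vectors: /data/borders.json
#   timezones_file: /data/timezones.shp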