Example #1
def export_asset_risk_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    path = '%s.%s' % (sanitize(ekey[0]), ekey[1])
    fname = dstore.export_path(path)
    md = extract(dstore, 'exposure_metadata')
    tostr = {'taxonomy': md.taxonomy}
    for tagname in md.tagnames:
        tostr[tagname] = getattr(md, tagname)
    tagnames = sorted(set(md.tagnames) - {'id'})
    arr = extract(dstore, 'asset_risk').array
    rows = []
    lossnames = sorted(name for name in arr.dtype.names if 'loss' in name)
    expnames = [name for name in arr.dtype.names if name not in md.tagnames
                and 'loss' not in name and name not in ('lon', 'lat')]
    colnames = ['id'] + tagnames + ['lon', 'lat'] + expnames + lossnames
    # sanity check
    assert len(colnames) == len(arr.dtype.names)
    for rec in arr:
        row = []
        for name in colnames:
            value = rec[name]
            try:
                row.append('"%s"' % tostr[name][value])
            except KeyError:
                row.append(value)
        rows.append(row)
    writer.save(rows, fname, colnames)
    return [fname]
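
A minimal invocation sketch (hedged: it assumes an open datastore dstore,
e.g. obtained via util.read as in Example #15, from a calculation that
produced the asset_risk output):

    # the export key is the pair (datastore key, format)
    [csv_path] = export_asset_risk_csv(('asset_risk', 'csv'), dstore)
    print('written', csv_path)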
Example #2
    def test_case_1c(self):
        # this is a case with more hazard sites than exposure sites
        test_dir = os.path.dirname(case_1c.__file__)
        self.run_calc(test_dir, 'job.ini', exports='csv')
        total = extract(self.calc.datastore, 'agg_damages/structural')
        aae([[0.4906653, 0.3249882, 0.0708492, 0.0211334, 0.092364]],
            total)  # shape (R, D) = (1, 5)

        # check extract gmf_data works with a filtered site collection
        gmf_data = dict(extract(self.calc.datastore, 'gmf_data'))
        self.assertEqual(gmf_data['rlz-000'].shape, (1,))
Example #3
    def test_case_7(self):
        self.assert_curves_ok(
            ['hazard_curve-mean.csv',
             'hazard_curve-smltp_b1-gsimltp_b1.csv',
             'hazard_curve-smltp_b2-gsimltp_b1.csv'],
            case_7.__file__)

        # exercising extract/mean_std_curves
        extract(self.calc.datastore, 'mean_std_curves')

        # exercise the warning for no output when mean_hazard_curves='false'
        self.run_calc(
            case_7.__file__, 'job.ini', mean_hazard_curves='false',
            calculation_mode='preclassical', poes='0.1')
Example #4
def export_hmaps_xml(ekey, dstore):
    key, kind, fmt = get_kkf(ekey)
    oq = dstore['oqparam']
    sitecol = dstore['sitecol']
    sitemesh = get_mesh(sitecol)
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    R = len(rlzs_assoc.realizations)
    fnames = []
    writercls = hazard_writers.HazardMapXMLWriter
    for kind in oq.get_kinds(kind, R):
        # shape (N, M, P)
        hmaps = extract(dstore, 'hmaps?kind=' + kind)[kind]
        if kind.startswith('rlz-'):
            rlz = rlzs_assoc.realizations[int(kind[4:])]
            smlt_path = '_'.join(rlz.sm_lt_path)
            gsimlt_path = rlz.gsim_rlz.uid
        else:
            smlt_path = ''
            gsimlt_path = ''
        for m, imt in enumerate(oq.imtls):
            for p, poe in enumerate(oq.poes):
                suffix = '-%s-%s' % (poe, imt)
                fname = hazard_curve_name(dstore, ekey, kind + suffix)
                data = [HazardMap(site[0], site[1], hmap[m, p])
                        for site, hmap in zip(sitemesh, hmaps)]
                writer = writercls(
                    fname, investigation_time=oq.investigation_time,
                    imt=imt, poe=poe,
                    smlt_path=smlt_path, gsimlt_path=gsimlt_path)
                writer.serialize(data)
                fnames.append(fname)
    return sorted(fnames)
Example #5
    def test_case_shakemap(self):
        self.run_calc(case_shakemap.__file__, 'pre-job.ini')
        self.run_calc(case_shakemap.__file__, 'job.ini',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))
        sitecol = self.calc.datastore['sitecol']
        self.assertEqual(len(sitecol), 9)
        gmfdict = dict(extract(self.calc.datastore, 'gmf_data'))
        gmfa = gmfdict['rlz-000']
        self.assertEqual(gmfa.shape, (9,))
        self.assertEqual(gmfa.dtype.names,
                         ('lon', 'lat', 'PGA', 'SA(0.3)', 'SA(1.0)'))
        agglosses = extract(self.calc.datastore, 'agglosses')
        aac(agglosses['mean'], numpy.array([1848876.5], numpy.float32),
            atol=.1)
        aac(agglosses['stddev'], numpy.array([1902063.], numpy.float32),
            atol=.1)
Example #6
    def test_case_2d(self):
        # time_event not specified in job_h.ini but specified in job_r.ini
        out = self.run_calc(case_2d.__file__, 'job_h.ini,job_r.ini',
                            exports='csv')
        # this is also a case with a single site but an exposure grid,
        # to test a corner case
        [fname] = out['losses_by_asset', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname)

        # test agglosses
        tot = extract(self.calc.datastore, 'agg_losses/occupants')
        aac(tot.array, [0.031716], atol=1E-5)

        # test agglosses with *
        tbl = extract(self.calc.datastore, 'agg_losses/occupants?taxonomy=*')
        self.assertEqual(tbl.array.shape, (1, 1))  # 1 taxonomy, 1 rlz
Example #7
    def post_execute(self, arr):
        """
        Compute aggregated risk
        """
        md = extract(self.datastore, 'exposure_metadata')
        categories = [cat.replace('value-', 'loss-') for cat in md] + [
            ds + '-structural' for ds in self.riskmodel.damage_states]
        multi_risk = list(md.array)
        multi_risk += sorted(
            set(arr.dtype.names) -
            set(self.datastore['assetcol/array'].dtype.names))
        tot = {risk: arr[risk].sum() for risk in multi_risk}
        cats = []
        values = []
        for cat in categories:
            val = [tot.get(f, numpy.nan) for f in self.get_fields(cat)]
            if not numpy.isnan(val).all():
                cats.append(cat)
                values.append(val)
        dt = [('peril', hdf5.vstr)] + [(c, float) for c in cats]
        agg_risk = numpy.zeros(len(self.all_perils), dt)
        for cat, val in zip(cats, values):
            agg_risk[cat] = val
        agg_risk['peril'] = self.all_perils
        self.datastore['agg_risk'] = agg_risk
Example #8
    def test_case_13(self):
        self.assert_curves_ok(
            ['hazard_curve-mean_PGA.csv', 'hazard_curve-mean_SA(0.2).csv',
             'hazard_map-mean.csv'], case_13.__file__)

        # test recomputing the hazard maps
        self.run_calc(
            case_13.__file__, 'job.ini', exports='csv',
            hazard_calculation_id=str(self.calc.datastore.calc_id),
            gsim_logic_tree_file='', source_model_logic_tree_file='')
        [fname] = export(('hmaps', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/hazard_map-mean.csv', fname,
                              delta=1E-5)

        # test extract/hcurves/rlz-0, used by the npz exports
        haz = vars(extract(self.calc.datastore, 'hcurves'))
        self.assertEqual(sorted(haz), ['all', 'investigation_time'])
        self.assertEqual(
            haz['all'].dtype.names, ('lon', 'lat', 'depth', 'mean'))
        array = haz['all']['mean']
        self.assertEqual(array.dtype.names, ('PGA', 'SA(0.2)'))
        self.assertEqual(array['PGA'].dtype.names,
                         ('0.005', '0.007', '0.0098', '0.0137', '0.0192',
                          '0.0269', '0.0376', '0.0527', '0.0738', '0.103',
                          '0.145', '0.203', '0.284'))
Example #9
    def test_case_1(self):
        self.assert_curves_ok(
            ['hazard_curve-PGA.csv', 'hazard_curve-SA(0.1).csv'],
            case_1.__file__)

        if parallel.oq_distribute() != 'no':
            info = view('job_info', self.calc.datastore)
            self.assertIn('task', info)
            self.assertIn('sent', info)
            self.assertIn('received', info)

        # there is a single source
        self.assertEqual(len(self.calc.datastore['source_info']), 1)

        # check npz export
        export(('hcurves', 'npz'), self.calc.datastore)

        # check extraction
        sitecol = extract(self.calc.datastore, 'sitecol')
        self.assertEqual(len(sitecol.array), 1)

        # check minimum_magnitude discards the source
        with self.assertRaises(RuntimeError) as ctx:
            self.run_calc(case_1.__file__, 'job.ini', minimum_magnitude='4.5')
        self.assertEqual(str(ctx.exception), 'All sources were filtered away!')
Example #10
    def test_case_1(self):
        # a case with the volcanic multi-perils ASH, LAVA, LAHAR and PYRO
        self.run_calc(case_1.__file__, 'job.ini')

        [fname] = export(('asset_risk', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/asset_risk.csv', fname)
        [fname] = export(('agg_risk', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_risk.csv', fname)

        # check extract
        md = extract(self.calc.datastore, 'exposure_metadata')
        ae(md.array, ['number', 'occupants_night', 'value-structural'])
        ae(md.multi_risk, ['collapse-structural-ASH_DRY',
                           'collapse-structural-ASH_WET',
                           'loss-structural-ASH_DRY',
                           'loss-structural-ASH_WET',
                           'loss-structural-LAHAR',
                           'loss-structural-LAVA',
                           'loss-structural-PYRO',
                           'no_damage-structural-ASH_DRY',
                           'no_damage-structural-ASH_WET',
                           'number-LAHAR',
                           'number-LAVA',
                           'number-PYRO',
                           'occupants_night-LAHAR',
                           'occupants_night-LAVA',
                           'occupants_night-PYRO'])
Example #11
def export_hcurves_xml(ekey, dstore):
    key, kind, fmt = get_kkf(ekey)
    len_ext = len(fmt) + 1
    oq = dstore['oqparam']
    sitemesh = get_mesh(dstore['sitecol'])
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    R = len(rlzs_assoc.realizations)
    fnames = []
    writercls = hazard_writers.HazardCurveXMLWriter
    for kind in oq.get_kinds(kind, R):
        if kind.startswith('rlz-'):
            rlz = rlzs_assoc.realizations[int(kind[4:])]
            smlt_path = '_'.join(rlz.sm_lt_path)
            gsimlt_path = rlz.gsim_rlz.uid
        else:
            smlt_path = ''
            gsimlt_path = ''
        name = hazard_curve_name(dstore, ekey, kind)
        hcurves = extract(dstore, 'hcurves?kind=' + kind)[kind]
        for im in oq.imtls:
            slc = oq.imtls(im)
            imt = from_string(im)
            fname = name[:-len_ext] + '-' + im + '.' + fmt
            data = [HazardCurve(Location(site), poes[slc])
                    for site, poes in zip(sitemesh, hcurves)]
            writer = writercls(fname,
                               investigation_time=oq.investigation_time,
                               imls=oq.imtls[im], imt=imt.name,
                               sa_period=getattr(imt, 'period', None) or None,
                               sa_damping=getattr(imt, 'damping', None),
                               smlt_path=smlt_path, gsimlt_path=gsimlt_path)
            writer.serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Example #12
    def test_case_miriam(self):
        # this is a case with a grid and asset-hazard association
        self.run_calc(case_miriam.__file__, 'job.ini')

        # check minimum_magnitude >= 5.2
        minmag = self.calc.datastore['ruptures']['mag'].min()
        self.assertGreaterEqual(minmag, 5.2)

        # check asset_loss_table
        tot = self.calc.datastore['asset_loss_table'].value.sum()
        self.assertEqual(tot, 15787827.0)
        [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_losses-rlz000-structural.csv',
                              fname, delta=1E-5)
        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles(
            'expected/portfolio_losses.txt', fname, delta=1E-5)

        # this is a case with exposure and region_grid_spacing=1
        self.run_calc(case_miriam.__file__, 'job2.ini')
        hcurves = dict(extract(self.calc.datastore, 'hcurves'))['all']
        sitecol = self.calc.datastore['sitecol']  # filtered sitecol
        self.assertEqual(len(hcurves), len(sitecol))
        assetcol = self.calc.datastore['assetcol']
        self.assertEqual(len(sitecol), 15)
        self.assertGreater(sitecol.vs30.sum(), 0)
        self.assertEqual(len(assetcol), 548)
Example #13
def export_losses_by_asset_npz(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    fname = dstore.export_path('%s.%s' % ekey)
    savez(fname, **dict(extract(dstore, 'losses_by_asset')))
    return [fname]
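
The archive written above is a standard NumPy .npz file and can be read back
with numpy.load; a minimal sketch (hedged: the 'rlz-000' key mirrors the
realization keys seen in Examples #2 and #5 and may differ per calculation):

    import numpy

    [fname] = export_losses_by_asset_npz(('losses_by_asset', 'npz'), dstore)
    with numpy.load(fname) as npz:
        print(npz.files)         # one entry per realization, e.g. 'rlz-000'
        losses = npz['rlz-000']  # hypothetical key, depends on the calculation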
Example #14
    def test_case_8(self):
        # a complex scenario_risk from GMFs where the hazard sites do not
        # coincide with the asset locations
        self.run_calc(case_8.__file__, 'job.ini')
        agglosses = extract(self.calc.datastore, 'agg_losses/structural')
        aac(agglosses.array, [1159817.1])

        # make sure the fullreport can be extracted
        view('fullreport', self.calc.datastore)
Example #15
def plot_losses(calc_id, bins=7):
    """
    losses_by_event plotter
    """
    # read the hazard data
    dstore = util.read(calc_id)
    losses_by_rlzi = dict(extract(dstore, 'losses_by_event'))
    oq = dstore['oqparam']
    plt = make_figure(losses_by_rlzi, oq.loss_dt().names, bins)
    plt.show()
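
A hedged usage sketch (assuming a completed event based calculation with
id 42 whose datastore contains the losses_by_event output):

    plot_losses(42)           # histogram with the default 7 bins
    plot_losses(42, bins=20)  # finer binning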
Example #16
    def test_case_master(self):
        # a case with two GSIMs
        self.run_calc(case_master.__file__, 'job.ini', exports='npz')

        # check realizations
        [fname] = export(('realizations', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/realizations.csv', fname)

        # check losses by taxonomy
        agglosses = extract(self.calc.datastore, 'agg_losses/structural?'
                            'taxonomy=*').array  # shape (T, R) = (3, 2)
        self.assertEqualFiles('expected/agglosses_taxo.txt',
                              gettemp(str(agglosses)))

        # extract agglosses with a * and a selection
        obj = extract(self.calc.datastore, 'agg_losses/structural?'
                      'state=*&cresta=0.11')
        self.assertEqual(obj.selected, [b'state=*', b'cresta=0.11'])
        self.assertEqual(obj.tags, [b'state=01'])
        aac(obj.array, [[2493.7097, 2943.6640]])
Example #17
def export_hcurves_csv(ekey, dstore):
    """
    Exports the hazard curves into several .csv files

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    info = get_info(dstore)
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    R = len(rlzs_assoc.realizations)
    sitecol = dstore['sitecol']
    sitemesh = get_mesh(sitecol)
    key, kind, fmt = get_kkf(ekey)
    fnames = []
    checksum = dstore.get_attr('/', 'checksum32')
    hmap_dt = oq.hmap_dt()
    for kind in oq.get_kinds(kind, R):
        fname = hazard_curve_name(dstore, (key, fmt), kind, rlzs_assoc)
        comment = _comment(rlzs_assoc, kind, oq.investigation_time)
        if (key in ('hmaps', 'uhs') and oq.uniform_hazard_spectra or
                oq.hazard_maps):
            hmap = extract(dstore, 'hmaps?kind=' + kind)[kind]
        if key == 'uhs' and oq.poes and oq.uniform_hazard_spectra:
            uhs_curves = calc.make_uhs(hmap, info)
            writers.write_csv(
                fname, util.compose_arrays(sitemesh, uhs_curves),
                comment=comment + ', checksum=%d' % checksum)
            fnames.append(fname)
        elif key == 'hmaps' and oq.poes and oq.hazard_maps:
            fnames.extend(
                export_hmaps_csv(ekey, fname, sitemesh,
                                 hmap.flatten().view(hmap_dt),
                                 comment + ', checksum=%d' % checksum))
        elif key == 'hcurves':
            hcurves = extract(dstore, 'hcurves?kind=' + kind)[kind]
            fnames.extend(
                export_hcurves_by_imt_csv(
                    ekey, kind, rlzs_assoc, fname, sitecol, hcurves, oq,
                    checksum))
    return sorted(fnames)
Example #18
    def test_case_1(self):
        out = self.run_calc(case_1.__file__, 'job_risk.ini', exports='csv')
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg.csv', fname)

        # check the exported GMFs
        [fname, _, sitefile] = export(('gmf_data', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/gmf-FromFile.csv', fname)
        self.assertEqualFiles('expected/sites.csv', sitefile)

        [fname] = out['losses_by_event', 'csv']
        self.assertEqualFiles('expected/losses_by_event.csv', fname)

        # check the asset values by sid
        [val] = extract(self.calc.datastore, 'asset_values/0')
        self.assertEqual(val['aref'], b'a2')
        self.assertEqual(val['aid'], 0)
        self.assertEqual(val['structural'], 2000.)

        with self.assertRaises(IndexError):  # non-existing site_id
            extract(self.calc.datastore, 'asset_values/1')
Example #19
    def test_case_1(self):
        # test with a single event
        self.assert_ok(case_1, 'job_risk.ini')
        got = view('num_units', self.calc.datastore)
        self.assertEqual('''\
======== =========
taxonomy num_units
======== =========
RC       2,000    
RM       4,000    
*ALL*    6,000    
======== =========''', got)

        # test agg_damages, 1 realization x 3 damage states
        [dmg] = extract(self.calc.datastore, 'agg_damages/structural?'
                        'taxonomy=RC&CRESTA=01.1')
        numpy.testing.assert_almost_equal(
            [1498.0121, 472.96616, 29.021801], dmg, decimal=4)
        # test no intersection
        dmg = extract(self.calc.datastore, 'agg_damages/structural?'
                      'taxonomy=RM&CRESTA=01.1')
        self.assertEqual(dmg.shape, ())
Example #20
    def check_multi_tag(self, dstore):
        # multi-tag aggregations
        arr = extract(dstore, 'aggregate/avg_losses?'
                      'tag=taxonomy&tag=occupancy&kind=quantile-0.5')
        self.assertEqual(len(arr.to_table()), 1)

        # aggregate by all loss types
        fnames = export(
            ('aggregate_by/avg_losses?tag=taxonomy&tag=occupancy&kind=mean',
             'csv'),
            dstore)
        for fname in fnames:
            self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
Example #21
def export_aggregate_by_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    token, what = ekey[0].split('/', 1)
    aw = extract(dstore, 'aggregate/' + what)
    fnames = []
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    path = '%s.%s' % (sanitize(ekey[0]), ekey[1])
    fname = dstore.export_path(path)
    writer.save(aw.to_table(), fname)
    fnames.append(fname)
    return fnames
Example #22
def export_by_tag_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    token, tag = ekey[0].split('/')
    data = extract(dstore, token + '/' + tag)
    fnames = []
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    for stat, arr in data:
        tup = (ekey[0].replace('/', '-'), stat, ekey[1])
        path = '%s-%s.%s' % tup
        fname = dstore.export_path(path)
        writer.save(arr, fname)
        fnames.append(fname)
    return fnames
Example #23
    def test_case_1(self):
        out = self.run_calc(case_1.__file__, 'job.ini', exports='csv,xml')

        [fname, _, _] = out['gmf_data', 'csv']
        self.assertEqualFiles('expected/gmf-data.csv', fname)

        [fname] = export(('hcurves', 'csv'), self.calc.datastore)
        self.assertEqualFiles(
            'expected/hazard_curve-smltp_b1-gsimltp_b1.csv', fname)

        [fname] = export(('gmf_scenario/rup-0', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/gmf-rlz-0-PGA.csv', fname)

        # test that the .npz export runs
        export(('gmf_data', 'npz'), self.calc.datastore)

        export(('hcurves', 'xml'), self.calc.datastore)

        [fname] = out['hcurves', 'xml']
        self.assertEqualFiles(
            'expected/hazard_curve-smltp_b1-gsimltp_b1-PGA.xml', fname)

        # test gsim_by_imt
        out = self.run_calc(case_1.__file__, 'job.ini',
                            ses_per_logic_tree_path='20',
                            gsim_logic_tree_file='gsim_by_imt_logic_tree.xml',
                            exports='csv')

        # testing event_info
        einfo = dict(extract(self.calc.datastore, 'event_info/0'))
        self.assertEqual(einfo['trt'], 'active shallow crust')
        self.assertEqual(einfo['rupture_class'],
                         'ParametricProbabilisticRupture')
        self.assertEqual(einfo['surface_class'], 'PlanarSurface')
        self.assertEqual(einfo['serial'], 1066)
        self.assertEqual(str(einfo['gsim']),
                         '[MultiGMPE."PGA".AkkarBommer2010]\n'
                         '[MultiGMPE."SA(0.1)".SadighEtAl1997]')
        self.assertEqual(einfo['rlzi'], 0)
        self.assertEqual(einfo['grp_id'], 0)
        self.assertEqual(einfo['occurrence_rate'], 1.0)
        self.assertEqual(list(einfo['hypo']), [0., 0., 4.])

        [fname, _, _] = out['gmf_data', 'csv']
        self.assertEqualFiles('expected/gsim_by_imt.csv', fname)
Example #24
def view_hmap(token, dstore):
    """
    Display the highest 20 points of the mean hazard map. Called as
    $ oq show hmap:0.1  # 10% PoE
    """
    try:
        poe = valid.probability(token.split(':')[1])
    except IndexError:
        poe = 0.1
    mean = dict(extract(dstore, 'hcurves?kind=mean'))['mean']
    oq = dstore['oqparam']
    hmap = calc.make_hmap_array(mean, oq.imtls, [poe], len(mean))
    dt = numpy.dtype([('sid', U32)] + [(imt, F32) for imt in oq.imtls])
    array = numpy.zeros(len(hmap), dt)
    for i, vals in enumerate(hmap):
        array[i] = (i, ) + tuple(vals)
    array.sort(order=list(oq.imtls)[0])
    return rst_table(array[:20])
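
A hedged sketch of calling the view programmatically (assuming dstore is an
open datastore containing mean hazard curves):

    print(view_hmap('hmap:0.02', dstore))  # top 20 sites at 2% PoE
    print(view_hmap('hmap', dstore))       # no PoE given, defaults to 0.1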
Example #25
    def test_case_1(self):
        self.run_calc(case_1.__file__, 'job.ini')
        ekeys = [('agg_curves-stats', 'csv')]
        for ekey in ekeys:
            for fname in export(ekey, self.calc.datastore):
                self.assertEqualFiles(
                    'expected/%s' % strip_calc_id(fname), fname)

        # make sure the agg_curves-stats has the right attrs
        self.check_attr('return_periods', [30, 60, 120, 240, 480, 960])
        self.check_attr('units', [b'EUR', b'EUR'])
        self.check_attr('nbytes', 96)

        # test the loss curves exporter
        [f1] = export(('loss_curves/rlz-0', 'csv'), self.calc.datastore)
        [f2] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-rlz-000.csv', f1)
        self.assertEqualFiles('expected/loss_curves-rlz-001.csv', f2)

        [f] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-mean.csv', f)

        # test the loss maps exporter
        fnames = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        assert fnames
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname, delta=1E-5)

        # test portfolio loss
        tmp = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', tmp)

        # test the rup_loss_table exporter
        fnames = export(('rup_loss_table', 'xml'), self.calc.datastore)
        self.assertEqual(len(fnames), 2)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname)

        # test the src_loss_table extractor
        arr = extract(self.calc.datastore, 'src_loss_table/structural')
        tmp = gettemp(rst_table(arr))
        self.assertEqualFiles('expected/src_loss_table.txt', tmp)
Example #26
    def test_case_18(self):  # GMPEtable
        self.assert_curves_ok(
            ['hazard_curve-mean_PGA.csv',
             'hazard_curve-mean_SA(0.2).csv',
             'hazard_curve-mean_SA(1.0).csv',
             'hazard_map-mean.csv',
             'hazard_uhs-mean.csv'],
            case_18.__file__, kind='stats', delta=1E-7)
        [fname] = export(('realizations', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/realizations.csv', fname)

        # check exporting a single realization in XML and CSV
        [fname] = export(('uhs/rlz-001', 'xml'),  self.calc.datastore)
        if NOT_DARWIN:  # broken on macOS
            self.assertEqualFiles('expected/uhs-rlz-1.xml', fname)
        [fname] = export(('uhs/rlz-001', 'csv'),  self.calc.datastore)
        self.assertEqualFiles('expected/uhs-rlz-1.csv', fname)

        # extracting hmaps
        hmaps = extract(self.calc.datastore, 'hmaps')['all']['mean']
        self.assertEqual(hmaps.dtype.names, ('PGA', 'SA(0.2)', 'SA(1.0)'))
Example #27
    def test_case_2(self):
        # case with two damage states
        self.run_calc(case_1.__file__, 'job_2.ini')

        [fname] = export(('asset_risk', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/asset_risk_2.csv', fname)

        # check extract
        md = extract(self.calc.datastore, 'exposure_metadata')
        ae(md.array, ['number', 'occupants_night', 'value-structural'])
        ae(md.multi_risk, ['loss-structural-LAHAR', 'number-LAHAR',
                           'occupants_night-LAHAR'])

        # check that the intentionally misspelled key
        # structura_fragility_file raises a ValueError
        with self.assertRaises(ValueError):
            self.run_calc(case_1.__file__, 'job.ini',
                          structura_fragility_file='fragility_model.xml')

        # check that the intentionally misspelled key
        # structura_consequence_file raises a ValueError
        with self.assertRaises(ValueError):
            self.run_calc(case_1.__file__, 'job.ini',
                          structura_consequence_file='consequence_model.xml')
Example #28
def export_uhs_xml(ekey, dstore):
    oq = dstore['oqparam']
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    R = len(rlzs_assoc.realizations)
    sitemesh = get_mesh(dstore['sitecol'].complete)
    key, kind, fmt = get_kkf(ekey)
    fnames = []
    periods = [imt.period for imt in oq.imt_periods()]
    for kind in oq.get_kinds(kind, R):
        metadata = get_metadata(rlzs_assoc.realizations, kind)
        uhs = extract(dstore, 'uhs?kind=' + kind)[kind]
        for p, poe in enumerate(oq.poes):
            fname = hazard_curve_name(dstore, (key, fmt), kind + '-%s' % poe)
            writer = hazard_writers.UHSXMLWriter(
                fname, periods=periods, poe=poe,
                investigation_time=oq.investigation_time, **metadata)
            data = []
            for site, curve in zip(sitemesh, uhs):
                data.append(UHS(curve[:, p], Location(site)))
            writer.serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Example #29
def export_hazard_npz(ekey, dstore):
    fname = dstore.export_path('%s.%s' % ekey)
    savez(fname, **dict(extract(dstore, ekey[0])))
    return [fname]
Example #30
def export_gmf_scenario_npz(ekey, dstore):
    fname = dstore.export_path('%s.%s' % ekey)
    savez(fname, **dict(extract(dstore, 'gmf_data')))
    return [fname]
Example #31
def get_mean_curves(dstore):
    """
    Extract the mean hazard curves from the datastore, as a composite
    array of length nsites.
    """
    return dict(extract.extract(dstore, 'hcurves?kind=mean'))['mean']
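
The result is a composite array with one field per IMT, each carrying
per-IML sub-fields (compare the dtype checks in Example #8); a hedged
access sketch:

    curves = get_mean_curves(dstore)  # assuming mean curves were computed
    print(len(curves))                # number of sites
    pga = curves['PGA']               # hypothetical IMT field, cf. Example #8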
Example #32
    def test_case_5a(self):
        # this is a case with two gsims and one asset
        self.assert_ok(case_5a, 'job_haz.ini,job_risk.ini')
        dmg = extract(self.calc.datastore,
                      'agg_damages/structural?taxonomy=*')
        tmpname = write_csv(None, dmg)  # shape (T, R, D) == (1, 2, 5)
        self.assertEqualFiles('expected/dmg_by_taxon.csv', tmpname)
Example #33
def export_dmg_by_asset_npz(ekey, dstore):
    fname = dstore.export_path('%s.%s' % ekey)
    savez(fname, **dict(extract(dstore, 'dmg_by_asset')))
    return [fname]
Example #34
def export_hazard_npz(ekey, dstore):
    fname = dstore.export_path('%s.%s' % ekey)
    out = extract(dstore, ekey[0])
    kw = {k: v for k, v in vars(out).items() if not k.startswith('_')}
    savez(fname, **kw)
    return [fname]
Example #35
def export_disagg_csv_xml(ekey, dstore):
    oq = dstore['oqparam']
    sitecol = dstore['sitecol']
    hmap4 = dstore['hmap4']
    N, M, P, Z = hmap4.shape
    imts = list(oq.imtls)
    rlzs = dstore['full_lt'].get_realizations()
    fnames = []
    writercls = hazard_writers.DisaggXMLWriter
    bins = {name: dset[:] for name, dset in dstore['disagg-bins'].items()}
    ex = 'disagg?kind=%s&imt=%s&site_id=%s&poe_id=%d&z=%d'
    skip_keys = ('Mag', 'Dist', 'Lon', 'Lat', 'Eps', 'TRT')
    for s, m, p, z in iproduct(N, M, P, Z):
        dic = {
            k: dstore['disagg/' + k][s, m, p, ..., z]
            for k in oq.disagg_outputs
        }
        if sum(arr.sum() for arr in dic.values()) == 0:  # no data
            continue
        imt = from_string(imts[m])
        r = hmap4.rlzs[s, z]
        rlz = rlzs[r]
        iml = hmap4[s, m, p, z]
        poe_agg = dstore['poe4'][s, m, p, z]
        fname = dstore.export_path('rlz-%d-%s-sid-%d-poe-%d.xml' %
                                   (r, imt, s, p))
        lon, lat = sitecol.lons[s], sitecol.lats[s]
        metadata = dstore.metadata
        metadata.update(investigation_time=oq.investigation_time,
                        imt=imt.name,
                        smlt_path='_'.join(rlz.sm_lt_path),
                        gsimlt_path=rlz.gsim_rlz.pid,
                        lon=lon,
                        lat=lat,
                        mag_bin_edges=bins['Mag'].tolist(),
                        dist_bin_edges=bins['Dist'].tolist(),
                        lon_bin_edges=bins['Lon'][s].tolist(),
                        lat_bin_edges=bins['Lat'][s].tolist(),
                        eps_bin_edges=bins['Eps'].tolist(),
                        tectonic_region_types=decode(bins['TRT'].tolist()))
        if ekey[1] == 'xml':
            metadata['sa_period'] = getattr(imt, 'period', None) or None
            metadata['sa_damping'] = getattr(imt, 'damping', None)
            writer = writercls(fname, **metadata)
            data = []
            for k in oq.disagg_outputs:
                data.append(DisaggMatrix(poe_agg, iml, k.split('_'), dic[k]))
            writer.serialize(data)
            fnames.append(fname)
        else:  # csv
            metadata['poe'] = poe_agg
            for k in oq.disagg_outputs:
                header = k.lower().split('_') + ['poe']
                com = {
                    key: value
                    for key, value in metadata.items()
                    if value is not None and key not in skip_keys
                }
                com.update(metadata)
                fname = dstore.export_path('rlz-%d-%s-sid-%d-poe-%d_%s.csv' %
                                           (r, imt, s, p, k))
                values = extract(dstore, ex % (k, imt, s, p, z))
                writers.write_csv(fname,
                                  values,
                                  header=header,
                                  comment=com,
                                  fmt='%.5E')
                fnames.append(fname)
    return sorted(fnames)
Example #36
def export_realizations(ekey, dstore):
    data = extract(dstore, 'realizations').array
    path = dstore.export_path('realizations.csv')
    writers.write_csv(path, data, fmt='%.7e')
    return [path]
Example #37
    def test_case_6(self):
        # this is a case with 5 assets on the same point
        self.assert_ok(case_6, 'job_h.ini,job_r.ini')
        dmg = extract(self.calc.datastore,
                      'agg_damages/structural?taxonomy=*')
        tmpname = write_csv(None, dmg)  # shape (T, R, D) == (5, 1, 5)
        self.assertEqualFiles('expected/dmg_by_taxon.csv', tmpname)
Example #38
    def test_case_1_eb(self):
        # this is a case with insured losses
        self.run_calc(case_1.__file__, 'job_eb.ini')

        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)

        aw = extract(self.calc.datastore, 'agg_losses/structural')
        self.assertEqual(aw.stats, ['mean'])
        self.assertEqual(aw.array, numpy.float32([767.82324]))

        fnames = export(('agg_curves-stats', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/eb_%s' % strip_calc_id(fname),
                                  fname)

        fnames = export(('agg_losses-stats', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)

        # extract agg_curves, no tags
        aw = extract(
            self.calc.datastore, 'agg_curves?kind=stats&'
            'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves1.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=rlzs&'
                     'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves2.csv', tmp)

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=stats&'
            'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves3.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=rlzs&'
                     'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves4.csv', tmp)

        # TODO: fix extract agg_curves for insured types

        # extract agg_curves with tags
        self.run_calc(case_1.__file__,
                      'job_eb.ini',
                      aggregate_by='policy,taxonomy',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=stats&'
            'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves5.csv', tmp)

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=rlzs&'
            'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves6.csv', tmp)

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=stats&'
            'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves7.csv', tmp)

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=rlzs&'
            'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves8.csv', tmp)
Example #39
    def test_case_master(self):
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract loss_curves/rlz-1 (with the first asset having zero losses)
        [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract curves by tag
        tags = 'taxonomy=tax1&state=01&cresta=0.11'
        a = extract(self.calc.datastore, 'agg_curves/structural?' + tags)
        self.assertEqual(a.array.shape, (4, 3))  # 4 stats, 3 return periods

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_losses.txt',
                              fname,
                              delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(('aggregate_by/occupancy/avg_losses', 'csv'),
                        self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

        self.check_multi_tag(self.calc.datastore)

        # ------------------------- ebrisk calculator ---------------------- #
        self.run_calc(case_master.__file__,
                      'job.ini',
                      hazard_calculation_id=str(self.calc.datastore.calc_id),
                      calculation_mode='ebrisk',
                      exports='',
                      aggregate_by='taxonomy',
                      insured_losses='false')

        # agg_losses-rlzs has shape (L=5, R=9)
        # agg_losses-stats has shape (L=5, S=4)
        fname = export(('agg_losses-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/agglosses.csv', fname)

        fname = export(('avg_losses', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/avglosses.csv', fname, delta=1E-5)
Example #40
def export_disagg_csv(ekey, dstore):
    oq = dstore['oqparam']
    sitecol = dstore['sitecol']
    hmap4 = dstore['hmap4']
    rlzs = dstore['full_lt'].get_realizations()
    best_rlzs = dstore['best_rlzs'][:]
    N, M, P, Z = hmap4.shape
    imts = list(oq.imtls)
    fnames = []
    bins = {name: dset[:] for name, dset in dstore['disagg-bins'].items()}
    ex = 'disagg?kind=%s&imt=%s&site_id=%s&poe_id=%d'
    if ekey[0] == 'disagg_traditional':
        ex += '&traditional=1'
        trad = '-traditional'
    else:
        trad = ''
    skip_keys = ('Mag', 'Dist', 'Lon', 'Lat', 'Eps', 'TRT')
    metadata = dstore.metadata
    poes_disagg = ['nan'] * P
    for p in range(P):
        try:
            poes_disagg[p] = str(oq.poes_disagg[p])
        except IndexError:
            pass
    for s in range(N):
        lon, lat = sitecol.lons[s], sitecol.lats[s]
        weights = numpy.array([rlzs[r].weight['weight'] for r in best_rlzs[s]])
        weights /= weights.sum()  # normalize to 1
        metadata.update(investigation_time=oq.investigation_time,
                        mag_bin_edges=bins['Mag'].tolist(),
                        dist_bin_edges=bins['Dist'].tolist(),
                        lon_bin_edges=bins['Lon'][s].tolist(),
                        lat_bin_edges=bins['Lat'][s].tolist(),
                        eps_bin_edges=bins['Eps'].tolist(),
                        tectonic_region_types=decode(bins['TRT'].tolist()),
                        rlz_ids=best_rlzs[s].tolist(),
                        weights=weights.tolist(),
                        lon=lon,
                        lat=lat)
        for k in oq.disagg_outputs:
            splits = k.lower().split('_')
            header = (['imt', 'poe'] + splits +
                      ['poe%d' % z for z in range(Z)])
            values = []
            nonzeros = []
            for m, p in iproduct(M, P):
                imt = imts[m]
                aw = extract(dstore, ex % (k, imt, s, p))
                # for instance for Mag_Dist [(mag, dist, poe0, poe1), ...]
                poes = aw[:, len(splits):]
                if 'trt' in header:
                    nonzeros.append(True)
                else:
                    nonzeros.append(poes.any())  # nonzero poes
                for row in aw:
                    values.append([imt, poes_disagg[p]] + list(row))
            if any(nonzeros):
                com = {
                    key: value
                    for key, value in metadata.items()
                    if value is not None and key not in skip_keys
                }
                com.update(metadata)
                fname = dstore.export_path('%s%s-%d.csv' % (k, trad, s))
                writers.write_csv(fname,
                                  values,
                                  header=header,
                                  comment=com,
                                  fmt='%.5E')
                fnames.append(fname)
    return sorted(fnames)
Example #41
    def test_case_9(self):
        # using gmfs.xml
        self.run_calc(case_9.__file__, 'job.ini')
        agglosses = extract(self.calc.datastore, 'agglosses/structural')
        aac(agglosses.array, [7306.7124])