Example #1
    def test_insured_losses(self):
        # TODO: fix extract agg_curves for insured types

        # extract agg_curves with tags
        self.run_calc(case_1.__file__, 'job_eb.ini',
                      aggregate_by='policy,taxonomy')

        aw = extract(self.calc.datastore, 'agg_curves?kind=stats&'
                     'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves5.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=rlzs&'
                     'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves6.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=stats&'
                     'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves7.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=rlzs&'
                     'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves8.csv', tmp)
Example #2
 def compare(self, what, imt, files, samplesites, atol, rtol):
     sids = self.getsids(samplesites)
     if what.startswith('avg_gmf'):
         arrays = self.getgmf(what, imt, sids)
     else:
         arrays = self.getdata(what, imt, sids)
     header = ['site_id'] if files else ['site_id', 'calc_id']
     if what == 'hcurves':
         header += ['%.5f' % lvl for lvl in self.oq.imtls[imt]]
     elif what == 'hmaps':
         header += [str(poe) for poe in self.oq.poes]
     else:  # avg_gmf
         header += ['gmf']
     rows = collections.defaultdict(list)
     diff_idxs = get_diff_idxs(arrays, rtol, atol)
     if len(diff_idxs) == 0:
         print('There are no differences within the tolerances '
               'atol=%s, rtol=%d%%, sids=%s' % (atol, rtol * 100, sids))
         return
     arr = arrays.transpose(1, 0, 2)  # shape (N, C, L)
     for sid, array in sorted(zip(sids[diff_idxs], arr[diff_idxs])):
         for ex, cols in zip(self.extractors, array):
             if files:
                 rows[ex.calc_id].append([sid] + list(cols))
             else:
                 rows['all'].append([sid, ex.calc_id] + list(cols))
     if files:
         fdict = {ex.calc_id: open('%s.txt' % ex.calc_id, 'w')
                  for ex in self.extractors}
         for calc_id, f in fdict.items():
             f.write(views.rst_table(rows[calc_id], header))
             print('Generated %s' % f.name)
     else:
         print(views.rst_table(rows['all'], header))
     return arrays
Example #3
    def test_case_48(self):
        # pointsource_distance effects on a simple point source
        self.run_calc(case_48.__file__, 'job.ini')
        tmp = general.gettemp(
            rst_table(self.calc.datastore['rup/rrup_'], ['sid0', 'sid1']))
        self.assertEqualFiles('expected/exact_dists.txt', tmp)

        self.run_calc(case_48.__file__, 'job.ini', pointsource_distance='?')
        tmp = general.gettemp(
            rst_table(self.calc.datastore['rup/rrup_'], ['sid0', 'sid1']))
        self.assertEqualFiles('expected/approx_dists.txt', tmp)
Example #4
    def test_case_1_eb(self):
        # this is a case with no insured losses, no tags
        self.run_calc(case_1.__file__, 'job_eb.ini', concurrent_tasks='4')

        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)

        aw = extract(self.calc.datastore, 'agg_losses/structural')
        self.assertEqual(aw.stats, ['mean'])
        numpy.testing.assert_allclose(aw.array, [662.6701])

        fnames = export(('tot_curves-stats', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/eb_%s' % strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        fnames = export(('tot_losses-stats', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        # extract tot_curves, no tags
        aw = extract(
            self.calc.datastore, 'tot_curves?kind=stats&'
            'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves1.csv', tmp)

        aw = extract(self.calc.datastore, 'tot_curves?kind=rlzs&'
                     'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves2.csv', tmp)

        aw = extract(
            self.calc.datastore, 'tot_curves?kind=stats&'
            'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves3.csv', tmp)

        aw = extract(self.calc.datastore, 'tot_curves?kind=rlzs&'
                     'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves4.csv', tmp)
Example #5
 def sanity_check(self):
     """
     Sanity check on the total number of assets
     """
     if self.R == 1:
         arr = self.datastore.sel('damages-rlzs')  # shape (A, 1, L, D)
     else:
         arr = self.datastore.sel('damages-stats', stat='mean')
     avg = arr.sum(axis=(0, 1))  # shape (L, D)
     if not len(self.datastore['dd_data/aid']):
         logging.warning('There is no damage at all!')
     elif 'avg_portfolio_damage' in self.datastore:
         df = views.portfolio_damage_error('avg_portfolio_damage',
                                           self.datastore)
         rst = views.rst_table(df)
         logging.info('Portfolio damage\n%s' % rst)
     num_assets = avg.sum(axis=1)  # by loss_type
     expected = self.assetcol['number'].sum()
     nums = set(num_assets) | {expected}
     if len(nums) > 1:
         numdic = dict(expected=expected)
         for lt, num in zip(self.oqparam.loss_names, num_assets):
             numdic[lt] = num
         logging.info(
             'Due to rounding errors inherent in floating-point arithmetic,'
             ' the total number of assets is not exact: %s', numdic)
Example #6
def get_pstats(pstatfile, n):
    """
    Return profiling information as an RST table.

    :param pstatfile: path to a .pstat file
    :param n: the maximum number of stats to retrieve
    """
    with tempfile.TemporaryFile(mode='w+') as stream:
        ps = pstats.Stats(pstatfile, stream=stream)
        ps.sort_stats('cumtime')
        ps.print_stats(n)
        stream.seek(0)
        lines = list(stream)
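    # find the header line of the profiling report; the data rows start
    # two lines below it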
    for i, line in enumerate(lines):
        if line.startswith('   ncalls'):
            break
    data = []
    for line in lines[i + 2:]:
        columns = line.split()
        if len(columns) == 6:
            data.append(PStatData(*columns))
    rows = [(rec.ncalls, rec.cumtime, rec.path) for rec in data]
    # here is an example of the expected output table:
    # ====== ======= ========================================================
    # ncalls cumtime path
    # ====== ======= ========================================================
    # 1      33.502  commands/run.py:77(_run)
    # 1      33.483  calculators/base.py:110(run)
    # 1      25.166  calculators/classical.py:115(execute)
    # 1      25.104  baselib.parallel.py:249(apply_reduce)
    # 1      25.099  calculators/classical.py:41(classical)
    # 1      25.099  hazardlib/calc/hazard_curve.py:164(classical)
    return views.rst_table(rows, header='ncalls cumtime path'.split())
Example #7
def print_(aw):
    if hasattr(aw, 'json'):
        print(json.dumps(json.loads(aw.json), indent=2))
    elif hasattr(aw, 'shape_descr'):
        print(rst_table(aw.to_table()))
    if hasattr(aw, 'array') and aw.dtype.names:
        print(write_csv(io.StringIO(), aw.array))
Example #8
 def sanity_check(self):
     """
     Sanity check on the total number of assets
     """
     if self.R == 1:
         arr = self.datastore.sel('damages-rlzs')  # shape (A, 1, L, D)
     else:
         arr = self.datastore.sel('damages-stats', stat='mean')
     avg = arr.sum(axis=(0, 1))  # shape (L, D)
     if not len(self.datastore['dd_data/aid']):
         logging.warning('There is no damage at all!')
     else:
         df = views.portfolio_damage_error(self.datastore, avg[:, 1:])
         rst = views.rst_table(numpy.array(df), list(df.columns))
         logging.info('Portfolio damage\n%s' % rst)
     num_assets = avg.sum(axis=1)  # by loss_type
     expected = self.assetcol['number'].sum()
     nums = set(num_assets) | {expected}
     if len(nums) > 1:
         numdic = dict(expected=expected)
         for lt, num in zip(self.oqparam.loss_names, num_assets):
             numdic[lt] = num
         logging.info(
             'Due to numeric errors, the total number of assets'
             ' is imprecise: %s', numdic)
Example #9
def source_model_info(nodes):
    """
    Extract information about NRML/0.5 source models. Returns a table
    with TRTs as rows and source classes as columns.
    """
    c = collections.Counter()
    for node in nodes:
        for src_group in node:
            trt = src_group['tectonicRegion']
            for src in src_group:
                src_class = src.tag.split('}')[1]
                c[trt, src_class] += 1
    trts, classes = zip(*c)
    trts = sorted(set(trts))
    classes = sorted(set(classes))
    dtlist = [('TRT', (bytes, 30))] + [(name, int) for name in classes]
    out = numpy.zeros(len(trts) + 1, dtlist)  # +1 for the totals
    for i, trt in enumerate(trts):
        out[i]['TRT'] = trt
        for src_class in classes:
            out[i][src_class] = c[trt, src_class]
    out[-1]['TRT'] = 'Total'
    for name in out.dtype.names[1:]:
        out[-1][name] = out[name][:-1].sum()
    return rst_table(out)
Example #10
def source_model_info(node):
    """
    Extract information about an NRML/0.5 source model
    """
    trts = []
    counters = []
    src_classes = set()
    for src_group in node:
        c = collections.Counter()
        trts.append(src_group['tectonicRegion'])
        for src in src_group:
            tag = src.tag.split('}')[1]
            c[tag] += 1
        counters.append(c)
        src_classes.update(c)
    dtlist = [('TRT', (bytes, 30))] + [(name, int)
                                       for name in sorted(src_classes)]
    out = numpy.zeros(len(node) + 1, dtlist)
    for i, c in enumerate(counters):
        out[i]['TRT'] = trts[i]
        for name in src_classes:
            out[i][name] = c[name]
    out[-1]['TRT'] = 'Total'
    for name in out.dtype.names[1:]:
        out[-1][name] = out[name][:-1].sum()
    return rst_table(out)
Example #12
    def test_case_1(self):
        self.run_calc(case_1.__file__, 'job.ini')
        ekeys = [('tot_curves-stats', 'csv')]
        for ekey in ekeys:
            for fname in export(ekey, self.calc.datastore):
                self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                                      fname)

        # make sure the tot_curves-stats has the right attrs
        self.check_attr('return_periods', [30, 60, 120, 240, 480, 960])
        self.check_attr('units', [b'EUR', b'EUR'])

        # test portfolio loss
        tmp = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', tmp)

        # test the src_loss_table extractor
        arr = extract(self.calc.datastore, 'src_loss_table/structural')
        tmp = gettemp(rst_table(arr))
        self.assertEqualFiles('expected/src_loss_table.txt', tmp)

        # test event_based_damage
        self.run_calc(case_1.__file__,
                      'job_damage.ini',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))
        fnames = export(('dmg_by_asset', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
Example #13
def get_pstats(pstatfile, n):
    """
    Return profiling information as an RST table.

    :param pstatfile: path to a .pstat file
    :param n: the maximum number of stats to retrieve
    """
    with tempfile.TemporaryFile(mode='w+') as stream:
        ps = pstats.Stats(pstatfile, stream=stream)
        ps.sort_stats('cumtime')
        ps.print_stats(n)
        stream.seek(0)
        lines = list(stream)
    for i, line in enumerate(lines):
        if line.startswith('   ncalls'):
            break
    data = []
    for line in lines[i + 2:]:
        columns = line.split()
        if len(columns) == 6:
            data.append(PStatData(*columns))
    rows = [(rec.ncalls, rec.cumtime, rec.path) for rec in data]
    # here is an example of the expected output table:
    # ====== ======= ========================================================
    # ncalls cumtime path
    # ====== ======= ========================================================
    # 1      33.502  commands/run.py:77(_run)
    # 1      33.483  calculators/base.py:110(run)
    # 1      25.166  calculators/classical.py:115(execute)
    # 1      25.104  baselib.parallel.py:249(apply_reduce)
    # 1      25.099  calculators/classical.py:41(classical)
    # 1      25.099  hazardlib/calc/hazard_curve.py:164(pmap_from_grp)
    return views.rst_table(rows, header='ncalls cumtime path'.split())
Example #14
    def test_event_based(self):
        self.run_calc(ucerf.__file__, 'job.ini')
        [fname] = export(('ruptures', 'csv'), self.calc.datastore)
        # check that we get the expected number of events
        with open(fname) as f:
            self.assertEqual(len(f.readlines()), 37)
        self.assertEqualFiles('expected/ruptures.csv', fname, lastline=20)

        # run a regular event based on top of the UCERF ruptures and
        # check the generated hazard maps
        self.run_calc(ucerf.__file__,
                      'job.ini',
                      calculation_mode='event_based',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))

        # check the GMFs
        gmdata = self.calc.datastore['gmdata'].value
        got = writetmp(rst_table(gmdata, fmt='%.6f'))
        self.assertEqualFiles('expected/gmdata_eb.csv', got)

        # check the mean hazard map
        [fname] = [
            f for f in export(('hmaps', 'csv'), self.calc.datastore)
            if 'mean' in f
        ]
        self.assertEqualFiles('expected/hazard_map-mean.csv', fname)
Example #15
def source_model_info(sm_nodes):
    """
    Extract information about source models. Returns a table
    with TRTs as rows and source classes as columns.
    """
    c = collections.Counter()
    for sm in sm_nodes:
        groups = [sm[0]] if sm['xmlns'].endswith('nrml/0.4') else sm[0]
        for group in groups:
            grp_trt = group.get('tectonicRegion')
            for src in group:
                trt = src.get('tectonicRegion', grp_trt)
                src_class = src.tag.split('}')[1]
                c[trt, src_class] += 1
    trts, classes = zip(*c)
    trts = sorted(set(trts))
    classes = sorted(set(classes))
    dtlist = [('TRT', (bytes, 30))] + [(name, int) for name in classes]
    out = numpy.zeros(len(trts) + 1, dtlist)  # +1 for the totals
    for i, trt in enumerate(trts):
        out[i]['TRT'] = trt
        for src_class in classes:
            out[i][src_class] = c[trt, src_class]
    out[-1]['TRT'] = 'Total'
    for name in out.dtype.names[1:]:
        out[-1][name] = out[name][:-1].sum()
    return rst_table(out)
Example #16
def print_csm_info(fname):
    """
    Parse the composite source model without instantiating the sources and
    print information about its composition and the full logic tree
    """
    oqparam = readinput.get_oqparam(fname)
    csm = readinput.get_composite_source_model(oqparam, in_memory=False)
    print(csm.info)
    print('See http://docs.openquake.org/oq-engine/stable/'
          'effective-realizations.html for an explanation')
    rlzs_assoc = csm.info.get_rlzs_assoc()
    print(rlzs_assoc)
    dupl = [(srcs[0]['id'], len(srcs)) for srcs in csm.check_dupl_sources()]
    if dupl:
        print(rst_table(dupl, ['source_id', 'multiplicity']))
    tot, pairs = get_pickled_sizes(rlzs_assoc)
    print(rst_table(pairs, ['attribute', 'nbytes']))
Example #17
 def __init__(self, dstore):
     self.dstore = dstore
     self.oq = oq = dstore['oqparam']
     self.text = (decode(oq.description) + '\n' + '=' * len(oq.description))
     versions = sorted(dstore['/'].attrs.items())
     self.text += '\n\n' + views.rst_table(versions)
     self.text += '\n\nnum_sites = %d, num_levels = %d' % (
         len(dstore['sitecol']), len(oq.imtls.array))
Example #19
def print_(aw):
    if hasattr(aw, 'json'):
        print(json.dumps(json.loads(aw.json), indent=2))
    elif hasattr(aw, 'shape_descr'):
        print(rst_table(aw.to_dframe()))
    if hasattr(aw, 'array') and aw.dtype.names:
        sio = io.StringIO()
        write_csv(sio, aw.array)
        print(sio.getvalue())
Example #20
def compare(what, imt, calc_ids, files, samplesites=100, rtol=.1, atol=1E-4):
    """
    Compare the hazard curves or maps of two or more calculations
    """
    sids, imtls, poes, arrays = getdata(what, calc_ids, samplesites)
    try:
        levels = imtls[imt]
    except KeyError:
        sys.exit('%s not found. The available IMTs are %s' %
                 (imt, list(imtls)))
    P = len(poes)
    head = ['site_id'] if files else ['site_id', 'calc_id']
    if what == 'hcurves':
        array_imt = arrays[:, :, imtls(imt)]
        header = head + ['%.5f' % lvl for lvl in levels]
    else:  # hmaps
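        # find the block of P columns (one per poe) belonging to the
        # requested IMT; hmaps columns are assumed grouped by IMT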
        for imti, imt_ in enumerate(imtls):
            if imt_ == imt:
                slc = slice(imti * P, imti * P + P)
        array_imt = arrays[:, :, slc]
        header = head + [str(poe) for poe in poes]
    rows = collections.defaultdict(list)
    diff_idxs = get_diff_idxs(array_imt, rtol, atol)
    if len(diff_idxs) == 0:
        print('There are no differences within the tolerance of %d%%' %
              (rtol * 100))
        return
    arr = array_imt.transpose(1, 0, 2)  # shape (N, C, L)
    for sid, array in sorted(zip(sids[diff_idxs], arr[diff_idxs])):
        for calc_id, cols in zip(calc_ids, array):
            if files:
                rows[calc_id].append([sid] + list(cols))
            else:
                rows['all'].append([sid, calc_id] + list(cols))
    if files:
        fdict = {
            calc_id: open('%s.txt' % calc_id, 'w')
            for calc_id in calc_ids
        }
        for calc_id, f in fdict.items():
            f.write(views.rst_table(rows[calc_id], header))
            print('Generated %s' % f.name)
    else:
        print(views.rst_table(rows['all'], header))
Example #21
    def test_event_based_sampling(self):
        self.run_calc(ucerf.__file__, 'job_ebh.ini')

        # check the GMFs
        gmdata = self.calc.datastore['gmdata'].value
        got = writetmp(rst_table(gmdata, fmt='%.6f'))
        self.assertEqualFiles('expected/gmdata.csv', got)

        # check the mean hazard map
        got = writetmp(view('hmap', self.calc.datastore))
        self.assertEqualFiles('expected/hmap.rst', got)
Example #22
 def __init__(self, dstore):
     self.dstore = dstore
     self.oq = oq = dstore['oqparam']
     self.text = (decode(oq.description) + '\n' + '=' * len(oq.description))
     try:
         num_rlzs = dstore['full_lt'].get_num_rlzs()
     except KeyError:
         num_rlzs = '?'
     versions = sorted(dstore['/'].attrs.items())
     self.text += '\n\n' + views.rst_table(versions)
     self.text += '\n\nnum_sites = %d, num_levels = %d, num_rlzs = %s' % (
         len(dstore['sitecol']), oq.imtls.size, num_rlzs)
Example #23
 def __init__(self, dstore):
     self.dstore = dstore
     self.oq = oq = dstore['oqparam']
     self.text = (decode(oq.description) + '\n' + '=' * len(oq.description))
     try:
         num_rlzs = dstore['csm_info'].get_num_rlzs()
     except KeyError:
         num_rlzs = '?'
     versions = sorted(dstore['/'].attrs.items())
     self.text += '\n\n' + views.rst_table(versions)
     self.text += '\n\nnum_sites = %d, num_levels = %d, num_rlzs = %s' % (
         len(dstore['sitecol']), len(oq.imtls.array), num_rlzs)
Example #24
def show(what='contents', calc_id=-1, extra=()):
    """
    Show the content of a datastore (by default the last one).
    """
    datadir = datastore.get_datadir()
    if what == 'all':  # show all
        if not os.path.exists(datadir):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datadir):
            try:
                ds = util.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except Exception:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datadir, 'calc_%s.hdf5' % calc_id)
                logging.warning('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return

    ds = util.read(calc_id)

    # this part is experimental
    if view.keyfunc(what) in view:
        print(view(what, ds))
    elif what.split('/', 1)[0] in extract:
        obj = extract(ds, what, *extra)
        if hasattr(obj, 'dtype') and obj.dtype.names:
            print(write_csv(io.BytesIO(), obj).decode('utf8'))
        else:
            print(obj)
    elif what in ds:
        obj = ds.getitem(what)
        if hasattr(obj, 'items'):  # is a group of datasets
            print(obj)
        else:  # is a single dataset
            obj.refresh()  # for SWMR mode
            aw = hdf5.ArrayWrapper.from_(obj)
            if hasattr(aw, 'shape_descr'):
                print(rst_table(aw.to_table()))
            else:
                print(write_csv(io.BytesIO(), aw.array).decode('utf8'))
    else:
        print('%s not found' % what)

    ds.close()
Example #25
def compare(what, imt, calc_ids, files, samplesites=100, rtol=.1, atol=1E-4):
    """
    Compare the hazard curves or maps of two or more calculations
    """
    sids, imtls, poes, arrays = getdata(what, calc_ids, samplesites)
    try:
        levels = imtls[imt]
    except KeyError:
        sys.exit(
            '%s not found. The available IMTs are %s' % (imt, list(imtls)))
    imt2idx = {imt: i for i, imt in enumerate(imtls)}
    head = ['site_id'] if files else ['site_id', 'calc_id']
    if what == 'hcurves':
        array_imt = arrays[:, :, imtls(imt)]
        header = head + ['%.5f' % lvl for lvl in levels]
    else:  # hmaps
        array_imt = arrays[:, :, imt2idx[imt]]
        header = head + [str(poe) for poe in poes]
    rows = collections.defaultdict(list)
    diff_idxs = get_diff_idxs(array_imt, rtol, atol)
    if len(diff_idxs) == 0:
        print('There are no differences within the tolerance of %d%%' %
              (rtol * 100))
        return
    arr = array_imt.transpose(1, 0, 2)  # shape (N, C, L)
    for sid, array in sorted(zip(sids[diff_idxs], arr[diff_idxs])):
        for calc_id, cols in zip(calc_ids, array):
            if files:
                rows[calc_id].append([sid] + list(cols))
            else:
                rows['all'].append([sid, calc_id] + list(cols))
    if files:
        fdict = {calc_id: open('%s.txt' % calc_id, 'w')
                 for calc_id in calc_ids}
        for calc_id, f in fdict.items():
            f.write(views.rst_table(rows[calc_id], header))
            print('Generated %s' % f.name)
    else:
        print(views.rst_table(rows['all'], header))
Example #26
def compare_hmaps(imt, calc_ids: int, files=False, *,
                  samplesites='', rtol: float = 0, atol: float = 1E-3):
    """
    Compare the hazard maps of two or more calculations.
    """
    c = Comparator(calc_ids)
    arrays = c.compare('hmaps', imt, files, samplesites, atol, rtol)
    # the compare method returns None when there are no differences
    if arrays is not None and len(calc_ids) == 2:
        ms = numpy.mean((arrays[0] - arrays[1])**2, axis=0)  # P
        maxdiff = numpy.abs(arrays[0] - arrays[1]).max(axis=0)  # P
        rows = [(str(poe), rms, md) for poe, rms, md in zip(
            c.oq.poes, numpy.sqrt(ms), maxdiff)]
        print(views.rst_table(rows, ['poe', 'rms-diff', 'max-diff']))
Example #27
def print_csm_info(fname):
    """
    Parse the composite source model without instantiating the sources and
    print information about its composition and the full logic tree
    """
    oqparam = readinput.get_oqparam(fname)
    csm = readinput.get_composite_source_model(oqparam, in_memory=False)
    print(csm.info)
    print('See https://github.com/gem/oq-risklib/blob/master/doc/'
          'effective-realizations.rst for an explanation')
    rlzs_assoc = csm.info.get_rlzs_assoc()
    print(rlzs_assoc)
    tot, pairs = get_pickled_sizes(rlzs_assoc)
    print(views.rst_table(pairs, ['attribute', 'nbytes']))
Example #28
def print_csm_info(fname):
    """
    Parse the composite source model without instantiating the sources and
    print information about its composition and the full logic tree
    """
    oqparam = readinput.get_oqparam(fname)
    csm = readinput.get_composite_source_model(oqparam, in_memory=False)
    print(csm.info)
    print('See https://github.com/gem/oq-risklib/blob/master/doc/'
          'effective-realizations.rst for an explanation')
    rlzs_assoc = csm.info.get_rlzs_assoc()
    print(rlzs_assoc)
    tot, pairs = get_pickled_sizes(rlzs_assoc)
    print(rst_table(pairs, ['attribute', 'nbytes']))
Example #29
def print_csm_info(fname):
    """
    Parse the composite source model and
    print information about its composition and the full logic tree
    """
    oqparam = readinput.get_oqparam(fname)
    csm = readinput.get_composite_source_model(oqparam)
    print(csm.info)
    print('See http://docs.openquake.org/oq-engine/stable/'
          'effective-realizations.html for an explanation')
    rlzs_assoc = csm.info.get_rlzs_assoc()
    print(rlzs_assoc)
    tot, pairs = get_pickled_sizes(rlzs_assoc)
    print(rst_table(pairs, ['attribute', 'nbytes']))
Example #30
def main(cmd, args=()):
    """
    Run a database command
    """
    if cmd in commands and len(args) != len(commands[cmd]):
        sys.exit('Wrong number of arguments, expected %s, got %s' %
                 (commands[cmd], args))
    elif (cmd not in commands and not cmd.upper().startswith('SELECT')
          and config.dbserver.multi_user and getpass.getuser() != 'openquake'):
        sys.exit('You have no permission to run %s' % cmd)
    dbserver.ensure_on()
    res = logs.dbcmd(cmd, *convert(args))
    if hasattr(res, '_fields') and res.__class__.__name__ != 'Row':
        print(rst_table(res))
    else:
        print(res)
Example #31
def view_col_rlz_assocs(name, dstore):
    """
    :returns: a table with the associations col_ids -> rlz_ids
    """
    rlzs_assoc = dstore['rlzs_assoc']
    num_ruptures = dstore['num_ruptures']
    num_rlzs = len(rlzs_assoc.realizations)
    col_ids_list = [[] for _ in range(num_rlzs)]
    for rlz in rlzs_assoc.realizations:
        for col_id in sorted(rlzs_assoc.get_col_ids(rlz)):
            if num_ruptures[col_id]:
                col_ids_list[rlz.ordinal].append(col_id)
    assocs = collections.defaultdict(list)
    for i, col_ids in enumerate(col_ids_list):
        assocs[tuple(col_ids)].append(i)
    tbl = [['Collections', 'Realizations']] + sorted(assocs.items())
    return views.rst_table(tbl)
Example #33
    def test_case_6(self):
        # 2 models x 3 GMPEs, different weights
        expected = [
            'hazard_curve-mean.csv',
            'quantile_curve-0.1.csv',
        ]
        out = self.run_calc(case_6.__file__, 'job.ini', exports='csv')
        fnames = out['hcurves', 'csv']
        for exp, got in zip(expected, fnames):
            self.assertEqualFiles('expected/%s' % exp, got)

        [fname] = export(('realizations', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/realizations.csv', fname)

        # test for the mean gmv
        got = writetmp(rst_table(self.calc.datastore['gmdata'].value))
        self.assertEqualFiles('expected/gmdata.csv', got)
Example #34
def print_(aw):
    if hasattr(aw, 'json'):
        try:
            attrs = hdf5.get_shape_descr(aw.json)
        except KeyError:  # no shape_descr, for instance for oqparam
            print(json.dumps(json.loads(aw.json), indent=2))
            return
        vars(aw).update(attrs)
    if hasattr(aw, 'shape_descr'):
        print(rst_table(aw.to_dframe()))
    elif hasattr(aw, 'array') and aw.dtype.names:
        sio = io.StringIO()
        write_csv(sio, aw.array)
        print(sio.getvalue())
    elif hasattr(aw, 'array'):
        print(aw.array)
    else:
        print(aw)
Example #35
 def __init__(self, dstore):
     self.dstore = dstore
     self.oq = oq = dstore['oqparam']
     self.text = (decode(oq.description) + '\n' + '=' * len(oq.description))
     info = dstore['job_info']
     dpath = dstore.hdf5path
     mtime = os.path.getmtime(dpath)
     host = '%s:%s' % (info.hostname, decode(dpath))
     updated = str(time.ctime(mtime))
     versions = sorted(dstore['/'].attrs.items())
     self.text += '\n\n' + views.rst_table([[host, updated]] + versions)
     # NB: in the future, the sitecol could be transferred as
     # an array by leveraging the HDF5 serialization protocol;
     # for the moment, however, the size of the
     # data to transfer is given by the usual pickle
     sitecol_size = humansize(len(parallel.Pickled(dstore['sitecol'])))
     self.text += '\n\nnum_sites = %d, sitecol = %s' % (
         len(dstore['sitecol']), sitecol_size)
Example #36
    def test_case_1(self):
        self.run_calc(case_1.__file__, 'job.ini')
        ekeys = [('agg_curves-stats', 'csv')]
        for ekey in ekeys:
            for fname in export(ekey, self.calc.datastore):
                self.assertEqualFiles(
                    'expected/%s' % strip_calc_id(fname), fname)

        # make sure the agg_curves-stats has the right attrs
        self.check_attr('return_periods', [30, 60, 120, 240, 480, 960])
        self.check_attr('units', [b'EUR', b'EUR'])
        self.check_attr('nbytes', 96)

        # test the loss curves exporter
        [f1] = export(('loss_curves/rlz-0', 'csv'), self.calc.datastore)
        [f2] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-rlz-000.csv', f1)
        self.assertEqualFiles('expected/loss_curves-rlz-001.csv', f2)

        [f] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-mean.csv', f)

        # test the loss maps exporter
        fnames = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        assert fnames
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname, delta=1E-5)

        # test portfolio loss
        tmp = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', tmp)

        # test the rup_loss_table exporter
        fnames = export(('rup_loss_table', 'xml'), self.calc.datastore)
        self.assertEqual(len(fnames), 2)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname)

        # test the src_loss_table extractor
        arr = extract(self.calc.datastore, 'src_loss_table/structural')
        tmp = gettemp(rst_table(arr))
        self.assertEqualFiles('expected/src_loss_table.txt', tmp)
Example #38
def view_mean_avg_losses(key, job_id):
    losses = mean_avg_losses(
        key, job_id, 'Mean Loss Curves', 'loss curves. type=')
    if losses is None:
        return 'No %s for calculation %d' % (key, job_id)
    ins_losses = mean_avg_losses(
        key, job_id, 'Mean Insured Curves', 'insured loss curves. type=')
    names = losses.dtype.names
    rows = []
    if ins_losses is None:
        ins_losses = numpy.empty_like(losses)
        ins_losses.fill(numpy.nan)
    for loss, iloss in zip(losses, ins_losses):
        row = [loss['asset_ref']] + [
            numpy.array([loss[lt], iloss[lt]]) for lt in names[1:]]
        rows.append(row)
    if len(rows) > 1:
        rows.append(sum_table(rows))
    return rst_table(rows, header=names, fmt='%8.6E')
Example #39
def main(datadir):
    lst = []
    for fname in glob.glob(datadir + '/calc_*.hdf5'):
        try:
            dstore = read(fname)
        except OSError:  # already open
            continue
        with dstore:
            try:
                descr = dstore['oqparam'].description
            except (KeyError, AttributeError):  # not a calculation
                continue
            try:
                tot_ruptures = dstore['csm_info/sg_data']['totrup'].sum()
            except KeyError:
                tot_ruptures = 0
            # append in both cases, so that calculations without
            # csm_info/sg_data still appear with a count of zero
            lst.append((descr, tot_ruptures))
    print(rst_table(lst, ['calculation', 'total number of ruptures']))
Example #40
def db(cmd, args=()):
    """
    Run a database command
    """
    if cmd not in commands:
        okcmds = '\n'.join(
            '%s %s' % (name, repr(' '.join(args)) if args else '')
            for name, args in sorted(commands.items()))
        print('Invalid command "%s": choose one from\n%s' % (cmd, okcmds))
    elif len(args) != len(commands[cmd]):
        print('Wrong number of arguments, expected %s, got %s' % (
            commands[cmd], args))
    else:
        dbserver.ensure_on()
        res = logs.dbcmd(cmd, *convert(args))
        if hasattr(res, '_fields') and res.__class__.__name__ != 'Row':
            print(rst_table(res))
        else:
            print(res)
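All the examples above share one call pattern: build a sequence of rows and pass it to rst_table (or views.rst_table), optionally with a header, to get back an RST-formatted table string. Here is a minimal sketch of that pattern; the openquake.calculators.views import path is an assumption inferred from the usages above, not a definitive reference.

# Minimal sketch of the shared call pattern. The import path below is an
# assumption inferred from the `views.rst_table(...)` calls in the examples;
# the sample rows are taken from the expected output shown in Example #6.
from openquake.calculators import views

rows = [(1, 33.502, 'commands/run.py:77(_run)'),
        (1, 33.483, 'calculators/base.py:110(run)')]
print(views.rst_table(rows, header=['ncalls', 'cumtime', 'path']))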