Пример #1
0
    def test_case_1(self):
        """Classical PSHA with a single source: curves, views, exports."""
        self.assert_curves_ok(
            ['hazard_curve-PGA.csv', 'hazard_curve-SA(0.1).csv'],
            case_1.__file__)

        # the job_info and task views are populated only when the
        # calculation actually goes through the parallelization layer
        if parallel.oq_distribute() != 'no':
            info = text_table(view('job_info', self.calc.datastore))
            self.assertIn('task', info)
            self.assertIn('sent', info)
            self.assertIn('received', info)

            # view of the slowest classical task (index -1)
            slow = view('task:classical:-1', self.calc.datastore)
            self.assertIn('taskno', slow)
            self.assertIn('duration', slow)
            self.assertIn('sources', slow)

        # there is a single source
        self.assertEqual(len(self.calc.datastore['source_info']), 1)

        # check npz export
        export(('hcurves', 'npz'), self.calc.datastore)

        # check extraction
        sitecol = extract(self.calc.datastore, 'sitecol')
        self.assertEqual(len(sitecol.array), 1)

        # check minimum_magnitude discards the source
        with self.assertRaises(RuntimeError) as ctx:
            self.run_calc(case_1.__file__, 'job.ini', minimum_magnitude='4.5')
        self.assertEqual(str(ctx.exception), 'All sources were discarded!?')
Пример #2
0
 def sanity_check(self):
     """
     Sanity check on the total number of assets
     """
     # pick the per-realization damages when there is a single
     # realization, otherwise the mean statistics
     if self.R == 1:
         damages = self.datastore.sel('damages-rlzs')  # shape (A, 1, L, D)
     else:
         damages = self.datastore.sel('damages-stats', stat='mean')
     totals = damages.sum(axis=(0, 1))  # shape (L, D)
     if not len(self.datastore['risk_by_event/agg_id']):
         logging.warning('There is no damage at all!')
     elif 'avg_portfolio_damage' in self.datastore:
         err_df = views.portfolio_damage_error('avg_portfolio_damage',
                                               self.datastore)
         org = views.text_table(err_df, ext='org')
         logging.info('Portfolio damage\n%s' % org)
     per_lt = totals.sum(axis=1)  # number of assets by loss_type
     expected = self.assetcol['value-number'].sum()
     # all the per-loss-type totals should coincide with the expected one
     if len(set(per_lt) | {expected}) > 1:
         numdic = {'expected': expected}
         numdic.update(zip(self.oqparam.loss_names, per_lt))
         logging.info(
             'Due to rounding errors inherent in floating-point arithmetic,'
             ' the total number of assets is not exact: %s', numdic)
Пример #3
0
def print_(aw):
    """
    Pretty-print an ArrayWrapper-like object: dump its JSON attributes
    (when there is no shape_descr), otherwise render it as an org-mode
    text table; plain objects are printed as they are.
    """
    if hasattr(aw, 'json'):
        try:
            shape_attrs = hdf5.get_shape_descr(aw.json)
        except KeyError:  # no shape_descr, for instance for oqparam
            print(json.dumps(json.loads(aw.json), indent=2))
            return
        vars(aw).update(shape_attrs)
    if hasattr(aw, 'shape_descr'):
        out = text_table(aw.to_dframe(), ext='org')
    elif hasattr(aw, 'array'):
        out = text_table(aw.array, ext='org')
    elif isinstance(aw, numpy.ndarray):
        out = text_table(aw, ext='org')
    else:
        out = aw
    print(out)
Пример #4
0
def source_model_info(sm_nodes):
    """
    Extract information about source models. Returns a table
    with TRTs as rows and source classes as columns.
    """
    counter = collections.Counter()
    for sm in sm_nodes:
        # NRML 0.4 has a single group, newer formats have many
        if sm['xmlns'].endswith('nrml/0.4'):
            groups = [sm[0]]
        else:
            groups = sm[0]
        for grp in groups:
            default_trt = grp.get('tectonicRegion')
            for src in grp:
                trt = src.get('tectonicRegion', default_trt)
                cls = src.tag.split('}')[1]  # strip the XML namespace
                counter[trt, cls] += 1
    trts, classes = zip(*counter)
    trts = sorted(set(trts))
    classes = sorted(set(classes))
    dtlist = [('TRT', (bytes, 30))]
    dtlist.extend((name, int) for name in classes)
    table = numpy.zeros(len(trts) + 1, dtlist)  # extra row for the totals
    for row, trt in enumerate(trts):
        table[row]['TRT'] = trt
        for cls in classes:
            table[row][cls] = counter[trt, cls]
    table[-1]['TRT'] = 'Total'
    # column-wise totals, excluding the 'Total' row itself
    for name in table.dtype.names[1:]:
        table[-1][name] = table[name][:-1].sum()
    return text_table(table)
Пример #5
0
def get_pstats(pstatfile, n):
    """
    Return profiling information as an RST table.

    :param pstatfile: path to a .pstat file
    :param n: the maximum number of stats to retrieve
    """
    with tempfile.TemporaryFile(mode='w+') as stream:
        ps = pstats.Stats(pstatfile, stream=stream)
        ps.sort_stats('cumtime')
        ps.print_stats(n)
        stream.seek(0)
        lines = list(stream)
    # look for the header line of the stats table; previously, if
    # `lines` was empty, `i` stayed unbound and lines[i + 2:] raised
    # a NameError; now an empty table is returned instead
    for i, line in enumerate(lines):
        if line.startswith('   ncalls'):
            break
    else:  # header not found (or no output at all)
        i = len(lines)
    data = []
    # the stats rows have exactly 6 whitespace-separated columns
    for line in lines[i + 2:]:
        columns = line.split()
        if len(columns) == 6:
            data.append(PStatData(*columns))
    rows = [(rec.ncalls, rec.cumtime, rec.path) for rec in data]
    # here is an example of the expected output table:
    # ====== ======= ========================================================
    # ncalls cumtime path
    # ====== ======= ========================================================
    # 1      33.502  commands/run.py:77(_run)
    # 1      33.483  calculators/base.py:110(run)
    # 1      25.166  calculators/classical.py:115(execute)
    # 1      25.104  baselib.parallel.py:249(apply_reduce)
    # 1      25.099  calculators/classical.py:41(classical)
    # 1      25.099  hazardlib/calc/hazard_curve.py:164(classical)
    return views.text_table(rows, header='ncalls cumtime path'.split())
Пример #6
0
    def test_case_12(self):
        # test Modified GMPE
        self.assert_curves_ok(['hazard_curve-smltp_b1-gsimltp_b1_b2.csv'],
                              case_12.__file__)

        # test disagg_by_grp: the dataframe rendered as a text table
        # must match the stored expected file
        df = self.calc.datastore.read_df('disagg_by_grp')
        fname = general.gettemp(text_table(df))
        self.assertEqualFiles('expected/disagg_by_grp.rst', fname)
Пример #7
0
 def compare(self, what, imt, files, samplesites, atol, rtol):
     """
     Compare `what` across the calculations wrapped by self.extractors
     and report the rows differing beyond the given tolerances, either
     on stdout or on one .txt file per calculation.

     :param what: kind of data ('hcurves', 'hmaps', 'uhs', 'avg_gmf...')
     :param imt: intensity measure type (a poe_id for 'uhs' — see callers)
     :param files: if true, write one <calc_id>.txt file per calculation
     :param samplesites: specification of the sites to sample
     :param atol: absolute tolerance
     :param rtol: relative tolerance
     :returns: the extracted arrays, or [] if there are no differences
     """
     sids = self.getsids(samplesites)
     if what == 'uhs':
         arrays = self.getuhs(what, imt, sids)
     elif what.startswith('avg_gmf'):
         arrays = self.getgmf(what, imt, sids)
     else:
         arrays = self.getdata(what, imt, sids)
     # when writing one file per calculation the calc_id column is
     # redundant, since each file is named after its calc_id
     header = ['site_id'] if files else ['site_id', 'calc_id']
     if what == 'hcurves':
         header += ['%.5f' % lvl for lvl in self.oq.imtls[imt]]
     elif what == 'hmaps':
         header += [str(poe) for poe in self.oq.poes]
     elif what == 'uhs':
         header += self.oq.imt_periods()
     else:  # avg_gmf
         header += ['gmf']
     rows = collections.defaultdict(list)
     diff_idxs = get_diff_idxs(arrays, rtol, atol)
     if len(diff_idxs) == 0:
         print('There are no differences within the tolerances '
               'atol=%s, rtol=%d%%, sids=%s' % (atol, rtol * 100, sids))
         return []
     arr = arrays.transpose(1, 0, 2)  # shape (N, C, L)
     for sid, array in sorted(zip(sids[diff_idxs], arr[diff_idxs])):
         # each array has shape (C, L)
         for ex, cols in zip(self.extractors, array):
             # cols has shape L
             if files:
                 rows[ex.calc_id].append([sid] + list(cols))
             else:
                 rows['all'].append([sid, ex.calc_id] + list(cols))
     if files:
         # BUG FIX: the files were previously opened in a dict and
         # never closed; use a `with` block to release each handle
         for ex in self.extractors:
             with open('%s.txt' % ex.calc_id, 'w') as f:
                 f.write(views.text_table(rows[ex.calc_id], header))
                 print('Generated %s' % f.name)
     else:
         print(views.text_table(rows['all'], header))
     return arrays
Пример #8
0
    def test_case_1_eb(self):
        # this is a case with insured losses and tags
        self.run_calc(case_1.__file__, 'job_eb.ini', concurrent_tasks='4')

        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname,
                              delta=1E-5)

        # extraction of the aggregate structural losses (mean only)
        aw = extract(self.calc.datastore, 'agg_losses/structural')
        self.assertEqual(aw.stats, ['mean'])
        numpy.testing.assert_allclose(aw.array, [685.5015], atol=.001)

        [fname] = export(('aggrisk', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                              fname, delta=1E-5)

        fnames = export(('aggcurves', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/eb_%s' % strip_calc_id(fname),
                                  fname, delta=1E-5)

        [fname] = export(('risk_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname,
                              delta=1E-5)

        # extract agg_curves with tags (absolute values)
        aw = extract(self.calc.datastore, 'agg_curves?kind=stats&'
                     'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(text_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves5.csv', tmp)

        # same extraction, but with relative values (absolute=0)
        aw = extract(self.calc.datastore, 'agg_curves?kind=stats&'
                     'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(text_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves7.csv', tmp)

        # test ct_independence: the portfolio losses must not depend
        # on the number of concurrent tasks
        loss4 = view('portfolio_losses', self.calc.datastore)
        self.run_calc(case_1.__file__, 'job_eb.ini', concurrent_tasks='0')
        loss0 = view('portfolio_losses', self.calc.datastore)
        self.assertEqual(loss0, loss4)
Пример #9
0
 def __init__(self, dstore):
     """
     Build the report header from the datastore: the description as an
     RST title, the root attributes as a table, plus a summary line
     with the number of sites, levels and realizations.

     :param dstore: a DataStore instance
     """
     self.dstore = dstore
     self.oq = oq = dstore['oqparam']
     self.text = (decode(oq.description) + '\n' + '=' * len(oq.description))
     try:
         num_rlzs = dstore['full_lt'].get_num_rlzs()
     except KeyError:  # no 'full_lt' in the datastore
         num_rlzs = '?'
     # the attributes of the root group (typically version information)
     versions = sorted(dstore['/'].attrs.items())
     self.text += '\n\n' + views.text_table(versions)
     self.text += '\n\nnum_sites = %d, num_levels = %d, num_rlzs = %s' % (
         len(dstore['sitecol']), oq.imtls.size, num_rlzs)
Пример #10
0
    def test_case_2(self):
        """Classical PSHA: inputs view, hcurves export, disagg_by_src."""
        self.run_calc(case_2.__file__, 'job.ini')

        # check view inputs
        lines = text_table(view('inputs', self.calc.datastore)).splitlines()
        self.assertEqual(len(lines), 13)  # rst table with 13 rows

        [fname] = export(('hcurves', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/hcurve.csv', fname)

        # check disagg_by_src for a single realization
        check_disagg_by_src(self.calc.datastore)
Пример #11
0
def compare_uhs(calc_ids: int, files=False, *, poe_id: int = 0,
                samplesites='', rtol: float = 0, atol: float = 1E-3):
    """
    Compare the uniform hazard spectra of two or more calculations.
    """
    comp = Comparator(calc_ids)
    arrays = comp.compare('uhs', poe_id, files, samplesites, atol, rtol)
    if len(arrays) and len(calc_ids) == 2:
        # each array has shape (N, M); summarize the discrepancy with
        # the root-mean-square and the maximum absolute difference
        diff = arrays[0] - arrays[1]
        rms = numpy.sqrt(numpy.mean(diff ** 2))
        maxdiff = numpy.abs(diff).max()
        row = ('%.5f' % comp.oq.poes[poe_id], rms, maxdiff)
        print(views.text_table([row], ['poe', 'rms-diff', 'max-diff']))
Пример #12
0
def compare_hmaps(imt, calc_ids: int, files=False, *,
                  samplesites='', rtol: float = 0, atol: float = 1E-3):
    """
    Compare the hazard maps of two or more calculations.
    """
    comp = Comparator(calc_ids)
    arrays = comp.compare('hmaps', imt, files, samplesites, atol, rtol)
    if len(arrays) and len(calc_ids) == 2:
        # per-poe root-mean-square and maximum absolute difference
        diff = arrays[0] - arrays[1]
        rms = numpy.sqrt(numpy.mean(diff ** 2, axis=0))  # shape P
        maxdiff = numpy.abs(diff).max(axis=0)  # shape P
        rows = [(str(poe), r, m)
                for poe, r, m in zip(comp.oq.poes, rms, maxdiff)]
        print(views.text_table(rows, ['poe', 'rms-diff', 'max-diff']))
Пример #13
0
 def add(self, name, obj=None):
     """Add the view named `name` to the report text"""
     if obj:
         # a literal object: render it as an indented RST block
         text = '\n::\n\n' + indent(str(obj))
     else:
         result = views.view(name, self.dstore)
         # tabular results are rendered as text tables, strings go as-is
         text = (views.text_table(result)
                 if isinstance(result, (numpy.ndarray, pandas.DataFrame))
                 else result)
     if text:
         title = self.title[name]
         underline = '-' * len(title)
         self.text += '\n'.join(['\n\n' + title, underline, text])
Пример #14
0
    def test_case_master(self):
        # this test exercises the case of a complex logic tree
        self.run_calc(case_master.__file__, 'job.ini')
        fname = gettemp(text_table(view('mean_disagg', self.calc.datastore)))
        self.assertEqualFiles('expected/mean_disagg.rst', fname)
        os.remove(fname)

        # check the number of exported disaggregation files
        fnames = export(('disagg', 'csv'), self.calc.datastore)
        self.assertEqual(len(fnames), 64)  # 2 sid x 8 keys x 2 poe x 2 imt
        # only the Mag_Dist outputs (without Eps) have expected files
        for fname in fnames:
            if 'Mag_Dist' in fname and 'Eps' not in fname:
                self.assertEqualFiles(
                    'expected_output/%s' % strip_calc_id(fname), fname)

        check_disagg_by_src(self.calc.datastore)
Пример #15
0
def main(cmd, args=()):
    """
    Run a database command
    """
    known = cmd in commands
    if known and len(args) != len(commands[cmd]):
        sys.exit('Wrong number of arguments, expected %s, got %s' %
                 (commands[cmd], args))
    elif (not known and not cmd.upper().startswith('SELECT')
          and config.dbserver.multi_user and getpass.getuser() != 'openquake'):
        # in multi_user mode only the openquake user can run arbitrary
        # commands (plain SELECTs are always allowed)
        sys.exit('You have no permission to run %s' % cmd)
    dbserver.ensure_on()
    res = logs.dbcmd(cmd, *convert(args))
    # namedtuple-like results (except Row) are rendered as tables
    is_table = hasattr(res, '_fields') and res.__class__.__name__ != 'Row'
    print(text_table(res) if is_table else res)
Пример #16
0
    def post_execute(self, result):
        """
        Export the result in CSV format.

        :param result:
            a dictionary asset_ordinal -> array(R, L, D)
        """
        num_ds = len(self.crmodel.damage_states)
        damages = numpy.zeros(
            (self.A, self.R, self.L, num_ds), numpy.float32)
        for asset_ordinal, arr in result.items():
            damages[asset_ordinal] = arr
        self.datastore['damages-rlzs'] = damages
        stats.set_rlzs_stats(self.datastore, 'damages',
                             assets=self.assetcol['id'],
                             loss_type=self.oqparam.loss_names,
                             dmg_state=self.crmodel.damage_states)
        # log the portfolio damage view as an org-mode table
        dmg = views.view('portfolio_damage', self.datastore)
        logging.info('\n' + views.text_table(dmg, ext='org'))
Пример #17
0
def main(datadir):
    """
    Print a table with the total number of ruptures for every
    calculation file (calc_*.hdf5) found in `datadir`.
    """
    lst = []
    for fname in glob.glob(datadir + '/calc_*.hdf5'):
        try:
            dstore = read(fname)
        except OSError:  # already open
            continue
        with dstore:
            try:
                descr = dstore['oqparam'].description
            except (KeyError, AttributeError):  # not a calculation
                continue
            try:
                tot_ruptures = dstore['full_lt/sg_data']['totrup'].sum()
            except KeyError:
                tot_ruptures = 0
            # BUG FIX: the append used to live in the `else` of the
            # try/except, so calculations without 'full_lt/sg_data'
            # were silently skipped and the 0 fallback was dead code;
            # now every calculation gets a row
            lst.append((descr, tot_ruptures))
    print(text_table(lst, ['calculation', 'total number of ruptures']))
Пример #18
0
def export_ruptures_csv(ekey, dstore):
    """
    Export the rupture information as a CSV file.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: a list with the path of the exported file
        (empty for scenario calculations)
    """
    oq = dstore['oqparam']
    # scenario calculations have nothing to export here
    if 'scenario' in oq.calculation_mode:
        return []
    dest = dstore.export_path('ruptures.csv')
    arr = extract(dstore, 'rupture_info')
    if export.sanity_check:
        # report suspicious ruptures (if any) on stderr
        bad = view('bad_ruptures', dstore)
        if len(bad):  # nonempty
            print(text_table(bad), file=sys.stderr)
    comment = dstore.metadata
    comment.update(investigation_time=oq.investigation_time,
                   ses_per_logic_tree_path=oq.ses_per_logic_tree_path)
    # sort by rupture ID to make the export reproducible
    arr.array.sort(order='rup_id')
    writers.write_csv(dest, arr, comment=comment)
    return [dest]
Пример #19
0
    def test_case_11(self):
        # secondary perils without secondary simulations
        self.run_calc(case_11.__file__, 'job.ini', secondary_simulations="{}")
        calc1 = self.calc.datastore
        [fname] = export(('risk_by_event', 'csv'), calc1)
        self.assertEqualFiles('expected/risk_by_event_1.csv', fname)

        # secondary perils with secondary simulations
        self.run_calc(case_11.__file__, 'job.ini')
        calc2 = self.calc.datastore
        [fname] = export(('risk_by_event', 'csv'), calc2)
        self.assertEqualFiles('expected/risk_by_event_2.csv', fname)

        # check mean_perils
        fname = gettemp(text_table(view('mean_perils', self.calc.datastore)))
        self.assertEqualFiles('expected/mean_perils.rst', fname)

        # check damages-rlzs for both calculations
        [fname] = export(('damages-rlzs', 'csv'), calc1)
        self.assertEqualFiles('expected/avg_damages1.csv', fname)
        [fname] = export(('damages-rlzs', 'csv'), calc2)
        self.assertEqualFiles('expected/avg_damages2.csv', fname)
Пример #20
0
    def test_case_13(self):
        """Classical PSHA: hazard maps, extreme sites, hcurves extraction."""
        self.assert_curves_ok([
            'hazard_curve-mean_PGA.csv', 'hazard_curve-mean_SA(0.2).csv',
            'hazard_map-mean.csv'
        ], case_13.__file__)

        # test recomputing the hazard maps (starting from a previous
        # calculation, with empty logic tree files)
        self.run_calc(case_13.__file__,
                      'job.ini',
                      exports='csv',
                      hazard_calculation_id=str(self.calc.datastore.calc_id),
                      gsim_logic_tree_file='',
                      source_model_logic_tree_file='')
        [fname] = export(('hmaps', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/hazard_map-mean.csv',
                              fname,
                              delta=1E-5)

        # check the extreme_sites view against the expected file
        csv = general.gettemp(
            text_table(view('extreme_sites', self.calc.datastore)))
        self.assertEqualFiles('expected/extreme_sites.rst', csv)

        # test extract/hcurves/rlz-0, used by the npz exports
        haz = vars(extract(self.calc.datastore, 'hcurves'))
        self.assertEqual(sorted(haz), ['_extra', 'all', 'investigation_time'])
        self.assertEqual(haz['all'].dtype.names,
                         ('lon', 'lat', 'depth', 'mean'))
        array = haz['all']['mean']
        self.assertEqual(array.dtype.names, ('PGA', 'SA(0.2)'))
        # the PGA subfields are named after the intensity levels
        self.assertEqual(
            array['PGA'].dtype.names,
            ('0.005', '0.007', '0.0098', '0.0137', '0.0192', '0.0269',
             '0.0376', '0.0527', '0.0738', '0.103', '0.145', '0.203', '0.284'))

        # test disagg_by_src in a complex case with duplicated sources
        check_disagg_by_src(self.calc.datastore)
Пример #21
0
 def test_case_17(self):
     # exposure given as a CSV file encoded in latin1
     self.run_calc(case_17.__file__, 'job.ini')
     keys = self.calc.datastore['agg_keys'][:]
     self.assertEqualFiles('agg_keys.org',
                           gettemp(text_table(keys, ext='org')))
Пример #22
0
 def cumtime(self):
     """Print a table with the cumulative time spent per calculation"""
     rows = [(ex.calc_id, ex.get('performance_data')['time_sec'].sum())
             for ex in self.extractors]
     print(views.text_table(rows, ['calc_id', 'time']))