Example #1
    def test_case_4(self):
        # this test is sensitive to the ordering of the epsilons
        # in openquake.riskinput.make_eps
        out = self.run_calc(case_4.__file__, 'job.ini', exports='csv')
        fname = writetmp(view('totlosses', self.calc.datastore))
        self.assertEqualFiles('expected/totlosses.txt', fname)

        [fname] = out['agglosses-rlzs', 'csv']
        self.assertEqualFiles('expected/agglosses.csv', fname)
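These test snippets all follow the same pattern: view() renders a datastore view as plain text, writetmp() apparently writes that text to a temporary file and returns its path, and assertEqualFiles() diffs that file against the expected one. A minimal stand-in for such a helper, assuming it only needs to persist the text and hand back a path (writetmp_sketch is a hypothetical name, not the real openquake helper):

import tempfile

def writetmp_sketch(content, suffix='.txt'):
    # write the given text to a named temporary file and return its path,
    # so a file-comparison helper can diff it against an expected file
    with tempfile.NamedTemporaryFile('w', suffix=suffix, delete=False) as f:
        f.write(content)
        return f.name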
Example #2
def show(calc_id, key=None, rlzs=None):
    """
    Show the content of a datastore.

    :param calc_id: numeric calculation ID; if 0, show all calculations
    :param key: key of the datastore
    :param rlzs: flag; if given, print out the realizations in order
    """
    if not calc_id:
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                oq = OqParam.from_(datastore.DataStore(calc_id).attrs)
                cmode, descr = oq.calculation_mode, oq.description
            except:  # invalid datastore directory
                logging.warn('Removed invalid calculation %d', calc_id)
                shutil.rmtree(
                    os.path.join(datastore.DATADIR, 'calc_%s' % calc_id))
            else:
                rows.append((calc_id, cmode, descr))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    ds = datastore.DataStore(calc_id)
    if key:
        if key in datastore.view:
            print(datastore.view(key, ds))
            return
        obj = ds[key]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
        return
    # print all keys
    oq = OqParam.from_(ds.attrs)
    print(
        oq.calculation_mode, 'calculation (%r) saved in %s contains:' %
        (oq.description, ds.calc_dir))
    for key in ds:
        print(key, humansize(ds.getsize(key)))

    # this part is experimental and not tested on purpose
    if rlzs and 'curves_by_trt_gsim' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = combined_curves(ds)
        dists = []
        for rlz in sorted(curves_by_rlz):
            curves = curves_by_rlz[rlz]
            dist = sum(
                rmsep(mean_curves[imt], curves[imt], min_value)
                for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        for dist, rlz in sorted(dists):
            print('rlz=%s, rmsep=%s' % (rlz, dist))
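Note how show() treats datastore.view both as a mapping (key in datastore.view) and as a callable (datastore.view(key, ds)). That behaviour is consistent with a decorator-based registry of named view functions; the sketch below shows one way such a registry could work. The class ViewRegistry, its add method, and the num_keys view are illustrative assumptions, not names taken from the openquake codebase:

class ViewRegistry(dict):
    # maps a view name to a function taking the datastore and returning text
    def add(self, name):
        def decorator(func):
            self[name] = func
            return func
        return decorator

    def __call__(self, name, dstore):
        # render the named view against the given datastore
        return self[name](dstore)

view = ViewRegistry()

@view.add('num_keys')
def num_keys(dstore):
    return 'the datastore contains %d keys' % len(list(dstore))

print('num_keys' in view)          # True, so show() would take the view branch
print(view('num_keys', {'a': 1}))  # any mapping works as a fake datastore here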
Example #3
    def add(self, name, obj=None):
        """Add the view named `name` to the report text"""
        title = self.title[name]
        line = '-' * len(title)
        if obj:
            text = '\n::\n\n' + indent(str(obj))
        else:
            text = datastore.view(name, self.dstore)
        self.text += '\n'.join(['\n\n' + title, line, text])
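The method builds a reStructuredText section: the title, a dashed underline of the same length, and either the pre-rendered view text or an indented literal block introduced by '::'. A self-contained sketch of the same layout, using textwrap.indent in place of the indent helper imported by the original module (an assumption about what that helper does):

import textwrap

def format_section(title, obj=None, view_text=''):
    # title, underline of equal length, then the body
    line = '-' * len(title)
    if obj:
        body = '\n::\n\n' + textwrap.indent(str(obj), '    ')
    else:
        body = view_text
    return '\n'.join(['\n\n' + title, line, body])

print(format_section('Total losses', obj={'structural': 2687.25}))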
Example #4
File: show.py  Project: amirj700/oq-risklib
def show(what, calc_id=-1):
    """
    Show the content of a datastore.

    :param what: key or view of the datastore
    :param calc_id: numeric calculation ID; if -1, show the last calculation
    """
    if what == 'all':  # show all
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                ds = datastore.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datastore.DATADIR, 'calc_%s.hdf5' % calc_id)
                logging.warn('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
                ds.close()
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    elif what == 'views':
        for name in sorted(datastore.view):
            print(name)
        return

    ds = datastore.read(calc_id)

    # this part is experimental
    if what == 'rlzs' and 'hcurves' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = get_hcurves_and_means(ds)
        dists = []
        for rlz, curves in curves_by_rlz.items():
            dist = sum(
                rmsep(mean_curves[imt], curves[imt], min_value)
                for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    elif what in datastore.view:
        print(datastore.view(what, ds))
    else:
        obj = ds[what]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
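The experimental 'rlzs' branch above ranks realizations by summing, over the intensity measure types, an rmsep distance between each realization's hazard curves and the mean curves. The snippet below reproduces that ranking in isolation; rmsep_sketch is only a plausible stand-in (a root-mean-square difference where the mean curve exceeds min_value), since the real rmsep implementation is not shown here, and the curve values are made up:

import numpy

def rmsep_sketch(reference, curves, min_value=0.01):
    # stand-in distance: root mean square difference where the reference
    # curve is above min_value (a guess at how min_value is used)
    mask = reference > min_value
    if not mask.any():
        return 0.0
    return numpy.sqrt(((reference[mask] - curves[mask]) ** 2).mean())

# structured arrays with one field per IMT, as in mean_curves.dtype.fields
imt_dt = numpy.dtype([('PGA', float, 4), ('SA(0.5)', float, 4)])
mean_curves = numpy.zeros(1, imt_dt)
mean_curves['PGA'] = [0.9, 0.5, 0.2, 0.05]
mean_curves['SA(0.5)'] = [0.8, 0.4, 0.1, 0.02]
curves_by_rlz = {'b1': mean_curves.copy(), 'b2': mean_curves.copy()}
curves_by_rlz['b2']['PGA'] = [0.7, 0.3, 0.1, 0.01]

dists = sorted(
    (sum(rmsep_sketch(mean_curves[imt], curves[imt])
         for imt in mean_curves.dtype.fields), rlz)
    for rlz, curves in curves_by_rlz.items())
print('Realizations in order of distance from the mean curves')
for dist, rlz in dists:
    print('%s: rmsep=%s' % (rlz, dist))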
Example #5
File: show.py  Project: amirj700/oq-risklib
def show(what, calc_id=-1):
    """
    Show the content of a datastore.

    :param what: key or view of the datastore
    :param calc_id: numeric calculation ID; if -1, show the last calculation
    """
    if what == 'all':  # show all
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                ds = datastore.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datastore.DATADIR, 'calc_%s.hdf5' % calc_id)
                logging.warn('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
                ds.close()
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    elif what == 'views':
        for name in sorted(datastore.view):
            print(name)
        return

    ds = datastore.read(calc_id)

    # this part is experimental
    if what == 'rlzs' and 'hcurves' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = get_hcurves_and_means(ds)
        dists = []
        for rlz, curves in curves_by_rlz.items():
            dist = sum(rmsep(mean_curves[imt], curves[imt], min_value)
                       for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    elif what in datastore.view:
        print(datastore.view(what, ds))
    else:
        obj = ds[what]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
Example #6
def show(calc_id, key=None, rlzs=None):
    """
    Show the content of a datastore.

    :param calc_id: numeric calculation ID; if 0, show all calculations
    :param key: key of the datastore
    :param rlzs: flag; if given, print out the realizations in order
    """
    if not calc_id:
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                oq = OqParam.from_(datastore.DataStore(calc_id).attrs)
                cmode, descr = oq.calculation_mode, oq.description
            except:  # invalid datastore directory
                logging.warn('Removed invalid calculation %d', calc_id)
                shutil.rmtree(os.path.join(
                    datastore.DATADIR, 'calc_%s' % calc_id))
            else:
                rows.append((calc_id, cmode, descr))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    ds = datastore.DataStore(calc_id)
    if key:
        if key in datastore.view:
            print(datastore.view(key, ds))
            return
        obj = ds[key]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
        return
    # print all keys
    oq = OqParam.from_(ds.attrs)
    print(oq.calculation_mode, 'calculation (%r) saved in %s contains:' %
          (oq.description, ds.hdf5path))
    for key in ds:
        print(key, humansize(ds.getsize(key)))

    # this part is experimental and not tested on purpose
    if rlzs and 'curves_by_trt_gsim' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = combined_curves(ds)
        dists = []
        for rlz in sorted(curves_by_rlz):
            curves = curves_by_rlz[rlz]
            dist = sum(rmsep(mean_curves[imt], curves[imt], min_value)
                       for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        for dist, rlz in sorted(dists):
            print('rlz=%s, rmsep=%s' % (rlz, dist))
Example #7
    def test_case_5(self):
        # test with different curve resolution for different taxonomies
        out = self.run_calc(case_5.__file__, 'job_h.ini,job_r.ini',
                            exports='xml', individual_curves='false')

        # check mean loss curves
        [fname] = out['loss_curves-stats', 'xml']
        self.assertEqualFiles('expected/loss_curves-mean.xml', fname)

        # check individual avg losses
        fname = writetmp(view('loss_curves_avg', self.calc.datastore))
        self.assertEqualFiles('expected/loss_curves_avg.txt', fname)
Example #8
    def add(self, name, obj=None):
        """Add the view named `name` to the report text"""
        title = self.title[name]
        line = '-' * len(title)
        if obj:
            text = '\n::\n\n' + indent(str(obj))
        else:
            orig = views.rst_table.__defaults__
            views.rst_table.__defaults__ = (None, '%s')  # disable formatting
            text = datastore.view(name, self.dstore)
            views.rst_table.__defaults__ = orig
        self.text += '\n'.join(['\n\n' + title, line, text])
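Compared with the simpler add() shown earlier, this version temporarily overwrites views.rst_table.__defaults__ to disable number formatting while the view is rendered. Since the originals are restored outside of a try/finally, an exception raised inside datastore.view would leave the module-level defaults patched for every later caller. A small self-contained sketch of the same trick made exception-safe; rst_table here is only a placeholder with two defaulted arguments like the ones being patched, not the real openquake function:

def rst_table(data, header=None, fmt='%9.7E'):
    # placeholder: render one value per line with the given format
    return '\n'.join(fmt % value for value in data)

def call_with_plain_format(func, *args):
    orig = rst_table.__defaults__
    rst_table.__defaults__ = (None, '%s')  # disable formatting
    try:
        return func(*args)
    finally:
        rst_table.__defaults__ = orig  # restored even if func raises

print(call_with_plain_format(rst_table, [2687.2496, 3234.1374]))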
Example #9
    def test_pik(self):
        # store pickleable Python objects
        self.dstore['key1'] = 'value1'
        self.assertEqual(len(self.dstore), 1)
        self.dstore['key2'] = 'value2'
        self.assertEqual(list(self.dstore), ['key1', 'key2'])
        del self.dstore['key2']
        self.assertEqual(list(self.dstore), ['key1'])
        self.assertEqual(self.dstore['key1'], 'value1')

        # test a datastore view
        self.assertEqual(view('key1_upper', self.dstore), 'VALUE1')
Example #10
    def test_case_5(self):
        # test with different curve resolution for different taxonomies
        out = self.run_calc(case_5.__file__,
                            'job_h.ini,job_r.ini',
                            exports='xml',
                            individual_curves='false')

        # check mean loss curves
        [fname] = out['loss_curves-stats', 'xml']
        self.assertEqualFiles('expected/loss_curves-mean.xml', fname)

        # check individual avg losses
        fname = writetmp(view('loss_curves_avg', self.calc.datastore))
        self.assertEqualFiles('expected/loss_curves_avg.txt', fname)
Example #11
    def test_case_6a(self):
        # case with two gsims
        out = self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                            exports='csv')
        f1, f2 = out['agglosses-rlzs', 'csv']
        self.assertEqualFiles('expected/agg-gsimltp_b1_structural.csv', f1)
        self.assertEqualFiles('expected/agg-gsimltp_b2_structural.csv', f2)

        # testing the totlosses view
        dstore = self.calc.datastore
        fname = writetmp(view('totlosses', dstore))
        self.assertEqualFiles('expected/totlosses.txt', fname)

        # testing the specific GMF exporter
        [gmf] = export(('gmfs:0', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/gmf-0-PGA.csv', gmf)
Example #12
    def test_case_6a(self):
        # case with two gsims
        out = self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                            exports='csv')
        f1, f2 = out['agglosses-rlzs', 'csv']
        self.assertEqualFiles('expected/agg-gsimltp_b1_structural.csv', f1)
        self.assertEqualFiles('expected/agg-gsimltp_b2_structural.csv', f2)

        # testing the totlosses view
        dstore = self.calc.datastore
        text = view('totlosses', dstore)
        self.assertEqual(text, '''\
=============== ===================
structural-mean structural-mean_ins
=============== ===================
2.6872496E+03         NAN          
3.2341374E+03         NAN          
=============== ===================''')
Example #13
def _print_info(dstore, filtersources=True, weightsources=True):
    assoc = dstore["rlzs_assoc"]
    oqparam = OqParam.from_(dstore.attrs)
    csm = dstore["composite_source_model"]
    sitecol = dstore["sitecol"]
    print(csm.get_info())
    print("See https://github.com/gem/oq-risklib/blob/master/doc/" "effective-realizations.rst for an explanation")
    print(assoc)
    if filtersources or weightsources:
        [info] = readinput.get_job_info(oqparam, csm, sitecol)
        info["n_sources"] = csm.get_num_sources()
        curve_matrix_size = info["n_sites"] * info["n_levels"] * info["n_imts"] * len(assoc) * 8
        for k in info.dtype.fields:
            if k == "input_weight" and not weightsources:
                pass
            else:
                print(k, info[k])
        print("curve_matrix_size", humansize(curve_matrix_size))
    if "num_ruptures" in dstore:
        print(datastore.view("rupture_collections", dstore))
Example #14
    def test_case_6a(self):
        # case with two gsims
        out = self.run_calc(case_6a.__file__,
                            'job_haz.ini,job_risk.ini',
                            exports='csv')
        f1, f2 = out['agglosses-rlzs', 'csv']
        self.assertEqualFiles('expected/agg-gsimltp_b1_structural.csv', f1)
        self.assertEqualFiles('expected/agg-gsimltp_b2_structural.csv', f2)

        # testing the totlosses view
        dstore = self.calc.datastore
        text = view('totlosses', dstore)
        self.assertEqual(
            text, '''\
=============== ===================
structural-mean structural-mean_ins
=============== ===================
2.6872496E+03         NAN          
3.2341374E+03         NAN          
=============== ===================''')
Example #15
def _print_info(dstore, filtersources=True, weightsources=True):
    assoc = dstore['rlzs_assoc']
    oqparam = OqParam.from_(dstore.attrs)
    csm = dstore['composite_source_model']
    sitecol = dstore['sitecol']
    print(csm.get_info())
    print('See https://github.com/gem/oq-risklib/blob/master/doc/'
          'effective-realizations.rst for an explanation')
    print(assoc)
    if filtersources or weightsources:
        [info] = readinput.get_job_info(oqparam, csm, sitecol)
        info['n_sources'] = csm.get_num_sources()
        curve_matrix_size = (info['n_sites'] * info['n_levels'] *
                             info['n_imts'] * len(assoc) * 8)
        for k in info.dtype.fields:
            if k == 'input_weight' and not weightsources:
                pass
            else:
                print(k, info[k])
        print('curve_matrix_size', humansize(curve_matrix_size))
    if 'num_ruptures' in dstore:
        print(datastore.view('rupture_collections', dstore))
Example #16
File: info.py  Project: ruthali/oq-risklib
def _print_info(dstore, filtersources=True, weightsources=True):
    assoc = dstore['rlzs_assoc']
    oqparam = dstore['oqparam']
    csm = dstore['composite_source_model']
    sitecol = dstore['sitecol']
    print(csm.get_info())
    print('See https://github.com/gem/oq-risklib/blob/master/doc/'
          'effective-realizations.rst for an explanation')
    print(assoc)
    if filtersources or weightsources:
        [info] = readinput.get_job_info(oqparam, csm, sitecol)
        info['n_sources'] = csm.get_num_sources()
        curve_matrix_size = (
            info['n_sites'] * info['n_levels'] *
            info['n_imts'] * len(assoc) * 8)
        for k in info.dtype.fields:
            if k == 'input_weight' and not weightsources:
                pass
            else:
                print(k, info[k])
        print('curve_matrix_size', humansize(curve_matrix_size))
    if 'num_ruptures' in dstore:
        print(datastore.view('rupture_collections', dstore))
Example #17
def show(calc_id, key=None, rlzs=None):
    """
    Show the content of a datastore.

    :param calc_id: numeric calculation ID; if 0, show all calculations
    :param key: key of the datastore
    :param rlzs: flag; if given, print out the realizations in order
    """
    if calc_id == 0:  # show all
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                ds = datastore.DataStore(calc_id, mode='r')
                oq = OqParam.from_(ds.attrs)
                cmode, descr = oq.calculation_mode, oq.description
            except:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                logging.warn('Removed invalid calculation %d', calc_id)
                os.remove(
                    os.path.join(datastore.DATADIR, 'calc_%s.hdf5' % calc_id))
                continue
            else:
                rows.append((calc_id, cmode, descr))
                ds.close()
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    ds = datastore.DataStore(calc_id, mode='r')
    if key:
        if key in datastore.view:
            print(datastore.view(key, ds))
            return
        obj = ds[key]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
        return

    oq = OqParam.from_(ds.attrs)

    # this part is experimental
    if rlzs and 'hcurves' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = get_hcurves_and_means(ds)
        dists = []
        for rlz, curves in curves_by_rlz.items():
            dist = sum(
                rmsep(mean_curves[imt], curves[imt], min_value)
                for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    else:
        # print all keys
        print(
            oq.calculation_mode, 'calculation (%r) saved in %s contains:' %
            (oq.description, ds.hdf5path))
        for key in ds:
            print(key, humansize(ds.getsize(key)))