Example #1
 def post_execute(self, result):
     """
     :param result:
         a dictionary (trt_model_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     if oq.hazard_curves_from_gmfs:
         ClassicalCalculator.post_execute.__func__(self, result)
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # use a different datastore
         self.cl = ClassicalCalculator(oq, self.monitor)
         self.cl.datastore.parent = self.datastore
         result = self.cl.run(pre_execute=False, clean_up=False)
         for imt in self.mean_curves.dtype.fields:
             rdiff, index = max_rel_diff_index(self.cl.mean_curves[imt],
                                               self.mean_curves[imt])
             logging.warning(
                 'Relative difference with the classical '
                 'mean curves for IMT=%s: %d%% at site index %d', imt,
                 rdiff * 100, index)
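Note the call `ClassicalCalculator.post_execute.__func__(self, result)`: on Python 2, looking up a method on a class yields an unbound method that type-checks its first argument, so the raw function must be extracted through `__func__` before applying it to an instance of a sibling class. A minimal sketch of the pattern with hypothetical classes (on Python 3 the class attribute is already a plain function, so the direct call shown here works):

    class Base:
        def greet(self):
            return 'hello from %s' % self.__class__.__name__

    class Classical(Base):
        def post_execute(self):
            return 'classical: ' + self.greet()

    class EventBased(Base):  # a sibling of Classical, not a subclass
        def post_execute(self):
            # on Python 2 this required Classical.post_execute.__func__,
            # since the unbound method rejected non-Classical instances
            return Classical.post_execute(self)

    print(EventBased().post_execute())  # classical: hello from EventBased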
Example #2
 def post_execute(self, result):
     """
     :param result:
         a dictionary (src_group_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     elif oq.hazard_curves_from_gmfs:
         rlzs = self.rlzs_assoc.realizations
         dic = {}
         for rlzi in result:
             dic[rlzs[rlzi]] = array_of_curves(
                 result[rlzi], len(self.sitecol), oq.imtls)
         self.save_curves(dic)
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # one could also set oq.number_of_logic_tree_samples = 0
         self.cl = ClassicalCalculator(oq, self.monitor)
         # TODO: perhaps it is possible to avoid reprocessing the source
         # model; however, this is usually quite fast and does not dominate
         # the computation
         self.cl.run()
         for imt in self.mean_curves.dtype.fields:
             rdiff, index = max_rel_diff_index(
                 self.cl.mean_curves[imt], self.mean_curves[imt])
             logging.warning('Relative difference with the classical '
                             'mean curves for IMT=%s: %d%% at site index %d',
                             imt, rdiff * 100, index)
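Both examples compare the event-based mean curves with the classical ones through `max_rel_diff_index`, which returns the largest relative difference between two curve arrays together with the site index where it occurs. A minimal numpy sketch of that idea (an illustration of the semantics, not the engine's implementation; the `min_value` cutoff mirrors the test calls further below):

    import numpy

    def max_rel_diff_index(ref_curves, curves, min_value=0.01):
        """Return (max relative difference, site index), ignoring
        reference values below ``min_value`` to avoid near-zero PoEs."""
        ref = numpy.asarray(ref_curves, float)
        other = numpy.asarray(curves, float)
        diffs = numpy.zeros(len(ref))
        for sid, (r, o) in enumerate(zip(ref, other)):
            ok = r > min_value  # keep only the significant levels
            if ok.any():
                diffs[sid] = numpy.abs(r[ok] - o[ok]).max() / r[ok].max()
        sid = diffs.argmax()
        return diffs[sid], sid

    cl = numpy.array([[.9, .5, .2, .01], [.8, .4, .1, .005]])  # 2 sites
    eb = numpy.array([[.88, .52, .19, .01], [.7, .35, .1, .004]])
    print(max_rel_diff_index(cl, eb, min_value=0.1))  # worst site is 1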
Example #3
    def test_case_7(self):
        # 2 models x 3 GMPEs, 1000 samples * 10 SES
        expected = [
            'hazard_curve-mean.csv',
        ]
        out = self.run_calc(case_7.__file__, 'job.ini', exports='csv')
        aw = extract(self.calc.datastore, 'realizations')
        dic = countby(aw.array, 'branch_path')
        self.assertEqual(
            {
                b'b11~BA': 332,  # w = .6 * .5 = .30
                b'b11~CB': 169,  # w = .6 * .3 = .18
                b'b11~CY': 108,  # w = .6 * .2 = .12
                b'b12~BA': 193,  # w = .4 * .5 = .20
                b'b12~CB': 115,  # w = .4 * .3 = .12
                b'b12~CY': 83,  # w = .4 * .2 = .08
            },
            dic)

        fnames = out['hcurves', 'csv']
        mean_eb = get_mean_curves(self.calc.datastore)
        for exp, got in zip(expected, fnames):
            self.assertEqualFiles('expected/%s' % exp, got)
        mean_cl = get_mean_curves(self.calc.cl.datastore)
        reldiff, _index = max_rel_diff_index(mean_cl, mean_eb, min_value=0.1)
        self.assertLess(reldiff, 0.07)
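The inline weights explain the expected counts: with 1000 logic-tree samples, each branch path should be drawn roughly weight × 1000 times, so `b11~BA` with w = .6 × .5 = .30 is expected about 300 times against the 332 observed, and the six counts must sum to 1000. A short sketch of that check, reusing the numbers from the assertion above:

    # counts and weights copied from the assertion above
    observed = {'b11~BA': 332, 'b11~CB': 169, 'b11~CY': 108,
                'b12~BA': 193, 'b12~CB': 115, 'b12~CY': 83}
    weights = {'b11~BA': .6 * .5, 'b11~CB': .6 * .3, 'b11~CY': .6 * .2,
               'b12~BA': .4 * .5, 'b12~CB': .4 * .3, 'b12~CY': .4 * .2}
    samples = 1000
    for path in sorted(observed):
        print('%s: observed %d, expected %.0f'
              % (path, observed[path], weights[path] * samples))
    assert sum(observed.values()) == samples  # the paths partition the samples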
Example #4
 def post_execute(self, result):
     """
     :param result:
         a dictionary (trt_model_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     elif oq.hazard_curves_from_gmfs:
         rlzs = self.rlzs_assoc.realizations
         dic = {}
         for rlzi in result:
             dic[rlzs[rlzi]] = array_of_curves(result[rlzi],
                                               len(self.sitecol), oq.imtls)
         self.save_curves(dic)
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # one could also set oq.number_of_logic_tree_samples = 0
         self.cl = ClassicalCalculator(oq, self.monitor)
         # TODO: perhaps it is possible to avoid reprocessing the source
         # model; however, this is usually quite fast and does not dominate
         # the computation
         self.cl.run(hazard_calculation_id=self.datastore.calc_id)
         for imt in self.mean_curves.dtype.fields:
             rdiff, index = max_rel_diff_index(self.cl.mean_curves[imt],
                                               self.mean_curves[imt])
             logging.warning(
                 'Relative difference with the classical '
                 'mean curves for IMT=%s: %d%% at site index %d', imt,
                 rdiff * 100, index)
Example #5
 def post_execute(self, result):
     """
     Save the GMFs (if any) and the hazard curves (if any).
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     if oq.hazard_curves_from_gmfs:
         ClassicalCalculator.post_execute.__func__(self, result)
     if oq.ground_motion_fields:
         for (trt_id, gsim), gmf_by_tag in self.gmf_dict.items():
             self.gmf_dict[trt_id, gsim] = {tag: gmf_by_tag[tag][gsim]
                                            for tag in gmf_by_tag}
         self.gmf_by_trt_gsim = self.gmf_dict
         self.gmf_dict.clear()
     if oq.mean_hazard_curves:  # compute classical ones
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # use a different datastore
         self.cl = ClassicalCalculator(oq, self.monitor)
         # copy the relevant attributes
         self.cl.composite_source_model = self.csm
         self.cl.sitecol = self.sitecol
         self.cl.rlzs_assoc = self.csm.get_rlzs_assoc()
         result = self.cl.run(pre_execute=False, clean_up=False)
         for imt in self.mean_curves.dtype.fields:
             rdiff, index = max_rel_diff_index(
                 self.cl.mean_curves[imt], self.mean_curves[imt])
             logging.warning('Relative difference with the classical '
                             'mean curves for IMT=%s: %d%% at site index %d',
                             imt, rdiff * 100, index)
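The dict comprehension in the `ground_motion_fields` branch re-keys a nested mapping: for each `(trt_id, gsim)` pair the inner value goes from `tag -> {gsim -> gmf}` to a flat `tag -> gmf`, dropping the now-redundant inner gsim level. A toy sketch of the same transformation, with made-up tags and arrays:

    # made-up data: one (trt_id, gsim) key and two rupture tags
    gsim = 'BooreAtkinson2008'
    gmf_dict = {(0, gsim): {'tag-1': {gsim: [0.1, 0.2]},
                            'tag-2': {gsim: [0.3, 0.4]}}}
    for (trt_id, gs), gmf_by_tag in gmf_dict.items():
        # replace each value in place, exactly as in the example above
        gmf_dict[trt_id, gs] = {tag: gmf_by_tag[tag][gs]
                                for tag in gmf_by_tag}
    print(gmf_dict)  # {(0, 'BooreAtkinson2008'): {'tag-1': [0.1, 0.2], ...}}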
Example #6
 def post_execute(self, result):
     """
     :param result:
         a dictionary (trt_model_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     if oq.hazard_curves_from_gmfs:
         ClassicalCalculator.post_execute.__func__(self, result)
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # use a different datastore
         self.cl = ClassicalCalculator(oq, self.monitor)
         # TODO: perhaps it is possible to avoid reprocessing the source
         # model; however, this is usually quite fast and does not dominate
         # the computation
         self.cl.run(hazard_calculation_id=self.datastore.calc_id)
         for imt in self.mean_curves.dtype.fields:
             rdiff, index = max_rel_diff_index(
                 self.cl.mean_curves[imt], self.mean_curves[imt])
             logging.warning('Relative difference with the classical '
                             'mean curves for IMT=%s: %d%% at site index %d',
                             imt, rdiff * 100, index)
Example #7
 def post_execute(self, result):
     """
     :param result:
         a dictionary (trt_model_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     if oq.hazard_curves_from_gmfs:
         ClassicalCalculator.post_execute.__func__(self, result)
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # use a different datastore
         self.cl = ClassicalCalculator(oq, self.monitor)
         # copy the relevant attributes
         self.cl.composite_source_model = self.csm
         self.cl.sitecol = self.sitecol.complete
         self.cl.rlzs_assoc = self.csm.get_rlzs_assoc()
         result = self.cl.run(pre_execute=False, clean_up=False)
         for imt in self.mean_curves.dtype.fields:
             rdiff, index = max_rel_diff_index(
                 self.cl.mean_curves[imt], self.mean_curves[imt])
             logging.warning('Relative difference with the classical '
                             'mean curves for IMT=%s: %d%% at site index %d',
                             imt, rdiff * 100, index)
Example #8
    def post_execute(self, result):
        """
        :param result:
            a dictionary (src_group_id, gsim) -> haz_curves or an empty
            dictionary if hazard_curves_from_gmfs is false
        """
        oq = self.oqparam
        if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
            return
        elif oq.hazard_curves_from_gmfs:
            rlzs = self.rlzs_assoc.realizations
            # save individual curves
            if self.oqparam.individual_curves:
                for i in sorted(result):
                    key = 'hcurves/rlz-%03d' % i
                    if result[i]:
                        self.datastore[key] = result[i]
                    else:
                        logging.info('Zero curves for %s', key)
            # compute and save statistics; this is done in process
            # we don't need to parallelize, since event-based calculations
            # involve a "small" number of sites (<= 65,536)
            weights = (None if self.oqparam.number_of_logic_tree_samples
                       else [rlz.weight for rlz in rlzs])
            pstats = PmapStats(self.oqparam.quantile_hazard_curves, weights)
            for kind, stat in pstats.compute(
                    self.sitecol.sids, list(result.values())):
                if kind == 'mean' and not self.oqparam.mean_hazard_curves:
                    continue
                self.datastore['hcurves/' + kind] = stat

        if ('gmf_data' in self.datastore and
                'nbytes' not in self.datastore['gmf_data'].attrs):
            self.datastore.set_nbytes('gmf_data')
            for sm_id in self.datastore['gmf_data']:
                for rlzno in self.datastore['gmf_data/' + sm_id]:
                    self.datastore.set_nbytes(
                        'gmf_data/%s/%s' % (sm_id, rlzno))

        if oq.compare_with_classical:  # compute classical curves
            export_dir = os.path.join(oq.export_dir, 'cl')
            if not os.path.exists(export_dir):
                os.makedirs(export_dir)
            oq.export_dir = export_dir
            # one could also set oq.number_of_logic_tree_samples = 0
            self.cl = ClassicalCalculator(oq, self.monitor)
            # TODO: perhaps it is possible to avoid reprocessing the source
            # model; however, this is usually quite fast and does not dominate
            # the computation
            self.cl.run(close=False)
            cl_mean_curves = get_mean_curves(self.cl.datastore)
            eb_mean_curves = get_mean_curves(self.datastore)
            for imt in eb_mean_curves.dtype.names:
                rdiff, index = util.max_rel_diff_index(
                    cl_mean_curves[imt], eb_mean_curves[imt])
                logging.warning(
                    'Relative difference with the classical '
                    'mean curves for IMT=%s: %d%% at site index %d',
                    imt, rdiff * 100, index)
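`PmapStats` computes statistics across the realizations; with logic-tree sampling the weights are implicitly uniform (hence `weights = None` above), otherwise the realization weights apply. A hedged numpy sketch of the weighted mean such a statistic reduces to (an illustration, not the engine's `PmapStats`):

    import numpy

    def mean_curves(curves_by_rlz, weights=None):
        """Weighted mean over an (R, N, L) array of curves for R
        realizations, N sites and L levels; uniform if weights is None."""
        arr = numpy.asarray(curves_by_rlz, float)
        if weights is None:  # logic-tree sampling: every sample counts once
            return arr.mean(axis=0)
        w = numpy.asarray(weights, float)
        w /= w.sum()  # normalize so the result stays a probability
        return numpy.einsum('r,rnl->nl', w, arr)

    curves = [[[.9, .5, .1]], [[.7, .3, .05]]]  # 2 rlzs, 1 site, 3 levels
    print(mean_curves(curves))                  # uniform mean
    print(mean_curves(curves, weights=[.75, .25]))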
Example #9
 def post_execute(self, result):
     """
     :param result:
         a dictionary (src_group_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     elif oq.hazard_curves_from_gmfs:
         rlzs = self.rlzs_assoc.realizations
         # save individual curves
         for i in sorted(result):
             key = 'hcurves/rlz-%03d' % i
             if result[i]:
                 self.datastore[key] = result[i]
             else:
                 self.datastore[key] = ProbabilityMap(oq.imtls.array.size)
                 logging.info('Zero curves for %s', key)
         # compute and save statistics; this is done in process
         # we don't need to parallelize, since event-based calculations
         # involve a "small" number of sites (<= 65,536)
         weights = [rlz.weight for rlz in rlzs]
         hstats = self.oqparam.hazard_stats()
         if len(hstats) and len(rlzs) > 1:
             for kind, stat in hstats:
                 pmap = compute_pmap_stats(result.values(), [stat], weights)
                 self.datastore['hcurves/' + kind] = pmap
     if self.datastore.parent:
         self.datastore.parent.open()
     if 'gmf_data' in self.datastore:
         self.save_gmf_bytes()
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # one could also set oq.number_of_logic_tree_samples = 0
         self.cl = ClassicalCalculator(oq, self.monitor('classical'))
         # TODO: perhaps it is possible to avoid reprocessing the source
         # model; however, this is usually quite fast and does not dominate
         # the computation
         self.cl.run(close=False)
         cl_mean_curves = get_mean_curves(self.cl.datastore)
         eb_mean_curves = get_mean_curves(self.datastore)
         for imt in eb_mean_curves.dtype.names:
             rdiff, index = util.max_rel_diff_index(cl_mean_curves[imt],
                                                    eb_mean_curves[imt])
             logging.warning(
                 'Relative difference with the classical '
                 'mean curves for IMT=%s: %d%% at site index %d', imt,
                 rdiff * 100, index)
Example #10
 def test_case_7(self):
     # 2 models x 3 GMPEs, 100 samples * 20 SES
     expected = [
         'hazard_curve-mean.csv',
     ]
     out = self.run_calc(case_7.__file__, 'job.ini', exports='csv')
     fnames = out['hcurves', 'csv']
     mean_eb = get_mean_curves(self.calc.datastore)
     for exp, got in zip(expected, fnames):
         self.assertEqualFiles('expected/%s' % exp, got)
     mean_cl = get_mean_curves(self.calc.cl.datastore)
     reldiff, _index = max_rel_diff_index(mean_cl, mean_eb, min_value=0.1)
     self.assertLess(reldiff, 0.10)
Example #11
 def test_case_7(self):
     # 2 models x 3 GMPEs, 100 samples * 20 SES
     expected = [
         'hazard_curve-mean.csv',
     ]
     out = self.run_calc(case_7.__file__, 'job.ini', exports='csv')
     fnames = out['hcurves', 'csv']
     mean_eb = get_mean_curves(self.calc.datastore)
     for exp, got in zip(expected, fnames):
         self.assertEqualFiles('expected/%s' % exp, got)
     mean_cl = get_mean_curves(self.calc.cl.datastore)
     reldiff, _index = max_rel_diff_index(
         mean_cl, mean_eb, min_value=0.1)
     self.assertLess(reldiff, 0.10)
Example #12
 def test_case_7(self):
     # 2 models x 3 GMPEs, 100 samples * 10 SES
     expected = [
         'hazard_curve-mean.csv',
         'quantile_curve-0.1.csv',
         'quantile_curve-0.9.csv',
     ]
     out = self.run_calc(case_7.__file__, 'job.ini', exports='csv')
     fnames = out['hcurves', 'csv']
     mean_eb = self.calc.mean_curves
     for exp, got in zip(expected, fnames):
         self.assertEqualFiles('expected/%s' % exp, got)
     mean_cl = self.calc.cl.mean_curves
     for imt in mean_cl.dtype.fields:
         reldiff, _index = max_rel_diff_index(
             mean_cl[imt], mean_eb[imt], min_value=0.1)
         self.assertLess(reldiff, 0.41)
Example #13
 def test_case_7(self):
     # 2 models x 3 GMPEs, 100 samples * 10 SES
     expected = [
         'hazard_curve-mean.csv',
         'quantile_curve-0.1.csv',
         'quantile_curve-0.9.csv',
     ]
     out = self.run_calc(case_7.__file__, 'job.ini', exports='csv')
     fnames = out['hcurves', 'csv']
     mean_eb = self.calc.mean_curves
     for exp, got in zip(expected, fnames):
         self.assertEqualFiles('expected/%s' % exp, got)
     mean_cl = self.calc.cl.mean_curves
     for imt in mean_cl.dtype.fields:
         reldiff, _index = max_rel_diff_index(
             mean_cl[imt], mean_eb[imt], min_value=0.1)
         self.assertLess(reldiff, 0.41)
Example #14
    def test_case_7(self):
        # 2 models x 3 GMPEs, 10 samples * 40 SES
        expected = [
            'hazard_curve-mean.csv',
            'quantile_curve-0.1.csv',
            'quantile_curve-0.9.csv',
        ]
        out = self.run_calc(case_7.__file__, 'job.ini', exports='csv')
        fnames = out['hcurves', 'csv']
        mean_eb = get_mean_curves(self.calc.datastore)
        for exp, got in zip(expected, fnames):
            self.assertEqualFiles('expected/%s' % exp, got)
        mean_cl = get_mean_curves(self.calc.cl.datastore)
        for imt in mean_cl.dtype.fields:
            reldiff, _index = max_rel_diff_index(mean_cl[imt],
                                                 mean_eb[imt],
                                                 min_value=0.1)
            self.assertLess(reldiff, 0.20)

        exp = self.calc.datastore.get_attr('events', 'max_gmf_size')
        self.assertEqual(exp, 375496)
Example #15
    def post_execute(self, result):
        """
        Save the SES collection
        """
        oq = self.oqparam
        N = len(self.sitecol.complete)
        L = len(oq.imtls.array)
        if oq.hazard_calculation_id is None:
            self.rupser.close()
            num_events = sum(set_counts(self.datastore, 'events').values())
            if num_events == 0:
                raise RuntimeError(
                    'No seismic events! Perhaps the investigation time is '
                    'too short or the maximum_distance is too small')
            if oq.save_ruptures:
                logging.info('Setting %d event years on %d ruptures',
                             num_events, self.rupser.nruptures)
            with self.monitor('setting event years',
                              measuremem=True,
                              autoflush=True):
                numpy.random.seed(self.oqparam.ses_seed)
                set_random_years(self.datastore, 'events',
                                 int(self.oqparam.investigation_time))

        if self.gmf_size:
            self.datastore.set_attrs('events', max_gmf_size=self.gmf_size)
            msg = 'less than ' if self.get_min_iml(self.oqparam).sum() else ''
            logging.info('Generating %s%s of GMFs', msg,
                         humansize(self.gmf_size))

        if oq.hazard_curves_from_gmfs:
            rlzs = self.csm_info.rlzs_assoc.realizations
            # compute and save statistics; this is done in process and can
            # be very slow if there are thousands of realizations
            weights = [rlz.weight for rlz in rlzs]
            hstats = self.oqparam.hazard_stats()
            if len(hstats):
                logging.info('Computing statistical hazard curves')
                for kind, stat in hstats:
                    pmap = compute_pmap_stats(result.values(), [stat], weights)
                    arr = numpy.zeros((N, L), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array[:, 0]
                    self.datastore['hcurves/' + kind] = arr
            self.save_hmaps()
        if self.datastore.parent:
            self.datastore.parent.open('r')
        if 'gmf_data' in self.datastore:
            self.save_gmf_bytes()
        if oq.compare_with_classical:  # compute classical curves
            export_dir = os.path.join(oq.export_dir, 'cl')
            if not os.path.exists(export_dir):
                os.makedirs(export_dir)
            oq.export_dir = export_dir
            # one could also set oq.number_of_logic_tree_samples = 0
            self.cl = ClassicalCalculator(oq)
            # TODO: perhaps it is possible to avoid reprocessing the source
            # model; however, this is usually quite fast and does not dominate
            # the computation
            self.cl.run(close=False)
            cl_mean_curves = get_mean_curves(self.cl.datastore)
            eb_mean_curves = get_mean_curves(self.datastore)
            rdiff, index = util.max_rel_diff_index(cl_mean_curves,
                                                   eb_mean_curves)
            logging.warning(
                'Relative difference with the classical '
                'mean curves: %d%% at site index %d', rdiff * 100, index)
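`set_random_years` assigns every stored event a random year within the investigation time, with `numpy.random.seed(ses_seed)` making the assignment reproducible. A minimal sketch of that idea over a hypothetical events array:

    import numpy

    ses_seed = 42            # hypothetical job parameters
    investigation_time = 50
    events = numpy.zeros(8, dtype=[('id', 'u4'), ('year', 'u2')])
    events['id'] = numpy.arange(8)

    numpy.random.seed(ses_seed)  # same seed -> same years on every run
    # draw a year in [1, investigation_time] for each event
    events['year'] = numpy.random.randint(
        1, investigation_time + 1, len(events))
    print(events['year'])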
Example #16
    def post_execute(self, result):
        oq = self.oqparam
        if not oq.ground_motion_fields and not oq.hazard_curves_from_gmfs:
            return
        N = len(self.sitecol.complete)
        M = len(oq.imtls)
        L = len(oq.imtls.array)
        L1 = L // M
        if result and oq.hazard_curves_from_gmfs:
            rlzs = self.datastore['full_lt'].get_realizations()
            # compute and save statistics; this is done in process and can
            # be very slow if there are thousands of realizations
            weights = [rlz.weight for rlz in rlzs]
            # NB: the individual hazard curves are saved below only when
            # oq.individual_curves is set; the statistical curves are
            # saved whenever some hazard statistics are requested
            hstats = oq.hazard_stats()
            S = len(hstats)
            pmaps = list(result.values())
            R = len(weights)
            if len(pmaps) != R:
                # this should never happen, unless I break the
                # logic tree reduction mechanism during refactoring
                raise AssertionError('Expected %d pmaps, got %d' %
                                     (len(weights), len(pmaps)))
            if oq.individual_curves:
                logging.info('Saving individual hazard curves')
                self.datastore.create_dset('hcurves-rlzs', F32, (N, R, M, L1))
                self.datastore.set_shape_attrs('hcurves-rlzs',
                                               site_id=N,
                                               rlz_id=R,
                                               imt=list(oq.imtls),
                                               lvl=numpy.arange(L1))
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    ds = self.datastore.create_dset('hmaps-rlzs', F32,
                                                    (N, R, M, P))
                    self.datastore.set_shape_attrs('hmaps-rlzs',
                                                   site_id=N,
                                                   rlz_id=R,
                                                   imt=list(oq.imtls),
                                                   poe=oq.poes)
                for r, pmap in enumerate(pmaps):
                    arr = numpy.zeros((N, M, L1), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array.reshape(M, L1)
                    self.datastore['hcurves-rlzs'][:, r] = arr
                    if oq.poes:
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        for sid in hmap:
                            ds[sid, r] = hmap[sid].array

            if S:
                logging.info('Computing statistical hazard curves')
                self.datastore.create_dset('hcurves-stats', F32, (N, S, M, L1))
                self.datastore.set_shape_attrs('hcurves-stats',
                                               site_id=N,
                                               stat=list(hstats),
                                               imt=list(oq.imtls),
                                               lvl=numpy.arange(L1))
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    ds = self.datastore.create_dset('hmaps-stats', F32,
                                                    (N, S, M, P))
                    self.datastore.set_shape_attrs('hmaps-stats',
                                                   site_id=N,
                                                   stat=list(hstats),
                                                   imt=list(oq.imtls),
                                               poe=oq.poes)
                for s, stat in enumerate(hstats):
                    pmap = compute_pmap_stats(pmaps, [hstats[stat]], weights,
                                              oq.imtls)
                    arr = numpy.zeros((N, M, L1), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array.reshape(M, L1)
                    self.datastore['hcurves-stats'][:, s] = arr
                    if oq.poes:
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        for sid in hmap:
                            ds[sid, s] = hmap[sid].array

        if self.datastore.parent:
            self.datastore.parent.open('r')
        if oq.compare_with_classical:  # compute classical curves
            export_dir = os.path.join(oq.export_dir, 'cl')
            if not os.path.exists(export_dir):
                os.makedirs(export_dir)
            oq.export_dir = export_dir
            job_id = logs.init('job')
            oq.calculation_mode = 'classical'
            self.cl = ClassicalCalculator(oq, job_id)
            # TODO: perhaps it is possible to avoid reprocessing the source
            # model; however, this is usually quite fast and does not dominate
            # the computation
            self.cl.run()
            engine.expose_outputs(self.cl.datastore)
            for imt in oq.imtls:
                cl_mean_curves = get_mean_curves(self.cl.datastore, imt)
                eb_mean_curves = get_mean_curves(self.datastore, imt)
                self.rdiff, index = util.max_rel_diff_index(
                    cl_mean_curves, eb_mean_curves)
                logging.warning(
                    'Relative difference with the classical '
                    'mean curves: %d%% at site index %d, imt=%s',
                    self.rdiff * 100, index, imt)
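Here `L = len(oq.imtls.array)` counts the intensity levels over all IMTs, `M` the IMTs, and `L1 = L // M` the levels per IMT; each site's flat array of L PoEs is reshaped to (M, L1) before landing in the (N, R, M, L1) dataset. A small numpy sketch of that bookkeeping with made-up sizes:

    import numpy

    N, M, L1 = 3, 2, 4   # sites, IMTs, levels per IMT (made-up sizes)
    L = M * L1           # total levels, as in len(oq.imtls.array)
    arr = numpy.zeros((N, M, L1), numpy.float32)

    flat = numpy.linspace(1., .1, L, dtype=numpy.float32)  # one site's PoEs
    sid = 1
    arr[sid] = flat.reshape(M, L1)  # row per IMT, column per level
    print(arr[sid].shape)           # (2, 4)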
Example #17
    def post_execute(self, result):
        """
        Save the SES collection
        """
        oq = self.oqparam
        if 'ucerf' in oq.calculation_mode:
            self.rupser.close()
            self.csm.info.update_eff_ruptures(self.csm.get_num_ruptures())
            self.setting_events()
        N = len(self.sitecol.complete)
        L = len(oq.imtls.array)
        if result and oq.hazard_curves_from_gmfs:
            rlzs = self.csm_info.get_rlzs_assoc().realizations
            # compute and save statistics; this is done in process and can
            # be very slow if there are thousands of realizations
            weights = [rlz.weight for rlz in rlzs]
            # NB: in the future we may want to save the individual hazard
            # curves if oq.individual_curves is set; for the moment we
            # save the statistical curves only
            hstats = oq.hazard_stats()
            if len(hstats):
                logging.info('Computing statistical hazard curves')
                for statname, stat in hstats:
                    pmap = compute_pmap_stats(result.values(), [stat], weights)
                    arr = numpy.zeros((N, L), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array[:, 0]
                    self.datastore['hcurves/' + statname] = arr
                    if oq.poes:
                        P = len(oq.poes)
                        I = len(oq.imtls)
                        self.datastore.create_dset(
                            'hmaps/' + statname, F32, (N, P * I))
                        self.datastore.set_attrs(
                            'hmaps/' + statname, nbytes=N * P * I * 4)
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        ds = self.datastore['hmaps/' + statname]
                        for sid in hmap:
                            ds[sid] = hmap[sid].array[:, 0]

        if self.datastore.parent:
            self.datastore.parent.open('r')
        if 'gmf_data' in self.datastore:
            self.save_gmf_bytes()
        if oq.compare_with_classical:  # compute classical curves
            export_dir = os.path.join(oq.export_dir, 'cl')
            if not os.path.exists(export_dir):
                os.makedirs(export_dir)
            oq.export_dir = export_dir
            # one could also set oq.number_of_logic_tree_samples = 0
            self.cl = ClassicalCalculator(oq)
            # TODO: perhaps it is possible to avoid reprocessing the source
            # model; however, this is usually quite fast and does not dominate
            # the computation
            self.cl.run(close=False)
            cl_mean_curves = get_mean_curves(self.cl.datastore)
            eb_mean_curves = get_mean_curves(self.datastore)
            rdiff, index = util.max_rel_diff_index(
                cl_mean_curves, eb_mean_curves)
            logging.warning('Relative difference with the classical '
                            'mean curves: %d%% at site index %d',
                            rdiff * 100, index)
Example #18
    def post_execute(self, result):
        oq = self.oqparam
        if not oq.ground_motion_fields:
            return
        N = len(self.sitecol.complete)
        L = len(oq.imtls.array)
        if result and oq.hazard_curves_from_gmfs:
            rlzs = self.rlzs_assoc.realizations
            # compute and save statistics; this is done in process and can
            # be very slow if there are thousands of realizations
            weights = [rlz.weight for rlz in rlzs]
            # NB: in the future we may want to save the individual hazard
            # curves if oq.individual_curves is set; for the moment we
            # save the statistical curves only
            hstats = oq.hazard_stats()
            pmaps = list(result.values())
            if len(hstats):
                logging.info('Computing statistical hazard curves')
                if len(weights) != len(pmaps):
                    # this should never happen, unless I break the
                    # logic tree reduction mechanism during refactoring
                    raise AssertionError('Expected %d pmaps, got %d' %
                                         (len(weights), len(pmaps)))
                for statname, stat in hstats:
                    pmap = compute_pmap_stats(pmaps, [stat], weights, oq.imtls)
                    arr = numpy.zeros((N, L), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array[:, 0]
                    self.datastore['hcurves/' + statname] = arr
                    if oq.poes:
                        P = len(oq.poes)
                        M = len(oq.imtls)
                        self.datastore.create_dset(
                            'hmaps/' + statname, F32, (N, M, P))
                        self.datastore.set_attrs(
                            'hmaps/' + statname, nbytes=N * P * M * 4)
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        ds = self.datastore['hmaps/' + statname]
                        for sid in hmap:
                            ds[sid] = hmap[sid].array

        if self.datastore.parent:
            self.datastore.parent.open('r')
        if oq.compare_with_classical:  # compute classical curves
            export_dir = os.path.join(oq.export_dir, 'cl')
            if not os.path.exists(export_dir):
                os.makedirs(export_dir)
            oq.export_dir = export_dir
            job_id = logs.init('job')
            self.cl = ClassicalCalculator(oq, job_id)
            # TODO: perhaps it is possible to avoid reprocessing the source
            # model; however, this is usually quite fast and does not dominate
            # the computation
            self.cl.run(close=False)
            cl_mean_curves = get_mean_curves(self.cl.datastore)
            eb_mean_curves = get_mean_curves(self.datastore)
            rdiff, index = util.max_rel_diff_index(
                cl_mean_curves, eb_mean_curves)
            logging.warning('Relative difference with the classical '
                            'mean curves: %d%% at site index %d',
                            rdiff * 100, index)
Example #19
    def post_execute(self, result):
        oq = self.oqparam
        if not oq.ground_motion_fields:
            return
        N = len(self.sitecol.complete)
        L = len(oq.imtls.array)
        if result and oq.hazard_curves_from_gmfs:
            rlzs = self.rlzs_assoc.realizations
            # compute and save statistics; this is done in process and can
            # be very slow if there are thousands of realizations
            weights = [rlz.weight for rlz in rlzs]
            # NB: the individual hazard curves are saved below only when
            # oq.individual_curves is set; the statistical curves are
            # saved whenever some hazard statistics are requested
            hstats = oq.hazard_stats()
            S = len(hstats)
            pmaps = list(result.values())
            R = len(weights)
            if len(pmaps) != R:
                # this should never happen, unless I break the
                # logic tree reduction mechanism during refactoring
                raise AssertionError('Expected %d pmaps, got %d' %
                                     (len(weights), len(pmaps)))
            if oq.individual_curves:
                logging.info('Saving individual hazard curves')
                self.datastore.create_dset('hcurves-rlzs', F32, (N, R, L))
                self.datastore.set_attrs('hcurves-rlzs', nbytes=N * R * L * 4)
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    ds = self.datastore.create_dset(
                        'hmaps-rlzs', F32, (N, R, M, P))
                    self.datastore.set_attrs(
                        'hmaps-rlzs', nbytes=N * R * P * M * 4)
                for r, pmap in enumerate(pmaps):
                    arr = numpy.zeros((N, L), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array[:, 0]
                    self.datastore['hcurves-rlzs'][:, r] = arr
                    if oq.poes:
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        for sid in hmap:
                            ds[sid, r] = hmap[sid].array

            if S:
                logging.info('Computing statistical hazard curves')
                self.datastore.create_dset('hcurves-stats', F32, (N, S, L))
                self.datastore.set_attrs('hcurves-stats', nbytes=N * S * L * 4)
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    ds = self.datastore.create_dset(
                        'hmaps-stats', F32, (N, S, M, P))
                    self.datastore.set_attrs(
                        'hmaps-stats', nbytes=N * S * P * M * 4)
                for s, stat in enumerate(hstats):
                    pmap = compute_pmap_stats(
                        pmaps, [hstats[stat]], weights, oq.imtls)
                    arr = numpy.zeros((N, L), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array[:, 0]
                    self.datastore['hcurves-stats'][:, s] = arr
                    if oq.poes:
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        for sid in hmap:
                            ds[sid, s] = hmap[sid].array

        if self.datastore.parent:
            self.datastore.parent.open('r')
        if oq.compare_with_classical:  # compute classical curves
            export_dir = os.path.join(oq.export_dir, 'cl')
            if not os.path.exists(export_dir):
                os.makedirs(export_dir)
            oq.export_dir = export_dir
            job_id = logs.init('job')
            self.cl = ClassicalCalculator(oq, job_id)
            # TODO: perhaps it is possible to avoid reprocessing the source
            # model; however, this is usually quite fast and does not dominate
            # the computation
            self.cl.run(close=False)
            engine.expose_outputs(self.cl.datastore)
            cl_mean_curves = get_mean_curves(self.cl.datastore)
            eb_mean_curves = get_mean_curves(self.datastore)
            self.rdiff, index = util.max_rel_diff_index(
                cl_mean_curves, eb_mean_curves)
            logging.warning('Relative difference with the classical '
                            'mean curves: %d%% at site index %d',
                            self.rdiff * 100, index)
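When `oq.poes` is set, `calc.make_hmap` converts the hazard curves into hazard maps: for each requested PoE it finds the intensity at which the curve crosses that probability. A hedged sketch of the log-log interpolation commonly used for that conversion (an illustration of the concept, not the engine's exact code):

    import numpy

    def hazard_map(imls, poes, target_poes):
        """Interpolate one hazard curve (PoE per intensity level ``imls``)
        at the ``target_poes``; log-log interpolation is a common choice
        since hazard curves are near-linear in log-log space."""
        # numpy.interp wants increasing x, but PoEs decrease with intensity
        logp = numpy.log(poes[::-1])
        logi = numpy.log(imls[::-1])
        return numpy.exp(numpy.interp(numpy.log(target_poes), logp, logi))

    imls = numpy.array([.01, .05, .1, .2, .4])   # intensity levels
    poes = numpy.array([.99, .6, .3, .09, .01])  # one site's curve
    print(hazard_map(imls, poes, [0.1, 0.02]))   # ~[0.19, 0.32]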