Example #1
    def build_agg_curve_and_stats(self, builder):
        """
        Build a single loss curve per realization. It is NOT obtained
        by aggregating the loss curves; instead, it is obtained without
        generating the loss curves, directly from the aggregate losses.
        """
        oq = self.oqparam
        C = oq.loss_curve_resolution
        loss_curve_dt, _ = self.riskmodel.build_all_loss_dtypes(
            C, oq.conditional_loss_poes, oq.insured_losses)
        lts = self.riskmodel.loss_types
        lr_data = [(l, r, dset.value) for (l, r), dset in
                   numpy.ndenumerate(self.agg_loss_table)]
        ses_ratio = self.oqparam.ses_ratio
        result = parallel.apply(
            build_agg_curve, (lr_data, self.I, ses_ratio, C, self.L,
                              self.monitor('')),
            concurrent_tasks=self.oqparam.concurrent_tasks).reduce()
        agg_curve = numpy.zeros(self.R, loss_curve_dt)
        for l, r, name in result:
            agg_curve[lts[l]][name][r] = result[l, r, name]
        if oq.individual_curves:
            self.datastore['agg_curve-rlzs'] = agg_curve
            self.saved['agg_curve-rlzs'] = agg_curve.nbytes

        if self.R > 1:
            self.build_agg_curve_stats(builder, agg_curve, loss_curve_dt)
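The pattern used here (split the input with `parallel.apply`, run one task per chunk, then merge the partial results with `.reduce()`) can be sketched with a toy task. The worker `count_items` below is a hypothetical stand-in, and the import path of `parallel` may differ between engine versions; only the `apply(...).reduce()` call mirrors the example. Real engine tasks may also receive a monitor argument, so it is accepted as an optional parameter:

    import numpy
    from openquake.baselib import parallel  # import path may vary by engine version

    def count_items(chunk, monitor=None):
        # hypothetical worker: each task receives one chunk of the first
        # argument and returns a small dictionary of partial results
        return {'n': len(chunk)}

    # apply() splits numpy.arange(10) into chunks, one task per chunk;
    # reduce() merges the partial dicts, summing the values of matching keys
    total = parallel.apply(count_items, (numpy.arange(10),),
                           concurrent_tasks=3).reduce()
    # total == {'n': 10}, as in the test_apply example at the bottom of this page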
Example #2
    def execute(self):
        """
        Run the UCERF calculation
        """
        monitor = self.monitor(oqparam=self.oqparam)
        res = parallel.apply(
            compute_ruptures_gmfs_curves,
            (self.csm.source_models, self.sitecol, self.rlzs_assoc, monitor),
            concurrent_tasks=self.oqparam.concurrent_tasks).submit_all()
        L = len(self.oqparam.imtls.array)
        acc = {rlz.ordinal: ProbabilityMap(L, 1)
               for rlz in self.rlzs_assoc.realizations}
        data = functools.reduce(
            self.combine_pmaps_and_save_gmfs, res, AccumDict(acc))
        self.save_data_transfer(res)
        self.datastore['csm_info'] = self.csm.info
        self.datastore['source_info'] = numpy.array(
            self.infos, source.SourceInfo.dt)
        if 'gmf_data' in self.datastore:
            self.datastore.set_nbytes('gmf_data')
        return data
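Unlike the previous example, the results are not reduced right away: `submit_all()` returns an iterable of partial results, which are folded into a pre-seeded accumulator via `functools.reduce` and a custom combiner (`combine_pmaps_and_save_gmfs`). A minimal sketch of that pattern, with hypothetical stand-ins `word_lengths` and `combine`:

    import functools
    from openquake.baselib import parallel  # import path may vary by engine version

    def word_lengths(words, monitor=None):
        # hypothetical worker returning a partial {word: length} mapping
        return {w: len(w) for w in words}

    def combine(acc, partial):
        # hypothetical combiner, analogous to combine_pmaps_and_save_gmfs:
        # fold one partial result into the running accumulator
        acc.update(partial)
        return acc

    res = parallel.apply(word_lengths, (['alpha', 'beta', 'gamma'],)).submit_all()
    data = functools.reduce(combine, res, {})
    # data == {'alpha': 5, 'beta': 4, 'gamma': 5}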
Example #3
    def execute(self):
        """
        Run `core_task(sources, sitecol, monitor)` in parallel, splitting
        the sources according to their weight and tectonic region type.
        """
        monitor = self.monitor.new(self.core_task.__name__)
        monitor.oqparam = oq = self.oqparam
        ucerf_source = self.src_group.sources[0]
        max_dist = oq.maximum_distance[DEFAULT_TRT]
        acc = AccumDict({
            grp_id: ProbabilityMap(len(oq.imtls.array), len(gsims))
            for grp_id, gsims in self.rlzs_assoc.gsims_by_grp_id.items()})
        acc.calc_times = []
        acc.eff_ruptures = AccumDict()  # grp_id -> eff_ruptures
        acc.bb_dict = {}

        if len(self.csm) > 1:
            # when multiple branches, parallelise by branch
            branches = [br.value for br in self.smlt.branches.values()]
            rup_res = parallel.starmap(
                ucerf_classical_hazard_by_branch,
                self.gen_args(branches, ucerf_source, monitor)).submit_all()
        else:
            # single branch
            gsims = self.rlzs_assoc.gsims_by_grp_id[0]
            [(branch_id, branch)] = self.smlt.branches.items()
            branchname = branch.value
            ucerf_source.src_group_id = 0
            ucerf_source.weight = 1
            ucerf_source.nsites = len(self.sitecol)
            self.infos[0, ucerf_source.source_id] = source.SourceInfo(
                ucerf_source)
            logging.info('Getting the background point sources')
            with self.monitor('getting background sources', autoflush=True):
                ucerf_source.build_idx_set()
                background_sids = ucerf_source.get_background_sids(
                    self.sitecol, max_dist)
                bckgnd_sources = ucerf_source.get_background_sources(
                    background_sids)

            # parallelize on the background sources, small tasks
            args = (bckgnd_sources, self.sitecol, oq.imtls,
                    gsims, self.oqparam.truncation_level,
                    'SourceSitesFilter', max_dist, (), monitor)
            bg_res = parallel.apply(
                pmap_from_grp, args,
                concurrent_tasks=self.oqparam.concurrent_tasks).submit_all()

            # parallelize by rupture subsets
            tasks = self.oqparam.concurrent_tasks * 2  # they are big tasks
            rup_sets = ucerf_source.get_rupture_indices(branchname)
            rup_res = parallel.apply(
                ucerf_classical_hazard_by_rupture_set,
                (rup_sets, branchname, ucerf_source, self.src_group.id,
                 self.sitecol, gsims, monitor),
                concurrent_tasks=tasks).submit_all()

            # compose probabilities from background sources
            for pmap in bg_res:
                acc[0] |= pmap
            self.save_data_transfer(bg_res)

        pmap_by_grp_id = functools.reduce(self.agg_dicts, rup_res, acc)
        with self.monitor('store source_info', autoflush=True):
            self.store_source_info(self.infos)
            self.save_data_transfer(rup_res)
        self.datastore['csm_info'] = self.csm.info
        self.rlzs_assoc = self.csm.info.get_rlzs_assoc(
            functools.partial(self.count_eff_ruptures, pmap_by_grp_id))
        self.datastore['csm_info'] = self.csm.info
        return pmap_by_grp_id
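The overall shape of this method (pre-seed an accumulator per group, run separate parallel phases, then fold every partial result into the accumulator) can be sketched with plain sets standing in for the `ProbabilityMap` objects, which, as the `acc[0] |= pmap` line shows, also support in-place union. All names below are hypothetical:

    import functools
    from openquake.baselib import parallel  # import path may vary by engine version

    def find_evens(numbers, monitor=None):
        # hypothetical stand-in for the background-source phase (pmap_from_grp)
        return {n for n in numbers if n % 2 == 0}

    def find_odds(numbers, monitor=None):
        # hypothetical stand-in for the rupture-set phase
        return {n for n in numbers if n % 2 == 1}

    def agg(acc, partial):
        # analogue of agg_dicts: union each partial result into group 0
        acc[0] |= partial
        return acc

    acc = {0: set()}  # pre-seeded accumulator, like the per-group ProbabilityMaps
    bg_res = parallel.apply(find_evens, (list(range(20)),)).submit_all()
    for partial in bg_res:  # first phase folded in directly
        acc[0] |= partial
    rup_res = parallel.apply(find_odds, (list(range(20)),)).submit_all()
    result = functools.reduce(agg, rup_res, acc)  # second phase folded via reduce
    # result == {0: set(range(20))}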
Example #4
    def test_received(self):
        with mock.patch.dict("os.environ", OQ_DISTRIBUTE="celery"):
            res = parallel.apply(get_length, (numpy.arange(10),)).submit_all()
            list(res)  # iterate on the results
            self.assertGreater(len(res.received), 0)
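Two details are worth noting: the environment variable is set with the standard `unittest.mock` dictionary-patching idiom, and the results are iterated (`list(res)`) before `res.received` is checked, which suggests the received sizes are only accumulated as results come back from the workers. A standalone illustration of the patching idiom, with a made-up variable name and independent of the engine code:

    import os
    from unittest import mock

    with mock.patch.dict("os.environ", MY_FLAG="on"):  # MY_FLAG is hypothetical
        assert os.environ["MY_FLAG"] == "on"           # visible inside the block
    assert "MY_FLAG" not in os.environ                 # removed again on exit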
Example #5
    def test_apply_maxweight(self):
        res = parallel.apply(get_length, ("aaabb",),
                             maxweight=2, key=lambda char: char)
        # chunks ['aa', 'ab', 'b']
        partial_sums = sorted(dic["n"] for dic in res)
        self.assertEqual(partial_sums, [1, 2, 2])
Example #6
    def test_apply_no_tasks(self):
        res = parallel.apply(get_length, ("aaabb",),
                             concurrent_tasks=0, key=lambda char: char)
        # chunks [['a', 'a', 'a'], ['b', 'b']]
        partial_sums = sorted(dic["n"] for dic in res)
        self.assertEqual(partial_sums, [2, 3])
Example #7
    def test_apply(self):
        res = parallel.apply(get_length, (numpy.arange(10),),
                             concurrent_tasks=3).reduce()
        self.assertEqual(res, {"n": 10})  # chunks [4, 4, 2]
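The test examples never show `get_length` itself; a definition consistent with their expected outputs would be the following hypothetical reconstruction (the tests only require that each task return a dictionary with an 'n' key; real engine tasks often take an extra monitor argument, so it is made optional here):

    def get_length(data, monitor=None):
        # hypothetical worker: report the size of the chunk this task received
        return {'n': len(data)}

With such a worker, test_apply's chunks of sizes [4, 4, 2] produce {'n': 4}, {'n': 4} and {'n': 2}, and `.reduce()` sums the matching keys into {'n': 10}; in the other two tests the partial dictionaries are inspected directly instead of being reduced.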