Example #1
0
    def post_execute(self, curves_by_trt_id):
        """
        Store the probability maps per tectonic region type in the
        datastore, then combine them into hazard curves per realization
        and export those.

        :param curves_by_trt_id:
            a dictionary trt_id -> hazard curves
        """
        num_sites = len(self.sitecol)
        intensity_levels = self.oqparam.imtls
        curves_by_trt_gsim = {}

        with self.monitor('saving probability maps', autoflush=True):
            for trt_id, pmap in curves_by_trt_id.items():
                key = 'poes/%04d' % trt_id
                self.datastore[key] = pmap
                self.datastore.set_attrs(
                    key, trt=self.csm.info.get_trt(trt_id))
                # split the map into one entry per (trt_id, gsim) pair
                for idx, gsim in enumerate(
                        self.rlzs_assoc.gsims_by_trt_id[trt_id]):
                    curves_by_trt_gsim[trt_id, gsim] = pmap.extract(idx)
            self.datastore.set_nbytes('poes')

        with self.monitor('combine curves_by_rlz', autoflush=True):
            curves_by_rlz = self.rlzs_assoc.combine_curves(curves_by_trt_gsim)

        combined = {}
        for rlz, curves in curves_by_rlz.items():
            combined[rlz] = array_of_curves(
                curves, num_sites, intensity_levels)
        self.save_curves(combined)
Example #2
0
    def post_execute(self, curves_by_trt_id):
        """
        Save the computed probability maps and then build and export the
        hazard curves aggregated by realization.

        :param curves_by_trt_id:
            a dictionary trt_id -> hazard curves
        """
        n = len(self.sitecol)
        imtls = self.oqparam.imtls
        by_trt_gsim = {}

        with self.monitor('saving probability maps', autoflush=True):
            for trt_id, curves in curves_by_trt_id.items():
                key = 'poes/%04d' % trt_id
                self.datastore[key] = curves
                trt_name = self.csm.info.get_trt(trt_id)
                self.datastore.set_attrs(key, trt=trt_name)
                gsim_list = self.rlzs_assoc.gsims_by_trt_id[trt_id]
                # one extracted sub-map per GSIM position in the group
                for pos, gsim in enumerate(gsim_list):
                    by_trt_gsim[trt_id, gsim] = curves.extract(pos)
            self.datastore.set_nbytes('poes')

        with self.monitor('combine curves_by_rlz', autoflush=True):
            by_rlz = self.rlzs_assoc.combine_curves(by_trt_gsim)

        self.save_curves(dict(
            (rlz, array_of_curves(curves, n, imtls))
            for rlz, curves in by_rlz.items()))
Example #3
0
 def post_execute(self, result):
     """
     Save the hazard curves by realization (when requested) and,
     optionally, run a classical calculation to compare the mean
     curves against the ones derived from the GMFs.

     :param result:
         a dictionary (src_group_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     elif oq.hazard_curves_from_gmfs:
         rlzs = self.rlzs_assoc.realizations
         dic = {}
         for rlzi in result:
             dic[rlzs[rlzi]] = array_of_curves(
                 result[rlzi], len(self.sitecol), oq.imtls)
         self.save_curves(dic)
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # one could also set oq.number_of_logic_tree_samples = 0
         self.cl = ClassicalCalculator(oq, self.monitor)
         # TODO: perhaps it is possible to avoid reprocessing the source
         # model, however usually this is quite fast and do not dominate
         # the computation
         self.cl.run()
         for imt in self.mean_curves.dtype.fields:
             rdiff, index = max_rel_diff_index(
                 self.cl.mean_curves[imt], self.mean_curves[imt])
             # logging.warn is a deprecated alias of logging.warning
             logging.warning(
                 'Relative difference with the classical '
                 'mean curves for IMT=%s: %d%% at site index %d',
                 imt, rdiff * 100, index)
Example #4
0
 def post_execute(self, result):
     """
     Save the hazard curves by realization (when requested) and,
     optionally, run a classical calculation to compare the mean
     curves against the ones derived from the GMFs.

     :param result:
         a dictionary (trt_model_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     elif oq.hazard_curves_from_gmfs:
         rlzs = self.rlzs_assoc.realizations
         dic = {}
         for rlzi in result:
             dic[rlzs[rlzi]] = array_of_curves(result[rlzi],
                                               len(self.sitecol), oq.imtls)
         self.save_curves(dic)
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # one could also set oq.number_of_logic_tree_samples = 0
         self.cl = ClassicalCalculator(oq, self.monitor)
         # TODO: perhaps it is possible to avoid reprocessing the source
         # model, however usually this is quite fast and do not dominate
         # the computation
         self.cl.run(hazard_calculation_id=self.datastore.calc_id)
         for imt in self.mean_curves.dtype.fields:
             rdiff, index = max_rel_diff_index(self.cl.mean_curves[imt],
                                               self.mean_curves[imt])
             # logging.warn is a deprecated alias of logging.warning
             logging.warning(
                 'Relative difference with the classical '
                 'mean curves for IMT=%s: %d%% at site index %d', imt,
                 rdiff * 100, index)
Example #5
0
 def post_execute(self, pmap_by_grp_gsim):
     """
     Combine the probability maps keyed by (group, gsim) into curves
     per realization and store them.
     """
     n = len(self.sitecol)
     imtls = self.oqparam.imtls
     with self.monitor('combine curves_by_rlz', autoflush=True):
         by_rlz = self.rlzs_assoc.combine_curves(pmap_by_grp_gsim)
     to_save = {}
     for rlz, curves in by_rlz.items():
         to_save[rlz] = array_of_curves(curves, n, imtls)
     self.save_curves(to_save)
Example #6
0
    def pre_execute(self):
        """
        Associate the assets to the sites and build the riskinputs.

        The hazard curves are either read from a file (when
        'hazard_curves' is among the inputs) or taken from the 'poes'
        group of the datastore after the parent pre_execute has run.
        """
        if 'hazard_curves' in self.oqparam.inputs:  # read hazard from file
            haz_sitecol, haz_curves = readinput.get_hcurves(self.oqparam)
            self.save_params()
            self.read_exposure()  # define .assets_by_site
            self.load_riskmodel()
            # NOTE(review): the asset collection is built from the full
            # exposure *before* assoc_assets_sites filters the sites
            # below — presumably intentional; confirm that assets on
            # discarded sites must be part of the collection
            self.assetcol = riskinput.AssetCollection(
                self.assets_by_site, self.cost_calculator,
                self.oqparam.time_event)
            self.sitecol, self.assets_by_site = self.assoc_assets_sites(
                haz_sitecol)
            # a single fake (trt_id, gsim) key, since file-based curves
            # are not split by tectonic region type or GSIM
            curves_by_trt_gsim = {(0, 'FromFile'): haz_curves}
            self.datastore['csm_info'] = fake = source.CompositionInfo.fake()
            self.rlzs_assoc = fake.get_rlzs_assoc()
            self.save_mesh()
        else:  # compute hazard or read it from the datastore
            super(ClassicalRiskCalculator, self).pre_execute()
            logging.info('Preparing the risk input')
            curves_by_trt_gsim = {}
            for key in self.datastore['poes']:
                pmap = self.datastore['poes/' + key]
                # the dataset names under 'poes' are trt_id strings
                trt_id = int(key)
                gsims = self.rlzs_assoc.gsims_by_trt_id[trt_id]
                for i, gsim in enumerate(gsims):
                    # extract the curves for the i-th GSIM of the group
                    curves_by_trt_gsim[trt_id, gsim] = array_of_curves(
                        pmap, len(self.sitecol), self.oqparam.imtls, i)
        self.riskinputs = self.build_riskinputs(curves_by_trt_gsim)
        self.monitor.oqparam = self.oqparam

        # cached sizes used by the rest of the calculation:
        # N = total number of assets, L = loss types, R = realizations,
        # I = insured_losses flag, Q1 = number of quantiles plus one
        self.N = sum(len(assets) for assets in self.assets_by_site)
        self.L = len(self.riskmodel.loss_types)
        self.R = len(self.rlzs_assoc.realizations)
        self.I = self.oqparam.insured_losses
        self.Q1 = len(self.oqparam.quantile_loss_curves) + 1