Code Example #1
File: ebrisk.py  Project: rodolfopuglia/oq-engine
 def execute(self):
     oq = self.oqparam
     self.set_param(num_taxonomies=self.assetcol.num_taxonomies_by_site(),
                    maxweight=oq.ebrisk_maxweight /
                    (oq.concurrent_tasks or 1))
     parent = self.datastore.parent
     if parent:  # read the ruptures directly from the parent calculation
         hdf5path = parent.filename
         grp_indices = parent['ruptures'].attrs['grp_indices']
         nruptures = len(parent['ruptures'])
     else:  # no parent: copy the rupture datasets into a local cache file
         hdf5path = self.datastore.hdf5cache()
         grp_indices = self.datastore['ruptures'].attrs['grp_indices']
         nruptures = len(self.datastore['ruptures'])
         with hdf5.File(hdf5path, 'r+') as cache:
             self.datastore.hdf5.copy('weights', cache)
             self.datastore.hdf5.copy('ruptures', cache)
             self.datastore.hdf5.copy('rupgeoms', cache)
     self.init_logic_tree(self.csm_info)
     smap = parallel.Starmap(self.core_task.__func__,
                             monitor=self.monitor())
     trt_by_grp = self.csm_info.grp_by("trt")
     samples = self.csm_info.get_samples_by_grp()
     rlzs_by_gsim_grp = self.csm_info.get_rlzs_by_gsim_grp()
     # distribute the ruptures evenly across the available tasks
     ruptures_per_block = numpy.ceil(nruptures / (oq.concurrent_tasks or 1))
     for grp_id, rlzs_by_gsim in rlzs_by_gsim_grp.items():
         start, stop = grp_indices[grp_id]
         for indices in general.block_splitter(range(start, stop),
                                               ruptures_per_block):
             rgetter = getters.RuptureGetter(hdf5path, list(indices),
                                             grp_id, trt_by_grp[grp_id],
                                             samples[grp_id], rlzs_by_gsim)
             smap.submit(rgetter, self.src_filter, self.param)
     return smap.reduce(self.agg_dicts, numpy.zeros(self.N))
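
All four variants on this page share the same map/reduce structure: one task per block of ruptures is submitted through `parallel.Starmap`, and the partial results are folded together by `self.agg_dicts`. Below is a minimal self-contained sketch of that pattern, using the standard library's `concurrent.futures` in place of the engine's Starmap; both `task` and `agg` are hypothetical stand-ins, not engine code.

    from concurrent.futures import ProcessPoolExecutor, as_completed

    def task(block):
        # hypothetical stand-in for the ebrisk core task: a partial result
        return sum(block)

    def agg(acc, partial):
        # fold a partial result into the accumulator, like self.agg_dicts
        return acc + partial

    def starmap_reduce(blocks, acc=0):
        # submit one task per block, then reduce results as they arrive
        with ProcessPoolExecutor() as pool:
            futures = [pool.submit(task, b) for b in blocks]
            for fut in as_completed(futures):
                acc = agg(acc, fut.result())
        return acc

    if __name__ == '__main__':
        print(starmap_reduce([[1, 2], [3, 4], [5]]))  # 15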
Code Example #2
 def execute(self):
     oq = self.oqparam
     self.set_param(
         num_taxonomies=self.assetcol.num_taxonomies_by_site(),
         maxweight=oq.ebrisk_maxweight / (oq.concurrent_tasks or 1),
         epspath=cache_epsilons(self.datastore, oq, self.assetcol,
                                self.crmodel, self.E))
     parent = self.datastore.parent
     if parent:
         hdf5path = parent.filename
         grp_indices = parent['ruptures'].attrs['grp_indices']
         nruptures = len(parent['ruptures'])
     else:
         hdf5path = self.datastore.hdf5cache()
         grp_indices = self.datastore['ruptures'].attrs['grp_indices']
         nruptures = len(self.datastore['ruptures'])
         with hdf5.File(hdf5path, 'r+') as cache:
             self.datastore.hdf5.copy('weights', cache)
             self.datastore.hdf5.copy('ruptures', cache)
             self.datastore.hdf5.copy('rupgeoms', cache)
     self.init_logic_tree(self.csm_info)
     smap = parallel.Starmap(self.core_task.__func__,
                             hdf5path=self.datastore.filename)
     trt_by_grp = self.csm_info.grp_by("trt")
     samples = self.csm_info.get_samples_by_grp()
     rlzs_by_gsim_grp = self.csm_info.get_rlzs_by_gsim_grp()
     ruptures_per_block = numpy.ceil(nruptures / (oq.concurrent_tasks or 1))
     first_event = 0  # running offset of each getter's first event
     for grp_id, rlzs_by_gsim in rlzs_by_gsim_grp.items():
         start, stop = grp_indices[grp_id]
         for indices in general.block_splitter(range(start, stop),
                                               ruptures_per_block):
             rgetter = getters.RuptureGetter(hdf5path, list(indices),
                                             grp_id, trt_by_grp[grp_id],
                                             samples[grp_id], rlzs_by_gsim,
                                             first_event)
             first_event += rgetter.num_events
             smap.submit(rgetter, self.src_filter, self.param)
     self.events_per_sid = []
     self.gmf_nbytes = 0
     res = smap.reduce(self.agg_dicts, numpy.zeros(self.N))
     logging.info('Produced %s of GMFs', general.humansize(self.gmf_nbytes))
     return res
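
Compared to Example #1, this version threads a running `first_event` offset through the getters, so that each task knows where its events start in the global event table. A toy illustration of that bookkeeping, with `FakeGetter` as a hypothetical stand-in for `getters.RuptureGetter`:

    class FakeGetter:
        # hypothetical stand-in for getters.RuptureGetter
        def __init__(self, indices, first_event):
            self.indices = indices
            self.first_event = first_event
            self.num_events = 2 * len(indices)  # pretend 2 events/rupture

    first_event = 0
    rgetters = []
    for indices in ([0, 1, 2], [3, 4], [5, 6, 7, 8]):
        rgetter = FakeGetter(indices, first_event)
        first_event += rgetter.num_events  # next getter starts after these
        rgetters.append(rgetter)

    print([(g.first_event, g.num_events) for g in rgetters])
    # [(0, 6), (6, 4), (10, 8)]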
Code Example #3
    def execute(self):
        self.datastore.flush()  # just to be sure
        oq = self.oqparam
        parent = self.datastore.parent
        if parent:
            grp_indices = parent['ruptures'].attrs['grp_indices']
            n_occ = parent['ruptures']['n_occ']
            dstore = parent
            csm_info = parent['csm_info']
        else:
            grp_indices = self.datastore['ruptures'].attrs['grp_indices']
            n_occ = self.datastore['ruptures']['n_occ']
            dstore = self.datastore
            csm_info = self.csm_info
        per_block = numpy.ceil(n_occ.sum() / (oq.concurrent_tasks or 1))
        self.set_param(
            hdf5path=self.datastore.filename,
            task_duration=oq.task_duration or 1200,  # 20min
            tempname=cache_epsilons(self.datastore, oq, self.assetcol,
                                    self.crmodel, self.E))

        self.init_logic_tree(csm_info)
        trt_by_grp = csm_info.grp_by("trt")
        samples = csm_info.get_samples_by_grp()
        rlzs_by_gsim_grp = csm_info.get_rlzs_by_gsim_grp()
        ngroups = 0
        fe = 0  # running index into eslices, advancing one row per rupture
        eslices = self.datastore['eslices']
        allargs = []
        allpairs = list(enumerate(n_occ))
        srcfilter = self.src_filter(self.datastore.tempname)
        for grp_id, rlzs_by_gsim in rlzs_by_gsim_grp.items():
            start, stop = grp_indices[grp_id]
            if start == stop:  # no ruptures for the given grp_id
                continue
            ngroups += 1
            for pairs in general.block_splitter(allpairs[start:stop],
                                                per_block,
                                                weight=get_n_occ):
                indices = [i for i, n in pairs]
                rup_array = dstore['ruptures'][indices]
                rgetter = getters.RuptureGetter(
                    rup_array, dstore.filename, grp_id, trt_by_grp[grp_id],
                    samples[grp_id], rlzs_by_gsim,
                    eslices[fe:fe + len(indices), 0])
                allargs.append((rgetter, srcfilter, self.param))
                fe += len(indices)
        logging.info('Found %d/%d source groups with ruptures', ngroups,
                     len(rlzs_by_gsim_grp))
        self.events_per_sid = []
        self.lossbytes = 0
        # switch the datastore to single-writer/multiple-readers mode
        # before the workers start reading from it
        self.datastore.swmr_on()
        smap = parallel.Starmap(self.core_task.__func__,
                                allargs,
                                h5=self.datastore.hdf5)
        res = smap.reduce(self.agg_dicts, numpy.zeros(self.N))
        gmf_bytes = self.datastore['gmf_info']['gmfbytes'].sum()
        logging.info('Produced %s of GMFs', general.humansize(gmf_bytes))
        logging.info('Produced %s of losses',
                     general.humansize(self.lossbytes))
        return res
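
Unlike the first two examples, Examples #3 and #4 split the work by number of occurrences rather than by rupture count: `general.block_splitter` packs the `(index, n_occ)` pairs into blocks whose total weight stays close to `per_block`. Here is a rough sketch of such greedy weighted chunking; `split_by_weight` is a simplified stand-in, not the engine's actual `block_splitter`.

    import numpy

    def split_by_weight(pairs, max_weight, weight=lambda pair: pair[1]):
        # greedy chunking: close the current block once its accumulated
        # weight reaches max_weight (simplified general.block_splitter)
        block, tot = [], 0
        for pair in pairs:
            block.append(pair)
            tot += weight(pair)
            if tot >= max_weight:
                yield block
                block, tot = [], 0
        if block:
            yield block

    n_occ = numpy.array([5, 1, 1, 7, 2, 2, 3])
    per_block = numpy.ceil(n_occ.sum() / 3)  # 3 concurrent tasks -> 7.0
    for block in split_by_weight(list(enumerate(n_occ)), per_block):
        print(block, 'weight =', sum(n for _, n in block))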
Code Example #4
File: ebrisk.py  Project: guyomd/oq-engine
 def execute(self):
     oq = self.oqparam
     parent = self.datastore.parent
     if parent:
         hdf5path = parent.filename
         grp_indices = parent['ruptures'].attrs['grp_indices']
         n_occ = parent['ruptures']['n_occ']
     else:
         hdf5path = self.datastore.cachepath()
         grp_indices = self.datastore['ruptures'].attrs['grp_indices']
         n_occ = self.datastore['ruptures']['n_occ']
         with hdf5.File(hdf5path, 'r+') as cache:
             self.datastore.hdf5.copy('weights', cache)
             self.datastore.hdf5.copy('ruptures', cache)
             self.datastore.hdf5.copy('rupgeoms', cache)
     # the default concurrent_tasks is twice the available cores, so
     # halving it gives an estimate of the number of cores to use
     num_cores = oq.__class__.concurrent_tasks.default // 2 or 1
     per_block = numpy.ceil(n_occ.sum() / (oq.concurrent_tasks or 1))
     logging.info(
         'Using %d occurrences per block (over %d occurrences, '
         '%d events)', per_block, n_occ.sum(), self.E)
     self.set_param(
         task_duration=oq.task_duration or 600,  # 10min
         epspath=cache_epsilons(self.datastore, oq, self.assetcol,
                                self.crmodel, self.E))
     self.init_logic_tree(self.csm_info)
     trt_by_grp = self.csm_info.grp_by("trt")
     samples = self.csm_info.get_samples_by_grp()
     rlzs_by_gsim_grp = self.csm_info.get_rlzs_by_gsim_grp()
     ngroups = 0
     fe = 0
     eslices = self.datastore['eslices']
     allargs = []
     allpairs = list(enumerate(n_occ))
     for grp_id, rlzs_by_gsim in rlzs_by_gsim_grp.items():
         start, stop = grp_indices[grp_id]
         if start == stop:  # no ruptures for the given grp_id
             continue
         ngroups += 1
         for pairs in general.block_splitter(allpairs[start:stop],
                                             per_block,
                                             weight=get_n_occ):
             indices = [i for i, n in pairs]
             rgetter = getters.RuptureGetter(
                 hdf5path, indices, grp_id, trt_by_grp[grp_id],
                 samples[grp_id], rlzs_by_gsim,
                 eslices[fe:fe + len(indices), 0])
             allargs.append((rgetter, self.src_filter, self.param))
             fe += len(indices)
     logging.info('Found %d/%d source groups with ruptures', ngroups,
                  len(rlzs_by_gsim_grp))
     self.events_per_sid = []
     self.lossbytes = 0
     smap = parallel.Starmap(self.core_task.__func__,
                             allargs,
                             num_cores=num_cores,
                             hdf5path=self.datastore.filename)
     res = smap.reduce(self.agg_dicts, numpy.zeros(self.N))
     gmf_bytes = self.datastore['gmf_info']['gmfbytes'].sum()
     self.datastore.set_attrs('gmf_info',
                              events_per_sid=self.events_per_sid)
     logging.info('Produced %s of GMFs', general.humansize(gmf_bytes))
     logging.info('Produced %s of losses',
                  general.humansize(self.lossbytes))
     return res
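
Finally, note how Examples #1, #2 and #4 prime their cache file by copying the `weights`, `ruptures` and `rupgeoms` datasets out of the main datastore. `h5py`'s `Group.copy` can copy a dataset into a group that lives in another file, which is all those `else` branches rely on. A self-contained demonstration with made-up file names:

    import h5py
    import numpy

    # build a toy "datastore" with the three datasets the calculators copy
    with h5py.File('store.hdf5', 'w') as store:
        store['weights'] = numpy.ones(3)
        store['ruptures'] = numpy.arange(10)
        store['rupgeoms'] = numpy.zeros((10, 2))

    # copy them into a separate cache file, as the else branches above do
    with h5py.File('store.hdf5', 'r') as store:
        with h5py.File('cache.hdf5', 'w') as cache:
            for name in ('weights', 'ruptures', 'rupgeoms'):
                store.copy(name, cache)

    with h5py.File('cache.hdf5', 'r') as cache:
        print(sorted(cache))  # ['rupgeoms', 'ruptures', 'weights']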