Example 1
def weight_ruptures(rup_array, srcfilter, trt_by, scenario):
    """
    :param rup_array: an array of ruptures
    :param srcfilter: a SourceFilter
    :param trt_by: a function trt_smr -> TRT
    :param scenario: True for ruptures of kind scenario
    :returns: list of RuptureProxies
    """
    proxies = []
    for rec in rup_array:
        proxy = RuptureProxy(rec, scenario=scenario)
        sids = srcfilter.close_sids(proxy.rec, trt_by(rec['trt_smr']))
        proxy.nsites = len(sids)
        proxies.append(proxy)
    return proxies
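A minimal usage sketch, mirroring how Example 3 below invokes this helper; `dstore` and `srcfilter` are assumed to be available and are not part of the original snippet:

full_lt = dstore['full_lt']
proxies = weight_ruptures(dstore['ruptures'][:], srcfilter,
                          full_lt.trt_by, scenario=False)
# each proxy now carries an `nsites` attribute, usable as a weight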
Example 2
def get_rupture_getters(dstore, ct=0, slc=slice(None), srcfilter=None):
    """
    :param dstore: a :class:`openquake.commonlib.datastore.DataStore`
    :param ct: number of concurrent tasks
    :returns: a list of RuptureGetters
    """
    full_lt = dstore['full_lt']
    rlzs_by_gsim = full_lt.get_rlzs_by_gsim()
    rup_array = dstore['ruptures'][slc]
    if len(rup_array) == 0:
        raise NotFound('There are no ruptures in %s' % dstore)
    rup_array.sort(order=['trt_smr', 'n_occ'])
    scenario = 'scenario' in dstore['oqparam'].calculation_mode
    proxies = [RuptureProxy(rec, scenario) for rec in rup_array]
    maxweight = rup_array['n_occ'].sum() / (ct / 2 or 1)  # ~ct/2 blocks
    rgetters = []
    for block in general.block_splitter(proxies,
                                        maxweight,
                                        operator.itemgetter('n_occ'),
                                        key=operator.itemgetter('trt_smr')):
        trt_smr = block[0]['trt_smr']
        rbg = rlzs_by_gsim[trt_smr]
        rg = RuptureGetter(block, dstore.filename, trt_smr,
                           full_lt.trt_by(trt_smr), rbg)
        rgetters.append(rg)
    return rgetters
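For intuition, here is a simplified pure-Python stand-in for the grouping behaviour of `general.block_splitter` as used above (the input must be sorted by key, as the snippet ensures); it is an illustrative sketch, not the real openquake.baselib implementation:

def simple_block_splitter(items, max_weight, weight, key):
    # pack consecutive items sharing the same key into blocks whose
    # cumulative weight stays close to max_weight (illustrative only)
    block, tot = [], 0
    for item in items:
        if block and (key(item) != key(block[0]) or tot >= max_weight):
            yield block
            block, tot = [], 0
        block.append(item)
        tot += weight(item)
    if block:
        yield block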
Example 3
def get_rupture_getters(dstore, ct=0, slc=slice(None), srcfilter=None):
    """
    :param dstore: a :class:`openquake.commonlib.datastore.DataStore`
    :param ct: number of concurrent tasks
    :returns: a list of RuptureGetters
    """
    full_lt = dstore['full_lt']
    rlzs_by_gsim = full_lt.get_rlzs_by_gsim()
    rup_array = dstore['ruptures'][slc]
    if len(rup_array) == 0:
        raise NotFound('There are no ruptures in %s' % dstore)
    rup_array.sort(order='trt_smr')  # avoid generating too many tasks
    scenario = 'scenario' in dstore['oqparam'].calculation_mode
    if srcfilter is None:
        proxies = [RuptureProxy(rec, None, scenario) for rec in rup_array]
    elif len(rup_array) <= 1000:  # do not parallelize
        proxies = weight_ruptures(rup_array, srcfilter, full_lt.trt_by,
                                  scenario)
    else:  # parallelize the weighting of the ruptures
        proxies = parallel.Starmap.apply(
            weight_ruptures, (rup_array, srcfilter, full_lt.trt_by, scenario),
            concurrent_tasks=ct).reduce(acc=[])
    maxweight = sum(proxy.weight for proxy in proxies) / (ct or 1)
    rgetters = []
    for block in general.block_splitter(proxies,
                                        maxweight,
                                        operator.attrgetter('weight'),
                                        key=operator.itemgetter('trt_smr')):
        trt_smr = block[0]['trt_smr']
        rg = RuptureGetter(block, dstore.filename, trt_smr,
                           full_lt.trt_by(trt_smr), rlzs_by_gsim[trt_smr])
        rgetters.append(rg)
    return rgetters
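The `parallel.Starmap.apply(...).reduce(acc=[])` call above splits the array across tasks and concatenates the per-task lists; a pure-Python stand-in for that pattern (illustrative only, not the openquake.baselib API):

def apply_and_reduce(task, arr, extra, chunks=4):
    # split `arr` into chunks, run `task` on each and concatenate the
    # resulting lists, as Starmap.apply(...).reduce(acc=[]) would
    acc = []
    step = max(1, len(arr) // chunks)
    for i in range(0, len(arr), step):
        acc += task(arr[i:i + step], *extra)
    return acc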
Example 4
 def gen(arr):
     # `srcfilter` and `trt` are taken from the enclosing scope
     if srcfilter:
         for rec in arr:
             sids = srcfilter.close_sids(rec, trt)
             if len(sids):
                 yield RuptureProxy(rec, sids)
     else:
         yield from map(RuptureProxy, arr)
Example 5
 def split(self, srcfilter, maxw):
     """
     :yields: RuptureProxies with weight < maxw
     """
     proxies = []
     for proxy in self.proxies:
         sids = srcfilter.close_sids(proxy.rec, self.trt)
         if len(sids):
             proxies.append(RuptureProxy(proxy.rec, len(sids)))
     # `weight` is a helper taken from the enclosing scope
     for block in general.block_splitter(proxies, maxw, weight):
         yield RuptureGetter(block, self.filename, self.et_id, self.trt,
                             self.rlzs_by_gsim)
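A hypothetical caller of `split`, combining it with the unfiltered getters of Example 7 below; `dstore`, `srcfilter` and `process` are placeholders, not names from the original code:

for rgetter in gen_rgetters(dstore):
    for sub in rgetter.split(srcfilter, maxw=1000):
        process(sub)  # hypothetical consumer of the weighted block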
Example 6
 def execute(self):
     """
     Compute risk from GMFs or ruptures depending on what is stored
     """
     oq = self.oqparam
     self.gmf_bytes = 0
     if 'gmf_data' not in self.datastore:  # start from ruptures
         if not hasattr(oq, 'maximum_distance'):
             raise InvalidFile('Missing maximum_distance in %s' %
                               oq.inputs['job_ini'])
         srcfilter = self.src_filter()
         scenario = 'scenario' in oq.calculation_mode
         proxies = [
             RuptureProxy(rec, scenario)
             for rec in self.datastore['ruptures'][:]
         ]
         full_lt = self.datastore['full_lt']
         self.datastore.swmr_on()  # must come before the Starmap
         smap = parallel.Starmap.apply_split(
             ebrisk, (proxies, full_lt, oq, self.datastore),
             key=operator.itemgetter('trt_smr'),
             weight=operator.itemgetter('n_occ'),
             h5=self.datastore.hdf5,
             duration=oq.time_per_task,
             split_level=5)
         smap.monitor.save('srcfilter', srcfilter)
         smap.monitor.save('crmodel', self.crmodel)
         smap.monitor.save('rlz_id', self.rlzs)
         smap.reduce(self.agg_dicts)
         if self.gmf_bytes == 0:
             raise RuntimeError('No GMFs were generated, perhaps they were '
                                'all below the minimum_intensity threshold')
         logging.info('Produced %s of GMFs',
                      general.humansize(self.gmf_bytes))
     else:  # start from GMFs
         eids = self.datastore['gmf_data/eid'][:]
         self.log_info(eids)
         self.datastore.swmr_on()  # crucial!
         smap = parallel.Starmap(event_based_risk,
                                 self.gen_args(eids),
                                 h5=self.datastore.hdf5)
         smap.monitor.save('assets', self.assetcol.to_dframe())
         smap.monitor.save('crmodel', self.crmodel)
         smap.monitor.save('rlz_id', self.rlzs)
         smap.reduce(self.agg_dicts)
     if self.parent_events:
         assert self.parent_events == len(self.datastore['events'])
     return 1
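`smap.reduce(self.agg_dicts)` folds every task result into an accumulator; the same pattern expressed with the standard library (an illustration, not the Starmap implementation):

import functools

task_outputs = [{'losses': 1.5}, {'losses': 2.5}]  # stand-in task results

def agg_dicts(acc, res):
    # merge one task result into the running accumulator
    for key, val in res.items():
        acc[key] = acc.get(key, 0) + val
    return acc

total = functools.reduce(agg_dicts, task_outputs, {})  # {'losses': 4.0}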
Example 7
def gen_rgetters(dstore, slc=slice(None)):
    """
    :yields: unfiltered RuptureGetters
    """
    full_lt = dstore['full_lt']
    trt_by_grp = full_lt.trt_by_grp
    samples = full_lt.get_samples_by_grp()
    rlzs_by_gsim = full_lt.get_rlzs_by_gsim_grp()
    rup_array = dstore['ruptures'][slc]
    nr = len(dstore['ruptures'])
    for grp_id, arr in general.group_array(rup_array, 'grp_id').items():
        if not rlzs_by_gsim.get(grp_id, []):  # the model has no sources
            continue
        # the block hint is this group's share of all the ruptures
        for block in general.split_in_blocks(arr, len(arr) / nr):
            rgetter = RuptureGetter(
                [RuptureProxy(rec) for rec in block], dstore.filename, grp_id,
                trt_by_grp[grp_id], samples[grp_id], rlzs_by_gsim[grp_id])
            yield rgetter
Example 8
def gen_rupture_getters(dstore, ct=0, slc=slice(None)):
    """
    :param dstore: a :class:`openquake.baselib.datastore.DataStore`
    :param ct: number of concurrent tasks
    :yields: RuptureGetters
    """
    full_lt = dstore['full_lt']
    trt_by_et = full_lt.trt_by_et
    rlzs_by_gsim = full_lt.get_rlzs_by_gsim_grp()
    rup_array = dstore['ruptures'][slc]
    rup_array.sort(order='et_id')  # avoid generating too many tasks
    maxweight = rup_array['n_occ'].sum() / (ct or 1)
    for block in general.block_splitter(
            rup_array, maxweight, operator.itemgetter('n_occ'),
            key=operator.itemgetter('et_id')):
        et_id = block[0]['et_id']
        trt = trt_by_et[et_id]
        proxies = [RuptureProxy(rec) for rec in block]
        yield RuptureGetter(proxies, dstore.filename, et_id,
                            trt, rlzs_by_gsim[et_id])
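The `maxweight` formula targets roughly one block per concurrent task, since `block_splitter` cuts a new block once the cumulative `n_occ` reaches it; a quick arithmetic check with made-up numbers:

total_n_occ = 10_000                  # hypothetical sum of rup_array['n_occ']
ct = 8                                # concurrent tasks
maxweight = total_n_occ / (ct or 1)   # 1250.0
assert total_n_occ / maxweight == ct  # about 8 blocks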
Example 9
def gen_rgetters(dstore, slc=slice(None)):
    """
    :yields: unfiltered RuptureGetters
    """
    csm_info = dstore['csm_info']
    trt_by_grp = csm_info.grp_by("trt")
    samples = csm_info.get_samples_by_grp()
    rlzs_by_gsim = csm_info.get_rlzs_by_gsim_grp()
    rup_array = dstore['ruptures'][slc]
    ct = dstore['oqparam'].concurrent_tasks or 1
    nr = len(dstore['ruptures'])
    for grp_id, arr in general.group_array(rup_array, 'grp_id').items():
        if not rlzs_by_gsim[grp_id]:  # the model has no sources
            continue
        for block in general.split_in_blocks(arr, len(arr) / nr * ct):
            rgetter = RuptureGetter([RuptureProxy(rec)
                                     for rec in block], dstore.filename,
                                    grp_id, trt_by_grp[grp_id],
                                    samples[grp_id], rlzs_by_gsim[grp_id])
            yield rgetter
Example 10
def gen_rupture_getters(dstore, ct=0, slc=slice(None)):
    """
    :param dstore: a :class:`openquake.commonlib.datastore.DataStore`
    :param ct: number of concurrent tasks
    :yields: RuptureGetters
    """
    full_lt = dstore['full_lt']
    trt_by_et = full_lt.trt_by_et
    rlzs_by_gsim = full_lt.get_rlzs_by_gsim()
    rup_array = dstore['ruptures'][slc]
    rup_array.sort(order='trt_smrlz')  # avoid generating too many tasks
    maxweight = rup_array['n_occ'].sum() / (ct or 1)
    scenario = 'scenario' in dstore['oqparam'].calculation_mode
    for block in general.block_splitter(rup_array,
                                        maxweight,
                                        operator.itemgetter('n_occ'),
                                        key=operator.itemgetter('trt_smrlz')):
        trt_smrlz = block[0]['trt_smrlz']
        trt = trt_by_et[trt_smrlz]
        proxies = [RuptureProxy(rec, scenario=scenario) for rec in block]
        yield RuptureGetter(proxies, dstore.filename, trt_smrlz, trt,
                            rlzs_by_gsim[trt_smrlz])
Example 11
def _gen(arr, srcfilter, trt, samples):
    for rec in arr:
        sids = srcfilter.close_sids(rec, trt)
        if len(sids):
            yield RuptureProxy(rec, len(sids), samples)
Example 12
    def execute(self):
        oq = self.oqparam
        dstore = self.datastore
        if oq.ground_motion_fields and oq.min_iml.sum() == 0:
            logging.warning('The GMFs are not filtered: '
                            'you may want to set a minimum_intensity')
        else:
            logging.info('minimum_intensity=%s', oq.minimum_intensity)
        self.offset = 0
        if oq.hazard_calculation_id:  # from ruptures
            dstore.parent = datastore.read(oq.hazard_calculation_id)
        elif hasattr(self, 'csm'):  # from sources
            self.build_events_from_sources()
            if (oq.ground_motion_fields is False
                    and oq.hazard_curves_from_gmfs is False):
                return {}
        elif 'rupture_model' not in oq.inputs:
            logging.warning(
                'There is no rupture_model, the calculator will just '
                'import data without performing any calculation')
            fake = logictree.FullLogicTree.fake()
            dstore['full_lt'] = fake  # needed to expose the outputs
            dstore['weights'] = [1.]
            return {}
        else:  # scenario
            self._read_scenario_ruptures()
            if (oq.ground_motion_fields is False
                    and oq.hazard_curves_from_gmfs is False):
                return {}

        if oq.ground_motion_fields:
            imts = oq.get_primary_imtls()
            nrups = len(dstore['ruptures'])
            base.create_gmf_data(dstore, imts, oq.get_sec_imts())
            dstore.create_dset('gmf_data/sigma_epsilon', sig_eps_dt(oq.imtls))
            dstore.create_dset('gmf_data/time_by_rup',
                               time_dt, (nrups, ),
                               fillvalue=None)

        # event_based in parallel
        nr = len(dstore['ruptures'])
        logging.info('Reading {:_d} ruptures'.format(nr))
        scenario = 'scenario' in oq.calculation_mode
        proxies = [
            RuptureProxy(rec, scenario) for rec in dstore['ruptures'][:]
        ]
        full_lt = self.datastore['full_lt']
        dstore.swmr_on()  # must come before the Starmap
        smap = parallel.Starmap.apply_split(
            self.core_task.__func__, (proxies, full_lt, oq, self.datastore),
            key=operator.itemgetter('trt_smr'),
            weight=operator.itemgetter('n_occ'),
            h5=dstore.hdf5,
            concurrent_tasks=oq.concurrent_tasks or 1,
            duration=oq.time_per_task,
            split_level=oq.split_level)
        acc = smap.reduce(self.agg_dicts, self.acc0())
        if 'gmf_data' not in dstore:
            return acc
        if oq.ground_motion_fields:
            with self.monitor('saving avg_gmf', measuremem=True):
                self.save_avg_gmf()
        return acc
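Throughout these snippets `operator.itemgetter` supplies both the grouping key and the task weight; a minimal illustration on a plain dict standing in for a rupture record:

import operator

rec = {'trt_smr': 3, 'n_occ': 7}        # stand-in for a rupture record
key = operator.itemgetter('trt_smr')    # groups blocks by source group
weight = operator.itemgetter('n_occ')   # weights blocks by occurrences
assert key(rec) == 3 and weight(rec) == 7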