Example #1
    def pre_execute(self):
        oq = self.oqparam
        if oq.calculation_mode == 'ebrisk':
            oq.ground_motion_fields = False
            logging.warning('You should be using the event_based_risk '
                            'calculator, not ebrisk!')
        parent = self.datastore.parent
        if parent:
            self.datastore['full_lt'] = parent['full_lt']
            self.parent_events = ne = len(parent['events'])
            logging.info('There are %d ruptures and %d events',
                         len(parent['ruptures']), ne)
        else:
            self.parent_events = None

        if oq.investigation_time and oq.return_periods != [0]:
            # setting return_periods = 0 disables loss curves
            eff_time = oq.investigation_time * oq.ses_per_logic_tree_path
            if eff_time < 2:
                logging.warning(
                    'eff_time=%s is too small to compute loss curves',
                    eff_time)
        super().pre_execute()
        parentdir = (os.path.dirname(self.datastore.ppath)
                     if self.datastore.ppath else None)
        oq.hdf5path = self.datastore.filename
        oq.parentdir = parentdir
        logging.info('There are {:_d} ruptures'.format(
            len(self.datastore['ruptures'])))
        self.events_per_sid = numpy.zeros(self.N, U32)
        self.datastore.swmr_on()
        sec_losses = []  # one insured loss for each loss type with a policy
        oq.D = 2
        if self.policy_dict:
            sec_losses.append(InsuredLosses(self.policy_name,
                                            self.policy_dict))
            self.oqparam.D = 3
        if not hasattr(self, 'aggkey'):
            self.aggkey = self.assetcol.tagcol.get_aggkey(oq.aggregate_by)
        oq._sec_losses = sec_losses
        oq.M = len(oq.all_imts())
        oq.N = self.N
        oq.K = len(self.aggkey)
        ct = oq.concurrent_tasks or 1
        oq.maxweight = int(oq.ebrisk_maxsize / ct)
        self.A = A = len(self.assetcol)
        self.L = L = len(oq.loss_types)
        if (oq.aggregate_by and self.E * A > oq.max_potential_gmfs
                and all(val == 0 for val in oq.minimum_asset_loss.values())):
            logging.warning('The calculation is really big; consider setting '
                            'minimum_asset_loss')
        base.create_risk_by_event(self)
        self.rlzs = self.datastore['events']['rlz_id']
        self.num_events = numpy.bincount(self.rlzs)  # events by rlz
        if oq.avg_losses:
            self.create_avg_losses()
        alt_nbytes = 4 * self.E * L
        if alt_nbytes / (oq.concurrent_tasks or 1) > TWO32:
            raise RuntimeError('The risk_by_event is too big to be transfer'
                               'red with %d tasks' % oq.concurrent_tasks)
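
The final guard above estimates how many bytes of risk_by_event data each task would have to transfer: four bytes (one float32) per event and loss type. A minimal standalone sketch of the same check, assuming TWO32 is 2 ** 32 as the name suggests and using made-up sizes:

    TWO32 = 2 ** 32  # assumption: ~4 GiB transfer limit, matching the name

    E, L = 2_000_000_000, 5      # hypothetical event and loss-type counts
    concurrent_tasks = 8
    alt_nbytes = 4 * E * L       # one float32 per (event, loss type) pair
    if alt_nbytes / (concurrent_tasks or 1) > TWO32:
        raise RuntimeError('The risk_by_event is too big to be transferred '
                           'with %d tasks' % concurrent_tasks)
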
Example #2
    def pre_execute(self):
        oq = self.oqparam
        oq.ground_motion_fields = False
        super().pre_execute()
        sec_losses = []
        if self.policy_dict:
            sec_losses.append(InsuredLosses(self.policy_name,
                                            self.policy_dict))
        self.aggkey, attrs = get_aggkey_attrs(self.assetcol.tagcol,
                                              oq.aggregate_by)
        logging.info('Building %d event loss table(s)', len(self.aggkey))
        if len(self.aggkey) > oq.max_num_loss_curves:
            logging.warning('Too many aggregations, the performance will be '
                            'bad on a cluster!')
        self.param['elt'] = elt = EventLossTable(self.aggkey,
                                                 oq.loss_dt().names,
                                                 sec_losses)
        self.param['ses_ratio'] = oq.ses_ratio
        self.param['aggregate_by'] = oq.aggregate_by
        ct = oq.concurrent_tasks or 1
        self.param['maxweight'] = int(oq.ebrisk_maxsize / ct)
        self.A = A = len(self.assetcol)
        self.L = L = len(elt.loss_names)
        self.check_number_loss_curves()
        mal = self.param['minimum_asset_loss']
        if (oq.aggregate_by and self.E * A > oq.max_potential_gmfs
                and any(val == 0 for val in mal.values())):
            logging.warning('The calculation is really big; consider setting '
                            'minimum_asset_loss')

        elt_dt = [('event_id', U32), ('loss', (F32, (L, )))]
        for idxs, attr in zip(self.aggkey, attrs):
            idx = ','.join(map(str, idxs)) + ','
            self.datastore.create_dset('event_loss_table/' + idx,
                                       elt_dt,
                                       attrs=attr)
        self.param['aggkey'] = self.aggkey
        self.param.pop('oqparam', None)  # unneeded
        self.datastore.create_dset('avg_losses-stats', F32, (A, 1, L))  # mean
        elt_nbytes = 4 * self.E * L
        if elt_nbytes / (oq.concurrent_tasks or 1) > TWO32:
            raise RuntimeError('The event loss table is too big to be transfer'
                               'red with %d tasks' % oq.concurrent_tasks)
        self.datastore.create_dset('gmf_info', gmf_info_dt)
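
The elt_dt passed to create_dset in this example is a NumPy structured dtype: each record pairs an event id with an L-vector of losses. A small sketch, with U32/F32 standing in for the engine's numpy.uint32/numpy.float32 aliases and a made-up L:

    import numpy as np

    U32, F32 = np.uint32, np.float32
    L = 3  # hypothetical number of loss types
    elt_dt = np.dtype([('event_id', U32), ('loss', (F32, (L,)))])

    rows = np.zeros(2, elt_dt)
    rows[0] = (42, [1.5, 0.0, 3.25])  # one event id plus its per-loss-type vector
    print(rows['loss'].shape)         # (2, 3): two records, L losses each
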
Example #3
    def pre_execute(self):
        oq = self.oqparam
        oq.ground_motion_fields = False
        super().pre_execute()
        sec_losses = []
        if self.policy_dict:
            sec_losses.append(InsuredLosses(self.policy_name,
                                            self.policy_dict))
        if not hasattr(self, 'aggkey'):
            self.aggkey = self.assetcol.tagcol.get_aggkey(oq.aggregate_by)
        self.param['alt'] = alt = AggLossTable.new(self.aggkey,
                                                   oq.loss_dt().names,
                                                   sec_losses)
        self.param['ses_ratio'] = oq.ses_ratio
        self.param['aggregate_by'] = oq.aggregate_by
        ct = oq.concurrent_tasks or 1
        self.param['maxweight'] = int(oq.ebrisk_maxsize / ct)
        self.A = A = len(self.assetcol)
        self.L = L = len(alt.loss_names)
        mal = self.param['minimum_asset_loss']
        if (oq.aggregate_by and self.E * A > oq.max_potential_gmfs
                and any(val == 0 for val in mal.values())):
            logging.warning('The calculation is really big; consider setting '
                            'minimum_asset_loss')

        descr = [('event_id', U32), ('agg_id', U32)]
        for name in oq.loss_names:
            descr.append((name, F32))
        self.datastore.create_dframe('agg_loss_table',
                                     descr,
                                     K=len(self.aggkey))
        self.param.pop('oqparam', None)  # unneeded
        self.datastore.create_dset('avg_losses-stats', F32, (A, 1, L))
        self.datastore.set_shape_descr('avg_losses-stats',
                                       asset_id=self.assetcol['id'],
                                       stat=['mean'],
                                       loss_type=oq.loss_names)
        alt_nbytes = 4 * self.E * L
        if alt_nbytes / (oq.concurrent_tasks or 1) > TWO32:
            raise RuntimeError('The event loss table is too big to be transfer'
                               'red with %d tasks' % oq.concurrent_tasks)
        self.datastore.create_dset('gmf_info', gmf_info_dt)
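
The descr list built in this example is a flat column specification for the agg_loss_table dataframe: two id columns plus one float32 column per loss type. A sketch of the same construction, with hypothetical loss names in place of oq.loss_names:

    import numpy as np

    U32, F32 = np.uint32, np.float32
    loss_names = ['structural', 'nonstructural']  # hypothetical

    descr = [('event_id', U32), ('agg_id', U32)]
    for name in loss_names:
        descr.append((name, F32))

    # the same (name, type) pairs also work as a structured dtype in memory
    table = np.zeros(4, np.dtype(descr))
    print(table.dtype.names)  # ('event_id', 'agg_id', 'structural', 'nonstructural')
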
Example #4
    def pre_execute(self):
        oq = self.oqparam
        if oq.calculation_mode == 'ebrisk':
            oq.ground_motion_fields = False
        parent = self.datastore.parent
        if parent:
            self.datastore['full_lt'] = parent['full_lt']
            ne = len(parent['events'])
            logging.info('There are %d ruptures and %d events',
                         len(parent['ruptures']), ne)

        if oq.investigation_time and oq.return_periods != [0]:
            # setting return_periods = 0 disables loss curves
            eff_time = oq.investigation_time * oq.ses_per_logic_tree_path
            if eff_time < 2:
                logging.warning(
                    'eff_time=%s is too small to compute loss curves',
                    eff_time)
        super().pre_execute()
        if oq.hazard_calculation_id:
            parentdir = os.path.dirname(
                datastore.read(oq.hazard_calculation_id).filename)
        else:
            parentdir = None
        self.set_param(hdf5path=self.datastore.filename,
                       parentdir=parentdir,
                       ignore_covs=oq.ignore_covs,
                       master_seed=oq.master_seed,
                       asset_correlation=int(oq.asset_correlation))
        logging.info(
            'There are {:_d} ruptures'.format(len(self.datastore['ruptures'])))
        self.events_per_sid = numpy.zeros(self.N, U32)
        self.datastore.swmr_on()
        sec_losses = []
        if self.policy_dict:
            sec_losses.append(
                InsuredLosses(self.policy_name, self.policy_dict))
        if not hasattr(self, 'aggkey'):
            self.aggkey = self.assetcol.tagcol.get_aggkey(oq.aggregate_by)
        self.param['sec_losses'] = sec_losses
        self.param['aggregate_by'] = oq.aggregate_by
        self.param['min_iml'] = oq.min_iml
        self.param['M'] = len(oq.all_imts())
        self.param['N'] = self.N
        self.param['K'] = len(self.aggkey)
        ct = oq.concurrent_tasks or 1
        self.param['maxweight'] = int(oq.ebrisk_maxsize / ct)
        self.param['collect_rlzs'] = oq.collect_rlzs
        self.A = A = len(self.assetcol)
        self.L = L = len(oq.loss_names)
        if (oq.aggregate_by and self.E * A > oq.max_potential_gmfs and
                all(val == 0 for val in oq.minimum_asset_loss.values())):
            logging.warning('The calculation is really big; consider setting '
                            'minimum_asset_loss')

        if 'risk' in oq.calculation_mode:
            descr = [('event_id', U32), ('agg_id', U32), ('loss_id', U8),
                     ('loss', F32), ('variance', F32)]
            self.datastore.create_df(
                'agg_loss_table', descr,
                K=len(self.aggkey), L=len(oq.loss_names))
        else:  # damage
            dmgs = ' '.join(self.crmodel.damage_states[1:])
            descr = ([('event_id', U32), ('agg_id', U32), ('loss_id', U8)] +
                     [(dc, F32) for dc in self.crmodel.get_dmg_csq()])
            self.datastore.create_df(
                'agg_loss_table', descr,
                K=len(self.aggkey), L=len(oq.loss_names), limit_states=dmgs)
        ws = self.datastore['weights']
        R = 1 if oq.collect_rlzs else len(ws)
        self.rlzs = self.datastore['events']['rlz_id']
        self.num_events = numpy.bincount(self.rlzs)  # events by rlz
        if oq.avg_losses:
            if oq.collect_rlzs:
                if oq.investigation_time:  # event_based
                    self.avg_ratio = numpy.array([oq.time_ratio / len(ws)])
                else:  # scenario
                    self.avg_ratio = numpy.array([1. / self.num_events.sum()])
            else:
                if oq.investigation_time:  # event_based
                    self.avg_ratio = numpy.array([oq.time_ratio] * len(ws))
                else:  # scenario
                    self.avg_ratio = 1. / self.num_events
            self.avg_losses = numpy.zeros((A, R, L), F32)
            self.datastore.create_dset('avg_losses-rlzs', F32, (A, R, L))
            self.datastore.set_shape_descr(
                'avg_losses-rlzs', asset_id=self.assetcol['id'], rlz=R,
                loss_type=oq.loss_names)
        alt_nbytes = 4 * self.E * L
        if alt_nbytes / (oq.concurrent_tasks or 1) > TWO32:
            raise RuntimeError('The event loss table is too big to be transfer'
                               'red with %d tasks' % oq.concurrent_tasks)
        self.datastore.create_dset('gmf_info', gmf_info_dt)
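
Example #4 counts events per realization with numpy.bincount and then chooses an averaging ratio per branch. A minimal sketch of the event_based branch without collect_rlzs, using made-up rlz_id and time_ratio values:

    import numpy as np

    rlz_id = np.array([0, 0, 1, 2, 2, 2])   # hypothetical event -> realization map
    num_events = np.bincount(rlz_id)         # events per realization: [2, 1, 3]

    time_ratio = 0.02                        # hypothetical oq.time_ratio
    R = len(num_events)
    avg_ratio = np.array([time_ratio] * R)   # event_based, collect_rlzs=False
    print(num_events, avg_ratio)
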