def test_from_ruptures(self):
    oq = self.oqparam
    correl_model = readinput.get_correl_model(oq)
    rupcalc = event_based.EventBasedRuptureCalculator(oq)
    rupcalc.run()
    dstore = get_datastore(rupcalc)
    # this is the case with a single SES collection
    ses_ruptures = list(dstore['sescollection'][0].values())
    gsims_by_trt_id = rupcalc.rlzs_assoc.gsims_by_trt_id
    eps_dict = riskinput.make_eps_dict(
        self.assets_by_site, len(ses_ruptures),
        oq.master_seed, oq.asset_correlation)
    [ri] = self.riskmodel.build_inputs_from_ruptures(
        self.sitecol, ses_ruptures, gsims_by_trt_id,
        oq.truncation_level, correl_model, eps_dict, 1)
    assets, hazards, epsilons = ri.get_all(
        rupcalc.rlzs_assoc, self.assets_by_site, eps_dict)
    self.assertEqual([a.id for a in assets],
                     [b'a0', b'a1', b'a2', b'a3', b'a4'])
    self.assertEqual(set(a.taxonomy for a in assets),
                     set(['RM', 'RC', 'W']))
    self.assertEqual(list(map(len, epsilons)), [20] * 5)

def pre_execute(self):
    """
    Read the precomputed ruptures (or compute them on the fly) and
    prepare some empty files in the export directory to store the
    gmfs (if any). Pre-existing files will be erased.
    """
    super(EventBasedRiskCalculator, self).pre_execute()
    oq = self.oqparam
    # default to 1000 sampled epsilons if the parameter is missing
    epsilon_sampling = getattr(oq, 'epsilon_sampling', 1000)
    correl_model = readinput.get_correl_model(oq)
    gsims_by_col = self.rlzs_assoc.get_gsims_by_col()
    assets_by_site = self.assets_by_site
    logging.info('Populating the risk inputs')
    rup_by_tag = sum(self.datastore['sescollection'], AccumDict())
    all_ruptures = [rup_by_tag[tag] for tag in sorted(rup_by_tag)]
    num_samples = min(len(all_ruptures), epsilon_sampling)
    logging.info('Building the epsilons')
    eps_dict = riskinput.make_eps_dict(
        assets_by_site, num_samples, oq.master_seed,
        oq.asset_correlation)
    logging.info('Generated %d epsilons', num_samples * len(eps_dict))
    self.epsilon_matrix = numpy.array(
        [eps_dict[a['asset_ref']] for a in self.assetcol])
    self.riskinputs = list(self.riskmodel.build_inputs_from_ruptures(
        self.sitecol.complete, all_ruptures, gsims_by_col,
        oq.truncation_level, correl_model, eps_dict,
        oq.concurrent_tasks or 1))
    logging.info('Built %d risk inputs', len(self.riskinputs))

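# A self-contained sketch of the tag-merging idiom used in pre_execute:
# sum(sescollection, AccumDict()) folds the per-collection dictionaries
# of tag -> rupture into one mapping. The stand-in class below mimics
# only the merge behaviour needed here; the real AccumDict lives in
# openquake.baselib.general and also sums values on common keys.
class _AccumDictSketch(dict):
    def __add__(self, other):
        new = _AccumDictSketch(self)
        new.update(other)  # tags are disjoint across SES collections
        return new

def _all_ruptures_sketch(sescollection):
    rup_by_tag = sum(sescollection, _AccumDictSketch())
    # deterministic processing order: sort the ruptures by their tag
    return [rup_by_tag[tag] for tag in sorted(rup_by_tag)]

# example: _all_ruptures_sketch([_AccumDictSketch({'col=00|ses=1': 'r1'}),
#                                _AccumDictSketch({'col=01|ses=1': 'r2'})])
# returns ['r1', 'r2']
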
def test_from_ruptures(self):
    oq = self.oqparam
    correl_model = readinput.get_correl_model(oq)
    rupcalc = event_based.EventBasedRuptureCalculator(oq)
    rupcalc.run()
    dstore = get_datastore(rupcalc)
    # this is the case with a single SES collection
    ses_ruptures = list(dstore['sescollection'][0].values())
    gsims_by_trt_id = rupcalc.rlzs_assoc.get_gsims_by_trt_id()
    eps_dict = riskinput.make_eps_dict(
        self.assets_by_site, len(ses_ruptures),
        oq.master_seed, oq.asset_correlation)
    [ri] = self.riskmodel.build_inputs_from_ruptures(
        self.sitecol, ses_ruptures, gsims_by_trt_id,
        oq.truncation_level, correl_model, eps_dict, 1)
    assets, hazards, epsilons = ri.get_all(
        rupcalc.rlzs_assoc, self.assets_by_site)
    self.assertEqual([a.id for a in assets],
                     ['a0', 'a1', 'a2', 'a3', 'a4'])
    self.assertEqual(set(a.taxonomy for a in assets),
                     set(['RM', 'RC', 'W']))
    self.assertEqual(list(map(len, epsilons)), [20] * 5)

def make_eps_dict(self, num_ruptures):
    """
    :param num_ruptures: the size of the epsilon array for each asset
    """
    oq = self.oqparam
    with self.monitor('building epsilons', autoflush=True):
        eps = riskinput.make_eps_dict(
            self.assets_by_site, num_ruptures,
            oq.master_seed, oq.asset_correlation)
        return eps

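# Hedged sketch of the contract of riskinput.make_eps_dict as used by
# the wrapper above: each asset_ref maps to num_ruptures normal samples,
# reproducible from master_seed. This illustrates the expected shape
# only; the real implementation also honours asset_correlation.
import numpy

def _make_eps_dict_sketch(asset_refs, num_ruptures, master_seed):
    rng = numpy.random.RandomState(master_seed)
    return {ref: rng.normal(size=num_ruptures) for ref in asset_refs}

# len(_make_eps_dict_sketch([b'a0', b'a1'], 20, 42)[b'a0']) == 20,
# consistent with the [20] * 5 assertion in test_from_ruptures
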
def pre_execute(self):
    """
    Read the precomputed ruptures (or compute them on the fly) and
    prepare some datasets in the datastore.
    """
    super(EventBasedRiskCalculator, self).pre_execute()
    if not self.riskmodel:  # there is no riskmodel, exit early
        self.execute = lambda: None
        self.post_execute = lambda result: None
        return
    oq = self.oqparam
    epsilon_sampling = oq.epsilon_sampling
    correl_model = readinput.get_correl_model(oq)
    gsims_by_col = self.rlzs_assoc.get_gsims_by_col()
    assets_by_site = self.assets_by_site
    logging.info('Populating the risk inputs')
    rup_by_tag = sum(self.datastore['sescollection'], AccumDict())
    all_ruptures = [rup_by_tag[tag] for tag in sorted(rup_by_tag)]
    num_samples = min(len(all_ruptures), epsilon_sampling)
    eps_dict = riskinput.make_eps_dict(
        assets_by_site, num_samples, oq.master_seed,
        oq.asset_correlation)
    logging.info('Generated %d epsilons', num_samples * len(eps_dict))
    self.epsilon_matrix = numpy.array(
        [eps_dict[a['asset_ref']] for a in self.assetcol])
    self.riskinputs = list(self.riskmodel.build_inputs_from_ruptures(
        self.sitecol.complete, all_ruptures, gsims_by_col,
        oq.truncation_level, correl_model, eps_dict,
        oq.concurrent_tasks or 1))
    logging.info('Built %d risk inputs', len(self.riskinputs))

    # preparing empty datasets
    loss_types = self.riskmodel.get_loss_types()
    self.L = len(loss_types)
    self.R = len(self.rlzs_assoc.realizations)
    self.outs = ['event_loss_table-rlzs']
    if oq.insured_losses:
        self.outs.append('insured_loss_table-rlzs')
    self.datasets = {}
    for o, out in enumerate(self.outs):
        self.datastore.hdf5.create_group(out)
        for l, loss_type in enumerate(loss_types):
            for r, rlz in enumerate(self.rlzs_assoc.realizations):
                key = '/%s/%s' % (loss_type, rlz.uid)
                dset = self.datastore.create_dset(out + key, elt_dt)
                self.datasets[o, l, r] = dset

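# Illustrative enumeration (hypothetical loss types and uids) of the
# HDF5 layout prepared above: one dataset per (output, loss_type,
# realization), addressable both by path and by the (o, l, r) key.
def _dataset_paths_sketch(outs, loss_types, rlz_uids):
    return ['%s/%s/%s' % (out, lt, uid)
            for out in outs for lt in loss_types for uid in rlz_uids]

# _dataset_paths_sketch(['event_loss_table-rlzs'], ['structural'],
#                       ['b1~b1', 'b1~b2']) returns
# ['event_loss_table-rlzs/structural/b1~b1',
#  'event_loss_table-rlzs/structural/b1~b2']
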
def pre_execute(self):
    """
    Read the precomputed ruptures (or compute them on the fly) and
    prepare some datasets in the datastore.
    """
    super(EventBasedRiskCalculator, self).pre_execute()
    if not self.riskmodel:  # there is no riskmodel, exit early
        self.execute = lambda: None
        self.post_execute = lambda result: None
        return
    oq = self.oqparam
    epsilon_sampling = oq.epsilon_sampling
    correl_model = readinput.get_correl_model(oq)
    gsims_by_col = self.rlzs_assoc.get_gsims_by_col()
    assets_by_site = self.assets_by_site
    # the following is needed to set the asset idx attribute
    self.assetcol = riskinput.build_asset_collection(
        assets_by_site, oq.time_event)
    logging.info('Populating the risk inputs')
    rup_by_tag = sum(self.datastore['sescollection'], AccumDict())
    all_ruptures = [rup_by_tag[tag] for tag in sorted(rup_by_tag)]
    num_samples = min(len(all_ruptures), epsilon_sampling)
    eps_dict = riskinput.make_eps_dict(
        assets_by_site, num_samples, oq.master_seed,
        oq.asset_correlation)
    logging.info('Generated %d epsilons', num_samples * len(eps_dict))
    self.epsilon_matrix = numpy.array(
        [eps_dict[a['asset_ref']] for a in self.assetcol])
    self.riskinputs = list(self.riskmodel.build_inputs_from_ruptures(
        self.sitecol.complete, all_ruptures, gsims_by_col,
        oq.truncation_level, correl_model, eps_dict,
        oq.concurrent_tasks or 1))
    logging.info('Built %d risk inputs', len(self.riskinputs))

    # preparing empty datasets
    loss_types = self.riskmodel.loss_types
    self.L = len(loss_types)
    self.R = len(self.rlzs_assoc.realizations)
    self.outs = OUTPUTS
    self.datasets = {}
    self.monitor.oqparam = self.oqparam
    # ugly: attaching an attribute needed in the task function
    self.monitor.num_outputs = len(self.outs)
    # attaching two other attributes used in riskinput.gen_outputs
    self.monitor.assets_by_site = self.assets_by_site
    self.monitor.num_assets = N = self.count_assets()
    for o, out in enumerate(self.outs):
        self.datastore.hdf5.create_group(out)
        for l, loss_type in enumerate(loss_types):
            cb = self.riskmodel.curve_builders[l]
            build_curves = len(cb.ratios)
            for r, rlz in enumerate(self.rlzs_assoc.realizations):
                key = '/%s/rlz-%03d' % (loss_type, rlz.ordinal)
                if o in (ELT, ILT):  # loss tables
                    dset = self.datastore.create_dset(out + key, elt_dt)
                else:  # risk curves
                    if not build_curves:
                        continue
                    dset = self.datastore.create_dset(
                        out + key, cb.poes_dt, N)
                self.datasets[o, l, r] = dset
            if o in (FRC, IRC) and build_curves:
                grp = self.datastore['%s/%s' % (out, loss_type)]
                grp.attrs['loss_ratios'] = cb.ratios

def pre_execute(self):
    """
    Read the precomputed ruptures (or compute them on the fly) and
    prepare some datasets in the datastore.
    """
    super(EventBasedRiskCalculator, self).pre_execute()
    if not self.riskmodel:  # there is no riskmodel, exit early
        self.execute = lambda: None
        self.post_execute = lambda result: None
        return
    oq = self.oqparam
    if self.riskmodel.covs:
        epsilon_sampling = oq.epsilon_sampling
    else:
        epsilon_sampling = 1  # only one ignored epsilon
    correl_model = readinput.get_correl_model(oq)
    gsims_by_col = self.rlzs_assoc.get_gsims_by_col()
    assets_by_site = self.assets_by_site
    # the following is needed to set the asset idx attribute
    self.assetcol = riskinput.build_asset_collection(
        assets_by_site, oq.time_event)
    self.spec_indices = numpy.array(
        [a['asset_ref'] in oq.specific_assets for a in self.assetcol])
    logging.info('Populating the risk inputs')
    rup_by_tag = sum(self.datastore['sescollection'], AccumDict())
    all_ruptures = [rup_by_tag[tag] for tag in sorted(rup_by_tag)]
    for i, rup in enumerate(all_ruptures):
        rup.ordinal = i
    num_samples = min(len(all_ruptures), epsilon_sampling)
    eps_dict = riskinput.make_eps_dict(
        assets_by_site, num_samples, oq.master_seed,
        oq.asset_correlation)
    logging.info('Generated %d epsilons', num_samples * len(eps_dict))
    self.epsilon_matrix = numpy.array(
        [eps_dict[a['asset_ref']] for a in self.assetcol])
    self.riskinputs = list(self.riskmodel.build_inputs_from_ruptures(
        self.sitecol.complete, all_ruptures, gsims_by_col,
        oq.truncation_level, correl_model, eps_dict,
        oq.concurrent_tasks or 1))
    logging.info('Built %d risk inputs', len(self.riskinputs))

    # preparing empty datasets
    loss_types = self.riskmodel.loss_types
    self.L = len(loss_types)
    self.R = len(self.rlzs_assoc.realizations)
    self.outs = OUTPUTS
    self.datasets = {}
    self.monitor.oqparam = self.oqparam
    # ugly: attaching an attribute needed in the task function
    self.monitor.num_outputs = len(self.outs)
    # attaching two other attributes used in riskinput.gen_outputs
    self.monitor.assets_by_site = self.assets_by_site
    self.monitor.eps_dict = eps_dict
    self.monitor.num_assets = N = self.count_assets()
    for o, out in enumerate(self.outs):
        self.datastore.hdf5.create_group(out)
        for l, loss_type in enumerate(loss_types):
            cb = self.riskmodel.curve_builders[l]
            C = len(cb.ratios)  # curve resolution
            for r, rlz in enumerate(self.rlzs_assoc.realizations):
                key = '/%s/%s' % (loss_type, rlz.uid)
                if o == AGGLOSS:  # loss tables
                    dset = self.datastore.create_dset(out + key, elt_dt)
                elif o == AVGLOSS:  # average losses
                    dset = self.datastore.create_dset(
                        out + key, numpy.float32, (N, 2))
                elif o == SPECLOSS:  # specific losses
                    dset = self.datastore.create_dset(out + key, ela_dt)
                else:  # risk curves
                    if not C:
                        continue
                    dset = self.datastore.create_dset(
                        out + key, cb.lr_dt, N)
                self.datasets[o, l, r] = dset
            if o == RC and C:
                grp = self.datastore['%s/%s' % (out, loss_type)]
                grp.attrs['loss_ratios'] = cb.ratios

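# A hedged sketch (not the engine's actual save logic) of how the
# (o, l, r) -> dataset mapping built in pre_execute could be consumed
# for task results shaped as {(o, l, r): array}; `extend` is assumed
# to append rows to the growable dataset returned by create_dset.
def _save_results_sketch(datasets, results):
    for (o, l, r), array in results.items():
        dset = datasets.get((o, l, r))
        if dset is not None:  # may be missing when no curves are built
            dset.extend(array)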