Example #1
File: base.py Project: gem/oq-risklib
    def make_eps(self, num_ruptures):
        """
        :param num_ruptures: the size of the epsilon array for each asset
        """
        oq = self.oqparam
        with self.monitor('building epsilons', autoflush=True):
            return riskinput.make_eps(self.assets_by_site, num_ruptures,
                                      oq.master_seed, oq.asset_correlation)
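Every example in this collection funnels into the same riskinput.make_eps(assets, num_events, master_seed, asset_correlation) call. As a rough mental model only (a sketch under assumptions, not the oq-risklib implementation; the real function also handles asset ordering and intermediate correlation values), it returns a float32 matrix with one row of standard-normal epsilons per asset:

import numpy

def make_eps_sketch(num_assets, num_events, master_seed, asset_correlation):
    """Hypothetical stand-in for riskinput.make_eps, for illustration."""
    rng = numpy.random.RandomState(master_seed)  # reproducible draws
    if asset_correlation == 1:
        # fully correlated assets: every row is the same epsilon draw
        eps = numpy.tile(rng.normal(size=num_events), (num_assets, 1))
    else:
        # uncorrelated assets: independent standard-normal rows
        eps = rng.normal(size=(num_assets, num_events))
    return eps.astype(numpy.float32)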
Example #2
    def pre_execute(self):
        """
        Compute the GMFs, build the epsilons, the riskinputs, and a dictionary
        with the unit of measure, used in the export phase.
        """
        oq = self.oqparam
        super().pre_execute()
        self.assetcol = self.datastore['assetcol']
        A = len(self.assetcol)
        R = self.R
        self.event_slice = functools.partial(
            _event_slice, oq.number_of_ground_motion_fields)
        E = oq.number_of_ground_motion_fields * self.R
        if oq.ignore_covs:
            # all zeros; the data transfer is not so big in scenario
            eps = numpy.zeros((A, E), numpy.float32)
        else:
            logging.info('Building the epsilons')
            eps = riskinput.make_eps(
                self.assetcol, E, oq.master_seed, oq.asset_correlation)

        self.riskinputs = self.build_riskinputs('gmf', eps, E)
        self.param['E'] = E
        imt = list(oq.imtls)[0]  # assuming the weights are the same for all IMTs
        try:
            self.param['weights'] = self.datastore['weights'][imt]
        except KeyError:
            self.param['weights'] = [1 / R for _ in range(R)]
        self.param['event_slice'] = self.event_slice
        self.param['insured_losses'] = self.oqparam.insured_losses
        self.param['asset_loss_table'] = self.oqparam.asset_loss_table
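Example #2 binds oq.number_of_ground_motion_fields into _event_slice via functools.partial. The helper itself is not shown here; a plausible sketch, inferred only from how the partial is built, maps a realization index to its contiguous block of events:

def _event_slice(num_gmfs, rlzi):
    # hypothetical: with num_gmfs events per realization, realization
    # rlzi owns the half-open event range [num_gmfs * rlzi,
    # num_gmfs * (rlzi + 1)), e.g. _event_slice(100, 2) -> slice(200, 300)
    return slice(num_gmfs * rlzi, num_gmfs * (rlzi + 1))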
Example #3
    def test_from_ruptures(self):
        oq = self.oqparam
        correl_model = readinput.get_correl_model(oq)
        rupcalc = event_based.EventBasedRuptureCalculator(oq)
        rupcalc.run()
        dstore = get_datastore(rupcalc)

        # this is the case with a single SES collection
        ses_ruptures = list(dstore['sescollection'][0].values())

        gsims_by_trt_id = rupcalc.rlzs_assoc.gsims_by_trt_id

        eps = riskinput.make_eps(self.assets_by_site, len(ses_ruptures),
                                 oq.master_seed, oq.asset_correlation)

        [ri] = self.riskmodel.build_inputs_from_ruptures(self.sitecol,
                                                         ses_ruptures,
                                                         gsims_by_trt_id,
                                                         oq.truncation_level,
                                                         correl_model,
                                                         eps,
                                                         hint=1)

        assets, hazards, epsilons = ri.get_all(
            rupcalc.rlzs_assoc, self.assets_by_site)
        self.assertEqual([a.id for a in assets],
                         [b'a0', b'a1', b'a2', b'a3', b'a4'])
        self.assertEqual(set(a.taxonomy for a in assets),
                         set(['RM', 'RC', 'W']))
        self.assertEqual(list(map(len, epsilons)), [26] * 5)
Example #4
    def test_from_ruptures(self):
        oq = self.oqparam
        correl_model = readinput.get_correl_model(oq)
        rupcalc = event_based.EventBasedRuptureCalculator(oq)
        rupcalc.run()
        dstore = get_datastore(rupcalc)

        # this is the case with a single SES collection
        ses_ruptures = list(dstore['sescollection'][0].values())

        gsims_by_trt_id = rupcalc.rlzs_assoc.gsims_by_trt_id

        eps = riskinput.make_eps(
            self.assets_by_site, len(ses_ruptures), oq.master_seed,
            oq.asset_correlation)

        [ri] = self.riskmodel.build_inputs_from_ruptures(
            self.sitecol, ses_ruptures, gsims_by_trt_id, oq.truncation_level,
            correl_model, eps, 1)

        assets, hazards, epsilons = ri.get_all(
            rupcalc.rlzs_assoc, self.assets_by_site, eps)
        self.assertEqual([a.id for a in assets],
                         [b'a0', b'a1', b'a2', b'a3', b'a4'])
        self.assertEqual(set(a.taxonomy for a in assets),
                         set(['RM', 'RC', 'W']))
        self.assertEqual(list(map(len, epsilons)), [20] * 5)
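The final assertion in both tests is easiest to read by picturing the epsilon matrix: one row per asset, one column per rupture. A minimal illustration with made-up numbers matching the second test:

import numpy

eps = numpy.zeros((5, 20), numpy.float32)  # 5 assets, 20 ruptures
assert list(map(len, eps)) == [20] * 5     # 20 epsilons per asset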
Example #5
    def make_eps(self, num_ruptures):
        """
        :param num_ruptures: the size of the epsilon array for each asset
        """
        oq = self.oqparam
        with self.monitor('building epsilons', autoflush=True):
            return riskinput.make_eps(self.assetcol, num_ruptures,
                                      oq.master_seed, oq.asset_correlation)
Example #6
    def pre_execute(self):
        """
        Read the precomputed ruptures (or compute them on the fly) and
        prepare some datasets in the datastore.
        """
        super(EventBasedRiskCalculator, self).pre_execute()
        if not self.riskmodel:  # there is no riskmodel, exit early
            self.execute = lambda: None
            self.post_execute = lambda result: None
            return
        oq = self.oqparam
        if self.riskmodel.covs:
            epsilon_sampling = oq.epsilon_sampling
        else:
            epsilon_sampling = 1  # only one ignored epsilon
        correl_model = readinput.get_correl_model(oq)
        gsims_by_col = self.rlzs_assoc.get_gsims_by_col()
        assets_by_site = self.assets_by_site
        # the following is needed to set the asset idx attribute
        self.assetcol = riskinput.build_asset_collection(
            assets_by_site, oq.time_event)
        self.spec_indices = numpy.array([a['asset_ref'] in oq.specific_assets
                                         for a in self.assetcol])

        logging.info('Populating the risk inputs')
        rup_by_tag = sum(self.datastore['sescollection'], AccumDict())
        all_ruptures = [rup_by_tag[tag] for tag in sorted(rup_by_tag)]
        for i, rup in enumerate(all_ruptures):
            rup.ordinal = i
        num_samples = min(len(all_ruptures), epsilon_sampling)
        self.epsilon_matrix = eps = riskinput.make_eps(
            assets_by_site, num_samples, oq.master_seed, oq.asset_correlation)
        logging.info('Generated %d epsilons', num_samples * len(eps))
        self.riskinputs = list(self.riskmodel.build_inputs_from_ruptures(
            self.sitecol.complete, all_ruptures, gsims_by_col,
            oq.truncation_level, correl_model, eps,
            oq.concurrent_tasks or 1))
        logging.info('Built %d risk inputs', len(self.riskinputs))

        # preparing empty datasets
        loss_types = self.riskmodel.loss_types
        self.L = len(loss_types)
        self.R = len(self.rlzs_assoc.realizations)
        self.outs = OUTPUTS
        self.datasets = {}
        # ugly: attaching an attribute needed in the task function
        self.monitor.num_outputs = len(self.outs)
        self.monitor.num_assets = self.count_assets()
        for o, out in enumerate(self.outs):
            self.datastore.hdf5.create_group(out)
            for l, loss_type in enumerate(loss_types):
                for r, rlz in enumerate(self.rlzs_assoc.realizations):
                    key = '/%s/%s' % (loss_type, rlz.uid)
                    if o == AGGLOSS:  # loss tables
                        dset = self.datastore.create_dset(out + key, elt_dt)
                    elif o == SPECLOSS:  # specific losses
                        dset = self.datastore.create_dset(out + key, ela_dt)
                    self.datasets[o, l, r] = dset
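The nested loop above creates one empty dataset per (output, loss type, realization) triple. Purely as an illustration (the OUTPUTS tuple, loss types and uids below are made up), the resulting HDF5 keys look like this:

OUTPUTS = ('agglosses', 'speclosses')  # hypothetical output names
loss_types = ('structural',)           # hypothetical loss types
rlz_uids = ('b1~b1', 'b2~b1')          # hypothetical realization uids
for out in OUTPUTS:
    for loss_type in loss_types:
        for uid in rlz_uids:
            print(out + '/%s/%s' % (loss_type, uid))
# agglosses/structural/b1~b1
# agglosses/structural/b2~b1
# speclosses/structural/b1~b1
# speclosses/structural/b2~b1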
Example #7
    def pre_execute(self):
        """
        Compute the GMFs, build the epsilons, the riskinputs, and a dictionary
        with the unit of measure, used in the export phase.
        """
        oq = self.oqparam
        super().pre_execute('scenario')
        self.assetcol = self.datastore['assetcol']
        A = len(self.assetcol)
        E = oq.number_of_ground_motion_fields
        if oq.ignore_covs:
            # all zeros; the data transfer is not so big in scenario
            eps = numpy.zeros((A, E), numpy.float32)
        else:
            logging.info('Building the epsilons')
            eps = riskinput.make_eps(self.assetcol, E, oq.master_seed,
                                     oq.asset_correlation)

        self.riskinputs = self.build_riskinputs('gmf', eps, E)
        self.param['number_of_ground_motion_fields'] = E
        self.param['insured_losses'] = self.oqparam.insured_losses
        self.param['asset_loss_table'] = self.oqparam.asset_loss_table
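The ignore_covs shortcut in Examples #2 and #7 works because epsilons only matter when the vulnerability functions have nonzero coefficients of variation. Under a deliberately simplified sampling model (an illustration, not the library's actual formula), any epsilon is equivalent to zero once cov == 0:

def sample_loss_ratio(mean, cov, eps):
    # simplified illustration: epsilon enters only scaled by cov
    return mean * (1.0 + cov * eps)

assert sample_loss_ratio(0.3, 0.0, 1.7) == sample_loss_ratio(0.3, 0.0, 0.0)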
Example #8
    def execute(self):
        """
        Run the event_based_risk calculator and aggregate the results
        """
        oq = self.oqparam
        correl_model = oq.get_correl_model()
        self.N = len(self.assetcol)
        self.E = sum(len(v) for v in self.datastore['events'].values())
        logging.info('Populating the risk inputs')
        all_ruptures = []
        preprecalc = getattr(self.precalc, 'precalc', None)
        if preprecalc:  # the ruptures are already in memory
            for grp_id, sesruptures in preprecalc.result.items():
                for sr in sesruptures:
                    all_ruptures.append(sr)
        else:  # read the ruptures from the datastore
            for serial in self.datastore['sescollection']:
                rup = self.datastore['sescollection/' + serial]
                all_ruptures.append(rup)
        all_ruptures.sort(key=operator.attrgetter('serial'))
        if not self.riskmodel.covs:
            # do not generate epsilons
            eps = None
        else:
            eps = riskinput.make_eps(
                self.assets_by_site, self.E, oq.master_seed,
                oq.asset_correlation)
            logging.info('Generated %s epsilons', eps.shape)

        # preparing empty datasets
        loss_types = self.riskmodel.loss_types
        self.C = self.oqparam.loss_curve_resolution
        self.L = L = len(loss_types)
        self.R = R = len(self.rlzs_assoc.realizations)
        self.I = self.oqparam.insured_losses

        # ugly: attaching attributes needed in the task function
        mon = self.monitor
        mon.num_assets = self.count_assets()
        mon.avg_losses = self.oqparam.avg_losses
        mon.asset_loss_table = self.oqparam.asset_loss_table
        mon.insured_losses = self.I
        mon.ses_ratio = (
            oq.risk_investigation_time or oq.investigation_time) / (
                oq.investigation_time * oq.ses_per_logic_tree_path)

        self.N = N = len(self.assetcol)
        self.E = sum(len(v) for v in self.datastore['events'].values())

        # average losses, stored in a composite array of shape N, R
        self.avg_losses = numpy.zeros((N, R), oq.loss_dt())

        self.ass_loss_table = square(L, R, lambda: None)
        self.agg_loss_table = square(L, R, lambda: None)

        self.ela_dt, self.elt_dt = mon.ela_dt, mon.elt_dt = build_el_dtypes(
            self.I)
        for (l, r) in itertools.product(range(L), range(R)):
            lt = loss_types[l]
            if self.oqparam.asset_loss_table:
                self.ass_loss_table[l, r] = self.datastore.create_dset(
                    'ass_loss_table/rlz-%03d/%s' % (r, lt), self.ela_dt)
            self.agg_loss_table[l, r] = self.datastore.create_dset(
                'agg_loss_table/rlz-%03d/%s' % (r, lt), self.elt_dt)

        self.saved = collections.Counter()  # nbytes per HDF5 key
        self.ass_bytes = 0
        self.agg_bytes = 0
        self.gmfbytes = 0
        rlz_ids = getattr(self.oqparam, 'rlz_ids', ())
        if rlz_ids:
            self.rlzs_assoc = self.rlzs_assoc.extract(rlz_ids)

        if not oq.minimum_intensity:
            # infer it from the risk models if not directly set in job.ini
            oq.minimum_intensity = self.riskmodel.get_min_iml()
        min_iml = calc.fix_minimum_intensity(
            oq.minimum_intensity, oq.imtls)
        if min_iml.sum() == 0:
            logging.warn('The GMFs are not filtered: '
                         'you may want to set a minimum_intensity')
        else:
            logging.info('minimum_intensity=%s', oq.minimum_intensity)
        csm_info = self.datastore['csm_info']
        grp_trt = {sg.id: sg.trt for sm in csm_info.source_models
                   for sg in sm.src_groups}
        with self.monitor('building riskinputs', autoflush=True):
            riskinputs = self.riskmodel.build_inputs_from_ruptures(
                grp_trt, list(oq.imtls), self.sitecol.complete, all_ruptures,
                oq.truncation_level, correl_model, min_iml, eps,
                oq.concurrent_tasks or 1)
            # NB: I am using generators so that the tasks are submitted one
            # at a time, without keeping all of the arguments in memory
            res = starmap(
                self.core_task.__func__,
                ((riskinput, self.riskmodel, self.rlzs_assoc,
                  self.assetcol, self.monitor.new('task'))
                 for riskinput in riskinputs)).submit_all()
        acc = functools.reduce(self.agg, res, AccumDict())
        self.save_data_transfer(res)
        return acc
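The ses_ratio attached to the monitor is the fraction of the effective investigation time carried by each stochastic event set. Plugging in hypothetical job.ini values:

risk_investigation_time = 50.0   # hypothetical job.ini values
investigation_time = 50.0
ses_per_logic_tree_path = 10
ses_ratio = (risk_investigation_time or investigation_time) / (
    investigation_time * ses_per_logic_tree_path)
print(ses_ratio)  # 50 / (50 * 10) = 0.1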
Example #9
    def pre_execute(self):
        """
        Read the precomputed ruptures (or compute them on the fly) and
        prepare some datasets in the datastore.
        """
        super(EventBasedRiskCalculator, self).pre_execute()
        if not self.riskmodel:  # there is no riskmodel, exit early
            self.execute = lambda: None
            self.post_execute = lambda result: None
            return
        oq = self.oqparam
        if self.riskmodel.covs:
            epsilon_sampling = oq.epsilon_sampling
        else:
            epsilon_sampling = 1  # only one ignored epsilon
        correl_model = readinput.get_correl_model(oq)
        gsims_by_col = self.rlzs_assoc.get_gsims_by_col()
        assets_by_site = self.assets_by_site
        # the following is needed to set the asset idx attribute
        self.assetcol = riskinput.build_asset_collection(
            assets_by_site, oq.time_event)
        self.spec_indices = numpy.array(
            [a['asset_ref'] in oq.specific_assets for a in self.assetcol])

        logging.info('Populating the risk inputs')
        rup_by_tag = sum(self.datastore['sescollection'], AccumDict())
        all_ruptures = [rup_by_tag[tag] for tag in sorted(rup_by_tag)]
        for i, rup in enumerate(all_ruptures):
            rup.ordinal = i
        num_samples = min(len(all_ruptures), epsilon_sampling)
        self.epsilon_matrix = eps = riskinput.make_eps(assets_by_site,
                                                       num_samples,
                                                       oq.master_seed,
                                                       oq.asset_correlation)
        logging.info('Generated %d epsilons', num_samples * len(eps))
        self.riskinputs = list(
            self.riskmodel.build_inputs_from_ruptures(
                self.sitecol.complete, all_ruptures, gsims_by_col,
                oq.truncation_level, correl_model, eps, oq.concurrent_tasks
                or 1))
        logging.info('Built %d risk inputs', len(self.riskinputs))

        # preparing empty datasets
        loss_types = self.riskmodel.loss_types
        self.L = len(loss_types)
        self.R = len(self.rlzs_assoc.realizations)
        self.outs = OUTPUTS
        self.datasets = {}
        # ugly: attaching an attribute needed in the task function
        self.monitor.num_outputs = len(self.outs)
        self.monitor.num_assets = self.count_assets()
        for o, out in enumerate(self.outs):
            self.datastore.hdf5.create_group(out)
            for l, loss_type in enumerate(loss_types):
                for r, rlz in enumerate(self.rlzs_assoc.realizations):
                    key = '/%s/%s' % (loss_type, rlz.uid)
                    if o == AGGLOSS:  # loss tables
                        dset = self.datastore.create_dset(out + key, elt_dt)
                    elif o == SPECLOSS:  # specific losses
                        dset = self.datastore.create_dset(out + key, ela_dt)
                    self.datasets[o, l, r] = dset
Example #10
    def execute(self):
        """
        Run the event_based_risk calculator and aggregate the results
        """
        oq = self.oqparam
        correl_model = readinput.get_correl_model(oq)
        self.N = len(self.assetcol)
        self.E = len(self.etags)
        logging.info('Populating the risk inputs')
        rlzs_by_tr_id = self.rlzs_assoc.get_rlzs_by_trt_id()
        num_rlzs = {t: len(rlzs) for t, rlzs in rlzs_by_tr_id.items()}
        num_assets = {
            sid: len(self.assets_by_site[sid])
            for sid in self.sitecol.sids
        }
        all_ruptures = []
        for serial in self.datastore['sescollection']:
            rup = self.datastore['sescollection/' + serial]
            rup.set_weight(num_rlzs, num_assets)
            all_ruptures.append(rup)
        all_ruptures.sort(key=operator.attrgetter('serial'))
        if not self.riskmodel.covs:
            # do not generate epsilons
            eps = None
        else:
            eps = riskinput.make_eps(self.assets_by_site, self.E,
                                     oq.master_seed, oq.asset_correlation)
            logging.info('Generated %s epsilons', eps.shape)

        # preparing empty datasets
        loss_types = self.riskmodel.loss_types
        self.C = self.oqparam.loss_curve_resolution
        self.L = L = len(loss_types)
        self.R = R = len(self.rlzs_assoc.realizations)
        self.I = self.oqparam.insured_losses

        # ugly: attaching attributes needed in the task function
        mon = self.monitor
        mon.num_assets = self.count_assets()
        mon.avg_losses = self.oqparam.avg_losses
        mon.asset_loss_table = self.oqparam.asset_loss_table
        mon.insured_losses = self.I
        mon.ses_ratio = (oq.risk_investigation_time or
                         oq.investigation_time) / (oq.investigation_time *
                                                   oq.ses_per_logic_tree_path)

        self.N = N = len(self.assetcol)
        self.E = len(self.datastore['etags'])

        # average losses, stored in a composite array of shape N, R
        multi_avg_dt = self.riskmodel.loss_type_dt(insured=self.I)
        self.avg_losses = numpy.zeros((N, R), multi_avg_dt)

        self.ass_loss_table = square(L, R, lambda: None)
        self.agg_loss_table = square(L, R, lambda: None)

        self.ela_dt, self.elt_dt = mon.ela_dt, mon.elt_dt = build_el_dtypes(
            self.I)
        for (l, r) in itertools.product(range(L), range(R)):
            lt = loss_types[l]
            if self.oqparam.asset_loss_table:
                self.ass_loss_table[l, r] = self.datastore.create_dset(
                    'ass_loss_table/rlz-%03d/%s' % (r, lt), self.ela_dt)
            self.agg_loss_table[l, r] = self.datastore.create_dset(
                'agg_loss_table/rlz-%03d/%s' % (r, lt), self.elt_dt)

        self.saved = collections.Counter()  # nbytes per HDF5 key
        self.ass_bytes = 0
        self.agg_bytes = 0
        self.gmfbytes = 0
        rlz_ids = getattr(self.oqparam, 'rlz_ids', ())
        if rlz_ids:
            self.rlzs_assoc = self.rlzs_assoc.extract(rlz_ids)

        if not oq.minimum_intensity:
            # infer it from the risk models if not directly set in job.ini
            oq.minimum_intensity = self.riskmodel.get_min_iml()
        min_iml = calc.fix_minimum_intensity(oq.minimum_intensity, oq.imtls)
        if min_iml.sum() == 0:
            logging.warn('The GMFs are not filtered: '
                         'you may want to set a minimum_intensity')
        else:
            logging.info('minimum_intensity=%s', oq.minimum_intensity)

        with self.monitor('building riskinputs', autoflush=True):
            riskinputs = self.riskmodel.build_inputs_from_ruptures(
                self.sitecol.complete, all_ruptures, oq.truncation_level,
                correl_model, min_iml, eps, oq.concurrent_tasks or 1)
            # NB: I am using generators so that the tasks are submitted one
            # at a time, without keeping all of the arguments in memory
            tm = starmap(self.core_task.__func__,
                         ((riskinput, self.riskmodel, self.rlzs_assoc,
                           self.assetcol, self.monitor.new('task'))
                          for riskinput in riskinputs))
        res = tm.reduce(agg=self.agg)
        self.save_data_transfer(tm)
        return res
Example #11
    def execute(self):
        """
        Run the event_based_risk calculator and aggregate the results
        """
        oq = self.oqparam
        correl_model = readinput.get_correl_model(oq)
        self.N = len(self.assetcol)
        self.E = len(self.etags)
        logging.info('Populating the risk inputs')
        rlzs_by_tr_id = self.rlzs_assoc.get_rlzs_by_trt_id()
        num_rlzs = {t: len(rlzs) for t, rlzs in rlzs_by_tr_id.items()}
        num_assets = {sid: len(self.assets_by_site[sid])
                      for sid in self.sitecol.sids}
        all_ruptures = []
        for serial in self.datastore['sescollection']:
            rup = self.datastore['sescollection/' + serial]
            rup.set_weight(num_rlzs, num_assets)
            all_ruptures.append(rup)
        all_ruptures.sort(key=operator.attrgetter('serial'))
        if not self.riskmodel.covs:
            # do not generate epsilons
            eps = None
        else:
            eps = riskinput.make_eps(
                self.assets_by_site, self.E, oq.master_seed,
                oq.asset_correlation)
            logging.info('Generated %s epsilons', eps.shape)

        # preparing empty datasets
        loss_types = self.riskmodel.loss_types
        self.C = self.oqparam.loss_curve_resolution
        self.L = L = len(loss_types)
        self.R = R = len(self.rlzs_assoc.realizations)
        self.I = self.oqparam.insured_losses

        # ugly: attaching attributes needed in the task function
        mon = self.monitor
        mon.num_assets = self.count_assets()
        mon.avg_losses = self.oqparam.avg_losses
        mon.asset_loss_table = self.oqparam.asset_loss_table
        mon.insured_losses = self.I
        mon.ses_ratio = (
            oq.risk_investigation_time or oq.investigation_time) / (
                oq.investigation_time * oq.ses_per_logic_tree_path)

        self.N = N = len(self.assetcol)
        self.E = len(self.datastore['etags'])

        # average losses, stored in a composite array of shape N, R
        multi_avg_dt = self.riskmodel.loss_type_dt(insured=self.I)
        self.avg_losses = numpy.zeros((N, R), multi_avg_dt)

        self.ass_loss_table = square(L, R, lambda: None)
        self.agg_loss_table = square(L, R, lambda: None)

        self.ela_dt, self.elt_dt = mon.ela_dt, mon.elt_dt = build_el_dtypes(
            self.I)
        for (l, r) in itertools.product(range(L), range(R)):
            lt = loss_types[l]
            if self.oqparam.asset_loss_table:
                self.ass_loss_table[l, r] = self.datastore.create_dset(
                    'ass_loss_table/rlz-%03d/%s' % (r, lt), self.ela_dt)
            self.agg_loss_table[l, r] = self.datastore.create_dset(
                'agg_loss_table/rlz-%03d/%s' % (r, lt), self.elt_dt)

        self.saved = collections.Counter()  # nbytes per HDF5 key
        self.ass_bytes = 0
        self.agg_bytes = 0
        self.gmfbytes = 0
        rlz_ids = getattr(self.oqparam, 'rlz_ids', ())
        if rlz_ids:
            self.rlzs_assoc = self.rlzs_assoc.extract(rlz_ids)

        if not oq.minimum_intensity:
            # infer it from the risk models if not directly set in job.ini
            oq.minimum_intensity = self.riskmodel.get_min_iml()
        min_iml = calc.fix_minimum_intensity(
            oq.minimum_intensity, oq.imtls)
        if min_iml.sum() == 0:
            logging.warn('The GMFs are not filtered: '
                         'you may want to set a minimum_intensity')
        else:
            logging.info('minimum_intensity=%s', oq.minimum_intensity)

        with self.monitor('building riskinputs', autoflush=True):
            riskinputs = self.riskmodel.build_inputs_from_ruptures(
                self.sitecol.complete, all_ruptures, oq.truncation_level,
                correl_model, min_iml, eps, oq.concurrent_tasks or 1)
            # NB: I am using generators so that the tasks are submitted one
            # at a time, without keeping all of the arguments in memory
            tm = starmap(
                self.core_task.__func__,
                ((riskinput, self.riskmodel, self.rlzs_assoc,
                  self.assetcol, self.monitor.new('task'))
                 for riskinput in riskinputs))
        return tm.reduce(agg=self.agg, posthook=self.save_data_transfer)
Example #12
    def pre_execute(self):
        """
        Read the precomputed ruptures (or compute them on the fly) and
        prepare some datasets in the datastore.
        """
        super(EventBasedRiskCalculator, self).pre_execute()
        if not self.riskmodel:  # there is no riskmodel, exit early
            self.execute = lambda: None
            self.post_execute = lambda result: None
            return
        oq = self.oqparam
        correl_model = readinput.get_correl_model(oq)
        self.N = len(self.assetcol)
        self.E = len(self.etags)
        logging.info('Populating the risk inputs')
        all_ruptures = []
        for serial in self.datastore['sescollection']:
            all_ruptures.append(self.datastore['sescollection/' + serial])
        all_ruptures.sort(key=operator.attrgetter('serial'))
        if not self.riskmodel.covs:
            # do not generate epsilons
            eps = FakeMatrix(self.N, self.E)
        else:
            eps = riskinput.make_eps(self.assets_by_site, self.E,
                                     oq.master_seed, oq.asset_correlation)
            logging.info('Generated %s epsilons', eps.shape)

        self.riskinputs = list(
            self.riskmodel.build_inputs_from_ruptures(
                self.sitecol.complete, all_ruptures,
                self.rlzs_assoc.gsims_by_trt_id, oq.truncation_level,
                correl_model, eps, oq.concurrent_tasks or 1))
        logging.info('Built %d risk inputs', len(self.riskinputs))

        # preparing empty datasets
        loss_types = self.riskmodel.loss_types
        self.C = self.oqparam.loss_curve_resolution
        self.L = L = len(loss_types)
        self.R = R = len(self.rlzs_assoc.realizations)
        self.I = self.oqparam.insured_losses

        # ugly: attaching an attribute needed in the task function
        mon = self.monitor
        mon.num_assets = self.count_assets()
        mon.avg_losses = self.oqparam.avg_losses
        mon.asset_loss_table = self.oqparam.asset_loss_table
        mon.insured_losses = self.I

        self.N = N = len(self.assetcol)
        self.E = len(self.datastore['etags'])

        # average losses, stored in a composite array of shape N, R
        multi_avg_dt = self.riskmodel.loss_type_dt(insured=self.I)
        self.avg_losses = numpy.zeros((N, R), multi_avg_dt)

        self.ass_loss_table = square(L, R, lambda: None)
        self.agg_loss_table = square(L, R, lambda: None)

        self.ela_dt, self.elt_dt = mon.ela_dt, mon.elt_dt = build_el_dtypes(
            self.I)
        for (l, r) in itertools.product(range(L), range(R)):
            lt = loss_types[l]
            if self.oqparam.asset_loss_table:
                self.ass_loss_table[l, r] = self.datastore.create_dset(
                    'ass_loss_table/rlz-%03d/%s' % (r, lt), self.ela_dt)
            self.agg_loss_table[l, r] = self.datastore.create_dset(
                'agg_loss_table/rlz-%03d/%s' % (r, lt), self.elt_dt)
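Example #12 replaces the None used in the other examples with a FakeMatrix when the risk model has no coefficients of variation. A plausible sketch of such a stand-in (an assumption; the real FakeMatrix may differ) keeps the (N, E) shape without allocating N * E floats:

import numpy

class FakeMatrix(object):
    """Hypothetical sketch: acts like an all-zero (N, E) epsilon matrix."""
    def __init__(self, n_assets, n_events):
        self.shape = (n_assets, n_events)

    def __getitem__(self, aid):
        # every asset row reads back as a zero epsilon vector
        return numpy.zeros(self.shape[1], numpy.float32)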