Example #1
    def export_curves_rlzs(self, aids, key):
        """
        :returns: a dictionary key -> record of dtype loss_curve_dt
        """
        if 'loss_curves-stats' in self.dstore:  # classical_risk
            if self.R == 1:
                data = self.dstore['loss_curves-stats'][aids]  # shape (A, 1)
            else:
                data = self.dstore['loss_curves-rlzs'][aids]  # shape (A, R)
            if key.startswith('rlz-'):
                rlzi = int(key[4:])
                return {key: data[:, rlzi]}
            # else key == 'rlzs': return all data
            return {'rlz-%03d' % rlzi: data[:, rlzi] for rlzi in range(self.R)}

        # otherwise event_based
        avalues = self.assetcol.values(aids)
        lrgetter = getters.LossRatiosGetter(self.dstore, aids)
        build = self.builder.build_rlz
        if key.startswith('rlz-'):
            rlzi = int(key[4:])
            ratios = lrgetter.get(rlzi)
            return {'rlz-%03d' % rlzi: build(avalues, ratios, rlzi)}
        else:  # key is 'rlzs', return a dictionary with all realizations
            # this may be disabled in the future unless an asset is specified
            dic = {}
            for rlzi in range(self.R):
                ratios = lrgetter.get(rlzi)
                dic['rlz-%03d' % rlzi] = build(avalues, ratios, rlzi)
            return dic
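
The keys follow a simple convention: 'rlz-NNN' selects a single realization by its zero-padded index, while 'rlzs' expands to one entry per realization. Below is a minimal, self-contained sketch of the same dispatch on a plain NumPy array; select_by_key and its arguments are illustrative names, not part of the OpenQuake API.

import numpy

def select_by_key(data, key, R):
    """data has shape (A, R); key is 'rlzs' or 'rlz-NNN'."""
    if key.startswith('rlz-'):
        rlzi = int(key[4:])  # 'rlz-007' -> 7
        return {key: data[:, rlzi]}
    # key == 'rlzs': one entry per realization
    return {'rlz-%03d' % rlzi: data[:, rlzi] for rlzi in range(R)}

data = numpy.arange(6.).reshape(3, 2)  # A=3 assets, R=2 realizations
print(select_by_key(data, 'rlz-001', R=2))  # {'rlz-001': array([1., 3., 5.])}
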
Example #2
    def execute(self):
        oq = self.oqparam
        R = len(self.loss_builder.weights)
        # build loss maps
        if 'all_loss_ratios' in self.datastore and oq.conditional_loss_poes:
            assetcol = self.datastore['assetcol']
            stats = oq.risk_stats()
            builder = self.loss_builder
            A = len(assetcol)
            S = len(stats)
            P = len(builder.return_periods)
            # create loss_maps datasets
            self.datastore.create_dset('loss_maps-rlzs',
                                       self.loss_maps_dt, (A, R),
                                       fillvalue=None)
            if R > 1:
                self.datastore.create_dset('loss_maps-stats',
                                           self.loss_maps_dt, (A, S),
                                           fillvalue=None)
                self.datastore.set_attrs(
                    'loss_maps-stats',
                    stats=[encode(name) for (name, func) in stats])
                self.datastore.create_dset('curves-stats',
                                           oq.loss_dt(), (A, S, P),
                                           fillvalue=None)
                self.datastore.set_attrs(
                    'curves-stats',
                    return_periods=builder.return_periods,
                    stats=[encode(name) for (name, func) in stats])
            mon = self.monitor('loss maps')
            lazy = ('all_loss_ratios' in self.datastore.parent
                    and self.can_read_parent())
            logging.info('Instantiating LossRatiosGetters')
            with self.monitor('building lrgetters',
                              measuremem=True,
                              autoflush=True):
                allargs = []
                for aids in split_in_blocks(range(A), oq.concurrent_tasks):
                    dstore = self.datastore.parent if lazy else self.datastore
                    getter = getters.LossRatiosGetter(dstore, aids, lazy)
                    # a lazy getter will read the loss_ratios from the workers;
                    # an eager getter reads the loss_ratios upfront
                    allargs.append((assetcol.values(aids), builder, getter,
                                    stats, oq.conditional_loss_poes, mon))
            if lazy:
                # avoid OSError: Can't read data (Wrong b-tree signature)
                self.datastore.parent.close()
            parallel.Starmap(build_curves_maps,
                             allargs).reduce(self.save_curves_maps)
            if lazy:  # the parent was closed, reopen it
                self.datastore.parent.open()
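
The close()/open() pair around the Starmap call is the critical detail here: an HDF5 handle opened in the parent process cannot be safely shared with forked workers, which is what produces the "Wrong b-tree signature" OSError. A minimal sketch of the same pattern with plain h5py and multiprocessing follows; the file and dataset names are made up for the demo.

import h5py
import numpy
from multiprocessing import Pool

def read_block(args):
    fname, start, stop = args
    with h5py.File(fname, 'r') as f:  # each worker opens its own handle
        return f['ratios'][start:stop].sum()

if __name__ == '__main__':
    fname = '/tmp/demo.hdf5'
    with h5py.File(fname, 'w') as f:
        f['ratios'] = numpy.ones((100, 4))
    # the parent holds no open handle while the workers read
    with Pool(2) as pool:
        sums = pool.map(read_block, [(fname, 0, 50), (fname, 50, 100)])
    print(sum(sums))  # 400.0
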
Example #3
def export_asset_loss_table(ekey, dstore):
    """
    Export in parallel the asset loss table from the datastore.

    NB1: for large calculations this may run out of memory
    NB2: due to a heisenbug in the parallel reading of .hdf5 files this works
    reliably only if the datastore has been created by a different process

    The recommendation is: *do not use this exporter*; rather, study its
    source code and write what you need. Every postprocessing task is
    different.
    """
    key, fmt = ekey
    oq = dstore['oqparam']
    assetcol = dstore['assetcol']
    arefs = assetcol.asset_refs
    avals = assetcol.values()
    loss_types = dstore.get_attr('all_loss_ratios', 'loss_types').split()
    dtlist = [(lt, F32) for lt in loss_types]
    if oq.insured_losses:
        for lt in loss_types:
            dtlist.append((lt + '_ins', F32))
    lrs_dt = numpy.dtype([('rlzi', U16), ('losses', dtlist)])
    fname = dstore.export_path('%s.%s' % ekey)
    monitor = performance.Monitor(key, fname)
    aids = range(len(assetcol))
    allargs = [(getters.LossRatiosGetter(dstore, block), monitor)
               for block in split_in_blocks(aids, oq.concurrent_tasks)]
    dstore.close()  # avoid OSError: Can't read data (Wrong b-tree signature)
    L = len(loss_types)
    with hdf5.File(fname, 'w') as f:
        nbytes = 0
        total = numpy.zeros(len(dtlist), F32)
        for pairs in parallel.Starmap(get_loss_ratios, allargs):
            for aid, data in pairs:
                asset = assetcol[aid]
                avalue = avals[aid]
                for l, lt in enumerate(loss_types):
                    aval = avalue[lt]
                    for i in range(oq.insured_losses + 1):
                        data['ratios'][:, l + L * i] *= aval
                aref = arefs[asset.ordinal]
                f[b'asset_loss_table/' + aref] = data.view(lrs_dt)
                total += data['ratios'].sum(axis=0)
                nbytes += data.nbytes
        f['asset_loss_table'].attrs['loss_types'] = ' '.join(loss_types)
        f['asset_loss_table'].attrs['total'] = total
        f['asset_loss_table'].attrs['nbytes'] = nbytes
    return [fname]
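
The lrs_dt definition packs a realization index and one float per (loss type, insured) column into a single record; data.view(lrs_dt) then reinterprets the getter's flat (rlzi, ratios) output in place, without copying, because the itemsizes match. A minimal sketch of the nested dtype, with invented loss type names:

import numpy
F32, U16 = numpy.float32, numpy.uint16

dtlist = [('structural', F32), ('nonstructural', F32)]
lrs_dt = numpy.dtype([('rlzi', U16), ('losses', dtlist)])

records = numpy.zeros(3, lrs_dt)  # three (rlzi, losses) records
records['rlzi'] = [0, 0, 1]
records['losses']['structural'] = [.1, .2, .3]
print(records[0])  # (0, (0.1, 0.))
print(records['losses']['structural'].sum())  # ~0.6
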