Code Example #1
File: views.py Project: yasser64b/oq-engine
def view_delta_loss(token, dstore):
    """
    Estimate the stochastic error on the loss curve by splitting the events
    into odd and even. Example:

    $ oq show delta_loss  # consider the first loss type
    """
    if ':' in token:
        _, li = token.split(':')
        li = int(li)
    else:
        li = 0
    oq = dstore['oqparam']
    efftime = oq.investigation_time * oq.ses_per_logic_tree_path * len(
        dstore['weights'])
    num_events = len(dstore['events'])
    num_events0 = num_events // 2 + (num_events % 2)
    num_events1 = num_events // 2
    periods = return_periods(efftime, num_events)[1:-1]

    K = dstore['risk_by_event'].attrs.get('K', 0)
    df = dstore.read_df('risk_by_event', 'event_id', dict(agg_id=K,
                                                          loss_id=li))
    if len(df) == 0:  # for instance no fatalities
        return {'delta': numpy.zeros(1)}
    mod2 = df.index % 2
    losses0 = df['loss'][mod2 == 0]
    losses1 = df['loss'][mod2 == 1]
    c0 = losses_by_period(losses0, periods, num_events0, efftime / 2)
    c1 = losses_by_period(losses1, periods, num_events1, efftime / 2)
    dic = dict(loss=losses_by_period(df['loss'], periods, num_events, efftime),
               even=c0,
               odd=c1,
               delta=numpy.abs(c0 - c1) / (c0 + c1))
    return pandas.DataFrame(dic, periods)
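
Note: the delta returned above is a relative half-split error. The event
losses are divided by even/odd event id, a loss curve is built from each
half over half the effective time, and |c0 - c1| / (c0 + c1) is reported
per return period. Below is a minimal self-contained sketch of the same
idea, using plain numpy and a hypothetical curve() helper standing in for
the engine's losses_by_period; names and signatures here are illustrative
only.

import numpy

def curve(losses, periods, eff_time):
    # hypothetical stand-in for losses_by_period: empirical losses
    # interpolated at the requested return periods
    losses = numpy.sort(losses)
    # the i-th smallest loss gets return period eff_time / (N - i),
    # so the largest loss maps to the longest period
    rp = eff_time / numpy.arange(len(losses), 0., -1)
    return numpy.interp(periods, rp, losses)

rng = numpy.random.default_rng(42)
event_losses = rng.pareto(2., size=10_000)             # synthetic event losses
periods = [10, 50, 100, 500]
eff_time = 100_000.
c0 = curve(event_losses[0::2], periods, eff_time / 2)  # "even" events
c1 = curve(event_losses[1::2], periods, eff_time / 2)  # "odd" events
delta = numpy.abs(c0 - c1) / (c0 + c1)                 # relative half-split error
print(delta)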
Code Example #2
    def post_execute(self, dummy):
        """
        Compute and store average losses from the losses_by_event dataset,
        and then loss curves and maps.
        """
        oq = self.oqparam
        if oq.avg_losses:
            self.datastore['avg_losses-stats'].attrs['stats'] = [b'mean']
        logging.info('Building loss tables')
        build_loss_tables(self.datastore)
        self.datastore.flush()  # just to be sure
        shp = self.get_shape(self.L)  # (L, T...)
        text = ' x '.join('%d(%s)' % (n, t)
                          for t, n in zip(oq.aggregate_by, shp[1:]))
        logging.info('Producing %d(loss_types) x %s loss curves', self.L, text)
        builder = get_loss_builder(self.datastore)
        self.build_datasets(builder)
        self.datastore.swmr_on()
        args = [(self.datastore.filename, builder, oq.ses_ratio, rlzi)
                for rlzi in range(self.R)]
        acc = list(parallel.Starmap(postprocess, args, h5=self.datastore.hdf5))
        for r, (curves, maps), agg_losses in acc:
            if len(curves):  # some realization can give zero contribution
                self.datastore['agg_curves-rlzs'][:, r] = curves
            if len(maps):  # conditional_loss_poes can be empty
                self.datastore['agg_maps-rlzs'][:, r] = maps
            self.datastore['agg_losses-rlzs'][:, r] = agg_losses
        if self.R > 1:
            logging.info('Computing aggregate statistics')
            set_rlzs_stats(self.datastore, 'agg_curves')
            set_rlzs_stats(self.datastore, 'agg_losses')
            if oq.conditional_loss_poes:
                set_rlzs_stats(self.datastore, 'agg_maps')

        # sanity check with the asset_loss_table
        if oq.asset_loss_table and len(oq.aggregate_by) == 1:
            alt = self.datastore['asset_loss_table'][()]
            if alt.sum() == 0:  # nothing was saved
                return
            logging.info('Checking the loss curves')
            tags = getattr(self.assetcol.tagcol, oq.aggregate_by[0])[1:]
            T = len(tags)
            P = len(builder.return_periods)
            # sanity check on the loss curves for simple tag aggregation
            arr = self.assetcol.aggregate_by(oq.aggregate_by, alt)
            # shape (T, E, L)
            rlzs = self.datastore['events']['rlz_id']
            curves = numpy.zeros((P, self.R, self.L, T))
            for t in range(T):
                for r in range(self.R):
                    for l in range(self.L):
                        curves[:, r, l,
                               t] = losses_by_period(arr[t, rlzs == r, l],
                                                     builder.return_periods,
                                                     builder.num_events[r],
                                                     builder.eff_time)
            numpy.testing.assert_allclose(
                curves, self.datastore['agg_curves-rlzs'][()])
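
Note: set_rlzs_stats above is an engine helper that stores statistics
alongside each per-realization dataset. A hypothetical sketch of the kind
of reduction involved, assuming a weighted mean over the realization axis
of a (P, R, ...) array; the function name and the weighted-mean assumption
are illustrative, not the engine's API.

import numpy

def mean_over_rlzs(array, weights):
    # hypothetical reduction: weighted mean over the realization axis
    # (axis 1 for arrays shaped (P, R, ...), like agg_curves-rlzs)
    w = numpy.asarray(weights, float)
    w = w / w.sum()
    return numpy.einsum('pr...,r->p...', array, w)

rng = numpy.random.default_rng(1)
curves_rlzs = rng.random((4, 3, 2))                # toy (P, R, L) array
stats = mean_over_rlzs(curves_rlzs, [.2, .5, .3])
print(stats.shape)                                 # (4, 2): realization axis reduced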
Code Example #3
    def test(self):
        # testing convergence of the mean curve
        periods = [10, 20, 50, 100, 150, 200, 250]
        eff_time = 500
        losses = 10**numpy.random.default_rng(42).random(2000)
        losses0 = losses[:1000]
        losses1 = losses[1000:]
        curve0 = scientific.losses_by_period(losses0,
                                             periods,
                                             eff_time=eff_time)
        curve1 = scientific.losses_by_period(losses1,
                                             periods,
                                             eff_time=eff_time)
        mean = (curve0 + curve1) / 2
        full = scientific.losses_by_period(losses,
                                           periods,
                                           eff_time=2 * eff_time)
        # aae is assumed to be an alias of numpy.testing.assert_allclose
        # defined elsewhere in the test module (rtol is an assert_allclose
        # keyword)
        aae(mean, full, rtol=1E-2)  # converges only at 1%
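
Note: the test relies on the empirical loss curve behaving like a quantile
function of the losses: doubling both the number of events and the
effective time leaves the return period assigned to each loss unchanged,
so the full curve should match the average of the two half curves up to
sampling noise. A rough check of that invariance, reusing the same
hypothetical curve() helper sketched after Code Example #1 (an assumption,
not the engine's implementation).

import numpy

def curve(losses, periods, eff_time):
    # same hypothetical empirical-curve helper as in the earlier sketch
    losses = numpy.sort(losses)
    rp = eff_time / numpy.arange(len(losses), 0., -1)
    return numpy.interp(periods, rp, losses)

rng = numpy.random.default_rng(42)
periods = [10, 20, 50, 100, 150, 200, 250]
losses = 10 ** rng.random(2000)
half0 = curve(losses[:1000], periods, 500)
half1 = curve(losses[1000:], periods, 500)
full = curve(losses, periods, 1000)    # twice the events, twice the time
print(numpy.abs((half0 + half1) / 2 - full) / full)  # relative difference per period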
Code Example #4
File: ebrisk.py Project: digitalsatori/oq-engine
    def post_execute(self, times):
        """
        Compute and store average losses from the losses_by_event dataset,
        and then loss curves and maps.
        """
        if len(times):
            self.datastore.set_attrs(
                'task_info/start_ebrisk', times=times,
                events_per_sid=numpy.mean(self.events_per_sid))
        oq = self.oqparam
        shp = self.get_shape(self.L)  # (L, T...)
        text = ' x '.join(
            '%d(%s)' % (n, t) for t, n in zip(oq.aggregate_by, shp[1:]))
        logging.info('Producing %d(loss_types) x %s loss curves', self.L, text)
        builder = get_loss_builder(self.datastore)
        self.build_datasets(builder)
        self.datastore.close()
        if 'losses_by_event' in self.datastore.parent:
            dstore = self.datastore.parent
        else:
            dstore = self.datastore
        allargs = [(dstore.filename, builder, rlzi) for rlzi in range(self.R)]
        mon = performance.Monitor(hdf5=hdf5.File(self.datastore.hdf5cache()))
        acc = list(parallel.Starmap(compute_loss_curves_maps, allargs, mon))
        # copy performance information from the cache to the datastore
        pd = mon.hdf5['performance_data'][()]
        hdf5.extend3(self.datastore.filename, 'performance_data', pd)
        self.datastore.open('r+')  # reopen
        self.datastore['task_info/compute_loss_curves_and_maps'] = (
            mon.hdf5['task_info/compute_loss_curves_maps'][()])
        self.datastore.open('r+')
        with self.monitor('saving loss_curves and maps', autoflush=True):
            for r, (curves, maps) in acc:
                if len(curves):  # some realization can give zero contribution
                    self.datastore['agg_curves-rlzs'][:, r] = curves
                if len(maps):  # conditional_loss_poes can be empty
                    self.datastore['agg_maps-rlzs'][:, r] = maps
        if self.R > 1:
            logging.info('Computing aggregate loss curves statistics')
            set_rlzs_stats(self.datastore, 'agg_curves')
            self.datastore.set_attrs(
                'agg_curves-stats', return_periods=builder.return_periods,
                loss_types=' '.join(self.riskmodel.loss_types))
            if oq.conditional_loss_poes:
                logging.info('Computing aggregate loss maps statistics')
                set_rlzs_stats(self.datastore, 'agg_maps')

        # sanity check with the asset_loss_table
        if oq.asset_loss_table and len(oq.aggregate_by) == 1:
            alt = self.datastore['asset_loss_table'][()]
            if alt.sum() == 0:  # nothing was saved
                return
            logging.info('Checking the loss curves')
            tags = getattr(self.assetcol.tagcol, oq.aggregate_by[0])[1:]
            T = len(tags)
            P = len(builder.return_periods)
            # sanity check on the loss curves for simple tag aggregation
            arr = self.assetcol.aggregate_by(oq.aggregate_by, alt)
            # shape (T, E, L)
            rlzs = self.datastore['events']['rlz']
            curves = numpy.zeros((P, self.R, self.L, T))
            for t in range(T):
                for r in range(self.R):
                    for l in range(self.L):
                        curves[:, r, l, t] = losses_by_period(
                            arr[t, rlzs == r, l],
                            builder.return_periods,
                            builder.num_events[r],
                            builder.eff_time)
            numpy.testing.assert_allclose(
                curves, self.datastore['agg_curves-rlzs'][()])