Example #1
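An execute method in the style of the OpenQuake ebrisk calculator: it caches the epsilons next to the datastore, saves the source filter and the risk model once on the Starmap monitor (so they are not re-pickled per task), submits one task per rupture getter, reduces the partial results with agg_dicts, and stores the per-realization (start, stop) loss indices before logging the GMF size and the loss counts. Note that numlosses starts as 0 and is presumably turned into a (considered, total) pair inside agg_dicts, since it is unpacked with * in the final log.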
 def execute(self):
     self.datastore.flush()  # just to be sure
     oq = self.oqparam
     self.set_param(hdf5path=self.datastore.filename,
                    tempname=cache_epsilons(self.datastore, oq,
                                            self.assetcol, self.crmodel,
                                            self.E))
     srcfilter = self.src_filter()
     logging.info('Sending {:_d} ruptures'.format(
         len(self.datastore['ruptures'])))
     self.events_per_sid = []
     self.numlosses = 0
     self.datastore.swmr_on()
     self.indices = general.AccumDict(accum=[])  # rlzi -> [(start, stop)]
     smap = parallel.Starmap(start_ebrisk, h5=self.datastore.hdf5)
     smap.monitor.save('srcfilter', srcfilter)
     smap.monitor.save('crmodel', self.crmodel)
     for rg in getters.gen_rupture_getters(self.datastore,
                                           oq.concurrent_tasks):
         smap.submit((rg, self.param))
     smap.reduce(self.agg_dicts)
     if self.indices:
         self.datastore['event_loss_table/indices'] = self.indices
     gmf_bytes = self.datastore['gmf_info']['gmfbytes'].sum()
     logging.info('Produced %s of GMFs', general.humansize(gmf_bytes))
     logging.info('Considered {:_d} / {:_d} losses'.format(*self.numlosses))
     return 1
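The general.AccumDict(accum=[]) used above behaves, for this purpose, like a defaultdict(list): a missing realization id starts with a fresh empty list of (start, stop) pairs. A minimal plain-Python sketch of the accumulation pattern (the data here is invented for illustration):

    from collections import defaultdict

    indices = defaultdict(list)                # rlzi -> [(start, stop)]
    for rlzi, start, stop in [(0, 0, 10), (0, 10, 25), (1, 0, 5)]:
        indices[rlzi].append((start, stop))    # same shape agg_dicts fills
    print(dict(indices))  # {0: [(0, 10), (10, 25)], 1: [(0, 5)]}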
Example #2
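A pre_execute for a scenario-style calculator: E is number_of_ground_motion_fields times R, the epsilons are cached under param['tempname'], the realization weights fall back to uniform 1/R when no 'weights' dataset exists, and two loss_data datasets (the records plus the per-asset (start, stop) indices) are created up front.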
 def pre_execute(self):
     """
     Compute the GMFs, build the epsilons and the riskinputs, and prepare a
     dictionary with the unit of measure used in the export phase.
     """
     oq = self.oqparam
     super().pre_execute()
     self.assetcol = self.datastore['assetcol']
     self.event_slice = functools.partial(_event_slice,
                                          oq.number_of_ground_motion_fields)
     E = oq.number_of_ground_motion_fields * self.R
     self.riskinputs = self.build_riskinputs('gmf')
     self.param['tempname'] = riskinput.cache_epsilons(
         self.datastore, oq, self.assetcol, self.crmodel, E)
     self.param['E'] = E
     # assuming the weights are the same for all IMTs
     try:
         self.param['weights'] = self.datastore['weights'][()]
     except KeyError:
         self.param['weights'] = [1 / self.R for _ in range(self.R)]
     self.param['event_slice'] = self.event_slice
     self.param['ael_dt'] = dt = ael_dt(oq.loss_names)
     A = len(self.assetcol)
     self.datastore.create_dset('loss_data/data', dt)
     self.datastore.create_dset('loss_data/indices', U32, (A, 2))
     self.start = 0
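The functools.partial above freezes the number of ground motion fields so that later code only has to pass a realization index. A sketch of the idea, assuming (the helper is not shown in the snippet) that _event_slice(num_gmfs, rlzi) returns the contiguous slice of events belonging to realization rlzi:

    import functools

    def _event_slice(num_gmfs, rlzi):
        # assumed behaviour: the events of each realization are contiguous
        return slice(rlzi * num_gmfs, (rlzi + 1) * num_gmfs)

    event_slice = functools.partial(_event_slice, 1000)
    print(event_slice(0))  # slice(0, 1000, None)
    print(event_slice(2))  # slice(2000, 3000, None)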
Example #3
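Close to Example #1, but it reads csm_info from the parent datastore when one exists, initializes the logic tree, passes the tempname to src_filter, and stores the indices under asset_loss_table/indices instead of event_loss_table/indices; here the final log reports stored rather than considered losses.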
 def execute(self):
     self.datastore.flush()  # just to be sure
     oq = self.oqparam
     parent = self.datastore.parent
     csm_info = parent['csm_info'] if parent else self.csm_info
     self.init_logic_tree(csm_info)
     self.set_param(hdf5path=self.datastore.filename,
                    tempname=cache_epsilons(self.datastore, oq,
                                            self.assetcol, self.crmodel,
                                            self.E))
     srcfilter = self.src_filter(self.datastore.tempname)
     logging.info('Sending %d ruptures', len(self.datastore['ruptures']))
     self.events_per_sid = []
     self.numlosses = 0
     self.datastore.swmr_on()
     self.indices = general.AccumDict(accum=[])  # rlzi -> [(start, stop)]
     self.offset = 0
     smap = parallel.Starmap(self.core_task.__func__,
                             h5=self.datastore.hdf5)
     for rgetter in getters.gen_rupture_getters(self.datastore,
                                                srcfilter=srcfilter):
         smap.submit((rgetter, srcfilter, self.param))
     smap.reduce(self.agg_dicts)
     if self.indices:
         self.datastore['asset_loss_table/indices'] = self.indices
     gmf_bytes = self.datastore['gmf_info']['gmfbytes'].sum()
     logging.info('Produced %s of GMFs', general.humansize(gmf_bytes))
     logging.info('Stored {:_d} / {:_d} losses'.format(*self.numlosses))
     return 1
Example #4
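Another execute variant: the rupture getters are generated with a maxweight of E / concurrent_tasks (the "or 1" guards against concurrent_tasks being zero), the task arguments are passed lazily as a generator straight to the Starmap constructor, and the size of the produced losses is tracked in lossbytes and reported with humansize instead of being counted.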
 def execute(self):
     self.datastore.flush()  # just to be sure
     oq = self.oqparam
     parent = self.datastore.parent
     csm_info = parent['csm_info'] if parent else self.csm_info
     self.init_logic_tree(csm_info)
     self.set_param(
         hdf5path=self.datastore.filename,
         tempname=cache_epsilons(
             self.datastore, oq, self.assetcol, self.crmodel, self.E))
     srcfilter = self.src_filter(self.datastore.tempname)
     maxw = self.E / (oq.concurrent_tasks or 1)
     logging.info('Reading %d ruptures', len(self.datastore['ruptures']))
     allargs = ((rgetter, srcfilter, self.param)
                for rgetter in getters.gen_rupture_getters(
                        self.datastore, maxweight=maxw))
     self.events_per_sid = []
     self.lossbytes = 0
     self.datastore.swmr_on()
     smap = parallel.Starmap(
         self.core_task.__func__, allargs, h5=self.datastore.hdf5)
     smap.reduce(self.agg_dicts)
     gmf_bytes = self.datastore['gmf_info']['gmfbytes'].sum()
     logging.info(
         'Produced %s of GMFs', general.humansize(gmf_bytes))
     logging.info(
         'Produced %s of losses', general.humansize(self.lossbytes))
     return 1
Example #5
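Like Example #1 but without loss indices: it accumulates the average ground-motion values per IMT in an AccumDict of numpy arrays of length N and, at the end, reports the on-disk size of agg_loss_table.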
 def execute(self):
     self.datastore.flush()  # just to be sure
     oq = self.oqparam
     self.set_param(hdf5path=self.datastore.filename,
                    tempname=cache_epsilons(self.datastore, oq,
                                            self.assetcol, self.crmodel,
                                            self.E))
     srcfilter = self.src_filter()
     logging.info('Sending {:_d} ruptures'.format(
         len(self.datastore['ruptures'])))
     self.events_per_sid = []
     self.datastore.swmr_on()
     self.avg_gmf = general.AccumDict(accum=numpy.zeros(self.N,
                                                        F32))  # imt -> gmvs
     smap = parallel.Starmap(start_ebrisk, h5=self.datastore.hdf5)
     smap.monitor.save('srcfilter', srcfilter)
     smap.monitor.save('crmodel', self.crmodel)
     for rg in getters.gen_rupture_getters(self.datastore,
                                           oq.concurrent_tasks):
         smap.submit((rg, self.param))
     smap.reduce(self.agg_dicts)
     gmf_bytes = self.datastore['gmf_info']['gmfbytes'].sum()
     logging.info('Produced %s of GMFs', general.humansize(gmf_bytes))
     size = general.humansize(self.datastore.getsize('agg_loss_table'))
     logging.info('Stored %s in the agg_loss_table', size)
     return 1
Example #6
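A pre_execute for the event-based risk path: it derives L, T and A from the risk model and the asset collection, reads the events (and csm_info) from the parent datastore when available, builds a loss-curve builder only when the effective investigation time is at least 2, caches the epsilons, pre-allocates the avg_losses-rlzs dataset, and warns that building per-asset loss curves is deprecated in favour of the ebrisk calculator.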
    def pre_execute(self):
        oq = self.oqparam
        super().pre_execute()
        parent = self.datastore.parent
        if not oq.ground_motion_fields:
            return  # this happens in the reportwriter

        self.L = len(self.crmodel.lti)
        self.T = len(self.assetcol.tagcol)
        self.A = len(self.assetcol)
        if parent:
            self.datastore['csm_info'] = parent['csm_info']
            self.events = parent['events'][('id', 'rlz_id')]
            logging.info('There are %d ruptures and %d events',
                         len(parent['ruptures']), len(self.events))
        else:
            self.events = self.datastore['events'][('id', 'rlz_id')]
        if oq.return_periods != [0]:
            # setting return_periods = 0 disables loss curves and maps
            eff_time = oq.investigation_time * oq.ses_per_logic_tree_path
            if eff_time < 2:
                logging.warning(
                    'eff_time=%s is too small to compute loss curves',
                    eff_time)
            else:
                self.param['builder'] = get_loss_builder(
                    parent if parent else self.datastore, oq.return_periods,
                    oq.loss_dt())
        # sorting the eids is essential to get the epsilons in the right
        # order (i.e. consistent with the one used in ebr from ruptures)
        self.riskinputs = self.build_riskinputs('gmf')
        self.param['tempname'] = riskinput.cache_epsilons(
            self.datastore, oq, self.assetcol, self.crmodel, self.E)
        self.param['avg_losses'] = oq.avg_losses
        self.param['ses_ratio'] = oq.ses_ratio
        self.param['stats'] = list(oq.hazard_stats().items())
        self.param['conditional_loss_poes'] = oq.conditional_loss_poes
        self.taskno = 0
        self.start = 0
        avg_losses = oq.avg_losses
        if avg_losses:
            self.dset = self.datastore.create_dset('avg_losses-rlzs', F32,
                                                   (self.A, self.R, self.L))
        self.agglosses = numpy.zeros((self.E, self.L), F32)
        if 'builder' in self.param:
            logging.warning(
                'Building the loss curves and maps for each asset is '
                'deprecated: consider building the aggregate curves and '
                'maps with the ebrisk calculator instead')
            self.build_datasets(self.param['builder'])
        if parent:
            parent.close()  # avoid concurrent reading issues
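Example #7

An older revision of Example #6: the risk model is still called riskmodel, the events field is rlz instead of rlz_id, the epsilons path is stored under epspath rather than tempname, and the informational log about the number of ruptures and events is missing.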
    def pre_execute(self):
        oq = self.oqparam
        super().pre_execute()
        parent = self.datastore.parent
        if not oq.ground_motion_fields:
            return  # this happens in the reportwriter

        self.L = len(self.riskmodel.lti)
        self.T = len(self.assetcol.tagcol)
        self.A = len(self.assetcol)
        if parent:
            self.datastore['csm_info'] = parent['csm_info']
            self.events = parent['events'][('id', 'rlz')]
        else:
            self.events = self.datastore['events'][('id', 'rlz')]
        if oq.return_periods != [0]:
            # setting return_periods = 0 disables loss curves and maps
            eff_time = oq.investigation_time * oq.ses_per_logic_tree_path
            if eff_time < 2:
                logging.warning(
                    'eff_time=%s is too small to compute loss curves',
                    eff_time)
            else:
                self.param['builder'] = get_loss_builder(
                    parent if parent else self.datastore,
                    oq.return_periods, oq.loss_dt())
        # sorting the eids is essential to get the epsilons in the right
        # order (i.e. consistent with the one used in ebr from ruptures)
        self.riskinputs = self.build_riskinputs('gmf')
        self.param['epspath'] = riskinput.cache_epsilons(
            self.datastore, oq, self.assetcol, self.riskmodel, self.E)
        self.param['avg_losses'] = oq.avg_losses
        self.param['ses_ratio'] = oq.ses_ratio
        self.param['stats'] = list(oq.hazard_stats().items())
        self.param['conditional_loss_poes'] = oq.conditional_loss_poes
        self.taskno = 0
        self.start = 0
        avg_losses = oq.avg_losses
        if avg_losses:
            self.dset = self.datastore.create_dset(
                'avg_losses-rlzs', F32, (self.A, self.R, self.L))
        self.agglosses = numpy.zeros((self.E, self.L), F32)
        if 'builder' in self.param:
            logging.warning(
                'Building the loss curves and maps for each asset is '
                'deprecated: consider building the aggregate curves and '
                'maps with the ebrisk calculator instead')
            self.build_datasets(self.param['builder'])
        if parent:
            parent.close()  # avoid concurrent reading issues
Example #8
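An earlier execute generation: without a parent datastore, the weights, ruptures and rupgeoms datasets are copied into an HDF5 cache file, and RuptureGetter objects are built by hand from grp_indices, splitting each group's rupture indices into blocks of about nruptures / concurrent_tasks while tracking the first event id of each block.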
 def execute(self):
     oq = self.oqparam
     self.set_param(
         num_taxonomies=self.assetcol.num_taxonomies_by_site(),
         maxweight=oq.ebrisk_maxweight / (oq.concurrent_tasks or 1),
         epspath=cache_epsilons(
             self.datastore, oq, self.assetcol, self.riskmodel, self.E))
     parent = self.datastore.parent
     if parent:
         hdf5path = parent.filename
         grp_indices = parent['ruptures'].attrs['grp_indices']
         nruptures = len(parent['ruptures'])
     else:
         hdf5path = self.datastore.hdf5cache()
         grp_indices = self.datastore['ruptures'].attrs['grp_indices']
         nruptures = len(self.datastore['ruptures'])
         with hdf5.File(hdf5path, 'r+') as cache:
             self.datastore.hdf5.copy('weights', cache)
             self.datastore.hdf5.copy('ruptures', cache)
             self.datastore.hdf5.copy('rupgeoms', cache)
     self.init_logic_tree(self.csm_info)
     smap = parallel.Starmap(
         self.core_task.__func__, monitor=self.monitor())
     trt_by_grp = self.csm_info.grp_by("trt")
     samples = self.csm_info.get_samples_by_grp()
     rlzs_by_gsim_grp = self.csm_info.get_rlzs_by_gsim_grp()
     ruptures_per_block = numpy.ceil(nruptures / (oq.concurrent_tasks or 1))
     first_event = 0
     for grp_id, rlzs_by_gsim in rlzs_by_gsim_grp.items():
         start, stop = grp_indices[grp_id]
         for indices in general.block_splitter(
                 range(start, stop), ruptures_per_block):
             rgetter = getters.RuptureGetter(
                 hdf5path, list(indices), grp_id,
                 trt_by_grp[grp_id], samples[grp_id], rlzs_by_gsim,
                 first_event)
             first_event += rgetter.num_events
             smap.submit(rgetter, self.src_filter, self.param)
     self.events_per_sid = []
     self.gmf_nbytes = 0
     res = smap.reduce(self.agg_dicts, numpy.zeros(self.N))
     logging.info('Produced %s of GMFs', general.humansize(self.gmf_nbytes))
     return res
Example #9
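The same logic as Example #8; the two snippets differ only in line wrapping.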
 def execute(self):
     oq = self.oqparam
     self.set_param(
         num_taxonomies=self.assetcol.num_taxonomies_by_site(),
         maxweight=oq.ebrisk_maxweight / (oq.concurrent_tasks or 1),
         epspath=cache_epsilons(self.datastore, oq, self.assetcol,
                                self.riskmodel, self.E))
     parent = self.datastore.parent
     if parent:
         hdf5path = parent.filename
         grp_indices = parent['ruptures'].attrs['grp_indices']
         nruptures = len(parent['ruptures'])
     else:
         hdf5path = self.datastore.hdf5cache()
         grp_indices = self.datastore['ruptures'].attrs['grp_indices']
         nruptures = len(self.datastore['ruptures'])
         with hdf5.File(hdf5path, 'r+') as cache:
             self.datastore.hdf5.copy('weights', cache)
             self.datastore.hdf5.copy('ruptures', cache)
             self.datastore.hdf5.copy('rupgeoms', cache)
     self.init_logic_tree(self.csm_info)
     smap = parallel.Starmap(self.core_task.__func__,
                             monitor=self.monitor())
     trt_by_grp = self.csm_info.grp_by("trt")
     samples = self.csm_info.get_samples_by_grp()
     rlzs_by_gsim_grp = self.csm_info.get_rlzs_by_gsim_grp()
     ruptures_per_block = numpy.ceil(nruptures / (oq.concurrent_tasks or 1))
     first_event = 0
     for grp_id, rlzs_by_gsim in rlzs_by_gsim_grp.items():
         start, stop = grp_indices[grp_id]
         for indices in general.block_splitter(range(start, stop),
                                               ruptures_per_block):
             rgetter = getters.RuptureGetter(hdf5path, list(indices),
                                             grp_id, trt_by_grp[grp_id],
                                             samples[grp_id], rlzs_by_gsim,
                                             first_event)
             first_event += rgetter.num_events
             smap.submit(rgetter, self.src_filter, self.param)
     self.events_per_sid = []
     self.gmf_nbytes = 0
     res = smap.reduce(self.agg_dicts, numpy.zeros(self.N))
     logging.info('Produced %s of GMFs', general.humansize(self.gmf_nbytes))
     return res
Example #10
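A more recent pre_execute: it copies full_lt from the parent, supports aggregate_by, secondary_simulations and master_seed, counts the events per realization with numpy.bincount, builds an AggLossTable accumulator keyed by the aggregation tags, and picks the averaging ratio depending on whether the calculation is event based (ses_ratio per realization) or scenario (one over the number of events).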
    def pre_execute(self):
        """
        Compute the GMFs, build the epsilons and the riskinputs, and prepare a
        dictionary with the unit of measure used in the export phase.
        """
        oq = self.oqparam
        if not oq.ground_motion_fields:
            return  # this happens in the reportwriter

        parent = self.datastore.parent
        if parent:
            self.datastore['full_lt'] = parent['full_lt']
            ne = len(parent['events'])
            logging.info('There are %d ruptures and %d events',
                         len(parent['ruptures']), ne)

        if oq.investigation_time and oq.return_periods != [0]:
            # setting return_periods = 0 disables loss curves
            eff_time = oq.investigation_time * oq.ses_per_logic_tree_path
            if eff_time < 2:
                logging.warning(
                    'eff_time=%s is too small to compute loss curves',
                    eff_time)
        super().pre_execute()
        self.assetcol = self.datastore['assetcol']
        self.riskinputs = self.build_riskinputs('gmf')
        self.param['tempname'] = riskinput.cache_epsilons(
            self.datastore, oq, self.assetcol, self.crmodel, self.E)
        self.param['aggregate_by'] = oq.aggregate_by
        self.param['secondary_simulations'] = oq.secondary_simulations
        self.param['master_seed'] = oq.master_seed
        self.rlzs = self.datastore['events']['rlz_id']
        self.num_events = numpy.bincount(self.rlzs)  # events by rlz
        aggkey = self.assetcol.tagcol.get_aggkey(oq.aggregate_by)
        self.param['alt'] = self.acc = scientific.AggLossTable.new(
            aggkey, oq.loss_names, sec_losses=[])
        L = len(oq.loss_names)
        self.avglosses = numpy.zeros((len(self.assetcol), self.R, L), F32)
        if oq.investigation_time:  # event_based
            self.avg_ratio = numpy.array([oq.ses_ratio] * self.R)
        else:  # scenario
            self.avg_ratio = 1. / self.num_events
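The bookkeeping at the end is compact numpy: bincount gives the number of events per realization, and in the scenario branch the averaging ratio is just its reciprocal. A small self-contained check:

    import numpy

    rlzs = numpy.array([0, 0, 1, 2, 2, 2])  # rlz_id of each event
    num_events = numpy.bincount(rlzs)       # events by rlz -> [2 1 3]
    avg_ratio = 1. / num_events             # scenario case
    print(num_events, avg_ratio)            # [2 1 3] [0.5 1. 0.33333333]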
Example #11
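A compact scenario-style pre_execute: as in Example #2 it caches the epsilons and falls back to uniform weights, and it stores the per-realization event counts (again via numpy.bincount) in param['num_events'].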
 def pre_execute(self):
     """
     Compute the GMFs, build the epsilons and the riskinputs, and prepare a
     dictionary with the unit of measure used in the export phase.
     """
     oq = self.oqparam
     super().pre_execute()
     self.assetcol = self.datastore['assetcol']
     E = oq.number_of_ground_motion_fields * self.R
     self.riskinputs = self.build_riskinputs('gmf')
     self.param['tempname'] = riskinput.cache_epsilons(
         self.datastore, oq, self.assetcol, self.crmodel, E)
     self.param['E'] = E
     # assuming the weights are the same for all IMTs
     try:
         self.param['weights'] = self.datastore['weights'][()]
     except KeyError:
         self.param['weights'] = [1 / self.R for _ in range(self.R)]
     self.rlzs = self.datastore['events']['rlz_id']
     self.param['num_events'] = numpy.bincount(self.rlzs)  # events by rlz
Example #12
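Similar to Example #2 but older: the epsilons are cached under epspath with the riskmodel, and the asset_loss_table flag is propagated in param instead of creating the loss_data datasets.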
 def pre_execute(self):
     """
     Compute the GMFs, build the epsilons and the riskinputs, and prepare a
     dictionary with the unit of measure used in the export phase.
     """
     oq = self.oqparam
     super().pre_execute()
     self.assetcol = self.datastore['assetcol']
     self.event_slice = functools.partial(
         _event_slice, oq.number_of_ground_motion_fields)
     E = oq.number_of_ground_motion_fields * self.R
     self.riskinputs = self.build_riskinputs('gmf')
     self.param['epspath'] = riskinput.cache_epsilons(
         self.datastore, oq, self.assetcol, self.riskmodel, E)
     self.param['E'] = E
     # assuming the weights are the same for all IMTs
     try:
         self.param['weights'] = self.datastore['weights'][()]
     except KeyError:
         self.param['weights'] = [1 / self.R for _ in range(self.R)]
     self.param['event_slice'] = self.event_slice
     self.param['asset_loss_table'] = self.oqparam.asset_loss_table
Example #13
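Identical to Example #12 except that cache_epsilons receives self.crmodel instead of self.riskmodel (the riskmodel attribute was renamed); the epspath key and everything else are unchanged.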
 def pre_execute(self):
     """
     Compute the GMFs, build the epsilons and the riskinputs, and prepare a
     dictionary with the unit of measure used in the export phase.
     """
     oq = self.oqparam
     super().pre_execute()
     self.assetcol = self.datastore['assetcol']
     self.event_slice = functools.partial(_event_slice,
                                          oq.number_of_ground_motion_fields)
     E = oq.number_of_ground_motion_fields * self.R
     self.riskinputs = self.build_riskinputs('gmf')
     self.param['epspath'] = riskinput.cache_epsilons(
         self.datastore, oq, self.assetcol, self.crmodel, E)
     self.param['E'] = E
     # assuming the weights are the same for all IMTs
     try:
         self.param['weights'] = self.datastore['weights'][()]
     except KeyError:
         self.param['weights'] = [1 / self.R for _ in range(self.R)]
     self.param['event_slice'] = self.event_slice
     self.param['asset_loss_table'] = self.oqparam.asset_loss_table
Example #14
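An execute that builds all the task arguments up front: the ruptures are grouped by source group through grp_indices, split into blocks weighted by their number of occurrences (via the get_n_occ helper, not shown), and each block becomes a RuptureGetter over the corresponding rows of the ruptures dataset, with event slices taken from eslices; the task duration defaults to 1200 seconds.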
    def execute(self):
        self.datastore.flush()  # just to be sure
        oq = self.oqparam
        parent = self.datastore.parent
        if parent:
            grp_indices = parent['ruptures'].attrs['grp_indices']
            n_occ = parent['ruptures']['n_occ']
            dstore = parent
            csm_info = parent['csm_info']
        else:
            grp_indices = self.datastore['ruptures'].attrs['grp_indices']
            n_occ = self.datastore['ruptures']['n_occ']
            dstore = self.datastore
            csm_info = self.csm_info
        per_block = numpy.ceil(n_occ.sum() / (oq.concurrent_tasks or 1))
        self.set_param(
            hdf5path=self.datastore.filename,
            task_duration=oq.task_duration or 1200,  # 20min
            tempname=cache_epsilons(self.datastore, oq, self.assetcol,
                                    self.crmodel, self.E))

        self.init_logic_tree(csm_info)
        trt_by_grp = csm_info.grp_by("trt")
        samples = csm_info.get_samples_by_grp()
        rlzs_by_gsim_grp = csm_info.get_rlzs_by_gsim_grp()
        ngroups = 0
        fe = 0
        eslices = self.datastore['eslices']
        allargs = []
        allpairs = list(enumerate(n_occ))
        srcfilter = self.src_filter(self.datastore.tempname)
        for grp_id, rlzs_by_gsim in rlzs_by_gsim_grp.items():
            start, stop = grp_indices[grp_id]
            if start == stop:  # no ruptures for the given grp_id
                continue
            ngroups += 1
            for pairs in general.block_splitter(allpairs[start:stop],
                                                per_block,
                                                weight=get_n_occ):
                indices = [i for i, n in pairs]
                rup_array = dstore['ruptures'][indices]
                rgetter = getters.RuptureGetter(
                    rup_array, dstore.filename, grp_id, trt_by_grp[grp_id],
                    samples[grp_id], rlzs_by_gsim,
                    eslices[fe:fe + len(indices), 0])
                allargs.append((rgetter, srcfilter, self.param))
                fe += len(indices)
        logging.info('Found %d/%d source groups with ruptures', ngroups,
                     len(rlzs_by_gsim_grp))
        self.events_per_sid = []
        self.lossbytes = 0
        self.datastore.swmr_on()
        smap = parallel.Starmap(self.core_task.__func__,
                                allargs,
                                h5=self.datastore.hdf5)
        res = smap.reduce(self.agg_dicts, numpy.zeros(self.N))
        gmf_bytes = self.datastore['gmf_info']['gmfbytes'].sum()
        logging.info('Produced %s of GMFs', general.humansize(gmf_bytes))
        logging.info('Produced %s of losses',
                     general.humansize(self.lossbytes))
        return res
Example #15
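An earlier version of Example #14: the ruptures are referenced through an HDF5 cache path rather than row arrays, num_cores is derived from half the default concurrent_tasks, the task duration defaults to 600 seconds, and the per-site event counts are stored as attributes of gmf_info.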
 def execute(self):
     oq = self.oqparam
     parent = self.datastore.parent
     if parent:
         hdf5path = parent.filename
         grp_indices = parent['ruptures'].attrs['grp_indices']
         n_occ = parent['ruptures']['n_occ']
     else:
         hdf5path = self.datastore.cachepath()
         grp_indices = self.datastore['ruptures'].attrs['grp_indices']
         n_occ = self.datastore['ruptures']['n_occ']
         with hdf5.File(hdf5path, 'r+') as cache:
             self.datastore.hdf5.copy('weights', cache)
             self.datastore.hdf5.copy('ruptures', cache)
             self.datastore.hdf5.copy('rupgeoms', cache)
     num_cores = oq.__class__.concurrent_tasks.default // 2 or 1
     per_block = numpy.ceil(n_occ.sum() / (oq.concurrent_tasks or 1))
     logging.info(
         'Using %d occurrences per block (over %d occurrences, '
         '%d events)', per_block, n_occ.sum(), self.E)
     self.set_param(
         task_duration=oq.task_duration or 600,  # 10min
         epspath=cache_epsilons(self.datastore, oq, self.assetcol,
                                self.crmodel, self.E))
     self.init_logic_tree(self.csm_info)
     trt_by_grp = self.csm_info.grp_by("trt")
     samples = self.csm_info.get_samples_by_grp()
     rlzs_by_gsim_grp = self.csm_info.get_rlzs_by_gsim_grp()
     ngroups = 0
     fe = 0
     eslices = self.datastore['eslices']
     allargs = []
     allpairs = list(enumerate(n_occ))
     for grp_id, rlzs_by_gsim in rlzs_by_gsim_grp.items():
         start, stop = grp_indices[grp_id]
         if start == stop:  # no ruptures for the given grp_id
             continue
         ngroups += 1
         for pairs in general.block_splitter(allpairs[start:stop],
                                             per_block,
                                             weight=get_n_occ):
             indices = [i for i, n in pairs]
             rgetter = getters.RuptureGetter(
                 hdf5path, indices, grp_id, trt_by_grp[grp_id],
                 samples[grp_id], rlzs_by_gsim,
                 eslices[fe:fe + len(indices), 0])
             allargs.append((rgetter, self.src_filter, self.param))
             fe += len(indices)
     logging.info('Found %d/%d source groups with ruptures', ngroups,
                  len(rlzs_by_gsim_grp))
     self.events_per_sid = []
     self.lossbytes = 0
     smap = parallel.Starmap(self.core_task.__func__,
                             allargs,
                             num_cores=num_cores,
                             hdf5path=self.datastore.filename)
     res = smap.reduce(self.agg_dicts, numpy.zeros(self.N))
     gmf_bytes = self.datastore['gmf_info']['gmfbytes'].sum()
     self.datastore.set_attrs('gmf_info',
                              events_per_sid=self.events_per_sid)
     logging.info('Produced %s of GMFs', general.humansize(gmf_bytes))
     logging.info('Produced %s of losses',
                  general.humansize(self.lossbytes))
     return res
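general.block_splitter above groups the (index, n_occ) pairs into blocks whose total weight stays within the given maximum, with the weight taken from the get_n_occ helper (not shown). A plain-Python stand-in with the same intent, for illustration only (this is not the engine's implementation):

    def split_by_weight(items, max_weight, weight=lambda item: 1):
        # greedy blocking: close a block when adding the next item
        # would exceed max_weight
        block, tot = [], 0
        for item in items:
            w = weight(item)
            if block and tot + w > max_weight:
                yield block
                block, tot = [], 0
            block.append(item)
            tot += w
        if block:
            yield block

    pairs = [(0, 5), (1, 3), (2, 4), (3, 6)]  # (rupture index, n_occ)
    for blk in split_by_weight(pairs, 8, weight=lambda p: p[1]):
        print(blk)  # [(0, 5), (1, 3)] then [(2, 4)] then [(3, 6)]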