Example 1
 def agg_dicts(self, acc, result):
     """
     :param acc: accumulator dictionary
     :param result: an AccumDict with events, ruptures, gmfs and hcurves
     """
     sav_mon = self.monitor('saving gmfs')
     agg_mon = self.monitor('aggregating hcurves')
     with sav_mon:
         data = result.pop('gmfdata')
         if len(data):
             times = result.pop('times')
             rupids = list(times['rup_id'])
             self.datastore['gmf_data/time_by_rup'][rupids] = times
             hdf5.extend(self.datastore['gmf_data/data'], data)
             sig_eps = result.pop('sig_eps')
             hdf5.extend(self.datastore['gmf_data/sigma_epsilon'], sig_eps)
             for sid, start, stop in result['indices']:
                 self.indices[sid, 0].append(start + self.offset)
                 self.indices[sid, 1].append(stop + self.offset)
             self.offset += len(data)
     if self.offset >= TWO32:
         raise RuntimeError('The gmf_data table has more than %d rows' %
                            TWO32)
     imtls = self.oqparam.imtls
     with agg_mon:
         for key, poes in result.get('hcurves', {}).items():
             r, sid, imt = str2rsi(key)
             array = acc[r].setdefault(sid, 0).array[imtls(imt), 0]
             array[:] = 1. - (1. - array) * (1. - poes)
     self.datastore.flush()
     return acc
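
Every variant in this listing aggregates hazard curves with the same update,
array[:] = 1. - (1. - array) * (1. - poes): assuming the contributions are
independent, the probability that at least one of them exceeds a given level
is one minus the product of the probabilities that none does. A minimal numpy
sketch of the combination rule (the names are illustrative, not the engine's):

    import numpy

    def combine_poes(acc_poes, new_poes):
        """Combine two arrays of probabilities of exceedance under
        the independence assumption: 1 - (1 - p1) * (1 - p2)."""
        return 1. - (1. - acc_poes) * (1. - new_poes)

    acc = numpy.array([0.1, 0.5, 0.0])
    new = numpy.array([0.2, 0.5, 0.3])
    print(combine_poes(acc, new))  # [0.28 0.75 0.3]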
Example 2
    def combine_pmaps_and_save_gmfs(self, acc, res):
        """
        Combine the hazard curves (if any) and save the gmfs (if any)
        sequentially; notice that the gmfs may come from
        different tasks in any order.

        :param acc: an accumulator for the hazard curves
        :param res: a dictionary rlzi, imt -> [gmf_array, curves_by_imt]
        :returns: a new accumulator
        """
        sav_mon = self.monitor('saving gmfs')
        agg_mon = self.monitor('aggregating hcurves')
        self.gmdata += res['gmdata']
        data = res['gmfdata']
        if data is not None:
            with sav_mon:
                hdf5.extend3(self.datastore.hdf5path, 'gmf_data/data', data)
                for sid, start, stop in res['indices']:
                    self.indices[sid].append(
                        (start + self.offset, stop + self.offset))
                self.offset += len(data)
        slicedic = self.oqparam.imtls.slicedic
        with agg_mon:
            for key, poes in res['hcurves'].items():
                rlzi, sid, imt = str2rsi(key)
                array = acc[rlzi].setdefault(sid, 0).array[slicedic[imt], 0]
                array[:] = 1. - (1. - array) * (1. - poes)
        sav_mon.flush()
        agg_mon.flush()
        self.datastore.flush()
        if 'ruptures' in res:
            vars(EventBasedRuptureCalculator)['save_ruptures'](self,
                                                               res['ruptures'])
        return acc
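
hdf5.extend3 is an engine helper that opens the datastore by path and appends
an array to a resizable dataset. A rough standalone equivalent with plain
h5py could look as follows (a sketch assuming a one-dimensional resizable
dataset; the helper name extend_dataset is mine, not the engine's):

    import h5py
    import numpy

    def extend_dataset(hdf5path, key, array):
        """Append `array` to the resizable 1D dataset `key`,
        creating it on first use. Returns the new length."""
        with h5py.File(hdf5path, 'a') as f:
            if key not in f:
                f.create_dataset(key, shape=(0,), maxshape=(None,),
                                 dtype=array.dtype, chunks=True)
            dset = f[key]
            n = len(dset)
            dset.resize((n + len(array),))
            dset[n:] = array
            return n + len(array)

    extend_dataset('/tmp/demo.hdf5', 'gmf_data/data', numpy.arange(10.))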
Example 3
 def agg_dicts(self, acc, result):
     """
     :param acc: accumulator dictionary
     :param result: an AccumDict with events, ruptures, gmfs and hcurves
     """
     sav_mon = self.monitor('saving gmfs')
     agg_mon = self.monitor('aggregating hcurves')
     with sav_mon:
         data = result.pop('gmfdata')
         if len(data):
             idxs = base.get_idxs(data, self.eid2idx)  # this has to be fast
             data['eid'] = idxs  # replace eid with idx
             self.datastore.extend('gmf_data/data', data)
             sig_eps = result.pop('sig_eps')
             sig_eps['eid'] = base.get_idxs(sig_eps, self.eid2idx)
             self.datastore.extend('gmf_data/sigma_epsilon', sig_eps)
             # it is important to save the number of bytes while the
             # computation is going, to see the progress
             update_nbytes(self.datastore, 'gmf_data/data', data)
             for sid, start, stop in result['indices']:
                 self.indices[sid, 0].append(start + self.offset)
                 self.indices[sid, 1].append(stop + self.offset)
             self.offset += len(data)
             if self.offset >= TWO32:
                 raise RuntimeError(
                     'The gmf_data table has more than %d rows' % TWO32)
     imtls = self.oqparam.imtls
     with agg_mon:
         for key, poes in result.get('hcurves', {}).items():
             r, sid, imt = str2rsi(key)
             array = acc[r].setdefault(sid, 0).array[imtls(imt), 0]
             array[:] = 1. - (1. - array) * (1. - poes)
     self.datastore.flush()
     return acc
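
Example 3 remaps 64-bit event IDs to dense 32-bit row indices with
base.get_idxs(data, self.eid2idx) before storing them, keeping the table
compact. A plausible vectorized version of that remapping (a guess at the
technique, not the engine's actual implementation):

    import numpy

    def get_idxs(data, eid2idx):
        """Map the 'eid' field of a structured array to dense indices
        by dictionary lookup, vectorized with numpy.unique."""
        eids, inverse = numpy.unique(data['eid'], return_inverse=True)
        idxs = numpy.array([eid2idx[eid] for eid in eids], numpy.uint32)
        return idxs[inverse]

    data = numpy.zeros(4, [('eid', numpy.uint64), ('gmv', float)])
    data['eid'] = [42, 7, 42, 99]
    eid2idx = {7: 0, 42: 1, 99: 2}
    print(get_idxs(data, eid2idx))  # [1 0 1 2]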
Example 4
    def combine_pmaps_and_save_gmfs(self, acc, res):
        """
        Combine the hazard curves (if any) and save the gmfs (if any)
        sequentially; notice that the gmfs may come from
        different tasks in any order.

        :param acc: an accumulator for the hazard curves
        :param res: a dictionary rlzi, imt -> [gmf_array, curves_by_imt]
        :returns: a new accumulator
        """
        sav_mon = self.monitor('saving gmfs')
        agg_mon = self.monitor('aggregating hcurves')
        if res['gmfcoll'] is not None:
            with sav_mon:
                for rlz, array in res['gmfcoll'].items():
                    if len(array):
                        key = 'gmf_data/%04d' % rlz.ordinal
                        self.datastore.extend(key, array)
        slicedic = self.oqparam.imtls.slicedic
        with agg_mon:
            for key, poes in res['hcurves'].items():
                rlzi, sid, imt = str2rsi(key)
                array = acc[rlzi].setdefault(sid, 0).array[slicedic[imt], 0]
                array[:] = 1. - (1. - array) * (1. - poes)
        sav_mon.flush()
        agg_mon.flush()
        self.datastore.flush()
        if 'ruptures' in res:
            vars(EventBasedRuptureCalculator)['save_ruptures'](
                self, res['ruptures'])
        return acc
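
Example 4 stores the ground motion fields in one dataset per realization,
under the key 'gmf_data/%04d' % rlz.ordinal. Zero-padding the ordinal keeps
the datasets listed in numeric order, since HDF5 groups are normally iterated
in lexicographic key order:

    for ordinal in (0, 1, 10, 100):
        print('gmf_data/%04d' % ordinal)
    # gmf_data/0000
    # gmf_data/0001
    # gmf_data/0010
    # gmf_data/0100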
Example 5
    def combine_pmaps_and_save_gmfs(self, acc, res):
        """
        Combine the hazard curves (if any) and save the gmfs (if any)
        sequentially; notice that the gmfs may come from
        different tasks in any order.

        :param acc: an accumulator for the hazard curves
        :param res: a dictionary rlzi, imt -> [gmf_array, curves_by_imt]
        :returns: a new accumulator
        """
        sav_mon = self.monitor('saving gmfs')
        agg_mon = self.monitor('aggregating hcurves')
        self.gmdata += res['gmdata']
        if res['gmfcoll'] is not None:
            with sav_mon:
                for (grp_id, gsim), array in res['gmfcoll'].items():
                    if len(array):
                        key = 'gmf_data/grp-%02d/%s' % (grp_id, gsim)
                        hdf5.extend3(self.datastore.hdf5path, key, array)
        slicedic = self.oqparam.imtls.slicedic
        with agg_mon:
            for key, poes in res['hcurves'].items():
                rlzi, sid, imt = str2rsi(key)
                array = acc[rlzi].setdefault(sid, 0).array[slicedic[imt], 0]
                array[:] = 1. - (1. - array) * (1. - poes)
        sav_mon.flush()
        agg_mon.flush()
        self.datastore.flush()
        if 'ruptures' in res:
            vars(EventBasedRuptureCalculator)['save_ruptures'](self,
                                                               res['ruptures'])
        return acc
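
Examples 2, 4 and 5 end with vars(EventBasedRuptureCalculator)
['save_ruptures'](self, ...): looking the function up in the class dictionary
and passing self explicitly runs that specific implementation, bypassing any
override defined on self's own class. A minimal illustration of the pattern
with toy classes:

    class Base:
        def save(self, x):
            return 'Base.save(%s)' % x

    class Child(Base):
        def save(self, x):
            return 'Child.save(%s)' % x

    obj = Child()
    print(obj.save(1))                 # Child.save(1)
    print(vars(Base)['save'](obj, 1))  # Base.save(1), override bypassed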
Example 6
 def agg_dicts(self, acc, result):
     """
     :param acc: accumulator dictionary
     :param result: an AccumDict with events, ruptures, gmfs and hcurves
     """
     oq = self.oqparam
     if oq.save_ruptures and not oq.ground_motion_fields:
         self.gmf_size += max_gmf_size(
             result['ruptures'], self.csm_info.rlzs_assoc.get_rlzs_by_gsim,
             self.csm_info.get_samples_by_grp(), len(self.oqparam.imtls))
     if hasattr(result, 'calc_times'):
         for srcid, nsites, eids, dt in result.calc_times:
             info = self.csm.infos[srcid]
             info.num_sites += nsites
             info.calc_time += dt
             info.num_split += 1
             info.events += len(eids)
     if hasattr(result, 'eff_ruptures'):
         acc.eff_ruptures += result.eff_ruptures
     if hasattr(result, 'events'):
         self.datastore.extend('events', result.events)
     self.save_ruptures(result['ruptures'])
     sav_mon = self.monitor('saving gmfs')
     agg_mon = self.monitor('aggregating hcurves')
     hdf5path = self.datastore.hdf5path
     if 'gmdata' in result:
         self.gmdata += result['gmdata']
         data = result['gmfdata']
         with sav_mon:
             hdf5.extend3(hdf5path, 'gmf_data/data', data)
             # it is important to save the number of bytes while the
             # computation is going, to see the progress
             update_nbytes(self.datastore, 'gmf_data/data', data)
             for sid, start, stop in result['indices']:
                 self.indices[sid].append(
                     (start + self.offset, stop + self.offset))
             self.offset += len(data)
             if self.offset >= TWO32:
                 raise RuntimeError(
                     'The gmf_data table has more than %d rows' % TWO32)
     slicedic = self.oqparam.imtls.slicedic
     with agg_mon:
         for key, poes in result.get('hcurves', {}).items():
             r, sid, imt = str2rsi(key)
             array = acc[r].setdefault(sid, 0).array[slicedic[imt], 0]
             array[:] = 1. - (1. - array) * (1. - poes)
     sav_mon.flush()
     agg_mon.flush()
     self.datastore.flush()
     return acc
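
Several variants record, per site ID, the (start, stop) row ranges belonging
to that site inside the growing gmf_data table. Each task reports ranges
relative to its own chunk, so self.offset shifts them into global row
coordinates before being advanced by the chunk length. A stripped-down sketch
of the bookkeeping (the class around it is hypothetical, the names mirror the
snippets):

    import collections

    class IndexAccumulator:
        """Accumulate per-site (start, stop) slices of a global table
        built by appending chunks coming from many tasks."""
        def __init__(self):
            self.indices = collections.defaultdict(list)
            self.offset = 0  # rows already stored

        def add_chunk(self, chunk_len, indices):
            for sid, start, stop in indices:
                self.indices[sid].append(
                    (start + self.offset, stop + self.offset))
            self.offset += chunk_len

    acc = IndexAccumulator()
    acc.add_chunk(5, [(0, 0, 3), (1, 3, 5)])  # first task: 5 rows
    acc.add_chunk(4, [(0, 0, 4)])             # second task: 4 rows
    print(dict(acc.indices))  # {0: [(0, 3), (5, 9)], 1: [(3, 5)]}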
Example 7
 def agg_dicts(self, acc, result):
     """
     :param acc: accumulator dictionary
     :param result: an AccumDict with events, ruptures, gmfs and hcurves
     """
     # in UCERF
     if hasattr(result, 'ruptures_by_grp'):
         for ruptures in result.ruptures_by_grp.values():
             self.save_ruptures(ruptures)
     elif hasattr(result, 'events_by_grp'):
         for grp_id in result.events_by_grp:
             events = result.events_by_grp[grp_id]
             self.datastore.extend('events', events)
     sav_mon = self.monitor('saving gmfs')
     agg_mon = self.monitor('aggregating hcurves')
     if 'gmdata' in result:
         self.gmdata += result['gmdata']
         data = result.pop('gmfdata')
         with sav_mon:
             self.datastore.extend('gmf_data/data', data)
             # it is important to save the number of bytes while the
             # computation is going, to see the progress
             update_nbytes(self.datastore, 'gmf_data/data', data)
             for sid, start, stop in result['indices']:
                 self.indices[sid, 0].append(start + self.offset)
                 self.indices[sid, 1].append(stop + self.offset)
             self.offset += len(data)
             if self.offset >= TWO32:
                 raise RuntimeError(
                     'The gmf_data table has more than %d rows' % TWO32)
     imtls = self.oqparam.imtls
     with agg_mon:
         for key, poes in result.get('hcurves', {}).items():
             r, sid, imt = str2rsi(key)
             array = acc[r].setdefault(sid, 0).array[imtls(imt), 0]
             array[:] = 1. - (1. - array) * (1. - poes)
     sav_mon.flush()
     agg_mon.flush()
     self.datastore.flush()
     return acc
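
The line self.gmdata += result['gmdata'] relies on AccumDict, the engine's
dictionary that implements + as key-wise addition, so per-task statistics
merge in a single statement. A toy reimplementation of just that behaviour
(a sketch, not the real AccumDict from openquake.baselib):

    class AccumDict(dict):
        """A dict summing values key-wise on +=, sketching the
        behaviour the snippets rely on."""
        def __iadd__(self, other):
            for key, value in other.items():
                self[key] = self.get(key, 0) + value
            return self

    gmdata = AccumDict()
    gmdata += {'events': 10, 'sites': 3}
    gmdata += {'events': 5}
    print(gmdata)  # {'events': 15, 'sites': 3}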
Example 8
 def agg_dicts(self, acc, result):
     """
     :param acc: accumulator dictionary
     :param result: an AccumDict with events, ruptures, gmfs and hcurves
     """
     sav_mon = self.monitor('saving gmfs')
     agg_mon = self.monitor('aggregating hcurves')
     primary = self.oqparam.get_primary_imtls()
     sec_imts = self.oqparam.get_sec_imts()
     with sav_mon:
         df = result.pop('gmfdata')
         if len(df):
             dset = self.datastore['gmf_data/sid']
             times = result.pop('times')
             [task_no] = numpy.unique(times['task_no'])
             rupids = list(times['rup_id'])
             self.datastore['gmf_data/time_by_rup'][rupids] = times
             hdf5.extend(dset, df.sid.to_numpy())
             hdf5.extend(self.datastore['gmf_data/eid'], df.eid.to_numpy())
             for m in range(len(primary)):
                 hdf5.extend(self.datastore[f'gmf_data/gmv_{m}'],
                             df[f'gmv_{m}'])
             for sec_imt in sec_imts:
                 hdf5.extend(self.datastore[f'gmf_data/{sec_imt}'],
                             df[sec_imt])
             sig_eps = result.pop('sig_eps')
             hdf5.extend(self.datastore['gmf_data/sigma_epsilon'], sig_eps)
             self.offset += len(df)
     if self.offset >= TWO32:
         raise RuntimeError(
             'The gmf_data table has more than %d rows' % TWO32)
     imtls = self.oqparam.imtls
     with agg_mon:
         for key, poes in result.get('hcurves', {}).items():
             r, sid, imt = str2rsi(key)
             array = acc[r].setdefault(sid, 0).array[imtls(imt), 0]
             array[:] = 1. - (1. - array) * (1. - poes)
     self.datastore.flush()
     return acc
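
The guard on self.offset appears in every variant: the row indices into
gmf_data are stored as 32-bit unsigned integers, so the table must stay below
2**32 rows or the indices would wrap around. Presumably TWO32 is defined
accordingly; a self-contained restatement of the check:

    import numpy

    TWO32 = 2 ** 32  # uint32 indices can address at most 2**32 rows

    def check_size(offset):
        if offset >= TWO32:
            raise RuntimeError(
                'The gmf_data table has more than %d rows' % TWO32)

    check_size(10)  # fine
    print(numpy.iinfo(numpy.uint32).max + 1 == TWO32)  # True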
Example 9
 def agg_dicts(self, acc, result):
     """
     :param acc: accumulator dictionary
     :param result: an AccumDict with events, ruptures, gmfs and hcurves
     """
     sav_mon = self.monitor('saving gmfs')
     agg_mon = self.monitor('aggregating hcurves')
     M = len(self.oqparam.imtls)
     sec_outputs = self.oqparam.get_sec_outputs()
     with sav_mon:
         data = result.pop('gmfdata')
         if len(data):
             times = result.pop('times')
             rupids = list(times['rup_id'])
             self.datastore['gmf_data/time_by_rup'][rupids] = times
             hdf5.extend(self.datastore['gmf_data/sid'], data['sid'])
             hdf5.extend(self.datastore['gmf_data/eid'], data['eid'])
             for m in range(M):
                 hdf5.extend(self.datastore[f'gmf_data/gmv_{m}'],
                             data['gmv'][:, m])
             for sec_out in sec_outputs:
                 hdf5.extend(self.datastore[f'gmf_data/{sec_out}'],
                             data[sec_out])
             sig_eps = result.pop('sig_eps')
             hdf5.extend(self.datastore['gmf_data/sigma_epsilon'], sig_eps)
             self.offset += len(data)
     if self.offset >= TWO32:
         raise RuntimeError('The gmf_data table has more than %d rows' %
                            TWO32)
     imtls = self.oqparam.imtls
     with agg_mon:
         for key, poes in result.get('hcurves', {}).items():
             r, sid, imt = str2rsi(key)
             array = acc[r].setdefault(sid, 0).array[imtls(imt), 0]
             array[:] = 1. - (1. - array) * (1. - poes)
     self.datastore.flush()
     return acc
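
str2rsi inverts the string keys under which the workers ship partial hazard
curves; from the call sites, each key packs a realization index, a site ID
and an IMT name. A plausible encoder/decoder pair (the '/'-separated layout
is an assumption based on how the result is unpacked, not the engine's exact
format):

    def rsi2str(rlzi, sid, imt):
        """Pack (realization index, site id, IMT) into a string key."""
        return '%d/%d/%s' % (rlzi, sid, imt)

    def str2rsi(key):
        """Unpack a key built by rsi2str."""
        rlzi, sid, imt = key.split('/')
        return int(rlzi), int(sid), imt

    key = rsi2str(3, 42, 'SA(0.5)')
    print(str2rsi(key))  # (3, 42, 'SA(0.5)')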
Example 10
 def agg_dicts(self, acc, result):
     """
     :param acc: accumulator dictionary
     :param result: an AccumDict with events, ruptures, gmfs and hcurves
     """
     sav_mon = self.monitor('saving gmfs')
     agg_mon = self.monitor('aggregating hcurves')
     with sav_mon:
         data = result.pop('gmfdata')
         if len(data) == 0:
             return acc
         idxs = base.get_idxs(data, self.eid2idx)  # this has to be fast
         data['eid'] = idxs  # replace eid with idx
         self.datastore.extend('gmf_data/data', data)
         sig_eps = result.pop('sig_eps')
         sig_eps['eid'] = base.get_idxs(sig_eps, self.eid2idx)
         self.datastore.extend('gmf_data/sigma_epsilon', sig_eps)
         # it is important to save the number of bytes while the
         # computation is going, to see the progress
         update_nbytes(self.datastore, 'gmf_data/data', data)
         for sid, start, stop in result['indices']:
             self.indices[sid, 0].append(start + self.offset)
             self.indices[sid, 1].append(stop + self.offset)
         self.offset += len(data)
         if self.offset >= TWO32:
             raise RuntimeError(
                 'The gmf_data table has more than %d rows' % TWO32)
     imtls = self.oqparam.imtls
     with agg_mon:
         for key, poes in result.get('hcurves', {}).items():
             r, sid, imt = str2rsi(key)
             array = acc[r].setdefault(sid, 0).array[imtls(imt), 0]
             array[:] = 1. - (1. - array) * (1. - poes)
     sav_mon.flush()
     agg_mon.flush()
     self.datastore.flush()
     return acc
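
Finally, every variant brackets its I/O and aggregation in self.monitor(...)
contexts and flushes them, so the time spent saving GMFs and aggregating
hazard curves shows up in the performance report. A bare-bones timing monitor
in the same spirit (an illustrative stand-in, not the engine's Monitor class):

    import time

    class Monitor:
        """Accumulate the wall time spent inside `with` blocks."""
        def __init__(self, operation):
            self.operation = operation
            self.duration = 0.

        def __enter__(self):
            self._t0 = time.time()
            return self

        def __exit__(self, etype, exc, tb):
            self.duration += time.time() - self._t0

        def flush(self):
            print('%s: %.3fs' % (self.operation, self.duration))
            self.duration = 0.

    sav_mon = Monitor('saving gmfs')
    with sav_mon:
        time.sleep(0.01)
    sav_mon.flush()  # prints e.g. "saving gmfs: 0.010s"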