def get_mean(self, grp=None):
    """
    Compute the mean curve as a ProbabilityMap

    :param grp:
        if not None must be a string of the form "grp-XX"; in that case
        returns the mean considering only the contribution for group XX
    """
    self.init()
    if len(self.weights) == 1:  # one realization
        # the standard deviation is zero
        pmap = self.get(0, grp)
        for sid, pcurve in pmap.items():
            array = numpy.zeros(pcurve.array.shape[:-1] + (2,))
            array[:, 0] = pcurve.array[:, 0]
            pcurve.array = array
        return pmap
    else:  # multiple realizations
        dic = ({g: self.dstore['poes/' + g] for g in self.dstore['poes']}
               if grp is None else {grp: self.dstore['poes/' + grp]})
        pmaps = self.rlzs_assoc.combine_pmaps(dic)
        return stats.compute_pmap_stats(
            pmaps, [stats.mean_curve, stats.std_curve],
            self.weights, self.imtls)
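# A minimal usage sketch for get_mean (hypothetical: assumes an existing
# calculation whose datastore contains a 'poes' group; `read` and
# `getters` as used elsewhere in this codebase):
#
#     dstore = read(-1)                    # last calculation
#     getter = getters.PmapGetter(dstore)
#     mean = getter.get_mean()             # mean over all groups
#     mean_g0 = getter.get_mean('grp-00')  # contribution of group 00 only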
def get_pmaps(dstore, indices):
    # extract the probability maps for the given site indices and
    # compute their weighted mean
    getter = getters.PmapGetter(dstore)
    getter.init()
    pmaps = getter.get_pmaps(indices)
    weights = dstore['csm_info'].rlzs['weight']
    mean = compute_pmap_stats(pmaps, [mean_curve], weights)
    return mean, pmaps
def build_hazard_stats(pgetter, hstats, monitor):
    """
    :param pgetter: an :class:`openquake.commonlib.getters.PmapGetter`
    :param hstats: a list of pairs (statname, statfunc)
    :param monitor: instance of Monitor
    :returns: a dictionary kind -> ProbabilityMap

    The "kind" is a string of the form 'rlz-XXX', 'mean' or 'quantile-XXX'
    used to specify the kind of output.
    """
    with monitor('combine pmaps'):
        pgetter.init()  # if not already initialized
        try:
            pmaps = pgetter.get_pmaps(pgetter.sids)
        except IndexError:  # no data
            return {}
        if sum(len(pmap) for pmap in pmaps) == 0:  # no data
            return {}
    pmap_by_kind = {}
    for statname, stat in hstats:
        with monitor('compute ' + statname):
            pmap = compute_pmap_stats(pmaps, [stat], pgetter.weights)
        pmap_by_kind['hcurves', statname] = pmap
        if pgetter.poes:
            pmap_by_kind['hmaps', statname] = calc.make_hmap(
                pmap, pgetter.imtls, pgetter.poes)
    return pmap_by_kind
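# The returned dictionary is keyed by ('hcurves'|'hmaps', statname) pairs;
# a hedged consumption sketch (assumes `pgetter` and `monitor` as in the
# signature above, and stats.mean_curve as used elsewhere in this codebase):
#
#     hstats = [('mean', stats.mean_curve)]
#     pmap_by_kind = build_hazard_stats(pgetter, hstats, monitor)
#     mean_curves = pmap_by_kind['hcurves', 'mean']     # a ProbabilityMap
#     mean_hmaps = pmap_by_kind.get(('hmaps', 'mean'))  # only if poes are set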
def compute_pmap_stats(self, pmap_by_grp, statfuncs):
    """
    :param pmap_by_grp: dictionary group string -> probability map
    :param statfuncs: a list of statistical functions
    :returns: a probability map containing all statistics
    """
    pmaps = self.combine_pmaps(pmap_by_grp)
    return stats.compute_pmap_stats(pmaps, statfuncs, self.weights)
def show(what='contents', calc_id=-1, extra=()):
    """
    Show the content of a datastore (by default the last one).
    """
    datadir = datastore.get_datadir()
    if what == 'all':  # show all
        if not os.path.exists(datadir):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datadir):
            try:
                ds = read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except Exception:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datadir, 'calc_%s.hdf5' % calc_id)
                logging.warn('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return

    ds = read(calc_id)

    # this part is experimental
    if what == 'rlzs' and 'poes' in ds:
        min_value = 0.01  # used in rmsep
        getter = getters.PmapGetter(ds)
        sitecol = ds['sitecol']
        pmaps = getter.get_pmaps(sitecol.sids)
        weights = [rlz.weight for rlz in getter.rlzs]
        mean = stats.compute_pmap_stats(pmaps, [numpy.mean], weights)
        dists = []
        for rlz, pmap in zip(getter.rlzs, pmaps):
            dist = rmsep(mean.array, pmap.array, min_value)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    elif view.keyfunc(what) in view:
        print(view(what, ds))
    elif what.split('/', 1)[0] in extract:
        print(extract(ds, what, *extra))
    elif what in ds:
        obj = ds[what]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.BytesIO(), obj.value).decode('utf8'))
        else:
            print(obj)
    else:
        print('%s not found' % what)
    ds.close()
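# Interactive usage sketch (hypothetical calculation ids; assumes the
# default datastore directory is populated):
#
#     >>> show('all')           # one line per calculation: id, mode, description
#     >>> show('contents', 42)  # show what is inside calc_42.hdf5
#     >>> show('oqparam', 42)   # pretty-print a non-array object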
def post_execute(self, result):
    """
    :param result: a dictionary (src_group_id, gsim) -> haz_curves or an
        empty dictionary if hazard_curves_from_gmfs is false
    """
    oq = self.oqparam
    if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
        return
    elif oq.hazard_curves_from_gmfs:
        rlzs = self.rlzs_assoc.realizations
        # save individual curves
        for i in sorted(result):
            key = 'hcurves/rlz-%03d' % i
            if result[i]:
                self.datastore[key] = result[i]
            else:
                self.datastore[key] = ProbabilityMap(oq.imtls.array.size)
                logging.info('Zero curves for %s', key)
        # compute and save statistics; this is done in process since
        # there is no need to parallelize: event based calculations
        # involve a "small" number of sites (<= 65,536)
        weights = [rlz.weight for rlz in rlzs]
        hstats = self.oqparam.hazard_stats()
        if len(hstats) and len(rlzs) > 1:
            for kind, stat in hstats:
                pmap = compute_pmap_stats(result.values(), [stat], weights)
                self.datastore['hcurves/' + kind] = pmap
    if self.datastore.parent:
        self.datastore.parent.open()
    if 'gmf_data' in self.datastore:
        self.save_gmf_bytes()
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # one could also set oq.number_of_logic_tree_samples = 0
        self.cl = ClassicalCalculator(oq, self.monitor('classical'))
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not dominate
        # the computation
        self.cl.run(close=False)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        for imt in eb_mean_curves.dtype.names:
            rdiff, index = util.max_rel_diff_index(
                cl_mean_curves[imt], eb_mean_curves[imt])
            logging.warn(
                'Relative difference with the classical '
                'mean curves for IMT=%s: %d%% at site index %d',
                imt, rdiff * 100, index)
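# A self-contained toy showing the kind of comparison performed above
# (not the actual util.max_rel_diff_index implementation): the relative
# difference is taken elementwise and the worst site index is reported.
import numpy

cl = numpy.array([0.10, 0.20, 0.30])   # classical mean curve (toy poes)
eb = numpy.array([0.11, 0.17, 0.33])   # event based mean curve (toy poes)
rel = numpy.abs(eb - cl) / cl          # elementwise relative difference
index = rel.argmax()                   # site index with the largest deviation
rdiff = rel[index]                     # 0.15, i.e. 15% at site index 1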
def build_hazard_stats(pgetter, N, hstats, individual_curves, monitor):
    """
    :param pgetter: an :class:`openquake.commonlib.getters.PmapGetter`
    :param N: the total number of sites
    :param hstats: a dictionary statname -> statfunc
    :param individual_curves: if True, also build the individual curves
    :param monitor: instance of Monitor
    :returns: a dictionary kind -> ProbabilityMap

    The "kind" is a string of the form 'rlz-XXX', 'mean' or 'quantile-XXX'
    used to specify the kind of output.
    """
    with monitor('combine pmaps'):
        pgetter.init()  # if not already initialized
        try:
            pmaps = pgetter.get_pmaps()
        except IndexError:  # no data
            return {}
        if sum(len(pmap) for pmap in pmaps) == 0:  # no data
            return {}
    R = len(pmaps)
    imtls, poes, weights = pgetter.imtls, pgetter.poes, pgetter.weights
    pmap_by_kind = {}
    hmaps_stats = []
    hcurves_stats = []
    with monitor('compute stats'):
        for statname, stat in hstats.items():
            pmap = compute_pmap_stats(pmaps, [stat], weights, imtls)
            hcurves_stats.append(pmap)
            if pgetter.poes:
                hmaps_stats.append(
                    calc.make_hmap(pmap, pgetter.imtls, pgetter.poes))
            if statname == 'mean' and R > 1 and N <= FEWSITES:
                pmap_by_kind['rlz_by_sid'] = rlz = {}
                for sid, pcurve in pmap.items():
                    rlz[sid] = util.closest_to_ref(
                        [pm.setdefault(sid, 0).array for pm in pmaps],
                        pcurve.array)['rlz']
    if hcurves_stats:
        pmap_by_kind['hcurves-stats'] = hcurves_stats
    if hmaps_stats:
        pmap_by_kind['hmaps-stats'] = hmaps_stats
    if (R > 1 and individual_curves) or not hstats:
        pmap_by_kind['hcurves-rlzs'] = pmaps
        if pgetter.poes:
            with monitor('build individual hmaps'):
                pmap_by_kind['hmaps-rlzs'] = [
                    calc.make_hmap(pmap, imtls, poes) for pmap in pmaps]
    return pmap_by_kind
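# Hedged sketch of the output layout (keys as produced above; `pgetter`,
# `N`, `hstats` and `monitor` as in the signature):
#
#     pmap_by_kind = build_hazard_stats(pgetter, N, hstats, True, monitor)
#     pmap_by_kind['hcurves-stats']  # one ProbabilityMap per statistic
#     pmap_by_kind['hmaps-stats']    # parallel list, only if poes are set
#     pmap_by_kind['rlz_by_sid']     # sid -> closest realization (few sites)
#     pmap_by_kind['hcurves-rlzs']   # per-realization maps, if requested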
def build_hcurves_and_stats(pgetter, hstats, monitor):
    """
    :param pgetter: an :class:`openquake.commonlib.calc.PmapGetter`
    :param hstats: a list of pairs (statname, statfunc)
    :param monitor: instance of Monitor
    :returns: a dictionary kind -> ProbabilityMap

    The "kind" is a string of the form 'rlz-XXX', 'mean' or 'quantile-XXX'
    used to specify the kind of output.
    """
    with monitor('combine pmaps'), pgetter:
        pmaps = pgetter.get_pmaps(pgetter.sids)
    if sum(len(pmap) for pmap in pmaps) == 0:  # no data
        return {}
    pmap_by_kind = {}
    for kind, stat in hstats:
        with monitor('compute ' + kind):
            pmap = compute_pmap_stats(pmaps, [stat], pgetter.weights)
        pmap_by_kind[kind] = pmap
    return pmap_by_kind
def post_execute(self, result):
    """
    Save the SES collection
    """
    oq = self.oqparam
    N = len(self.sitecol.complete)
    L = len(oq.imtls.array)
    if oq.hazard_calculation_id is None:
        self.rupser.close()
        num_events = sum(set_counts(self.datastore, 'events').values())
        if num_events == 0:
            raise RuntimeError(
                'No seismic events! Perhaps the investigation time is too '
                'small or the maximum_distance is too small')
        if oq.save_ruptures:
            logging.info('Setting %d event years on %d ruptures',
                         num_events, self.rupser.nruptures)
            with self.monitor('setting event years', measuremem=True,
                              autoflush=True):
                numpy.random.seed(self.oqparam.ses_seed)
                set_random_years(self.datastore, 'events',
                                 int(self.oqparam.investigation_time))
    if self.gmf_size:
        self.datastore.set_attrs('events', max_gmf_size=self.gmf_size)
        msg = 'less than ' if self.get_min_iml(self.oqparam).sum() else ''
        logging.info('Generating %s%s of GMFs', msg,
                     humansize(self.gmf_size))
    if oq.hazard_curves_from_gmfs:
        rlzs = self.csm_info.rlzs_assoc.realizations
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        hstats = self.oqparam.hazard_stats()
        if len(hstats):
            logging.info('Computing statistical hazard curves')
            for kind, stat in hstats:
                pmap = compute_pmap_stats(result.values(), [stat], weights)
                arr = numpy.zeros((N, L), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array[:, 0]
                self.datastore['hcurves/' + kind] = arr
        self.save_hmaps()
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if 'gmf_data' in self.datastore:
        self.save_gmf_bytes()
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # one could also set oq.number_of_logic_tree_samples = 0
        self.cl = ClassicalCalculator(oq)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not dominate
        # the computation
        self.cl.run(close=False)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        rdiff, index = util.max_rel_diff_index(
            cl_mean_curves, eb_mean_curves)
        logging.warn('Relative difference with the classical '
                     'mean curves: %d%% at site index %d',
                     rdiff * 100, index)
def post_execute(self, result):
    """
    Save individual and statistical hazard curves/maps, then optionally
    compare with the classical mean curves
    """
    oq = self.oqparam
    if not oq.ground_motion_fields and not oq.hazard_curves_from_gmfs:
        return
    N = len(self.sitecol.complete)
    M = len(oq.imtls)
    L = len(oq.imtls.array)
    L1 = L // M
    if result and oq.hazard_curves_from_gmfs:
        rlzs = self.datastore['full_lt'].get_realizations()
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        # NB: in the future we may want to save the individual hazard
        # curves if oq.individual_curves is set; for the moment we
        # save the statistical curves only
        hstats = oq.hazard_stats()
        S = len(hstats)
        pmaps = list(result.values())
        R = len(weights)
        if len(pmaps) != R:
            # this should never happen, unless I break the
            # logic tree reduction mechanism during refactoring
            raise AssertionError('Expected %d pmaps, got %d' %
                                 (len(weights), len(pmaps)))
        if oq.individual_curves:
            logging.info('Saving individual hazard curves')
            self.datastore.create_dset('hcurves-rlzs', F32, (N, R, M, L1))
            self.datastore.set_shape_attrs(
                'hcurves-rlzs', site_id=N, rlz_id=R,
                imt=list(oq.imtls), lvl=numpy.arange(L1))
            if oq.poes:
                P = len(oq.poes)
                M = len(oq.imtls)
                ds = self.datastore.create_dset(
                    'hmaps-rlzs', F32, (N, R, M, P))
                self.datastore.set_shape_attrs(
                    'hmaps-rlzs', site_id=N, rlz_id=R,
                    imt=list(oq.imtls), poe=oq.poes)
            for r, pmap in enumerate(pmaps):
                arr = numpy.zeros((N, M, L1), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array.reshape(M, L1)
                self.datastore['hcurves-rlzs'][:, r] = arr
                if oq.poes:
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    for sid in hmap:
                        ds[sid, r] = hmap[sid].array
        if S:
            logging.info('Computing statistical hazard curves')
            self.datastore.create_dset('hcurves-stats', F32, (N, S, M, L1))
            self.datastore.set_shape_attrs(
                'hcurves-stats', site_id=N, stat=list(hstats),
                imt=list(oq.imtls), lvl=numpy.arange(L1))
            if oq.poes:
                P = len(oq.poes)
                M = len(oq.imtls)
                ds = self.datastore.create_dset(
                    'hmaps-stats', F32, (N, S, M, P))
                self.datastore.set_shape_attrs(
                    'hmaps-stats', site_id=N, stat=list(hstats),
                    imt=list(oq.imtls), poes=oq.poes)
            for s, stat in enumerate(hstats):
                pmap = compute_pmap_stats(
                    pmaps, [hstats[stat]], weights, oq.imtls)
                arr = numpy.zeros((N, M, L1), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array.reshape(M, L1)
                self.datastore['hcurves-stats'][:, s] = arr
                if oq.poes:
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    for sid in hmap:
                        ds[sid, s] = hmap[sid].array
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        job_id = logs.init('job')
        oq.calculation_mode = 'classical'
        self.cl = ClassicalCalculator(oq, job_id)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not dominate
        # the computation
        self.cl.run()
        engine.expose_outputs(self.datastore)
        for imt in oq.imtls:
            # NB: the classical curves must come from the classical
            # calculator's datastore, not from the current one
            cl_mean_curves = get_mean_curves(self.cl.datastore, imt)
            eb_mean_curves = get_mean_curves(self.datastore, imt)
            self.rdiff, index = util.max_rel_diff_index(
                cl_mean_curves, eb_mean_curves)
            logging.warning(
                'Relative difference with the classical '
                'mean curves: %d%% at site index %d, imt=%s',
                self.rdiff * 100, index, imt)
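# A self-contained toy illustrating the reshape above: the L = M * L1 poes
# stored per site are laid out as an (M, L1) block, one row per IMT.
import numpy

M, L1 = 2, 4                                      # 2 IMTs, 4 levels each
flat = numpy.arange(M * L1, dtype=numpy.float32)  # stand-in for pmap[sid].array
block = flat.reshape(M, L1)
assert (block[1] == flat[L1:]).all()              # second IMT = second row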
def post_execute(self, result):
    """
    Save the SES collection
    """
    oq = self.oqparam
    if 'ucerf' in oq.calculation_mode:
        self.rupser.close()
        self.csm.info.update_eff_ruptures(self.csm.get_num_ruptures())
        self.setting_events()
    N = len(self.sitecol.complete)
    L = len(oq.imtls.array)
    if result and oq.hazard_curves_from_gmfs:
        rlzs = self.csm_info.get_rlzs_assoc().realizations
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        # NB: in the future we may want to save the individual hazard
        # curves if oq.individual_curves is set; for the moment we
        # save the statistical curves only
        hstats = oq.hazard_stats()
        if len(hstats):
            logging.info('Computing statistical hazard curves')
            for statname, stat in hstats:
                pmap = compute_pmap_stats(result.values(), [stat], weights)
                arr = numpy.zeros((N, L), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array[:, 0]
                self.datastore['hcurves/' + statname] = arr
                if oq.poes:
                    P = len(oq.poes)
                    I = len(oq.imtls)
                    self.datastore.create_dset(
                        'hmaps/' + statname, F32, (N, P * I))
                    self.datastore.set_attrs(
                        'hmaps/' + statname, nbytes=N * P * I * 4)
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    ds = self.datastore['hmaps/' + statname]
                    for sid in hmap:
                        ds[sid] = hmap[sid].array[:, 0]
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if 'gmf_data' in self.datastore:
        self.save_gmf_bytes()
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # one could also set oq.number_of_logic_tree_samples = 0
        self.cl = ClassicalCalculator(oq)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not dominate
        # the computation
        self.cl.run(close=False)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        rdiff, index = util.max_rel_diff_index(
            cl_mean_curves, eb_mean_curves)
        logging.warn('Relative difference with the classical '
                     'mean curves: %d%% at site index %d',
                     rdiff * 100, index)
def get_pmaps(dstore, indices):
    getter = calc.PmapGetter(dstore)
    pmaps = getter.get_pmaps(indices)
    weights = dstore['realizations']['weight']
    mean = compute_pmap_stats(pmaps, [mean_curve], weights)
    return mean, pmaps
def get_pmaps(dstore, indices):
    getter = calc.PmapGetter(dstore)
    pmaps = getter.get_pmaps(indices)
    weights = [rlz.weight for rlz in getter.rlzs]
    mean = compute_pmap_stats(pmaps, [mean_curve], weights)
    return mean, pmaps
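# Conceptually, compute_pmap_stats with mean_curve reduces to a weighted
# average of the per-realization poes; a self-contained toy (not the
# actual implementation):
import numpy

poes = numpy.array([[0.10, 0.05],    # realization 0
                    [0.20, 0.10],    # realization 1
                    [0.40, 0.30]])   # realization 2
weights = numpy.array([0.5, 0.3, 0.2])
mean = weights @ poes                # one mean poe per intensity level
assert numpy.allclose(mean, [0.19, 0.115])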
def post_execute(self, result):
    """
    Compute and save statistics on the hazard curves, then optionally
    compare with the classical mean curves
    """
    oq = self.oqparam
    if not oq.ground_motion_fields:
        return
    N = len(self.sitecol.complete)
    L = len(oq.imtls.array)
    if result and oq.hazard_curves_from_gmfs:
        rlzs = self.rlzs_assoc.realizations
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        # NB: in the future we may want to save the individual hazard
        # curves if oq.individual_curves is set; for the moment we
        # save the statistical curves only
        hstats = oq.hazard_stats()
        pmaps = list(result.values())
        if len(hstats):
            logging.info('Computing statistical hazard curves')
            if len(weights) != len(pmaps):
                # this should never happen, unless I break the
                # logic tree reduction mechanism during refactoring
                raise AssertionError('Expected %d pmaps, got %d' %
                                     (len(weights), len(pmaps)))
            for statname, stat in hstats:
                pmap = compute_pmap_stats(pmaps, [stat], weights, oq.imtls)
                arr = numpy.zeros((N, L), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array[:, 0]
                self.datastore['hcurves/' + statname] = arr
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    self.datastore.create_dset(
                        'hmaps/' + statname, F32, (N, M, P))
                    self.datastore.set_attrs(
                        'hmaps/' + statname, nbytes=N * P * M * 4)
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    ds = self.datastore['hmaps/' + statname]
                    for sid in hmap:
                        ds[sid] = hmap[sid].array
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        job_id = logs.init('job')
        self.cl = ClassicalCalculator(oq, job_id)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not dominate
        # the computation
        self.cl.run(close=False)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        rdiff, index = util.max_rel_diff_index(
            cl_mean_curves, eb_mean_curves)
        logging.warning('Relative difference with the classical '
                        'mean curves: %d%% at site index %d',
                        rdiff * 100, index)
def post_execute(self, result):
    """
    Save individual and statistical hazard curves/maps, then optionally
    compare with the classical mean curves
    """
    oq = self.oqparam
    if not oq.ground_motion_fields:
        return
    N = len(self.sitecol.complete)
    L = len(oq.imtls.array)
    if result and oq.hazard_curves_from_gmfs:
        rlzs = self.rlzs_assoc.realizations
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        # NB: in the future we may want to save the individual hazard
        # curves if oq.individual_curves is set; for the moment we
        # save the statistical curves only
        hstats = oq.hazard_stats()
        S = len(hstats)
        pmaps = list(result.values())
        R = len(weights)
        if len(pmaps) != R:
            # this should never happen, unless I break the
            # logic tree reduction mechanism during refactoring
            raise AssertionError('Expected %d pmaps, got %d' %
                                 (len(weights), len(pmaps)))
        if oq.individual_curves:
            logging.info('Saving individual hazard curves')
            self.datastore.create_dset('hcurves-rlzs', F32, (N, R, L))
            self.datastore.set_attrs('hcurves-rlzs', nbytes=N * R * L * 4)
            if oq.poes:
                P = len(oq.poes)
                M = len(oq.imtls)
                ds = self.datastore.create_dset(
                    'hmaps-rlzs', F32, (N, R, M, P))
                self.datastore.set_attrs(
                    'hmaps-rlzs', nbytes=N * R * P * M * 4)
            for r, pmap in enumerate(pmaps):
                arr = numpy.zeros((N, L), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array[:, 0]
                self.datastore['hcurves-rlzs'][:, r] = arr
                if oq.poes:
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    for sid in hmap:
                        ds[sid, r] = hmap[sid].array
        if S:
            logging.info('Computing statistical hazard curves')
            self.datastore.create_dset('hcurves-stats', F32, (N, S, L))
            self.datastore.set_attrs('hcurves-stats', nbytes=N * S * L * 4)
            if oq.poes:
                P = len(oq.poes)
                M = len(oq.imtls)
                ds = self.datastore.create_dset(
                    'hmaps-stats', F32, (N, S, M, P))
                self.datastore.set_attrs(
                    'hmaps-stats', nbytes=N * S * P * M * 4)
            for s, stat in enumerate(hstats):
                pmap = compute_pmap_stats(
                    pmaps, [hstats[stat]], weights, oq.imtls)
                arr = numpy.zeros((N, L), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array[:, 0]
                self.datastore['hcurves-stats'][:, s] = arr
                if oq.poes:
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    for sid in hmap:
                        ds[sid, s] = hmap[sid].array
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        job_id = logs.init('job')
        self.cl = ClassicalCalculator(oq, job_id)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not dominate
        # the computation
        self.cl.run(close=False)
        engine.expose_outputs(self.cl.datastore)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        self.rdiff, index = util.max_rel_diff_index(
            cl_mean_curves, eb_mean_curves)
        logging.warning('Relative difference with the classical '
                        'mean curves: %d%% at site index %d',
                        self.rdiff * 100, index)
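# Reading back the datasets saved above (hedged sketch; the file name is
# hypothetical, the shapes follow the create_dset calls):
import h5py

with h5py.File('calc_42.hdf5', 'r') as f:  # hypothetical path
    hcurves = f['hcurves-stats'][()]       # shape (N, S, L)
    hmaps = f['hmaps-stats'][()]           # shape (N, S, M, P)
    mean_maps = hmaps[:, 0]                # statistic index 0, e.g. 'mean'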