def post_execute(self, result):
    """
    :param result:
        a dictionary (trt_model_id, gsim) -> haz_curves or an empty
        dictionary if hazard_curves_from_gmfs is false
    """
    oq = self.oqparam
    if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
        return
    if oq.hazard_curves_from_gmfs:
        ClassicalCalculator.post_execute.__func__(self, result)
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # use a different datastore
        self.cl = ClassicalCalculator(oq, self.monitor)
        self.cl.datastore.parent = self.datastore
        result = self.cl.run(pre_execute=False, clean_up=False)
        for imt in self.mean_curves.dtype.fields:
            rdiff, index = max_rel_diff_index(
                self.cl.mean_curves[imt], self.mean_curves[imt])
            logging.warn(
                'Relative difference with the classical '
                'mean curves for IMT=%s: %d%% at site index %d',
                imt, rdiff * 100, index)
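# A minimal numpy sketch of what a helper like max_rel_diff_index is
# expected to return here: the largest relative difference between two
# sets of mean hazard curves and the index of the site where it occurs.
# This is a hypothetical standalone reimplementation for illustration,
# not the OpenQuake implementation; the `min_value` guard against
# division by zero is an assumption of this sketch.
import numpy

def max_rel_diff_index_sketch(ref_curves, curves, min_value=1E-10):
    ref = numpy.asarray(ref_curves, float)   # shape (num_sites, num_levels)
    other = numpy.asarray(curves, float)     # same shape as ref
    denom = numpy.maximum(numpy.abs(ref), min_value)
    rel = numpy.abs(ref - other) / denom     # relative difference per level
    per_site = rel.max(axis=1)               # worst level at each site
    index = per_site.argmax()                # site with the largest mismatch
    return per_site[index], index

# Example: a 20% mismatch at site 1 (up to floating point rounding)
# >>> max_rel_diff_index_sketch([[.1, .2], [.5, .4]], [[.1, .2], [.4, .4]])
# (0.2, 1)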
def post_execute(self, result):
    """
    :param result:
        a dictionary (trt_model_id, gsim) -> haz_curves or an empty
        dictionary if hazard_curves_from_gmfs is false
    """
    oq = self.oqparam
    if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
        return
    elif oq.hazard_curves_from_gmfs:
        rlzs = self.rlzs_assoc.realizations
        dic = {}
        for rlzi in result:
            dic[rlzs[rlzi]] = array_of_curves(
                result[rlzi], len(self.sitecol), oq.imtls)
        self.save_curves(dic)
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # one could also set oq.number_of_logic_tree_samples = 0
        self.cl = ClassicalCalculator(oq, self.monitor)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not
        # dominate the computation
        self.cl.run(hazard_calculation_id=self.datastore.calc_id)
        for imt in self.mean_curves.dtype.fields:
            rdiff, index = max_rel_diff_index(
                self.cl.mean_curves[imt], self.mean_curves[imt])
            logging.warn(
                'Relative difference with the classical '
                'mean curves for IMT=%s: %d%% at site index %d',
                imt, rdiff * 100, index)
def post_execute(self, result):
    """
    :param result:
        a dictionary (trt_model_id, gsim) -> haz_curves or an empty
        dictionary if hazard_curves_from_gmfs is false
    """
    oq = self.oqparam
    if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
        return
    if oq.hazard_curves_from_gmfs:
        ClassicalCalculator.post_execute.__func__(self, result)
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # use a different datastore
        self.cl = ClassicalCalculator(oq, self.monitor)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not
        # dominate the computation
        self.cl.run(hazard_calculation_id=self.datastore.calc_id)
        for imt in self.mean_curves.dtype.fields:
            rdiff, index = max_rel_diff_index(
                self.cl.mean_curves[imt], self.mean_curves[imt])
            logging.warn('Relative difference with the classical '
                         'mean curves for IMT=%s: %d%% at site index %d',
                         imt, rdiff * 100, index)
def post_execute(self, result):
    """
    :param result:
        a dictionary (src_group_id, gsim) -> haz_curves or an empty
        dictionary if hazard_curves_from_gmfs is false
    """
    oq = self.oqparam
    if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
        return
    elif oq.hazard_curves_from_gmfs:
        rlzs = self.rlzs_assoc.realizations
        # save individual curves
        for i in sorted(result):
            key = 'hcurves/rlz-%03d' % i
            if result[i]:
                self.datastore[key] = result[i]
            else:
                self.datastore[key] = ProbabilityMap(oq.imtls.array.size)
                logging.info('Zero curves for %s', key)
        # compute and save statistics; this is done in-process since
        # we don't need to parallelize: event based calculations
        # involve a "small" number of sites (<= 65,536)
        weights = [rlz.weight for rlz in rlzs]
        hstats = self.oqparam.hazard_stats()
        if len(hstats) and len(rlzs) > 1:
            for kind, stat in hstats:
                pmap = compute_pmap_stats(result.values(), [stat], weights)
                self.datastore['hcurves/' + kind] = pmap
    if self.datastore.parent:
        self.datastore.parent.open()
    if 'gmf_data' in self.datastore:
        self.save_gmf_bytes()
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # one could also set oq.number_of_logic_tree_samples = 0
        self.cl = ClassicalCalculator(oq, self.monitor('classical'))
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not
        # dominate the computation
        self.cl.run(close=False)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        for imt in eb_mean_curves.dtype.names:
            rdiff, index = util.max_rel_diff_index(
                cl_mean_curves[imt], eb_mean_curves[imt])
            logging.warn(
                'Relative difference with the classical '
                'mean curves for IMT=%s: %d%% at site index %d',
                imt, rdiff * 100, index)
def post_execute(self, result):
    """
    Save the SES collection
    """
    oq = self.oqparam
    N = len(self.sitecol.complete)
    L = len(oq.imtls.array)
    if oq.hazard_calculation_id is None:
        self.rupser.close()
        num_events = sum(set_counts(self.datastore, 'events').values())
        if num_events == 0:
            raise RuntimeError(
                'No seismic events! Perhaps the investigation time is '
                'too small or the maximum_distance is too small')
        if oq.save_ruptures:
            logging.info('Setting %d event years on %d ruptures',
                         num_events, self.rupser.nruptures)
            with self.monitor('setting event years', measuremem=True,
                              autoflush=True):
                numpy.random.seed(self.oqparam.ses_seed)
                set_random_years(self.datastore, 'events',
                                 int(self.oqparam.investigation_time))
    if self.gmf_size:
        self.datastore.set_attrs('events', max_gmf_size=self.gmf_size)
        msg = 'less than ' if self.get_min_iml(self.oqparam).sum() else ''
        logging.info('Generating %s%s of GMFs',
                     msg, humansize(self.gmf_size))
    if oq.hazard_curves_from_gmfs:
        rlzs = self.csm_info.rlzs_assoc.realizations
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        hstats = self.oqparam.hazard_stats()
        if len(hstats):
            logging.info('Computing statistical hazard curves')
            for kind, stat in hstats:
                pmap = compute_pmap_stats(result.values(), [stat], weights)
                arr = numpy.zeros((N, L), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array[:, 0]
                self.datastore['hcurves/' + kind] = arr
        self.save_hmaps()
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if 'gmf_data' in self.datastore:
        self.save_gmf_bytes()
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # one could also set oq.number_of_logic_tree_samples = 0
        self.cl = ClassicalCalculator(oq)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not
        # dominate the computation
        self.cl.run(close=False)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        rdiff, index = util.max_rel_diff_index(
            cl_mean_curves, eb_mean_curves)
        logging.warn(
            'Relative difference with the classical '
            'mean curves: %d%% at site index %d',
            rdiff * 100, index)
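# The version above seeds numpy's global RNG with oq.ses_seed before
# calling set_random_years, so the sampled event years are reproducible.
# A hedged sketch of the underlying idea with a hypothetical standalone
# helper (not OpenQuake's set_random_years, which works on the datastore):
import numpy

def assign_random_years_sketch(num_events, investigation_time, ses_seed):
    rng = numpy.random.default_rng(ses_seed)  # reproducible across runs
    # one year in [1, investigation_time] per event, sorted for readability
    return numpy.sort(rng.integers(1, investigation_time + 1, num_events))

# e.g. assign_random_years_sketch(5, 50, ses_seed=42) returns five sorted
# years between 1 and 50, identical on every run with the same seed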
def post_execute(self, result):
    oq = self.oqparam
    if not oq.ground_motion_fields and not oq.hazard_curves_from_gmfs:
        return
    N = len(self.sitecol.complete)
    M = len(oq.imtls)
    L = len(oq.imtls.array)
    L1 = L // M
    if result and oq.hazard_curves_from_gmfs:
        rlzs = self.datastore['full_lt'].get_realizations()
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        # NB: in the future we may want to save the individual hazard
        # curves if oq.individual_curves is set; for the moment we
        # save the statistical curves only
        hstats = oq.hazard_stats()
        S = len(hstats)
        pmaps = list(result.values())
        R = len(weights)
        if len(pmaps) != R:
            # this should never happen, unless I break the
            # logic tree reduction mechanism during refactoring
            raise AssertionError('Expected %d pmaps, got %d' %
                                 (len(weights), len(pmaps)))
        if oq.individual_curves:
            logging.info('Saving individual hazard curves')
            self.datastore.create_dset('hcurves-rlzs', F32, (N, R, M, L1))
            self.datastore.set_shape_attrs(
                'hcurves-rlzs', site_id=N, rlz_id=R,
                imt=list(oq.imtls), lvl=numpy.arange(L1))
            if oq.poes:
                P = len(oq.poes)
                M = len(oq.imtls)
                ds = self.datastore.create_dset(
                    'hmaps-rlzs', F32, (N, R, M, P))
                self.datastore.set_shape_attrs(
                    'hmaps-rlzs', site_id=N, rlz_id=R,
                    imt=list(oq.imtls), poe=oq.poes)
            for r, pmap in enumerate(pmaps):
                arr = numpy.zeros((N, M, L1), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array.reshape(M, L1)
                self.datastore['hcurves-rlzs'][:, r] = arr
                if oq.poes:
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    for sid in hmap:
                        ds[sid, r] = hmap[sid].array
        if S:
            logging.info('Computing statistical hazard curves')
            self.datastore.create_dset('hcurves-stats', F32, (N, S, M, L1))
            self.datastore.set_shape_attrs(
                'hcurves-stats', site_id=N, stat=list(hstats),
                imt=list(oq.imtls), lvl=numpy.arange(L1))
            if oq.poes:
                P = len(oq.poes)
                M = len(oq.imtls)
                ds = self.datastore.create_dset(
                    'hmaps-stats', F32, (N, S, M, P))
                self.datastore.set_shape_attrs(
                    'hmaps-stats', site_id=N, stat=list(hstats),
                    imt=list(oq.imtls), poe=oq.poes)
            for s, stat in enumerate(hstats):
                pmap = compute_pmap_stats(
                    pmaps, [hstats[stat]], weights, oq.imtls)
                arr = numpy.zeros((N, M, L1), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array.reshape(M, L1)
                self.datastore['hcurves-stats'][:, s] = arr
                if oq.poes:
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    for sid in hmap:
                        ds[sid, s] = hmap[sid].array
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        job_id = logs.init('job')
        oq.calculation_mode = 'classical'
        self.cl = ClassicalCalculator(oq, job_id)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not
        # dominate the computation
        self.cl.run()
        engine.expose_outputs(self.cl.datastore)
        for imt in oq.imtls:
            cl_mean_curves = get_mean_curves(self.cl.datastore, imt)
            eb_mean_curves = get_mean_curves(self.datastore, imt)
            self.rdiff, index = util.max_rel_diff_index(
                cl_mean_curves, eb_mean_curves)
            logging.warning(
                'Relative difference with the classical '
                'mean curves: %d%% at site index %d, imt=%s',
                self.rdiff * 100, index, imt)
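# In the version above the flat array of L = M * L1 levels (M IMTs with
# L1 levels each) is reshaped per site via pmap[sid].array.reshape(M, L1).
# A tiny illustration of that layout with made-up numbers:
import numpy

M, L1 = 2, 4                              # e.g. two IMTs, four levels each
flat = numpy.arange(M * L1, dtype=float)  # the flat (L,) layout of one site
per_imt = flat.reshape(M, L1)             # row m holds the curve of IMT m
assert per_imt.shape == (M, L1) and (per_imt[1] == flat[L1:]).all()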
def post_execute(self, result):
    """
    Save the SES collection
    """
    oq = self.oqparam
    if 'ucerf' in oq.calculation_mode:
        self.rupser.close()
        self.csm.info.update_eff_ruptures(self.csm.get_num_ruptures())
        self.setting_events()
    N = len(self.sitecol.complete)
    L = len(oq.imtls.array)
    if result and oq.hazard_curves_from_gmfs:
        rlzs = self.csm_info.get_rlzs_assoc().realizations
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        # NB: in the future we may want to save the individual hazard
        # curves if oq.individual_curves is set; for the moment we
        # save the statistical curves only
        hstats = oq.hazard_stats()
        if len(hstats):
            logging.info('Computing statistical hazard curves')
            for statname, stat in hstats:
                pmap = compute_pmap_stats(result.values(), [stat], weights)
                arr = numpy.zeros((N, L), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array[:, 0]
                self.datastore['hcurves/' + statname] = arr
                if oq.poes:
                    P = len(oq.poes)
                    I = len(oq.imtls)
                    self.datastore.create_dset(
                        'hmaps/' + statname, F32, (N, P * I))
                    self.datastore.set_attrs(
                        'hmaps/' + statname, nbytes=N * P * I * 4)
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    ds = self.datastore['hmaps/' + statname]
                    for sid in hmap:
                        ds[sid] = hmap[sid].array[:, 0]
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if 'gmf_data' in self.datastore:
        self.save_gmf_bytes()
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        # one could also set oq.number_of_logic_tree_samples = 0
        self.cl = ClassicalCalculator(oq)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not
        # dominate the computation
        self.cl.run(close=False)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        rdiff, index = util.max_rel_diff_index(
            cl_mean_curves, eb_mean_curves)
        logging.warn('Relative difference with the classical '
                     'mean curves: %d%% at site index %d',
                     rdiff * 100, index)
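# calc.make_hmap above turns hazard curves into hazard maps: for each
# site it finds the intensity level at which the curve crosses each
# target PoE. A hedged sketch of one such interpolation for a single
# curve, assuming log-log interpolation (a common choice for hazard
# curves; not necessarily OpenQuake's exact scheme):
import numpy

def hazard_map_sketch(imls, curve_poes, target_poes):
    imls = numpy.asarray(imls, float)              # increasing levels
    curve_poes = numpy.asarray(curve_poes, float)  # decreasing PoEs
    # numpy.interp wants increasing x, so reverse the decreasing curve
    log_iml = numpy.interp(numpy.log(target_poes),
                           numpy.log(curve_poes[::-1]),
                           numpy.log(imls[::-1]))
    return numpy.exp(log_iml)

# >>> hazard_map_sketch([0.1, 0.2, 0.4], [0.9, 0.5, 0.1], [0.5])
# array([0.2])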
def post_execute(self, result):
    oq = self.oqparam
    if not oq.ground_motion_fields:
        return
    N = len(self.sitecol.complete)
    L = len(oq.imtls.array)
    if result and oq.hazard_curves_from_gmfs:
        rlzs = self.rlzs_assoc.realizations
        # compute and save statistics; this is done in process and can
        # be very slow if there are thousands of realizations
        weights = [rlz.weight for rlz in rlzs]
        # NB: in the future we may want to save the individual hazard
        # curves if oq.individual_curves is set; for the moment we
        # save the statistical curves only
        hstats = oq.hazard_stats()
        pmaps = list(result.values())
        if len(hstats):
            logging.info('Computing statistical hazard curves')
            if len(weights) != len(pmaps):
                # this should never happen, unless I break the
                # logic tree reduction mechanism during refactoring
                raise AssertionError('Expected %d pmaps, got %d' %
                                     (len(weights), len(pmaps)))
            for statname, stat in hstats:
                pmap = compute_pmap_stats(pmaps, [stat], weights, oq.imtls)
                arr = numpy.zeros((N, L), F32)
                for sid in pmap:
                    arr[sid] = pmap[sid].array[:, 0]
                self.datastore['hcurves/' + statname] = arr
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    self.datastore.create_dset(
                        'hmaps/' + statname, F32, (N, M, P))
                    self.datastore.set_attrs(
                        'hmaps/' + statname, nbytes=N * P * M * 4)
                    hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                    ds = self.datastore['hmaps/' + statname]
                    for sid in hmap:
                        ds[sid] = hmap[sid].array
    if self.datastore.parent:
        self.datastore.parent.open('r')
    if oq.compare_with_classical:  # compute classical curves
        export_dir = os.path.join(oq.export_dir, 'cl')
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        oq.export_dir = export_dir
        job_id = logs.init('job')
        self.cl = ClassicalCalculator(oq, job_id)
        # TODO: perhaps it is possible to avoid reprocessing the source
        # model; however, this is usually quite fast and does not
        # dominate the computation
        self.cl.run(close=False)
        cl_mean_curves = get_mean_curves(self.cl.datastore)
        eb_mean_curves = get_mean_curves(self.datastore)
        rdiff, index = util.max_rel_diff_index(
            cl_mean_curves, eb_mean_curves)
        logging.warning('Relative difference with the classical '
                        'mean curves: %d%% at site index %d',
                        rdiff * 100, index)
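# The mean statistic produced by compute_pmap_stats above is a weighted
# average of the realization curves with the logic-tree weights. A
# minimal standalone sketch over plain arrays (a hypothetical helper
# taking one (N, L) array per realization, not OpenQuake's
# ProbabilityMap-based implementation):
import numpy

def mean_curves_sketch(curves_by_rlz, weights):
    arr = numpy.array(curves_by_rlz, float)   # shape (R, N, L)
    w = numpy.asarray(weights, float)         # R logic-tree weights
    assert numpy.isclose(w.sum(), 1), 'the weights must sum to 1'
    return numpy.einsum('r,rnl->nl', w, arr)  # weighted mean, shape (N, L)

# Two realizations, one site, two levels (up to floating point rounding):
# >>> mean_curves_sketch([[[0.2, 0.1]], [[0.4, 0.3]]], [0.75, 0.25])
# array([[0.25, 0.15]])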