def plot(what='examples', calc_id=-1, other_id=None, webapi=False):
    """
    Generic plotter for local and remote calculations.

    :param what: extraction string like 'hcurves?imt=PGA', or 'examples'
        to print the available plot kinds
    :param calc_id: calculation ID (-1 means the latest calculation)
    :param other_id: optional second calculation ID to compare against
    :param webapi: if True, extract via the WebAPI instead of locally
    :raises SystemExit: on malformed `what` strings (missing '?', missing
        'imt=' for hcurves/hmaps, spurious 'imt=' for uhs)
    """
    if what == 'examples':
        # collect the docstrings of all make_figure_* plotters as help
        help_msg = ['Examples of possible plots:']
        for k, v in globals().items():
            if k.startswith('make_figure_'):
                help_msg.append(v.__doc__)
        raise SystemExit(''.join(help_msg))
    if '?' not in what:
        raise SystemExit('Missing ? in %r' % what)
    prefix, rest = what.split('?', 1)
    # NB: tuple membership, not substring matching, so that a bogus
    # prefix like 'h' or '' is not silently accepted
    if prefix in ('hcurves', 'hmaps') and 'imt=' not in rest:
        raise SystemExit('Missing imt= in %r' % what)
    elif prefix == 'uhs' and 'imt=' in rest:
        raise SystemExit('Invalid IMT in %r' % what)
    elif prefix in ('hcurves', 'uhs', 'disagg') and 'site_id=' not in rest:
        what += '&site_id=0'  # default to the first site
    if prefix == 'disagg' and 'poe=' not in rest:
        what += '&poe_id=0'  # default to the first poe
    if webapi:
        xs = [WebExtractor(calc_id)]
        if other_id:
            xs.append(WebExtractor(other_id))
    else:
        xs = [Extractor(calc_id)]
        if other_id:
            xs.append(Extractor(other_id))
    make_figure = globals()['make_figure_' + prefix]
    plt = make_figure(xs, what)
    plt.show()
def plot(what, calc_id=-1, other_id=None, webapi=False):
    """
    Generic plotter for local and remote calculations.

    :param what: extraction string like 'hcurves?imt=PGA'
    :param calc_id: calculation ID (-1 means the latest calculation)
    :param other_id: optional second calculation ID to compare against
    :param webapi: if True, extract via the WebAPI instead of locally
    :raises SystemExit: on malformed `what` strings
    """
    if '?' not in what:
        raise SystemExit('Missing ? in %r' % what)
    prefix, rest = what.split('?', 1)
    # NB: tuple membership, not substring matching, so that a bogus
    # prefix like 'h' or '' is not silently accepted
    if prefix in ('hcurves', 'hmaps') and 'imt=' not in rest:
        raise SystemExit('Missing imt= in %r' % what)
    elif prefix == 'uhs' and 'imt=' in rest:
        raise SystemExit('Invalid IMT in %r' % what)
    elif prefix in ('hcurves', 'uhs', 'disagg') and 'site_id=' not in rest:
        what += '&site_id=0'  # default to the first site
    if prefix == 'disagg' and 'poe=' not in rest:
        what += '&poe_id=0'  # default to the first poe
    if webapi:
        xs = [WebExtractor(calc_id)]
        if other_id:
            xs.append(WebExtractor(other_id))
    else:
        xs = [Extractor(calc_id)]
        if other_id:
            xs.append(Extractor(other_id))
    make_figure = globals()['make_figure_' + prefix]
    plt = make_figure(xs, what)
    plt.show()
def plot(what, calc_id=-1, other_id=None, webapi=False):
    """
    Hazard curves plotter. Here are a few examples of use::

        $ oq plot 'hcurves?kind=mean&imt=PGA&site_id=0'
        $ oq plot 'hmaps?kind=mean&imt=PGA'
        $ oq plot 'uhs?kind=mean&site_id=0'

    :param what: extraction string, must contain '?' and 'kind='
    :param calc_id: calculation ID (-1 means the latest calculation)
    :param other_id: optional second calculation ID to compare against
    :param webapi: if True, extract via the WebAPI instead of locally
    :raises SystemExit: on malformed `what` strings
    """
    if '?' not in what:
        raise SystemExit('Missing ? in %r' % what)
    elif 'kind' not in what:
        raise SystemExit('Missing kind= in %r' % what)
    prefix, rest = what.split('?', 1)
    # a hard error (not an assert, which is stripped under -O) for a
    # prefix which is not one of the supported plot kinds
    if prefix not in ('hcurves', 'hmaps', 'uhs'):
        raise SystemExit('Wrong prefix %r, expected hcurves, hmaps or uhs'
                         % prefix)
    if prefix in ('hcurves', 'hmaps') and 'imt=' not in rest:
        raise SystemExit('Missing imt= in %r' % what)
    elif prefix == 'uhs' and 'imt=' in rest:
        raise SystemExit('Invalid IMT in %r' % what)
    elif prefix in ('hcurves', 'uhs') and 'site_id=' not in rest:
        what += '&site_id=0'  # default to the first site
    if webapi:
        xs = [WebExtractor(calc_id)]
        if other_id:
            xs.append(WebExtractor(other_id))
    else:
        xs = [Extractor(calc_id)]
        if other_id:
            xs.append(Extractor(other_id))
    make_figure = globals()['make_figure_' + prefix]
    plt = make_figure(xs, what)
    plt.show()
def main(what, calc_id: int = -1, webapi=False, local=False, *,
         extract_dir='.'):
    """
    Extract an output from the datastore and save it into an .hdf5 file.
    By default uses the WebAPI, otherwise the extraction is done locally.
    """
    with performance.Monitor('extract', measuremem=True) as mon:
        if local:
            if calc_id == -1:
                # resolve -1 to the id of the latest job in the database
                calc_id = logs.dbcmd('get_job', calc_id).id
            wrapper = WebExtractor(calc_id, 'http://localhost:8800', '').get(
                what)
        elif webapi:
            wrapper = WebExtractor(calc_id).get(what)
        else:
            wrapper = Extractor(calc_id).get(what)
        stem = what.replace('/', '-').replace('?', '-')
        got_csv = hasattr(wrapper, 'array') and isinstance(wrapper.array, str)
        if got_csv:  # the extractor returned a big CSV string
            fname = os.path.join(extract_dir, '%s_%d.csv' % (stem, calc_id))
            with open(fname, 'w', encoding='utf-8') as out:
                out.write(wrapper.array)
        else:  # a regular ArrayWrapper, saved as .npz
            fname = os.path.join(extract_dir, '%s_%d.npz' % (stem, calc_id))
            hdf5.save_npz(wrapper, fname)
        print('Saved', fname)
    if mon.duration > 1:
        print(mon)
def getdata(what, calc_ids, samplesites):
    """
    Extract the mean curves/maps for the given calculations, sampling the
    sites when there are more than `samplesites`, and check that the other
    calculations are consistent with the first one.
    """
    extractors = [Extractor(cid) for cid in calc_ids]
    first = extractors[0]
    sitecol = first.get('sitecol')
    oq = first.oqparam
    imtls, poes = oq.imtls, oq.poes
    if len(sitecol) > samplesites:
        # reproducible sampling: the sample size doubles as the seed
        numpy.random.seed(samplesites)
        sids = numpy.random.choice(len(sitecol), samplesites, replace=False)
    else:  # keep all sites
        sids = sitecol['sids']
    arrays = [first.get(what + '?kind=mean').mean[sids]]
    first.close()
    for ex in extractors[1:]:
        oq = ex.oqparam
        # every calculation must share the site collection of the first
        numpy.testing.assert_equal(ex.get('sitecol').array, sitecol.array)
        if what == 'hcurves':
            numpy.testing.assert_equal(oq.imtls.array, imtls.array)
        elif what == 'hmaps':
            numpy.testing.assert_equal(oq.poes, poes)
        arrays.append(ex.get(what + '?kind=mean').mean[sids])
        ex.close()
    return sids, imtls, poes, numpy.array(arrays)  # shape (C, N, L)
def extract(what, calc_id=-1, webapi=False, local=False, extract_dir='.'):
    """
    Extract an output from the datastore and save it into an .hdf5 file.
    By default uses the WebAPI, otherwise the extraction is done locally.

    :param what: extraction string (e.g. 'hcurves?kind=mean')
    :param calc_id: calculation ID (-1 means the latest calculation)
    :param webapi: if True, extract via the WebAPI
    :param local: if True, extract via a WebAPI server on localhost:8800
    :param extract_dir: directory where the output file is written
    """
    with performance.Monitor('extract', measuremem=True) as mon:
        if local:
            if calc_id == -1:
                # resolve -1 to the id of the latest job in the database
                calc_id = logs.dbcmd('get_job', calc_id).id
            aw = WebExtractor(calc_id, 'http://localhost:8800', '').get(what)
        elif webapi:
            aw = WebExtractor(calc_id).get(what)
        else:
            aw = Extractor(calc_id).get(what)
        w = what.replace('/', '-').replace('?', '-')
        if isinstance(aw.array, str):  # a big string
            fname = os.path.join(extract_dir, '%s_%d.csv' % (w, calc_id))
            with open(fname, 'w', encoding='utf-8') as f:
                f.write(aw.array)
        elif aw.is_good():  # a regular ArrayWrapper
            fname = os.path.join(extract_dir, '%s_%d.npz' % (w, calc_id))
            hdf5.save_npz(aw, fname)
        else:  # ArrayWrapper of strings, dictionaries or other types
            fname = os.path.join(extract_dir, '%s_%d.txt' % (w, calc_id))
            # close the file deterministically (the original leaked it)
            with open(fname, 'w', encoding='utf-8') as f:
                f.write(aw.toml())
        print('Saved', fname)
    if mon.duration > 1:
        print(mon)
def plot_hmaps(imt, calc_id, webapi=False):
    """
    Mean hazard maps plotter.
    """
    ex = WebExtractor(calc_id) if webapi else Extractor(calc_id)
    with ex:
        oq = ex.oqparam
        sites = ex.get('sitecol').array
        hmaps = ex.get('hmaps/%s' % str(imt)).array
        lons = sites['lon']
        lats = sites['lat']
        fig = make_figure(lons, lats, imt, oq.imtls[str(imt)], oq.poes, hmaps)
        fig.show()
def extract(what, calc_id, webapi=True):
    """
    Extract an output from the datastore and save it into an .hdf5 file.
    By default uses the WebAPI, otherwise the extraction is done locally.
    """
    with performance.Monitor('extract', measuremem=True) as mon:
        # pick the extractor class once, then fetch the object
        extractor_cls = WebExtractor if webapi else Extractor
        obj = extractor_cls(calc_id).get(what)
        stem = what.replace('/', '-').replace('?', '-')
        fname = '%s_%d.hdf5' % (stem, calc_id)
        obj.save(fname)
        print('Saved', fname)
    if mon.duration > 1:
        print(mon)
def extract(what, calc_id=-1, webapi=True, local=False):
    """
    Extract an output from the datastore and save it into an .hdf5 file.
    By default uses the WebAPI, otherwise the extraction is done locally.
    """
    with performance.Monitor('extract', measuremem=True) as mon:
        if local:  # use a WebAPI server running on localhost
            obj = WebExtractor(calc_id, 'http://localhost:8800', '').get(what)
        else:
            cls = WebExtractor if webapi else Extractor
            obj = cls(calc_id).get(what)
        stem = what.replace('/', '-').replace('?', '-')
        fname = '%s_%d.npz' % (stem, calc_id)
        hdf5.save_npz(obj, fname)
        print('Saved', fname)
    if mon.duration > 1:
        print(mon)
def getdata(what, calc_ids, sitecol, sids):
    """
    Extract the mean curves/maps restricted to the given site IDs and
    check that the other calculations are consistent with the first one.
    """
    extractors = [Extractor(cid) for cid in calc_ids]
    first = extractors[0]
    oq = first.oqparam
    imtls, poes = oq.imtls, oq.poes
    arrays = [first.get(what + '?kind=mean').mean[sids]]
    first.close()
    for ex in extractors[1:]:
        oq = ex.oqparam
        # the site coordinates must match the reference site collection
        numpy.testing.assert_array_equal(
            ex.get('sitecol')[['lon', 'lat']], sitecol[['lon', 'lat']])
        if what == 'hcurves':
            numpy.testing.assert_array_equal(oq.imtls.array, imtls.array)
        elif what == 'hmaps':
            numpy.testing.assert_array_equal(oq.poes, poes)
        arrays.append(ex.get(what + '?kind=mean').mean[sids])
        ex.close()
    return imtls, poes, numpy.array(arrays)  # shape (C, N, L)
def extract(what, calc_id=-1, webapi=True, local=False):
    """
    Extract an output from the datastore and save it into an .hdf5 file.
    By default uses the WebAPI, otherwise the extraction is done locally.

    :param what: extraction string (e.g. 'hcurves?kind=mean')
    :param calc_id: calculation ID (-1 means the latest calculation)
    :param webapi: if True, extract via the WebAPI
    :param local: if True, extract via a WebAPI server on localhost:8800
    """
    with performance.Monitor('extract', measuremem=True) as mon:
        if local:
            obj = WebExtractor(calc_id, 'http://localhost:8800', '').get(what)
        elif webapi:
            obj = WebExtractor(calc_id).get(what)
        else:
            obj = Extractor(calc_id).get(what)
        w = what.replace('/', '-').replace('?', '-')
        if not obj.shape:  # is a dictionary of arrays
            fname = '%s_%d.txt' % (w, calc_id)
            # close the file deterministically (the original leaked it)
            with open(fname, 'w', encoding='utf-8') as f:
                f.write(obj.toml())
        else:  # a regular ArrayWrapper
            fname = '%s_%d.hdf5' % (w, calc_id)
            obj.save(fname)
        print('Saved', fname)
    if mon.duration > 1:
        print(mon)
def __init__(self, calc_ids):
    # one Extractor per calculation; the first one also provides the
    # reference site collection and job parameters
    self.extractors = [Extractor(cid) for cid in calc_ids]
    first = self.extractors[0]
    self.sitecol = first.get('sitecol')
    self.oq = first.oqparam
def compare(what, imt, calc_ids, files, samplesites='',
            rtol=0, atol=1E-3, threshold=1E-2):
    """
    Compare the hazard curves or maps of two or more calculations.
    Also used to compare the times with `oq compare cumtime of -1 -2`.

    :param what: 'cumtime', 'rups', 'hcurves' or 'hmaps'
    :param imt: IMT to compare (rupture ordinal for what='rups')
    :param calc_ids: calculation IDs to compare
    :param files: if set, write one .txt file per calculation
    :param samplesites: number of sites to sample, or a filename of site IDs
    :param rtol: relative tolerance
    :param atol: absolute tolerance
    :param threshold: ignore the differences below this value
    """
    if what == 'cumtime':
        data = []
        for calc_id in calc_ids:
            time = Extractor(calc_id).get('performance_data')['time_sec'].sum()
            data.append((calc_id, time))
        print(views.rst_table(data, ['calc_id', 'time']))
        return
    if what == 'rups':
        return compare_rups(int(imt), calc_ids[0])
    sitecol = Extractor(calc_ids[0]).get('sitecol')
    sids = sitecol['sids']
    if samplesites:
        try:
            numsamples = int(samplesites)  # number
        except ValueError:  # filename with one site ID per token
            # NB: build an array (not a list) so that both .sort() and
            # the fancy indexing sids[diff_idxs] below work
            with open(samplesites) as f:
                sids = numpy.array([int(sid) for sid in f.read().split()])
        else:
            if len(sitecol) > numsamples:
                numpy.random.seed(numsamples)  # reproducible sampling
                sids = numpy.random.choice(
                    len(sitecol), numsamples, replace=False)
    sids.sort()
    imtls, poes, arrays = getdata(what, calc_ids, sitecol, sids)
    try:
        levels = imtls[imt]
    except KeyError:
        sys.exit('%s not found. The available IMTs are %s' %
                 (imt, list(imtls)))
    imti = {imt: i for i, imt in enumerate(imtls)}  # IMT -> index
    head = ['site_id'] if files else ['site_id', 'calc_id']
    if what == 'hcurves':
        array_imt = arrays[:, :, imti[imt], :]  # shape (C, N, L1)
        header = head + ['%.5f' % lvl for lvl in levels]
    else:  # hmaps
        array_imt = arrays[:, :, imti[imt]]
        header = head + [str(poe) for poe in poes]
    rows = collections.defaultdict(list)
    diff_idxs = get_diff_idxs(array_imt, rtol, atol, threshold)
    if len(diff_idxs) == 0:
        print('There are no differences within the tolerances '
              'atol=%s, rtol=%d%%, threshold=%s, sids=%s' %
              (atol, rtol * 100, threshold, sids))
        return
    arr = array_imt.transpose(1, 0, 2)  # shape (N, C, L)
    for sid, array in sorted(zip(sids[diff_idxs], arr[diff_idxs])):
        for calc_id, cols in zip(calc_ids, array):
            if files:
                rows[calc_id].append([sid] + list(cols))
            else:
                rows['all'].append([sid, calc_id] + list(cols))
    if files:
        # open/close each file as needed (the original leaked the handles)
        for calc_id in calc_ids:
            with open('%s.txt' % calc_id, 'w') as f:
                f.write(views.rst_table(rows[calc_id], header))
                print('Generated %s' % f.name)
    else:
        print(views.rst_table(rows['all'], header))
    if len(calc_ids) == 2 and what == 'hmaps':
        # root mean square difference per poe between the two calculations
        ms = numpy.mean((array_imt[0] - array_imt[1])**2, axis=0)  # shape P
        rows = [(str(poe), m) for poe, m in zip(poes, numpy.sqrt(ms))]
        print(views.rst_table(rows, ['poe', 'rms-diff']))
def compare(what, imt, calc_ids, files, samplesites=100,
            rtol=0, atol=1E-3, threshold=1E-2):
    """
    Compare the hazard curves or maps of two or more calculations.

    :param what: 'hcurves' or 'hmaps'
    :param imt: the IMT to compare
    :param calc_ids: calculation IDs to compare
    :param files: if set, write one .txt file per calculation
    :param samplesites: number of sites to sample, or a filename of site IDs
    :param rtol: relative tolerance
    :param atol: absolute tolerance
    :param threshold: ignore the differences below this value
    """
    sitecol = Extractor(calc_ids[0]).get('sitecol')
    try:
        numsamples = int(samplesites)  # number
    except ValueError:  # filename with one site ID per token
        # NB: build an array (not a list) so that both .sort() and the
        # fancy indexing sids[diff_idxs] below work
        with open(samplesites) as f:
            sids = numpy.array([int(sid) for sid in f.read().split()])
    else:
        if len(sitecol) > numsamples:
            numpy.random.seed(numsamples)  # reproducible sampling
            sids = numpy.random.choice(len(sitecol), numsamples,
                                       replace=False)
        else:  # keep all sites
            sids = sitecol['sids']
    sids.sort()
    imtls, poes, arrays = getdata(what, calc_ids, sitecol, sids)
    try:
        levels = imtls[imt]
    except KeyError:
        sys.exit('%s not found. The available IMTs are %s' %
                 (imt, list(imtls)))
    imt2idx = {imt: i for i, imt in enumerate(imtls)}  # IMT -> index
    head = ['site_id'] if files else ['site_id', 'calc_id']
    if what == 'hcurves':
        # imtls(imt) gives the slice of levels belonging to the IMT
        array_imt = arrays[:, :, imtls(imt)]
        header = head + ['%.5f' % lvl for lvl in levels]
    else:  # hmaps
        array_imt = arrays[:, :, imt2idx[imt]]
        header = head + [str(poe) for poe in poes]
    rows = collections.defaultdict(list)
    diff_idxs = get_diff_idxs(array_imt, rtol, atol, threshold)
    if len(diff_idxs) == 0:
        print('There are no differences within the tolerances '
              'atol=%s, rtol=%d%%, threshold=%s, sids=%s' %
              (atol, rtol * 100, threshold, sids))
        return
    arr = array_imt.transpose(1, 0, 2)  # shape (N, C, L)
    for sid, array in sorted(zip(sids[diff_idxs], arr[diff_idxs])):
        for calc_id, cols in zip(calc_ids, array):
            if files:
                rows[calc_id].append([sid] + list(cols))
            else:
                rows['all'].append([sid, calc_id] + list(cols))
    if files:
        # open/close each file as needed (the original leaked the handles)
        for calc_id in calc_ids:
            with open('%s.txt' % calc_id, 'w') as f:
                f.write(views.rst_table(rows[calc_id], header))
                print('Generated %s' % f.name)
    else:
        print(views.rst_table(rows['all'], header))