def do_index(args):
    f = Field(args.param)
    args.target = os.path.realpath(args.target)
    print('index', f, args.target)
    if not os.path.exists(args.target):
        print("%s does not exist, skipped" % (args.target,))
    else:
        f.index_grib_file(args.target)
def best_match():
    data = request.get_json(force=True)
    date_clustering = data.get('date_clustering', 4)
    if 'field2' in data:
        data2 = data['field2']
        weights = data['weights']
        return best_match_2(Field(**data), Field(**data2),
                            data, data2, weights, date_clustering)
    else:
        return best_match_1(Field(**data), data, date_clustering)
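# A sketch of the JSON body best_match expects (key names taken from the code
# above; the placeholder values are illustrative, not real examples):
#
#   {
#       "param": "...", "domain": "...", "dataset": "...",              # passed to Field(**data)
#       "date_clustering": 4,                                           # optional, defaults to 4
#       "field2": {"param": "...", "domain": "...", "dataset": "..."},  # optional second field
#       "weights": [...]                                                # required when "field2" is present
#   }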
def do_seed(args):
    from datetime import datetime
    from dateutil.rrule import rrule, DAILY

    f = Field(args.param, args.domain, args.dataset)
    # for t in (12,):  # range(0, 24, 6):
    # for t in range(0, 24, 6):
    for t in range(0, 24, 1):
        a = datetime(1979, 1, 1, t)
        b = datetime(2018, 10, 31, t)
        f.seed(rrule(DAILY, dtstart=a, until=b))
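# For reference: with a dtstart that carries the hour, e.g.
#     rrule(DAILY, dtstart=datetime(1979, 1, 1, 12), until=datetime(1979, 1, 3, 12))
# dateutil yields one datetime per day at that hour (12:00 on 1, 2 and 3 January 1979),
# so the loop above seeds each hour of the day separately over the whole period.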
def plot_field(date, param, domain, dataset):
    f = Field(param, domain, dataset)
    path, position = f.grib_path_offset(date=date)
    print("GRIB", path, position)

    if request.args.get('depth'):
        import pywt
        from grib import GribFile

        depth = int(request.args.get('depth')) + 1
        grib = GribFile(path).at_offset(position)
        # Smooth the field by zeroing the finest wavelet detail levels before plotting.
        coeffs = pywt.wavedec2(grib.array, 'haar')
        for n in range(1, depth):
            coeffs[-n] = tuple(np.zeros_like(v) for v in coeffs[-n])
        field = pywt.waverec2(coeffs, 'haar')
        output = maps.cached_plot(field,
                                  conf.ANALOGUES_CACHE,
                                  key='%s-%s-%s' % (os.path.basename(path), position, depth),
                                  contour=f.metadata.get('contour'),
                                  metadata=grib.metadata,
                                  format=request.args.get('format', 'png'),
                                  area=f.area)
    else:
        output = maps.cached_plot(path,
                                  conf.ANALOGUES_CACHE,
                                  position=position,
                                  contour=f.metadata.get('contour'),
                                  format=request.args.get('format', 'png'),
                                  area=f.area)

    return send_from_directory(conf.ANALOGUES_CACHE, os.path.basename(output))
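# Minimal sketch (not wired into any route) of the wavelet smoothing performed in
# plot_field above: decompose with 'haar', zero the finest detail levels, then
# reconstruct. Only numpy and pywt are assumed; the helper name is illustrative.
def _wavelet_smooth_sketch(array, levels):
    import numpy as np
    import pywt

    coeffs = pywt.wavedec2(array, 'haar')
    # coeffs[-1] holds the finest (cH, cV, cD) detail tuple, coeffs[-2] the next, etc.
    for n in range(1, levels + 1):
        coeffs[-n] = tuple(np.zeros_like(v) for v in coeffs[-n])
    return pywt.waverec2(coeffs, 'haar')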
def index(param='tp', domain=DEFAULT_DOMAIN, level=3):
    f = Field(param, domain)

    options = {
        'param': param,
        'level': level,
        'wavelet': 'haar',
        'units': f.units.to_json(),
        'domain': domain,
    }

    params = OrderedDict()
    for k, v in sorted(PARAMS.items(), key=lambda x: x[1].title):
        params[k] = v

    domains = OrderedDict()
    for k, v in sorted(DOMAINS.items(), key=lambda x: x[1].title, reverse=True):
        domains[k] = v

    regimes = OrderedDict()
    for k, v in sorted(REGIMES.items(), key=lambda x: x[1].title):
        regimes[k] = v

    methods = OrderedDict()
    # for k, v in sorted(METHODS.items(), key=lambda x: x[1].title):
    for k in range(0, 10):
        methods[str(k)] = str(k)

    args = {
        'options': json.dumps(options, indent=4),
        'params': params,
        'regimes': regimes,
        'domains': domains,
        'methods': methods,
    }
    args.update(options)
    args['use_ws'] = os.environ.get('ANALOGUE_USE_WS', '1')

    return render_template("index.html", **args)
def plot_field2(date, param, domain, dataset, param2):
    f1 = Field(param, domain, dataset)
    f2 = Field(param2, domain, dataset)

    # Order the two fields by z-index before plotting them together.
    if f1.zindex > f2.zindex:
        f1, f2 = f2, f1

    path1, position1 = f1.grib_path_offset(date=date)
    path2, position2 = f2.grib_path_offset(date=date)

    png = maps.cached_plot(
        [path1, path2],
        conf.ANALOGUES_CACHE,
        position=[position1, position2],
        contour=[f1.metadata.get('contour'), f2.metadata.get('contour')],
        format=request.args.get('format', 'png'),
        area=f1.area)

    return send_from_directory(conf.ANALOGUES_CACHE, os.path.basename(png))
def get_field():
    data = request.get_json(force=True)
    print(data)

    date = None
    regime = data.pop('regime', None)
    if regime:
        f = Regime.lookup(regime).field
        date = Regime.lookup(regime).date
    else:
        f = Field(**data)

    date = data.get('date', date)
    if date:
        date = dateutil.parser.parse(date)
        # date = date.replace(hour=12)
    print(date)

    field = f.sample(date=date)
    values = field.array

    # if data.get('date'):
    #     values = field.array
    # else:
    #     values = f.mean_field

    # path = conf.data_path("%s_%s.json" % (data['domain'], data['param'],))

    clim = dict(
        maximum=f.maximum,
        minimum=f.minimum,
        mean=f.mean,
        mean_field=[a for a in f.mean_field.flatten()],
        maximum_field=[a for a in f.maximum_field.flatten()],
        minimum_field=[a for a in f.minimum_field.flatten()],
        gradient_ns_max=[a for a in f.gradient_ns_max.flatten()],
        gradient_sn_max=[a for a in f.gradient_sn_max.flatten()],
        gradient_we_max=[a for a in f.gradient_we_max.flatten()],
        gradient_ew_max=[a for a in f.gradient_ew_max.flatten()],
        gradient_ns_min=[a for a in f.gradient_ns_min.flatten()],
        gradient_sn_min=[a for a in f.gradient_sn_min.flatten()],
        gradient_we_min=[a for a in f.gradient_we_min.flatten()],
        gradient_ew_min=[a for a in f.gradient_ew_min.flatten()],
    )

    # try:
    #     with open(path) as f:
    #         clim = json.loads(f.read())
    # except:
    #     pass

    # constraints = {}
    # for c in ['max', 'min', 'mean']:
    #     path = conf.data_path("%s_%s.%s.numpy" % (data['domain'], data['param'], c,))
    #     z = np.load(path)
    #     constraints[c] = [a for a in z.flatten()]

    metadata = dict()
    metadata.update(field.metadata)
    metadata.update(f.metadata)

    data = json.dumps({
        'metadata': metadata,
        'values': [a for a in values.flatten()],
        'shape': list(values.shape),
        'domain': list(field.domain),
        'grid': list(field.grid),
        'fielddate': field.date,
        'clim': clim,
        'options': {
            'param': f.param,
            'units': f.units.to_json(),
            'domain': f.domain,
            'dataset': f.dataset,
            'zindex': f.zindex,
        },
        'min': np.min(values),
        'max': np.max(values),
    })

    return Response(data, mimetype='application/json')
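# A sketch of the JSON body get_field expects (structure taken from the code above;
# the placeholder values are illustrative only):
#
#   {"param": "...", "domain": "...", "dataset": "...", "date": "2017-01-15"}
#
# or, to resolve the field and date through a named regime instead:
#
#   {"regime": "..."}
#
# The response is a JSON document with 'metadata', 'values', 'shape', 'domain',
# 'grid', 'fielddate', 'clim', 'options', 'min' and 'max' entries.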
def mars_request_for_missing_fields(args):
    assert args.param
    assert args.target

    f = Field(args.param)

    query_0 = text("""
        select * from {table};
    """.format(table=f.file_table))

    query_10 = text("""
        update {table} set file_id=null where file_id=:file_id;
    """.format(table=f.fingerprint_table))

    query_11 = text("""
        delete from {table} where id=:file_id;
    """.format(table=f.file_table))

    # Find database entries whose GRIB file is no longer on disk.
    missing = set()
    with cdsdb.begin() as connection:
        for e in connection.execute(query_0):
            if not os.path.exists(e[1]):
                print("MISSING file %s" % (e[1],))
                missing.add(e[0])

    # Unlink and delete at most 500 missing files per run.
    if missing:
        print("CLEANUP MISSING:", len(missing))
        missing = list(missing)[:500]
        with cdsdb.begin() as connection:
            for m in missing:
                connection.execute(query_10, file_id=m)
                connection.execute(query_11, file_id=m)
        print("CLEANUP MISSING:", len(missing))

    args.target = os.path.realpath(args.target)

    query_2 = text("""
        select valid_date from {table}
        where file_id is null
        order by updated
        limit :limit;
    """.format(table=f.fingerprint_table))

    retriever = Param.lookup(f.param).retriever(cdsdb)
    retriever.domain(Domain.lookup(f.domain))
    retriever.dataset(Dataset.lookup(f.dataset))

    # Collect the dates and times of fingerprints that have no file yet.
    dates = set()
    times = set()
    valid_dates = []
    with cdsdb.begin() as connection:
        for valid_date in connection.execute(query_2, limit=366):
            d = valid_date[0]
            dates.add(cdsdb.sql_date_to_yyyymmdd(d))
            times.add(cdsdb.sql_date_to_hhmm(d))
            valid_dates.append(d)

    # dates = list(dates)[:400]
    # times = ['12']

    retriever.dates(list(dates))
    retriever.times(list(times))
    retriever.execute(args.target)

    if not os.path.exists(args.target):
        print("%s does not exist, skipped" % (args.target,))
    else:
        f.index_grib_file(args.target)

    # Mark the processed valid dates as updated.
    insql, vals = cdsdb.sql_in_statement('valid_dates', valid_dates)

    query_6 = text("""
        update {table} set updated={now} where valid_date in {insql};
    """.format(table=f.fingerprint_table, now=cdsdb.sql_now, insql=insql))
    # print(query_6)

    with cdsdb.begin() as connection:
        connection.execute(query_6, **vals)