def get_compressed(year, month, day, hour):
    # Load the four analysis variables (ensemble member 1 only) from
    # 20CR version 2c, and regrid each to the analysis grid
    prmsl = twcr.load('prmsl', datetime.datetime(year, month, day, hour),
                      version='2c')
    prmsl = to_analysis_grid(prmsl.extract(iris.Constraint(member=1)))
    t2m = twcr.load('air.2m', datetime.datetime(year, month, day, hour),
                    version='2c')
    t2m = to_analysis_grid(t2m.extract(iris.Constraint(member=1)))
    u10m = twcr.load('uwnd.10m', datetime.datetime(year, month, day, hour),
                     version='2c')
    u10m = to_analysis_grid(u10m.extract(iris.Constraint(member=1)))
    v10m = twcr.load('vwnd.10m', datetime.datetime(year, month, day, hour),
                     version='2c')
    v10m = to_analysis_grid(v10m.extract(iris.Constraint(member=1)))
    insol = to_analysis_grid(load_insolation(year, month, day, hour))
    # Convert the validation data into tensor format
    t2m_t = tf.convert_to_tensor(normalise_t2m(t2m.data), numpy.float32)
    t2m_t = tf.reshape(t2m_t, [79, 159, 1])
    prmsl_t = tf.convert_to_tensor(normalise_prmsl(prmsl.data), numpy.float32)
    prmsl_t = tf.reshape(prmsl_t, [79, 159, 1])
    u10m_t = tf.convert_to_tensor(normalise_wind(u10m.data), numpy.float32)
    u10m_t = tf.reshape(u10m_t, [79, 159, 1])
    v10m_t = tf.convert_to_tensor(normalise_wind(v10m.data), numpy.float32)
    v10m_t = tf.reshape(v10m_t, [79, 159, 1])
    insol_t = tf.convert_to_tensor(normalise_insolation(insol.data),
                                   numpy.float32)
    insol_t = tf.reshape(insol_t, [79, 159, 1])
    # Stack the five fields into a single input tensor
    ict = tf.concat([t2m_t, prmsl_t, u10m_t, v10m_t, insol_t], 2)  # Now [79,159,5]
    ict = tf.reshape(ict, [1, 79, 159, 5])
    # Run the input through the full autoencoder, and keep the
    # latent-space representation from the encoder half as well
    result = autoencoder.predict_on_batch(ict)
    result = tf.reshape(result, [79, 159, 5])
    ls = encoder.predict_on_batch(ict)
    # Convert the encoded fields back to unnormalised cubes
    t2m_r = t2m.copy()
    t2m_r.data = unnormalise_t2m(result.numpy()[:, :, 0])
    prmsl_r = prmsl.copy()
    prmsl_r.data = unnormalise_prmsl(result.numpy()[:, :, 1])
    u10m_r = u10m.copy()
    u10m_r.data = unnormalise_wind(result.numpy()[:, :, 2])
    v10m_r = v10m.copy()
    v10m_r.data = unnormalise_wind(result.numpy()[:, :, 3])
    return {'t2m': t2m_r, 'prmsl': prmsl_r,
            'u10m': u10m_r, 'v10m': v10m_r, 'ls': ls}
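# A hedged usage sketch (not in the original source): round-trip one field
# through get_compressed and report the reconstruction error. The function
# name 'check_compression' and the test date are illustrative assumptions;
# 'autoencoder' and 'encoder' are assumed to be already-loaded models.
def check_compression(year=1987, month=3, day=12, hour=6):
    compressed = get_compressed(year, month, day, hour)
    original = twcr.load('prmsl', datetime.datetime(year, month, day, hour),
                         version='2c')
    original = to_analysis_grid(original.extract(iris.Constraint(member=1)))
    rms = numpy.sqrt(numpy.mean(
        (compressed['prmsl'].data - original.data) ** 2))
    print("PRMSL reconstruction RMS error: %.1f Pa" % rms)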
def random_state():
    # Sample a random point in the 100-dimensional latent space
    ls = tf.convert_to_tensor(numpy.random.normal(size=100), numpy.float32)
    ls = tf.reshape(ls, [1, 100])
    # Decode it into a [79, 159, 5] set of normalised fields
    result = generator.predict_on_batch(ls)
    result = tf.reshape(result, [79, 159, 5])
    # Pack the decoded fields into unnormalised cubes
    t2m = dummy_cube()
    t2m.data = unnormalise_t2m(result.numpy()[:, :, 0])
    prmsl = dummy_cube()
    prmsl.data = unnormalise_prmsl(result.numpy()[:, :, 1])
    u10m = dummy_cube()
    u10m.data = unnormalise_wind(result.numpy()[:, :, 2])
    v10m = dummy_cube()
    v10m.data = unnormalise_wind(result.numpy()[:, :, 3])
    return (t2m, prmsl, u10m, v10m)
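# Sketch of possible use (assumed, not from the source): seed numpy's RNG
# before calling random_state to make the draw repeatable, then inspect the
# generated fields. The function name 'demo_random_state' is hypothetical.
def demo_random_state(seed=42):
    numpy.random.seed(seed)
    (t2m, prmsl, u10m, v10m) = random_state()
    print("Generated PRMSL range: %.0f to %.0f Pa" %
          (prmsl.data.min(), prmsl.data.max()))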
def ls_load_at_timepoint(year, month, day, hour):
    # Load a saved model state (latent-space vector and decoded fields)
    # from its pickle file
    pfile = ("%s/ML_GCM/GCM_mucdf/" +
             "%04d-%02d-%02d:%02d.pkl") % (
                 os.getenv('SCRATCH'), year, month, day, hour)
    res = pickle.load(open(pfile, 'rb'))
    ls = res['latent_s']
    # Unpack the decoded state fields into unnormalised cubes
    t2m = dummy_cube()
    t2m.data = unnormalise_t2m(res['state_v'][0, :, :, 0])
    prmsl = dummy_cube()
    prmsl.data = unnormalise_prmsl(res['state_v'][0, :, :, 1])
    u10m = dummy_cube()
    u10m.data = unnormalise_wind(res['state_v'][0, :, :, 2])
    v10m = dummy_cube()
    v10m.data = unnormalise_wind(res['state_v'][0, :, :, 3])
    return (ls, t2m, prmsl, u10m, v10m)
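# A speculative sketch (not in the original source): because the generator
# maps any point in the 100-d latent space to a full field set, latent
# vectors from two saved timepoints can be blended and decoded. The function
# name 'interpolate_states' is hypothetical; 'generator' is assumed to be an
# already-loaded model, and point1/point2 are (year, month, day, hour) tuples.
def interpolate_states(weight, point1, point2):
    (ls1, _, _, _, _) = ls_load_at_timepoint(*point1)
    (ls2, _, _, _, _) = ls_load_at_timepoint(*point2)
    # Linear blend of the two latent vectors
    ls = ls1 * (1.0 - weight) + ls2 * weight
    result = generator.predict_on_batch(tf.reshape(ls, [1, 100]))
    return tf.reshape(result, [79, 159, 5])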
insol_t = tf.convert_to_tensor(normalise_insolation(insol.data), numpy.float32)
insol_t = tf.reshape(insol_t, [79, 159, 1])

# Get autoencoded versions of the validation data
model_save_file = ("%s/ML_GCM/autoencoder.tst/" +
                   "Epoch_%04d/autoencoder") % (
                       os.getenv('SCRATCH'), args.epoch)
autoencoder = tf.keras.models.load_model(model_save_file, compile=False)
ict = tf.concat([t2m_t, prmsl_t, u10m_t, v10m_t, insol_t], 2)  # Now [79,159,5]
ict = tf.reshape(ict, [1, 79, 159, 5])
result = autoencoder.predict_on_batch(ict)
result = tf.reshape(result, [79, 159, 5])

# Convert the encoded fields back to unnormalised cubes
t2m_r = t2m.copy()
t2m_r.data = unnormalise_t2m(result.numpy()[:, :, 0])
prmsl_r = prmsl.copy()
prmsl_r.data = unnormalise_prmsl(result.numpy()[:, :, 1])
u10m_r = u10m.copy()
u10m_r.data = unnormalise_wind(result.numpy()[:, :, 2])
v10m_r = v10m.copy()
v10m_r.data = unnormalise_wind(result.numpy()[:, :, 3])

# Plot the two fields and a scatterplot for each variable
fig = Figure(figsize=(9.6 * 1.2, 10.8),
             dpi=100,
             facecolor=(0.88, 0.88, 0.88, 1),
             edgecolor=None)
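# A possible sanity check alongside the plots (assumed, not in the original):
# report the reconstruction error for each variable as an area-mean RMS
# difference between the original and autoencoded cubes.
for (name, orig, recon) in (('t2m', t2m, t2m_r), ('prmsl', prmsl, prmsl_r),
                            ('u10m', u10m, u10m_r), ('v10m', v10m, v10m_r)):
    rms = numpy.sqrt(numpy.mean((recon.data - orig.data) ** 2))
    print("%5s RMS reconstruction error: %g" % (name, rms))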
    return dummy_cube

# Load the latent-space representation, and convert it back into normal space
model_save_file = ("%s/ML_GCM/predictor.delta/" +
                   "Epoch_%04d/generator") % (os.getenv('SCRATCH'), args.epoch)
generator = tf.keras.models.load_model(model_save_file, compile=False)

# Random latent state
ls = tf.convert_to_tensor(numpy.random.normal(size=100), numpy.float32)
ls = tf.reshape(ls, [1, 100])
result = generator.predict_on_batch(ls)
result = tf.reshape(result, [79, 159, 5])
t2m = dummy_cube()
t2m.data = unnormalise_t2m(result.numpy()[:, :, 0])
prmsl = dummy_cube()
prmsl.data = unnormalise_prmsl(result.numpy()[:, :, 1])
u10m = dummy_cube()
u10m.data = unnormalise_wind(result.numpy()[:, :, 2])
v10m = dummy_cube()
v10m.data = unnormalise_wind(result.numpy()[:, :, 3])

# Land mask, for the map background
mask = iris.load_cube("%s/fixed_fields/land_mask/opfc_global_2019.nc" %
                      os.getenv('SCRATCH'))

# Define the figure (page size, background color, resolution, ...)
fig = Figure(