Example #1
# This version uses only a small quantity of training data

import os
import tensorflow as tf
import ML_Utilities
import pickle

# How many epochs to train for
n_epochs = 100
# How many fields to train on
n_fields = 100

# Create TensorFlow Dataset object from the prepared training data
(tr_data, n_steps) = ML_Utilities.dataset(purpose='training',
                                          source='20CR2c',
                                          variable='prmsl',
                                          length=n_fields)
tr_data = tr_data.repeat(n_epochs)

# Need to reshape the data to linear, and produce a tuple
#  (source,target) for model
def to_model(ict):
    ict = tf.reshape(ict, [1, 91 * 180])
    return (ict, ict)
tr_data = tr_data.map(to_model)

# Similar dataset from the prepared test data
(tr_test, test_steps) = ML_Utilities.dataset(purpose='test',
                                             source='20CR2c',
                                             variable='prmsl')
tr_test = tr_test.repeat(n_epochs)
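
The excerpt ends here. As a hedged sketch (not part of the original script), the rest of the pipeline would presumably map the test data the same way and then train a model on both datasets; the single 32-neuron Dense layer below is a placeholder assumption, not the original architecture:

tr_test = tr_test.map(to_model)

# Placeholder model: any network mapping flattened 91*180 fields to
#  themselves would slot in here
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation='elu', input_shape=(91 * 180,)),
    tf.keras.layers.Dense(91 * 180)
])
model.compile(optimizer='adadelta', loss='mean_squared_error')
model.fit(x=tr_data,
          epochs=n_epochs,
          steps_per_epoch=n_steps,
          validation_data=tr_test,
          validation_steps=test_steps)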
Example #2
import os
import datetime
import argparse
import numpy
import tensorflow as tf
import iris
import ML_Utilities
import IRData.twcr as twcr  # assumed source of the twcr module used below

# Command-line arguments specifying the field to load
parser = argparse.ArgumentParser()
parser.add_argument("--year", help="Year", type=int, required=True)
parser.add_argument("--month", help="Integer month", type=int, required=True)
parser.add_argument("--day", help="Day of month", type=int, required=True)
parser.add_argument("--hour", help="Hour of day (0 to 23)", type=int, required=True)
parser.add_argument("--member",
                    help="Ensemble member",
                    default=1,
                    type=int,
                    required=False)

args = parser.parse_args()

# Get the 20CR data
ic = twcr.load('prmsl',
               datetime.datetime(args.year, args.month, args.day, args.hour),
               version='2c')
ic = ic.extract(iris.Constraint(member=args.member))

# Normalisation - Pa to mean=0, sd=1 - and back
normalise = ML_Utilities.get_normalise_function(source='20CR2c',
                                                variable='prmsl')
unnormalise = ML_Utilities.get_unnormalise_function(source='20CR2c',
                                                    variable='prmsl')

# Get the original autoencoder
model_save_file = ("%s/Machine-Learning-experiments/simple_autoencoder/" +
                   "saved_models/Epoch_%04d") % (os.getenv('SCRATCH'), 100)
autoencoder_original = tf.keras.models.load_model(model_save_file)

# Run the data through the original autoencoder and convert back to iris cube
pm_original = ic.copy()
pm_original.data = normalise(pm_original.data)
ict = tf.convert_to_tensor(pm_original.data, numpy.float32)
ict = tf.reshape(ict, [1, 91 * 180])  # flatten to the shape the autoencoder expects
result = autoencoder_original.predict_on_batch(ict)
result = tf.reshape(result, [91, 180])
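
The excerpt stops before the conversion back to an iris cube promised in the comment above. A plausible continuation (a sketch only: it assumes eager execution, so the result tensor has a .numpy() method, and pm_reconstructed is a hypothetical name):

# Un-normalise the reconstruction and wrap it back up as an iris cube
pm_reconstructed = ic.copy()
pm_reconstructed.data = unnormalise(result.numpy())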
Example #3
# Uses fields on a rotated pole, so the data boundary
#  is the equator - this limits the problems with boundary conditions.

# This version looks at precip

import os
import tensorflow as tf
import ML_Utilities
import pickle
import numpy

# How many epochs to train for
n_epochs = 50

# Create TensorFlow Dataset object from the prepared training data
(tr_data, n_steps) = ML_Utilities.dataset(purpose='training',
                                          source='rotated_pole/20CR2c',
                                          variable='prate')
tr_data = tr_data.repeat(n_epochs)


# Also produce a tuple (source,target) for model
def to_model(ict):
    ict = tf.reshape(ict, [79, 159, 1])
    return (ict, ict)


tr_data = tr_data.map(to_model)
tr_data = tr_data.batch(1)

# Similar dataset from the prepared test data
(tr_test, test_steps) = ML_Utilities.dataset(purpose='test',
                                             source='rotated_pole/20CR2c',
                                             variable='prate')
tr_test = tr_test.repeat(n_epochs)
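
The [79, 159, 1] reshape and the batch(1) call point at a convolutional model. As an illustration only (the layers here are assumptions, not the original architecture), a minimal convolutional autoencoder preserving that shape could look like:

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(8, (3, 3), padding='same', activation='elu',
                           input_shape=(79, 159, 1)),
    tf.keras.layers.Conv2D(1, (3, 3), padding='same')
])
model.compile(optimizer='adadelta', loss='mean_squared_error')

'same' padding is used throughout because pooling/upsampling pairs would not round-trip the odd 79x159 grid cleanly.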
Example #4
# Very simple autoencoder for 20CR air.2m fields.
# Single, fully-connected layer as encoder+decoder, 32 neurons.
# Very unlikely to work well at all, but this isn't about good
#  results, it's about getting started.

import os
import tensorflow as tf
import ML_Utilities
import pickle

# How many epochs to train for
n_epochs = 100

# Create TensorFlow Dataset object from the prepared training data
(tr_data, n_steps) = ML_Utilities.dataset(purpose='training',
                                          source='20CR2c',
                                          variable='air.2m')
tr_data = tr_data.repeat(n_epochs)


# Need to reshape the data to linear, and produce a tuple
#  (source,target) for model
def to_model(ict):
    ict = tf.reshape(ict, [1, 18048])
    return (ict, ict)


tr_data = tr_data.map(to_model)

# Similar dataset from the prepared test data
(tr_test, test_steps) = ML_Utilities.dataset(purpose='test',
                                             source='20CR2c',
                                             variable='air.2m')
tr_test = tr_test.repeat(n_epochs)
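
The header describes the architecture: a single fully-connected layer down to 32 neurons as the encoder, and a single fully-connected layer back up as the decoder. A sketch of one reading of that description (the optimiser and activation are assumptions):

autoencoder = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation='elu',
                          input_shape=(18048,)),  # encoder: 32 neurons
    tf.keras.layers.Dense(18048)                  # decoder: back to the full field
])
autoencoder.compile(optimizer='adadelta', loss='mean_squared_error')
autoencoder.fit(x=tr_data,
                epochs=n_epochs,
                steps_per_epoch=n_steps)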