Example #1
    def __init__(self, input_dim, n_features, x_l, dtype='float64', **kwargs):
        super(VCNN, self).__init__(name='variational_convolutional_neural_network',
                                   dtype='float64',
                                   **kwargs)

        # Sizes
        self.input_dim = input_dim
        self.x_dim, self.y_dim = input_dim
        self.n_features = n_features
        self.num_layers = 5

        # Initialize grid and gaussian process
        self.x_l = x_l
        self.gp = gp.GPR(x_l)

        # Change kernel size depending on location (EPO vs. SIO);
        # note this tuple is a kernel size, not a stride.
        if self.x_dim == 40:
            kernel_size = (2, 3)
        else:
            kernel_size = (2, 4)

        # Stack of same-padded convolutions with channel counts capped at 128,
        # plus a final two-channel output layer.
        self.cnn = [
            tf.keras.layers.Conv2D(filters=min(128, 16 * 2**j),
                                   kernel_size=kernel_size,
                                   strides=1,
                                   padding='same')
            for j in range(self.num_layers)
        ]
        self.cnn_final = tf.keras.layers.Conv2D(filters=2,
                                                kernel_size=kernel_size,
                                                strides=1,
                                                padding='same')

        self.batch_norm = [
            tf.keras.layers.BatchNormalization() for j in range(self.num_layers)
        ]
        self.leaky_relu = tf.keras.layers.LeakyReLU()
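A minimal instantiation sketch, under assumptions: the `tf` and `gp` imports from the snippet, and a hypothetical 40×60 grid whose flattened coordinates stand in for `x_l` (the project's real grid is not shown here).

    import numpy as np

    # Hypothetical 40 x 60 grid, flattened to (2400, 2) coordinate pairs for gp.GPR.
    x_l = np.stack(np.meshgrid(np.arange(40.0), np.arange(60.0), indexing='ij'),
                   axis=-1).reshape(-1, 2)

    # x_dim == 40 selects the (2, 3) kernel branch above.
    model = VCNN(input_dim=(40, 60), n_features=3, x_l=x_l)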
Example #2
    def reanalysis(self,
                   week_index,
                   num_iters=100,
                   u0=None,
                   var=None,
                   y=None,
                   silent=True):
        self.X_d, self.X_l, self.y_d, self.y_l = self.data.get_index(
            week_index)
        if y is not None:
            self.y_l, self.y_d = y
        x = self.model(self.X_d)
        x = tf.reshape(x, (-1, 1))
        if u0 is None:
            u = tf.Variable(x)
        else:
            # Reshape only after the None check (reshaping a None u0 would fail).
            u0 = tf.reshape(u0, (-1, 1))
            assert x.shape.as_list() == u0.shape.as_list(), \
                'Shape of u0 should be consistent with the model.'
            u = tf.Variable(u0)
        if var is None:
            var = self.model.var
        var = tf.Variable(var)
        gpr = gp.GPR(self.X_l)
        opt = tf.keras.optimizers.Adam(learning_rate=0.01)
        opt_var = tf.keras.optimizers.Adam(learning_rate=0.01)

        def optimize(u, var):
            with tf.GradientTape(persistent=True) as tape:
                # Condition the GP on the current state u, score the observed
                # data, and regularize the noise variance.
                m, v = gpr(u, self.y_l, noise=var)
                loss = -gpr.log_prob(self.y_d)
                loss += tf.reduce_mean(var - tf.math.log(var))
                # Residual between the state and the model output (unused here).
                z = tf.reshape(u, (-1, 1)) - tf.reshape(x, (-1, 1))
            u_grads = tape.gradient(loss, [u])
            opt.apply_gradients(zip(u_grads, [u]))
            return loss

        assert isinstance(silent, bool), 'silent must be True/False!'
        if not silent: print('Beginning Reanalysis')
        total = 0
        for j in range(num_iters):
            start = time()
            loss = optimize(u, var)
            end = time()
            diff = end - start
            total += diff
            remaining = (total / (j + 1)) * num_iters - total
            hours, rem = divmod(remaining, 3600)
            minutes, seconds = divmod(rem, 60)
            if (j % 10 == 0) and (not silent):
                print(
                    'Epoch: ', j, '/', num_iters,
                    '\t Loss: {:.2f}'.format(loss.numpy()),
                    '\t Time Remaining:',
                    "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes),
                                                    seconds))
        return u, var
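A hedged usage sketch (the object name `trainer` and the week index are illustrative; passing `u0=None` seeds the optimization with the model's own output, per the branch above):

    # Run 200 reanalysis iterations for a hypothetical week, printing progress
    # every 10 steps; returns the optimized state u and the noise variance.
    u, var = trainer.reanalysis(week_index=12, num_iters=200, silent=False)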
Example #3
    def __init__(self,
                 input_dim,
                 n_features,
                 x_l,
                 l1_=1e-4,
                 dtype='float64',
                 **kwargs):
        super(VANN, self).__init__(name='variational_neural_network',
                                   dtype='float64',
                                   **kwargs)

        # Sizes
        self.input_dim = input_dim
        self.n_features = n_features

        # Initialize grid and gaussian process
        self.x_l = x_l
        self.gp = gp.GPR(x_l)

        l1 = tf.keras.regularizers.l1(0.01 * l1_)
        l2 = tf.keras.regularizers.l2(0.001 * l1_)
        # Parameters, w, b, input_noise
        # Linear weights
        self.L = tf.keras.Sequential()
        self.L.add(
            tf.keras.layers.Dense(self.input_dim,
                                  input_shape=(self.n_features *
                                               self.input_dim, ),
                                  activation='relu',
                                  kernel_regularizer=l1,
                                  bias_regularizer=l1))
        self.L.add(
            tf.keras.layers.Dense(self.input_dim,
                                  input_shape=(self.input_dim, ),
                                  activation='relu',
                                  kernel_regularizer=l2,
                                  bias_regularizer=l2))
        self.L.add(
            tf.keras.layers.Dense(
                2 * self.input_dim,
                input_shape=(self.input_dim, ),
                # activation='relu',
                kernel_regularizer=l2,
                bias_regularizer=l2,
            ))
        self.L.add(
            tfp.layers.DistributionLambda(
                lambda t: tfd.MultivariateNormalDiag(
                    loc=t[..., :self.input_dim],
                    scale_diag=np.float64(1e-8) +
                    tf.math.softplus(t[..., self.input_dim:]))))
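Because the stack ends in a DistributionLambda, calling it returns a distribution object rather than a plain tensor. A minimal sketch, assuming the tfp/tfd aliases used above and an illustrative batch of flattened features:

    # Hypothetical forward pass: features has shape (batch, n_features * input_dim).
    dist = model.L(features)
    mean = dist.mean()      # (batch, input_dim) predictive mean
    draw = dist.sample()    # reparameterized draw from the diagonal Gaussian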
Example #4
    def __init__(self, input_dim, n_features, x_l, dtype='float64', **kwargs):
        super(ANN_dropout, self).__init__(name='dropout_neural_network',
                                          dtype='float64',
                                          **kwargs)

        # Sizes
        self.input_dim = input_dim
        self.n_features = n_features

        # Initialize grid and gaussian process
        self.x_l = x_l
        self.gp = gp.GPR(x_l)

        # Weak L2 penalty on weights and biases (an L2 regularizer, despite
        # the original variable name `l1`).
        reg = tf.keras.regularizers.l2(1e-8)

        # Parameters, w, b, input_noise
        # Linear weights
        self.L = tf.keras.Sequential()
        self.L.add(
            tf.keras.layers.Dense(self.input_dim,
                                  input_shape=(self.n_features * self.input_dim,),
                                  activation='relu',
                                  kernel_regularizer=reg,
                                  bias_regularizer=reg))
        self.L.add(tf.keras.layers.Dropout(0.2))
        self.L.add(
            tf.keras.layers.Dense(self.input_dim,
                                  input_shape=(self.input_dim,),
                                  activation='relu',
                                  kernel_regularizer=reg,
                                  bias_regularizer=reg))
        self.L.add(tf.keras.layers.Dropout(0.5))
        self.L.add(
            tf.keras.layers.Dense(self.input_dim,
                                  input_shape=(self.input_dim,)))
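Dropout layers are inactive at inference by default; one common way to get predictive uncertainty from this architecture is Monte Carlo dropout, sketched below as an assumption (the source does not show how the model is actually evaluated):

    # Hypothetical MC-dropout: training=True keeps the Dropout layers stochastic,
    # so repeated forward passes approximate a predictive distribution.
    samples = tf.stack([model.L(x, training=True) for _ in range(50)])
    mc_mean = tf.reduce_mean(samples, axis=0)
    mc_var = tf.math.reduce_variance(samples, axis=0)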
Example #5
    def __init__(self, input_dim, n_features, x_l, dtype='float64', **kwargs):
        super(Linear, self).__init__(name='linear_projection',
                                     dtype='float64',
                                     **kwargs)

        # Sizes
        self.input_dim = input_dim
        self.x_dim, self.y_dim = input_dim
        self.n_features = n_features
        # Initialize grid and gaussian process
        self.x_l = x_l
        self.gp = gp.GPR(x_l)

        # Parameters, w, b
        # Linear weights
        w_init = tf.initializers.GlorotNormal()
        self.A1 = tf.Variable(initial_value=w_init(shape=(self.x_dim,
                                                          self.y_dim,
                                                          self.n_features),
                                                   dtype='float64'),
                              trainable=True,
                              name='linear')
        # bias weights
        b_init = tf.initializers.GlorotNormal()
        self.b1 = tf.Variable(initial_value=b_init(shape=(self.x_dim,
                                                          self.y_dim),
                                                   dtype='float64'),
                              trainable=True,
                              name='bias')

        self.A2 = tf.Variable(initial_value=w_init(shape=(self.x_dim,
                                                          self.y_dim,
                                                          self.n_features)),
                              trainable=True,
                              name='Weight_matrix_2',
                              dtype='float64')

        self.b2 = tf.Variable(initial_value=b_init(shape=(self.x_dim,
                                                          self.y_dim)),
                              trainable=True,
                              name='bias_matrix_2',
                              dtype='float64')
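The weight shapes imply a per-grid-point contraction over the feature axis; a plausible forward pass, stated as an assumption rather than the class's actual call method:

    # Hypothetical mean projection: X has shape (batch, x_dim, y_dim, n_features),
    # A1 has shape (x_dim, y_dim, n_features), and b1 broadcasts over the batch.
    mean = tf.einsum('xyf,bxyf->bxy', model.A1, X) + model.b1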
Example #6
    def __init__(self, input_dim, n_features, x_l, dtype='float64', **kwargs):
        super(DEEPCNN, self).__init__(name='deep_convolutional_neural_network',
                                      dtype='float64',
                                      **kwargs)

        # Sizes
        self.x_dim, self.y_dim = input_dim
        self.input_dim = self.x_dim * self.y_dim
        self.n_features = n_features

        # Initialize grid and gaussian process
        self.x_l = x_l
        self.gp = gp.GPR(x_l)

        if self.x_dim == 40:
            kernel_sizes = [
                np.asarray((2, 3)),
                np.asarray((2, 3)),
                np.asarray((4, 6))
            ]
        else:
            kernel_sizes = [
                np.asarray((1, 6)),
                np.asarray((1, 6)),
                np.asarray((2, 12))
            ]

        self.cnn_initial = tf.keras.layers.Conv2D(8,
                                                  kernel_size=(2, 3),
                                                  strides=1,
                                                  padding='same')
        # Each block uses the base kernel sizes scaled by j (1x and 2x).
        self.cnn1 = [
            self.cnn_block(16, [j * k for k in kernel_sizes], dropout=0.2)
            for j in range(1, 3)
        ]
        self.cnn2 = [
            self.cnn_block(64, [j * k for k in kernel_sizes], dropout=0.1)
            for j in range(1, 3)
        ]
        self.cnn3 = [
            self.cnn_block(128, [j * k for k in kernel_sizes], dropout=0.05)
            for j in range(1, 3)
        ]
        self.cnn4 = [
            self.cnn_block(256, [j * k for k in kernel_sizes], dropout=0.0)
            for j in range(1, 3)
        ]
        self.cnn_final = tf.keras.layers.Conv2D(1,
                                                kernel_size=kernel_sizes[0],
                                                strides=1,
                                                padding='same')

        # Linear estimate of variance
        initializer = tf.keras.initializers.GlorotNormal()
        self.A2 = tf.Variable(initial_value=initializer(shape=(self.x_dim,
                                                               self.y_dim,
                                                               self.n_features)),
                              trainable=True,
                              name='Weight_matrix_2',
                              dtype='float64')

        self.b2 = tf.Variable(initial_value=initializer(shape=(self.x_dim,
                                                               self.y_dim)),
                              trainable=True,
                              name='bias_matrix_2',
                              dtype='float64')

        self.batch = tf.keras.layers.BatchNormalization()
        self.leaky_relu = tf.keras.layers.LeakyReLU()
        self.concat = tf.keras.layers.concatenate
        self.reshape = tf.keras.layers.Reshape(input_dim)
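`cnn_block` is referenced above but not included in this example; a minimal stand-in, purely as an assumption for readers who want to run the snippet (the real helper may differ):

    # Hypothetical cnn_block: one conv branch per kernel size, concatenated on
    # the channel axis, followed by dropout.
    def cnn_block(self, filters, kernel_sizes, dropout=0.0):
        def block(x):
            branches = [
                tf.keras.layers.Conv2D(filters, tuple(int(v) for v in k),
                                       padding='same')(x) for k in kernel_sizes
            ]
            x = tf.keras.layers.concatenate(branches)
            return tf.keras.layers.Dropout(dropout)(x)
        return block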
Example #7
    def __init__(self,
                 input_dim,
                 n_features,
                 x_l,
                 latent_dim=40,
                 dtype='float64',
                 **kwargs):
        super(CVAE,
              self).__init__(name='convolutional_variational_autoencoder',
                             dtype='float64',
                             **kwargs)

        # Sizes
        self.input_dim = input_dim
        self.x_dim, self.y_dim = input_dim
        self.n_features = n_features
        self.latent_dim = latent_dim

        # Initialize grid and gaussian process
        self.x_l = x_l
        self.gp = gp.GPR(x_l)

        if self.x_dim == 40:
            strides = [(2, 3), (2, 2), (2, 2)]
        else:
            strides = [(2, 4), (1, 3), (2, 2)]

        self.prior1 = tfd.Independent(tfd.Normal(loc=tf.zeros(self.latent_dim,
                                                              dtype='float64'),
                                                 scale=1),
                                      reinterpreted_batch_ndims=1)
        self.prior2 = tfd.Independent(tfd.Normal(loc=tf.zeros(self.x_dim *
                                                              self.y_dim,
                                                              dtype='float64'),
                                                 scale=1),
                                      reinterpreted_batch_ndims=1)

        # Note: each strides[i] pair doubles as the kernel size of the
        # stride-1 convolution at the same scale.
        self.encoder = tfk.Sequential([
            tfkl.InputLayer(input_shape=(self.x_dim, self.y_dim,
                                         self.n_features)),
            tfkl.Conv2D(8, strides[0], strides=1, padding='same'),
            tfkl.Conv2D(8, 3, strides=strides[0], padding='same'),
            tfkl.LayerNormalization(),
            tfkl.LeakyReLU(),
            tfkl.Conv2D(16, strides[1], strides=1, padding='same'),
            tfkl.Conv2D(16, 3, strides=strides[1], padding='same'),
            tfkl.LayerNormalization(),
            tfkl.LeakyReLU(),
            tfkl.Conv2D(64, strides[2], strides=1, padding='same'),
            tfkl.Conv2D(64, 3, strides=strides[2], padding='same'),
            tfkl.LayerNormalization(),
            tfkl.LeakyReLU(),
            tfkl.Flatten(),
            tfkl.Dense(2 * self.latent_dim, activation=None),
        ])

        self.decoder = tfk.Sequential([
            tfkl.InputLayer(input_shape=[self.latent_dim]),
            tfkl.Dense(5 * 5 * 64, activation=None),
            tfkl.Reshape([5, 5, 64]),
            tfkl.Conv2DTranspose(64, strides[2], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(64, 3, strides=strides[2], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(16, strides[1], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(16, 3, strides=strides[1], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(8, strides[0], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(8, 3, strides=strides[0], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(3, 1, padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Reshape([self.x_dim, self.y_dim, self.n_features])
        ])

        self.regressor = tfk.Sequential([
            tfkl.InputLayer(input_shape=[self.latent_dim]),
            tfkl.Dense(5 * 5 * 64, activation=None),
            tfkl.Reshape([5, 5, 64]),
            tfkl.Conv2DTranspose(64, strides[2], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(64, 3, strides=strides[2], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(16, strides[1], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(16, 3, strides=strides[1], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(8, strides[0], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(8, 3, strides=strides[0], padding='same'),
            tfkl.LeakyReLU(),
            tfkl.Conv2DTranspose(2, 1, padding='same'),
        ])
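The encoder's final Dense layer emits 2 * latent_dim values; a hedged sketch of the split-and-reparameterize step this implies (the softplus transform is an assumption, mirroring common VAE practice rather than code shown here):

    # Hypothetical encode step: split the encoder output into location and
    # scale, build the posterior, and sample a latent for the decoder/regressor.
    params = model.encoder(x)
    loc, raw_scale = tf.split(params, 2, axis=-1)
    q_z = tfd.Independent(tfd.Normal(loc, tf.nn.softplus(raw_scale)),
                          reinterpreted_batch_ndims=1)
    z = q_z.sample()
    x_recon = model.decoder(z)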
Example #8
    def __init__(self, input_dim, n_features, x_l, dtype='float64', **kwargs):
        super(VLCNN, self).__init__(
            name='variational_linear_convolutional_neural_network',
            dtype='float64',
            **kwargs)

        # Sizes and setup
        self.input_dim = input_dim
        self.x_dim, self.y_dim = input_dim
        self.n_features = n_features
        self.num_dense_layers = num_dense_layers = 0
        self.num_layers = num_layers = 5
        self.batch_bool = True
        self.Dropout = dropout_rate = 0.1

        # Initialize grid and gaussian process
        self.x_l = x_l
        self.gp = gp.GPR(x_l)

        # Alter size of kernels
        if self.x_dim == 40:
            location = "EPO"
        else:
            location = "SIO"

        if num_dense_layers == 0:
            # Fully convolutional path; the tuple is a kernel size, not a stride.
            if location == "EPO":
                kernel_size = (2, 3)
            else:
                kernel_size = (2, 4)
            self.cnn = [
                tf.keras.layers.Conv2D(filters=min(16 * 2**j, 128),
                                       kernel_size=kernel_size,
                                       strides=1,
                                       padding='same')
                for j in range(num_layers)
            ]
            self.cnn_final = tf.keras.layers.Conv2D(filters=1,
                                                    kernel_size=kernel_size,
                                                    strides=1,
                                                    padding='same')
        else:
            if location == "EPO":
                strides = [(2, 3), (2, 2), (2, 2)]
            else:
                strides = [(2, 4), (2, 3), (1, 2)]
            # Note: strides[j] is indexed for j < num_layers, so this branch
            # assumes num_layers <= len(strides); it is unreachable while
            # num_dense_layers is hard-coded to 0 above.
            self.cnn = [
                tf.keras.layers.Conv2D(filters=min(16 * 2**j, 128),
                                       kernel_size=3,
                                       strides=strides[j],
                                       padding='same')
                for j in range(num_layers)
            ]
            self.dense_layers = [
                tf.keras.layers.Dense(
                    int(self.x_dim * self.y_dim /
                        2**(num_dense_layers - j - 1)))
                for j in range(num_dense_layers)
            ]
            self.flatten = tf.keras.layers.Flatten()
        self.batch_norm = [
            tf.keras.layers.BatchNormalization() for j in range(num_layers)
        ]
        self.dropout = [
            tf.keras.layers.Dropout(dropout_rate) for j in range(num_layers)
        ]
        self.leaky_relu = tf.keras.layers.LeakyReLU()

        # Linear variance estimator
        initializer = tf.keras.initializers.GlorotNormal()
        self.A2 = tf.Variable(initial_value=initializer(shape=(self.x_dim,
                                                               self.y_dim,
                                                               self.n_features)),
                              trainable=True,
                              name='Weight_matrix_2',
                              dtype='float64')

        self.b2 = tf.Variable(initial_value=initializer(shape=(self.x_dim,
                                                               self.y_dim)),
                              trainable=True,
                              name='bias_matrix_2',
                              dtype='float64')
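A2 and b2 are labeled a linear variance estimator; a hedged sketch of how such a head is typically evaluated (the softplus for positivity is an assumption, not code shown in the source):

    # Hypothetical variance head: contract the feature axis with A2 at each
    # grid point, add the bias field, and keep the result positive.
    raw = tf.einsum('xyf,bxyf->bxy', model.A2, X) + model.b2
    var = tf.math.softplus(raw)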