Example #1
    def sample_variance(self, x, y):
        _, _, decoder_lambda, decoder_dense = self.decoder.layers
        input1 = K.random_normal(shape=(1000, self.latent_dim))
        input2 = K.random_normal(shape=(1000, self.latent_dim))

        z = decoder_lambda([input1, input2])
        x_decoded_mean = decoder_dense(z)

        return K.mean(K.std(x_decoded_mean, axis=0))
Example #2
 def call(self, X):
     perturbation = self.sigma_kernel * K.random_normal(
         shape=(self.input_dim, self.units), mean=0, stddev=1)
     perturbed_kernel = self.kernel + perturbation
     output = K.dot(X, perturbed_kernel)
     if self.use_bias:
         bias_perturbation = self.sigma_bias * K.random_normal(
             shape=(self.units, ), mean=0, stddev=1)
         perturbed_bias = self.bias + bias_perturbation
         output = K.bias_add(output, perturbed_bias)
     if self.activation is not None:
         output = self.activation(output)
     return output
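The constructor and build() of this noisy layer are not shown; below is a minimal sketch of the weights such a call() would need, assuming self.units, self.use_bias and self.activation are set in __init__ (the weight names and the 0.017 initial sigma are illustrative assumptions, not taken from the original):

    def build(self, input_shape):  # hypothetical companion to the call() above
        # assumes: from keras import initializers
        self.input_dim = input_shape[-1]
        self.kernel = self.add_weight(name='kernel',
                                      shape=(self.input_dim, self.units),
                                      initializer='glorot_uniform')
        self.sigma_kernel = self.add_weight(name='sigma_kernel',
                                            shape=(self.input_dim, self.units),
                                            initializer=initializers.Constant(0.017))
        if self.use_bias:
            self.bias = self.add_weight(name='bias', shape=(self.units,),
                                        initializer='zeros')
            self.sigma_bias = self.add_weight(name='sigma_bias', shape=(self.units,),
                                              initializer=initializers.Constant(0.017))
        super().build(input_shape)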
Example #3
 def noised():
   stddev = np.sqrt(self.rate / (1.0 - self.rate))
   return inputs * K.random_normal(
       shape=array_ops.shape(inputs),
       mean=1.0,
       stddev=stddev,
       dtype=inputs.dtype)
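This noised() closure is the multiplicative variant used by GaussianDropout: 1-centred Gaussian noise whose stddev sqrt(rate / (1 - rate)) matches the variance of regular dropout. In the surrounding layer it is normally gated with K.in_train_phase so inference stays deterministic; a sketch of that wrapper (the enclosing call() signature is an assumption):

    def call(self, inputs, training=None):
        def noised():
            stddev = np.sqrt(self.rate / (1.0 - self.rate))
            return inputs * K.random_normal(shape=array_ops.shape(inputs),
                                            mean=1.0,
                                            stddev=stddev,
                                            dtype=inputs.dtype)
        # noisy in training, identity at inference time
        return K.in_train_phase(noised, inputs, training=training)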
Example #4
def _do_variational_autoencoding(input_signal, latent_dim=2):
    x = layers.Conv2D(filters=64,
                      kernel_size=(3, 3),
                      strides=2,
                      padding='same')(input_signal)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(filters=32,
                      kernel_size=(3, 3),
                      strides=2,
                      padding='same')(x)
    x = layers.LeakyReLU()(x)
    shape_before_flattening = K.int_shape(x)
    x = layers.Flatten()(x)
    x = layers.Dense(units=32, activation='relu')(x)
    z_mean = layers.Dense(units=latent_dim)(x)
    z_log_var = layers.Dense(units=latent_dim)(x)
    # Reparameterization trick, wrapped in a Lambda layer so the sampling stays
    # part of the Keras graph; exp(0.5 * z_log_var) converts log-variance to stddev.
    def sampling(args):
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                                  mean=0.,
                                  stddev=1.)
        return z_mean + K.exp(0.5 * z_log_var) * epsilon

    z = layers.Lambda(sampling)([z_mean, z_log_var])
    x = layers.Dense(np.prod(shape_before_flattening[1:]),
                     activation='relu')(z)
    x = layers.Reshape(shape_before_flattening[1:])(x)
    x = layers.Conv2DTranspose(filters=32,
                               kernel_size=(3, 3),
                               strides=2,
                               padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(filters=64,
                               kernel_size=(3, 3),
                               strides=2,
                               padding='same',
                               activation='relu')(x)
    return x
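A short sketch of wiring this helper into a trainable model; the 28x28x1 input shape and the Model import are assumptions, not part of the original function:

# assumes: from keras.models import Model
input_signal = layers.Input(shape=(28, 28, 1))
reconstruction = _do_variational_autoencoding(input_signal, latent_dim=2)
vae = Model(input_signal, reconstruction)  # the KL term would still need to be added via add_loss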
Example #5
    def call(self, x):
        if len(x) != 2:
            raise Exception('input layers must be a list: mean and stddev')
        if len(x[0].shape) != 2 or len(x[1].shape) != 2:
            raise Exception(
                'input shape is not a vector [batchSize, latentSize]')

        mean = x[0]
        stddev = x[1]

        if self.reg == 'bvae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(
                1 + stddev - K.square(mean) - K.exp(stddev), axis=-1)
            # use beta to force less usage of vector space:
            # also try to use <capacity> dimensions of the space:
            latent_loss = self.beta * K.abs(latent_loss - self.capacity /
                                            self.shape.as_list()[1])
            self.add_loss(latent_loss, x)
        elif self.reg == 'vae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(
                1 + stddev - K.square(mean) - K.exp(stddev), axis=-1)
            self.add_loss(latent_loss, x)
        if self.random:
            # 'reparameterization trick':
            epsilon = K.random_normal(shape=(self.batchSize, self.latentSize),
                                      mean=0.,
                                      stddev=1.)
            return mean + K.exp(stddev) * epsilon
        else:  # do not perform random sampling, simply grab the impulse value
            return mean + 0 * stddev  # Keras needs the *0 so the gradient is not None
Example #6
 def sampling(args):
     z_mean, z_log_var = args
     batch = K.shape(z_mean)[0]
     dim = K.int_shape(z_mean)[1]
     # by default, random_normal has mean=0 and std=1.0
     epsilon = K.random_normal(shape=(batch, dim))
     return z_mean + K.exp(0.5 * z_log_var) * epsilon
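This is the sampling helper from the stock Keras VAE example; it is attached to the graph through a Lambda layer, roughly as below (assuming latent_dim and the two Dense outputs z_mean and z_log_var already exist):

z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])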
Example #7
    def _build_latent_vars(self, mu_z, log_var_z, epsilon_std=1., kl_scale=1.):
        """ Build keras variables representing the latent space

        First, calculate the KL divergence from the input mean and log variance
        and add this to the model loss via a KLDivergenceLayer. Then sample an epsilon
        and perform a location-scale transformation to obtain the latent embedding, z.

        Args:
            epsilon_std: standard deviation of p(epsilon)
            kl_scale: weight of KL divergence loss

        Returns:
            Variables representing z and epsilon

        """

        # mu_z, log_var_z, kl_batch  = KLDivergenceLayer()([mu_z, log_var_z], scale=kl_scale)
        lmda_func = lambda inputs: -0.5 * K.sum(1 + inputs[1] - K.square(inputs[0]) - K.exp(inputs[1]), axis=1)

        kl_batch = Lambda(lmda_func, name='kl_calc')([mu_z, log_var_z])
        kl_batch = Reshape((1,), name='kl_reshape')(kl_batch)

        # get standard deviation from log variance:
        sigma_z = Lambda(lambda lv: K.exp(0.5 * lv))(log_var_z)

        # re-parametrization trick ( z = mu_z + eps * sigma_z)
        eps = Input(tensor=K.random_normal(stddev=epsilon_std,
                                           shape=(K.shape(mu_z)[0], self.latentDim_)))

        eps_z = Multiply()([sigma_z, eps])  # scale by epsilon sample
        z = Add()([mu_z, eps_z])

        return z, eps, kl_batch
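Outside this method, the per-sample kl_batch still has to enter the training objective; one plausible way to finish (the Model assembly below is an assumption, it is not shown in the class) is:

        # hypothetical: `vae` is the Model assembled elsewhere in this class
        vae.add_loss(kl_scale * K.mean(kl_batch))
        vae.compile(optimizer='adam', loss='binary_crossentropy')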
Example #8
    def _call_one_layer(self, inputs, flatten_memory, training, ws):
        dp_mask = self.get_dropout_mask_for_cell(
            inputs, training, count=1)
        rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
            flatten_memory, training, count=1)

        if 0 < self.dropout < 1:
            inputs = inputs * dp_mask[0]
        if 0 < self.recurrent_dropout < 1:
            flatten_memory = flatten_memory * rec_dp_mask[0]

        memory = array_ops.reshape(
            flatten_memory, shape=[-1, self.num_memory_slots, self.units])

        input_gate, forget_gate = self._input_and_forget_gates(inputs, memory, ws)
        hs, new_memory = self._attend_over_memory(inputs, memory, ws)

        next_memory = input_gate * new_memory + forget_gate * memory

        flatten_next_memory = array_ops.reshape(
            next_memory, shape=[-1, self.num_memory_slots * self.units])

        mus_and_log_sigmas = K.dot(hs, ws["random_kernel"])
        mus_and_log_sigmas = K.bias_add(mus_and_log_sigmas, ws["random_bias"])
        mus, log_sigmas = array_ops.split(mus_and_log_sigmas, 2, axis=-1)
        sigmas = K.log(1.0 + K.exp(log_sigmas + self.sigma_bias))
        zs = K.random_normal(shape=K.shape(mus)) * sigmas + mus

        return zs, mus, sigmas, hs, flatten_next_memory
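The expression K.log(1.0 + K.exp(...)) used for sigmas is just softplus, which keeps the standard deviations positive; the Keras backend has a dedicated call that could replace it:

sigmas = K.softplus(log_sigmas + self.sigma_bias)  # same value as K.log(1.0 + K.exp(...)), numerically safer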
Example #9
    def call(self, x):
        if len(x) != 2:
            raise Exception('input layers must be a list: mean and stddev')
        if len(x[0].shape) != 2 or len(x[1].shape) != 2:
            raise Exception('input shape is not a vector [batchSize, latentSize]')

        mean = x[0]
        stddev = x[1]

        if self.reg == 'bvae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(1 + stddev
                                - K.square(mean)
                                - K.exp(stddev), axis=-1)
            # use beta to force less usage of vector space:
            # also try to use <capacity> dimensions of the space:
            latent_loss = self.beta * K.abs(latent_loss - self.capacity/self.shape.as_list()[1])
            self.add_loss(latent_loss, x)
        elif self.reg == 'vae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(1 + stddev
                                - K.square(mean)
                                - K.exp(stddev), axis=-1)
            self.add_loss(latent_loss, x)

        epsilon = K.random_normal(shape=self.shape,
                              mean=0., stddev=1.)
        if self.random:
            # 'reparameterization trick':
            return mean + K.exp(stddev) * epsilon
        else: # do not perform random sampling, simply grab the impulse value
            return mean + 0*stddev # Keras needs the *0 so the gradient is not None
Example #10
 def sampling(args):
     z_mean_, z_log_var_ = args
     batch_size = shape(z_mean_)[0]
     epsilon = random_normal(shape=(batch_size, latent_size),
                             mean=0.,
                             stddev=0.01)
     return z_mean_ + exp(z_log_var_ / 2) * epsilon
Example #11
def sampling(args, batch_size=batch_size, latent_dim=latent_dim, epsilon_std=epsilon_std):
    z_mean, z_log_var = args
    
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., stddev=epsilon_std)
    
    return z_mean + K.exp(z_log_var) * epsilon
Example #12
 def __init__(self, tensor, by_even_and_odd=False):
     if by_even_and_odd:
         self.mean, self.logsd = split_channels_by_even_and_odd(tensor)
     else:
         self.mean, self.logsd = split_channels(tensor)
     self.eps = K.random_normal(K.shape(
         self.mean))  # eps acts like a sampling temperature
     self.sample = self.mean + K.exp(self.logsd) * self.eps
Example #13
    def call(self, inputs):

        if self.factorised:
            noise_in = self.scale_noise(
                K.random_normal(shape=(self.input_dim.value, )))
            noise_out = self.scale_noise(K.random_normal(shape=(self.units, )))
            kernel_noise = noise_in[:, None] * noise_out[None, :]
            bias_noise = noise_out
        else:
            kernel_noise = K.random_normal(shape=(self.input_dim.value,
                                                  self.units))
            bias_noise = K.random_normal(shape=(self.units, ))

        out = K.dot(inputs, self.kernel_mu + self.kernel_sigma * kernel_noise)
        if self.use_bias:
            out = K.bias_add(out,
                             self.bias_mu + self.bias_sigma * bias_noise,
                             data_format='channels_last')
        if self.activation is not None:
            out = self.activation(out)
        return out
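The scale_noise helper used in the factorised branch is not shown; in the NoisyNet paper the factorised noise is scaled by f(x) = sign(x) * sqrt(|x|), so a plausible sketch (an assumption, not the original code) is:

    def scale_noise(self, x):
        # f(x) = sign(x) * sqrt(|x|), the scaling used for factorised Gaussian noise
        return K.sign(x) * K.sqrt(K.abs(x))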
Example #14
    def sampling(args):
        """Define a sampling for our lambda layer.

        Taken from:
        https://github.com/keras-team/keras/master/examples/variational_autoencoder.py
        """
        z_mean, z_log_var = args
        batch = K.shape(z_mean)[0]
        dim = K.int_shape(z_mean)[1]
        # by default, random_normal has mean=0 and std=1.0
        epsilon = K.random_normal(shape=(batch, dim))
        return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #15
 def sampling(self, args):
     """Reparameterization trick by sampling fr an isotropic unit Gaussian.
     # Arguments:
         args (tensor): mean and log of variance of Q(z|X)
     # Returns:
         z (tensor): sampled latent vector
     """
     z_mean, z_log_var = args
     batch = K.shape(z_mean)[0]
     dim = K.int_shape(z_mean)[1]
     # by default, random_normal has mean=0 and std=1.0
     epsilon = K.random_normal(shape=(batch, dim))
     return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #16
def sampling(args):
    """
    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """
    from tensorflow.python.keras import backend as K
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #17
    def _call_lstm_core(self, inputs, flatten_memory, training, ws, num_memory_slots=None):
        dp_mask = self.get_dropout_mask_for_cell(
            inputs, training, count=1)
        rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
            flatten_memory, training, count=1)

        if 0 < self.dropout < 1:
            inputs = inputs * dp_mask[0]
        if 0 < self.recurrent_dropout < 1:
            flatten_memory = flatten_memory * rec_dp_mask[0]

        h_tm1, c_tm1 = array_ops.split(flatten_memory, 2, axis=1)

        z = K.dot(inputs, ws['kernel'])
        z += K.dot(h_tm1, ws['recurrent_kernel'])
        z = K.bias_add(z, ws['bias'])

        z = array_ops.split(z, num_or_size_splits=4, axis=1)
        z0, z1, z2, z3 = z
        z1 += self.forget_bias

        i = self.recurrent_activation(z0)
        f = self.recurrent_activation(z1)
        c = f * c_tm1 + i * self.activation(z2)
        o = self.recurrent_activation(z3)
        hs = o * self.activation(c)

        flatten_next_memory = K.concatenate([hs, c])

        mus_and_log_sigmas = K.dot(hs, ws["random_kernel"])
        mus_and_log_sigmas = K.bias_add(mus_and_log_sigmas, ws["random_bias"])
        mus, log_sigmas = array_ops.split(mus_and_log_sigmas, 2, axis=-1)
        sigmas = K.log(1.0 + K.exp(log_sigmas + self.sigma_bias))
        zs = K.random_normal(shape=K.shape(mus)) * sigmas + mus

        return zs, mus, sigmas, hs, flatten_next_memory
Example #18
 def call(self, inputs):
     mean, log_var = inputs
     epsilon = K.random_normal(tf.shape(log_var), mean=0., stddev=1.)
     sample = epsilon * K.exp(
         log_var / 2) + mean  # equivalent to e * std + mean
     return sample
Example #19
def sampling(z_mean, z_log_sigma, batch_size, latent_dim):
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0.,
                              stddev=1.)
    return z_mean + K.exp(
        z_log_sigma / 2) * epsilon  # equivalent to e * std + mean
Example #20
 def sampling(self, args):
     z_mean, z_logvar = args
     epsilon = K.random_normal(shape=(self.latent_dim, ))
     return z_mean + K.exp(z_logvar * 0.5) * epsilon
Example #21
def sampling(args):
    """ Given a normal mean/variance, pull a random sample """
    z_mean_, z_log_var_ = args
    epsilon = K.random_normal(shape=(BATCH_SIZE, LATENT_DIM), mean=0.)
    return z_mean_ + K.exp(z_log_var_ / 2) * epsilon
Example #22
class KLDivergenceLayer(Layer):  # assumes keras.layers.Layer is imported as Layer
    """ Identity layer that adds the KL divergence of (mu, log_var) to the model loss. """
    def call(self, inputs):
        mu, log_var = inputs
        kl_batch = -.5 * backend.sum(
            1 + log_var - backend.square(mu) - backend.exp(log_var), axis=-1)

        self.add_loss(backend.mean(kl_batch), inputs=inputs)

        return inputs


# Loss Function #
# y_true - True labels #
# y_pred - Predicted labels #
def nll(y_true, y_pred):
    return backend.sum(backend.binary_crossentropy(y_true, y_pred), axis=-1)


# Encoder #
x = Input(shape=(original_dim, ))
h = Dense(intermediate_dim, activation='relu')(x)

z_mu = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

(z_mu, z_log_var) = KLDivergenceLayer()([z_mu, z_log_var])

# Reparameterization Trick #
z_sigma = Lambda(lambda t: backend.exp(.5 * t))(z_log_var)

eps = Input(tensor=backend.random_normal(
    stddev=epsilon_std, shape=(backend.shape(x)[0], latent_dim)))
z_eps = Multiply()([z_sigma, eps])
z = Add()([z_mu, z_eps])
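To complete this VAE, z is passed through a decoder and the model is trained with the nll loss defined above; a minimal sketch, assuming a mirror-image one-layer decoder and that Model is imported from keras.models (both are assumptions, not shown in the snippet):

decoder_h = Dense(intermediate_dim, activation='relu')(z)
x_decoded = Dense(original_dim, activation='sigmoid')(decoder_h)

vae = Model(inputs=[x, eps], outputs=x_decoded)
vae.compile(optimizer='rmsprop', loss=nll)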
Example #23
def sampling(arg):
    mean = arg[0]
    logvar = arg[1]
    epsilon = K.random_normal(shape=K.shape(mean), mean=0., stddev=1.)
    return mean + K.exp(0.5 * logvar) * epsilon
Example #24
 def sampling(args):
     z_mean, z_log_var = args
     return K.random_normal(
         shape=K.shape(z_log_var), mean=0., stddev=noise_std) * K.exp(
             .5 * z_log_var) + z_mean
Example #25
 def sampling(self, args):
     self.z_mean, self.z_log_var = args
     batch = K.shape(self.z_mean)[0]
     dim = K.int_shape(self.z_mean)[1]
     epsilon = K.random_normal(shape=(batch, dim))
     return self.z_mean + K.exp(0.5 * self.z_log_var) * epsilon
Example #26
 def noised():
   return inputs + K.random_normal(shape=array_ops.shape(inputs), mean=0., stddev=stddev)
Example #27
 def sampling(self, args):
     z_mean, z_log_var = args
     epsilon = K.random_normal(shape=(K.shape(z_mean)[0], self.z_dim),
                               mean=0.,
                               stddev=1.)
     return (z_mean + K.exp(z_log_var / 2.) * epsilon)
Example #28
 def reparameterization_trick():
     epsilon = K.random_normal(shape=logvar.shape, mean=0., stddev=1.)
     stddev = K.exp(logvar * 0.5)
     return mean + stddev * epsilon
Example #29
 def noised():
     eps = K.random_uniform(shape=[1], maxval=self.alpha)
     return inputs + K.random_normal(
         shape=K.shape(inputs), mean=0., stddev=eps)
Example #30
 def sample_z(self, args):
     mu, std = args
     eps = K.random_normal(shape=(self.batch_size, self.n_dim), mean=0., stddev=1.)
     return mu + K.exp(0.5 * std) * eps
Example #31
 def noised():
     return inputs + K.random_normal(shape=array_ops.shape(inputs),
                                     mean=0.,
                                     stddev=self.stddev,
                                     dtype=inputs.dtype)
Example #32
 def noised():
   return inputs + K.random_normal(
       shape=array_ops.shape(inputs), mean=0., stddev=self.stddev)
Example #33
 def noised():
   stddev = np.sqrt(self.rate / (1.0 - self.rate))
   return inputs * K.random_normal(
       shape=array_ops.shape(inputs), mean=1.0, stddev=stddev)