Example #1
    def call(self, inputs, states, training=None):
        # Integration constant alpha = dt / tau; the time step differs between training and test
        if training:
            self.alpha = SGD_p['train_t_step'] / SGD_p['tau']
        else:
            self.alpha = SGD_p['test_t_step'] / SGD_p['tau']

        x_prev = states[0]

        # Effective recurrent weights: rectified plastic weights masked by _M_rec,
        # plus the fixed weights, all scaled by rec_scale
        self.W_rec = self.rec_scale * (tf.multiply(
            self._M_rec, K.relu(self.W_rec_plastic)) + self._W_fixed)

        # Diagnostic means of the noise, recurrent, and input terms (not used in the update below)
        t = K.mean(
            K.sqrt(K.constant(2.0 * self.alpha * SGD_p['rr_noise_std']**2)) *
            K.random_normal(K.shape(x_prev)))
        p = K.mean(K.dot(K.relu(x_prev), K.dot(self.Dale_rec, self.W_rec)))
        q = K.mean(K.dot(inputs, K.relu(self.W_in)))

        # Leaky integration: blend the previous state with recurrent drive, input drive, and noise
        x = ((1 - self.alpha) * x_prev) + \
            self.alpha * (
                K.dot(K.relu(x_prev), K.dot(self.Dale_rec, self.W_rec)) +
                K.dot(inputs, K.relu(self.W_in)) +
                K.sqrt(K.constant(2.0 * self.alpha * SGD_p['rr_noise_std']**2)) *
                    K.random_normal(K.shape(x_prev))
            )

        r = K.relu(x)                                            # firing rates
        z = K.dot(r, K.dot(self.Dale_out, K.relu(self.W_out)))   # readout

        rz = K.mean(r)   # diagnostic means (unused)
        zz = K.mean(z)
        return z, [x]
Example #2
def vae_sample(args):
    z_mean, z_noise = args
    # Reparameterization: epsilon ~ N(0, epsilon_std^2), z = mu + exp(log_var / 2) * epsilon
    epsilon = K.random_normal(shape=K.shape(z_mean),
                              mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_noise / 2) * epsilon
Example #3
def make_ptregression_data(input_arr, mu=1.10, scale=0.1):
    # Normalization constants for the energy/pT and angular columns
    energy_norm = 50.
    angle_norm = 1.

    # Keep only events whose last column lies within 1 unit of 90
    condition = (input_arr[:, -1] - 90.)
    arr = input_arr[np.squeeze(np.abs(condition) < 1)]

    arr[:,0] = arr[:,0] / energy_norm
    arr[:,1] = arr[:,1] / angle_norm
    arr[:,2] = arr[:,2] / angle_norm
    arr[:,3] = arr[:,3] / energy_norm
    arr[:,4] = arr[:,4] / angle_norm
    arr[:,5] = arr[:,5] / angle_norm
        
    x_orig = np.concatenate(
            [
                np.expand_dims(arr[:,0],axis=1),
                np.expand_dims(arr[:,1],axis=1),
                np.expand_dims(arr[:,2],axis=1),
                np.expand_dims(arr[:,3],axis=1),
                np.expand_dims(arr[:,4],axis=1),
                np.expand_dims(arr[:,5],axis=1),
            ],
            axis=1,
            )
    
    # Per-event Gaussian scale factors sf ~ N(mu, scale^2) used to smear the two pT columns.
    # Note: K.random_normal returns TensorFlow tensors, so the smeared quantities below
    # are tensors rather than NumPy arrays.
    eps1 = K.random_normal(shape=(x_orig.shape[0], 1))
    sf1 = mu + scale * eps1

    eps2 = K.random_normal(shape=(x_orig.shape[0], 1))
    sf2 = mu + scale * eps2

    smear_pt1 = tf.math.multiply(np.expand_dims(arr[:,0],axis=1),sf1) 
    eta1 = np.expand_dims(arr[:,1],axis=1)
    phi1 = np.expand_dims(arr[:,2],axis=1)
    smear_pt2 = tf.math.multiply(np.expand_dims(arr[:,3],axis=1),sf2) 
    eta2 = np.expand_dims(arr[:,4],axis=1)
    phi2 = np.expand_dims(arr[:,5],axis=1)

    x_smear = np.concatenate(
            [
                smear_pt1,
                np.expand_dims(arr[:,1],axis=1),
                np.expand_dims(arr[:,2],axis=1),
                smear_pt2,
                np.expand_dims(arr[:,4],axis=1),
                np.expand_dims(arr[:,5],axis=1),
            ],
            axis=1,
            )
    
    # 2 * pt1 * pt2 * (cosh(dEta) - cos(dPhi)): the squared invariant mass of the pair
    # (massless approximation), computed from the smeared transverse momenta
    smear_mll = 2 * np.multiply(
            np.multiply(smear_pt1, smear_pt2),
            np.cosh(eta1 - eta2) - np.cos(phi1 - phi2),
            )

    return x_orig,x_smear,smear_mll
Example #4
    def build(self, input_shape):
        attention_size = int(input_shape[1])  # here taken from the time dimension
        hidden_size = int(input_shape[2])     # D value - hidden size of the RNN layer

        # Trainable parameters
        self.w_omega = tf.Variable(K.random_normal([hidden_size, attention_size], stddev=0.1))
        self.b_omega = tf.Variable(K.random_normal([attention_size], stddev=0.1))
        self.u_omega = tf.Variable(K.random_normal([attention_size], stddev=0.1))
        super(AttentionLayer, self).build(input_shape)
Example #5
    def call(self, x):
        if self.random_gain:
            noise_x = x + K.random_normal(shape=K.shape(x),
                                          mean=0.0,
                                          stddev=np.random.uniform(
                                              0.0, self.power))
        else:
            noise_x = x + K.random_normal(
                shape=K.shape(x), mean=0.0, stddev=self.power)

        return K.in_train_phase(noise_x, x)
Example #6
 def call(self, X):
     perturbation = self.sigma_kernel * K.random_normal(shape=(self.input_dim, self.units), mean=0, stddev=1)
     perturbed_kernel = self.kernel + perturbation
     output = K.dot(X, perturbed_kernel)
     if self.use_bias:
         bias_perturbation = self.sigma_bias * K.random_normal(shape=(self.units,), mean=0, stddev=1)
         perturbed_bias = self.bias + bias_perturbation
         output = K.bias_add(output, perturbed_bias)
     if self.activation is not None:
         output = self.activation(output)
     return output
Example #7
    def call(self, x, mask=None):
        # Sample a weight matrix per example: W = W_mu + eps * exp(W_log_sigma / 2)
        e = K.random_normal((K.shape(x)[0], self.input_dim, self.output_dim))
        w = K.expand_dims(self.W_mu, 0) + e * K.expand_dims(K.exp(self.W_log_sigma / 2), 0)
        output = K.batch_dot(x, w)
        # At test time, use the mean weights
        test_output = K.dot(x, self.W_mu)
        if self.bias:
            eb = K.random_normal((K.shape(x)[0], self.output_dim))
            b = K.expand_dims(self.b_mu, 0) + eb * K.expand_dims(K.exp(self.b_log_sigma / 2), 0)
            output += b
            test_output += self.b_mu

        return self.activation(K.in_train_phase(output, test_output))
Example #8
  def rayleigh_block_fading(self, symbol_tensor):
    # Rayleigh block fading: treat the first/second halves of each symbol vector as the
    # real/imaginary parts and multiply by a complex channel gain h = h1 + j*h2,
    # with h1, h2 ~ N(0, 0.5) held fixed over the block.
    batch_size = K.shape(symbol_tensor)[0]
    enc_dim = K.shape(symbol_tensor)[1]

    h1_tensor = K.random_normal(shape=(batch_size,), stddev=np.sqrt(0.5))
    h2_tensor = K.random_normal(shape=(batch_size,), stddev=np.sqrt(0.5))

    # p2 = (-imag, real): the rotated symbol needed for complex multiplication in real form
    p2 = K.concatenate([-symbol_tensor[:, enc_dim // 2:], symbol_tensor[:, :enc_dim // 2]])

    t1 = h1_tensor[:, None] * symbol_tensor + h2_tensor[:, None] * p2

    return t1
Example #9
    def test_before_after_train_step(self):
        t = self
        invoked_before, invoked_after = False, False

        class SubscriberBefore:
            def update(self, epoch, iteration, batch):
                nonlocal invoked_before
                t.assertEqual(2, epoch)
                t.assertEqual(1, iteration)
                invoked_before = True

        class SubscriberAfter(SubscriberBefore):
            def update(self, loss_g, loss_d, epoch, iteration, batch):
                nonlocal invoked_after
                super(SubscriberAfter, self).update(epoch, iteration, batch)
                t.assertIsNotNone(loss_g)
                t.assertIsNotNone(loss_d)
                invoked_after = True

        self.dojo.register('before_train_step', SubscriberBefore().update)
        self.dojo.register('after_train_step', SubscriberAfter().update)

        batch = K.random_normal([4, 3, 3, 3])
        self.dojo.train_on_batch(2, 1, batch)

        self.assertEqual(True, invoked_before)
        self.assertEqual(True, invoked_after)
Example #10
def generate_c(x):
    # Split into mean and log-sigma halves, then reparameterize: c = mean + exp(log_sigma) * eps
    mean = x[:, :128]
    log_sigma = x[:, 128:]
    stddev = K.exp(log_sigma)
    epsilon = K.random_normal(shape=K.shape(mean))
    c = stddev * epsilon + mean
    return c
Example #11
    def call(self, x, inter=None):

        # Encoding
        if self.in_channels == 3:
            x1 = relu(self.bne1(self.conv1(x)))
        else:
            x1 = relu(self.bne1(self.conv1_clean(x)))
        x2 = relu(self.bne2(self.conv2(x1)))
        x3 = relu(self.bne3(self.conv3(x2)))
        x3 = self.flatten(x3)
        x4 = relu(self.bne4(self.fce1(x3)))
        x5 = self.fce2(x4)
        means = self.mean_params(x5)
        stddev = tf.math.exp(0.5 * self.stddev_params(x5))
        eps = random_normal(tf.shape(stddev))

        # Decoding
        z = means + eps * stddev
        if inter is not None:
            z = tf.keras.layers.add([z, inter])
        x6 = relu(self.bnd1(self.fcd1(z)))
        x7 = relu(self.bnd2(self.fcd2(x6)))
        x7 = self.reshape(x7)
        x8 = relu(self.bnd3(self.deconv1(x7)))
        x9 = relu(self.bnd4(self.deconv2(x8)))
        if self.out_channels == 3:
            x10 = self.deconv3(x9)
        else:
            x10 = self.deconv3_clean(x9)

        return x10, means, stddev, z
Example #12
    def call(self, x, *args, **kwargs):
        """
        Samples a latent vector from a multi-dimensional Gaussian distribution.

        Parameters
        ----------
        x : list of tensors
            A list consisting of 2 tensors representing the latent
            distributions output within an encoder network: z_mu and
            z_log_sigma. Both tensors must be of rank 2 where the first axis
            indexes the data sample and the second indexes the latent dimension.

        Returns
        -------
        z : tensor
            A rank-2 tensor representing a random sample from a
            multivariate Gaussian distribution parametrized by the
            input tensors <x>. The first axis indexes the data sample and
            the second indexes the latent dimension.
        """
        assert (isinstance(x, list))
        z_mu, z_log_sigma = x
        eps = K.random_normal(K.shape(z_log_sigma))
        z = Add()([z_mu, Multiply()([K.exp(z_log_sigma), eps])])
        return z
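Since the Add and Multiply layers above are instantiated inside call, the same sampling step can also be written with plain backend ops; the helper below is an illustrative sketch (the name sample_latent is not from the original code), equivalent in its result to the layer-based version:

from tensorflow.keras import backend as K

def sample_latent(z_mu, z_log_sigma):
    # Backend-only equivalent of the Add()/Multiply() combination above:
    # z = z_mu + exp(z_log_sigma) * eps, with eps ~ N(0, I)
    eps = K.random_normal(K.shape(z_log_sigma))
    return z_mu + K.exp(z_log_sigma) * eps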
Example #13
        def train_step(inp, tar):
            # optimizer F
            with tf.GradientTape() as tape:
                outputs = net(inp)
                loss = criterion(tar, outputs)
                train_accuracy(tar, outputs)
                train_loss(loss)

                f_variables = net.downsampling_layers.trainable_variables
                f_variables += net.drift.trainable_variables
                f_variables += net.fc_layers.trainable_variables
                gradients = tape.gradient(loss, f_variables)
                optimizer_f.apply_gradients(zip(gradients, f_variables))

            # optimizer G
            # training with out-of-domain data
            if args.training_out:
                with tf.GradientTape() as tape:
                    g_variables = net.diffusion.trainable_variables
                    label = ones * real_label  # real = 0.
                    loss_in = criterion2(label, net(inp, training_diffusion=True))
                    train_loss_in(loss_in)
                    gradients_in = tape.gradient(loss_in, g_variables)
                    optimizer_g.apply_gradients(zip(gradients_in, g_variables))

                with tf.GradientTape() as tape:
                    label = label * 0 + fake_label  # fake = 1.
                    inputs_out = 2 * K.random_normal((args.batch_size, args.imageSize, args.imageSize, 1)) + inp
                    loss_out = criterion2(label, net(inputs_out, training_diffusion=True))
                    train_loss_out(loss_out)
                    gradients_out = tape.gradient(loss_out, g_variables)
                    optimizer_g.apply_gradients(zip(gradients_out, g_variables))
Example #14
 def sample_point_from_normal_distribution(args):
     # Reparameterization: mu + exp(log_var / 2) * eps, eps ~ N(0, 1)
     mu, log_variance = args
     epsilon = K.random_normal(shape=K.shape(mu),
                               mean=0.,
                               stddev=1.)
     sampled_point = mu + K.exp(log_variance / 2) * epsilon
     return sampled_point
Example #15
def sampling(args: tuple):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0.,
                              stddev=epsilon_std)

    return z_mean + K.exp(z_log_var / 2) * epsilon
Example #16
 def sampling(args):
     z_mean, z_log_sigma = args
     epsilon = K.random_normal(shape=(batch_size, latent_dim),
                               mean=0.,
                               stddev=1.)
     print("epsilon shape", epsilon.shape)
     return z_mean + K.exp(z_log_sigma) * epsilon
Example #17
 def call(self, x, training=None):
     mask = K.random_uniform(K.shape(x)[:-1], 0.0, 1.0)
     mask = K.expand_dims(mask, -1)
     mask = K.repeat_elements(mask, K.int_shape(x)[-1], -1)
     rand_x = K.switch(K.less(mask, self.rate),
                       K.random_normal(K.shape(x), 0.0, 1.0), x)
     return K.in_train_phase(rand_x, x, training=training)
Example #18
def encoder_loss(latent):
    '''
    Compute the maximum mean discrepancy (MMD) loss for the InfoVAE.
    '''
    def compute_kernel(x, y):
        x_size = K.shape(x)[0]
        y_size = K.shape(y)[0]
        dim = K.shape(x)[1]
        tiled_x = K.tile(K.reshape(x, [x_size, 1, dim]), [1, y_size, 1])
        tiled_y = K.tile(K.reshape(y, [1, y_size, dim]), [x_size, 1, 1])
        return K.exp(-K.mean(K.square(tiled_x - tiled_y), axis=2) /
                     K.cast(dim, 'float32'))

    def compute_mmd(x, y):
        x_kernel = compute_kernel(x, x)
        y_kernel = compute_kernel(y, y)
        xy_kernel = compute_kernel(x, y)
        return K.mean(x_kernel) + K.mean(y_kernel) - 2 * K.mean(xy_kernel)

    # First, sample reference points from a standard normal prior
    batch_size = K.shape(latent)[0]
    latent_dim = K.int_shape(latent)[1]
    true_samples = K.random_normal(shape=(batch_size, latent_dim),
                                   mean=0.,
                                   stddev=1.)
    # Then compute the MMD between the prior samples and the encoded latents
    loss_mmd = compute_mmd(true_samples, latent)

    return loss_mmd
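A latent-only regularizer like encoder_loss is typically attached to a model with add_loss; the sketch below shows one way to wire it with the TF2 tf.keras functional API (layer sizes and names such as encoder_inputs and x_hat are illustrative placeholders, not taken from the original project):

from tensorflow.keras import layers, Model

encoder_inputs = layers.Input(shape=(784,))
h = layers.Dense(128, activation='relu')(encoder_inputs)
z = layers.Dense(8)(h)                        # deterministic latent code
x_hat = layers.Dense(784, activation='sigmoid')(z)

infovae = Model(encoder_inputs, x_hat)
infovae.add_loss(encoder_loss(z))             # MMD regularizer on the latent code
infovae.compile(optimizer='adam', loss='binary_crossentropy')   # reconstruction term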
Example #19
 def call(self, input):
     z_mean = input[:, :self.dim]
     z_log_var = input[:, self.dim:]
     batch = tf.shape(z_mean)[0]
     dim = tf.shape(z_mean)[1]
     # One noise sample per latent dimension
     epsilon = K.random_normal(shape=(batch, dim))
     return z_mean + tf.exp(0.5 * z_log_var) * epsilon
Example #20
    def build_generator_encoder(self):
        # Note: `tf` here is assumed to be an alias for tensorflow.keras (tf.Input,
        # tf.layers.Dense, tf.Model, ...) and `backend` the Keras backend module.
        init = tf.initializers.RandomNormal(stddev=0.02)  # defined but not used below

        input_enc = tf.Input(shape=(self.npcs,))
        nNodes = self.initNNodes
        flag = 0
        while nNodes > latent_dim:
            if flag == 0:
                enc = tf.layers.Dense(nNodes)(input_enc)
                flag = 1
            else:
                enc = tf.layers.Dense(nNodes)(enc)
            enc = tf.layers.LeakyReLU(self.alpha)(enc)
            enc = tf.layers.BatchNormalization()(enc)
            nNodes = nNodes // 2  # keep an integer unit count for Dense
        mu = tf.layers.Dense(latent_dim)(enc)
        sigma = tf.layers.Dense(latent_dim)(enc)

        # The encoded latent representation (the "fake" sample) is later judged by the
        # discriminator against "real" samples drawn from an arbitrary Gaussian prior
        latent_repr = tf.layers.Lambda(lambda p: p[0] + backend.random_normal(
            backend.shape(p[0])) * backend.exp(p[1] / 2))([mu, sigma])
        generator_encoder = tf.Model(input_enc, latent_repr, name='Encoder')
        generator_encoder.summary()
        return generator_encoder
Example #21
def spectral_normalization(kernel, u=None, niter=DEFAULT_NITER_SPECTRAL):
    """
    Normalize the kernel to have it's max eigenvalue == 1.

    Args:
        kernel: the kernel to normalize
        u: initialization for the max eigen vector
        niter: number of iteration

    Returns:
        the normalized kernel w_bar, it's shape, the maximum eigen vector, and the
        maximum eigen value

    """
    W_shape = kernel.shape
    if u is None:
        niter *= 2  # if u was not known increase number of iterations
        u = K.random_normal(shape=tuple([1, W_shape[-1]]))
    # Flatten the Tensor
    W_reshaped = K.reshape(kernel, [-1, W_shape[-1]])
    _u, _v = _power_iteration(W_reshaped, u, niter)
    # Calculate Sigma
    sigma = K.dot(_v, W_reshaped)
    sigma = K.dot(sigma, K.transpose(_u))
    # sigma/=self.kCoefLip
    # normalize it
    W_bar = W_reshaped / sigma
    return W_bar, _u, sigma
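The helper _power_iteration is referenced above but not shown; the sketch below is an assumption of what such a helper typically looks like, consistent with the shapes used in spectral_normalization (u of shape (1, cols), v of shape (1, rows)), and not necessarily the project's exact implementation:

from tensorflow.keras import backend as K

def _power_iteration(W, u, niter):
    # W is the flattened 2-D kernel (rows x cols); u is the current estimate of the
    # leading singular vector, shape (1, cols).
    _u = u
    for _ in range(niter):
        _v = K.l2_normalize(K.dot(_u, K.transpose(W)))   # shape (1, rows)
        _u = K.l2_normalize(K.dot(_v, W))                # shape (1, cols)
    return _u, _v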
Example #22
 def encode(self, x):
     x = self.q_img(x)
     means = self.mean_params(x)
     stddev = tf.math.exp(0.5 * self.stddev_params(x))
     eps = random_normal(tf.shape(stddev))
     z = means + eps * stddev
     return z, means, stddev
Example #23
    def _init_model_fc(self):
        """ Initialize fully connected architecture.
        """
        # encoder architecture
        x = Input(shape=(self.input_dim,))
        h = x
        for layer_dim in self.layers:
            h = Dense(layer_dim, activation='relu')(h)

        # decoder architecture
        latent_input = Input(shape=(self.latent_dim,))
        decoder = latent_input
        for layer_dim in self.decoder_layers:
            decoder = Dense(layer_dim, activation='relu')(decoder)
        decoder = Dense(self.input_dim, activation='sigmoid')(decoder)
        decoder = Model(latent_input, decoder)

        # variational layer
        z_mu = Dense(self.latent_dim)(h)
        z_log_var = Dense(self.latent_dim)(h)
        z_mu, z_log_var = KLDivergenceLayer()([z_mu, z_log_var])
        # sampling
        z_sigma = Lambda(lambda t: K.exp(.5*t))(z_log_var)
        eps = Input(tensor=K.random_normal(stddev=self.epsilon_std,
                                           shape=(K.shape(x)[0], self.latent_dim)))
        z_eps = Multiply()([z_sigma, eps])
        z = Add()([z_mu, z_eps])
        x_pred = decoder(z)
        autoencoder = Model([x, eps], x_pred)
        encoder = Model(x, z_mu)
        return autoencoder, encoder, decoder
Example #24
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean = 0 and std = 1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
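A helper like sampling above is typically wrapped in a Lambda layer so that it runs as part of the model graph; the wiring below is a minimal sketch (input and layer sizes are illustrative):

from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model

latent_dim = 2
inputs = Input(shape=(784,))
h = Dense(256, activation='relu')(inputs)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

# Lambda calls sampling() on [z_mean, z_log_var] during the forward pass
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')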
Example #25
def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean = 0 and std = 1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #26
def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]

    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #27
    def call(self, inputs, **kwargs):  #pylint: disable=unused-argument, arguments-differ
        mean, log_var = inputs
        batch = K.shape(mean)[0]
        dim = K.int_shape(mean)[1:]
        epsilon = K.random_normal(shape=(batch, ) + dim)
        result = mean + K.exp(0.5 * log_var) * epsilon

        if self._use_kl_loss:
            # this loss function makes the mean and variance match a Normal(0, 1) distribution
            kl_loss = K.square(mean) + K.exp(log_var) - 1 - log_var
            kl_loss = K.sum(kl_loss, axis=-1)
            kl_loss = 0.5 * K.mean(kl_loss)

            # reduce relative weight compared to mean squared error
            kl_loss /= K.cast(batch * dim[0] * dim[1] * dim[2],
                              dtype='float32')

            kl_loss *= self._kl_enabled

            self.add_loss(kl_loss)
            self.add_metric(kl_loss,
                            aggregation='mean',
                            name=self.name + '_kl_loss')

        return result
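For reference, the kl_loss expression above is (up to the subsequent sum, batch mean, and rescaling) the per-dimension closed form of the KL divergence between the encoder's Gaussian and a standard normal:

$$\tfrac{1}{2}\left(\mu^2 + \sigma^2 - 1 - \log \sigma^2\right) = D_{\mathrm{KL}}\big(\mathcal{N}(\mu, \sigma^2)\,\|\,\mathcal{N}(0, 1)\big), \qquad \sigma^2 = e^{\texttt{log\_var}}.$$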
Example #28
 def p_from_norm_dist(args):
     mu, log_var = args
     epsilon = K.random_normal(shape=K.shape(mu),
                               mean=0.,
                               stddev=1.)
     point = mu + K.exp(log_var / 2) * epsilon
     return point
def sampling(args):
    mu, sigma = args
    batch = K.shape(mu)[0]
    dim = K.int_shape(mu)[1]
    epsilon = K.random_normal(shape=(batch, dim))

    return mu + K.exp(0.5 * sigma) * epsilon
def perturbation(x):
    # Scale x by sqrt(1 - sigma) and add zero-mean Gaussian noise of variance sigma, so that
    # (for independent, unit-power x) the output power is approximately preserved
    w = K.random_normal(shape=(channel, 2),
                        mean=0.0,
                        stddev=sigma**0.5,
                        dtype=None)
    xp = ((1 - sigma)**0.5) * x + w
    return xp