def test_categorical_loss_with_probs():
    softmax = SoftMax()
    shape = (200, 2)
    y_true = tf.math.round(softmax(normal(shape)))
    logits = normal(shape)

    my_loss = CategoricalCrossentropy(from_logits=False)
    # for one-hot targets, KL divergence and categorical cross-entropy coincide
    # (the entropy of y_true is zero), so the gradients below must match
    tf_loss = losses.KLDivergence()

    with tf.GradientTape(persistent=True) as tape:
        tape.watch(logits)
        pred = softmax(logits)
        loss_tf = tf_loss(y_true=y_true, y_pred=pred)

    # the loss values themselves are not compared: floating-point and
    # numerical-stability (clipping) differences between the two
    # implementations make a direct comparison unreliable
    # loss_my = my_loss(y_true, y_pred=pred)
    # assert np.allclose(loss_my, loss_tf, rtol=1e-05, atol=1e-08)

    dz_tf, da_tf = tape.gradient(loss_tf, [logits, pred])
    da_my = my_loss.get_gradient(y_true, pred)

    assert np.allclose(da_my, da_tf, rtol=1e-05, atol=1e-08)

    # Integration with softmax test

    # reshape to match batch jacobian shape
    da = tf.reshape(da_my, shape=(shape[0], 1, shape[1]))

    dz = da @ softmax.get_jacobian(logits)

    # reshape back to matrix
    dz = tf.reshape(dz, shape)

    assert np.allclose(dz, dz_tf)
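
# Note (not from the original tests): the analytic softmax Jacobian used in the
# chain-rule product above can also be cross-checked against TensorFlow's
# autodiff; tf.GradientTape.batch_jacobian produces the same per-sample
# (batch, units, units) Jacobian.
def reference_softmax_jacobian(logits):
    with tf.GradientTape() as tape:
        tape.watch(logits)
        probs = tf.nn.softmax(logits)
    # J[b, i, j] = d probs[b, i] / d logits[b, j]
    return tape.batch_jacobian(probs, logits)
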
def test_mse_loss():
    shape = (500, 1)
    y_true = normal(shape)
    inputs = normal(shape)

    relu = ReLU()

    my_loss = MeanSquaredError()
    tf_loss = losses.MeanSquaredError()

    with tf.GradientTape() as tape:
        tape.watch(inputs)
        y_pred = relu(inputs)
        loss_tf = tf_loss(y_true, y_pred)

    loss_my = my_loss(y_true, y_pred)

    assert np.allclose(loss_my, loss_tf)

    dz_tf, da_tf = tape.gradient(loss_tf, [inputs, y_pred])
    da_my = my_loss.get_gradient(y_true, y_pred)

    assert np.allclose(da_tf, da_my)

    # relu integration test

    # reshape to match batch jacobian shape
    da = tf.reshape(da_my, shape=(shape[0], 1, shape[1]))

    dz = da @ relu.get_jacobian(inputs)

    # reshape back to matrix
    dz = tf.reshape(dz, shape)

    assert np.allclose(dz, dz_tf)
Example #3
def test_softmax_kld_probs():
    units, N = 4, 100
    shape = (N, 6)
    inputs = normal(shape)
    y_true = tf.math.round(keras.activations.softmax(normal((N, units))))

    helper_test_coupling(activations.SoftMax(), "softmax", keras.losses.KLD,
                         inputs, y_true, units)
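
# helper_test_coupling is referenced in these tests but not shown. A minimal
# sketch of what such a helper could look like, assuming the custom activations
# expose __call__ and a get_jacobian returning a per-sample (batch, units, units)
# Jacobian; this is a hypothetical reconstruction, not necessarily the author's
# implementation (the `name` argument is unused in this sketch):
def helper_test_coupling(activation, name, keras_loss, inputs, y_true, units):
    logits = inputs[:, :units]  # toy pre-activations of the right width
    with tf.GradientTape(persistent=True) as tape:
        tape.watch(logits)
        pred = activation(logits)
        loss = keras_loss(y_true, pred)
    dz_tf = tape.gradient(loss, logits)  # autodiff reference
    da_tf = tape.gradient(loss, pred)    # upstream gradient at the activation output
    # chain the upstream gradient through the activation's analytic Jacobian
    dz = tf.squeeze(da_tf[:, tf.newaxis, :] @ activation.get_jacobian(logits), axis=1)
    assert np.allclose(dz, dz_tf, rtol=1e-05, atol=1e-08)
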
Example #4
def test_relu_mse():
    N = 400
    shape = (N, 6)
    inputs = normal(shape) * 10
    y_true = normal((N, 1)) * 10

    helper_test_coupling(activations.ReLU(), "relu", keras.losses.MSE, inputs,
                         y_true, 1)
Example #5
def test_linear_categorical_loss_logits():
    units, N = 4, 100
    shape = (N, 6)
    inputs = normal(shape)
    y_true = tf.math.round(keras.activations.softmax(normal((N, units))))

    helper_test_coupling(
        activations.Linear(),
        "linear",
        keras.losses.CategoricalCrossentropy(from_logits=True),
        inputs,
        y_true,
        units,
    )
Example #6
def test_sigmoid_binary_loss():
    N = 400
    shape = (N, 6)
    inputs = normal(shape)
    y_true = tf.math.round(keras.activations.sigmoid(normal((N, 1))))

    helper_test_coupling(
        activations.Sigmoid(),
        "sigmoid",
        keras.losses.BinaryCrossentropy(),
        inputs,
        y_true,
        1,
    )
Example #7
    def train(self, dataset):
        z = tf.constant(random.normal((FLAGS.n_samples, 1, 1, self.z_dim)))
        g_train_loss = metrics.Mean()
        d_train_loss = metrics.Mean()

        for epoch in range(self.epochs):
            bar = pbar(self.total_images, self.batch_size, epoch, self.epochs)
            for batch in dataset:
                for _ in range(self.n_critic):
                    d_loss = self.train_d(batch)
                    d_train_loss(d_loss)

                g_loss = self.train_g()
                g_train_loss(g_loss)

                bar.postfix['g_loss'] = f'{g_train_loss.result():6.3f}'
                bar.postfix['d_loss'] = f'{d_train_loss.result():6.3f}'
                bar.update(self.batch_size)

            g_train_loss.reset_states()
            d_train_loss.reset_states()

            bar.close()
            del bar

            samples = self.generate_samples(z)
            image_grid = img_merge(samples, n_rows=8).squeeze()
            save_image_grid(image_grid, epoch + 1)
Example #8
    def save_random_images(self, epoch, num_samples, imageSavePath):
        seed = random.normal([num_samples, self.noiseDim])

        for i in range(num_samples):
            file_path = os.path.join(imageSavePath, f'image_at_epoch_{epoch}_#{i}.png')
            image = self.generate_image(seed[i])
            self.save_image(image, file_path)
Example #9
 def noiseAndLatentC(self):
     noise = normal([self.batchSize, self.noiseDim - 1])
     rC = random_uniform(shape=(self.batchSize, 1), minval=0, maxval=9, dtype='int32')
     cForLoss = to_categorical(rC, num_classes=10) 
     cForLoss = transpose(cForLoss)
     rC = cast(rC, dtype='float')
     noiseAndC = concat([rC, noise], axis=1)
     return (cForLoss, noiseAndC)
Example #10
 def train_g(self):
     z = random.normal((self.batch_size, 1, 1, self.z_dim))
     with tf.GradientTape() as t:
         x_fake = self.G(z, training=True)
         fake_logits = self.D(x_fake, training=True)
         loss = ops.g_loss_fn(fake_logits)
     grad = t.gradient(loss, self.G.trainable_variables)
     self.g_opt.apply_gradients(zip(grad, self.G.trainable_variables))
     return loss
 def val_d(self, x_real):
     z = random.normal((self.batch_size, 1, 1, self.z_dim))
     x_fake = self.G(z, training=False)
     fake_logits = self.D(x_fake, training=False)
     real_logits = self.D(x_real, training=False)
     cost = ops.d_loss_fn(fake_logits, real_logits)
     gp = self.gradient_penalty(partial(self.D, training=False), x_real, x_fake)
     cost += self.grad_penalty_weight * gp
     return cost
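
# ops.g_loss_fn, ops.d_loss_fn and gradient_penalty are referenced above but
# not shown. For a WGAN-GP style setup they are commonly written as below; this
# is a sketch of the usual formulation, not necessarily the author's exact code.
def g_loss_fn(fake_logits):
    # generator tries to maximise the critic score on fakes
    return -tf.reduce_mean(fake_logits)

def d_loss_fn(fake_logits, real_logits):
    # Wasserstein critic loss
    return tf.reduce_mean(fake_logits) - tf.reduce_mean(real_logits)

def gradient_penalty(f, x_real, x_fake):
    # penalise deviation of the critic's gradient norm from 1 on interpolates
    alpha = tf.random.uniform([tf.shape(x_real)[0], 1, 1, 1], 0.0, 1.0)
    x_interp = x_real + alpha * (x_fake - x_real)
    with tf.GradientTape() as t:
        t.watch(x_interp)
        d_interp = f(x_interp)
    grads = t.gradient(d_interp, x_interp)
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    return tf.reduce_mean((norm - 1.0) ** 2)
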
Example #12
 def train_g(self, image_scale=255.0):
     z = random.normal((self.batch_size, 1, 1, self.z_dim))
     with tf.GradientTape() as t:
         x_fake = self.G(z, training=True)  #* image_scale
         #x_fake, _  = self.Augment(images=x_fake, scale=image_scale, \
         #                                            batch_shape=[self.batch_size, *self.image_shape])
         fake_logits = self.D(x_fake, training=True)
         loss = ops.g_loss_fn(fake_logits)
     grad = t.gradient(loss, self.G.trainable_variables)
     self.g_opt.apply_gradients(zip(grad, self.G.trainable_variables))
     return loss
Example #13
 def build_graph():
   k_constant6 = array_ops.constant([[0.0, 0.0], [0.0, 0.0]])
   k_constant5 = array_ops.constant([[1.0, 1.0], [1.0, 1.0]])
   k_rng4 = normal([2, 2],
                   mean=k_constant6,
                   stddev=k_constant5,
                   dtype=np.float32)
   k_constant3 = array_ops.constant([[2.0, 2.3], [1.0, 2.2]])
   k_multiply2 = k_rng4 * k_constant3
   k_constante1 = array_ops.constant([[0.1, 1.5], [0.9, 1.3]])
   return k_multiply2 + k_constante1
 def train_d(self, x_real):
     z = random.normal((self.batch_size, 1, 1, self.z_dim))
     with tf.GradientTape() as t:
         x_fake = self.G(z, training=True)
         fake_logits = self.D(DiffAugment(x_fake, policy=self.policy), training=True)
         real_logits = self.D(DiffAugment(x_real, policy=self.policy), training=True)
         cost = ops.d_loss_fn(fake_logits, real_logits)
         gp = self.gradient_penalty(partial(self.D, training=True), x_real, x_fake)
         cost += self.grad_penalty_weight * gp
     grad = t.gradient(cost, self.D.trainable_variables)
     self.d_opt.apply_gradients(zip(grad, self.D.trainable_variables))
     return cost
def test_categorical_loss_with_logits():
    softmax = SoftMax()
    shape = (100, 4)
    y_true = tf.math.round(softmax(normal(shape)))
    logits = normal(shape)

    my_loss = CategoricalCrossentropy(from_logits=True)
    tf_loss = losses.CategoricalCrossentropy(from_logits=True)

    with tf.GradientTape(persistent=True) as tape:
        tape.watch(logits)
        loss_tf = tf_loss(y_true=y_true, y_pred=logits)

    loss_my = my_loss(y_true, y_pred=logits)

    assert np.allclose(loss_my, loss_tf)

    dz_tf = tape.gradient(loss_tf, [logits])[0]
    dz_my = my_loss.get_gradient(y_true, logits)

    assert np.allclose(dz_tf, dz_my)
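
# Side note (not part of the original test): with the default mean reduction,
# the gradient of softmax cross-entropy w.r.t. the logits has the well-known
# closed form (softmax(z) - y) / N, which is what the gradients above encode.
def analytic_softmax_ce_grad(y_true, logits):
    n = tf.cast(tf.shape(logits)[0], logits.dtype)
    return (tf.nn.softmax(logits) - y_true) / n
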
    def build(self, seed=None):
        if self.built: return

        if len(self.inputConnections) < 1:
            raise (notEnoughNodeConnections(len(self.inputConnections), 1))

        self.filter = Variable(
            normal(shape=[
                self.kernelSize, self.kernelSize, self.inputChannels,
                self.numberOfKernels
            ],
                   seed=seed))
        self.built = True
Example #17
 def noiseAndC(self):
     noise = normal([self.batchSize, self.noiseDim - 10])
     nC = []
     # pair each sample's noise vector with all 10 fixed latent codes
     for i in range(self.batchSize):
         for j in range(10):
             nC.append(
                 concat([self.latentCodes[j], noise[i]], axis=-1).numpy())
     nC = tf.constant(nC)
     return nC
def test_binary_loss():
    sigmoid = Sigmoid()
    shape = (500, 1)
    y_true = tf.math.round(sigmoid(normal(shape)))
    logits = normal(shape)

    my_loss = BinaryCrossentropy()
    tf_loss = losses.BinaryCrossentropy()

    with tf.GradientTape(persistent=True) as tape:
        tape.watch(logits)
        pred = keras.activations.sigmoid(logits)
        loss_tf = tf_loss(y_true=y_true, y_pred=pred)

    loss_my = my_loss(y_true, y_pred=pred)

    assert np.allclose(loss_my, loss_tf, rtol=1e-05, atol=1e-08)

    dz_tf, da_tf = tape.gradient(loss_tf, [logits, pred])
    da_my = my_loss.get_gradient(y_true, pred)

    assert np.allclose(da_my, da_tf, rtol=1e-05, atol=1e-08)

    # sigmoid integration test (reusing the sigmoid instance created above)

    # reshape to match batch jacobian shape
    da = da_my[:, tf.newaxis, :]

    dz = da @ sigmoid.get_jacobian(logits)

    # reshape back to matrix
    dz = tf.reshape(dz, shape)

    assert np.allclose(dz, dz_tf)
Example #19
 def train_uniform_gausian(self):
     x_real = random.normal(
         (self.params.n_samples, self.z_dim, self.z_dim, 3))
     g_train_loss = metrics.Mean()
     d_train_loss = metrics.Mean()
     for epoch in range(self.epochs):
         for _ in range(self.n_critic):
             d_loss = self.train_d(x_real)
             d_train_loss(d_loss)
         g_loss = self.train_g()
         g_train_loss(g_loss)
         g_train_loss.reset_states()
         d_train_loss.reset_states()
Example #20
 def train_d(self, x_real, image_scale=255.0):
     z = random.normal((self.batch_size, 1, 1, self.z_dim))
     with tf.GradientTape() as t:
         x_fake = self.G(z, training=True)
         #flist= None
         #x_fake, flist = self.Augment(images=x_fake, scale=image_scale, \
         #                        batch_shape=[self.batch_size, *self.image_shape])
         fake_logits = self.D(x_fake, training=True)
         x_real = self.Augment(images=x_real, scale=image_scale, \
                                  batch_shape=[self.batch_size, *self.image_shape])
         real_logits = self.D(x_real, training=True)
         cost = ops.d_loss_fn(fake_logits, real_logits)
         gp = self.gradient_penalty(partial(self.D, training=True), x_real,
                                    x_fake)
         cost += self.grad_penalty_weight * gp
     grad = t.gradient(cost, self.D.trainable_variables)
     self.d_opt.apply_gradients(zip(grad, self.D.trainable_variables))
     return cost
Example #21
    def train_step(self, images):
        noise = random.normal([self.batchSize, self.noiseDim])

        with GradientTape() as gen_tape, GradientTape() as disc_tape:
            generated_images = self.generator(noise, training=True)

            real_output = self.discriminator(images, training=True)
            fake_output = self.discriminator(generated_images, training=True)

            gen_loss = self.generator_loss(fake_output)
            disc_loss = self.discriminator_loss(real_output, fake_output)

        gradients_of_generator = gen_tape.gradient(gen_loss, self.generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, self.discriminator.trainable_variables)

        self.generatorOptimizer.apply_gradients(zip(gradients_of_generator, self.generator.trainable_variables))
        self.discriminatorOptimizer.apply_gradients(
            zip(gradients_of_discriminator, self.discriminator.trainable_variables))
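
# generator_loss and discriminator_loss are referenced above but not shown; the
# usual non-saturating GAN losses built on BinaryCrossentropy(from_logits=True)
# look like this (a sketch, which may differ from the author's version):
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def generator_loss(fake_output):
    # generator wants fakes to be classified as real
    return cross_entropy(tf.ones_like(fake_output), fake_output)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss
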
    def build(self, seed=None):
        if self.built: return

        if len(self.inputConnections) < 1:
            raise (notEnoughNodeConnections(len(self.inputConnections), 1))
        #now make the variables

        inputShape = self.inputConnections[0].outputShape
        self.inputSize = inputShape[0]

        biasInit = 0.1
        weightInitSTDDEV = 1 / self.inputSize

        self.biases = Variable(constant(biasInit, shape=[self.size]))
        self.weights = Variable(
            normal([self.inputSize, self.size],
                   stddev=weightInitSTDDEV,
                   mean=0,
                   seed=seed))

        self.built = True
Example #23
    def train(self, dataset):
        z = tf.constant(
            random.normal((self.params.n_samples, 1, 1, self.z_dim)))
        g_train_loss = metrics.Mean()
        d_train_loss = metrics.Mean()

        for epoch in range(self.epochs):
            for batch in dataset:
                for _ in range(self.n_critic):
                    d_loss = self.train_d(batch)
                    d_train_loss(d_loss)

                g_loss = self.train_g()
                g_train_loss(g_loss)

            g_train_loss.reset_states()
            d_train_loss.reset_states()

            samples = self.generate_samples(z)
            image_grid = img_merge(samples, n_rows=8).squeeze()
            save_image_grid(image_grid, epoch + 1)
Example #24
from __future__ import print_function, division
import tensorflow as tf
from tensorflow.random import normal
from tensorflow.keras.backend import random_uniform, concatenate, transpose, dot
from tensorflow.keras.utils import to_categorical

a = random_uniform(shape=(15, 1), minval=0, maxval=9, dtype='int32')
d = to_categorical(a, num_classes=10)
d = transpose(d)
b = normal([15, 4])
a = tf.dtypes.cast(a, dtype='float')
c = tf.concat([a, b], axis=1)
print(a - 1)
print(b.numpy())
print(c.numpy())
print(d)
eM = normal([15, 10])
print(eM)
# the diagonal of eM @ d picks, for each sample i, the entry eM[i, class_i]
print(tf.linalg.tensor_diag_part(dot(eM, d)))
Example #25
 def call(self, inputs):
     # VAE reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I)
     Z_mu, Z_logvar = inputs
     epsilon = random.normal(shape(Z_mu))
     sigma = math.exp(0.5 * Z_logvar)
     return Z_mu + sigma * epsilon
Example #26
def weight_variable(shape, w_alpha = 0.01):
    return Variable(w_alpha * normal(shape))
Example #27
    def train(self, dataset, val_dataset=None, epochs=int(3e4), n_itr=100):
        try:
            z = tf.constant(
                np.load(f'{self.save_path}/{self.model_name}_z.npy'))
        except FileNotFoundError:
            z = tf.constant(random.normal((self.batch_size, 1, 1, self.z_dim)))
            os.makedirs(self.save_path, exist_ok=True)
            np.save(f'{self.save_path}/{self.model_name}_z', z.numpy())

        liveplot = PlotLosses()
        try:
            losses_list = pickle.load(
                open(f'{self.save_path}/{self.model_name}_losses_list.pkl',
                     'rb'))
        except:
            losses_list = []

        for i, losses in enumerate(losses_list):
            liveplot.update(losses, i)

        start_epoch = len(losses_list)

        g_train_loss = metrics.Mean()
        d_train_loss = metrics.Mean()
        d_val_loss = metrics.Mean()

        for epoch in range(start_epoch, epochs):
            train_bar = pbar(n_itr, epoch, epochs)
            for itr_c, batch in zip(range(n_itr), dataset):
                if train_bar.n >= n_itr:
                    break

                for _ in range(self.n_critic):
                    d_loss = self.train_d(batch['images'])
                    d_train_loss(d_loss)

                g_loss = self.train_g()
                g_train_loss(g_loss)

                train_bar.postfix['g_loss'] = f'{g_train_loss.result():6.3f}'
                train_bar.postfix['d_loss'] = f'{d_train_loss.result():6.3f}'
                train_bar.update(n=itr_c)

            train_bar.close()

            if val_dataset:
                val_bar = vbar(n_itr // 5, epoch, epochs)
                for itr_c, batch in zip(range(n_itr // 5), val_dataset):
                    if val_bar.n >= n_itr // 5:
                        break

                    d_val_l = self.val_d(batch['images'])
                    d_val_loss(d_val_l)

                    val_bar.postfix[
                        'd_val_loss'] = f'{d_val_loss.result():6.3f}'
                    val_bar.update(n=itr_c)
                val_bar.close()

            losses = {
                'g_loss': g_train_loss.result(),
                'd_loss': d_train_loss.result(),
                'd_val_loss': d_val_loss.result()
            }
            losses_list += [losses]
            pickle.dump(
                losses_list,
                open(f'{self.save_path}/{self.model_name}_losses_list.pkl',
                     'wb'))
            liveplot.update(losses, epoch)
            liveplot.send()

            g_train_loss.reset_states()
            d_train_loss.reset_states()
            d_val_loss.reset_states()
            del train_bar
            if val_dataset:
                del val_bar

            self.G.save_weights(
                filepath=f'{self.save_path}/{self.model_name}_generator')
            self.D.save_weights(
                filepath=f'{self.save_path}/{self.model_name}_discriminator')

            if epoch >= int(2e4):
                if epoch % 1000 == 0:
                    self.G.save_weights(
                        filepath=
                        f'{self.save_path}/{self.model_name}_generator{epoch}')
                    self.D.save_weights(
                        filepath=
                        f'{self.save_path}/{self.model_name}_discriminator{epoch}'
                    )

            if epoch % 5 == 0:
                samples = self.generate_samples(z)
                image_grid = img_merge(samples, n_rows=6).squeeze()
                img_path = f'./images/{self.model_name}'
                os.makedirs(img_path, exist_ok=True)
                save_image_grid(image_grid,
                                epoch + 1,
                                self.model_name,
                                output_dir=img_path)
Example #28
def bias_variable(shape, b_alpha = 0.1):
    return Variable(b_alpha * normal(shape))
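
# Hypothetical usage of the two variable factories above (not in the source):
x = normal([32, 784])                 # a batch of flattened inputs
W = weight_variable([784, 128])
b = bias_variable([128])
h = tf.nn.relu(tf.matmul(x, W) + b)   # hidden activations, shape (32, 128)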