Code Example #1
File: Nets.py Project: MarkusHald/SCoGAN
def res_net_block(input_data, filters, kernel, norm, w_init):
    x = layers.Conv2D(filters, kernel, padding='same', kernel_initializer=w_init)(input_data)
    x = u.get_norm(norm)(x)
    #x = layers.PReLU(args.prelu_init)(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(filters, kernel, padding='same', kernel_initializer=w_init)(x)
    x = u.get_norm(norm)(x)
    x = layers.Add()([input_data, x])
    return x
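Note that layers.Add() requires both of its inputs to have identical shapes, so input_data must already carry `filters` channels when the block is applied. A minimal usage sketch, mirroring the call in resnet128_gen below (`model`, `norm`, and `w_init` are assumed to come from the enclosing architecture):

for _ in range(6):
    model = res_net_block(model, 256, 3, norm, w_init)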
Code Example #2
File: GAN_archs.py Project: MarkusHald/SCoGAN
def gan32_gen(args):
    channels = args.dataset_dim[3]

    # Shared weights between generators
    noise = tf.keras.layers.Input(shape=(args.noise_dim, ))

    model = tf.keras.layers.Dense(1024 * 4 * 4,
                                  kernel_initializer=args.w_init,
                                  kernel_regularizer=args.wd,
                                  bias_initializer=args.bi)(noise)
    model = tf.keras.layers.Reshape((4, 4, 1024))(model)
    model = u.get_norm(args.norm)(model)
    model = (tf.keras.layers.PReLU(args.prelu_init))(model)

    model = (tf.keras.layers.Conv2DTranspose(512, (3, 3),
                                             strides=(2, 2),
                                             padding='same',
                                             kernel_initializer=args.w_init,
                                             kernel_regularizer=args.wd,
                                             bias_initializer=args.bi))(model)
    model = u.get_norm(args.norm)(model)
    model = (tf.keras.layers.PReLU(args.prelu_init))(model)

    model = (tf.keras.layers.Conv2DTranspose(256, (3, 3),
                                             strides=(2, 2),
                                             padding='same',
                                             kernel_initializer=args.w_init,
                                             kernel_regularizer=args.wd,
                                             bias_initializer=args.bi))(model)
    model = u.get_norm(args.norm)(model)
    model = (tf.keras.layers.PReLU(args.prelu_init))(model)

    model = (tf.keras.layers.Conv2DTranspose(128, (3, 3),
                                             strides=(2, 2),
                                             padding='same',
                                             kernel_initializer=args.w_init,
                                             kernel_regularizer=args.wd,
                                             bias_initializer=args.bi))(model)
    model = u.get_norm(args.norm)(model)
    model = (tf.keras.layers.PReLU(args.prelu_init))(model)
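    # Three stride-2 transposed convolutions have upsampled the initial
    # 4x4 feature map to 32x32, matching the generator's name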

    # Generator 1
    img = tf.keras.layers.Conv2DTranspose(channels, (6, 6),
                                          strides=(1, 1),
                                          activation='tanh',
                                          padding='same',
                                          kernel_initializer=args.w_init,
                                          kernel_regularizer=args.wd,
                                          bias_initializer=args.bi)(model)

    return keras.Model(noise, img)
Code Example #3
File: Style_transfer.py Project: MarkusHald/SCoGAN
def find_latent_code(content_image,
                     generator,
                     args,
                     feature_loss,
                     iterations=1000,
                     verbose=False):
    x = tf.Variable(u.gen_noise(args, style_transfer=True), trainable=True)
    opt = tf.optimizers.Adam(learning_rate=0.001)

    for i in range(iterations):
        with tf.GradientTape() as t:
            # no need to watch a variable:
            # trainable variables are always watched
            if feature_loss:
                img_guess = generator(x)[-1]
            else:
                img_guess = generator(x)

            diff = tf.math.abs(content_image - img_guess)
            # diff = tf.math.squared_difference(self.content_image, img_guess)
            loss = tf.math.reduce_mean(diff)
            if verbose and i % 250 == 0:
                print(str(i) + "/" + str(iterations))

        # The tape recorded the forward pass, so it can compute the gradients
        trainable_variables = [x]
        gradients = t.gradient(loss, trainable_variables)
        # The optimizer applies the update, using the variables
        # and the optimizer update rule
        opt.apply_gradients(zip(gradients, trainable_variables))
    return x
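A hypothetical call site, assuming a trained generator whose tanh output range matches a content image preprocessed into [-1, 1]:

latent = find_latent_code(content_image, generator, args,
                          feature_loss=False, iterations=500, verbose=True)
reconstruction = generator(latent)  # image produced by the optimized latent code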
Code Example #4
File: GAN_archs.py Project: MarkusHald/SCoGAN
def patch_gan_disc(args):
    img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])

    img1 = tf.keras.layers.Input(shape=img_shape)

    x = tf.keras.layers.Conv2D(32,
                               4,
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer=args.w_init)(img1)
    x = tf.keras.layers.LeakyReLU(0.2)(x)

    x = tf.keras.layers.Conv2D(64,
                               4,
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer=args.w_init)(x)
    x = u.get_norm(args.norm)(x)
    x = tf.keras.layers.LeakyReLU(0.2)(x)

    x = tf.keras.layers.Conv2D(128,
                               4,
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer=args.w_init)(x)
    x = u.get_norm(args.norm)(x)
    x = tf.keras.layers.LeakyReLU(0.2)(x)

    x = tf.keras.layers.Conv2D(256,
                               4,
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer=args.w_init)(x)
    x = u.get_norm(args.norm)(x)
    x = tf.keras.layers.LeakyReLU(0.2)(x)

    out = tf.keras.layers.Conv2D(1, (4, 4),
                                 padding='same',
                                 kernel_initializer=args.w_init)(x)

    return keras.Model(img1, out)
Code Example #5
File: GAN_trainer.py Project: MarkusHald/SCoGAN
    def train_generator(self, args):
        noise = u.gen_noise(args)

        with tf.GradientTape() as gen_tape:
            generated_images = self.generator(noise, training=True)
            fake_output = self.discriminator(generated_images, training=True)
            gen_loss = self.g_loss_fn(fake_output)
        gradients_of_generator = gen_tape.gradient(
            gen_loss, self.generator.trainable_variables)
        args.gen_optimizer.apply_gradients(
            zip(gradients_of_generator, self.generator.trainable_variables))
        return gen_loss
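The g_loss_fn used here comes from l.set_losses(args), which is not shown in this listing. A minimal sketch of one generator loss matching this call signature, assuming the WGAN objective that the weight-clipping branch in train_discriminator (below) implies, and assuming tensorflow is imported as tf as elsewhere in these examples:

def wgan_g_loss(fake_output):
    # WGAN generator loss: raise the critic's score on generated samples
    return -tf.reduce_mean(fake_output)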
Code Example #6
File: GAN_archs.py Project: MarkusHald/SCoGAN
def resnet128_gen(args):
    channels = args.dataset_dim[3]

    # Shared weights between generators
    noise = tf.keras.layers.Input(shape=(args.noise_dim, ))

    model = tf.keras.layers.Dense(1024 * 4 * 4,
                                  kernel_initializer=args.w_init)(noise)
    model = tf.keras.layers.Reshape((4, 4, 1024))(model)
    model = u.get_norm(args.norm)(model)
    model = layers.Activation('relu')(model)

    model = (tf.keras.layers.Conv2DTranspose(
        512, 3, strides=(2, 2), padding='same',
        kernel_initializer=args.w_init))(model)
    model = u.get_norm(args.norm)(model)
    model = layers.Activation('relu')(model)

    model = (tf.keras.layers.Conv2DTranspose(
        256, 3, strides=(2, 2), padding='same',
        kernel_initializer=args.w_init))(model)
    model = u.get_norm(args.norm)(model)
    model = layers.Activation('relu')(model)

    for i in range(6):
        model = nets.res_net_block(model, 256, 3, args.norm, args.w_init)

    model = (tf.keras.layers.Conv2DTranspose(
        128, 3, strides=(2, 2), padding='same',
        kernel_initializer=args.w_init))(model)
    model = u.get_norm(args.norm)(model)
    model = layers.Activation('relu')(model)

    model = (tf.keras.layers.Conv2DTranspose(
        64, (3, 3),
        strides=(2, 2),
        padding='same',
        kernel_initializer=args.w_init))(model)
    model = u.get_norm(args.norm)(model)
    model = layers.Activation('relu')(model)

    img1 = (tf.keras.layers.Conv2DTranspose(
        32, (3, 3),
        strides=(2, 2),
        padding='same',
        kernel_initializer=args.w_init))(model)
    img1 = u.get_norm(args.norm)(img1)
    img1 = layers.Activation('relu')(img1)

    img1 = tf.keras.layers.Conv2DTranspose(
        channels, (3, 3),
        strides=(1, 1),
        activation='tanh',
        padding='same',
        kernel_initializer=args.w_init)(img1)

    return keras.Model(noise, img1)
Code Example #7
File: GAN_trainer.py Project: MarkusHald/SCoGAN
    def train(self, args):
        if args.dataset != 'lsun':
            it = iter(self.dataset)
        else:
            it = self.dataset

        # Set loss functions
        self.d_loss_fn, self.g_loss_fn = l.set_losses(args)

        for epoch in range(args.epochs):
            start = time.time()
            disc_iters_loss = []

            # take args.disc_iters steps with the discriminator before training the generator
            for i in range(args.disc_iters):
                if args.dataset in ['celeba', 'lsun']:
                    batch = next(it)
                else:
                    batch = next(it)[0]

                d_loss = self.train_discriminator(batch, args)
                disc_iters_loss.append(d_loss)

            g_loss = self.train_generator(args)

            self.full_training_time += time.time() - start
            self.disc_loss.append(tf.reduce_mean(disc_iters_loss).numpy())
            self.gen_loss.append(g_loss.numpy())
            print("%d [D loss: %f] [G loss: %f]" % (
                epoch,
                d_loss,
                g_loss,
            ))

            # Generate samples and save
            if args.images_while_training != 0:
                if epoch % args.images_while_training == 0:
                    if args.dataset == "toy":
                        self.images_while_training.append(
                            u.draw_2d_samples(self.generator, args.noise_dim))
                    else:
                        self.sample_images(epoch, args.seed, args.dir,
                                           args.dataset_dim[3])

        self.plot_losses(args.dir, self.disc_loss, self.gen_loss)
        self.sample_images(epoch, args.seed, args.dir, args.dataset_dim[3])
        return self.full_training_time
Code Example #8
File: GAN_trainer.py Project: MarkusHald/SCoGAN
    def train_discriminator(self, real_data, args):
        noise = u.gen_noise(args)
        generated_images = self.generator(noise, training=True)

        with tf.GradientTape() as disc_tape:
            fake_output = self.discriminator(generated_images, training=True)
            real_output = self.discriminator(real_data, training=True)
            disc_loss = self.d_loss_fn(fake_output, real_output)
            gp = self.discPenal.calc_penalty(
                generated_images, real_data, self.discriminator,
                args)  # if loss is not wgan-gp then gp=0
            disc_loss = disc_loss + (gp * args.penalty_weight_d)
        gradients_of_discriminator = disc_tape.gradient(
            disc_loss, self.discriminator.trainable_variables)
        args.disc_optimizer.apply_gradients(
            zip(gradients_of_discriminator,
                self.discriminator.trainable_variables))

        # Clip weights if wgan loss function
        if args.loss == "wgan":
            for var in self.discriminator.trainable_variables:
                var.assign(tf.clip_by_value(var, -args.clip, args.clip))
        return disc_loss
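Neither d_loss_fn nor discPenal.calc_penalty is shown in this listing. A sketch of a WGAN critic loss and a WGAN-GP gradient penalty consistent with how they are called above (an assumption, not the project's actual implementation; assumes tensorflow imported as tf):

def wgan_d_loss(fake_output, real_output):
    # WGAN critic loss: score real samples high and fake samples low
    return tf.reduce_mean(fake_output) - tf.reduce_mean(real_output)

def gradient_penalty(fake_images, real_images, discriminator):
    # WGAN-GP: penalize the critic's gradient norm on random interpolates
    eps = tf.random.uniform([tf.shape(real_images)[0], 1, 1, 1], 0.0, 1.0)
    interp = eps * real_images + (1.0 - eps) * fake_images
    with tf.GradientTape() as tape:
        tape.watch(interp)
        pred = discriminator(interp, training=True)
    grads = tape.gradient(pred, interp)
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    return tf.reduce_mean((norm - 1.0) ** 2)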
Code Example #9
def parserForInformation(page_source):
    """
    @param page_source: a string containing the HTML we are going to scrape to print
    the information about the discography of an artist/band

    This function is called in method getDiscography after we have navigated to the discography page of
    a band/artist using selenium's webdriver. We use BeautifulSoup to extract the information from
    the HTML
    """
    #First we create the object soup that will parse our HTML. We give it as first parameter
    #the string we want to parse, in this case the source code (HTML code) of our page,
    #and then we specify that it has to use an HTML parser
    soup = BeautifulSoup(page_source, "html.parser")
    #We get all the elements in the page that are of type table
    tables = soup.find_all("table")
    #We find all elements inside the second table in the HTML that are of type tr (rows)
    trs = tables[1].find_all("tr")
    #We go through the different rows (each row is an album) of the table and for each row
    #(each album) we get the columns that contain the information we want to print,
    #and we add the columns with the right information to our string "s" where we will
    #keep the information and print it
    for i in range(1, len(trs)):
        tds = trs[i].find_all("td")
        for j in range(2, len(tds) - 1):
            if (j == 2):
                s = Utils.deleteSpaces(tds[j].text)
                print("Year: " + s)
            elif (j != 5 and j != 6):
                s = Utils.deleteIntroTabs(tds[j].text)
                if (j == 4):
                    s = "Label: " + s
                print(s)
            elif (j == 5):
                stars = tds[j].div.attrs['class'][1]
                Utils.starsDecode(stars)
            elif (j == 6):
                stars = tds[j].div.span.attrs['class'][1]
                Utils.userStarsDecode(stars)
        print("")
Code Example #10
    def train(self, args):
        if args.use_cycle:
            self.encoder = n.encoder(args)
        if args.semantic_loss:
            self.classifier = tf.keras.models.load_model(args.classifier_path)
            if args.cogan_data == 'mnist2fashion':
                self.classifier2 = tf.keras.models.load_model(args.classifier_path + '_fashion')
        if args.feature_loss:
            vgg = tf.keras.applications.VGG19(include_top=False, input_shape=(args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3]))
            self.high_level_feature_extractor = tf.keras.Model(inputs=vgg.input, outputs=vgg.get_layer('block4_conv4').output)
            self.low_level_feature_extractor = tf.keras.Model(inputs=vgg.input, outputs=vgg.get_layer('block1_pool').output)
        if args.perceptual_loss:
            self.vgg_feature_model = self.feature_layers(self.style_layers + self.content_layers, args)
        it1 = iter(self.X1)
        it2 = iter(self.X2)

        # Set loss functions
        d_loss_fn, g_loss_fn = l.set_losses(args)

        for epoch in range(args.epochs):
            start = time.time()

            # ----------------------
            #  Train Discriminators
            # ----------------------

            for i in range(args.disc_iters):
                # Select a random batch of images
                if args.cogan_data in ['mnist2edge', 'shapes2flowers', 'Eyeglasses', 'Smiling', 'Blond_Hair', 'Male']:
                    batch1 = next(it1)
                    batch2 = next(it2)
                elif args.cogan_data == 'mnist2svhn_prune':
                    batch1 = next(it1)[0]
                    batch2 = next(it2)
                else:
                    batch1 = next(it1)[0]
                    batch2 = next(it2)[0]

                # Sample noise as generator input
                noise = u.gen_noise(args)

                # Generate a batch of new images
                gen_batch1 = self.g1(noise, training=True)

                # d1
                with tf.GradientTape() as tape:
                    # Disc response
                    disc_real1 = self.d1(batch1, training=True)
                    disc_fake1 = self.d1(gen_batch1[-1], training=True)

                    # Calc loss and penalty
                    d1_loss = d_loss_fn(disc_fake1, disc_real1)
                    gp1 = self.discPenal.calc_penalty(gen_batch1[-1], batch1, self.d1, args)  # if loss is not wgan-gp then gp=0
                    self.hist_discpenalty1.append(gp1)
                    d1_loss = d1_loss + (gp1 * args.penalty_weight_d)
                gradients_of_discriminator = tape.gradient(d1_loss, self.d1.trainable_variables)
                args.disc_optimizer.apply_gradients(zip(gradients_of_discriminator, self.d1.trainable_variables))

                # Generate a batch of new images
                gen_batch2 = self.g2(noise, training=True)

                # d2
                with tf.GradientTape() as tape:
                    # Disc response
                    disc_real2 = self.d2(batch2, training=True)
                    disc_fake2 = self.d2(gen_batch2[-1], training=True)

                    # Calc loss and penalty
                    d2_loss = d_loss_fn(disc_fake2, disc_real2)
                    gp2 = self.discPenal.calc_penalty(gen_batch2[-1], batch2, self.d2, args)  # if loss is not wgan-gp then gp=0
                    self.hist_discpenalty2.append(gp2)
                    d2_loss = d2_loss + (gp2 * args.penalty_weight_d)
                gradients_of_discriminator = tape.gradient(d2_loss, self.d2.trainable_variables)
                args.disc_optimizer.apply_gradients(zip(gradients_of_discriminator, self.d2.trainable_variables))

                if args.loss == 'wgan' and args.disc_penalty == 'none':
                    self.clip_weights(args.clip)

            # ------------------
            #  Train Generators
            # ------------------

            # Sample noise as generator input
            noise = u.gen_noise(args)
            with tf.GradientTape() as tape1, tf.GradientTape() as tape2, tf.GradientTape() as tape3:
                # Adv loss
                gen1_fake = self.g1(noise, training=True)
                disc1_fake = self.d1(gen1_fake[-1], training=True)
                g1_loss = g_loss_fn(disc1_fake)

                gen2_fake = self.g2(noise, training=True)
                disc2_fake = self.d2(gen2_fake[-1], training=True)
                g2_loss = g_loss_fn(disc2_fake)
                
                if args.semantic_loss:
                    domain1_pred = self.classifier(gen1_fake[-1])
                    domain2_pred = self.classifier(gen2_fake[-1])
                    diff = tf.reduce_mean(tf.math.squared_difference(domain1_pred, domain2_pred))
                    # log semantic loss
                    self.hist_semantic_loss.append(diff)
                    g1_loss = g1_loss + diff * args.semantic_weight
                    g2_loss = g2_loss + diff * args.semantic_weight

                penalty = self.genPenal.calc_penalty(self.g1, self.g2, args.shared_layers, args, gen1_fake, gen2_fake)
                g1_loss = g1_loss + (penalty * args.penalty_weight_g)
                g2_loss = g2_loss + (penalty * args.penalty_weight_g)

                if args.feature_loss:
                    #fake1_high_features = self.high_level_feature_extractor(gen1_fake[-1])
                    #fake2_high_features = self.high_level_feature_extractor(gen2_fake[-1])
                    fake1_low_features = self.low_level_feature_extractor(gen1_fake[-1])
                    fake2_low_features = self.low_level_feature_extractor(gen2_fake[-1])
                    real1_low_features = self.low_level_feature_extractor(batch1)
                    real2_low_features = self.low_level_feature_extractor(batch2)

                    #high_diff = tf.reduce_mean(tf.math.squared_difference(fake1_high_features, fake2_high_features))
                    low_test = tf.reduce_mean(tf.math.squared_difference(fake1_low_features, fake2_low_features))  # computed but not added to either loss below
                    low1_diff = tf.reduce_mean(tf.math.squared_difference(fake1_low_features, real1_low_features))
                    low2_diff = tf.reduce_mean(tf.math.squared_difference(fake2_low_features, real2_low_features))
                    diffs1 = tf.math.l2_normalize([penalty, low1_diff])
                    diffs2 = tf.math.l2_normalize([penalty, low2_diff])

                    #high_diff = diffs1[0] * args.fl_high_weight
                    low1_diff = diffs1[1] * args.fl_low_weight
                    low2_diff = diffs2[1] * args.fl_low_weight


                    #self.hist_high_diff.append(high_diff)
                    self.hist_low1_diff.append(low1_diff)
                    self.hist_low2_diff.append(low2_diff)

                    #g1_loss = g1_loss + high_diff
                    g1_loss = g1_loss + low1_diff
                    #g2_loss = g2_loss + high_diff
                    g2_loss = g2_loss + low2_diff

                    #g1_loss = g1_loss + high_diff + low1_diff
                    #g2_loss = g2_loss + high_diff + low2_diff

                if args.perceptual_loss:
                    fake1_content, fake1_style = self.StyleContentModel(gen1_fake[-1])
                    fake2_content, fake2_style = self.StyleContentModel(gen2_fake[-1])
                    real1_content, real1_style = self.StyleContentModel(batch1)
                    real2_content, real2_style = self.StyleContentModel(batch2)

                    g1_style_loss, g1_content_loss = self.StyleContentLoss(fake1_style, real1_style, fake1_content, fake2_content, args)
                    g2_style_loss, g2_content_loss = self.StyleContentLoss(fake2_style, real2_style, fake2_content, fake1_content, args)

                    g1_loss = (g1_loss) + g1_style_loss + g1_content_loss
                    g2_loss = (g2_loss) + g2_style_loss + g2_content_loss

                    self.hist_style1_loss.append(g1_style_loss)
                    self.hist_style2_loss.append(g2_style_loss)
                    self.hist_content_loss.append(g1_content_loss)

                if args.use_cycle:
                    # Recon loss
                    noise_recon1 = self.encoder(gen1_fake[-1])
                    noise_recon2 = self.encoder(gen2_fake[-1])

                    #fake_recon1 = self.g1(noise_recon1, training=False)
                    #fake_recon2 = self.g2(noise_recon2, training=False)

                    noise_recon_loss1 = l.recon_criterion(noise_recon1, noise)
                    noise_recon_loss2 = l.recon_criterion(noise_recon2, noise)

                    #fake_recon_loss1 = l.recon_criterion(fake_recon1[-1], gen1_fake[-1])
                    #fake_recon_loss2 = l.recon_criterion(fake_recon2[-1], gen2_fake[-1])

                    total_recon_loss = noise_recon_loss1 + noise_recon_loss2

                    # log cycle loss
                    self.hist_cycle_loss.append(total_recon_loss)

                    g1_loss = g1_loss + (total_recon_loss * args.cycle_weight)
                    g2_loss = g2_loss + (total_recon_loss * args.cycle_weight)

            gradients_of_generator1 = tape1.gradient(g1_loss, self.g1.trainable_variables)
            args.gen_optimizer.apply_gradients(zip(gradients_of_generator1, self.g1.trainable_variables))
            gradients_of_generator2 = tape2.gradient(g2_loss, self.g2.trainable_variables)
            args.gen_optimizer.apply_gradients(zip(gradients_of_generator2, self.g2.trainable_variables))
            if args.use_cycle:
                gradients_of_encoder = tape3.gradient(total_recon_loss, self.encoder.trainable_variables)
                args.gen_optimizer.apply_gradients(zip(gradients_of_encoder, self.encoder.trainable_variables))
            weight_sim = self.genPenal.weight_regularizer(self.g1, self.g2, 21)
            self.full_training_time += time.time() - start

            '''
            # Check if shared weights are equal between generators
            a = self.g1.trainable_variables
            b = self.g2.trainable_variables
            mask = []

            for i in range(8):
                if np.array_equal(a[i].numpy(), b[i].numpy()):
                    mask.append(1)
                else:
                    mask.append(0)
            if 0 in mask:
                print("ERROR - weight sharing failure:" + mask)
            '''

            # Collect loss values
            self.hist_d1.append(d1_loss)
            self.hist_d2.append(d2_loss)
            self.hist_g1.append(g1_loss)
            self.hist_g2.append(g2_loss)
            self.hist_weight_similarity.append(weight_sim)

            print("%d [D1 loss: %f] [D2 loss: %f] [G1 loss: %f] [G2 loss: %f] [WeightSim: %f]}" % (epoch, d1_loss, d2_loss, g1_loss, g2_loss, weight_sim))

            # If at save interval => save generated image samples
            if epoch % args.images_while_training == 0:
                self.sample_images(epoch, args.seed, args.dir, args.dataset_dim[3])
        self.plot_losses(args.dir)
        self.sample_images(epoch, args.seed, args.dir, args.dataset_dim[3])
        return self.full_training_time
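l.recon_criterion is not shown in this listing; a common choice consistent with its use as a latent reconstruction loss is an element-wise L1 distance (an assumption; assumes tensorflow imported as tf):

def recon_criterion(prediction, target):
    # mean absolute error between reconstruction and target
    return tf.reduce_mean(tf.abs(prediction - target))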
Code Example #11
File: CoGAN_trainer.py Project: MarkusHald/SCoGAN
    def train(self, args):

        it1 = iter(self.X1)
        it2 = iter(self.X2)

        # Set loss functions
        d_loss_fn, g_loss_fn = l.set_losses(args)

        for epoch in range(args.epochs):
            start = time.time()

            # ----------------------
            #  Train Discriminators
            # ----------------------

            for i in range(args.disc_iters):
                # Select a random batch of images
                if args.cogan_data in [
                        'mnist2edge', 'Eyeglasses', 'Smiling', 'Blond_Hair',
                        'Male'
                ]:
                    batch1 = next(it1)
                    batch2 = next(it2)
                else:
                    batch1 = next(it1)[0]
                    batch2 = next(it2)[0]

                # Sample noise as generator input
                noise = u.gen_noise(args)

                # Generate a batch of new images
                gen_batch1 = self.g1(noise, training=True)

                # d1
                with tf.GradientTape() as tape:
                    # Disc response
                    disc_real1 = self.d1(batch1, training=True)
                    disc_fake1 = self.d1(gen_batch1, training=True)

                    # Calc loss and penalty
                    d1_loss = d_loss_fn(disc_fake1, disc_real1)
                    gp1 = self.discPenal.calc_penalty(
                        gen_batch1, batch1, self.d1,
                        args)  # if loss is not wgan-gp then gp=0
                    d1_loss = d1_loss + (gp1 * args.penalty_weight_d)
                gradients_of_discriminator = tape.gradient(
                    d1_loss, self.d1.trainable_variables)
                args.disc_optimizer.apply_gradients(
                    zip(gradients_of_discriminator,
                        self.d1.trainable_variables))

                # Generate a batch of new images
                gen_batch2 = self.g2(noise, training=True)

                # d2
                with tf.GradientTape() as tape:
                    # Disc response
                    disc_real2 = self.d2(batch2, training=True)
                    disc_fake2 = self.d2(gen_batch2, training=True)

                    # Calc loss and penalty
                    d2_loss = d_loss_fn(disc_fake2, disc_real2)
                    gp2 = self.discPenal.calc_penalty(
                        gen_batch2, batch2, self.d2,
                        args)  # if loss is not wgan-gp then gp=0
                    d2_loss = d2_loss + (gp2 * args.penalty_weight_d)
                gradients_of_discriminator = tape.gradient(
                    d2_loss, self.d2.trainable_variables)
                args.disc_optimizer.apply_gradients(
                    zip(gradients_of_discriminator,
                        self.d2.trainable_variables))

                if args.loss == 'wgan' and args.disc_penalty == 'none':
                    self.clip_weights(args.clip)

            # ------------------
            #  Train Generators
            # ------------------

            # Sample noise as generator input
            noise = u.gen_noise(args)

            with tf.GradientTape() as tape:
                gen_fake = self.g1(noise, training=True)
                disc_fake = self.d1(gen_fake, training=True)
                g1_loss = g_loss_fn(disc_fake)
                penalty1 = self.genPenal.calc_penalty(self.g1, self.g2, 20,
                                                      args)
                g1_loss = g1_loss + (penalty1 * args.penalty_weight_g)
            gradients_of_generator1 = tape.gradient(
                g1_loss, self.g1.trainable_variables)
            args.gen_optimizer.apply_gradients(
                zip(gradients_of_generator1, self.g1.trainable_variables))

            with tf.GradientTape() as tape:
                gen_fake = self.g2(noise, training=True)
                disc_fake = self.d2(gen_fake, training=True)
                g2_loss = g_loss_fn(disc_fake)
                penalty2 = self.genPenal.calc_penalty(self.g1, self.g2, 20,
                                                      args)
                g2_loss = g2_loss + (penalty2 * args.penalty_weight_g)
            gradients_of_generator2 = tape.gradient(
                g2_loss, self.g2.trainable_variables)
            args.gen_optimizer.apply_gradients(
                zip(gradients_of_generator2, self.g2.trainable_variables))

            self.full_training_time += time.time() - start
            '''
            # Check if shared weights are equal between generators
            a = self.g1.trainable_variables
            b = self.g2.trainable_variables
            mask = []

            for i in range(8):
                if np.array_equal(a[i].numpy(), b[i].numpy()):
                    mask.append(1)
                else:
                    mask.append(0)
            if 0 in mask:
                print("ERROR - weight sharing failure:" + mask)
            '''

            # Collect loss values
            self.hist_d1.append(d1_loss)
            self.hist_d2.append(d2_loss)
            self.hist_g1.append(g1_loss)
            self.hist_g2.append(g2_loss)

            # If at save interval => save generated image samples
            if epoch % args.images_while_training == 0:
                self.sample_images(epoch, args.seed, args.dir,
                                   args.dataset_dim[3])
                print(
                    "%d [D1 loss: %f] [D2 loss: %f] [G1 loss: %f] [G2 loss: %f] [G1 penalty: %f] [G2 penalty: %f]"
                    % (epoch, d1_loss, d2_loss, g1_loss, g2_loss, penalty1,
                       penalty2))

        self.plot_losses(args.dir)
        self.sample_images(epoch, args.seed, args.dir, args.dataset_dim[3])
        return self.full_training_time
Code Example #12
parser = argparse.ArgumentParser()
parser.add_argument('--dir',            type=str,           default='/user/student.aau.dk/mjuuln15/output_data',     help='Directory to save images, models, weights etc')
parser.add_argument('--sample_itr',            type=int,           default=250)
parser.add_argument('--purpose', type=str, default='')
args = parser.parse_args()

args.dir = 'C:/Users/marku/Desktop/gan_training_output/testing'
args.sample_itr = 10
args.cogan_data = 'mnist2edge'
args.g_arch = 'digit_noshare'
args.d_arch = 'digit_noshare'
args.batch_size = 64
args.noise_dim = 100

u.write_config(args)

X1, X2, shape = d.select_dataset_cogan(args)
args.dataset_dim = shape
gen_a, gen_b, disc_a, disc_b = u.select_cogan_architecture(args)


class CCEncoder(tf.keras.Model):
    def __init__(self, latent_dim):
        super(CCEncoder, self).__init__()
        self.latent_dim = latent_dim

        self.encoder = tf.keras.Sequential(
            [
                tf.keras.layers.InputLayer(input_shape=[32, 32, 1]),
                tf.keras.layers.Conv2D(
Code Example #13
File: Main.py Project: MarkusHald/SCoGAN
#args.dataset = 'celeba'
#args.disc_penalty = 'wgan-gp'
#args.gen_penalty = 'feature'
#args.scale_data = 64
#args.epochs = 2
#args.disc_iters = 1
#args.images_while_training = 10
#args.limit_dataset = True

if args.style_weight == -1:
    args.style_weight = args.content_weight / 8e-4


args.wd = tf.keras.regularizers.l2(args.weight_decay)
args.bi = tf.keras.initializers.Constant(args.bias_init)
args.w_init = u.select_weight_init(args.weight_init)
args.prelu_init = tf.keras.initializers.Constant(args.prelu_init)

# We will reuse this seed over time for visualization
args.seed = u.gen_noise(args, gen_noise_seed=True)

# Set random seeds for reproducibility
tf.random.set_seed(2020)
np.random.seed(2020)

#st.style_transfer([image_celeb], args, 'C:/Users/marku/Desktop/gan_training_output/perceptual/sw_0.00000000001_cw_0.001/20k/celeba/41735/generator1', verbose=True)
#u.latent_walk('C:/users/marku/Desktop/gan_training_output/relax_weight_sharing/26508/generator1','C:/Users/marku/Desktop/gan_training_output/relax_weight_sharing/26508/generator2',100,3)

u.select_optimisers(args)

# Choose gan type
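u.gen_noise is called throughout these examples but its body is never shown. A minimal sketch consistent with the Input(shape=(args.noise_dim,)) generator inputs above; the batch-size choices for the seed and style-transfer cases are guesses:

def gen_noise(args, gen_noise_seed=False, style_transfer=False):
    # Hypothetical: one latent vector per style-transfer image, a fixed
    # visualization batch for the seed, else one per training example
    n = 1 if style_transfer else (16 if gen_noise_seed else args.batch_size)
    return tf.random.normal([n, args.noise_dim])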
Code Example #14
File: GAN_archs.py Project: MarkusHald/SCoGAN
def resnet128_disc(args):
    img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])

    img1 = tf.keras.layers.Input(shape=img_shape)

    x1 = tf.keras.layers.Conv2D(32, (3, 3),
                                strides=(2, 2),
                                padding='same',
                                kernel_initializer=args.w_init,
                                kernel_regularizer=args.wd,
                                bias_initializer=args.bi)(img1)
    x1 = u.get_norm(args.norm)(x1)
    x1 = tf.keras.layers.PReLU(args.prelu_init)(x1)

    x1 = tf.keras.layers.Conv2D(64, (3, 3),
                                strides=(1, 1),
                                padding='same',
                                kernel_initializer=args.w_init,
                                kernel_regularizer=args.wd,
                                bias_initializer=args.bi)(x1)
    x1 = u.get_norm(args.norm)(x1)
    x1 = tf.keras.layers.PReLU(args.prelu_init)(x1)

    x1 = tf.keras.layers.Conv2D(64, (3, 3),
                                strides=(2, 2),
                                padding='same',
                                kernel_initializer=args.w_init,
                                kernel_regularizer=args.wd,
                                bias_initializer=args.bi)(x1)
    x1 = u.get_norm(args.norm)(x1)
    x1 = tf.keras.layers.PReLU(args.prelu_init)(x1)

    x1 = tf.keras.layers.Conv2D(64, (3, 3),
                                strides=(1, 1),
                                padding='same',
                                kernel_initializer=args.w_init,
                                kernel_regularizer=args.wd,
                                bias_initializer=args.bi)(x1)
    x1 = u.get_norm(args.norm)(x1)
    x1 = tf.keras.layers.PReLU(args.prelu_init)(x1)

    model = keras.Sequential()
    model.add(
        tf.keras.layers.Conv2D(128, (3, 3),
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer=args.w_init,
                               kernel_regularizer=args.wd,
                               bias_initializer=args.bi))
    model.add(u.get_norm(args.norm))
    model.add(tf.keras.layers.PReLU(args.prelu_init))
    model.add(tf.keras.layers.Dropout(0.1))

    model.add(
        tf.keras.layers.Conv2D(128, (3, 3),
                               strides=(1, 1),
                               padding='same',
                               kernel_initializer=args.w_init,
                               kernel_regularizer=args.wd,
                               bias_initializer=args.bi))
    model.add(u.get_norm(args.norm))
    model.add(tf.keras.layers.PReLU(args.prelu_init))
    model.add(tf.keras.layers.Dropout(0.1))

    model.add(
        tf.keras.layers.Conv2D(256, (3, 3),
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer=args.w_init,
                               kernel_regularizer=args.wd,
                               bias_initializer=args.bi))
    model.add(u.get_norm(args.norm))
    model.add(tf.keras.layers.PReLU(args.prelu_init))
    model.add(tf.keras.layers.Dropout(0.3))

    model.add(
        tf.keras.layers.Conv2D(512, (3, 3),
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer=args.w_init,
                               kernel_regularizer=args.wd,
                               bias_initializer=args.bi))
    model.add(u.get_norm(args.norm))
    model.add(tf.keras.layers.PReLU(args.prelu_init))
    model.add(tf.keras.layers.Dropout(0.3))

    model.add(
        tf.keras.layers.Conv2D(1024, (3, 3),
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer=args.w_init,
                               kernel_regularizer=args.wd,
                               bias_initializer=args.bi))
    model.add(u.get_norm(args.norm))
    model.add(tf.keras.layers.PReLU(args.prelu_init))
    model.add(tf.keras.layers.Dropout(0.5))

    model.add(tf.keras.layers.Flatten())
    model.add(
        tf.keras.layers.Dense(2048,
                              kernel_initializer=args.w_init,
                              kernel_regularizer=args.wd))
    model.add(u.get_norm(args.norm))
    model.add(tf.keras.layers.PReLU(args.prelu_init))
    model.add(tf.keras.layers.Dropout(0.5))

    model.add(
        tf.keras.layers.Dense(1,
                              kernel_initializer=args.w_init,
                              kernel_regularizer=args.wd))

    output1 = model(x1)

    return keras.Model(img1, output1)