def __init__(self, neural_network_layers):
    """Build a plain GAN graph from a layer-size specification.

    Parameters
    ----------
    neural_network_layers : dict
        Must contain keys "gen" and "dis", each a list of layer sizes.
        The discriminator's input layer must equal the generator's output
        layer, since the discriminator consumes generated samples.
    """
    # Sanity check: generator output feeds the discriminator, so the sizes
    # must line up.  NOTE(review): consider raising ValueError instead of
    # printing and continuing with an inconsistent configuration.
    if neural_network_layers["dis"][0] != neural_network_layers["gen"][-1]:
        print(
            "Error: the first layer in the discriminator should be equal "
            "to the last layer in the generator"
        )

    self.neural_network_layers = neural_network_layers

    # Generator acts like a decoder; sigmoid output keeps samples in [0, 1].
    self.generator = NeuNet.neural_network(
        neural_network_layers["gen"],
        actf={-1: "sigm"})
    self.discriminator = NeuNet.neural_network(
        neural_network_layers["dis"], actf={-1: "sigm"})

    # X: real images fed to the discriminator; Z: latent noise fed to the
    # generator.
    self.X, self.Z = NeuNet.model.create_placeholders([
        neural_network_layers["dis"][0],
        neural_network_layers["gen"][0]
    ])
    self.sampler_Z = NeuNet.model.extra.random_sample(self.Z,
                                                      mode="normal")

    # Wires up self.D_real / self.D_fake (defined elsewhere on this class).
    self.create_D_fake__D_real()

    self.D_loss, self.D_solver, self.G_loss, self.G_solver = GAN.loss(
        self.D_real, self.D_fake, self.generator.var_list(),
        self.discriminator.var_list())
def __init__(self,
                 decoder_layers,
                 encoder_layers,
                 gamma_100=100.0,
                 capacity_25=25.0,
                 block_b_vae=True):
        """Build a (beta-)VAE graph from encoder/decoder layer-size specs.

        Parameters
        ----------
        decoder_layers, encoder_layers : lists of layer sizes; encoder_layers[0]
            is the input (image) size, decoder_layers[-1] the reconstruction size.
        gamma_100 : beta-VAE gamma weight passed to VAE.disentangle.
        capacity_25 : beta-VAE KL capacity passed to VAE.disentangle.
        block_b_vae : forwarded to VAE.disentangle; when it blocks the
            capacity term the latent loss is used as-is (plain VAE).
        """
        self.decoder_layers, self.encoder_layers, self.gamma_100, self.capacity_25, self.block_b_vae = decoder_layers, encoder_layers, gamma_100, capacity_25, block_b_vae

        # X_in: input images for the encoder; Y: reconstruction target.
        self.X_in, self.Y = NeuNet.model.create_placeholders({
            "X_in":
            encoder_layers[0],
            "Y":
            decoder_layers[-1]
        })

        self.encoder_net = NeuNet.neural_network(
            encoder_layers,
            actf={-1:
                  "none"})  # linear head: carries both mean and log-variance, split by reparameterization()
        self.decoder = NeuNet.neural_network(decoder_layers, actf={-1: "sigm"})
        self.encoder = lambda x: VAE.reparameterization(self.encoder_net(x))

        self.z, self.z_mean, self.z_log_sigma_sq = self.encoder(self.X_in)
        self.OUT = self.decoder(self.z)  # reconstruction decoded from latent z

        # Kullback-Leibler divergence of q(z|x) from the unit Gaussian.
        self.latent_loss = -0.5 * tf.reduce_sum(
            1.0 + self.z_log_sigma_sq - tf.square(self.z_mean) - tf.exp(
                self.z_log_sigma_sq), 1)  # sum over axis 1 collapses the latent dimension
        # Reconstruction loss: index [0] selects binary cross-entropy; the
        # squared-difference alternative at index [1] is built but unused.
        self.img_loss = [
            -tf.reduce_sum(VAE.binary_cross_entropy(self.Y, self.OUT), [1, 2]),
            tf.reduce_sum(tf.squared_difference(self.Y, self.OUT), [1, 2])
        ][0]
        self.latent_loss = tf.reduce_mean(
            VAE.disentangle(self.latent_loss, gamma_100, capacity_25,
                            block_b_vae)
        )  # B-VAE capacity term, cf. https://github.com/miyosuda/disentangled_vae/blob/master/model.py
        self.img_loss = tf.reduce_mean(self.img_loss)
        # Jointly optimize reconstruction + (weighted) KL over both networks.
        self.loss, self.optimizer = NeuNet.train(
            self.img_loss + self.latent_loss,
            self.decoder.var_list() + self.encoder_net.var_list())

        def binary_cross_entropy(A, B):
            """Elementwise binary cross-entropy term: A*log(B) + (1-A)*log(1-B)."""
            # NOTE(review): this local definition is never called in this
            # constructor (the code above uses VAE.binary_cross_entropy);
            # it appears to be a stray copy — confirm before removing.
            return ((A) * safe_log(B)) + ((1 - A) * safe_log(1 - B))
 def __init__(self,neural_network_layers):
     """Build an InfoGAN graph: generator, discriminator and Q-network.

     *neural_network_layers* must provide "gen", "dis" and "q_net" layer-size
     lists.  Relies on module-level no_of_noise_channels16 / no_of_classes10
     for the widths of the noise and class-code placeholders.
     """
     self.gen_layers, self.dis_layers, self.q_net_layers = neural_network_layers["gen"], neural_network_layers["dis"], neural_network_layers["q_net"]

     self.generator     = NeuNet.neural_network( self.gen_layers  , actf={-1:"sigm"}) # similar to the decoder; sigmoid keeps outputs in [0, 1]
     self.discriminator = NeuNet.neural_network( self.dis_layers  , actf={-1:"sigm"})
     self.q_net         = NeuNet.neural_network( self.q_net_layers, actf={-1:"soft"}) # softmax head predicting the class code c

     self.X, self.Z, self.c = NeuNet.model.create_placeholders([self.dis_layers[0], no_of_noise_channels16, no_of_classes10]) # X is the image fed into q_net + discriminator; Z, c go into the generator

     # NOTE(review): mode "nomial" is presumably a multinomial/one-hot draw
     # (an analogous snippet in this file uses mode="onehot") — confirm
     # against random_sample's accepted modes.
     self.sampler_Z, self.sampler_c = NeuNet.model.extra.random_sample(self.Z, mode="uniform") , NeuNet.model.extra.random_sample(self.c, mode="nomial" )

     self.G_sample    = self.generator(tf.concat(axis=1, values=[self.Z, self.c]))
     self.D_real      = self.discriminator(self.X)
     self.D_fake      = self.discriminator(self.G_sample)
     self.Q_c_given_x = self.q_net(self.G_sample)
     ##############################################################################
     # NOTE(review): the objectives below assume NeuNet.train maximizes its
     # first argument (log D(x) + log(1-D(G(z))) is the discriminator's gain) — confirm.
     self.D_loss, self.D_solver = NeuNet.train(  safe_log(self.D_real ) + safe_log(1 - self.D_fake) ,  self.discriminator.var_list())
     self.G_loss, self.G_solver = NeuNet.train(  safe_log(self.D_fake )                             ,  self.generator.var_list()    )
     self.Q_loss, self.Q_solver = NeuNet.train( -tf.reduce_sum(safe_log(self.Q_c_given_x )*self.c,1),  self.generator.var_list() + self.q_net.var_list()    )# cross-entropy between c and Q(c|x): InfoGAN mutual-information term
# Example #4
# 0
# (scraper artifact: the snippets above and below are separate examples)
                return z, z_mean, z_log_sigma_sq
            
        def seperate_latent(lat):
            """Split a latent tensor into its control part and its VAE part.

            The first three channels of row 1 act as control signals
            ([noise, is_it_vae, critic_is_it_real], per the original note);
            everything from channel 3 onward is the conventional latent code.
            """
            control_part = lat[:, 1, [0, 1, 2]]
            vae_part = lat[:, :, 3:]
            return control_part, vae_part
       
        def VAE_encode_new_reparameterization(encoder_net, enc_in):
            """Encode *enc_in*, split off the control channels, and
            reparameterize the remaining latent code (train mode).

            Returns (control_part, z, z_mean, z_log_sigma_sq).
            """
            raw_latent = encoder_net(enc_in)
            control_part, code = seperate_latent(raw_latent)
            z, z_mean, z_log_sigma_sq = NeuNet.model.extra.VAE.reparameterization(
                code, mode="train")
            return control_part, z, z_mean, z_log_sigma_sq
    
        # Build encoder/decoder layer specs for 28x28 images; the arguments
        # (100, 10) size the latent code — NOTE(review): confirm their exact
        # meaning against create_encoder_generator_layer_sizes.
        encoder_layers, decoder_layers    = NeuNet.model.extra.VAE.create_encoder_generator_layer_sizes((28,28), 100, 10)
        # Placeholders at every network boundary (encoder in/out, decoder in/out).
        enc_in, enc_out, dec_in, dec_out  = NeuNet.model.create_placeholders({"enc_in":encoder_layers[0],"enc_out":encoder_layers[-1],"dec_in":decoder_layers[0], "dec_out":decoder_layers[-1]})
        # Linear ("none") output heads: the encoder output is split and
        # reparameterized below rather than activated directly.
        encoder_net                       = NeuNet.neural_network(encoder_layers, actf={-1:"none"})
        decoder                           = NeuNet.neural_network(decoder_layers, actf={-1:"none"})

        # Reparameterize the encoder output: latp1 carries the control
        # channels, (z, z_mean, z_log_sigma_sq) the sampled latent code.
        latp1,z, z_mean, z_log_sigma_sq = VAE_encode_new_reparameterization(encoder_net, enc_in)
        
    # n is noise
    # latp1 = [noise_level, is_it_vae, critic_is_it_real]; latp2 is the traditional latent.
    # lat = latp1 + latp2; LAT([a,b,c], n) = [a,b,c] + latp2 + n (n being noise added to latp2); e2() just gives you latp2.

    # maybe latp1 = [noise_level, vae?, fake?]  — "vae?" meaning the image is real but has been encoded and decoded.

    # First runs (traditional VAE):      d(e(img+n)), img
    # so they roughly match the distributions.

    # Second runs:                       e(d(e(img))), e(img)
    # This is a bit more complex, but it means the encoder and decoder should be more symmetric.
    return np.random.uniform(-1., 1., size=[m, n])


def sample_c(m, n_classes=10):
    """Draw *m* one-hot class codes sampled uniformly over *n_classes*.

    Each row is a single draw from a uniform multinomial, i.e. exactly one
    entry is 1 and the rest are 0.

    Parameters
    ----------
    m : int
        Number of samples (rows).
    n_classes : int, optional
        Number of categories; defaults to 10 to stay backward compatible
        with the original hard-coded MNIST setup.

    Returns
    -------
    numpy.ndarray of shape (m, n_classes)
    """
    return np.random.multinomial(1, n_classes * [1.0 / n_classes], size=m)


def get_noises_and_images(batch_size):
    """Fetch one training batch plus matching noise and class codes.

    Returns a 4-tuple: (images, labels, Z-noise of width
    number_of_noise_channels16, one-hot class samples).  Per the original
    note, labels are returned but not used by the GAN training loop.
    """
    # Pull the next minibatch from the in-memory training set
    # (replaces the old mnist.train.next_batch call).
    images, labels = batcher(x_train, y_train, batchsize=batch_size)
    noise = sample_Z(batch_size, number_of_noise_channels16)
    class_codes = sample_c(batch_size)
    return images, labels, noise, class_codes


# Generator maps (noise + class code) -> flat image; sigmoid keeps pixels in [0, 1].
generator = NeuNet.neural_network([total_for_generator26, 256, img_size784],
                                  actf={-1: "sigm"})  # similar to the decoder
discriminator = NeuNet.neural_network([img_size784, 128, 1], actf={-1: "sigm"})
# Q-network: softmax over the 10 class codes (InfoGAN latent-code head).
q_net = NeuNet.neural_network([img_size784, 128, 10], actf={-1: "soft"})

X, Z, c = NeuNet.model.create_placeholders([
    img_size784, number_of_noise_channels16, number_of_classes10
])  # X is the image fed into q_net + discriminator; Z, c go into the generator

G_sample = generator(tf.concat(axis=1, values=[Z, c]))
D_real = discriminator(X)
D_fake = discriminator(G_sample)
Q_c_given_x = q_net(G_sample)
##############################################################################
# Discriminator objective log D(x) + log(1 - D(G(z))); the 1e-8 guards log(0).
# NOTE(review): assumes NeuNet.train maximizes the given objective — confirm.
D_loss, D_solver = NeuNet.train(
    tf.log(D_real + 1e-8) + tf.log(1 - D_fake + 1e-8),
    discriminator.var_list())
# Example #6
# 0
# (scraper artifact: the snippets above and below are separate examples)
def disentangle(latent_loss,
                gamma=gamma_100,
                capacity=capacity_25,
                block=False):
    """Beta-VAE capacity objective: gamma * |latent_loss - capacity|.

    With block=True the latent loss passes through untouched (plain VAE).
    Defaults are captured from module-level gamma_100 / capacity_25 at
    definition time.
    """
    return latent_loss if block else gamma * tf.abs(latent_loss - capacity)


# Placeholders: X_in is the encoder input image, Y the reconstruction target
# (both sized as a 28x28 image).
X_in, Y = NeuNet.model.create_placeholders({
    "X_in": imgsz28_28,
    "Y": imgsz28_28
})
#X_in, Y = NeuNet.model.create_placeholders( {"X_in":neural_network_layers["encoder"][ 0], "Y":neural_network_layers["decoder"][-1]} )
encoder_net = NeuNet.neural_network(
    neural_network_layers["encoder"],
    actf={-1: "none"})  # linear head: carries mean and log-variance, split by reparameterization()
decoder = NeuNet.neural_network(neural_network_layers["decoder"],
                                actf={-1: "sigm"})
encoder = lambda x: reparameterization(encoder_net(x))

z, z_mean, z_log_sigma_sq = encoder(X_in)
# NOTE(review): rebinding the name VAE to a tensor shadows any VAE
# class/module of the same name used elsewhere in this file — confirm intended.
VAE = decoder(z)  # reconstruction decoded from latent z

# Kullback-Leibler divergence of q(z|x) from the unit Gaussian.
latent_loss = -0.5 * tf.reduce_sum(
    1.0 + z_log_sigma_sq - tf.square(z_mean) - tf.exp(z_log_sigma_sq),
    1)  # sum over axis 1 collapses the latent dimension
img_loss = [
    -tf.reduce_sum(binary_cross_entropy(Y, VAE), [1, 2]),
    tf.reduce_sum(tf.squared_difference(Y, VAE), [1, 2])
# Problem constants: image shape, number of class codes, noise width.
imgsz28_28 = (28, 28)
no_of_classes10 = 10
no_of_noise_channels16 = 16
# Generator input width = class one-hot channels + noise channels.
total_for_generator26 = no_of_classes10 + no_of_noise_channels16
# Layer-size spec per sub-network; the (28, 28) tuple stands for the image.
neural_network_layers = {
    "gen": [total_for_generator26, 256, imgsz28_28],
    "dis": [imgsz28_28, 128, 1],
}

if MODE in ["INFOGAN"]:
    print("Running INFOGAN ... ")

    # Q-network: image in, 10 class codes out.
    neural_network_layers["q_net"] = [imgsz28_28, 128, 10]  # 784 in the image

    #%%##################################################################################################################################

    # Generator maps (noise + class code) -> image; sigmoid keeps pixels in [0, 1].
    generator = NeuNet.neural_network(neural_network_layers["gen"],
                                      actf={-1:
                                            "sigm"})  # similar to the decoder
    discriminator = NeuNet.neural_network(neural_network_layers["dis"],
                                          actf={-1: "sigm"})
    q_net = NeuNet.neural_network(neural_network_layers["q_net"],
                                  actf={-1: "soft"})

    X, Z, c = NeuNet.model.create_placeholders([
        imgsz28_28, no_of_noise_channels16, no_of_classes10
    ])  # X is the image fed into q_net + discriminator; Z, c go into the generator

    # Samplers: Gaussian noise for Z, one-hot categorical draws for c.
    sampler_Z, sampler_c = NeuNet.model.extra.random_sample(
        Z, mode="normal"), NeuNet.model.extra.random_sample(c, mode="onehot")

    G_sample = generator(tf.concat(axis=1, values=[Z, c]))
    D_fake = discriminator(G_sample)