def __init__(self, num_epochs, keep_prob):
        self.num_epochs = num_epochs
        self.keep_prob = keep_prob
        self.train_total_data, train_size, _, _, test_data, test_labels = mnist_data.prepare_MNIST_data()
        map_test_labels = {}

        map_labels = defaultdict(list)

        for i in range(100):
            item = test_labels[i]
            map_labels[get_label(item)].append(i)

        self.n_samples = train_size

        self.x_hat = tf.placeholder(
            tf.float32, shape=[None, dim_img], name='input_img')
        self.x = tf.placeholder(
            tf.float32, shape=[None, dim_img], name='target_img')
        # self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        z_in = tf.placeholder(
            tf.float32, shape=[None, dim_z], name='latent_variable')
        self.y, self.z, self.loss, self.neg_marginal_likelihood, self.KL_divergence = vae.autoencoder(
            self.x_hat, self.x, dim_img, dim_z, n_hidden, self.keep_prob)
        self.train_op = tf.train.AdamOptimizer(learn_rate).minimize(self.loss)

        self.PRR = plot_utils.Plot_Reproduce_Performance(
            RESULTS_DIR, PRR_n_img_x, PRR_n_img_y, IMAGE_SIZE, IMAGE_SIZE, PRR_resize_factor)

        self.x_PRR = test_data[0:self.PRR.n_tot_imgs, :]

        x_PRR_img = self.x_PRR.reshape(
            self.PRR.n_tot_imgs, IMAGE_SIZE, IMAGE_SIZE)
        self.PRR.save_images(x_PRR_img, name='input.jpg')

        self.x_PRR = self.x_PRR * np.random.randint(2, size=self.x_PRR.shape)
        self.x_PRR += np.random.randint(2, size=self.x_PRR.shape)

        x_PRR_img = self.x_PRR.reshape(self.PRR.n_tot_imgs, IMAGE_SIZE, IMAGE_SIZE)
        self.PRR.save_images(x_PRR_img, name='input_noise.jpg')

        # train
        self.total_batch = int(self.n_samples / batch_size)
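
Note: the two noise lines above follow the salt-and-pepper corruption pattern used throughout these examples: multiplying by a random 0/1 mask zeroes roughly half of the pixels, and adding a second 0/1 mask pushes roughly half of them up by one. A minimal standalone numpy sketch of that corruption (the batch shape here is illustrative only, not taken from the class above):

import numpy as np

def salt_and_pepper(images, rng=np.random):
    # zero ("pepper") ~50% of pixels, then raise ("salt") ~50% of pixels by 1
    noisy = images * rng.randint(2, size=images.shape)
    noisy = noisy + rng.randint(2, size=images.shape)
    return noisy

# illustrative usage on a fake batch of flattened 28x28 images
batch = np.random.rand(16, 784)
noisy_batch = salt_and_pepper(batch)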
Example n. 2
def main(args):
    """ parameters """
    RESULTS_DIR = ss.path.DATADIR + "vae/" + args.results_path

    # network architecture
    ADD_NOISE = args.add_noise

    n_hidden = args.n_hidden
    dim_img = IMAGE_SIZE_MNIST**2  # number of pixels for a MNIST image
    dim_z = args.dim_z

    # train
    n_epochs = args.num_epochs
    batch_size = args.batch_size
    learn_rate = args.learn_rate

    # Plot
    PRR = args.PRR  # Plot Reproduce Result
    PRR_n_img_x = args.PRR_n_img_x  # number of images along x-axis in a canvas
    PRR_n_img_y = args.PRR_n_img_y  # number of images along y-axis in a canvas
    PRR_resize_factor = args.PRR_resize_factor  # resize factor for each image in a canvas

    PMLR = args.PMLR  # Plot Manifold Learning Result
    PMLR_n_img_x = args.PMLR_n_img_x  # number of images along x-axis in a canvas
    PMLR_n_img_y = args.PMLR_n_img_y  # number of images along y-axis in a canvas
    PMLR_resize_factor = args.PMLR_resize_factor  # resize factor for each image in a canvas
    PMLR_z_range = args.PMLR_z_range  # range for random latent vector
    PMLR_n_samples = args.PMLR_n_samples  # number of labeled samples to plot a map from input data space to the latent space
    """ prepare MNIST data """

    train_total_data, train_size, _, _, test_data, test_labels = mnist_data.prepare_MNIST_data(
    )
    n_samples = train_size
    """ build graph """

    # input placeholders
    # In denoising-autoencoder, x_hat == x + noise, otherwise x_hat == x
    x_hat = tf.placeholder(tf.float32, shape=[None, dim_img], name='input_img')
    x = tf.placeholder(tf.float32, shape=[None, dim_img], name='target_img')

    # dropout
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    # input for PMLR
    z_in = tf.placeholder(tf.float32,
                          shape=[None, dim_z],
                          name='latent_variable')

    # network architecture
    y, z, loss, neg_marginal_likelihood, KL_divergence = vae.autoencoder(
        x_hat, x, dim_img, dim_z, n_hidden, keep_prob)

    # optimization
    train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)
    """ training """

    # Plot for reproduce performance
    if PRR:
        PRR = plot_utils.Plot_Reproduce_Performance(RESULTS_DIR, PRR_n_img_x,
                                                    PRR_n_img_y,
                                                    IMAGE_SIZE_MNIST,
                                                    IMAGE_SIZE_MNIST,
                                                    PRR_resize_factor)

        x_PRR = test_data[0:PRR.n_tot_imgs, :]

        x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                  IMAGE_SIZE_MNIST)
        PRR.save_images(x_PRR_img, name='input.jpg')

        if ADD_NOISE:
            x_PRR = x_PRR * np.random.randint(2, size=x_PRR.shape)
            x_PRR += np.random.randint(2, size=x_PRR.shape)

            x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                      IMAGE_SIZE_MNIST)
            PRR.save_images(x_PRR_img, name='input_noise.jpg')

    # Plot for manifold learning result
    if PMLR and dim_z == 2:

        PMLR = plot_utils.Plot_Manifold_Learning_Result(
            RESULTS_DIR, PMLR_n_img_x, PMLR_n_img_y, IMAGE_SIZE_MNIST,
            IMAGE_SIZE_MNIST, PMLR_resize_factor, PMLR_z_range)

        x_PMLR = test_data[0:PMLR_n_samples, :]
        id_PMLR = test_labels[0:PMLR_n_samples, :]

        if ADD_NOISE:
            x_PMLR = x_PMLR * np.random.randint(2, size=x_PMLR.shape)
            x_PMLR += np.random.randint(2, size=x_PMLR.shape)

        decoded = vae.decoder(z_in, dim_img, n_hidden)

    # train
    total_batch = int(n_samples / batch_size)
    min_tot_loss = 1e99

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer(), feed_dict={keep_prob: 0.9})

        for epoch in range(n_epochs):

            # Random shuffling
            np.random.shuffle(train_total_data)
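            # train_total_data rows hold the flattened image pixels followed by the
            # one-hot label columns; the slice below keeps only the pixels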
            train_data_ = train_total_data[:, :-mnist_data.NUM_LABELS]

            # Loop over all batches
            for i in range(total_batch):
                # Compute the offset of the current minibatch in the data.
                offset = (i * batch_size) % (n_samples)
                batch_xs_input = train_data_[offset:(offset + batch_size), :]

                batch_xs_target = batch_xs_input

                # add salt & pepper noise
                if ADD_NOISE:
                    batch_xs_input = batch_xs_input * np.random.randint(
                        2, size=batch_xs_input.shape)
                    batch_xs_input += np.random.randint(
                        2, size=batch_xs_input.shape)

                _, tot_loss, loss_likelihood, loss_divergence = sess.run(
                    (train_op, loss, neg_marginal_likelihood, KL_divergence),
                    feed_dict={
                        x_hat: batch_xs_input,
                        x: batch_xs_target,
                        keep_prob: 0.9
                    })

            # print cost every epoch
            print(
                "epoch %d: L_tot %03.2f L_likelihood %03.2f L_divergence %03.2f"
                % (epoch, tot_loss, loss_likelihood, loss_divergence))

            # if minimum loss is updated or final epoch, plot results
            if min_tot_loss > tot_loss or epoch + 1 == n_epochs:
                min_tot_loss = tot_loss
                # Plot for reproduce performance
                if PRR:
                    y_PRR = sess.run(y, feed_dict={x_hat: x_PRR, keep_prob: 1})
                    y_PRR_img = y_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                              IMAGE_SIZE_MNIST)
                    PRR.save_images(y_PRR_img,
                                    name="/PRR_epoch_%02d" % (epoch) + ".jpg")

                # Plot for manifold learning result
                if PMLR and dim_z == 2:
                    y_PMLR = sess.run(decoded,
                                      feed_dict={
                                          z_in: PMLR.z,
                                          keep_prob: 1
                                      })
                    y_PMLR_img = y_PMLR.reshape(PMLR.n_tot_imgs,
                                                IMAGE_SIZE_MNIST,
                                                IMAGE_SIZE_MNIST)
                    PMLR.save_images(y_PMLR_img,
                                     name="/PMLR_epoch_%02d" % (epoch) +
                                     ".jpg")

                    # plot distribution of labeled images
                    z_PMLR = sess.run(z,
                                      feed_dict={
                                          x_hat: x_PMLR,
                                          keep_prob: 1
                                      })
                    PMLR.save_scattered_image(z_PMLR,
                                              id_PMLR,
                                              name="/PMLR_map_epoch_%02d" %
                                              (epoch) + ".jpg")
Example n. 3
def main(args):

    """ parameters """
    RESULTS_DIR = args.results_path

    n_hidden = args.n_hidden
    dim_img = IMAGE_SIZE_MNIST**2  # number of pixels for a MNIST image
    dim_z = args.dim_z

    # train
    n_epochs = args.num_epochs
    batch_size = args.batch_size
    learn_rate = args.learn_rate

    # Plotting
    PRR_n_img_x = 4              # number of images along x-axis in a canvas
    PRR_n_img_y = 4              # number of images along y-axis in a canvas
    PRR_resize_factor = 1.0  # resize factor for each image in a canvas

    """ prepare MNIST data """

    train_total_data, train_size, _, _, test_data, test_labels = mnist_data.prepare_MNIST_data()
    n_samples = train_size

    # input placeholders
    # In denoising-autoencoder, x_hat == x + noise, otherwise x_hat == x
    x_hat = tf.placeholder(tf.float32, shape=[None, dim_img], name='input_img')
    x = tf.placeholder(tf.float32, shape=[None, dim_img], name='target_img')
    labels = tf.placeholder(tf.float32, shape=[None, 10], name='target_label')

    keep_prob = tf.placeholder(tf.float32, name='keep_prob')


    # network architecture
    y, z, loss, neg_marginal_likelihood, KL_divergence = cvae.autoencoder(x_hat, x, labels, dim_img, dim_z, n_hidden, keep_prob)

    # optimization
    train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    """ training """

    # Plot for reproduce performance

    PRR = plot_utils.Plot_Reproduce_Performance(RESULTS_DIR, PRR_n_img_x, PRR_n_img_y, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST, PRR_resize_factor)

    x_PRR = test_data[0:PRR.n_tot_imgs, :]
    label_PRR = test_labels[0:PRR.n_tot_imgs, :]
    x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST)
    PRR.save_images(x_PRR_img, name='input.jpg')



    # train
    total_batch = int(n_samples / batch_size)
    min_tot_loss = 1e99

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer(), feed_dict={keep_prob : 0.9})

        for epoch in range(n_epochs):

            # Random shuffling
            np.random.shuffle(train_total_data)
            train_data_ = train_total_data[:, :-mnist_data.NUM_LABELS]
            train_label_ = train_total_data[:,-mnist_data.NUM_LABELS:]
            # Loop over all batches
            for i in range(total_batch):
                # Compute the offset of the current minibatch in the data.
                offset = (i * batch_size) % (n_samples)
                batch_xs_input = train_data_[offset:(offset + batch_size), :]
                batch_xs_target = batch_xs_input
                batch_xs_labels = train_label_[offset:(offset + batch_size), :]

                _, tot_loss, loss_likelihood, loss_divergence = sess.run(
                    (train_op, loss, neg_marginal_likelihood, KL_divergence),
                    feed_dict={x_hat: batch_xs_input, x: batch_xs_target, labels: batch_xs_labels, keep_prob: 0.9})

            print("epoch %d: L_tot %03.2f" %(epoch,tot_loss))
            # if minimum loss is updated or final epoch, plot results
            if min_tot_loss > tot_loss or epoch+1 == n_epochs:
                min_tot_loss = tot_loss
                # Plot for reproduce performance
                y_PRR = sess.run(y, feed_dict={x_hat: x_PRR, labels: label_PRR, keep_prob: 1})
                y_PRR_img = y_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST)
                PRR.save_images(y_PRR_img, name="/PRR_epoch_%02d" % (epoch) + ".jpg")
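
cvae.autoencoder is likewise external to this example; a typical conditional VAE injects the one-hot label by concatenating it with the encoder input and with the latent code before decoding. A minimal sketch of that conditioning step (the function name is hypothetical):

import tensorflow as tf

def concat_condition(x_hat, z, labels):
    # condition the encoder on the image plus its label, and the decoder on z plus the label
    encoder_input = tf.concat([x_hat, labels], axis=1)
    decoder_input = tf.concat([z, labels], axis=1)
    return encoder_input, decoder_input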
Example n. 4
def main(args):
    """ parameters """
    RESULTS_DIR = args.results_path

    # network architecture

    n_hidden = args.n_hidden
    dim_img = IMAGE_SIZE_MNIST**2  # number of pixels for a MNIST image
    dim_z = 2  # to visualize learned manifold

    # train
    n_epochs = args.num_epochs
    batch_size = args.batch_size
    learn_rate = args.learn_rate

    # Plot
    PRR = args.PRR  # Plot Reproduce Result
    PRR_n_img_x = args.PRR_n_img_x  # number of images along x-axis in a canvas
    PRR_n_img_y = args.PRR_n_img_y  # number of images along y-axis in a canvas
    PRR_resize_factor = args.PRR_resize_factor  # resize factor for each image in a canvas

    PMLR = args.PMLR  # Plot Manifold Learning Result
    PMLR_n_img_x = args.PMLR_n_img_x  # number of images along x-axis in a canvas
    PMLR_n_img_y = args.PMLR_n_img_y  # number of images along y-axis in a canvas
    PMLR_resize_factor = args.PMLR_resize_factor  # resize factor for each image in a canvas
    PMLR_z_range = args.PMLR_z_range  # range for random latent vector
    PMLR_n_samples = args.PMLR_n_samples  # number of labeled samples to plot a map from input data space to the latent space
    """ prepare MNIST data """

    train_total_data, train_size, _, _, test_data, test_labels = mnist_data.prepare_MNIST_data(
    )
    n_samples = train_size
    """ build graph """

    # input placeholders
    # In denoising-autoencoder, x_hat == x + noise, otherwise x_hat == x
    x_hat = tf.placeholder(tf.float32, shape=[None, dim_img], name='input_img')
    x = tf.placeholder(tf.float32, shape=[None, dim_img], name='target_img')
    x_id = tf.placeholder(tf.float32, shape=[None, 10], name='input_img_label')

    # dropout
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    # input for PMLR
    z_in = tf.placeholder(tf.float32,
                          shape=[None, dim_z],
                          name='latent_variable')

    # samples drawn from prior distribution
    z_sample = tf.placeholder(tf.float32,
                              shape=[None, dim_z],
                              name='prior_sample')
    z_id = tf.placeholder(tf.float32,
                          shape=[None, 10],
                          name='prior_sample_label')

    # network architecture
    y, z, neg_marginal_likelihood, D_loss, G_loss = aae.adversarial_autoencoder(
        x_hat, x, x_id, z_sample, z_id, dim_img, dim_z, n_hidden, keep_prob)

    # optimization
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if "discriminator" in var.name]
    g_vars = [var for var in t_vars if "MLP_encoder" in var.name]
    ae_vars = [
        var for var in t_vars
        if "MLP_encoder" in var.name or "MLP_decoder" in var.name
    ]

    train_op_ae = tf.train.AdamOptimizer(learn_rate).minimize(
        neg_marginal_likelihood, var_list=ae_vars)
    train_op_d = tf.train.AdamOptimizer(learn_rate / 5).minimize(
        D_loss, var_list=d_vars)
    train_op_g = tf.train.AdamOptimizer(learn_rate).minimize(G_loss,
                                                             var_list=g_vars)
    """ training """

    # Plot for reproduce performance
    if PRR:
        PRR = plot_utils.Plot_Reproduce_Performance(RESULTS_DIR, PRR_n_img_x,
                                                    PRR_n_img_y,
                                                    IMAGE_SIZE_MNIST,
                                                    IMAGE_SIZE_MNIST,
                                                    PRR_resize_factor)

        x_PRR = test_data[0:PRR.n_tot_imgs, :]

        x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                  IMAGE_SIZE_MNIST)
        PRR.save_images(x_PRR_img, name='input.jpg')

    # Plot for manifold learning result
    if PMLR and dim_z == 2:

        PMLR = plot_utils.Plot_Manifold_Learning_Result(
            RESULTS_DIR, PMLR_n_img_x, PMLR_n_img_y, IMAGE_SIZE_MNIST,
            IMAGE_SIZE_MNIST, PMLR_resize_factor, PMLR_z_range)

        x_PMLR = test_data[0:PMLR_n_samples, :]
        id_PMLR = test_labels[0:PMLR_n_samples, :]

        decoded = aae.decoder(z_in, dim_img, n_hidden)

    # train
    total_batch = int(n_samples / batch_size)
    min_tot_loss = 1e99

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer(), feed_dict={keep_prob: 0.9})

        for epoch in range(n_epochs):

            # Random shuffling
            np.random.shuffle(train_total_data)
            train_data_ = train_total_data[:, :-mnist_data.NUM_LABELS]
            train_label_ = train_total_data[:, -mnist_data.NUM_LABELS:]

            # Loop over all batches
            for i in range(total_batch):
                # Compute the offset of the current minibatch in the data.
                offset = (i * batch_size) % (n_samples)
                batch_xs_input = train_data_[offset:(offset + batch_size), :]
                batch_ids_input = train_label_[offset:(offset + batch_size), :]
                batch_xs_target = batch_xs_input

                # draw samples from prior distribution
                if args.prior_type == 'mixGaussian':
                    z_id_ = np.random.randint(0, 10, size=[batch_size])
                    samples = prior.gaussian_mixture(batch_size,
                                                     dim_z,
                                                     label_indices=z_id_)
                elif args.prior_type == 'swiss_roll':
                    z_id_ = np.random.randint(0, 10, size=[batch_size])
                    samples = prior.swiss_roll(batch_size,
                                               dim_z,
                                               label_indices=z_id_)
                elif args.prior_type == 'normal':
                    samples, z_id_ = prior.gaussian(batch_size,
                                                    dim_z,
                                                    use_label_info=True)
                else:
                    raise Exception("[!] There is no option for " +
                                    args.prior_type)

                z_id_one_hot_vector = np.zeros((batch_size, 10))
                z_id_one_hot_vector[np.arange(batch_size), z_id_] = 1

                # reconstruction loss
                _, loss_likelihood = sess.run(
                    (train_op_ae, neg_marginal_likelihood),
                    feed_dict={
                        x_hat: batch_xs_input,
                        x: batch_xs_target,
                        x_id: batch_ids_input,
                        z_sample: samples,
                        z_id: z_id_one_hot_vector,
                        keep_prob: 0.9
                    })

                # discriminator loss
                _, d_loss = sess.run(
                    (train_op_d, D_loss),
                    feed_dict={
                        x_hat: batch_xs_input,
                        x: batch_xs_target,
                        x_id: batch_ids_input,
                        z_sample: samples,
                        z_id: z_id_one_hot_vector,
                        keep_prob: 0.9
                    })

                # generator loss
                for _ in range(2):
                    _, g_loss = sess.run(
                        (train_op_g, G_loss),
                        feed_dict={
                            x_hat: batch_xs_input,
                            x: batch_xs_target,
                            x_id: batch_ids_input,
                            z_sample: samples,
                            z_id: z_id_one_hot_vector,
                            keep_prob: 0.9
                        })

            tot_loss = loss_likelihood + d_loss + g_loss

            # print cost every epoch
            print(
                "epoch %d: L_tot %03.2f L_likelihood %03.2f d_loss %03.2f g_loss %03.2f"
                % (epoch, tot_loss, loss_likelihood, d_loss, g_loss))

            # if minimum loss is updated or final epoch, plot results
            if epoch % 2 == 0 or min_tot_loss > tot_loss or epoch + 1 == n_epochs:
                min_tot_loss = tot_loss
                # Plot for reproduce performance
                if PRR:
                    y_PRR = sess.run(y, feed_dict={x_hat: x_PRR, keep_prob: 1})
                    y_PRR_img = y_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                              IMAGE_SIZE_MNIST)
                    PRR.save_images(y_PRR_img,
                                    name="/PRR_epoch_%02d" % (epoch) + ".jpg")

                # Plot for manifold learning result
                if PMLR and dim_z == 2:
                    y_PMLR = sess.run(decoded,
                                      feed_dict={
                                          z_in: PMLR.z,
                                          keep_prob: 1
                                      })
                    y_PMLR_img = y_PMLR.reshape(PMLR.n_tot_imgs,
                                                IMAGE_SIZE_MNIST,
                                                IMAGE_SIZE_MNIST)
                    PMLR.save_images(y_PMLR_img,
                                     name="/PMLR_epoch_%02d" % (epoch) +
                                     ".jpg")

                    # plot distribution of labeled images
                    z_PMLR = sess.run(z,
                                      feed_dict={
                                          x_hat: x_PMLR,
                                          keep_prob: 1
                                      })
                    PMLR.save_scattered_image(z_PMLR,
                                              id_PMLR,
                                              name="/PMLR_map_epoch_%02d" %
                                              (epoch) + ".jpg")
Example n. 5
def main(args):

    np.random.seed(1337)
    """ parameters """
    RESULTS_DIR = args.results_path

    # network architecture
    n_hidden = args.n_hidden

    # train
    n_epochs = args.num_epochs
    batch_size = args.batch_size
    learn_rate = args.learn_rate

    # Plot
    PRR = args.PRR  # Plot Reproduce Result
    PRR_n_img_x = args.PRR_n_img_x  # number of images along x-axis in a canvas
    PRR_n_img_y = args.PRR_n_img_y  # number of images along y-axis in a canvas
    PRR_resize_factor = args.PRR_resize_factor  # resize factor for each image in a canvas

    PMLR = args.PMLR  # Plot Manifold Learning Result
    PMLR_n_img_x = args.PMLR_n_img_x  # number of images along x-axis in a canvas
    PMLR_n_img_y = args.PMLR_n_img_y  # number of images along y-axis in a canvas
    PMLR_resize_factor = args.PMLR_resize_factor  # resize factor for each image in a canvas
    PMLR_z_range = args.PMLR_z_range  # range for random latent vector
    PMLR_n_samples = args.PMLR_n_samples  # number of labeled samples to plot a map from input data space to the latent space
    """ prepare MNIST data """
    '''

    esense_files = [
                    "AAU_livingLab4_202481591532165_1541682359",
                    "fabio_1-202481588431654_1541691060", 
                    "alemino_ZRH_202481601716927_1541691041",
                    "IMDEA_wideband_202481598624002_1541682492"
                    ]
    esense_folder = "./datadumps/esense_data_jan2019/"
    #train_data, train_labels, test_data, test_labels, bw_labels, pos_labels = spec_data.gendata()
    for ei,efile in enumerate(esense_files):
        print efile
        if ei==0:
            train_data, train_labels,_ = esense_seqload.gendata(esense_folder+efile)
        else:
            dtrain_data, dtrain_labels,_ = esense_seqload.gendata(esense_folder+efile)
            train_data = np.vstack((train_data,dtrain_data))
            train_labels = np.vstack((train_labels,dtrain_labels))
    '''
    #train_data, train_labels, _,_,_,_,_ = synthetic_data.gendata()
    train_data, train_labels, _, _, _ = hackrf_data.gendata(
        "./datadumps/sample_hackrf_data.csv")
    #train_data, train_labels = rawdata.gendata()
    #Split the data
    train_data, train_labels = shuffle_in_unison_inplace(
        train_data, train_labels)
    splitval = int(train_data.shape[0] * 0.5)
    test_data = train_data[:splitval]
    test_labels = train_labels[:splitval]
    train_data = train_data[splitval:]
    train_labels = train_labels[splitval:]
    #Semsup splitting
    splitval = int(train_data.shape[0] * 0.2)
    train_data_sup = train_data[:splitval]
    train_data = train_data[splitval:]
    train_labels_sup = train_labels[:splitval]
    train_labels = train_labels[splitval:]
    n_samples = train_data.shape[0]
    tsamples = train_data.shape[1]
    fsamples = train_data.shape[2]
    dim_img = [tsamples, fsamples]
    nlabels = train_labels.shape[1]
    print(nlabels)

    encoder = "CNN"
    #encoder="LSTM"
    dim_z = args.dimz  # to visualize learned manifold
    enable_sel = False
    """ build graph """

    # input placeholders
    x_hat = tf.placeholder(tf.float32,
                           shape=[None, tsamples, fsamples],
                           name='input_img')
    x = tf.placeholder(tf.float32,
                       shape=[None, tsamples, fsamples],
                       name='target_img')
    x_id = tf.placeholder(tf.float32,
                          shape=[None, nlabels],
                          name='input_img_label')

    # dropout
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    # input for PMLR
    z_in = tf.placeholder(tf.float32,
                          shape=[None, dim_z],
                          name='latent_variable')

    # samples drawn from prior distribution
    z_sample = tf.placeholder(tf.float32,
                              shape=[None, dim_z],
                              name='prior_sample')
    cat_sample = tf.placeholder(tf.float32,
                                shape=[None, nlabels],
                                name='prior_sample_label')

    # network architecture
    #y, z, neg_marginal_likelihood, D_loss, G_loss = aae.adversarial_autoencoder(x_hat, x, x_id, z_sample, z_id, dim_img,
    #                                                                            dim_z, n_hidden, keep_prob)
    y, z, neg_marginal_likelihood, D_loss, G_loss, cat_gen_loss, cat = spec_aae.adversarial_autoencoder_semsup_cat_nodimred(
        x_hat,
        x,
        x_id,
        z_sample,
        cat_sample,
        dim_img,
        dim_z,
        n_hidden,
        keep_prob,
        nlabels=nlabels,
        vdim=2)

    # optimization
    t_vars = tf.trainable_variables()
    d_vars = [
        var for var in t_vars
        if "discriminator" in var.name or "discriminator_cat" in var.name
    ]
    g_vars = [var for var in t_vars if encoder + "_encoder_cat" in var.name]
    ae_vars = [
        var for var in t_vars
        if encoder + "_encoder_cat" in var.name or "CNN_decoder" in var.name
    ]

    train_op_ae = tf.train.AdamOptimizer(learn_rate).minimize(
        neg_marginal_likelihood, var_list=ae_vars)
    train_op_d = tf.train.AdamOptimizer(learn_rate / 2.0).minimize(
        D_loss, var_list=d_vars)
    train_op_g = tf.train.AdamOptimizer(learn_rate).minimize(G_loss,
                                                             var_list=g_vars)
    train_op_cat = tf.train.AdamOptimizer(learn_rate).minimize(cat_gen_loss,
                                                               var_list=g_vars)
    """ training """

    # Plot for reproduce performance
    if PRR:
        PRR = plot_utils.Plot_Reproduce_Performance(RESULTS_DIR, PRR_n_img_x,
                                                    PRR_n_img_y, tsamples,
                                                    fsamples,
                                                    PRR_resize_factor)

        x_PRR = test_data[0:PRR.n_tot_imgs, :]

        x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, tsamples, fsamples)
        PRR.save_images(x_PRR_img, name='input.jpg')

    # Plot for manifold learning result
    if PMLR and dim_z == 2:

        PMLR = plot_utils.Plot_Manifold_Learning_Result(
            RESULTS_DIR, PMLR_n_img_x, PMLR_n_img_y, tsamples, fsamples,
            PMLR_resize_factor, PMLR_z_range)

        x_PMLR = test_data[0:PMLR_n_samples, :]
        id_PMLR = test_labels[0:PMLR_n_samples, :]

        decoded = spec_aae.decoder(z_in, dim_img, n_hidden)
    else:
        x_PMLR = test_data[0:PMLR_n_samples, :]
        id_PMLR = test_labels[0:PMLR_n_samples, :]
        z_in = tf.placeholder(tf.float32,
                              shape=[None, dim_z],
                              name='latent_variable')

    # train
    total_batch = int(n_samples / batch_size)
    min_tot_loss = 1e99
    prev_loss = 1e99

    saver = tf.train.Saver()
    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer(), feed_dict={keep_prob: 0.9})

        for epoch in range(n_epochs):

            # Random shuffling
            train_data_, train_label_ = shuffle_in_unison_inplace(
                train_data, train_labels)
            train_data_sup_, train_labels_sup_ = shuffle_in_unison_inplace(
                train_data_sup, train_labels_sup)

            # Loop over all batches
            for i in range(total_batch):
                # Compute the offset of the current minibatch in the data.
                offset = (i * batch_size) % (n_samples)
                offset_sup = (i * batch_size) % (train_data_sup.shape[0])
                batch_xs_input = train_data_[offset:(offset + batch_size), :]
                batch_ids_input = train_label_[offset:(offset + batch_size), :]
                batch_xs_sup_input = train_data_sup_[offset_sup:(
                    offset_sup + batch_size), :]
                batch_ids_sup_input = train_labels_sup_[offset_sup:(
                    offset_sup + batch_size), :]
                batch_xs_target = batch_xs_input
                batch_xs_sup_target = batch_xs_sup_input

                # draw samples from prior distribution
                if dim_z > 2:
                    if enable_sel:
                        if args.prior_type == 'mixGaussian':
                            z_id_ = np.random.randint(0,
                                                      nlabels,
                                                      size=[batch_size])
                            samples = np.zeros((batch_size, dim_z))
                            for el in range(dim_z // 2):
                                samples_ = prior.gaussian_mixture(
                                    batch_size,
                                    2,
                                    n_labels=nlabels,
                                    label_indices=z_id_,
                                    y_var=(1.0 / nlabels))
                                samples[:, el * 2:(el + 1) * 2] = samples_
                        elif args.prior_type == 'swiss_roll':
                            z_id_ = np.random.randint(0,
                                                      nlabels,
                                                      size=[batch_size])
                            samples = np.zeros((batch_size, dim_z))
                            for el in range(dim_z // 2):
                                samples_ = prior.swiss_roll(
                                    batch_size, 2, label_indices=z_id_)
                                samples[:, el * 2:(el + 1) * 2] = samples_
                        elif args.prior_type == 'normal':
                            samples, z_id_ = prior.gaussian(
                                batch_size,
                                dim_z,
                                n_labels=nlabels,
                                use_label_info=True)
                        else:
                            raise Exception("[!] There is no option for " +
                                            args.prior_type)
                    else:
                        z_id_ = np.random.randint(0,
                                                  nlabels,
                                                  size=[batch_size])
                        samples = np.random.normal(
                            0.0, 1, (batch_size, dim_z)).astype(np.float32)
                else:
                    if args.prior_type == 'mixGaussian':
                        z_id_ = np.random.randint(0,
                                                  nlabels,
                                                  size=[batch_size])
                        samples = prior.gaussian_mixture(batch_size,
                                                         dim_z,
                                                         n_labels=nlabels,
                                                         label_indices=z_id_,
                                                         y_var=(1.0 / nlabels))
                    elif args.prior_type == 'swiss_roll':
                        z_id_ = np.random.randint(0,
                                                  nlabels,
                                                  size=[batch_size])
                        samples = prior.swiss_roll(batch_size,
                                                   dim_z,
                                                   label_indices=z_id_)
                    elif args.prior_type == 'normal':
                        samples, z_id_ = prior.gaussian(batch_size,
                                                        dim_z,
                                                        n_labels=nlabels,
                                                        use_label_info=True)
                    else:
                        raise Exception("[!] There is no option for " +
                                        args.prior_type)

                z_id_one_hot_vector = np.zeros((batch_size, nlabels))
                z_id_one_hot_vector[np.arange(batch_size), z_id_] = 1

                # reconstruction loss
                _, loss_likelihood0 = sess.run(
                    (train_op_ae, neg_marginal_likelihood),
                    feed_dict={
                        x_hat: batch_xs_input,
                        x: batch_xs_target,
                        z_sample: samples,
                        cat_sample: z_id_one_hot_vector,
                        keep_prob: 0.9
                    })

                _, loss_likelihood1 = sess.run(
                    (train_op_ae, neg_marginal_likelihood),
                    feed_dict={
                        x_hat: batch_xs_sup_input,
                        x: batch_xs_sup_target,
                        z_sample: samples,
                        cat_sample: batch_ids_sup_input,
                        keep_prob: 0.9
                    })
                loss_likelihood = loss_likelihood0 + loss_likelihood1
                # discriminator loss
                _, d_loss = sess.run(
                    (train_op_d, D_loss),
                    feed_dict={
                        x_hat: batch_xs_input,
                        x: batch_xs_target,
                        z_sample: samples,
                        cat_sample: z_id_one_hot_vector,
                        keep_prob: 0.9
                    })

                # generator loss
                for _ in range(2):
                    _, g_loss = sess.run(
                        (train_op_g, G_loss),
                        feed_dict={
                            x_hat: batch_xs_input,
                            x: batch_xs_target,
                            z_sample: samples,
                            cat_sample: z_id_one_hot_vector,
                            keep_prob: 0.9
                        })

                    # supervised phase
                    _, cat_loss = sess.run(
                        (train_op_cat, cat_gen_loss),
                        feed_dict={
                            x_hat: batch_xs_sup_input,
                            x: batch_xs_sup_target,
                            x_id: batch_ids_sup_input,
                            keep_prob: 0.9
                        })

            tot_loss = loss_likelihood + d_loss + g_loss + cat_loss

            # print cost every epoch
            print(
                "epoch %d: L_tot %03.2f L_likelihood %03.4f d_loss %03.2f g_loss %03.2f "
                % (epoch, tot_loss, loss_likelihood, d_loss, g_loss))

            #for v in sess.graph.get_operations():
            #    print(v.name)
            # if minimum loss is updated or final epoch, plot results
            if epoch % 2 == 0 or min_tot_loss > tot_loss or epoch + 1 == n_epochs:
                min_tot_loss = tot_loss
                # Plot for reproduce performance
                if PRR:
                    y_PRR = sess.run(y, feed_dict={x_hat: x_PRR, keep_prob: 1})
                    save_subimages([x_PRR[:10], y_PRR[:10]],
                                   "./results/Reco_%02d" % (epoch))
                    #y_PRR_img = y_PRR.reshape(PRR.n_tot_imgs, tsamples, fsamples)
                    #PRR.save_images(y_PRR_img, name="/PRR_epoch_%02d" %(epoch) + ".jpg")

                # Plot for manifold learning result
                if PMLR and dim_z == 2:
                    y_PMLR = sess.run(decoded,
                                      feed_dict={
                                          z_in: PMLR.z,
                                          keep_prob: 1
                                      })
                    y_PMLR_img = y_PMLR.reshape(PMLR_n_img_x, PMLR_n_img_x,
                                                tsamples, fsamples)
                    save_subimages(y_PMLR_img, "./results/Mani_%02d" % (epoch))
                    #y_PMLR_img = y_PMLR.reshape(PMLR.n_tot_imgs, fsamples, tsamples)
                    #PMLR.save_images(y_PMLR_img, name="/PMLR_epoch_%02d" % (epoch) + ".jpg")

                    # plot distribution of labeled images
                    z_PMLR = sess.run(z,
                                      feed_dict={
                                          x_hat: x_PMLR,
                                          keep_prob: 1
                                      })
                    PMLR.save_scattered_image(z_PMLR,
                                              id_PMLR,
                                              name="/PMLR_map_epoch_%02d" %
                                              (epoch) + ".jpg",
                                              N=nlabels)
                else:
                    retcat, test_cat_loss, test_ll = sess.run(
                        (cat, cat_gen_loss, neg_marginal_likelihood),
                        feed_dict={
                            x_hat: x_PMLR,
                            x_id: id_PMLR,
                            x: x_PMLR,
                            keep_prob: 1
                        })
                    print(
                        "Accuracy: ", 100.0 *
                        np.sum(np.argmax(retcat, 1) == np.argmax(id_PMLR, 1)) /
                        retcat.shape[0], test_cat_loss, test_ll)
                    save_loss = test_cat_loss + test_ll
                    if prev_loss > save_loss and (epoch % 100
                                                  == 0):  # and epoch!=0:
                        prev_loss = save_loss
                        #save_graph(sess,"./savedmodels/","saved_checkpoint","checkpoint_state","input_graph.pb","output_graph.pb",encoder+"_encoder_cat/zout/BiasAdd,"+encoder+"_encoder_cat/catout/Softmax,CNN_decoder/reshaped/Reshape,discriminator_cat_1/add_2,discriminator_1/add_2")
                        save_path = saver.save(
                            sess, "./savedmodels_allsensors/allsensors.ckpt")
                        tf.train.write_graph(sess.graph_def,
                                             "./savedmodels_allsensors/",
                                             "allsensors.pb",
                                             as_text=False)
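
shuffle_in_unison_inplace is used above but not defined in this example; it is assumed to behave roughly like the following hypothetical helper, which applies one random permutation to both arrays so that samples and labels stay paired:

import numpy as np

def shuffle_in_unison_inplace(a, b):
    # shuffle two arrays with the same index order so rows stay aligned
    assert a.shape[0] == b.shape[0]
    perm = np.random.permutation(a.shape[0])
    return a[perm], b[perm]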
Example n. 6
def main(args):

    # torch.manual_seed(222)
    # torch.cuda.manual_seed_all(222)
    # np.random.seed(222)

    device = torch.device('cuda')

    RESULTS_DIR = args.results_path
    ADD_NOISE = args.add_noise
    n_hidden = args.n_hidden
    dim_img = IMAGE_SIZE_MNIST**2  # number of pixels for a MNIST image
    dim_z = args.dim_z

    # train
    n_epochs = args.num_epochs
    batch_size = args.batch_size
    learn_rate = args.learn_rate

    # Plot
    PRR = args.PRR  # Plot Reproduce Result
    PRR_n_img_x = args.PRR_n_img_x  # number of images along x-axis in a canvas
    PRR_n_img_y = args.PRR_n_img_y  # number of images along y-axis in a canvas
    PRR_resize_factor = args.PRR_resize_factor  # resize factor for each image in a canvas

    PMLR = args.PMLR  # Plot Manifold Learning Result
    PMLR_n_img_x = args.PMLR_n_img_x  # number of images along x-axis in a canvas
    PMLR_n_img_y = args.PMLR_n_img_y  # number of images along y-axis in a canvas
    PMLR_resize_factor = args.PMLR_resize_factor  # resize factor for each image in a canvas
    PMLR_z_range = args.PMLR_z_range  # range for random latent vector
    PMLR_n_samples = args.PMLR_n_samples  # number of labeled samples to plot a map from input data space to the latent space
    """ prepare MNIST data """
    train_total_data, train_size, _, _, test_data, test_labels = mnist_data.prepare_MNIST_data(
    )
    n_samples = train_size
    """ create network """
    keep_prob = 0.99
    encoder = vae.Encoder(dim_img, n_hidden, dim_z, keep_prob).to(device)
    decoder = vae.Decoder(dim_z, n_hidden, dim_img, keep_prob).to(device)
    # list1 + list2 returns a new list; .extend() mutates in place and returns None
    optimizer = torch.optim.Adam(list(encoder.parameters()) +
                                 list(decoder.parameters()),
                                 lr=learn_rate)
    # vae.init_weights(encoder, decoder)
    """ training """
    # Plot for reproduce performance
    if PRR:
        PRR = plot_utils.Plot_Reproduce_Performance(RESULTS_DIR, PRR_n_img_x,
                                                    PRR_n_img_y,
                                                    IMAGE_SIZE_MNIST,
                                                    IMAGE_SIZE_MNIST,
                                                    PRR_resize_factor)

        x_PRR = test_data[0:PRR.n_tot_imgs, :]

        x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                  IMAGE_SIZE_MNIST)
        PRR.save_images(x_PRR_img, name='input.jpg')
        print('saved:', 'input.jpg')

        if ADD_NOISE:
            x_PRR = x_PRR * np.random.randint(2, size=x_PRR.shape)
            x_PRR += np.random.randint(2, size=x_PRR.shape)

            x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                      IMAGE_SIZE_MNIST)
            PRR.save_images(x_PRR_img, name='input_noise.jpg')
            print('saved:', 'input_noise.jpg')

        x_PRR = torch.from_numpy(x_PRR).float().to(device)

    # Plot for manifold learning result
    if PMLR and dim_z == 2:

        PMLR = plot_utils.Plot_Manifold_Learning_Result(
            RESULTS_DIR, PMLR_n_img_x, PMLR_n_img_y, IMAGE_SIZE_MNIST,
            IMAGE_SIZE_MNIST, PMLR_resize_factor, PMLR_z_range)

        x_PMLR = test_data[0:PMLR_n_samples, :]
        id_PMLR = test_labels[0:PMLR_n_samples, :]

        if ADD_NOISE:
            x_PMLR = x_PMLR * np.random.randint(2, size=x_PMLR.shape)
            x_PMLR += np.random.randint(2, size=x_PMLR.shape)

        z_ = torch.from_numpy(PMLR.z).float().to(device)
        x_PMLR = torch.from_numpy(x_PMLR).float().to(device)

    # train
    total_batch = int(n_samples / batch_size)
    min_tot_loss = np.inf
    for epoch in range(n_epochs):

        # Random shuffling
        np.random.shuffle(train_total_data)
        train_data_ = train_total_data[:, :-mnist_data.NUM_LABELS]

        # Loop over all batches
        encoder.train()
        decoder.train()
        for i in range(total_batch):
            # Compute the offset of the current minibatch in the data.
            offset = (i * batch_size) % (n_samples)
            batch_xs_input = train_data_[offset:(offset + batch_size), :]

            batch_xs_target = batch_xs_input

            # add salt & pepper noise
            if ADD_NOISE:
                batch_xs_input = batch_xs_input * np.random.randint(
                    2, size=batch_xs_input.shape)
                batch_xs_input += np.random.randint(2,
                                                    size=batch_xs_input.shape)

            batch_xs_input, batch_xs_target = torch.from_numpy(batch_xs_input).float().to(device), \
                                              torch.from_numpy(batch_xs_target).float().to(device)

            assert not torch.isnan(batch_xs_input).any()
            assert not torch.isnan(batch_xs_target).any()

            y, z, tot_loss, loss_likelihood, loss_divergence = \
                                        vae.get_loss(encoder, decoder, batch_xs_input, batch_xs_target)

            optimizer.zero_grad()
            tot_loss.backward()
            optimizer.step()

        # print cost every epoch
        print(
            "epoch %d: L_tot %03.2f L_likelihood %03.2f L_divergence %03.2f" %
            (epoch, tot_loss.item(), loss_likelihood.item(),
             loss_divergence.item()))

        encoder.eval()
        decoder.eval()
        # if minimum loss is updated or final epoch, plot results
        if min_tot_loss > tot_loss.item() or epoch + 1 == n_epochs:
            min_tot_loss = tot_loss.item()

            # Plot for reproduce performance
            if PRR:
                y_PRR = vae.get_ae(encoder, decoder, x_PRR)

                y_PRR_img = y_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                          IMAGE_SIZE_MNIST)
                PRR.save_images(y_PRR_img.detach().cpu().numpy(),
                                name="/PRR_epoch_%02d" % (epoch) + ".jpg")
                print('saved:', "/PRR_epoch_%02d" % (epoch) + ".jpg")

            # Plot for manifold learning result
            if PMLR and dim_z == 2:
                y_PMLR = decoder(z_)

                y_PMLR_img = y_PMLR.reshape(PMLR.n_tot_imgs, IMAGE_SIZE_MNIST,
                                            IMAGE_SIZE_MNIST)
                PMLR.save_images(y_PMLR_img.detach().cpu().numpy(),
                                 name="/PMLR_epoch_%02d" % (epoch) + ".jpg")
                print('saved:', "/PMLR_epoch_%02d" % (epoch) + ".jpg")

                # plot distribution of labeled images
                z_PMLR = vae.get_z(encoder, x_PMLR)
                PMLR.save_scattered_image(z_PMLR.detach().cpu().numpy(),
                                          id_PMLR,
                                          name="/PMLR_map_epoch_%02d" %
                                          (epoch) + ".jpg")
                print('saved:', "/PMLR_map_epoch_%02d" % (epoch) + ".jpg")
Example n. 7
def main(args):

    """ parameters """
    RESULTS_DIR = args.results_path

    # network architecture
    ADD_NOISE = args.add_noise

    n_hidden = args.n_hidden
    dim_img = IMAGE_SIZE_MNIST**2  # number of pixels for a MNIST image
    dim_z = args.dim_z

    # train
    n_epochs = args.num_epochs
    batch_size = args.batch_size
    learn_rate = args.learn_rate

    # Plot
    PRR = args.PRR                              # Plot Reproduce Result
    PRR_n_img_x = args.PRR_n_img_x              # number of images along x-axis in a canvas
    PRR_n_img_y = args.PRR_n_img_y              # number of images along y-axis in a canvas
    PRR_resize_factor = args.PRR_resize_factor  # resize factor for each image in a canvas

    PMLR = args.PMLR                            # Plot Manifold Learning Result
    PMLR_n_img_x = args.PMLR_n_img_x            # number of images along x-axis in a canvas
    PMLR_n_img_y = args.PMLR_n_img_y            # number of images along y-axis in a canvas
    PMLR_resize_factor = args.PMLR_resize_factor# resize factor for each image in a canvas
    PMLR_z_range = args.PMLR_z_range            # range for random latent vector
    PMLR_n_samples = args.PMLR_n_samples        # number of labeled samples to plot a map from input data space to the latent space

    """ prepare MNIST data """

    train_total_data, train_size, test_data, test_labels = mnist_data.prepare_MNIST_data()
    n_samples = train_size
    para_lamda=10.0
    clipping_parameter = 0.01
    n_critic = 5
    #train_data = train_total_data[:, :-mnist_data.NUM_LABELS]

    """ build graph """

    # input placeholders
    # In denoising-autoencoder, x_hat == x + noise, otherwise x_hat == x
    #x_hat = tf.placeholder(tf.float32, shape=[None, dim_img], name='input_img')
    x = tf.placeholder(tf.float32, shape=[None, dim_img], name='target_img')
    batchsize = tf.placeholder(tf.float32, name='batchsize')

    # input for PMLR
    z = tf.placeholder(tf.float32, shape=[None, dim_z], name='latent_variable')

    cond_info = tf.placeholder(tf.float32, shape=[None, 12], name='cond_info')
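    # cond_info carries 12 columns: a 10-way one-hot digit label plus the sine and
    # cosine of a rotation angle (see the test_info construction further below)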


    encoder_output = CNNVae.gaussian_CNN_encoder(x, cond_info, dim_z)

    decoder_output = CNNVae.gaussian_CNN_decoder(encoder_output,cond_info)


    with tf.variable_scope('Discriminator') as scope:
        D_real = CNNVae.discriminator(z,n_hidden)
        scope.reuse_variables()
        D_fake = CNNVae.discriminator(encoder_output,n_hidden)
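        # WGAN-GP style regularizer: evaluate the critic on random interpolations
        # between prior samples z and encoder outputs, and penalize gradient norms away from 1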

        alpha = tf.random_uniform(
            shape=[batch_size, 1],
            minval=0.,
            maxval=1.
        )
        differences = encoder_output - z  # may cause problem!!!
        interpolates = z + (alpha * differences)
        # gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0]
        gradients = tf.gradients(CNNVae.discriminator(interpolates, n_hidden), [interpolates])[0]
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
        ddx = 10.0 * gradient_penalty

        # ddx = gradient_penalty(z, encoder_output, CNNVae.discriminator)
        # ddx = ddx*10.0
    with tf.name_scope('Loss'):

        #marginal_likelihood = tf.reduce_sum(3.14/4.0*tf.exp(2.0*(x-decoder_output))-2.0*(x-decoder_output), 1)
        #loss_reconstr = tf.reduce_mean(marginal_likelihood)
        loss_reconstr = tf.reduce_mean(3.14/4.0*tf.exp(2.0*(x-decoder_output))-2.0*(x-decoder_output))
        # Adversarial loss to approx. Q(z|X)
        with tf.name_scope('Discriminator_loss'):
            #loss_discriminator = -para_lamda*(tf.reduce_mean(tf.log(D_real)) + tf.reduce_mean(tf.log(1.0-D_fake)))
            loss_discriminator = 1.0*(tf.reduce_mean(D_fake) - tf.reduce_mean(D_real)+ddx)

        with tf.name_scope('Encoder_loss'):
            #loss_encoder = -para_lamda* tf.reduce_mean(tf.log(D_fake))
            loss_encoder = -(1.0)*tf.reduce_mean(D_fake)

    vars = tf.trainable_variables()
    enc_params = [v for v in vars if 'g_encoder_' in v.name]
    dec_params = [v for v in vars if 'g_decoder_' in v.name]
    dis_params = [v for v in vars if 'g_dis_' in v.name]
    dis_weights = [w for w in dis_params if 'weight' in w.name]


    with tf.variable_scope('Discriminator_Accuracy'):
        accuracy_real = tf.reduce_mean(tf.cast(tf.greater_equal(D_real, 0.5), tf.float16))
        accuracy_fake = tf.reduce_mean(tf.cast(tf.less(D_fake, 0.5), tf.float16))
        accuracy_tot = (accuracy_real + accuracy_fake) / 2
        #accuracy_tot = tf.reduce_mean(D_real) - tf.reduce_mean(D_fake)
    #clipped_weights = clip_weights(dis_weights, clipping_parameter, 'clip_weights')
    CLIP = [-0.04, 0.04]
    clipped_weights = [var.assign(tf.clip_by_value(var, CLIP[0], CLIP[1])) for var in dis_weights]
    with tf.name_scope('Optimizer'):
        train_op_AE = tf.train.AdamOptimizer(learning_rate=learn_rate, beta1=0., beta2=0.9).minimize(
            loss_reconstr + para_lamda * loss_encoder, var_list=[dec_params, enc_params])
        train_op_Dis = tf.train.AdamOptimizer(learning_rate=learn_rate, beta1=0., beta2=0.9).minimize(
            para_lamda * loss_discriminator, var_list=[dis_params])


    test_smaple_size=12800
    test_batch_size=128
    z_test = tf.placeholder(tf.float32, shape=[test_batch_size, dim_z])
    test_cond_info = tf.placeholder(tf.float32, shape=[test_batch_size, 12], name='test_cond_info')
    test_op = CNNVae.CNN_decoder(z_test, test_cond_info)
    mu_test = tf.zeros([test_smaple_size, dim_z], dtype=tf.float32)
    test_sample = mu_test + tf.random_normal(tf.shape(mu_test), 0, 1, dtype=tf.float32)
    test_rand = np.random.randint(0, 10,size=[test_smaple_size,1]) #[0,10)
    test_info=num_to_one_hot(test_rand)
    test_angle=np.random.randint(0, 360,size=[test_smaple_size,1]) #[0,360)
    sin_angle=np.sin(test_angle/180.0*np.pi)
    cos_angle=np.cos(test_angle/180.0*np.pi)
    test_info = np.concatenate((test_info, sin_angle), axis=1)
    test_info = np.concatenate((test_info, cos_angle), axis=1)
    savename = "./label" + ".mat"
    sio.savemat(savename, {'label': test_rand})
    savename = "./angle" + ".mat"
    sio.savemat(savename, {'angle': test_angle})
    """ training """
    loss_array = np.zeros(shape=[n_epochs, 1], dtype=np.float32)
    epoch_array = np.zeros(shape=[n_epochs, 1], dtype=np.uint)

    # Plot for reproduce performance
    if PRR:
        PRR = plot_utils.Plot_Reproduce_Performance(RESULTS_DIR, PRR_n_img_x, PRR_n_img_y, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST, PRR_resize_factor)

        x_PRR = test_data[0:PRR.n_tot_imgs, :]
        x_PRR_info = test_labels[0:PRR.n_tot_imgs, :]
        x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST)
        PRR.save_images(x_PRR_img, name='input.jpg')
        sio.savemat('testimage.mat', {'testimage': x_PRR})


    # train
    total_batch = int(n_samples / batch_size)
    min_tot_loss = 1e99
    min_tot_mar_loss=1e99
    # device_count={'GPU': 0} forces CPU-only execution; {'GPU': 1} allows one GPU
    config = tf.ConfigProto(device_count={'GPU': 1})
    with tf.Session(config=config) as sess:

        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver()
        # to visualize using TensorBoard
        writer = tf.summary.FileWriter('./graphs', sess.graph)
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('./checkpoints/'))

        # if that checkpoint exists, restore from checkpoint
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for epoch in range(n_epochs):

            total_loss_likelihood = 0.0
            total_loss_divergence = 0.0
            total_loss_dis =0.0
            # Random shuffling
            np.random.shuffle(train_total_data)
            #train_data_ = train_total_data[:, :-mnist_data.NUM_LABELS]

            # Loop over all batches
            for i in range(total_batch):
                # Compute the offset of the current minibatch in the data.
                offset = (i * batch_size) % (n_samples)
                batch_xs_input = train_total_data[offset:(offset + batch_size), :-12]
                batch_cond_info = train_total_data[offset:(offset + batch_size), -12:]

                # update autoencoder parameters
                #z0 = np.random.randn(128, dim_z)
                #_, loss_divergence ,enc_out= sess.run((train_op_Enc, loss_encoder,encoder_output),feed_dict={x: batch_xs_input, cond_info: batch_cond_info})
                _, loss_likelihood, loss_divergence = sess.run(
                    (train_op_AE, loss_reconstr, loss_encoder),
                    feed_dict={x: batch_xs_input, cond_info: batch_cond_info,
                               batchsize: batch_xs_input.shape[0]})


                # update discriminator
                for _ in range(n_critic):

                    z0 = np.random.normal(loc=0., scale=1, size=(batch_size, dim_z))
                    _, loss_dis = sess.run((train_op_Dis, loss_discriminator),
                                           feed_dict={ x: batch_xs_input, cond_info: batch_cond_info, z: z0})
                # _ = sess.run(clipped_weights)

                total_loss_likelihood = total_loss_likelihood + loss_likelihood
                total_loss_divergence = total_loss_divergence + loss_divergence
                total_loss_dis = total_loss_dis + loss_dis

            total_loss_likelihood = total_loss_likelihood / total_batch
            total_loss_divergence = total_loss_divergence / total_batch
            total_loss_dis = total_loss_dis / total_batch
            tot_loss = total_loss_divergence + total_loss_likelihood

            epoch_array[epoch] = epoch
            loss_array[epoch] = total_loss_likelihood
            # print cost every epoch
            print("epoch %d:  L_likelihood %03.3f L_divergence %03.3f L_dis %03.3f" % (epoch, total_loss_likelihood*4096, total_loss_divergence,total_loss_dis))

            # checkpoint and plot every 10 epochs
            # (alternative condition, kept for reference):
            # if min_tot_loss > tot_loss or min_tot_mar_loss > total_loss_likelihood or epoch + 1 == n_epochs:
            if epoch % 10 == 0:
                saver.save(sess, './checkpoints/checkpoint', epoch)
                min_tot_loss = tot_loss
                min_tot_mar_loss = total_loss_likelihood
                # Plot for reproduce performance
                if PRR:
                    #z_PRR = sess.run(encoder_output,feed_dict={x: x_PRR, cond_info: x_PRR_info})
                    y_PRR = sess.run(decoder_output, feed_dict={x: x_PRR, cond_info:x_PRR_info})
                    y_PRR_img = y_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST)
                    t_z = np.random.normal(loc=0., scale=1, size=(batch_size, dim_z))
                    # NOTE: t_z is drawn with batch_size rows while t_info below has
                    # test_batch_size rows; the two sizes presumably coincide here
                    t_rand = np.random.randint(0, 10, size=[test_batch_size, 1])  # [0, 10)
                    t_info = num_to_one_hot(t_rand)
                    #t_angle = np.random.randint(0, 360, size=[test_batch_size, 1])  # [0, 360)
                    t_angle = np.random.uniform(size=[test_batch_size, 1]) * 360  # [0, 360)
                    sin_angle = np.sin(t_angle / 180.0 * np.pi)
                    cos_angle = np.cos(t_angle / 180.0 * np.pi)
                    t_info = np.concatenate((t_info, sin_angle), axis=1)
                    t_info = np.concatenate((t_info, cos_angle), axis=1)
                    x_test = sess.run(test_op, feed_dict={z_test: t_z, test_cond_info: t_info})
                    x_test = x_test.reshape(test_batch_size, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST)
                    # the outer `epoch % 10 == 0` check already gates this branch,
                    # so the images can be saved unconditionally
                    PRR.save_images(y_PRR_img, name="/PRR_epoch_%02d" % (epoch) + ".jpg")
                    PRR.save_images(x_test, name="/GER_epoch_%02d" % (epoch) + ".jpg")



        if PRR and save_result:
            test_image = np.zeros([test_smaple_size, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST], dtype=np.float32)
            test_sample = sess.run(test_sample)  # evaluate the latent-sample tensor once
            test_batch = int(test_smaple_size / test_batch_size)
            for i in range(test_batch):
                # Compute the offset of the current minibatch in the data.
                offset = i * test_batch_size
                test_input = test_sample[offset:(offset + test_batch_size), :]
                test_input_info = test_info[offset:(offset + test_batch_size), :]
                x_test = sess.run(test_op, feed_dict={z_test: test_input, test_cond_info: test_input_info})
                test_image[offset:(offset + test_batch_size), :, :] = x_test.reshape(
                    test_batch_size, IMAGE_SIZE_MNIST, IMAGE_SIZE_MNIST)
            PRR.save_images(test_image[0:128, :, :], name="/PRR_test" + ".jpg")
            savename = "./fakeimdb_loss_%03.2f" % (tot_loss) + ".mat"
            sio.savemat(savename, {'fakeimdb': test_image})
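# A minimal sketch (an assumption, not the repository's code) of the undefined
# helper num_to_one_hot() used above: it is assumed to map digit labels in
# [0, 10) to 10-way one-hot rows, which together with sin/cos of the rotation
# angle gives the 12-dimensional conditioning vector sliced off with [:, -12:].
import numpy as np

def num_to_one_hot(labels, num_classes=10):
    # map integer labels of shape (N, 1) or (N,) to one-hot rows of shape (N, num_classes)
    labels = np.asarray(labels).reshape(-1)
    one_hot = np.zeros((labels.shape[0], num_classes), dtype=np.float32)
    one_hot[np.arange(labels.shape[0]), labels] = 1.0
    return one_hot

# example: build a 12-D conditioning batch (10-way one-hot + sin + cos of the angle)
rand_labels = np.random.randint(0, 10, size=[4, 1])
angles = np.random.uniform(size=[4, 1]) * 360
cond = np.concatenate((num_to_one_hot(rand_labels),
                       np.sin(angles / 180.0 * np.pi),
                       np.cos(angles / 180.0 * np.pi)), axis=1)  # shape (4, 12)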
Example n. 8
    # print('test_labels', test_labels[90])

    n_samples = train_size

    x_hat = tf.placeholder(tf.float32, shape=[None, dim_img], name='input_img')
    x = tf.placeholder(tf.float32, shape=[None, dim_img], name='target_img')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    z_in = tf.placeholder(tf.float32,
                          shape=[None, dim_z],
                          name='latent_variable')
    y, z, loss, neg_marginal_likelihood, KL_divergence = vae.autoencoder(
        x_hat, x, dim_img, dim_z, n_hidden, keep_prob)
    train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    PRR = plot_utils.Plot_Reproduce_Performance(RESULTS_DIR, PRR_n_img_x,
                                                PRR_n_img_y, IMAGE_SIZE,
                                                IMAGE_SIZE, PRR_resize_factor)

    x_PRR = test_data[0:PRR.n_tot_imgs, :]

    x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE, IMAGE_SIZE)
    PRR.save_images(x_PRR_img, name='input.jpg')

    # denoising-AE input: randomly zero out pixels, then add random 0/1 "salt" noise
    # (note: the sum is not clipped, so pixel values can exceed 1.0)
    x_PRR = x_PRR * np.random.randint(2, size=x_PRR.shape)
    x_PRR += np.random.randint(2, size=x_PRR.shape)

    x_PRR_img = x_PRR.reshape(PRR.n_tot_imgs, IMAGE_SIZE, IMAGE_SIZE)
    PRR.save_images(x_PRR_img, name='input_noise.jpg')

    # train
    total_batch = int(n_samples / batch_size)
Example n. 9
def main(args):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    """ prepare CIFAR-10 data """
    train_data, train_label, val_data, val_label, test_data, test_labels = cifar10_data.prepare_cifar_10_data()

    n_train_samples = train_data.shape[0]
    n_samples = n_train_samples
    # n_samples = 2000  # uncomment to train on only 2000 images as a quick test
    n_val_samples = val_data.shape[0]
    n_test_samples = test_data.shape[0]
    image_size = 32
    input_dim = 3
    output_dim = 3
    train_data = train_data.reshape(n_train_samples, input_dim, image_size, image_size)
    train_data = train_data[:n_samples, :, :, :]
    val_data = val_data.reshape(n_val_samples, input_dim, image_size, image_size)
    test_data = test_data.reshape(n_test_samples, input_dim, image_size, image_size)

    """ create network """
    encoder = vae2.Encoder(input_dim, args.zdim, args.use_batch_norm).to(device)
    decoder = vae2.Decoder(args.zdim, output_dim, args.use_batch_norm).to(device)

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=args.learn_rate)
    elif args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(list(encoder.parameters()) + list(decoder.parameters()), lr=args.learn_rate, momentum=args.momentum)
    else:
        print("wrong optimizer")
        return

    # init weights
    if args.seed is None:
        args.seed = random.randint(0, 10000)
    cifar10_data.set_random_seed(args.seed)

    #output log to writer
    writer_train = SummaryWriter(logdir=args.train_log_name)
    writer_val = SummaryWriter(logdir=args.val_log_name)
    mode = args.type

    # Plot for reproduce performance
    image_size = 32
    plot_perform = plot_utils.Plot_Reproduce_Performance(args.results_path, args.show_n_img_x, args.show_n_img_y, image_size,
                                                    image_size, args.show_resize_factor)


    show_img_nums = args.show_n_img_x * args.show_n_img_y
    show_img = val_data[:show_img_nums, :, :, :]
    #show_img = val_data.data[:show_img_nums]
    print(show_img.shape)           # (N, 3, 32, 32) after the reshape above
    plot_perform.save_images(show_img, name='input.jpg')
    print('saved:', 'input.jpg')

    """ training """
    batch_size = args.batch_size
    epochs = args.num_epochs
    num_batches = int(n_samples / batch_size)
    for epoch in range(epochs):

        encoder.train()
        decoder.train()
        tot_loss = 0
        tot_l2_loss = 0
        for i in range(num_batches):
            idx = (i * batch_size) % (n_samples)
            end_idx = idx + batch_size
            batch_input = train_data[idx:end_idx, :, :, :]
            batch_target = batch_input
            batch_input, batch_target = torch.from_numpy(batch_input).float().to(device), \
                                        torch.from_numpy(batch_target).float().to(device)

            y, z, loss, loss_likelihood, loss_divergence, l2_dis = \
                vae2.get_loss(encoder, decoder, batch_input, batch_target, mode)

            tot_loss = tot_loss + loss.item()
            tot_l2_loss = tot_l2_loss + l2_dis.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # print the cost of the last minibatch in each epoch
            if i == num_batches - 1:
                print("train loss: epoch %d: Loss %03.2f L_likelihood %03.2f L_divergence %03.2f L2_dis %03.2f " % (
                    epoch, loss.item(), loss_likelihood.item(), loss_divergence.item(), l2_dis.item()))
                break

        tot_loss = tot_loss / float(num_batches)
        tot_l2_loss = tot_l2_loss / float(num_batches)
        writer_train.add_scalar('loss', tot_loss, epoch)
        writer_train.add_scalar('l2_dis', tot_l2_loss, epoch)

        #evaluate on val dataset

        if (epoch + 1) % 5 == 0 or epoch + 1 == epochs:
            encoder.eval()
            decoder.eval()

            with torch.no_grad():
                #calculate the validation loss
                val_target = val_data
                val_input = val_data
                val_input, val_target = torch.from_numpy(val_input).float().to(device), \
                                        torch.from_numpy(val_target).float().to(device)

                y_val, z_val, loss_val, loss_likelihood_val, loss_divergence_val, l2_dis_val = \
                                vae2.get_loss(encoder, decoder, val_input, val_target, mode)
                print("test results in val data: epoch %d: Loss %03.2f L_likelihood %03.2f L_divergence %03.2f L2_dis %03.2f " % (
                    epoch, loss_val.item(), loss_likelihood_val.item(), loss_divergence_val.item(), l2_dis_val.item()))

            # Plot for reproduce performance
            plot_batch = torch.from_numpy(show_img).float().to(device)
            y_PRR = vae2.get_ae(encoder, decoder, plot_batch)
            y_PRR_img = y_PRR.reshape(show_img_nums, 3, image_size, image_size)
            print(y_PRR_img.shape)
            plot_perform.save_images(y_PRR_img.detach().cpu().numpy(), name="/PRR_epoch_%02d" % (epoch) + ".jpg")
            print('saved:', "/PRR_epoch_%02d" % (epoch) + ".jpg")
            writer_val.add_scalar('loss', loss_val, epoch)
            writer_val.add_scalar('l2_dis', l2_dis_val, epoch)

    writer_train.close()
    writer_val.close()


    plot_utils.plot_t_sne(z_val[:100, :].detach().cpu().numpy(), val_data[:100, :, :, :])
    plot_utils.plot_t_sne_col(z_val.detach().cpu().numpy(), val_label)
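# A minimal sketch, under stated assumptions, of what a vae2.get_loss(encoder,
# decoder, x, target, mode) helper like the one called above could compute:
# a reparameterised Gaussian latent, an MSE reconstruction term, the analytic
# KL divergence against N(0, I), and an L2 distance reported for monitoring.
# The return signature mirrors the example; everything else is assumed.
import torch
import torch.nn.functional as F

def get_loss_sketch(encoder, decoder, x, target, mode='vae'):
    mu, logvar = encoder(x)               # encoder assumed to return mean and log-variance of q(z|x)
    std = torch.exp(0.5 * logvar)
    z = mu + std * torch.randn_like(std)  # reparameterisation trick
    y = decoder(z)                        # reconstruction of the input batch

    batch = x.size(0)
    # reconstruction term: summed squared error, averaged over the batch
    loss_likelihood = F.mse_loss(y, target, reduction='sum') / batch
    # analytic KL( q(z|x) || N(0, I) ), averaged over the batch
    loss_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) / batch
    # plain L2 distance, logged separately in the training output above
    l2_dis = torch.sqrt(torch.sum((y - target) ** 2) / batch)

    loss = loss_likelihood + loss_divergence if mode == 'vae' else loss_likelihood
    return y, z, loss, loss_likelihood, loss_divergence, l2_dis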