Example No. 1
def testGAN(trained_model_path=None, n_batches=40):

    weights = initialiseWeights()

    z_vector = tf.placeholder(shape=[batch_size,z_size],dtype=tf.float32) 
    net_g_test = generator(z_vector, phase_train=True, reuse=True)

    vis = visdom.Visdom()

    saver = tf.train.Saver()
    
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, trained_model_path) 

        # output generated chairs
        for i in range(n_batches):
            next_sigma = float(raw_input())
            z_sample = np.random.normal(0, next_sigma, size=[batch_size, z_size]).astype(np.float32)
            g_objects = sess.run(net_g_test,feed_dict={z_vector:z_sample})
            id_ch = np.random.randint(0, batch_size, 4)
            for i in range(4):
                print g_objects[id_ch[i]].max(), g_objects[id_ch[i]].min(), g_objects[id_ch[i]].shape
                if g_objects[id_ch[i]].max() > 0.5:
                    d.plotVoxelVisdom(np.squeeze(g_objects[id_ch[i]]>0.5), vis, '_'.join(map(str,[i])))
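
testGAN reads a sigma value from stdin before every batch and relies on module-level globals (batch_size, z_size) plus a restored checkpoint. A minimal sketch of how it might be invoked; the checkpoint path is hypothetical and depends on where trainGAN saved its models:

if __name__ == '__main__':
    # hypothetical checkpoint path, matching the 'biasfree_<epoch>.cptk' naming used by trainGAN below
    testGAN(trained_model_path='models/biasfree_1010.cptk', n_batches=5)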
Example No. 2
def testGAN(trained_model_path=None, n_batches=1):

    weights = initialiseWeights()

    z_vector = tf.placeholder(shape=[batch_size,z_size],dtype=tf.float32) 
    net_g_test = generator(z_vector, phase_train=True, reuse=False)

    vis = visdom.Visdom()

    saver = tf.train.Saver()
    
    d_output_x, d_no_sigmoid_output_x = discriminator(net_g_test, phase_train=True, reuse=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, trained_model_path) 

        # output generated chairs
        for i in range(n_batches):
            # note: inter_size must equal batch_size, since new_sample is fed into
            # the [batch_size, z_size] placeholder below
            inter_size = 50
            next_sigma = 0.33
            z_sample = np.random.normal(0, next_sigma, size=[batch_size, z_size]).astype(np.float32)
            # linearly interpolate between the first and last latent code
            new_sample = np.zeros((inter_size, z_size))
            new_sample[0] = z_sample[0]
            new_sample[inter_size-1] = z_sample[batch_size-1]
            diff = (new_sample[inter_size-1] - new_sample[0]) / float(inter_size - 1)
            for k in range(1, inter_size):
                new_sample[k] = new_sample[k-1] + diff
                #new_sample[k,k] = z_sample[batch_size-1,k]

            g_objects = sess.run(net_g_test,feed_dict={z_vector:new_sample})
            
            #x = sess.run(d_output_x, feed_dict={net_g_test:g_objects})
            #g_objects = np.transpose(g_objects, (0,4,1,2,3))
            #print (x)
            #print (g_objects.shape)
            # g_objects = g_objects/np.linalg.norm(g_objects,axis = 4)
            #io.savemat("image", {"voxels": g_objects})
            #id_ch = np.random.randint(0, batch_size, 4)
            g_objects.dump(train_sample_directory+'/interpolation_new')
            for i in range(4):
                # pick four evenly spaced samples along the interpolation path
                idx = i * (inter_size - 1) // 3
                #print(g_objects[idx].max(), g_objects[idx].min(), g_objects[idx].shape)
                if g_objects[idx].max() > 0.5:
                    #image = g_objects[idx] > 0.5
                    #io.savemat("image"+str(i), {"voxels": image.reshape(1,1,64,64,64)})
                    d.plotVoxelVisdom(np.squeeze(g_objects[idx] > 0.5), vis, '_'.join(map(str, [idx])))
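
The stepping loop above is a linear interpolation between two latent codes. A compact equivalent sketch with np.linspace (z_a and z_b stand for the two endpoint codes; the names are illustrative, not from the original code):

import numpy as np

def interpolate_latent(z_a, z_b, inter_size):
    # alpha runs from 0 to 1 in inter_size steps; broadcasting blends the two codes
    alpha = np.linspace(0.0, 1.0, inter_size)[:, None]
    return ((1.0 - alpha) * z_a + alpha * z_b).astype(np.float32)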
Example No. 3
def trainGAN(is_dummy=False, checkpoint=None):

    weights = initialiseWeights()

    z_vector = tf.placeholder(shape=[batch_size,z_size],dtype=tf.float32) 
    x_vector = tf.placeholder(shape=[batch_size,cube_len,cube_len,cube_len,1],dtype=tf.float32) 

    net_g_train = generator(z_vector, phase_train=True, reuse=False) 

    d_output_x, d_no_sigmoid_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z, d_no_sigmoid_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
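    # n_p_x: real inputs the discriminator scores as real (> 0.5);
    # n_p_z: generated inputs it scores as fake (< 0.5)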
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z < 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    # d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1-d_output_z))
    # g_loss = -tf.reduce_mean(tf.log(d_output_z))

    d_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_x, labels=tf.ones_like(d_output_x)) + tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_z, labels=tf.zeros_like(d_output_z))
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_z, labels=tf.ones_like(d_output_z))
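    # These sigmoid-cross-entropy terms are the numerically stable form of the
    # commented-out -log(D(x)) - log(1 - D(G(z))) and -log(D(G(z))) losses above.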
    
    d_loss = tf.reduce_mean(d_loss)
    g_loss = tf.reduce_mean(g_loss)

    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)

    net_g_test = generator(z_vector, phase_train=False, reuse=True)

    para_g = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wg', 'bg', 'gen'])]
    para_d = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wd', 'bd', 'dis'])]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(learning_rate=d_lr,beta1=beta).minimize(d_loss,var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(learning_rate=g_lr,beta1=beta).minimize(g_loss,var_list=para_g)

    saver = tf.train.Saver() 
    vis = visdom.Visdom()


    with tf.Session() as sess:  
      
        sess.run(tf.global_variables_initializer())        
        if checkpoint is not None:
            saver.restore(sess, checkpoint)        

        if is_dummy:
            volumes = np.random.randint(0,2,(batch_size,cube_len,cube_len,cube_len))
            print 'Using Dummy Data'
        else:
            volumes = d.getAll(obj=obj, train=True, is_local=is_local, obj_ratio=obj_ratio)
            print 'Using ' + obj + ' Data'
        volumes = volumes[...,np.newaxis].astype(np.float)
        # volumes *= 2.0
        # volumes -= 1.0

        for epoch in range(n_epochs):
            
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z_sample = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
            z = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
            # z = np.random.uniform(0, 1, size=[batch_size, z_size]).astype(np.float32)

            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge([summary_d_loss,
                                                summary_d_x_hist, 
                                                summary_d_z_hist,
                                                summary_n_p_x,
                                                summary_n_p_z,
                                                summary_d_acc])

            summary_d, discriminator_loss = sess.run([d_summary_merge,d_loss],feed_dict={z_vector:z, x_vector:x})
            summary_g, generator_loss = sess.run([summary_g_loss,g_loss],feed_dict={z_vector:z})  
            d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],feed_dict={z_vector:z, x_vector:x})
            print n_x, n_z

            if d_accuracy < d_thresh:
                sess.run([optimizer_op_d],feed_dict={z_vector:z, x_vector:x})
                print 'Discriminator Training ', "epoch: ",epoch,', d_loss:',discriminator_loss,'g_loss:',generator_loss, "d_acc: ", d_accuracy

            sess.run([optimizer_op_g],feed_dict={z_vector:z})
            print 'Generator Training ', "epoch: ",epoch,', d_loss:',discriminator_loss,'g_loss:',generator_loss, "d_acc: ", d_accuracy

            # output generated chairs
            if epoch % 200 == 0:
                g_objects = sess.run(net_g_test,feed_dict={z_vector:z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_objects.dump(train_sample_directory+'/biasfree_'+str(epoch))
                id_ch = np.random.randint(0, batch_size, 4)
                for i in range(4):
                    if g_objects[id_ch[i]].max() > 0.5:
                        d.plotVoxelVisdom(np.squeeze(g_objects[id_ch[i]]>0.5), vis, '_'.join(map(str,[epoch,i])))
            if epoch % 50 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)      
                saver.save(sess, save_path = model_directory + '/biasfree_' + str(epoch) + '.cptk')
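
The sample files are written with ndarray.dump(), i.e. pickled NumPy arrays. A small sketch of reading one back and binarizing it into occupancy grids (assumes the same train_sample_directory global; allow_pickle is required on newer NumPy releases):

import numpy as np

samples = np.load(train_sample_directory + '/biasfree_0', allow_pickle=True)
voxels = np.squeeze(samples > 0.5)  # boolean voxel grids, one per latent sample
print(voxels.shape, voxels.dtype)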
Example No. 4
        # visdom visualization
        if (i % 50) == 0 and i > 0:
            vis.close(None)
            id_ch = np.random.randint(0, opt.batchSize, opt.batchSize)
            t = gen_data.detach().cpu().clone()
            t = t.permute(0, 4, 2, 3, 1)
            gen_data_np = t.numpy()
            t = masked_data.detach().cpu().clone()
            t = t.permute(0, 4, 2, 3, 1)
            masked_data_np = t.numpy()
            t = real_data.detach().cpu().clone()
            t = t.permute(0, 4, 2, 3, 1)
            real_data_np = t.numpy()
            for j in range(opt.batchSize):
                if gen_data_np[id_ch[j]].max() > 0.5:
                    d.plotVoxelVisdom(np.squeeze(real_data_np[id_ch[j]] > 0.5), vis, '_'.join(map(str, [epoch, j])))
                    d.plotVoxelVisdom(np.squeeze(masked_data_np[id_ch[j]] > 0.5), vis, '_'.join(map(str, [epoch, j+1])))
                    d.plotVoxelVisdom(np.squeeze(gen_data_np[id_ch[j]] > 0.5), vis, '_'.join(map(str, [epoch, j+2])))
                    break

    print('Time elapsed Epoch %d: %d seconds'
            % (epoch + 1, time.time() - t0))

    # TensorBoard logging
    # scalar values
    info = {
        'D loss': errD_all.avg,
        'G loss': errG_all.avg
    }

    for tag, value in info.items():
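        # hypothetical loop body: log each scalar via torch.utils.tensorboard,
        # assuming a SummaryWriter instance named `writer` (the original
        # repository may have used a different logger)
        writer.add_scalar(tag, value, epoch + 1)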
Example No. 5
def trainGAN(is_dummy=False, checkpoint=None):

    weights = initialiseWeights()
    # z_size = 200, cube_len = 64
    z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
    x_vector = tf.placeholder(
        shape=[batch_size, cube_len, cube_len, cube_len, 1], dtype=tf.float32)

    net_g_train = generator(z_vector, phase_train=True, reuse=False)
    # x_vector is the discriminator input: the real 3D objects
    d_output_x, d_no_sigmoid_output_x = discriminator(x_vector,
                                                      phase_train=True,
                                                      reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99),
                            0.01)  # clamp d_output_x to [0.01, 0.99]
    # histogram summary for TensorBoard
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)
    # discriminator output for the generated 3D objects
    d_output_z, d_no_sigmoid_output_z = discriminator(net_g_train,
                                                      phase_train=True,
                                                      reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z < 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    # d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1-d_output_z))
    # g_loss = -tf.reduce_mean(tf.log(d_output_z))

    d_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_no_sigmoid_output_x, labels=tf.ones_like(
            d_output_x)) + tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_no_sigmoid_output_z, labels=tf.zeros_like(d_output_z))
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_no_sigmoid_output_z, labels=tf.ones_like(d_output_z))

    d_loss = tf.reduce_mean(d_loss)
    g_loss = tf.reduce_mean(g_loss)
    # TensorBoard scalar summaries
    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)
    net_g_test = generator(z_vector, phase_train=False, reuse=True)
    # tf.trainable_variables() returns the list of variables to be trained
    para_g = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wg', 'bg', 'gen'])
    ]
    para_d = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wd', 'bd', 'dis'])
    ]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(
        learning_rate=d_lr, beta1=beta).minimize(d_loss, var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(
        learning_rate=g_lr, beta1=beta).minimize(g_loss, var_list=para_g)

    saver = tf.train.Saver()
    # visdom client for visualization
    vis = visdom.Visdom()

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        if checkpoint is not None:
            saver.restore(sess, checkpoint)
        # is_dummy = false
        if is_dummy:
            # is_dummy = true
            # batch_size = 32 cube_len = 64
            volumes = np.random.randint(
                0, 2, (batch_size, cube_len, cube_len, cube_len))
            print 'Using Dummy Data'
        else:
            # is_dummy = false, is_local = false; voxList is a list of .npy file paths
            voxList = d.getAll(obj=obj,
                               train=True,
                               is_local=is_local,
                               obj_ratio=obj_ratio)
            print 'Using ' + obj + ' Data'
        # volumes *= 2.0
        # volumes -= 1.0
        for epoch in range(n_epochs):
            for batch_count in range(len(voxList) // batch_size):
                volumes = np.zeros([batch_size, cube_len, cube_len, cube_len],
                                   dtype=np.bool)
                # load one .npy file per sample in this batch
                for index in range(batch_count * batch_size,
                                   (batch_count + 1) * batch_size):
                    # each .npy file holds one voxel array
                    vox_arr = np.load(voxList[index])
                    volumes[index % batch_size] = vox_arr
                # add a trailing channel axis
                x = volumes[..., np.newaxis].astype(np.float)
                # Gaussian Distribution
                z_sample = np.random.normal(0, 0.33,
                                            size=[batch_size,
                                                  z_size]).astype(np.float32)
                z = np.random.normal(0, 0.33, size=[batch_size,
                                                    z_size]).astype(np.float32)
                # z = np.random.uniform(0, 1, size=[batch_size, z_size]).astype(np.float32)

                # Update the discriminator and generator
                # manage summary
                d_summary_merge = tf.summary.merge([
                    summary_d_loss, summary_d_x_hist, summary_d_z_hist,
                    summary_n_p_x, summary_n_p_z, summary_d_acc
                ])
                #writer = tf.summary.FileWriter('logs' , sess.graph)
                #summary_d, discriminator_loss = sess.run([d_summary_merge,d_loss],feed_dict={z_vector:z, x_vector:x})
                discriminator_loss = sess.run(d_loss,
                                              feed_dict={
                                                  z_vector: z,
                                                  x_vector: x
                                              })
                #writer.add_summary(summary_d, batch_count)
                generator_loss = sess.run(g_loss, feed_dict={z_vector: z})
                #writer.add_summary(summary_g, batch_count)
                d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],
                                                feed_dict={
                                                    z_vector: z,
                                                    x_vector: x
                                                })
                print n_x, n_z
                # only update the discriminator while its accuracy is below d_thresh (0.8)
                if d_accuracy < d_thresh:
                    sess.run([optimizer_op_d],
                             feed_dict={
                                 z_vector: z,
                                 x_vector: x
                             })
                    print 'Discriminator Training ', "Iteration: ", batch_count, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy

                sess.run([optimizer_op_g], feed_dict={z_vector: z})
                print 'Generator Training ', "Iteration: ", batch_count, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy
            # output generated chairs
            if epoch % 2 == 0:
                g_objects = sess.run(net_g_test,
                                     feed_dict={z_vector: z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_objects.dump(train_sample_directory + '/biasfree_' +
                               str(epoch))
                id_ch = np.random.randint(0, batch_size, 4)
                for i in range(4):
                    if g_objects[id_ch[i]].max() > 0.5:
                        d.plotVoxelVisdom(
                            np.squeeze(g_objects[id_ch[i]] > 0.5), vis,
                            '_'.join(map(str, [epoch, i])))
            if epoch % 50 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess,
                           save_path=model_directory + '/biasfree_' +
                           str(epoch) + '.cptk')
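
The per-batch loading in the inner loop can be factored into a small helper; a sketch under the same assumptions (voxList holds .npy file paths, each containing one cube_len**3 voxel grid):

import numpy as np

def voxel_batches(voxList, batch_size, cube_len):
    # yield one [batch_size, cube_len, cube_len, cube_len, 1] float batch at a time
    for start in range(0, (len(voxList) // batch_size) * batch_size, batch_size):
        batch = np.stack([np.load(p) for p in voxList[start:start + batch_size]])
        yield batch.reshape(batch_size, cube_len, cube_len, cube_len, 1).astype(np.float32)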