Example 1
def trainGAN(is_dummy=False, checkpoint=None):

    weights =  initialiseWeights()

    z_vector = tf.placeholder(shape=[batch_size,z_size],dtype=tf.float32) 
    x_vector = tf.placeholder(shape=[batch_size,cube_len,cube_len,cube_len,1],dtype=tf.float32) 

    net_g_train = generator(z_vector, phase_train=True, reuse=False) 

    d_output_x, d_no_sigmoid_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z, d_no_sigmoid_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z < 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    # d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1-d_output_z))
    # g_loss = -tf.reduce_mean(tf.log(d_output_z))

    d_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_x, labels=tf.ones_like(d_output_x)) 
    d_loss += tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_z, labels=tf.zeros_like(d_output_z))
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_z, labels=tf.ones_like(d_output_z))
    
    d_loss = tf.reduce_mean(d_loss)
    g_loss = tf.reduce_mean(g_loss)

    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)

    net_g_test = generator(z_vector, phase_train=False, reuse=True)

    para_g = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wg', 'bg', 'gen'])]
    para_d = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wd', 'bd', 'dis'])]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(learning_rate=d_lr,beta1=beta).minimize(d_loss,var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(learning_rate=g_lr,beta1=beta).minimize(g_loss,var_list=para_g)

    saver = tf.train.Saver() 
    vis = visdom.Visdom()


    with tf.Session() as sess:  
      
        sess.run(tf.global_variables_initializer())        
        if checkpoint is not None:
            saver.restore(sess, checkpoint)        

        if is_dummy:
            volumes = np.random.randint(0,2,(batch_size,cube_len,cube_len,cube_len))
            print('Using Dummy Data')
        else:
            volumes = d.getAll(obj=obj, train=True, is_local=is_local, obj_ratio=obj_ratio)
            print('Using ' + obj + ' Data')
        volumes = volumes[...,np.newaxis].astype(np.float)
        # volumes *= 2.0
        # volumes -= 1.0

        for epoch in range(n_epochs):
            
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z_sample = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
            z = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
            # z = np.random.uniform(0, 1, size=[batch_size, z_size]).astype(np.float32)

            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge([summary_d_loss,
                                                summary_d_x_hist, 
                                                summary_d_z_hist,
                                                summary_n_p_x,
                                                summary_n_p_z,
                                                summary_d_acc])

            summary_d, discriminator_loss = sess.run([d_summary_merge,d_loss],feed_dict={z_vector:z, x_vector:x})
            summary_g, generator_loss = sess.run([summary_g_loss,g_loss],feed_dict={z_vector:z})  
            d_accuracy, n_x, n_z, d_x,d_z = sess.run([d_acc, n_p_x, n_p_z,d_output_x,d_output_z],feed_dict={z_vector:z, x_vector:x})
            #print("nx_nz:",n_x, n_z, "\nd_x:",d_x.reshape(batch_size), "d_z:",d_z.reshape(batch_size))
            print ("nx",n_x,"nz",n_z)
            if d_accuracy < d_thresh:
                sess.run([optimizer_op_d],feed_dict={z_vector:z, x_vector:x})
                print('Discriminator Training ', "epoch: ",epoch,', d_loss:',discriminator_loss,'g_loss:',generator_loss, "d_acc: ", d_accuracy)

            sess.run([optimizer_op_g],feed_dict={z_vector:z})
            print('Generator Training ', "epoch: ",epoch,', d_loss:',discriminator_loss,'g_loss:',generator_loss, "d_acc: ", d_accuracy)

            # output generated chairs
            if epoch % 100 == 0:
                g_objects = sess.run(net_g_test,feed_dict={z_vector:z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_objects.dump(train_sample_directory+'/biasfree_'+str(epoch))
                id_ch = np.random.randint(0, batch_size, 4)
                for i in range(4):
                    print(g_objects[id_ch[i]].max())
                    if g_objects[id_ch[i]].max() > 0.5: d.plotVoxelVisdom(np.squeeze(g_objects[id_ch[i]]>0.5), vis, '_'.join(map(str,[epoch,i])))          
            if epoch % 200 == 0:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)      
                saver.save(sess, save_path = model_directory + '/biasfree_' + str(epoch) + '.cptk')
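A minimal NumPy sketch (not part of the example above) of why the logits-based loss used here is preferable to the commented-out -log formulation: tf.nn.sigmoid_cross_entropy_with_logits evaluates the same quantity in a numerically stable form, so extreme logits cannot produce inf losses. The helper names below are illustrative only.

import numpy as np

def sigmoid(l):
    return 1.0 / (1.0 + np.exp(-l))

def sigmoid_cross_entropy_with_logits(logits, labels):
    # Stable form documented for TensorFlow: max(l, 0) - l*z + log(1 + exp(-|l|))
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))

logits = np.array([-800.0, -1.0, 0.0, 1.0, 800.0])
labels = np.ones_like(logits)                    # "real" target, as for d_no_sigmoid_output_x

with np.errstate(over='ignore', divide='ignore'):
    naive = -np.log(sigmoid(logits))             # sigmoid underflows at -800, giving an inf loss
stable = sigmoid_cross_entropy_with_logits(logits, labels)
print(naive)                                     # [ inf   1.313  0.693  0.313  0. ]
print(stable)                                    # [ 800.  1.313  0.693  0.313  0. ]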
Example 2
def trainGAN(is_dummy=False):

    weights =  initialiseWeights()
    biases = initialiseBiases()

    z_vector = tf.placeholder(shape=[batch_size,z_size],dtype=tf.float32) 
    x_vector = tf.placeholder(shape=[batch_size,cube_len,cube_len,cube_len,1],dtype=tf.float32)

    net_g_train = generator(z_vector, phase_train=True, reuse=False) 

    d_output_x, d_no_sigmoid_output_x, flat_feature_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z, d_no_sigmoid_output_z, flat_feature_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z < 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    # d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1-d_output_z))
    # g_loss = -tf.reduce_mean(tf.log(d_output_z))

    d_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_x, labels=tf.ones_like(d_output_x)) + tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_z, labels=tf.zeros_like(d_output_z))
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_z, labels=tf.ones_like(d_output_z))
    
    d_loss = tf.reduce_mean(d_loss)
    g_loss = tf.reduce_mean(g_loss)

    # Feature Matching loss
    feature_loss = tf.reduce_mean(abs(tf.reduce_mean(flat_feature_z, 0) - tf.reduce_mean(flat_feature_x, 0)))
    g_loss = (1-feat_loss) * g_loss + feat_loss * feature_loss

    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)

    net_g_test = generator(z_vector, phase_train=False, reuse=True)

    para_g = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wg', 'bg', 'gen'])]
    para_d = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wd', 'bd', 'dis'])]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(learning_rate=d_lr,beta1=beta).minimize(d_loss,var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(learning_rate=g_lr,beta1=beta).minimize(g_loss,var_list=para_g)

    saver = tf.train.Saver(max_to_keep=50) 

    with tf.Session() as sess:  
      
        sess.run(tf.global_variables_initializer())       
        z_sample = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
        if is_dummy:
            volumes = np.random.randint(0,2,(batch_size,cube_len,cube_len,cube_len))
            print('Using Dummy Data')
        else:
            volumes = d.getAll(obj=obj, train=True, is_local=is_local, obj_ratio=obj_ratio)
            print('Using ' + obj + ' Data')
        volumes = volumes[...,np.newaxis].astype(np.float)
        # volumes *= 2.0
        # volumes -= 1.0

        for epoch in range(n_epochs):
            
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            # z = np.random.normal(0, 1, size=[batch_size, z_size]).astype(np.float32)
            z = np.random.uniform(0, 1, size=[batch_size, z_size]).astype(np.float32)

            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge([summary_d_loss,
                                                summary_d_x_hist, 
                                                summary_d_z_hist,
                                                summary_n_p_x,
                                                summary_n_p_z,
                                                summary_d_acc])

            summary_d, discriminator_loss = sess.run([d_summary_merge,d_loss],feed_dict={z_vector:z, x_vector:x})
            summary_g, generator_loss = sess.run([summary_g_loss,g_loss],feed_dict={z_vector:z, x_vector:x}) 
            d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],feed_dict={z_vector:z, x_vector:x})
            print(n_x, n_z)

            if d_accuracy < d_thresh:
                sess.run([optimizer_op_d],feed_dict={z_vector:z, x_vector:x})
                print('Discriminator Training ', "epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy)

            sess.run([optimizer_op_g],feed_dict={z_vector:z, x_vector:x})
            print('Generator Training ', "epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy)

            # output generated chairs
            if epoch % 50 == 10:
                g_chairs = sess.run(net_g_test,feed_dict={z_vector:z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_chairs.dump(train_sample_directory+'/biasfree_'+str(epoch))
            
            if epoch % 50 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)      
                saver.save(sess, save_path = model_directory + '/biasfree_' + str(epoch) + '.cptk')
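The extra term in this example is a feature-matching loss: the mean absolute difference between the batch-averaged intermediate discriminator features of real and generated volumes, blended with the adversarial loss by the feat_loss weight. A minimal NumPy sketch of that computation follows; the shapes, values and the 0.1 weight are illustrative stand-ins, not values from the original repository.

import numpy as np

rng = np.random.default_rng(0)
flat_feature_x = rng.normal(size=(32, 256))      # stand-in for real-volume features
flat_feature_z = rng.normal(size=(32, 256))      # stand-in for generated-volume features

feature_loss = np.mean(np.abs(flat_feature_z.mean(axis=0) - flat_feature_x.mean(axis=0)))

feat_loss = 0.1                                  # illustrative blending weight
g_loss_adv = 0.7                                 # stand-in adversarial generator loss
g_loss = (1 - feat_loss) * g_loss_adv + feat_loss * feature_loss
print(feature_loss, g_loss)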
Example 3
def trainGAN(is_dummy=False, checkpoint=None):
    weights = initialiseWeights()

    z_vector = tf.placeholder(shape=[batch_size, z_size, z_size, 3],
                              dtype=tf.float32)
    #z_vector = tf.placeholder(shape=[batch_size, 32,32,1], dtype=tf.float32)
    y_vector = tf.placeholder(dtype=tf.float32, shape=[batch_size, y_dim])
    x_vector = tf.placeholder(
        shape=[batch_size, cube_len, cube_len, cube_len, 1], dtype=tf.float32)
    #rat_vector =  tf.placeholder(dtype = tf.float32, shape=[1, y_dim])

    net_g_train = generator(z_vector, y_vector, phase_train=True, reuse=False)

    y_mask = tf.placeholder(tf.float32, shape=[batch_size, 64, 64, 64, 1])

    d_output_x, d_no_sigmoid_output_x = discriminator(x_vector,
                                                      y_mask,
                                                      phase_train=True,
                                                      reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z, d_no_sigmoid_output_z = discriminator(net_g_train,
                                                      y_mask,
                                                      phase_train=True,
                                                      reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z < 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    d_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_no_sigmoid_output_x, labels=tf.ones_like(
            d_output_x)) + tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_no_sigmoid_output_z, labels=tf.zeros_like(d_output_z))
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_no_sigmoid_output_z, labels=tf.ones_like(d_output_z))

    d_loss = tf.reduce_mean(d_loss)
    gen_loss_L1 = tf.reduce_mean(tf.abs(x_vector - net_g_train))
    g_loss = tf.reduce_mean(g_loss) * 0.1 + gen_loss_L1 * 0.9

    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)

    para_g = [
        var for var in tf.trainable_variables()
        if any(x in var.name
               for x in ['encoder_h', 'encoder_b', 'wg', 'bg', 'gen'])
    ]
    para_d = [
        var for var in tf.trainable_variables()
        if any(x in var.name
               for x in ['encoder_h', 'encoder_b', 'wd', 'bd', 'dis'])
    ]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer().minimize(d_loss, var_list=para_d)
    optimizer_op_g = tf.train.AdamOptimizer().minimize(g_loss, var_list=para_g)

    saver = tf.train.Saver()
    # vis = visdom.Visdom()
    count = 0
    loss_all = 0
    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())

        if is_dummy:
            volumes = np.random.randint(
                0, 2, (batch_size, cube_len, cube_len, cube_len))
            print('Using Dummy Data')
        else:
            volumes = d.getAll(obj=obj,
                               train=True,
                               is_local=is_local,
                               obj_ratio=obj_ratio)
            print('Using ' + obj + ' Data')
        volumes = volumes[..., np.newaxis].astype(np.float)

        for epoch in range(n_epochs):

            idx = np.random.randint(len(volumes) - 1, size=batch_size)
            x = volumes[idx]
            y = np.random.normal(0, 0.33, size=[batch_size,
                                                y_dim]).astype(np.float32)

            #z = z_load[idx]
            z = np.array([], dtype=float)
            for m in range(batch_size):
                idy = random.randint(0, 7)
                id = idx[m]
                z1 = np.array([], float)
                if id < 200:  #0-199
                    z1 = z_load1[id][idy]
                if id >= 200 and id < 400:  #200-399
                    z1 = z_load2[id - 200][idy]
                if id >= 400 and id < 600:  #400-599
                    z1 = z_load3[id - 400][idy]
                if id >= 600:  #600-end
                    z1 = z_load4[id - 600][idy]
                z = np.append(z, z1)
                z = z.reshape([m + 1, 128, 128, 3])

            y_mask_array = mask_load[idx]
            d_summary_merge = tf.summary.merge([
                summary_d_loss, summary_d_x_hist, summary_d_z_hist,
                summary_n_p_x, summary_n_p_z, summary_d_acc
            ])
            count = count + 1
            summary_d, discriminator_loss = sess.run([d_summary_merge, d_loss],
                                                     feed_dict={
                                                         z_vector: z,
                                                         x_vector: x,
                                                         y_vector: y,
                                                         y_mask: y_mask_array
                                                     })
            summary_g, generator_loss, generator_loss_L1 = sess.run(
                [summary_g_loss, g_loss, gen_loss_L1],
                feed_dict={
                    z_vector: z,
                    x_vector: x,
                    y_vector: y,
                    y_mask: y_mask_array
                })
            d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],
                                            feed_dict={
                                                z_vector: z,
                                                x_vector: x,
                                                y_vector: y,
                                                y_mask: y_mask_array
                                            })
            loss_all = loss_all + generator_loss_L1
            loss_avg = loss_all / count
            print(loss_avg)
            if d_accuracy < d_thresh:
                sess.run([optimizer_op_d],
                         feed_dict={
                             z_vector: z,
                             x_vector: x,
                             y_vector: y,
                             y_mask: y_mask_array
                         })
                # write_Excle.saveValue('D', epoch, discriminator_loss.item(), generator_loss.item(), d_accuracy)
                print('Discriminator Training ', "epoch: ", epoch, ', d_loss:',
                      discriminator_loss, 'g_loss:', generator_loss, "d_acc: ",
                      d_accuracy)

                # 'list' is presumably a module-level log list (the name shadows the built-in)
                list.append(
                    dict(netType='D',
                         epoch=epoch,
                         d_loss=discriminator_loss.item(),
                         g_loss=generator_loss.item(),
                         d_acc=d_accuracy))
            if d_accuracy > 0.5:
                sess.run([optimizer_op_g],
                         feed_dict={
                             z_vector: z,
                             x_vector: x,
                             y_vector: y,
                             y_mask: y_mask_array
                         })
                # write_Excle.saveValue('G', epoch, discriminator_loss.item(), generator_loss.item(), d_accuracy)

                list.append(
                    dict(netType='G',
                         epoch=epoch,
                         d_loss=discriminator_loss.item(),
                         g_loss=generator_loss.item(),
                         d_acc=d_accuracy))

            print('Generator Training ', "epoch: ", epoch, ', d_loss:',
                  discriminator_loss, 'g_loss:', generator_loss, "d_acc: ",
                  d_accuracy)

            if epoch % 400 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess,
                           save_path=model_directory + '/Larry_2_5_' +
                           str(epoch) + '.cptk')
                loss_all = 0
                count = 0
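The generator objective in this example mixes a down-weighted adversarial term with a dominant L1 reconstruction term between the real volume and the generated volume (weights 0.1 and 0.9). A minimal NumPy sketch of that blend, with stand-in arrays and loss values:

import numpy as np

rng = np.random.default_rng(1)
x_real = rng.integers(0, 2, size=(4, 64, 64, 64, 1)).astype(float)   # stand-in target voxel grids
x_fake = rng.uniform(0, 1, size=x_real.shape)                        # stand-in generator output

gen_loss_L1 = np.mean(np.abs(x_real - x_fake))   # same form as tf.reduce_mean(tf.abs(...)) above
g_loss_adv = 0.69                                # stand-in adversarial loss value
g_loss = 0.1 * g_loss_adv + 0.9 * gen_loss_L1
print(gen_loss_L1, g_loss)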
Example 4
def ganGetOneResult(trained_model_path):
    global IsInit
    global Istrue
    global number

    if not IsInit:
        initialiseWeights()
        IsInit = True  # mark the weights as initialised so this only runs once

    z_vector = tf.placeholder(shape=[batch_size, z_size, z_size, 3],
                              dtype=tf.float32)
    y_vector = tf.placeholder(tf.float32, [batch_size, y_dim])
    net_g_test = generator(z_vector, y_vector, phase_train=True)
    saver = tf.train.Saver()
    with tf.variable_scope(tf.get_variable_scope(), reuse=None):
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, trained_model_path)
            while True:
                if Istrue:
                    x = []
                    Istrue = False
                    volumes = d.getAll(obj=obj,
                                       train=True,
                                       is_local=is_local,
                                       obj_ratio=obj_ratio)
                    volumes = volumes.reshape((99, 64, 64, 64))

                    g_objects = volumes[num]

                    for i in range(0, 5):
                        x.append(g_objects)
                    g_objects = np.array(x)
                    id_ch = np.random.randint(0, batch_size, 4)
                    i = 0
                    print(g_objects[id_ch[i]].max(), g_objects[id_ch[i]].min(),
                          g_objects[id_ch[i]].shape)
                    if g_objects[id_ch[i]].max() > 0.5:
                        result = np.squeeze(g_objects[id_ch[i]] > 0.5)
                        yield result
                else:

                    data_path = glob.glob(r'D:\Paper\image\test\Ball\*.png')
                    onepath = choice(data_path)
                    print(onepath)
                    img = Image.open(onepath)
                    img.save('output.png', quality=100)

                    def matrixpic(img):
                        matrixpic = np.asarray(img)
                        (h, w) = matrixpic.shape[:2]
                        for i in range(h):
                            for j in range(w):
                                if matrixpic[i][j] == 255:
                                    matrixpic[i][j] = 255
                                else:
                                    matrixpic[i][j] = 0
                        return matrixpic

                    # Convert green pixels to white
                    def convertcolor(img):
                        img = np.asarray(img)
                        (h, w) = img.shape[:2]
                        for i in range(h):
                            for j in range(w):
                                if img[i][j][1] > 180 and img[i][j][
                                        0] < 20 and img[i][j][2] < 20:
                                    img[i][j] = 255
                        return img

                    # Generate a binary test image
                    img = cv2.imread('output.png')
                    img = cv2.resize(img, (128, 128))
                    one_array = np.asarray(img) / 255
                    #  img1 = np.ones(img.shape[:2])
                    #  converted = convertcolor(img)
                    #  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    #  thresh = matrixpic(gray)
                    #
                    #  #
                    #  # Detect the contours of all shapes
                    #  contours = measure.find_contours(thresh, 0.5)
                    #  x = 0
                    #  y = 0
                    #  for n, contour in enumerate(contours):
                    #      if contour.size > x:
                    #          x = contour.size
                    #          y = contour
                    #  y[:, [0, 1]] = y[:, [1, 0]]
                    #  _contours = []
                    #  _contours.append(np.around(np.expand_dims(y, 1)).astype(np.int))
                    #  cv2.drawContours(img1, _contours, -1, (0, 0, 0), -1)
                    #
                    #  img1 = cv2.resize(img1, (64, 64), interpolation=cv2.INTER_CUBIC)
                    #
                    #
                    #  img_np_1d = img1 * 255
                    #
                    #  one_array = np.zeros((64, 64), float)
                    #  for i in range(64):
                    #      for j in range(64):
                    #          if img_np_1d[i][j] < 155:
                    #              one_array[i][j] = 255
                    #          else:
                    #              one_array[i][j] = 0
                    #
                    #  one_array = Image.fromarray(one_array)
                    #  one_array = one_array.rotate(-90)
                    #  if one_array.mode != 'RGB':
                    #      one_array = one_array.convert('RGB')
                    #  one_array.save('1234.png')
                    #  one_array = cv2.imread('1234.png', 0)
                    #  left = up = down = right = 0
                    #  for j in range(32):
                    #      for i  in range(64):
                    #          if one_array[i][j] != 0:
                    #              left = j
                    #      if left != 0:
                    #          break
                    #
                    #  for i in range(32):
                    #      for j in range(64):
                    #          if one_array[i][j] == 255:
                    #              up = i
                    #      if up != 0:
                    #          break
                    #  for i in range(32):
                    #      for j in range(64):
                    #          if one_array[63 - i][j] == 255:
                    #              down = 63 - i
                    #      if down != 0:
                    #          break
                    #  right = down - up + 24
                    #  box = (left, up, right, down)
                    #  img = Image.open('1234.png')
                    #  roi = img.crop(box)
                    #  dst = roi.resize((64, 64), Image.ANTIALIAS)
                    #  dst.save('123456.png', quality=100)
                    #  num = onepath[-8:-5]
                    #  num = int(num) - 901
                    # #
                    #one_array = cv2.imread('123456.png', 0)
                    # for i in range(64):
                    #     for j in range(64):
                    #         if one_array[i][j] > 125:
                    #             one_array[i][j] = 1
                    #         else:
                    #             one_array[i][j] = 0
                    # Istrue = True
                    x = np.array([], float)
                    for i in range(0, 5):
                        x = np.append(x, one_array)
                        x = x.reshape([i + 1, z_size, z_size, 3])
                    # x = np.array(x)
                    # z_sample = np.array(x)
                    # z_sample = z_sample.reshape([-1, z_size, z_size, 3])
                    y = np.random.normal(0, 0.33,
                                         size=[batch_size,
                                               y_dim]).astype(np.float32)
                    g_objects = sess.run(net_g_test,
                                         feed_dict={
                                             z_vector: x,
                                             y_vector: y
                                         })
                    id_ch = np.random.randint(0, batch_size, 4)
                    i = 0
                    print(g_objects[id_ch[i]].max(), g_objects[id_ch[i]].min(),
                          g_objects[id_ch[i]].shape)
                    if g_objects[id_ch[i]].max() > 0.5:
                        result = np.squeeze(g_objects[id_ch[i]] > 0.5)
                        yield result
    yield None
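The conditioning batch in this example is built by appending the same preprocessed image batch_size times and reshaping inside a loop. A minimal sketch of an equivalent, vectorised construction; batch_size = 5 and z_size = 128 are assumptions read off the loop and the shapes used in the snippet.

import numpy as np

batch_size, z_size = 5, 128
one_array = np.zeros((z_size, z_size, 3), dtype=np.float32)    # stand-in for the image scaled to [0, 1]

z_batch = np.repeat(one_array[np.newaxis, ...], batch_size, axis=0)
print(z_batch.shape)                                           # (5, 128, 128, 3)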
Example 5
def trainGAN():

    weights, biases = initialiseWeights(), initialiseBiases()

    z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
    x_vector = tf.placeholder(shape=[batch_size, 32, 32, 32, 1],
                              dtype=tf.float32)

    net_g_train = generator(z_vector, phase_train=True, reuse=False)

    d_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1 - d_output_z))
    summary_d_loss = tf.summary.scalar("d_loss", d_loss)

    g_loss = -tf.reduce_mean(tf.log(d_output_z))
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)

    net_g_test = generator(z_vector, phase_train=True, reuse=True)
    para_g = list(
        np.array(tf.trainable_variables())[[0, 1, 4, 5, 8, 9, 12, 13]])
    para_d = list(
        np.array(tf.trainable_variables())[[14, 15, 16, 17, 20, 21, 24,
                                            25]])  #,28,29]])

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(
        learning_rate=alpha_d, beta1=beta).minimize(d_loss, var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(
        learning_rate=alpha_g, beta1=beta).minimize(g_loss, var_list=para_g)

    saver = tf.train.Saver(max_to_keep=50)

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        z_sample = np.random.normal(0, 0.33, size=[batch_size,
                                                   z_size]).astype(np.float32)
        volumes = d.getAll(obj=obj, train=True, is_local=True)
        volumes = volumes[..., np.newaxis].astype(np.float)

        for epoch in tqdm(range(n_epochs)):

            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z = np.random.normal(0, 0.33, size=[batch_size,
                                                z_size]).astype(np.float32)

            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge(
                [summary_d_loss, summary_d_x_hist, summary_d_z_hist])

            summary_d, discriminator_loss = sess.run([d_summary_merge, d_loss],
                                                     feed_dict={
                                                         z_vector: z,
                                                         x_vector: x
                                                     })
            summary_g, generator_loss = sess.run([summary_g_loss, g_loss],
                                                 feed_dict={z_vector: z})

            if discriminator_loss <= 4.6 * 0.1:
                sess.run([optimizer_op_g], feed_dict={z_vector: z})
            elif generator_loss <= 4.6 * 0.1:
                sess.run([optimizer_op_d],
                         feed_dict={
                             z_vector: z,
                             x_vector: x
                         })
            else:
                sess.run([optimizer_op_d],
                         feed_dict={
                             z_vector: z,
                             x_vector: x
                         })
                sess.run([optimizer_op_g], feed_dict={z_vector: z})

            print("epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:',
                  generator_loss)

            # output generated chairs
            if epoch % 500 == 10:
                g_chairs = sess.run(net_g_test, feed_dict={z_vector: z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_chairs.dump(train_sample_directory + '/' + str(epoch))

            if epoch % 500 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess,
                           save_path=model_directory + '/' + str(epoch) +
                           '.cptk')
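The update schedule in this example is gated by the loss values rather than by a discriminator-accuracy threshold: when one network's loss falls below a fixed value (4.6 * 0.1 = 0.46), only its opponent is updated on that step. A minimal sketch of that rule with illustrative loss values:

def choose_updates(d_loss_val, g_loss_val, thresh=4.6 * 0.1):
    if d_loss_val <= thresh:        # discriminator winning: update the generator only
        return ["G"]
    if g_loss_val <= thresh:        # generator winning: update the discriminator only
        return ["D"]
    return ["D", "G"]               # otherwise update both

print(choose_updates(0.3, 1.2))     # ['G']
print(choose_updates(1.2, 0.3))     # ['D']
print(choose_updates(1.0, 1.0))     # ['D', 'G']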
Example 6
gen_loader = tf.train.Saver(generator_vars)
gen_loader.restore(sess, tf.train.latest_checkpoint(gan_save_directory))

vox_loader = tf.train.Saver(
    tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vox'))
vox_loader.restore(sess, voxnet_save_path)

# load from checkpoint
# all_saver.restore(sess, tf.train.latest_checkpoint(model_directory))

# In[11]:

volumes = d.getAll(obj=obj,
                   train=True,
                   is_local=False,
                   obj_ratio=obj_ratio,
                   cube_len=32)
print('Using ' + obj + ' Data')
volumes = volumes[..., np.newaxis].astype(np.float)

num_samples = len(volumes)

num_batches = num_samples // batch_size

sess.run(tf.global_variables_initializer())

# In[ ]:

volumes_test = d.getAll(obj=obj,
                        train=False,
Example 7
def trainGAN(is_dummy=False, exp_id=None):

    weights, biases =  initialiseWeights(), initialiseBiases()
    x_vector = tf.placeholder(shape=[batch_size,cube_len,cube_len,cube_len,1],dtype=tf.float32) 
    z_vector = tf.placeholder(shape=[batch_size,z_size],dtype=tf.float32) 

    # Weights for autoencoder pretraining
    xavier_init = tf.contrib.layers.xavier_initializer()
    zero_init = tf.zeros_initializer()
    weights['wae_d'] = tf.get_variable("wae_d", shape=[4, 4, 4, 512, 200], initializer=xavier_init)
    biases['bae_d'] =  tf.get_variable("bae_d", shape=[200], initializer=zero_init)

    encoded = encoder(x_vector, phase_train=True, reuse=False)
    encoded = tf.maximum(tf.minimum(encoded, 0.99), 0.01)
    decoded = generator(encoded, phase_train=True, reuse=False) 

    decoded_test = generator(tf.maximum(tf.minimum(encoder(x_vector, phase_train=False, reuse=False), 0.99), 0.01), phase_train=False, reuse=False)

    # Round decoder output
    decoded = threshold(decoded)
    # Compute MSE Loss and L2 Loss
    mse_loss = tf.reduce_mean(tf.pow(x_vector - decoded, 2))
    para_ae = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wg', 'wd', 'wae'])]
    for var in tf.trainable_variables():
        if 'wd5' in var.name:
            last_layer_dis = var
    para_ae.remove(last_layer_dis)
    # l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in para_ae])
    # ae_loss = mse_loss + reg_l2 * l2_loss
    ae_loss = mse_loss     

    optimizer_ae = tf.train.AdamOptimizer(learning_rate=ae_lr,beta1=beta, name="Adam_AE").minimize(ae_loss)
    # optimizer_ae = tf.train.RMSPropOptimizer(learning_rate=ae_lr, name="RMS_AE").minimize(ae_loss)


    net_g_train = generator(z_vector, phase_train=True, reuse=False) 

    d_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z <= 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1-d_output_z))
    g_loss = -tf.reduce_mean(tf.log(d_output_z))
    
    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)

    net_g_test = generator(z_vector, phase_train=False, reuse=True)

    para_g = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wg', 'bg', 'gen'])]
    para_d = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wd', 'bd', 'dis'])]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(learning_rate=d_lr,beta1=beta).minimize(d_loss,var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(learning_rate=g_lr,beta1=beta).minimize(g_loss,var_list=para_g)

    saver = tf.train.Saver(max_to_keep=50) 

    with tf.Session() as sess:  
      
        sess.run(tf.global_variables_initializer())        
        z_sample = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
        if is_dummy:
            volumes = np.random.randint(0,2,(batch_size,cube_len,cube_len,cube_len))
            print('Using Dummy Data')
        else:
            volumes = d.getAll(obj=obj, train=True, is_local=is_local, obj_ratio=obj_ratio)
            print('Using ' + obj + ' Data')
        volumes = volumes[...,np.newaxis].astype(np.float) 

        for epoch in range(n_ae_epochs):
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]

            # Autoencoder pretraining
            # ae_l, mse_l, l2_l, _ = sess.run([ae_loss, mse_loss, l2_loss, optimizer_ae],feed_dict={x_vector:x})
            # print('Autoencoder Training ', "epoch: ", epoch, 'ae_loss:', ae_l, 'mse_loss:', mse_l, 'l2_loss:', l2_l)

            ae_l, mse_l, _ = sess.run([ae_loss, mse_loss, optimizer_ae],feed_dict={x_vector:x})
            print('Autoencoder Training ', "epoch: ", epoch, 'ae_loss:', ae_l, 'mse_loss:', mse_l)

            # output generated chairs
            if epoch % ae_inter == 10:
                idx = np.random.randint(len(volumes), size=batch_size)
                x = volumes[idx]
                decoded_chairs = sess.run(decoded_test, feed_dict={x_vector:x})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                decoded_chairs.dump(train_sample_directory+'/ae_' + exp_id +str(epoch))

        for epoch in range(n_epochs):
            
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)

            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge([summary_d_loss,
                                                summary_d_x_hist, 
                                                summary_d_z_hist,
                                                summary_n_p_x,
                                                summary_n_p_z,
                                                summary_d_acc])

            summary_d, discriminator_loss = sess.run([d_summary_merge,d_loss],feed_dict={z_vector:z, x_vector:x})
            summary_g, generator_loss = sess.run([summary_g_loss,g_loss],feed_dict={z_vector:z})  
            d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],feed_dict={z_vector:z, x_vector:x})
            print(n_x, n_z)

            if d_accuracy < d_thresh:
                sess.run([optimizer_op_d],feed_dict={z_vector:z, x_vector:x})
                print('Discriminator Training ', "epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy)

            sess.run([optimizer_op_g],feed_dict={z_vector:z})
            print('Generator Training ', "epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy)

            # output generated chairs
            if epoch % gan_inter == 10:
                g_chairs = sess.run(net_g_test,feed_dict={z_vector:z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_chairs.dump(train_sample_directory+'/'+str(epoch))
            
            if epoch % gan_inter == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)      
                saver.save(sess, save_path = model_directory + '/' + str(epoch) + '.cptk')
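The pretraining stage in this example minimises a mean-squared error between the input volume and a thresholded decoder output. A minimal NumPy sketch of that objective; threshold() below is an assumed hard 0.5 cut, and the real helper in the repository may be defined differently (a hard cut is not differentiable, so this stands in only for the loss value itself).

import numpy as np

def threshold(v, t=0.5):
    # assumption: round decoder activations to occupied / empty voxels
    return (v > t).astype(v.dtype)

rng = np.random.default_rng(2)
x = rng.integers(0, 2, size=(4, 64, 64, 64, 1)).astype(float)    # stand-in input voxels
decoded = rng.uniform(0, 1, size=x.shape)                        # stand-in decoder output

mse_loss = np.mean((x - threshold(decoded)) ** 2)
print(mse_loss)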
Example 8
def trainGAN(is_dummy=False, exp_id=None):

    weights, biases = initialiseWeights(), initialiseBiases()
    x_vector = tf.placeholder(
        shape=[batch_size, cube_len, cube_len, cube_len, 1], dtype=tf.float32)
    z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)

    # Weights for autoencoder pretraining
    xavier_init = tf.contrib.layers.xavier_initializer()
    zero_init = tf.zeros_initializer()
    weights['wae_d'] = tf.get_variable("wae_d",
                                       shape=[4, 4, 4, 512, 200],
                                       initializer=xavier_init)
    biases['bae_d'] = tf.get_variable("bae_d",
                                      shape=[200],
                                      initializer=zero_init)

    encoded = encoder(x_vector, phase_train=True, reuse=False)
    encoded = tf.maximum(tf.minimum(encoded, 0.99), 0.01)
    decoded = generator(encoded, phase_train=True, reuse=False)

    decoded_test = generator(tf.maximum(
        tf.minimum(encoder(x_vector, phase_train=False, reuse=False), 0.99),
        0.01),
                             phase_train=False,
                             reuse=False)

    # Round decoder output
    decoded = threshold(decoded)
    # Compute MSE Loss and L2 Loss
    mse_loss = tf.reduce_mean(tf.pow(x_vector - decoded, 2))
    para_ae = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wg', 'wd', 'wae'])
    ]
    for var in tf.trainable_variables():
        if 'wd5' in var.name:
            last_layer_dis = var
    para_ae.remove(last_layer_dis)
    # l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in para_ae])
    # ae_loss = mse_loss + reg_l2 * l2_loss
    ae_loss = mse_loss

    optimizer_ae = tf.train.AdamOptimizer(learning_rate=ae_lr,
                                          beta1=beta,
                                          name="Adam_AE").minimize(ae_loss)
    # optimizer_ae = tf.train.RMSPropOptimizer(learning_rate=ae_lr, name="RMS_AE").minimize(ae_loss)

    net_g_train = generator(z_vector, phase_train=True, reuse=False)

    d_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z <= 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1 - d_output_z))
    g_loss = -tf.reduce_mean(tf.log(d_output_z))

    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)

    net_g_test = generator(z_vector, phase_train=False, reuse=True)

    para_g = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wg', 'bg', 'gen'])
    ]
    para_d = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wd', 'bd', 'dis'])
    ]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(
        learning_rate=d_lr, beta1=beta).minimize(d_loss, var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(
        learning_rate=g_lr, beta1=beta).minimize(g_loss, var_list=para_g)

    saver = tf.train.Saver(max_to_keep=50)

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        z_sample = np.random.normal(0, 0.33, size=[batch_size,
                                                   z_size]).astype(np.float32)
        if is_dummy:
            volumes = np.random.randint(
                0, 2, (batch_size, cube_len, cube_len, cube_len))
            print('Using Dummy Data')
        else:
            volumes = d.getAll(obj=obj,
                               train=True,
                               is_local=is_local,
                               obj_ratio=obj_ratio)
            print('Using ' + obj + ' Data')
        volumes = volumes[..., np.newaxis].astype(np.float)

        for epoch in range(n_ae_epochs):
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]

            # Autoencoder pretraining
            # ae_l, mse_l, l2_l, _ = sess.run([ae_loss, mse_loss, l2_loss, optimizer_ae],feed_dict={x_vector:x})
            # print ('Autoencoder Training ', "epoch: ",epoch, 'ae_loss:', ae_l, 'mse_loss:', mse_l, 'l2_loss:', l2_l)

            ae_l, mse_l, _ = sess.run([ae_loss, mse_loss, optimizer_ae],
                                      feed_dict={x_vector: x})
            print('Autoencoder Training ', "epoch: ", epoch, 'ae_loss:', ae_l,
                  'mse_loss:', mse_l)

            # output generated chairs
            if epoch % ae_inter == 10:
                idx = np.random.randint(len(volumes), size=batch_size)
                x = volumes[idx]
                decoded_chairs = sess.run(decoded_test,
                                          feed_dict={x_vector: x})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                decoded_chairs.dump(train_sample_directory + '/ae_' + exp_id +
                                    str(epoch))

        for epoch in range(n_epochs):

            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z = np.random.normal(0, 0.33, size=[batch_size,
                                                z_size]).astype(np.float32)

            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge([
                summary_d_loss, summary_d_x_hist, summary_d_z_hist,
                summary_n_p_x, summary_n_p_z, summary_d_acc
            ])

            summary_d, discriminator_loss = sess.run([d_summary_merge, d_loss],
                                                     feed_dict={
                                                         z_vector: z,
                                                         x_vector: x
                                                     })
            summary_g, generator_loss = sess.run([summary_g_loss, g_loss],
                                                 feed_dict={z_vector: z})
            d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],
                                            feed_dict={
                                                z_vector: z,
                                                x_vector: x
                                            })
            print(n_x, n_z)

            if d_accuracy < d_thresh:
                sess.run([optimizer_op_d],
                         feed_dict={
                             z_vector: z,
                             x_vector: x
                         })
                print('Discriminator Training ', "epoch: ", epoch, ', d_loss:',
                      discriminator_loss, 'g_loss:', generator_loss, "d_acc: ",
                      d_accuracy)

            sess.run([optimizer_op_g], feed_dict={z_vector: z})
            print('Generator Training ', "epoch: ", epoch, ', d_loss:',
                  discriminator_loss, 'g_loss:', generator_loss, "d_acc: ",
                  d_accuracy)

            # output generated chairs
            if epoch % gan_inter == 10:
                g_chairs = sess.run(net_g_test, feed_dict={z_vector: z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_chairs.dump(train_sample_directory + '/' + str(epoch))

            if epoch % gan_inter == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess,
                           save_path=model_directory + '/' + str(epoch) +
                           '.cptk')
                export_model(saver, sess, ["gen/init"], "gen/tanh")
Example 9
def trainGAN(is_dummy=False):

    weights, biases = initialiseWeights(), initialiseBiases()

    z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
    x_vector = tf.placeholder(
        shape=[batch_size, cube_len, cube_len, cube_len, 1], dtype=tf.float32)

    net_g_train = generator(z_vector, phase_train=True, reuse=False)

    d_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z <= 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1 - d_output_z))
    g_loss = -tf.reduce_mean(tf.log(d_output_z))

    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)

    net_g_test = generator(z_vector, phase_train=False, reuse=True)

    para_g = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wg', 'bg', 'gen'])
    ]
    para_d = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wd', 'bd', 'dis'])
    ]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(
        learning_rate=d_lr, beta1=beta).minimize(d_loss, var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(
        learning_rate=g_lr, beta1=beta).minimize(g_loss, var_list=para_g)

    saver = tf.train.Saver(max_to_keep=50)

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        z_sample = np.random.normal(0, 0.33, size=[batch_size,
                                                   z_size]).astype(np.float32)
        if is_dummy:
            volumes = np.random.randint(
                0, 2, (batch_size, cube_len, cube_len, cube_len))
            print('Using Dummy Data')
        else:
            volumes = d.getAll(obj=obj,
                               train=True,
                               is_local=is_local,
                               obj_ratio=obj_ratio)
            print('Using ' + obj + ' Data')
        volumes = volumes[..., np.newaxis].astype(np.float)

        for epoch in range(n_epochs):

            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z = np.random.normal(0, 0.33, size=[batch_size,
                                                z_size]).astype(np.float32)

            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge([
                summary_d_loss, summary_d_x_hist, summary_d_z_hist,
                summary_n_p_x, summary_n_p_z, summary_d_acc
            ])

            summary_d, discriminator_loss = sess.run([d_summary_merge, d_loss],
                                                     feed_dict={
                                                         z_vector: z,
                                                         x_vector: x
                                                     })
            summary_g, generator_loss = sess.run([summary_g_loss, g_loss],
                                                 feed_dict={z_vector: z})
            d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],
                                            feed_dict={
                                                z_vector: z,
                                                x_vector: x
                                            })
            print(n_x, n_z)

            if d_accuracy < d_thresh:
                sess.run([optimizer_op_d],
                         feed_dict={
                             z_vector: z,
                             x_vector: x
                         })
                print('Discriminator Training ', "epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy)

            sess.run([optimizer_op_g], feed_dict={z_vector: z})
            print('Generator Training ', "epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy)

            # output generated chairs
            if epoch % 500 == 10:
                g_chairs = sess.run(net_g_test, feed_dict={z_vector: z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_chairs.dump(train_sample_directory + '/' + str(epoch))

            if epoch % 500 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess,
                           save_path=model_directory + '/' + str(epoch) +
                           '.cptk')
Example 10
def save_checkpoint(state, curr_epoch):
    torch.save(state, './models/netG_e%d.pth.tar' % (curr_epoch))


# initialize Generator & Discriminator
netG = Generator().to(device)
weights_init(netG)
print(netG)

netD = Discriminator().to(device)
weights_init(netD)
print(netD)

# load ".off" files
volumes = d.getAll(obj=obj, train=True, is_local=is_local, obj_ratio=obj_ratio)
print('Using ' + obj + ' Data')
volumes = volumes[..., np.newaxis].astype(np.float)
data = torch.from_numpy(volumes)
data = data.permute(0, 4, 1, 2, 3)
data = data.type(torch.FloatTensor)


# choose loss function
criterion = nn.BCELoss()
criterion2 = nn.MSELoss()

# fake/real labels
real_label = 1
fake_label = 0
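This PyTorch setup stops just before the training loop. A minimal sketch, not the original repository's loop, of the discriminator/generator step it leads into; batch_size, z_size, the (N, z_size, 1, 1, 1) noise shape, the optimizer settings and the assumption that netD ends in a sigmoid (as nn.BCELoss requires) are all illustrative.

import torch
import torch.optim as optim

batch_size, z_size = 32, 200                      # assumed hyperparameters
optimizerD = optim.Adam(netD.parameters(), lr=1e-5, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=2.5e-3, betas=(0.5, 0.999))

# sample a real batch from the voxel data prepared above
real = data[torch.randint(len(data), (batch_size,))].to(device)
label_real = torch.full((batch_size,), real_label, dtype=torch.float, device=device)
label_fake = torch.full((batch_size,), fake_label, dtype=torch.float, device=device)

# discriminator step: real batch plus a detached fake batch
netD.zero_grad()
errD_real = criterion(netD(real).view(-1), label_real)
noise = torch.randn(batch_size, z_size, 1, 1, 1, device=device)
fake = netG(noise)
errD_fake = criterion(netD(fake.detach()).view(-1), label_fake)
(errD_real + errD_fake).backward()
optimizerD.step()

# generator step: try to make the discriminator output the real label
netG.zero_grad()
errG = criterion(netD(fake).view(-1), label_real)
errG.backward()
optimizerG.step()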
Example 11
def trainGAN(is_dummy=False, checkpoint=None):
    weights = initialiseWeights()

   # z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
    #z_vector = tf.placeholder(shape=[batch_size, 32,32,1], dtype=tf.float32)
    #y_vector = tf.placeholder(tf.float32, [batch_size, y_dim])
    x_vector = tf.placeholder(shape=[batch_size, cube_len, cube_len, cube_len, 1], dtype=tf.float32)
    mask_vector = tf.placeholder(shape=[batch_size, cube_len, cube_len, cube_len, 1], dtype=tf.float32)
    net_g_train = generator(mask_vector, phase_train=True, reuse=False)

    #y_mask = tf.placeholder(tf.float32, shape=[batch_size, 64, 64, 64, 1])

    d_output_x, d_no_sigmoid_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z, d_no_sigmoid_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z < 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    # d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1-d_output_z))
    # g_loss = -tf.reduce_mean(tf.log(d_output_z))

    d_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_x, labels=tf.ones_like(
        d_output_x)) + tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_z,
                                                               labels=tf.zeros_like(d_output_z))
    gen_loss_L1 = tf.reduce_mean(tf.abs(x_vector - net_g_train))
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_no_sigmoid_output_z, labels=tf.ones_like(d_output_z))

    d_loss = tf.reduce_mean(d_loss)
    g_loss = tf.reduce_mean(g_loss) * 0.01 + gen_loss_L1*0.99
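    # the generator objective above is dominated by the L1 reconstruction term (weight 0.99),
    # with only a small adversarial contribution (weight 0.01)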

    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)

    #net_g_test = generator(z_vector, y_vector, phase_train=False, reuse=True)

    para_g = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wg', 'bg', 'gen'])]
    para_d = [var for var in tf.trainable_variables() if any(x in var.name for x in ['wd', 'bd', 'dis'])]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(learning_rate=d_lr, beta1=beta).minimize(d_loss, var_list=para_d)#Larry
    #optimizer_op_d = tf.train.AdamOptimizer(learning_rate=d_lr, beta1=beta).minimize(d_loss)  # Larry
    #optimizer_op_g = tf.train.AdamOptimizer(learning_rate=g_lr, beta1=beta).minimize(g_loss)  # Larry
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(learning_rate=g_lr, beta1=beta).minimize(g_loss, var_list=para_g)#Larry
    parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])
    saver = tf.train.Saver()
    # vis = visdom.Visdom()

    with tf.Session() as sess:
        print("parameter_count =", sess.run(parameter_count))
        sess.run(tf.global_variables_initializer())
       # if checkpoint is not None:
        #    saver.restore(sess, checkpoint)

        if is_dummy:
            volumes = np.random.randint(0, 2, (batch_size, cube_len, cube_len, cube_len))
            print('Using Dummy Data')
        else:
            volumes = d.getAll(obj=obj, train=True, is_local=is_local, obj_ratio=obj_ratio)
            print('Using ' + obj + ' Data')
        volumes = volumes[..., np.newaxis].astype(np.float)
        # volumes *= 2.0
        # volumes -= 1.0
       # bound_list, mask_list = d.getBounds()
        #test_bound = np.array([22, 22, 0, 42, 42, 62], dtype=float)
        #test_bound = np.array([22, 22, 0, 42, 42, 62], dtype=int)
        #test_bound_list = np.tile(test_bound, bound_list.shape[0])
        #test_bound_list = test_bound_list.reshape((bound_list.shape[0], bound_list.shape[1]))
       # test_mask_list=d.get_mask_list_by_bound(test_bound_list)
        #mask_list = d.getMask()
        for epoch in range(n_epochs):

            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
           # y = bound_list[idx]/64
            #z = z_load[idx]
            #y = y_load[idx]
            y_mask_array = mask_load[idx]#Larry
            y_mask_array=y_mask_array.reshape([-1,64,64,64,1])
            # Larry Begin
            #z_sample = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)###Larry
            #z_sample
            #z = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
            z = np.random.uniform(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
            # Larry End
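            # note: z is sampled but never fed to the graph in this variant; the generator is
            # conditioned on the mask volume (mask_vector) rather than on random noise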
            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge([summary_d_loss,
                                                summary_d_x_hist,
                                                summary_d_z_hist,
                                                summary_n_p_x,
                                                summary_n_p_z,
                                                summary_d_acc])

            summary_d, discriminator_loss = sess.run([d_summary_merge, d_loss],
                                                     feed_dict={x_vector: x, mask_vector: y_mask_array})
            summary_g, generator_loss = sess.run([summary_g_loss, g_loss], feed_dict={mask_vector: y_mask_array})
            d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],
                                            feed_dict={x_vector: x, mask_vector: y_mask_array})
            print(n_x, n_z)

            if d_accuracy < d_thresh:
                sess.run([optimizer_op_d], feed_dict={x_vector: x,mask_vector: y_mask_array})
                write_Excle.saveValue('D', epoch, discriminator_loss.item(), generator_loss.item(), d_accuracy)
                print('Discriminator Training ', "epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:',
                      generator_loss, "d_acc: ", d_accuracy)

                list.append(dict(netType='D', epoch=epoch, d_loss=discriminator_loss.item(), g_loss=generator_loss.item(), d_acc=d_accuracy))
            if d_accuracy > 0.5:
                sess.run([optimizer_op_g], feed_dict={mask_vector: y_mask_array})
                write_Excle.saveValue('G', epoch, discriminator_loss.item(), generator_loss.item(), d_accuracy)

                list.append(dict(netType='G', epoch=epoch, d_loss=discriminator_loss.item(), g_loss=generator_loss.item(), d_acc=d_accuracy))

            print('Generator Training ', "epoch: ", epoch, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss,
                  "d_acc: ", d_accuracy)

            # output generated chairs
            # if epoch % 10000 == 0:
            #     #use fixed bound and mask
            #     y = test_bound_list[idx] / 64
            #     y_mask_array = test_mask_list[idx]
            #
            #     g_objects = sess.run(net_g_test, feed_dict={z_vector: z_sample, y_vector: y,y_mask: y_mask_array})
            #     if not os.path.exists(train_sample_directory):
            #         os.makedirs(train_sample_directory)
            #     g_objects.dump(train_sample_directory + '/biasfree_' + str(epoch))
            #     id_ch = np.random.randint(0, batch_size, 4)
            #     for i in range(4):
            #         if g_objects[id_ch[i]].max() > 0.5:
            #             d.plotVoxelVisdom(np.squeeze(g_objects[id_ch[i]] > 0.5), '_'.join(map(str, [epoch, i])))

            if epoch % 200 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess, save_path=model_directory + '/Larry_5_' + str(epoch) + '.cptk')
Example n. 12
def train():
    discriminator = disc_model()
    generator = gen_model()
    discriminator_on_generator = generator_containing_discriminator(
        generator, discriminator)

    g_optim = Adam(lr=g_lr, beta_1=beta)
    generator.compile(loss='binary_crossentropy', optimizer="SGD")

    d_optim = Adam(lr=d_lr, beta_1=0.9)
    discriminator_on_generator.compile(loss='binary_crossentropy',
                                       optimizer=g_optim)
    discriminator.trainable = True
    discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)

    z_sample = np.random.normal(0, 0.33, size=[batch_size, 1, 1, 1,
                                               z_size]).astype(np.float32)
    volumes = d.getAll(obj=obj,
                       train=True,
                       is_local=is_local,
                       obj_ratio=obj_ratio)
    print('Data loaded .......')
    volumes = volumes[..., np.newaxis].astype(np.float)

    if not os.path.exists(train_sample_directory):
        os.makedirs(train_sample_directory)
    if not os.path.exists(model_directory):
        os.makedirs(model_directory)

    for epoch in range(n_epochs):

        print("Epoch is", epoch)

        idx = np.random.randint(len(volumes), size=batch_size)
        x = volumes[idx]
        z = np.random.normal(0, 0.33, size=[batch_size, 1, 1, 1,
                                            z_size]).astype(np.float32)

        generated_volumes = generator.predict(z, verbose=0)

        X = np.concatenate((x, generated_volumes))
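        # the first half of X holds real volumes and the second half generated ones;
        # Y labels them 1 and 0 respectively, reshaped to match the discriminator's 5-D output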
        Y = np.reshape([1] * batch_size + [0] * batch_size, (-1, 1, 1, 1, 1))

        d_loss = discriminator.train_on_batch(X, Y)
        print("d_loss : %f" % (d_loss))

        z = np.random.normal(0, 0.33, size=[batch_size, 1, 1, 1,
                                            z_size]).astype(np.float32)
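        # freeze the discriminator so the stacked-model update below is meant to adjust only the generator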
        discriminator.trainable = False
        g_loss = discriminator_on_generator.train_on_batch(
            z, np.reshape([1] * batch_size, (-1, 1, 1, 1, 1)))
        discriminator.trainable = True

        print("g_loss : %f" % (g_loss))

        if epoch % 1000 == 10:
            generator.save_weights(model_directory + 'generator_' + str(epoch),
                                   True)
            discriminator.save_weights(
                model_directory + 'discriminator_' + str(epoch), True)

        if epoch % 500 == 10:
            generated_volumes = generator.predict(z_sample, verbose=0)
            generated_volumes.dump(train_sample_directory + '/' + str(epoch))
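
generator_containing_discriminator is not defined in this excerpt; a minimal sketch of how such a helper is commonly written (assuming the standalone Keras API used above and that the generator's output shape matches the discriminator's input):

from keras.models import Sequential

def generator_containing_discriminator(generator, discriminator):
    # chain G and D so the stacked model maps noise -> volume -> real/fake score;
    # D is frozen so training the stacked model only moves the generator's weights
    discriminator.trainable = False
    model = Sequential()
    model.add(generator)
    model.add(discriminator)
    return model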
Example n. 13
def ganGetOneResult(trained_model_path):
    global IsInit
    global Istrue
    global number

    if IsInit == False:
        initialiseWeights()
        IsInit = True

    z_vector = tf.placeholder(shape=[batch_size, z_size, z_size, 1],
                              dtype=tf.float32)
    y_vector = tf.placeholder(tf.float32, [batch_size, y_dim])
    net_g_test = generator(z_vector, y_vector, phase_train=True)
    saver = tf.train.Saver()
    with tf.variable_scope(tf.get_variable_scope(), reuse=None):
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, trained_model_path)
            while True:
                if Istrue:
                    x = []
                    Istrue = False
                    volumes = d.getAll(obj=obj,
                                       train=True,
                                       is_local=is_local,
                                       obj_ratio=obj_ratio)
                    volumes = volumes.reshape((99, 64, 64, 64))

                    g_objects = volumes[num]

                    for i in range(0, 5):
                        x.append(g_objects)
                    g_objects = np.array(x)
                    id_ch = np.random.randint(0, batch_size, 4)
                    i = 0
                    print(g_objects[id_ch[i]].max(), g_objects[id_ch[i]].min(),
                          g_objects[id_ch[i]].shape)
                    if g_objects[id_ch[i]].max() > 0.5:
                        result = np.squeeze(g_objects[id_ch[i]] > 0.5)
                        yield result
                else:

                    data_path = glob.glob(r'D:\Paper\image\test\A\*.png')
                    onepath = choice(data_path)
                    print(onepath)
                    img = Image.open(onepath)
                    img.save('output.png', quality=100)

                    def matrixpic(img):
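                        # binarize: keep pure-white (255) pixels and set everything else to 0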
                        matrixpic = np.asarray(img)
                        (h, w) = matrixpic.shape[:2]
                        for i in range(h):
                            for j in range(w):
                                if matrixpic[i][j] == 255:
                                    matrixpic[i][j] = 255
                                else:
                                    matrixpic[i][j] = 0
                        return matrixpic

                    # convert green pixels to white
                    def convertcolor(img):
                        img = np.asarray(img)
                        (h, w) = img.shape[:2]
                        for i in range(h):
                            for j in range(w):
                                if img[i][j][1] > 180 and img[i][j][
                                        0] < 20 and img[i][j][2] < 20:
                                    img[i][j] = 255
                        return img

                    # generate the binary test image
                    img = cv2.imread('output.png')
                    img1 = np.ones(img.shape[:2])
                    converted = convertcolor(img)
                    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    thresh = matrixpic(gray)

                    # detect the contours of all shapes in the image
                    contours = measure.find_contours(thresh, 0.5)
                    x = 0
                    y = 0
                    for n, contour in enumerate(contours):
                        if contour.size > x:
                            x = contour.size
                            y = contour
                    y[:, [0, 1]] = y[:, [1, 0]]
                    _contours = []
                    _contours.append(
                        np.around(np.expand_dims(y, 1)).astype(np.int))
                    cv2.drawContours(img1, _contours, -1, (0, 0, 0), -1)

                    img1 = cv2.resize(img1, (64, 64),
                                      interpolation=cv2.INTER_CUBIC)

                    img_np_1d = img1 * 255

                    one_array = np.zeros((64, 64), float)
                    for i in range(64):
                        for j in range(64):
                            if img_np_1d[i][j] < 155:
                                one_array[i][j] = 255
                            else:
                                one_array[i][j] = 0

                    one_array = Image.fromarray(one_array)
                    one_array = one_array.rotate(-90)
                    if one_array.mode != 'RGB':
                        one_array = one_array.convert('RGB')
                    one_array.save('1234.png')
                    one_array = cv2.imread('1234.png', 0)
                    left = up = down = right = 0
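                    # scan the binarized silhouette for its leftmost column and its top/bottom
                    # white rows, then build a crop box around it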
                    for j in range(32):
                        for i in range(64):
                            if one_array[i][j] != 0:
                                left = j
                        if left != 0:
                            break

                    for i in range(32):
                        for j in range(64):
                            if one_array[i][j] == 255:
                                up = i
                        if up != 0:
                            break
                    for i in range(32):
                        for j in range(64):
                            if one_array[63 - i][j] == 255:
                                down = 63 - i
                        if down != 0:
                            break
                    right = down - up + 24
                    box = (left, up, right, down)
                    img = Image.open('1234.png')
                    roi = img.crop(box)
                    dst = roi.resize((64, 64), Image.ANTIALIAS)
                    dst.save('123456.png', quality=100)
                    num = onepath[-8:-5]
                    num = int(num) - 901
                    #
                    one_array = cv2.imread('123456.png', 0)
                    for i in range(64):
                        for j in range(64):
                            if one_array[i][j] > 125:
                                one_array[i][j] = 1
                            else:
                                one_array[i][j] = 0
                    Istrue = True
                    x = []
                    for i in range(0, 5):
                        x.append(one_array)
                    x = np.array(x)
                    z_sample = np.array(x)
                    z_sample = z_sample.reshape([-1, z_size, z_size, 1])
                    y = np.random.normal(0, 0.33,
                                         size=[batch_size,
                                               y_dim]).astype(np.float32)
                    g_objects = sess.run(net_g_test,
                                         feed_dict={
                                             z_vector: z_sample,
                                             y_vector: y
                                         })
                    id_ch = np.random.randint(0, batch_size, 4)
                    i = 0
                    print(g_objects[id_ch[i]].max(), g_objects[id_ch[i]].min(),
                          g_objects[id_ch[i]].shape)
                    if g_objects[id_ch[i]].max() > 0.5:
                        result = np.squeeze(g_objects[id_ch[i]] > 0.5)

                        volumes = d.getAll(obj=obj,
                                           train=True,
                                           is_local=is_local,
                                           obj_ratio=obj_ratio)
                        volumes = volumes.reshape((99, 64, 64, 64))
                        g_objects = volumes[num]
                        x = []
                        for i in range(0, 5):
                            x.append(g_objects)
                        g_objects = np.array(x)
                        id_ch = np.random.randint(0, batch_size, 4)
                        i = 0
                        print(g_objects[id_ch[i]].max(),
                              g_objects[id_ch[i]].min(),
                              g_objects[id_ch[i]].shape)
                        result2 = []
                        result3 = []
                        result4 = []
                        if g_objects[id_ch[i]].max() > 0.5:
                            result2 = np.squeeze(g_objects[id_ch[i]] > 0.5)

                        pre1 = np.sum(np.logical_xor(result, result2))
                        pre2 = np.sum(np.logical_and(result, result2))
                        pre3 = np.sum(np.logical_or(result, result2))
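                        # pre2 / pre3 is the voxel-wise intersection-over-union (IoU) between
                        # the generated volume and the reference volume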
                        print("xor=", pre1, "   and=", pre2, "   or=", pre3)
                        acc = pre2 / pre3
                        print("Acc=", acc)
                        # result2=result2.reshape(64 * 64 * 64)
                        # result =result.reshape(64 * 64 * 64)
                        # result3 = result&result2
                        # result4 = result | result2
                        # result3=result3.reshape((64,64,64))

                    yield result
    yield None
Example n. 14
def trainGAN(is_dummy=False, checkpoint=None):

    weights = initialiseWeights()
    #  z_size  = 200  cube_len   = 64
    z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
    x_vector = tf.placeholder(
        shape=[batch_size, cube_len, cube_len, cube_len, 1], dtype=tf.float32)

    net_g_train = generator(z_vector, phase_train=True, reuse=False)
    # x_vector is the discriminator input: the original (real) 3D objects
    d_output_x, d_no_sigmoid_output_x = discriminator(x_vector,
                                                      phase_train=True,
                                                      reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99),
                            0.01)  # d_output_x is between 0.01 and 0.99
    # tf.summary.histogram writes these values to TensorBoard
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)
    # run the discriminator on the generated 3D objects
    d_output_z, d_no_sigmoid_output_z = discriminator(net_g_train,
                                                      phase_train=True,
                                                      reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    # Compute the discriminator accuracy
    n_p_x = tf.reduce_sum(tf.cast(d_output_x > 0.5, tf.int32))
    n_p_z = tf.reduce_sum(tf.cast(d_output_z < 0.5, tf.int32))
    d_acc = tf.divide(n_p_x + n_p_z, 2 * batch_size)

    # Compute the discriminator and generator loss
    # d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1-d_output_z))
    # g_loss = -tf.reduce_mean(tf.log(d_output_z))

    d_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_no_sigmoid_output_x, labels=tf.ones_like(
            d_output_x)) + tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_no_sigmoid_output_z, labels=tf.zeros_like(d_output_z))
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_no_sigmoid_output_z, labels=tf.ones_like(d_output_z))

    d_loss = tf.reduce_mean(d_loss)
    g_loss = tf.reduce_mean(g_loss)
    # scalar summaries for TensorBoard
    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)
    summary_n_p_z = tf.summary.scalar("n_p_z", n_p_z)
    summary_n_p_x = tf.summary.scalar("n_p_x", n_p_x)
    summary_d_acc = tf.summary.scalar("d_acc", d_acc)
    net_g_test = generator(z_vector, phase_train=False, reuse=True)
    # tf.trainable_variables() returns the list of variables to be trained
    para_g = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wg', 'bg', 'gen'])
    ]
    para_d = [
        var for var in tf.trainable_variables()
        if any(x in var.name for x in ['wd', 'bd', 'dis'])
    ]

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(
        learning_rate=d_lr, beta1=beta).minimize(d_loss, var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(
        learning_rate=g_lr, beta1=beta).minimize(g_loss, var_list=para_g)

    saver = tf.train.Saver()
    # data visualization with visdom
    vis = visdom.Visdom()

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        if checkpoint is not None:
            saver.restore(sess, checkpoint)
        # is_dummy = false
        if is_dummy:
            # is_dummy = true
            # batch_size = 32 cube_len = 64
            volumes = np.random.randint(
                0, 2, (batch_size, cube_len, cube_len, cube_len))
            print('Using Dummy Data')
        else:
            # is_dummy = false  is_local = false  volumes is a list of npy
            voxList = d.getAll(obj=obj,
                               train=True,
                               is_local=is_local,
                               obj_ratio=obj_ratio)
            print('Using ' + obj + ' Data')
        # volumes *= 2.0
        # volumes -= 1.0
        for epoch in range(n_epochs):
            for batch_count in range(len(voxList) // batch_size):
                volumes = np.zeros([batch_size, cube_len, cube_len, cube_len],
                                   dtype=np.bool)
                # load the .npy files belonging to this batch
                for index in range(batch_count * batch_size,
                                   (batch_count + 1) * batch_size):
                    # each .npy file holds one voxel array
                    vox_arr = np.load(voxList[index])
                    volumes[index % batch_size] = vox_arr
                # add a trailing channel axis
                x = volumes[..., np.newaxis].astype(np.float)
                # Gaussian Distribution
                z_sample = np.random.normal(0, 0.33,
                                            size=[batch_size,
                                                  z_size]).astype(np.float32)
                z = np.random.normal(0, 0.33, size=[batch_size,
                                                    z_size]).astype(np.float32)
                # z = np.random.uniform(0, 1, size=[batch_size, z_size]).astype(np.float32)

                # Update the discriminator and generator
                # manage summary
                d_summary_merge = tf.summary.merge([
                    summary_d_loss, summary_d_x_hist, summary_d_z_hist,
                    summary_n_p_x, summary_n_p_z, summary_d_acc
                ])
                #writer = tf.summary.FileWriter('logs' , sess.graph)
                #summary_d, discriminator_loss = sess.run([d_summary_merge,d_loss],feed_dict={z_vector:z, x_vector:x})
                discriminator_loss = sess.run(d_loss,
                                              feed_dict={
                                                  z_vector: z,
                                                  x_vector: x
                                              })
                #writer.add_summary(summary_d, batch_count)
                generator_loss = sess.run(g_loss, feed_dict={z_vector: z})
                #writer.add_summary(summary_g, batch_count)
                d_accuracy, n_x, n_z = sess.run([d_acc, n_p_x, n_p_z],
                                                feed_dict={
                                                    z_vector: z,
                                                    x_vector: x
                                                })
                print(n_x, n_z)
                # update the discriminator only while its accuracy is below d_thresh (0.8 here)
                if d_accuracy < d_thresh:
                    sess.run([optimizer_op_d],
                             feed_dict={
                                 z_vector: z,
                                 x_vector: x
                             })
                    print('Discriminator Training ', "Iteration: ", batch_count, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy)

                sess.run([optimizer_op_g], feed_dict={z_vector: z})
                print('Generator Training ', "Iteration: ", batch_count, ', d_loss:', discriminator_loss, 'g_loss:', generator_loss, "d_acc: ", d_accuracy)
            # output generated chairs
            if epoch % 2 == 0:
                g_objects = sess.run(net_g_test,
                                     feed_dict={z_vector: z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_objects.dump(train_sample_directory + '/biasfree_' +
                               str(epoch))
                id_ch = np.random.randint(0, batch_size, 4)
                for i in range(4):
                    if g_objects[id_ch[i]].max() > 0.5:
                        d.plotVoxelVisdom(
                            np.squeeze(g_objects[id_ch[i]] > 0.5), vis,
                            '_'.join(map(str, [epoch, i])))
            if epoch % 50 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)
                saver.save(sess,
                           save_path=model_directory + '/biasfree_' +
                           str(epoch) + '.cptk')
Example n. 15
def trainGAN():

    weights, biases =  initialiseWeights(), initialiseBiases()

    z_vector = tf.placeholder(shape=[batch_size,z_size],dtype=tf.float32) 
    x_vector = tf.placeholder(shape=[batch_size,32,32,32,1],dtype=tf.float32) 

    net_g_train = generator(z_vector, phase_train=True, reuse=False) 

    d_output_x = discriminator(x_vector, phase_train=True, reuse=False)
    d_output_x = tf.maximum(tf.minimum(d_output_x, 0.99), 0.01)
    summary_d_x_hist = tf.summary.histogram("d_prob_x", d_output_x)

    d_output_z = discriminator(net_g_train, phase_train=True, reuse=True)
    d_output_z = tf.maximum(tf.minimum(d_output_z, 0.99), 0.01)
    summary_d_z_hist = tf.summary.histogram("d_prob_z", d_output_z)

    d_loss = -tf.reduce_mean(tf.log(d_output_x) + tf.log(1-d_output_z))
    summary_d_loss = tf.summary.scalar("d_loss", d_loss)
    
    g_loss = -tf.reduce_mean(tf.log(d_output_z))
    summary_g_loss = tf.summary.scalar("g_loss", g_loss)

    net_g_test = generator(z_vector, phase_train=True, reuse=True)
    para_g=list(np.array(tf.trainable_variables())[[0,1,4,5,8,9,12,13]])
    para_d=list(np.array(tf.trainable_variables())[[14,15,16,17,20,21,24,25]])#,28,29]])
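    # generator / discriminator variables are selected by their fixed positions in tf.trainable_variables()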

    # only update the weights for the discriminator network
    optimizer_op_d = tf.train.AdamOptimizer(learning_rate=alpha_d,beta1=beta).minimize(d_loss,var_list=para_d)
    # only update the weights for the generator network
    optimizer_op_g = tf.train.AdamOptimizer(learning_rate=alpha_g,beta1=beta).minimize(g_loss,var_list=para_g)

    saver = tf.train.Saver(max_to_keep=50) 

    with tf.Session() as sess:  
      
        sess.run(tf.global_variables_initializer())        
        z_sample = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
        volumes = d.getAll(obj=obj, train=True, is_local=True)
        volumes = volumes[...,np.newaxis].astype(np.float) 

        for epoch in tqdm(range(n_epochs)):
            
            idx = np.random.randint(len(volumes), size=batch_size)
            x = volumes[idx]
            z = np.random.normal(0, 0.33, size=[batch_size, z_size]).astype(np.float32)
        
            # Update the discriminator and generator
            d_summary_merge = tf.summary.merge([summary_d_loss, summary_d_x_hist,summary_d_z_hist])

            summary_d, discriminator_loss = sess.run([d_summary_merge,d_loss],feed_dict={z_vector:z, x_vector:x})
            summary_g, generator_loss = sess.run([summary_g_loss,g_loss],feed_dict={z_vector:z})  
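            # balancing heuristic: if one network's loss is already below 0.46 (= 4.6 * 0.1),
            # update only its opponent this step; otherwise update both networks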
            
            if discriminator_loss <= 4.6*0.1: 
                sess.run([optimizer_op_g],feed_dict={z_vector:z})
            elif generator_loss <= 4.6*0.1:
                sess.run([optimizer_op_d],feed_dict={z_vector:z, x_vector:x})
            else:
                sess.run([optimizer_op_d],feed_dict={z_vector:z, x_vector:x})
                sess.run([optimizer_op_g],feed_dict={z_vector:z})
                            
            print "epoch: ",epoch,', d_loss:',discriminator_loss,'g_loss:',generator_loss

            # output generated chairs
            if epoch % 500 == 10:
                g_chairs = sess.run(net_g_test,feed_dict={z_vector:z_sample})
                if not os.path.exists(train_sample_directory):
                    os.makedirs(train_sample_directory)
                g_chairs.dump(train_sample_directory+'/'+str(epoch))
            
            if epoch % 500 == 10:
                if not os.path.exists(model_directory):
                    os.makedirs(model_directory)      
                saver.save(sess, save_path = model_directory + '/' + str(epoch) + '.cptk')