Example #1
			print "{0}, {1}, {2:.8f}, {3:.8f}".format(epoch, i, batch_loss_G, batch_loss_D)

		### print loss ### 
		loss_G_mean = np.mean(loss_list['G'])
		loss_D_mean = np.mean(loss_list['D'])
		with open(myconfig.loss_csv, 'a') as f:
			msg = "{0}, {1:.8f}, {2:.8f}".format(epoch, loss_G_mean, loss_D_mean)
			print(msg, file=f)
			print(msg)

		### output voxels by G ###
		if epoch%10 != 0:
			continue 
		# 1st ground truth
		np.save(myconfig.vox_prefix+"{0}-sample.npy".format(epoch), 
				dataset.transformBack(batch_dict['rgba'][0]))
		# generated 
		voxels_ = sess.run(rgba_, feed_dict={a:batch_dict['a'], 
											z:batch_dict['z'], 
											train:False})
		for j, v in enumerate(voxels_[:4]):
			v = v.reshape([32, 32, 32, 4])
			v = dataset.transformBack(v)
			np.save(myconfig.vox_prefix+"{0}-{1}.npy".format(epoch, j), v)
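The loss-logging code above appends one "epoch, loss_G, loss_D" row per epoch to myconfig.loss_csv. A minimal sketch for plotting those curves afterwards; only the three-column row layout comes from the snippet, while the concrete file name and the use of matplotlib are assumptions:

import numpy as np
import matplotlib.pyplot as plt

# Rows look like "epoch, loss_G, loss_D", as written by the training loop above.
data = np.loadtxt("loss.csv", delimiter=",", ndmin=2)  # assumed path; the snippet writes to myconfig.loss_csv
epochs, loss_G, loss_D = data[:, 0], data[:, 1], data[:, 2]

plt.plot(epochs, loss_G, label="G")
plt.plot(epochs, loss_D, label="D")
plt.xlabel("epoch")
plt.ylabel("mean loss")
plt.legend()
plt.savefig("loss_curves.png")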






Example #2
			for i,vox in enumerate(voxes): 
				# print i,vox
				sub_name = "tmp/tmp-%d-%d.jpg"%(myconfig.version,i)
				dataset.vox2image(vox, sub_name)
				sub_names.append(sub_name)
			dataset.concatenateImages(sub_names, imname)
			print(imname)
			for name in sub_names:
				os.remove(name)

		if epoch % sample_interval == 0:
			### train data
			batch_dict = prepare_batch_dict(train_data.next_batch(batch_size))

			# save ground-truth
			saveConcatVoxes2image(dataset.transformBack(np.array(batch_dict['rgba'][0:12])), 
									myconfig.vox_prefix+"{0}.rgba.jpg".format(epoch))

			# save generated
			feed_dict = prepare_feed_dict(batch_dict, rgba, a, z, k_t, k_t_, train, False)
			b_rgba_out, b_rgba_, b_rgba_out_ = sess.run([rgba_out, rgba_, rgba_out_], feed_dict=feed_dict)
			saveConcatVoxes2image(dataset.transformBack(np.array(b_rgba_out[0:12])), 
									myconfig.vox_prefix+"{0}.rgba_out.jpg".format(epoch))
			saveConcatVoxes2image(dataset.transformBack(np.array(b_rgba_[0:12])), 
									myconfig.vox_prefix+"{0}.rgba_.jpg".format(epoch))
			saveConcatVoxes2image(dataset.transformBack(np.array(b_rgba_out_[0:12])), 
									myconfig.vox_prefix+"{0}.rgba_out_.jpg".format(epoch))


		##################################################
		## save trained model
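The loop at the top of Example #2 reads like the tail of the saveConcatVoxes2image(voxes, imname) helper that the sampling code above calls. A possible reconstruction under that assumption (myconfig and dataset come from the surrounding module, exactly as in the snippet):

import os

def saveConcatVoxes2image(voxes, imname):
	# Render each voxel grid to a temporary image, tile the images into one
	# file named imname, then delete the temporaries.
	sub_names = []
	for i, vox in enumerate(voxes):
		sub_name = "tmp/tmp-%d-%d.jpg" % (myconfig.version, i)
		dataset.vox2image(vox, sub_name)
		sub_names.append(sub_name)
	dataset.concatenateImages(sub_names, imname)
	print(imname)
	for name in sub_names:
		os.remove(name)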
Example #3
                         z: batch_dict['z'],
                         train: False
                     })
fetches_D = sess.run(loss_D,
                     feed_dict={
                         a: batch_dict['a'],
                         z: batch_dict['z'],
                         rgba: batch_dict['rgba'],
                         train: False
                     })

# test
batch_dict = prepare_batch_dict(data.train.next_batch(batch_size))
tmp_a = batch_dict['a']
tmp_rgba = batch_dict['rgba']
np.save("testgt.npy", dataset.transformBack(tmp_rgba[0]))
fetches_G = sess.run(rgba_,
                     feed_dict={
                         a: tmp_a,
                         z: batch_dict['z'],
                         train: False
                     })
print('z:', batch_dict['z'][0, :])
print(fetches_G[0, :, :, 0, 0])
np.save("test1.npy", dataset.transformBack(fetches_G[0]))

batch_dict = prepare_batch_dict(data.train.next_batch(batch_size))
fetches_G = sess.run(rgba_,
                     feed_dict={
                         a: tmp_a,
                         z: batch_dict['z'],
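Example #3 dumps a ground-truth grid to testgt.npy and a generated one to test1.npy. A minimal sketch for comparing the two files offline; the mean-absolute-difference check is my own choice, not part of the snippet:

import numpy as np

gt = np.load("testgt.npy")    # ground truth saved above
gen = np.load("test1.npy")    # generator output saved above
print(gt.shape, gen.shape)
print("mean |gt - gen|:", float(np.abs(gt - gen).mean()))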
Example #4
                loss_G_mean = np.mean(loss_dict['G'])
                f.write("{0}, {1:.8f}\n".format(epoch, loss_G_mean))
                print(loss_G_mean_list, file=f)  # loss_G_mean_list is built earlier in the original script (not shown here)

            ##################################################
            ## draw samples on train and test dataset
            ##
            ##################################################

            if epoch % sample_interval == 0:
                batch_dict = prepare_batch_dict(
                    train_data.next_batch(batch_size))

                # save ground-truth
                dataset.saveConcatVoxes2image(
                    dataset.transformBack(np.array(batch_dict['rgba'][0:8])),
                    myconfig.vox_prefix + "{0}.gt.jpg".format(epoch))

                # sample with z and mask_a aligned
                feed_dict = prepare_shuffled_feed_dict(batch_dict, rgb, a,
                                                       mask_a, indexes, train,
                                                       False)
                batch_rgba = rgbas_[-1].eval(feed_dict)
                dataset.saveConcatVoxes2image(
                    dataset.transformBack(np.array(batch_rgba[0:8])),
                    myconfig.vox_prefix + "{0}.train.jpg".format(epoch))

                # sample with a and mask_a being identical
                feed_dict[mask_a] = feed_dict[a]
                batch_rgba = rgbas_[-1].eval(feed_dict)
                dataset.saveConcatVoxes2image(
Example #5
pltKernel(W['dh3'][:, :, :, 5, 7])
print('G dh5')
print(W['dh3'][:, :, :, 1, 3])
pltKernel(W['dh3'][:, :, :, 1, 3])

W = sess.run(D.W, feed_dict=feed_dict)
print('d h3')
print(W['h3'][:, :, :, 5, 7])
pltKernel(W['h3'][:, :, :, 5, 7])

for v in tf.trainable_variables():
    if 'alpha' in v.name:
        print(v.name)
        print(sess.run(v, feed_dict=feed_dict))

sess.close()
exit(0)

#
batch_dict = prepare_batch_dict(data.train.next_batch(batch_size))
feed_dict = prepare_feed_dict(batch_dict, rgba, a, z, train, False)
voxels_ = sess.run(rgba_, feed_dict=feed_dict)
np.save("test1.npy", dataset.transformBack(voxels_[0]))

batch_dict['rgba'][1] = batch_dict['rgba'][0]
feed_dict = prepare_feed_dict(batch_dict, rgba, a, z, train, False)
voxels_ = sess.run(rgba_, feed_dict=feed_dict)
np.save("test2.npy", dataset.transformBack(voxels_[1]))

sess.close()
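pltKernel is used throughout Example #5 but never defined in the snippet. A hypothetical stand-in, assuming it only needs to visualize a small 3-D kernel slice by slice with matplotlib:

import numpy as np
import matplotlib.pyplot as plt

def pltKernel(kernel):
    # Hypothetical helper (not from the original code): show every depth slice
    # of a 3-D convolution kernel as a grayscale image.
    kernel = np.asarray(kernel)
    fig, axes = plt.subplots(1, kernel.shape[0], squeeze=False)
    for d in range(kernel.shape[0]):
        axes[0, d].imshow(kernel[d], cmap="gray")
        axes[0, d].set_title("slice %d" % d)
        axes[0, d].axis("off")
    plt.show()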
Example #6
                                    feed_dict={
                                        x: voxels,
                                        z: batch_z,
                                        train: False
                                    })
            loss_list['G'].append(batch_loss_G)
            loss_list['D'].append(batch_loss_D)

        ## print loss
        loss_G_mean = np.mean(loss_list['G'])
        loss_D_mean = np.mean(loss_list['D'])
        with open(myconfig.loss_csv, 'a') as f:
            msg = "{0}, {1:.8f}, {2:.8f}".format(epoch, loss_G_mean,
                                                 loss_D_mean)
            print(msg, file=f)
            print(msg)

        ## output voxels
        batch_z = np.random.uniform(-1, 1,
                                    [batch_size, z_size]).astype(np.float32)
        voxels_ = sess.run(x_, feed_dict={z: batch_z})

        # 1st ground truth
        np.save(myconfig.vox_prefix + "{0}-sample.npy".format(epoch),
                dataset.transformBack(voxels[0]))
        # generated
        for j, v in enumerate(voxels_[:2]):
            v = v.reshape([32, 32, 32, 4])
            v = dataset.transformBack(v)
            np.save(myconfig.vox_prefix + "{0}-{1}.npy".format(epoch, j), v)
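Each grid saved above is reshaped to 32x32x32 with four channels, which suggests RGB plus an alpha/occupancy channel. A minimal sketch for loading one of the generated "{epoch}-{j}.npy" files and extracting an occupancy mask; the concrete file name, the channel interpretation, and the 0.5 threshold are assumptions:

import numpy as np

vox = np.load("10-0.npy")        # one of the "{epoch}-{j}.npy" files written above (name assumed)
print(vox.shape)                 # expected (32, 32, 32, 4)
occupancy = vox[..., 3] > 0.5    # assumption: channel 3 is alpha and 0.5 is a usable cutoff
print("occupied voxels:", int(occupancy.sum()))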