def save_vis_masks(ims, im_names, gt_masks_dir, gt_masks, corrup_masks_dir, corrup_masks):
    # 5.2> save visualisations of gt_masks and corrup_masks
    alpha_folder = 'alpha_masks'
    color_folder = 'color_masks'

    ## alpha masks (gt)
    alpha_gt_dir = os.path.join(gt_masks_dir, alpha_folder)
    create_dir(alpha_gt_dir)
    alpha_gt_masks = gt_masks * 255.0
    save_images(alpha_gt_masks, alpha_gt_dir, im_names, im_ext)

    ## alpha masks (corrup)
    alpha_corrup_dir = os.path.join(corrup_masks_dir, alpha_folder)
    create_dir(alpha_corrup_dir)
    alpha_corrup_masks = corrup_masks * 255.0
    save_images(alpha_corrup_masks, alpha_corrup_dir, im_names, im_ext)

    ## color masks (gt)
    color_gt_masks = np.zeros(ims.shape, dtype=np.uint8)
    color = [0, 0, 255]
    for m_id in range(len(gt_masks)):
        im = ims[m_id]
        mask = gt_masks[m_id]
        color_mask = vis_im_mask(im, mask, color, True)
        color_gt_masks[m_id] = color_mask
    color_gt_dir = os.path.join(gt_masks_dir, color_folder)
    create_dir(color_gt_dir)
    save_images(color_gt_masks, color_gt_dir, im_names, im_ext)
    # TODO: small bug here, still needs debugging

    ## color masks (corrup)
    color_corrup_masks = np.zeros(ims.shape, dtype=np.uint8)
    color = [0, 0, 255]
    for m_id in range(len(corrup_masks)):
        im = ims[m_id]
        mask = corrup_masks[m_id]
        color_mask = vis_im_mask(im, mask, color, True)
        color_corrup_masks[m_id] = color_mask
    color_corrup_dir = os.path.join(corrup_masks_dir, color_folder)
    create_dir(color_corrup_dir)
    save_images(color_corrup_masks, color_corrup_dir, im_names, im_ext)
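# --- Hedged usage sketch, not part of the original file ---
# Assumes the helpers referenced above (create_dir, save_images, vis_im_mask) and the
# module-level im_ext are defined elsewhere in this repo, and that masks are per-image
# float arrays in [0, 1]. The shapes, names, and paths below are illustrative only.
if __name__ == '__main__':
    _ims = np.zeros((2, 64, 64, 3), dtype=np.uint8)                        # two dummy RGB images
    _gt = np.random.randint(0, 2, size=(2, 64, 64)).astype(np.float32)     # dummy binary GT masks
    _corrup = np.random.randint(0, 2, size=(2, 64, 64)).astype(np.float32) # dummy corrupted masks
    save_vis_masks(_ims, ['im_0', 'im_1'],
                   './vis/gt_masks', _gt,
                   './vis/corrup_masks', _corrup)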
                                             dcgan.input_label: one_label})

            d_loss = d_loss_fake + d_loss_real
            g_loss = g_loss1 + g_loss2

            writer.add_summary(ds, epoch * batch_count + i)
            writer.add_summary(gs, epoch * batch_count + i)

            print('EPOCH {}'.format(epoch),
                  '[{}/{}] D_Loss: {}, G_Loss: {}'.format(i, batch_count, d_loss, g_loss))

            if i % save_image_interval == 0:
                # de-preprocess generated images & save a sample grid
                z = np.random.normal(loc=0.0, scale=0.1, size=[batch_size, 100])
                gen = sess.run(dcgan.generated, feed_dict={dcgan.input_noise: z})
                if channel_num > 1:
                    gen = np.reshape(gen, [batch_size, 64, 64, channel_num])
                else:
                    gen = np.reshape(gen, [batch_size, 64, 64])
                gen_img = (gen + 1.0) * 127.0  # invert the "x / 127.0 - 1.0" preprocessing
                if not os.path.exists('gen_celeb'):
                    os.makedirs('gen_celeb')
                image_util.save_images('gen_celeb/{}_{}.png'.format(epoch, i),
                                       gen_img[0:64], [8, 8])
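# --- Hedged note, not part of the original script ---
# The training data is preprocessed with "x / 127.0 - 1.0" (see the setup code below),
# so samples from the tanh generator land in roughly [-1, 1] and are mapped back to
# [0, 255] with "(g + 1.0) * 127.0" before saving. A quick round trip on dummy pixels:
#   x = np.array([0.0, 127.0, 254.0])
#   g = x / 127.0 - 1.0            # -> [-1., 0., 1.]
#   x_back = (g + 1.0) * 127.0     # -> [0., 127., 254.]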
        self.loss_sum = tf.summary.scalar('loss', self.loss)
        self.sum = tf.summary.merge([self.loss_sum])


#
# hyper-parameters
#
batch_size = 200
dcgan = DCGAN(batch_size=batch_size, channel=1)

# load MNIST data
images, labels = mnist_data.load_mnist('./mnist')
input_img = []
for i in range(60000):
    input_img.append(images[i])
input_img = np.array(input_img)
image_util.save_images('output.png', input_img[0:64], [8, 8])

# preprocess images: map pixel values from [0, 255] to roughly [-1, 1]
input_img = input_img / 127.0 - 1.0
input_img = np.reshape(input_img, [60000, 28, 28, 1])

# init TF
init = tf.global_variables_initializer()
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

with tf.Session(config=config) as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./graphs_mnist', sess.graph)
import os

import numpy as np

from image_util import save_image, save_images


def load_mnist(path):
    # MNIST idx files: 16-byte header before the image data, 8-byte header before the labels
    fd = open(os.path.join(path, 'train-images-idx3-ubyte'), 'rb')
    images = np.fromfile(file=fd, dtype=np.uint8)
    images = images[16:].reshape([60000, 28, 28]).astype(np.float64)

    fd = open(os.path.join(path, 'train-labels-idx1-ubyte'), 'rb')
    labels = np.fromfile(file=fd, dtype=np.uint8)
    labels = labels[8:].reshape([60000]).astype(np.float64)

    return images, labels


# TEST DRIVE (guarded so it does not run when this module is imported)
if __name__ == '__main__':
    images, labels = load_mnist('./mnist')
    save_images('output.png', images[0:64], [8, 8])
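# --- Hedged helper sketch, not part of the original module ---
# The hard-coded offsets above come from the MNIST IDX layout: the image file starts
# with four big-endian int32 fields (magic 2051, count, rows, cols) = 16 bytes, and the
# label file with two (magic 2049, count) = 8 bytes. This optional check, with the
# hypothetical name _check_idx_headers, re-reads those headers instead of trusting the
# offsets blindly.
import struct

def _check_idx_headers(path):
    with open(os.path.join(path, 'train-images-idx3-ubyte'), 'rb') as f:
        magic, count, rows, cols = struct.unpack('>IIII', f.read(16))
        assert magic == 2051 and (count, rows, cols) == (60000, 28, 28)
    with open(os.path.join(path, 'train-labels-idx1-ubyte'), 'rb') as f:
        magic, count = struct.unpack('>II', f.read(8))
        assert magic == 2049 and count == 60000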