def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          y_dim=10, dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/gen_layers.js",
                dcgan.h0_w, dcgan.h1_w, dcgan.h2_w, dcgan.h3_w, dcgan.h4_w)

        z_sample = np.random.uniform(-1, 1, size=(FLAGS.batch_size, dcgan.z_dim))
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
        save_images(samples, [8, 8],
                    './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
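# Editor note (a sketch, not from the original fork): on newer TensorFlow/absl
# releases, flags.FLAGS.__flags maps names to Flag objects rather than raw
# values, so pp.pprint prints object reprs. A version-robust dump, mirroring
# the {k: FLAGS[k].value for k in FLAGS} pattern used in a later variant below:
#
#     pp.pprint({k: FLAGS[k].value for k in FLAGS})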
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          y_dim=10, dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/layers.js",
                [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 2
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        dcgan = DCGAN(sess, dataset=FLAGS.dataset, batch_size=FLAGS.batch_size,
                      output_size=FLAGS.output_size, c_dim=FLAGS.c_dim)

        if FLAGS.is_train:
            if FLAGS.preload_data:
                data = get_data_arr(FLAGS)
            else:
                data = glob(os.path.join('./data', FLAGS.dataset, '*.jpg'))
            train.train_wasserstein(sess, dcgan, data, FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        if FLAGS.dataset == 'mnist':
            assert False
        dcgan = DCGAN(sess,
                      image_size=FLAGS.image_size,
                      batch_size=FLAGS.batch_size,
                      sample_size=64,
                      z_dim=8192,
                      d_label_smooth=.25,
                      generator_target_prob=.75 / 2.,
                      out_stddev=.075,
                      out_init_b=-.45,
                      image_shape=[FLAGS.image_width, FLAGS.image_width, 3],
                      dataset_name=FLAGS.dataset,
                      is_crop=FLAGS.is_crop,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir,
                      generator=Generator(),
                      train_func=train,
                      discriminator_func=discriminator,
                      build_model_func=build_model,
                      config=FLAGS,
                      devices=["gpu:0", "gpu:1", "gpu:2", "gpu:3"])  # , "gpu:4"

        if FLAGS.is_train:
            print("TRAINING")
            dcgan.train(FLAGS)
            print("DONE TRAINING")
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        OPTION = 2
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    #if not os.path.exists('samples_progress'):
    #    os.makedirs('samples_progress')
    for i in range(8):
        if not os.path.exists('samples_progress/part{:1d}'.format(i + 1)):
            os.makedirs('samples_progress/part{:1d}'.format(i + 1))

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with open('settings.txt', "w") as f:
        f.write("\n" + " ".join(sys.argv) + "\n\n")
        print("FLAGS values:")
        for key, val in flags.FLAGS.__flags.items():
            print(str([key, val]))
            f.write(str([key, val]) + "\n")
        print()

    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            z_dim=FLAGS.z_dim,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            lambda_loss=FLAGS.lambda_loss)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # Below is code for visualization
        #OPTION = 1
        if FLAGS.vis_type == 0:
            vis_options = [6, 7, 9, 10]
            for option in vis_options:
                print("Visualizing option %s" % option)
                OPTION = option
                visualize(sess, dcgan, FLAGS, OPTION)
        else:
            OPTION = FLAGS.vis_type
            visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    # Print out the parameters
    pprint.PrettyPrinter().pprint(flags.FLAGS.__flags)

    # Deal with input/output size default values
    if not FLAGS.input_width:
        FLAGS.input_width = FLAGS.input_height
    if not FLAGS.output_width:
        FLAGS.output_width = FLAGS.output_height

    # Deal with checkpoint/sample directory paths
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    # Deal with GPU utilization
    def get_default_gpu_session(fraction=0.8):
        if fraction > 1:
            fraction = 0.8
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = fraction
        return tf.Session(config=config)

    with get_default_gpu_session(FLAGS.gpu_utilization) as sess:
        # Deal with the MNIST dataset
        if FLAGS.dataset == 'mnist':
            # Instantiate a DCGAN instance
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          c_dim=1,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_file_extension,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_file_extension,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)

        show_all_variables()

        # Deal with training
        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!!!] Need to train a model first, then run test mode")
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                data_dir=FLAGS.data_dir,
                df_dim=FLAGS.df_dim,
                gf_dim=FLAGS.gf_dim,
                double_update_gen=FLAGS.double_update_gen)
        else:
            sample_dir = "samples/{}_bz{}_out{}_in{}_df{}_gf{}_update{}_noise{}".format(
                FLAGS.dataset, FLAGS.batch_size, FLAGS.output_height, FLAGS.input_height,
                FLAGS.df_dim, FLAGS.gf_dim, FLAGS.double_update_gen, FLAGS.noise)
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=sample_dir,  # FLAGS.sample_dir
                data_dir=FLAGS.data_dir,
                df_dim=FLAGS.df_dim,
                gf_dim=FLAGS.gf_dim,
                double_update_gen=FLAGS.double_update_gen,
                noise=FLAGS.noise)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    # Create directories if necessary
    if not os.path.exists(FLAGS.log_dir):
        print("*** create log dir %s" % FLAGS.log_dir)
        os.makedirs(FLAGS.log_dir)
    if not os.path.exists(FLAGS.sample_dir):
        print("*** create sample dir %s" % FLAGS.sample_dir)
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(FLAGS.checkpoint_dir):
        print("*** create checkpoint dir %s" % FLAGS.checkpoint_dir)
        os.makedirs(FLAGS.checkpoint_dir)

    # Write flags to log dir
    flags_file = open("%s/flags.txt" % FLAGS.log_dir, "w")
    for k, v in flags.FLAGS.__flags.items():
        line = '{}, {}'.format(k, v)
        print(line, file=flags_file)
    flags_file.close()

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    # load model
    fid.create_inception_graph(FLAGS.incept_path)

    with tf.Session(config=run_config) as sess:
        # get query tensor
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          batch_size_m=FLAGS.batch_size_m,
                          y_dim=10,
                          c_dim=1,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          log_dir=FLAGS.log_dir,
                          stats_path=FLAGS.stats_path,
                          data_path=FLAGS.data_path,
                          fid_batch_size=FLAGS.fid_batch_size,
                          fid_verbose=FLAGS.fid_verbose)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          log_dir=FLAGS.log_dir,
                          stats_path=FLAGS.stats_path,
                          data_path=FLAGS.data_path,
                          fid_batch_size=FLAGS.fid_batch_size,
                          fid_verbose=FLAGS.fid_verbose)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!] Train a model first, then run test mode")
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        if FLAGS.is_train:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          input_size=FLAGS.input_size, dataset_name=FLAGS.dataset,
                          is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = EVAL(sess, input_size=600, batch_size=1,
                         ir_image_shape=[600, 800, 1], normal_image_shape=[600, 800, 3],
                         dataset_name=FLAGS.dataset, is_crop=False,
                         checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

            OPTION = 2  # for validation
            list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
            VAL_OPTION = 2
            """
            if OPTION == 1:
                data = json.load(open("/research2/IR_normal_small/json/traininput_single_224_ori_small.json"))
                data_label = json.load(open("/research2/IR_normal_small/json/traingt_single_224_ori_small.json"))
            elif OPTION == 2:
                data = json.load(open("/research2/IR_normal_small/json/testinput_single_224_ori_small.json"))
                data_label = json.load(open("/research2/IR_normal_small/json/testgt_single_224_ori_small.json"))
            """

            if VAL_OPTION == 1:
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                for idx in range(len(list_val)):
                    for idx2 in range(1, 10):
                        print("Selected material %03d/%d" % (list_val[idx], idx2))
                        img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                        input_ = scipy.misc.imread(img + '/3.bmp').astype(float)
                        gt_ = scipy.misc.imread('/research2/IR_normal_small/save016/1/12_Normal.bmp').astype(float)
                        input_ = scipy.misc.imresize(input_, [600, 800])
                        gt_ = scipy.misc.imresize(gt_, [600, 800])
                        #input_ = input_[240:840, 515:1315]
                        #gt_ = gt_[240:840, 515:1315]
                        input_ = np.reshape(input_, (1, 600, 800, 1))
                        gt_ = np.reshape(gt_, (1, 600, 800, 3))
                        input_ = np.array(input_).astype(np.float32)
                        gt_ = np.array(gt_).astype(np.float32)
                        start_time = time.time()
                        sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                        print('time: %.8f' % (time.time() - start_time))
                        # normalization #
                        sample = np.squeeze(sample).astype(np.float32)
                        gt_ = np.squeeze(gt_).astype(np.float32)
                        output = np.zeros((600, 800, 3)).astype(np.float32)
                        output[:, :, 0] = sample[:, :, 0] / (np.sqrt(
                            np.power(sample[:, :, 0], 2) + np.power(sample[:, :, 1], 2) +
                            np.power(sample[:, :, 2], 2)))
                        output[:, :, 1] = sample[:, :, 1] / (np.sqrt(
                            np.power(sample[:, :, 0], 2) + np.power(sample[:, :, 1], 2) +
                            np.power(sample[:, :, 2], 2)))
                        output[:, :, 2] = sample[:, :, 2] / (np.sqrt(
                            np.power(sample[:, :, 0], 2) + np.power(sample[:, :, 1], 2) +
                            np.power(sample[:, :, 2], 2)))
                        output[output == np.inf] = 0.0
                        sample = (output + 1.) / 2.
                        savename = '/home/yjyoon/Dropbox/ECCV16_IRNormal/single_result/%03d/%d/single_normal_L2ang.bmp' % (list_val[idx], idx2)
                        scipy.misc.imsave(savename, sample)

            elif VAL_OPTION == 2:
                print("Computing all validation set")
                ErrG = 0.0
                num_img = 13
                for idx in xrange(5, num_img + 1):
                    print("[Computing Validation Error %d/%d]" % (idx, num_img))
                    img = '/home/yjyoon/Dropbox/ECCV16_IRNormal/extra/extra_%d.bmp' % (idx)
                    input_ = scipy.misc.imread(img).astype(float)
                    input_ = input_[:, :, 0]
                    gt_ = scipy.misc.imread('/research2/IR_normal_small/save016/1/12_Normal.bmp').astype(float)
                    input_ = scipy.misc.imresize(input_, [600, 800])
                    gt_ = scipy.misc.imresize(gt_, [600, 800])
                    input_ = np.reshape(input_, (1, 600, 800, 1))
                    gt_ = np.reshape(gt_, (1, 600, 800, 3))
                    input_ = np.array(input_).astype(np.float32)
                    gt_ = np.array(gt_).astype(np.float32)
                    start_time = time.time()
                    sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                    print('time: %.8f' % (time.time() - start_time))
                    # normalization #
                    sample = np.squeeze(sample).astype(np.float32)
                    gt_ = np.squeeze(gt_).astype(np.float32)
                    output = np.zeros((600, 800, 3)).astype(np.float32)
                    output[:, :, 0] = sample[:, :, 0] / (np.sqrt(
                        np.power(sample[:, :, 0], 2) + np.power(sample[:, :, 1], 2) +
                        np.power(sample[:, :, 2], 2)))
                    output[:, :, 1] = sample[:, :, 1] / (np.sqrt(
                        np.power(sample[:, :, 0], 2) + np.power(sample[:, :, 1], 2) +
                        np.power(sample[:, :, 2], 2)))
                    output[:, :, 2] = sample[:, :, 2] / (np.sqrt(
                        np.power(sample[:, :, 0], 2) + np.power(sample[:, :, 1], 2) +
                        np.power(sample[:, :, 2], 2)))
                    output[output == np.inf] = 0.0
                    sample = (output + 1.) / 2.
                    savename = '/home/yjyoon/Dropbox/ECCV16_IRNormal/extra/extra_result%d.bmp' % (idx)
                    scipy.misc.imsave(savename, sample)
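# Editor sketch (not part of the original repo): the three per-channel
# divisions above recompute the same Euclidean norm three times. They can be
# collapsed into one vectorized unit-normalization; `sample` is assumed to be
# an (H, W, 3) float32 array as in the loops above. The 1e-10 floor mirrors
# the `output[output == 0.0] = 1e-10` trick used by a later variant below.
def normalize_to_unit(sample):
    """Scale each pixel's 3-vector to unit length without inf/nan blowups."""
    norm = np.sqrt(np.sum(np.square(sample), axis=2, keepdims=True))
    return sample / np.maximum(norm, 1e-10)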
def main(_):
    print('Before processing flags')
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.use_s3:
        import aws
        if FLAGS.s3_bucket is None:
            raise ValueError('use_s3 flag set, but no bucket set.')
        # check to see if s3 bucket exists:
        elif not aws.bucket_exists(FLAGS.s3_bucket):
            raise ValueError(
                '`use_s3` flag set, but bucket "%s" doesn\'t exist. Not using s3'
                % FLAGS.s3_bucket)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    # configure the log_dir to match the params
    log_dir = os.path.join(
        FLAGS.log_dir,
        "dataset={},isCan={},lr={},imsize={},batch_size={}".format(
            FLAGS.dataset, FLAGS.can, FLAGS.learning_rate,
            FLAGS.input_height, FLAGS.batch_size))
    if not glob(log_dir + "*"):
        log_dir = os.path.join(log_dir, "000")
    else:
        containing_dir = os.path.join(log_dir, "*")
        print(containing_dir)
        nums = [int(x[-3:]) for x in glob(containing_dir)]  # TODO FIX THESE HACKS
        print('nums', nums)
        num = str(max(nums) + 1)
        log_dir = os.path.join(log_dir, (3 - len(num)) * "0" + num)
    FLAGS.log_dir = log_dir

    if FLAGS.checkpoint_dir is None:
        FLAGS.checkpoint_dir = os.path.join(FLAGS.log_dir, 'checkpoint')
        FLAGS.use_default_checkpoint = True
    elif FLAGS.use_default_checkpoint:
        raise ValueError(
            '`use_default_checkpoint` flag only works if you keep checkpoint_dir as None')

    if FLAGS.sample_dir is None:
        FLAGS.sample_dir = os.path.join(FLAGS.log_dir, 'samples')

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    print('After processing flags')
    pp.pprint(flags.FLAGS.__flags)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.sample_size,
                          use_resize=FLAGS.use_resize,
                          replay=FLAGS.replay,
                          y_dim=10,
                          smoothing=FLAGS.smoothing,
                          lamb=FLAGS.lambda_val,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          wgan=FLAGS.wgan,
                          can=FLAGS.can)
        elif FLAGS.dataset == 'wikiart':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.sample_size,
                          use_resize=FLAGS.use_resize,
                          replay=FLAGS.replay,
                          y_dim=27,
                          smoothing=FLAGS.smoothing,
                          lamb=FLAGS.lambda_val,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          wgan=FLAGS.wgan,
                          can=FLAGS.can)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.sample_size,
                          dataset_name=FLAGS.dataset,
                          replay=FLAGS.replay,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          use_resize=FLAGS.use_resize,
                          smoothing=FLAGS.smoothing,
                          crop=FLAGS.crop,
                          lamb=FLAGS.lambda_val,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          wgan=FLAGS.wgan,
                          can=FLAGS.can)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
            OPTION = 0
            visualize(sess, dcgan, FLAGS, OPTION)
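# Editor sketch (a hedged alternative to the glob-parsing "HACK" above, not
# part of the original code): allocate the next three-digit run directory
# under log_dir by listing existing numeric subdirectories. The name
# next_run_dir is illustrative.
def next_run_dir(base):
    if not os.path.exists(base):
        os.makedirs(base)
    runs = [int(d) for d in os.listdir(base) if d.isdigit()]
    return os.path.join(base, '{:03d}'.format(max(runs) + 1 if runs else 0))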
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            z_dim=FLAGS.generate_test_images,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir)

        # Print all variables related to training
        show_all_variables()

        # Check the train flag to decide between training and loading a saved model
        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 1
        # visualize merges the images produced by the sampler and saves them
        # (combines one batch of images into a single large image)
        visualize(sess, dcgan, FLAGS, OPTION)

if __name__ == '__main__':
class Runner():
    def __init__(self, model_config):
        #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
        run_config = tf.ConfigProto()
        run_config.gpu_options.allow_growth = True
        self.model_config = model_config

        # reset tensorflow graph, does not require kernel restart in spyder
        tf.reset_default_graph()
        self.sess = tf.Session(config=run_config)

        # mnist dataset specific settings
        if model_config.dataset == 'mnist':
            model_config.y_dim = 10
            model_config.dataset = 'mnist'
            data_dir = os.path.join(os.path.abspath(model_config.data_dir), 'mnist')
            dataset = MNIST(data_dir, epoch=model_config.epoch, batch_size=64)
        # celebA dataset specific settings
        elif model_config.dataset == 'celebA':
            model_config.crop = True
            data_dir = os.path.join(os.path.abspath(model_config.data_dir), 'celebA')
            dataset = CelebA(data_dir, epoch=model_config.epoch,
                             crop=model_config.crop, sess=self.sess)

        # Set the input height for the model to be dependent on the dataset's input_height
        # TODO: This should be in the dataset
        # TODO: model should refer to the dataset for the input_height and
        #       input_width to be used all throughout
        model_config.input_height = dataset.input_height
        model_config.output_height = dataset.output_height

        # set input data to be a square
        if model_config.input_width is None:
            model_config.input_width = model_config.input_height
        if model_config.output_width is None:
            model_config.output_width = model_config.output_height

        # if model_config.dataset == 'mnist':
        self.model = DCGAN(
            self.sess,
            input_width=model_config.input_width,
            input_height=model_config.input_height,
            output_width=model_config.output_width,
            output_height=model_config.output_height,
            batch_size=model_config.batch_size,
            sample_num=model_config.batch_size,
            y_dim=model_config.y_dim,
            z_dim=model_config.generate_test_images,
            dataset_name=model_config.dataset,  # TODO: make this refer to the dataset instead
            input_fname_pattern=model_config.input_fname_pattern,  # TODO: make this refer to the dataset
            crop=model_config.crop,  # TODO: Not used in the model, only used in the dataset
            checkpoint_dir=model_config.checkpoint_dir,
            sample_dir=model_config.sample_dir,
            data_dir=model_config.data_dir,
            dataset=dataset)

    def start_training(self):
        show_all_variables()

        if self.model_config.train:
            self.model.train(self.model_config)
        else:
            if not self.model.load(self.model_config.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        OPTION = 1
        visualize(self.sess, self.model, self.model_config, OPTION)
        self.sess.close()
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    #run_config.gpu_options.per_process_gpu_memory_fraction = 0.4

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                test_batch_size=FLAGS.test_batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                test_dir=FLAGS.test_dir)
        else:
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                test_batch_size=FLAGS.test_batch_size,
                sample_num=FLAGS.batch_size,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                test_dir=FLAGS.test_dir)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        if FLAGS.anomaly_test:
            dcgan.anomaly_detector()
            assert len(dcgan.test_data_names) > 0
            for idx in range(len(dcgan.test_data_names)):
                test_input = np.expand_dims(dcgan.test_data[idx], axis=0)
                test_name = dcgan.test_data_names[idx]
                dcgan.train_anomaly_detector(FLAGS, test_input, test_name)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                z_dim=FLAGS.generate_test_images,
                y_dim=2,  # conditional GAN: the Kaggle iceberg data has two classes
                c_dim=1,  # grayscale satellite imagery
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir)

        # show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # Below is code for visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          y_dim=10, dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/layers.js",
                [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 2

        if OPTION == 0:
            z_sample = np.random.uniform(-0.5, 0.5, size=(FLAGS.batch_size, dcgan.z_dim))
            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(samples, [8, 8],
                        './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 1:
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            for idx in xrange(100):
                print(" [*] %d" % idx)
                z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                for kdx, z in enumerate(z_sample):
                    z[idx] = values[kdx]
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
                save_images(samples, [8, 8], './samples/test_arange_%s.png' % (idx))
        elif OPTION == 2:
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            for idx in [random.randint(0, 99) for _ in xrange(100)]:
                print(" [*] %d" % idx)
                z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
                z_sample = np.tile(z, (FLAGS.batch_size, 1))
                #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                for kdx, z in enumerate(z_sample):
                    z[idx] = values[kdx]
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
                make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 3:
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            for idx in xrange(100):
                print(" [*] %d" % idx)
                z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                for kdx, z in enumerate(z_sample):
                    z[idx] = values[kdx]
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
                make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 4:
            image_set = []
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            for idx in xrange(100):
                print(" [*] %d" % idx)
                z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                for kdx, z in enumerate(z_sample):
                    z[idx] = values[kdx]
                image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
                make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
            new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10])
                             for idx in range(64) + range(63, -1, -1)]
            make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
        elif OPTION == 5:
            image_set = []
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            z_idx = [[random.randint(0, 99) for _ in xrange(5)] for _ in xrange(200)]
            for idx in xrange(200):
                print(" [*] %d" % idx)
                #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                z = np.random.uniform(-1e-1, 1e-1, size=(dcgan.z_dim))
                z_sample = np.tile(z, (FLAGS.batch_size, 1))
                for kdx, z in enumerate(z_sample):
                    for jdx in xrange(5):
                        z_sample[kdx][z_idx[idx][jdx]] = values[kdx]
                image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
                make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
            new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 20])
                             for idx in range(64) + range(63, -1, -1)]
            make_gif(new_image_set, './samples/test_gif_random_merged.gif', duration=4)
        elif OPTION == 6:
            image_set = []
            values = np.arange(0, 1, 1.0 / FLAGS.batch_size).tolist()
            z_idx = [[random.randint(0, 99) for _ in xrange(10)] for _ in xrange(100)]
            for idx in xrange(100):
                print(" [*] %d" % idx)
                z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
                z_sample = np.tile(z, (FLAGS.batch_size, 1))
                for kdx, z in enumerate(z_sample):
                    for jdx in xrange(10):
                        z_sample[kdx][z_idx[idx][jdx]] = values[kdx]
                image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
                save_images(image_set[-1], [8, 8], './samples/test_random_arange_%s.png' % (idx))
            new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10])
                             for idx in range(64) + range(63, -1, -1)]
            make_gif(new_image_set, './samples/test_gif_merged_random.gif', duration=4)
        elif OPTION == 7:
            for _ in xrange(50):
                z_idx = [[random.randint(0, 99) for _ in xrange(10)] for _ in xrange(8)]
                zs = []
                for idx in xrange(8):
                    z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
                    zs.append(np.tile(z, (8, 1)))
                z_sample = np.concatenate(zs)
                values = np.arange(0, 1, 1 / 8.)
                for idx in xrange(FLAGS.batch_size):
                    for jdx in xrange(8):
                        z_sample[idx][z_idx[idx / 8][jdx]] = values[idx % 8]
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
                save_images(samples, [8, 8],
                            './samples/multiple_testt_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 8:
            counter = 0
            for _ in xrange(50):
                import scipy.misc
                z_idx = [[random.randint(0, 99) for _ in xrange(10)] for _ in xrange(8)]
                zs = []
                for idx in xrange(8):
                    z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
                    zs.append(np.tile(z, (8, 1)))
                z_sample = np.concatenate(zs)
                values = np.arange(0, 1, 1 / 8.)
                for idx in xrange(FLAGS.batch_size):
                    for jdx in xrange(8):
                        z_sample[idx][z_idx[idx / 8][jdx]] = values[idx % 8]
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
                for sample in samples:
                    scipy.misc.imsave('./samples/turing/%s.png' % counter, sample)
                    counter += 1
        else:
            import scipy.misc
            from glob import glob

            samples = []
            fnames = glob("/Users/carpedm20/Downloads/x/1/*.png")
            fnames = sorted(fnames, key=lambda x: int(x.split("_")[1]) * 10000 +
                            int(x.split('_')[2].split(".")[0]))
            for f in fnames:
                samples.append(scipy.misc.imread(f))
            make_gif(samples, './samples/training.gif', duration=8, true_image=True)
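# Editor sketch (not one of the original visualization options): the options
# above sweep z one coordinate at a time along straight lines. When
# interpolating between two latent codes, spherical interpolation (slerp) is
# often preferred to linear interpolation, since it stays in the high-density
# shell of the prior. `a` and `b` are assumed to be 1-D vectors shaped like
# the rows fed to dcgan.z.
def slerp(a, b, t):
    """Spherical interpolation between latent vectors a and b at fraction t."""
    omega = np.arccos(np.clip(
        np.dot(a / np.linalg.norm(a), b / np.linalg.norm(b)), -1.0, 1.0))
    if np.isclose(omega, 0.0):
        return (1.0 - t) * a + t * b  # nearly parallel: fall back to lerp
    return (np.sin((1.0 - t) * omega) * a + np.sin(t * omega) * b) / np.sin(omega)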
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    #run_config = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          data_dir=FLAGS.data_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          data_dir=FLAGS.data_dir)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        OPTION = FLAGS.option
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'cifar10':
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                #y_dim=10,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        #OPTION = 1
        #visualize(sess, dcgan, FLAGS, OPTION)
        OPTION = 6
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    # expand user name and environment variables
    FLAGS.data_dir = expand_path(FLAGS.data_dir)
    FLAGS.out_dir = expand_path(FLAGS.out_dir)
    FLAGS.out_name = expand_path(FLAGS.out_name)
    FLAGS.checkpoint_dir = expand_path(FLAGS.checkpoint_dir)
    FLAGS.sample_dir = expand_path(FLAGS.sample_dir)

    if FLAGS.output_height is None:
        FLAGS.output_height = FLAGS.input_height
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    # output folders
    if FLAGS.out_name == "":
        FLAGS.out_name = '{} - {} - {}'.format(
            timestamp(),
            FLAGS.data_dir.split('/')[-1],  # penultimate folder of path
            FLAGS.dataset)
        if FLAGS.train:
            FLAGS.out_name += ' - x{}.z{}.{}.y{}.b{}'.format(
                FLAGS.input_width, FLAGS.z_dim, FLAGS.z_dist,
                FLAGS.output_width, FLAGS.batch_size)

    FLAGS.out_dir = os.path.join(FLAGS.out_dir, FLAGS.out_name)
    FLAGS.checkpoint_dir = os.path.join(FLAGS.out_dir, FLAGS.checkpoint_dir)
    FLAGS.sample_dir = os.path.join(FLAGS.out_dir, FLAGS.sample_dir)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with open(os.path.join(FLAGS.out_dir, 'FLAGS.json'), 'w') as f:
        flags_dict = {k: FLAGS[k].value for k in FLAGS}
        json.dump(flags_dict, f, indent=4, sort_keys=True, ensure_ascii=False)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                z_dim=FLAGS.z_dim,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                data_dir=FLAGS.data_dir,
                out_dir=FLAGS.out_dir,
                max_to_keep=FLAGS.max_to_keep)
        else:
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                z_dim=FLAGS.z_dim,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                data_dir=FLAGS.data_dir,
                out_dir=FLAGS.out_dir,
                max_to_keep=FLAGS.max_to_keep)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            load_success, load_counter = dcgan.load(FLAGS.checkpoint_dir)
            if not load_success:
                raise Exception("Checkpoint not found in " + FLAGS.checkpoint_dir)

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        if FLAGS.export:
            export_dir = os.path.join(FLAGS.checkpoint_dir, 'export_b' + str(FLAGS.batch_size))
            dcgan.save(export_dir, load_counter, ckpt=True, frozen=False)

        if FLAGS.freeze:
            export_dir = os.path.join(FLAGS.checkpoint_dir, 'frozen_b' + str(FLAGS.batch_size))
            dcgan.save(export_dir, load_counter, ckpt=False, frozen=True)

        if FLAGS.visualize:
            OPTION = 1
            visualize(sess, dcgan, FLAGS, OPTION, FLAGS.sample_dir)
import tensorflow as tf
from model import DCGAN
from ops import *
from utils import *
import numpy as np
import matplotlib.pyplot as plt

run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth = True
sess = tf.Session(config=run_config)

dcgan = DCGAN(sess, batch_size=1, checkpoint_dir='checkpoint', dataset_name='CCZJZ')
dcgan.load('checkpoint')


def complete(batch_masks, batch_images, demo, img_num):
    zhats = np.random.uniform(-1, 1, size=(dcgan.batch_size, dcgan.z_dim))
    t = 0
    lr = 0.01
    lrd = 0
    beta1 = 0.9
    beta2 = 0.999
    epsilon = 1e-8
    m = np.zeros_like(zhats)
    v = np.zeros_like(zhats)
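# Editor sketch (assumptions flagged): the body of complete() is truncated
# above, but it clearly sets up the state for a manual Adam update on zhats,
# as in DCGAN-based semantic inpainting. A minimal version of that step is
# below; `g` stands for the gradient of the completion loss w.r.t. zhats,
# which the original code would obtain from a sess.run over its loss graph,
# and `t` is the 1-based step count.
def adam_step(zhats, g, m, v, t, lr=0.01, beta1=0.9, beta2=0.999, epsilon=1e-8):
    m = beta1 * m + (1 - beta1) * g              # first-moment estimate
    v = beta2 * v + (1 - beta2) * np.square(g)   # second-moment estimate
    m_hat = m / (1 - beta1 ** t)                 # bias correction
    v_hat = v / (1 - beta2 ** t)
    zhats = zhats - lr * m_hat / (np.sqrt(v_hat) + epsilon)
    zhats = np.clip(zhats, -1, 1)                # stay inside the uniform prior
    return zhats, m, v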
def main(_):
    print('Before processing flags')
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.use_s3:
        import aws
        if FLAGS.s3_bucket is None:
            raise ValueError('use_s3 flag set, but no bucket set.')
        # check to see if s3 bucket exists:
        elif not aws.bucket_exists(FLAGS.s3_bucket):
            raise ValueError(
                '`use_s3` flag set, but bucket "%s" doesn\'t exist. Not using s3'
                % FLAGS.s3_bucket)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    # configure the log_dir to match the params
    log_dir = os.path.join(
        FLAGS.log_dir,
        "dataset={},isCan={},lr={},imsize={},hasStyleNet={},batch_size={}".format(
            FLAGS.dataset, FLAGS.can, FLAGS.learning_rate, FLAGS.input_height,
            FLAGS.style_net_checkpoint is not None, FLAGS.batch_size))
    FLAGS.log_dir = log_dir

    if FLAGS.checkpoint_dir is None:
        FLAGS.checkpoint_dir = os.path.join(FLAGS.log_dir, 'checkpoint')
        FLAGS.use_default_checkpoint = True
    elif FLAGS.use_default_checkpoint:
        raise ValueError(
            '`use_default_checkpoint` flag only works if you keep checkpoint_dir as None')

    if FLAGS.sample_dir is None:
        FLAGS.sample_dir = os.path.join(FLAGS.log_dir, 'samples')

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    print('After processing flags')
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.style_net_checkpoint:
        from slim.nets import nets_factory
        network_fn = nets_factory

    sess = None
    if FLAGS.dataset == 'mnist':
        y_dim = 10
    elif FLAGS.dataset == 'wikiart':
        y_dim = 13
    else:
        y_dim = None

    dcgan = DCGAN(sess,
                  input_width=FLAGS.input_width,
                  input_height=FLAGS.input_height,
                  output_width=FLAGS.output_width,
                  output_height=FLAGS.output_height,
                  batch_size=FLAGS.batch_size,
                  sample_num=FLAGS.sample_size,
                  use_resize=FLAGS.use_resize,
                  replay=FLAGS.replay,
                  y_dim=y_dim,
                  smoothing=FLAGS.smoothing,
                  lamb=FLAGS.lambda_val,
                  dataset_name=FLAGS.dataset,
                  input_fname_pattern=FLAGS.input_fname_pattern,
                  crop=False,
                  checkpoint_dir=FLAGS.checkpoint_dir,
                  sample_dir=FLAGS.sample_dir,
                  wgan=FLAGS.wgan,
                  learning_rate=FLAGS.learning_rate,
                  style_net_checkpoint=FLAGS.style_net_checkpoint,
                  can=FLAGS.can)

    run_config = tf.ConfigProto(inter_op_parallelism_threads=3,
                                intra_op_parallelism_threads=3)
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.7

    with tf.Session(config=run_config) as sess:
        #sess = tf_debug.TensorBoardDebugWrapperSession(sess, 'localhost:6064')
        dcgan.set_sess(sess)
        # show_all_variables()
        print(FLAGS.train, 'TRAIN')

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir, config=FLAGS)[0]:
                raise Exception("[!] Train a model first, then run test mode")
            OPTION = 0
            visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          y_dim=10,
                          c_dim=1,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_): print("FLAG1") pp.pprint(flags.FLAGS.__flags) if FLAGS.input_width is None: FLAGS.input_width = FLAGS.input_height if FLAGS.output_width is None: FLAGS.output_width = FLAGS.output_height if not os.path.exists(FLAGS.checkpoint_dir): os.makedirs(FLAGS.checkpoint_dir) if not os.path.exists(FLAGS.sample_dir): os.makedirs(FLAGS.sample_dir) #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333) run_config = tf.ConfigProto() run_config.gpu_options.allow_growth = True # extract zipfile print(FLAGS.dataset) print(os.path.join(FLAGS.data_path, "*.zip")) source_path = glob.glob(os.path.join(FLAGS.data_path, "*.zip")) print(source_path) for i, zipped_file in enumerate(source_path): print("Extracting image zip %s of %s" % (i + 1, len(source_path))) if os.path.exists(os.path.join(FLAGS.data_path, "celebA")): print("...File already exists") else: print(zipped_file) unzip_and_save(zipped_file, FLAGS.data_path) print("...Extracted!") print("Reading from %s" % os.path.join(FLAGS.data_path, "*/*.jpg")) unzipped_data_path = os.path.join( FLAGS.data_path, "*/*.jpg") #right now we support only one dataset print(unzipped_data_path) with tf.Session(config=run_config) as sess: if FLAGS.dataset == 'mnist': dcgan = DCGAN( sess, input_width=FLAGS.input_width, input_height=FLAGS.input_height, output_width=FLAGS.output_width, output_height=FLAGS.output_height, batch_size=FLAGS.batch_size, sample_num=FLAGS.batch_size, y_dim=10, data_path=FLAGS.data_path, #glob signature dataset_type=unzipped_data_path, crop=FLAGS.crop, checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir) else: dcgan = DCGAN(sess, input_width=FLAGS.input_width, input_height=FLAGS.input_height, output_width=FLAGS.output_width, output_height=FLAGS.output_height, batch_size=FLAGS.batch_size, sample_num=FLAGS.batch_size, data_path=unzipped_data_path, dataset_type=FLAGS.dataset, crop=FLAGS.crop, checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir) show_all_variables() if FLAGS.train: dcgan.train(FLAGS) else: if not dcgan.load(FLAGS.checkpoint_dir)[0]: raise Exception("[!] Train a model first, then run test mode") # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0], # [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1], # [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2], # [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3], # [dcgan.h4_w, dcgan.h4_b, None]) # Below is codes for visualization OPTION = 1 visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    global classifier
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    sess = keras.backend.get_session()
    print("LOAD INIT")

    with tf.variable_scope("QQ"):
        dcgan = DCGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            dataset_name=FLAGS.dataset,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            adversarial_path=FLAGS.adversarial_path,
            ground_truth_path=FLAGS.ground_truth_path,
            test_path=FLAGS.test_path,
            save_path=FLAGS.save_path,
            c_dim=FLAGS.c_dim,
        )
        if not dcgan.load(FLAGS.checkpoint_dir)[0]:
            raise Exception("[!] Train a model first, then run test mode")

    if True:
        keras.backend.set_learning_phase(False)
        classifier = make_model('linear')
        classifier.load_weights("/tmp/" + model + "model")

        class Wrap:
            num_channels = 1 if model == "mnist" else 3
            num_labels = 10
            image_size = 28 if model == "mnist" else 32

            def predict(self, xs):
                #return classifier(xs)
                return full_model(sess, xs)[1]

        attack = l2_attack.CarliniL2(sess, Wrap(), targeted=True,
                                     binary_search_steps=4, initial_const=1,
                                     max_iterations=10000, batch_size=100,
                                     learning_rate=1e-2, confidence=1,
                                     boxmin=-1, boxmax=1)

        indexs = [np.where(y_test == i)[0][0] for i in range(10)]
        indexs = np.transpose([indexs] * 10).flatten()
        targets = np.array([range(10)] * 10).flatten()
        targets = keras.utils.to_categorical(targets, 10)
        print(indexs.shape)
        print(targets.shape)

        adv = attack.attack(x_test[indexs], targets)
        np.save("samples/adv-" + model + "-samples.npy", adv)
        print('mean distortion',
              np.mean(np.sum((adv - x_test[indexs])**2, axis=(1, 2, 3))**.5))

        restored, classified = full_model(sess, tf.constant(adv, dtype=tf.float32), "B")
        new, preds = sess.run([restored, classified],
                              feed_dict={dcgan.advInputs: x_test[indexs]})

        print("Original classifier accuracy when run on the adversarial examples")
        print(np.argmax(classifier.predict(adv), axis=1) == y_test[indexs])
        print("Original classifier accuracy when run on the cleaned adversarial examples")
        print(np.argmax(classifier.predict(new), axis=1) == y_test[indexs])
def main(_):
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(os.path.join('./logs', time.strftime('%d%m'))):
        os.makedirs(os.path.join('./logs', time.strftime('%d%m')))

    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        # Pick the architecture ("narrow" vs. the deeper variant) and the
        # training graph vs. the fully-convolutional evaluation graph.
        if FLAGS.is_train:
            if FLAGS.model == "narrow":
                dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                              input_size=FLAGS.input_size, dataset_name=FLAGS.dataset,
                              is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
            else:
                dcgan = Deep_DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                                   input_size=FLAGS.input_size, dataset_name=FLAGS.dataset,
                                   is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            if FLAGS.model == "narrow":
                dcgan = EVAL(sess, input_size=600, batch_size=1, ir_image_shape=[None, None, 1],
                             normal_image_shape=[None, None, 3], dataset_name=FLAGS.dataset,
                             is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)
            else:
                dcgan = Deep_EVAL(sess, input_size=600, batch_size=1, ir_image_shape=[None, None, 1],
                                  normal_image_shape=[None, None, 3], dataset_name=FLAGS.dataset,
                                  is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            OPTION = 2  # for validation
            list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
            VAL_OPTION = 3
            """
            if OPTION == 1:
                data = json.load(open("/research2/IR_normal_small/json/traininput_single_224_ori_small.json"))
                data_label = json.load(open("/research2/IR_normal_small/json/traingt_single_224_ori_small.json"))
            elif OPTION == 2:
                data = json.load(open("/research2/IR_normal_small/json/testinput_single_224_ori_small.json"))
                data_label = json.load(open("/research2/IR_normal_small/json/testgt_single_224_ori_small.json"))
            """
            if VAL_OPTION == 1:
                model = 'DCGAN.model-10000'
                dcgan.load(FLAGS.checkpoint_dir, model)
                for idx in range(len(list_val)):
                    os.makedirs(os.path.join('L1_loss_result', '%03d' % list_val[idx]))
                    for idx2 in range(1, 10):
                        print("Selected material %03d/%d" % (list_val[idx], idx2))
                        img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                        input_ = scipy.misc.imread(img + '/3.bmp').astype(float)
                        gt_ = scipy.misc.imread('/research2/IR_normal_small/save016/1/12_Normal.bmp').astype(float)
                        input_ = scipy.misc.imresize(input_, [600, 800])
                        input_ = input_ / 127.5 - 1.0  # normalize to [-1, 1] (was /255.0, which gives [-1, 0])
                        gt_ = scipy.misc.imresize(gt_, [600, 800])
                        #input_ = input_[240:840, 515:1315]
                        #gt_ = gt_[240:840, 515:1315]
                        input_ = np.reshape(input_, (1, 600, 800, 1)).astype(np.float32)
                        gt_ = np.reshape(gt_, (1, 600, 800, 3)).astype(np.float32)
                        start_time = time.time()
                        sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                        print('time: %.8f' % (time.time() - start_time))
                        # Normalize each predicted surface normal to unit length
                        # (vectorized form of the per-channel division; bare `inf`
                        # was undefined, so non-finite entries are zeroed instead).
                        sample = np.squeeze(sample).astype(np.float32)
                        gt_ = np.squeeze(gt_).astype(np.float32)
                        norm = np.sqrt(np.sum(np.square(sample), axis=2, keepdims=True))
                        output = sample / norm
                        output[~np.isfinite(output)] = 0.0
                        sample = (output + 1.) / 2.
                        os.makedirs(os.path.join('L1_loss_result', '%03d/%d' % (list_val[idx], idx2)))
                        savename = './L1_loss_result/%03d/%d/single_normal_L1_%s.bmp' % (list_val[idx], idx2, model)
                        scipy.misc.imsave(savename, sample)

            elif VAL_OPTION == 2:  # arbitrary dataset
                print("Computing arbitrary dataset")
                trained_models = glob.glob(os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*'))
                trained_models = natsorted(trained_models)
                datapath = '/home/yjyoon/Dropbox/ECCV_result/smartphone/iphone/input/gray_*.bmp'
                savepath = '/home/yjyoon/Dropbox/ECCV_result/smartphone/iphone/output'
                fulldatapath = glob.glob(datapath)  # was os.path.join(glob.glob(...)), a no-op wrapper
                model = trained_models[-2].split('/')[-1]
                dcgan.load(FLAGS.checkpoint_dir, model)
                for idx in range(len(fulldatapath)):
                    #input_ = cv2.imread(fulldatapath[idx])
                    #input_ = cv2.cvtColor(input_, cv2.COLOR_BGR2YCR_CB)
                    #input_ = cv2.resize(input_[:, :, 0], (600, 800))
                    input_ = scipy.misc.imread(fulldatapath[idx]).astype(float)
                    input_ = (input_ / 127.5) - 1.  # normalize to [-1, 1]
                    input_ = np.reshape(input_, (1, input_.shape[0], input_.shape[1], 1)).astype(np.float32)
                    start_time = time.time()
                    sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                    print('time: %.8f' % (time.time() - start_time))
                    # Normalize each predicted surface normal to unit length.
                    sample = np.squeeze(sample).astype(np.float32)
                    norm = np.sqrt(np.sum(np.square(sample), axis=2, keepdims=True))
                    output = sample / norm
                    output[~np.isfinite(output)] = 0.0
                    sample = (output + 1.) / 2.
                    name = fulldatapath[idx].split('/')[-1].split('.')[0]
                    savename = savepath + '/normal_' + name + '.bmp'
                    scipy.misc.imsave(savename, sample)

            elif VAL_OPTION == 3:  # light source fixed
                save_files = glob.glob(os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*'))
                save_files = natsorted(save_files)
                savepath = './RMSS_ang_scale_loss_result'
                max_h = 600
                max_w = 800
                for model_idx in range(0, len(save_files), 2):
                    model = save_files[model_idx].split('/')[-1]
                    dcgan.load(FLAGS.checkpoint_dir, model)
                    for idx in range(len(list_val)):
                        if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                            os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):
                            print("Selected material %03d/%d" % (list_val[idx], idx2))
                            img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                            input_ = scipy.misc.imread(img + '/3.bmp').astype(float)
                            input_ = scipy.misc.imresize(input_, [600, 800])
                            input_ = (input_ / 127.5) - 1.  # normalize to [-1, 1]
                            # Slide a max_h x max_w window with the given overlap; only
                            # full tiles are taken. (The original called np.min/np.ceil
                            # with two positional arguments, which is invalid, and
                            # passed np.zeros its shape as separate arguments.)
                            overlap = min(max_h, max_w) - 100
                            hs = range(0, input_.shape[0] - max_h + 1, overlap)
                            ws = range(0, input_.shape[1] - max_w + 1, overlap)
                            result = np.zeros((input_.shape[0], input_.shape[1], 3, len(hs) * len(ws)),
                                              dtype=np.float32)
                            tmp_result = []
                            for h in hs:
                                for w in ws:
                                    crop_input_ = input_[h:h + max_h, w:w + max_w]
                                    # Add the batch dimension (was missing) and fix the
                                    # crop_intpu_ typo that discarded the float cast.
                                    crop_input_ = np.reshape(crop_input_, (1, max_h, max_w, 1)).astype(np.float32)
                                    start_time = time.time()
                                    sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: crop_input_})
                                    print('time: %.8f' % (time.time() - start_time))
                                    # Normalize each predicted surface normal to unit length.
                                    sample = np.squeeze(sample).astype(np.float32)
                                    output = np.sqrt(np.sum(np.square(sample), axis=2))
                                    output = np.expand_dims(output, -1)
                                    output[output == 0.0] = 1e-10
                                    output = sample / output
                                    tmp_result.append(output)
                            result = recovering_fullimage(tmp_result, result, overlap, max_h, max_w)
                            if not os.path.exists(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2))):
                                os.makedirs(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2)))
                            savename = os.path.join(savepath, '%03d/%d/single_normal_%s.bmp' % (list_val[idx], idx2, model))
                            scipy.misc.imsave(savename, result)

            elif VAL_OPTION == 4:  # depends on light sources
                save_files = glob.glob(os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*'))
                save_files = natsorted(save_files)
                savepath = './L1_ang_loss_lights_result'
                if not os.path.exists(savepath):
                    os.makedirs(savepath)
                model = save_files[-2].split('/')[-1]
                dcgan.load(FLAGS.checkpoint_dir, model)
                for idx in range(len(list_val)):
                    if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                        os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                    for idx2 in range(1, 10):  # tilt angles 1~9
                        for idx3 in range(1, 13):  # light sources
                            print("Selected material %03d/%d" % (list_val[idx], idx2))
                            img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                            input_ = scipy.misc.imread(img + '/%d.bmp' % idx3).astype(float)  # input NIR image
                            input_ = scipy.misc.imresize(input_, [600, 800])
                            input_ = input_ / 127.5 - 1.0  # normalize to [-1, 1]
                            input_ = np.reshape(input_, (1, 600, 800, 1)).astype(np.float32)
                            start_time = time.time()
                            sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                            print('time: %.8f' % (time.time() - start_time))
                            # Normalize each predicted surface normal to unit length.
                            sample = np.squeeze(sample).astype(np.float32)
                            norm = np.sqrt(np.sum(np.square(sample), axis=2, keepdims=True))
                            output = sample / norm
                            output[~np.isfinite(output)] = 0.0
                            sample = (output + 1.) / 2.
                            if not os.path.exists(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2))):
                                os.makedirs(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2)))
                            savename = os.path.join(savepath, '%03d/%d/single_normal_%d.bmp' % (list_val[idx], idx2, idx3))
                            scipy.misc.imsave(savename, sample)


def recovering_fullimage(output, result, overlap, max_h, max_w):
    # Paste each crop's prediction back at its origin and merge overlaps with
    # a per-pixel max. (The original looped over an undefined `input_` and
    # incremented count by 0, so every crop overwrote slot 0.)
    count = 0
    for h in range(0, result.shape[0] - max_h + 1, overlap):
        for w in range(0, result.shape[1] - max_w + 1, overlap):
            result[h:h + max_h, w:w + max_w, :, count] = output[count]
            count += 1
    return np.max(result, axis=3)
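# The tiling in VAL_OPTION == 3 is easy to get wrong, so a toy round-trip
# check is useful: crop overlapping windows from a random image, stitch them
# back with recovering_fullimage, and confirm the image is recovered. A
# minimal sketch, with sizes chosen so the tiles cover the image exactly:
def check_tiling_roundtrip():
    max_h, max_w, overlap = 4, 4, 2
    image = np.random.rand(6, 6, 3).astype(np.float32)
    crops = []
    for h in range(0, image.shape[0] - max_h + 1, overlap):
        for w in range(0, image.shape[1] - max_w + 1, overlap):
            crops.append(image[h:h + max_h, w:w + max_w])
    canvas = np.zeros((image.shape[0], image.shape[1], 3, len(crops)), dtype=np.float32)
    stitched = recovering_fullimage(crops, canvas, overlap, max_h, max_w)
    # Non-negative inputs make the per-pixel max recover the original values.
    assert np.allclose(stitched, image)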
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                c_dim=1,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                is_crop=FLAGS.is_crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                c_dim=FLAGS.c_dim,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                is_crop=FLAGS.is_crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir)

        show_all_variables()

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js",
        #         [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        # Below is the code for visualization.
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    # y_dim is inferred from the dataset name or the labels file
    if FLAGS.conditional and FLAGS.dataset != 'mnist':
        labels_fname = os.path.join(FLAGS.data_dir, FLAGS.dataset, FLAGS.input_fname_labels)
        if not os.path.exists(labels_fname):
            raise Exception("[!] conditional requires image<->identity labels")

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(sess,
                      input_width=FLAGS.input_width,
                      input_height=FLAGS.input_height,
                      output_width=FLAGS.output_width,
                      output_height=FLAGS.output_height,
                      batch_size=FLAGS.batch_size,
                      sample_num=FLAGS.batch_size,
                      z_dim=FLAGS.generate_test_images,
                      data_dir=FLAGS.data_dir,
                      dataset_name=FLAGS.dataset,
                      input_fname_pattern=FLAGS.input_fname_pattern,
                      crop=FLAGS.crop,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir,
                      conditional=FLAGS.conditional,
                      dense=FLAGS.dense,
                      loss_type=FLAGS.loss_type,
                      exp_num=FLAGS.exp_num)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        if FLAGS.generate:
            generate_samples(sess, dcgan, FLAGS)
            exit()

        # to_json("./web/js/layers.js",
        #         [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        # Below is the code for visualization.
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
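# The conditional branch above only verifies that input_fname_labels exists;
# the file format itself is not shown in this snippet. As a sketch of how
# y_dim might be inferred, assuming a hypothetical two-column
# "filename identity" text file (the real layout may differ):
def infer_y_dim(labels_fname):
    identities = set()
    with open(labels_fname) as f:
        for line in f:
            parts = line.split()
            if len(parts) == 2:       # filename, identity label
                identities.add(parts[1])
    return len(identities)            # one output unit per identity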
def run_app(FLAGS):
    pp.pprint(FLAGS)

    # Fill in per-dataset default geometry when none is given.
    if FLAGS.input_width is None:
        if FLAGS.dataset == "mnist_stacked":
            FLAGS.input_width = 28
            FLAGS.input_height = 28
            FLAGS.output_width = 28
            FLAGS.output_height = 28
        elif FLAGS.dataset == "cifar10":
            FLAGS.input_width = 32
            FLAGS.input_height = 32
            FLAGS.output_width = 32
            FLAGS.output_height = 32
        elif FLAGS.dataset == "celebA":
            FLAGS.input_width = 108
            FLAGS.input_height = 108
            FLAGS.output_width = 64
            FLAGS.output_height = 64
            FLAGS.crop = True

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    #FLAGS.checkpoint_dir = os.path.join(FLAGS.main_output_dir, FLAGS.checkpoint_dir)
    #FLAGS.sample_dir = os.path.join(FLAGS.main_output_dir, FLAGS.sample_dir)
    print("CREATING DIRECTORY")
    print(FLAGS.main_output_dir)
    print(FLAGS.sample_dir)
    if not os.path.exists(FLAGS.main_output_dir):
        os.makedirs(FLAGS.main_output_dir)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.sample_num,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            config=FLAGS,
            z_dim=FLAGS.z_dim)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        if FLAGS.visualize:
            get_random_samples(sess, dcgan, FLAGS)

        if FLAGS.eval_infvo_lbfgsb_maxiter > 0:
            ivo_result = eval_inference_via_optimization(sess, dcgan, FLAGS)
            print("inference_via_optimisation's score: " + str(ivo_result))
            return ivo_result

        if FLAGS.eval_mnist_stacked_examples > 0:
            assert FLAGS.dataset == "mnist_stacked"
            result = eval_mnist_stacked(sess, dcgan, FLAGS)
            return result

    return "finished."
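# eval_inference_via_optimization is not defined in this snippet. Judging by
# the eval_infvo_lbfgsb_maxiter flag, it recovers a latent code for a target
# image by minimizing reconstruction error with L-BFGS-B, in the spirit of
# the "inference via optimization" metric used for stacked MNIST. A
# self-contained NumPy/SciPy sketch with a hypothetical linear generator
# G(z) = Wz standing in for the trained network:
import numpy as np
from scipy.optimize import minimize

def inference_via_optimization_sketch(maxiter=50):
    rng = np.random.RandomState(0)
    W = rng.randn(784, 100)              # stand-in linear "generator"
    x_target = W.dot(rng.randn(100))     # a target the generator can represent

    def objective(z):
        r = W.dot(z) - x_target
        return 0.5 * np.sum(r ** 2), W.T.dot(r)   # loss and its gradient

    res = minimize(objective, rng.randn(100), jac=True, method='L-BFGS-B',
                   options={'maxiter': maxiter})   # cf. FLAGS.eval_infvo_lbfgsb_maxiter
    return res.fun   # near zero when the target lies in the generator's range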
def main(_):
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                      output_size=FLAGS.output_size, c_dim=FLAGS.c_dim,
                      dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                      checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir)

        data = glob(os.path.join("./data", FLAGS.dataset, "*.jpg"))
        sample_files = data[0:dcgan.sample_size]
        sample = [get_image(sample_file, dcgan.image_size, is_crop=dcgan.is_crop,
                            resize_w=dcgan.output_size, is_grayscale=dcgan.is_grayscale)
                  for sample_file in sample_files]
        sample_images = np.array(sample).astype(np.float32)

        n_input = FLAGS.c_dim * (FLAGS.output_size ** 2)
        #noise_std = 1e-1
        noise_std = FLAGS.snr
        #m = 100
        m = FLAGS.m  # number of measurements (was FLAG.m, a typo)
        if not FLAGS.lam:
            lambda_ = 1 / noise_std
        else:
            lambda_ = FLAGS.lam
        #lambda_ = 1 / noise_std

        z = tf.Variable(tf.random_normal([dcgan.batch_size, dcgan.z_dim]))

        # Set up the measurements y = xA + noise
        x = tf.reshape(sample_images, [dcgan.batch_size, n_input])
        A = tf.Variable((1.0 / np.sqrt(m)) * tf.random_normal((n_input, m)), name='A')
        noise = tf.Variable(noise_std * tf.random_normal((dcgan.batch_size, m)), name='noise')
        y = tf.add(tf.matmul(x, A), noise, name='y')

        # Measure the generator output
        x_temp = sampler(dcgan, z)
        x_hat = tf.reshape(x_temp, [dcgan.batch_size, n_input])
        y_hat = tf.matmul(x_hat, A, name='y_hat')

        # Loss: measurement error plus a Gaussian prior on z
        measurement_loss = tf.reduce_sum((y - y_hat) ** 2, 1)
        z_likelihood_loss = tf.reduce_sum(z ** 2, 1)
        loss = tf.add(measurement_loss, lambda_ * z_likelihood_loss, name='loss')

        # Set up gradient descent with respect to z only
        grad_z = tf.gradients(loss, z)[0]
        lr = tf.placeholder(tf.float32, shape=(), name='learning_rate')
        update_op = tf.assign(z, z - lr * grad_z, name='update_op')

        nIter = FLAGS.nIter
        dcgan.load(FLAGS.checkpoint_dir)
        # Initialize only the new variables; the generator weights come from the
        # checkpoint. (A dangling tf.initialize_all_variables() call that was
        # never run, and would have clobbered the loaded weights, was removed.)
        sess.run([z.initializer, A.initializer, noise.initializer])

        z_val = z.eval()
        for update_step in range(nIter):
            lr_val = 0.001 / (0.1 * update_step + 1)
            z_val, _ = sess.run([z, update_op], feed_dict={lr: lr_val})

        est_images = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_val})
        total_images = sp.vstack((sample_images, est_images))
        save_images(total_images, [4, 32],
                    './samples/result_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime()))
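# The update loop above is gradient descent on ||y - G(z)A||^2 + lambda*||z||^2,
# i.e. compressed sensing with a generative prior in the spirit of Bora et
# al.'s "Compressed Sensing using Generative Models". A self-contained NumPy
# sketch of the same dynamics, with a hypothetical linear generator
# G(z) = Wg z standing in for the DCGAN sampler:
import numpy as np

def csgm_sketch(n_input=256, m=50, z_dim=20, lam=0.1, n_iter=2000):
    rng = np.random.RandomState(0)
    Wg = rng.randn(n_input, z_dim)            # stand-in linear generator
    A = rng.randn(n_input, m) / np.sqrt(m)    # measurement matrix, as above
    x_true = Wg.dot(rng.randn(z_dim))         # signal in the generator's range
    y = x_true.dot(A)                         # noiseless measurements

    z = rng.randn(z_dim)
    for step in range(n_iter):
        lr = 0.001 / (0.1 * step + 1)         # same decaying schedule as above
        residual = Wg.dot(z).dot(A) - y       # y_hat - y
        grad = 2 * Wg.T.dot(A.dot(residual)) + 2 * lam * z
        z -= lr * grad

    # Relative reconstruction error of the recovered signal.
    return np.linalg.norm(Wg.dot(z) - x_true) / np.linalg.norm(x_true)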