def main(_):
    """Create the output directories, build the learning-rate schedule,
    and run the requested phase ('train' or 'test') in a TF session."""
    # Make sure every output directory exists before the model touches it.
    for folder in (args.ckpt_dir, args.sample_dir, args.test_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)

    # Per-epoch learning rates: constant, then divided by 10 from epoch 30 on.
    lr = args.lr * np.ones([args.epoch])
    lr[30:] = lr[0] / 10.0

    # Choose the session configuration once instead of duplicating the
    # phase dispatch for the GPU and CPU paths.
    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        session_config = tf.ConfigProto(
            gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))
    else:
        print("CPU\n")
        session_config = None  # tf.Session(config=None) == tf.Session()

    with tf.Session(config=session_config) as sess:
        model = denoiser(sess, sigma=args.sigma)
        if args.phase == 'train':
            denoiser_train(model, lr=lr)
        elif args.phase == 'test':
            denoiser_test(model)
        else:
            print('[!]Unknown phase')
            exit(0)
def main(_):
    """Derive per-run output folder names from the cost/type/sigma options,
    then run the requested phase ('train' or 'test') in a TF session."""
    # The following string is attached to checkpoint, log and image
    # folder names.
    # NOTE(review): the original also computed
    # eps_str = str(int(np.log10(args.eps))) here but never used it;
    # the dead local has been removed.
    name = "DnCNN_" + args.cost + str(args.type) + "_sigma" + str(
        int(args.sigma))
    ckpt_dir = args.ckpt_dir + "/" + name
    sample_dir = args.sample_dir + "/" + name
    test_dir = args.test_dir + "/" + name
    log_dir = args.log_dir + "/" + name
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)

    # Learning-rate schedule: constant, decayed by 10x from epoch 40 on.
    lr = args.lr * np.ones([args.epoch])
    lr[40:] = lr[0] / 10.0  # lr decay

    # Build the session config once; the phase dispatch is shared by the
    # GPU and CPU paths.
    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        session_config = tf.ConfigProto(
            gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))
    else:
        print("CPU\n")
        session_config = None

    with tf.Session(config=session_config) as sess:
        model = denoiser(sess,
                         sigma=args.sigma,
                         eps=args.eps,
                         cost_str=args.cost,
                         ckpt_dir=ckpt_dir,
                         sample_dir=sample_dir,
                         log_dir=log_dir)
        if args.phase == 'train':
            denoiser_train(model, lr=lr)
        elif args.phase == 'test':
            denoiser_test(model, test_dir)
        else:
            print('[!]Unknown phase')
            exit(0)
def main(_):
    """Prepare the checkpoint directory and LR schedule, then train
    (the test phase is currently a no-op) on GPU; CPU is unsupported."""
    # checkpoint dir
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)

    lr = args.lr * np.ones([args.epoch])
    # reduce learning rate after 60% total epoch
    small_lr_pos = int(args.epoch * 0.6)
    lr[small_lr_pos:] = lr[0] * 0.1

    if args.use_gpu:  # added to control the gpu memory
        print("Run Tensorflow [GPU]\n")
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:
            model = denoiser(sess)  # init a denoiser class
            if args.phase == 'train':
                denoiser_train(model, lr=lr)
            elif args.phase == 'test':
                # denoiser_test(model)
                pass
            else:
                print('[!]Unknown phase')
                sys.exit(1)
    else:
        # BUG FIX: the original used a Python 2 print statement
        # (`print "CPU Not supported yet!"`), which is a SyntaxError
        # under Python 3 even though the rest of this function uses
        # print() calls.
        print("CPU Not supported yet!")
        sys.exit(1)
def test():
    """Run the denoiser test phase, capping GPU memory when a GPU is used."""
    if args.use_gpu == 1:
        # Reserve at most 90% of the GPU's memory for this process.
        gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        session_config = tf.ConfigProto(gpu_options=gpu_opts)
    else:
        session_config = None
    with tf.Session(config=session_config) as sess:
        denoiser_test(denoiser(sess))
def main(_):
    """Create output directories and run train/test in a TF1-compat session.

    On GPU, pins the process to GPU #0 and caps its memory use.
    """
    for folder in (args.ckpt_dir, args.sample_dir, args.test_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)
    # if not os.path.exists(args.save_dir):
    #     os.makedirs(args.save_dir)

    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        # uses the GPU #0 on linux machines
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
        print("\n\nExecution Environment:\n\n{}".format(
            device_lib.list_local_devices()))
        gpu_options = tf.compat.v1.GPUOptions(
            per_process_gpu_memory_fraction=0.90)
        session_config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
    else:
        print("CPU\n")
        session_config = None

    with tf.compat.v1.Session(config=session_config) as sess:
        model = denoiser(sess, args.batch_size)
        if args.phase == 'train':
            denoiser_train(model, lr=args.lr)
        elif args.phase == 'test':
            # BUG FIX: removed a leftover pdb.set_trace() that dropped
            # the CPU test path into an interactive debugger.
            denoiser_test(model)
        else:
            print('[!]Unknown phase')
            exit(0)
def main(_):
    """Run the denoiser test phase on GPU; CPU execution is unsupported."""
    if args.use_gpu:  # added to control the gpu memory
        print("Run Tensorflow [GPU]\n")
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:
            model = denoiser(sess)
            denoiser_test(model)
    else:
        # BUG FIX: the original Python 2 print statement
        # (`print "CPU Not supported yet!"`) is a SyntaxError on
        # Python 3; converted to a print() call.
        print("CPU Not supported yet!")
        sys.exit(1)
def main(_):
    """Create output dirs, build the LR schedule, and dispatch the
    'train'/'test' phase on either a GPU session or a thread-limited
    CPU session pinned to /cpu:0."""
    for folder in (args.ckpt_dir, args.sample_dir, args.test_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)

    # CPU session settings: limit to num_cpu_core CPU usage.
    cpu_config = tf.ConfigProto(
        device_count={"CPU": 32},
        inter_op_parallelism_threads=16,
        intra_op_parallelism_threads=32)

    # Constant LR, divided by 10 from epoch 30 onwards.
    lr = args.lr * np.ones([args.epoch])
    lr[30:] = lr[0] / 10.0

    def _dispatch(sess):
        # Phase dispatch shared by both device paths.
        model = denoiser(sess, sigma=args.sigma)
        if args.phase == 'train':
            denoiser_train(model, lr=lr)
        elif args.phase == 'test':
            denoiser_test(model)
        else:
            print('[!]Unknown phase')
            exit(0)

    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:
            _dispatch(sess)
    else:
        print("CPU\n")
        with tf.device("/cpu:0"):
            with tf.Session(config=cpu_config) as sess:
                _dispatch(sess)
def main(_):
    """Create output directories and run train/test.

    The CPU path now mirrors the GPU path; both use the current
    denoiser/denoiser_train interfaces.
    """
    for folder in (args.ckpt_dir, args.sample_dir, args.test_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)
    print(args)

    # Initial learning rate and its staged decay factors.
    lr_init = args.lr
    lr_decay = [1.0, 0.2]

    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        gpu_options = tf.GPUOptions(allow_growth=True)
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
        print(gpu_options)
        session_config = tf.ConfigProto(gpu_options=gpu_options)
    else:
        print("CPU\n")
        session_config = None

    with tf.Session(config=session_config) as sess:
        # BUG FIX: the original CPU branch built the model with an old
        # signature (denoiser(sess, sigma=args.sigma)) and then called
        # denoiser_train(model, lr=lr) with `lr` never defined, which
        # raised NameError at runtime. Both branches now share the same
        # up-to-date call sequence.
        model = denoiser(sess,
                         sigma=args.sigma,
                         kstage=args.kstage,
                         batch_size=args.batch_size,
                         config=args)
        if args.phase == 'train':
            denoiser_train(model, lr_init=lr_init, lr_decay=lr_decay)
        elif args.phase == 'test':
            denoiser_test(model)
        else:
            print('[!]Unknown phase')
            exit(0)
def main(_):
    """Run the 'inference' phase in a GPU- or CPU-backed session."""
    # Build the session config once; the phase dispatch is shared.
    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        session_config = tf.ConfigProto(
            gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))
    else:
        print("CPU\n")
        session_config = None

    with tf.Session(config=session_config) as sess:
        model = denoiser(sess)
        if args.phase == 'inference':
            denoiser_inference(model)
        else:
            print('[!]Unknown phase')
            exit(0)
def main(_):
    """Create output directories, build the LR schedule, and run the
    requested phase ('train' or 'test')."""
    for folder in (args.ckpt_dir, args.sample_dir, args.test_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)

    # Constant learning rate, decayed by 10x from epoch 30.
    lr = args.lr * np.ones([args.epoch])
    lr[30:] = lr[0] / 10.0

    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        # Let TF fall back to another device when an op has no GPU kernel.
        session_config = tf.ConfigProto(allow_soft_placement=True)
    else:
        print("CPU\n")
        session_config = None

    with tf.Session(config=session_config) as sess:
        model = denoiser(sess, sigma=args.sigma)
        if args.phase == 'train':
            denoiser_train(model, lr=lr)
        elif args.phase == 'test':
            denoiser_test(model)
        else:
            print('[!]Unknown phase')
            exit(0)
def main(_):
    """Create output directories and dispatch the requested phase.

    The CPU path now supports the same phases as the GPU path and uses
    the same denoiser_train call.
    """
    for folder in (checkpoint_dir, sample_dir, test_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)

    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        session_config = tf.ConfigProto(
            gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))
    else:
        print("CPU\n")
        session_config = None

    with tf.Session(config=session_config) as sess:
        model = denoiser(sess, ip=ip)
        if args.phase == 'train':
            # BUG FIX: the original CPU branch called
            # denoiser_train(model, lr=lr) with `lr` never defined
            # (NameError); it also lacked the 'test_repeatability' and
            # 'generate_map' phases. Both branches now share one dispatch.
            denoiser_train(model)
        elif args.phase == 'test':
            denoiser_test(model)
        elif args.phase == 'test_repeatability':
            denoiser_test2(model)
        elif args.phase == 'generate_map':
            denoiser_generate_map(model)
        else:
            print('[!]Unknown phase')
            exit(0)
def main(_):
    """Create output directories (including ./tmp_jpeg), build the LR
    schedule, and run the requested phase ('train' or 'test')."""
    for folder in (args.ckpt_dir, args.sample_dir, args.test_dir,
                   './tmp_jpeg'):
        if not os.path.exists(folder):
            os.makedirs(folder)

    # Constant learning rate, divided by 10 from epoch 30 onwards.
    lr = args.lr * np.ones([args.epoch])
    lr[30:] = lr[0] / 10.0

    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        session_config = tf.ConfigProto(
            gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))
    else:
        print("CPU\n")
        session_config = None

    with tf.Session(config=session_config) as sess:
        model = denoiser(sess, sigma=args.sigma)
        if args.phase == 'train':
            denoiser_train(model, lr=lr)
        elif args.phase == 'test':
            denoiser_test(model)
        else:
            print('[!]Unknown phase')
            exit(0)
def main(_):
    """CPU-only entry point: create dirs, build the LR schedule, and
    dispatch 'train'/'test'."""
    for folder in (args.ckpt_dir, args.test_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)

    # Constant learning rate, reduced 10x from epoch 30.
    lr = args.lr * np.ones([args.epoch])
    lr[30:] = lr[0] / 10.0

    print("CPU\n")
    with tf.Session() as sess:
        model = denoiser(sess)
        if args.phase == 'train':
            denoiser_train(model, lr=lr)
        elif args.phase == 'test':
            denoiser_test(model)
        else:
            print('[!]Unknown phase')
            exit(0)
def test_SAR2SAR(img):
    """Denoise a SAR intensity image with a pretrained SAR2SAR model.

    Temporarily puts 'SAR2SAR-GRD-test' on sys.path so its TF1-style
    `model.denoiser` can be imported, runs the network on the
    log-normalized image, and returns the raw network output.
    """
    sys.path.append('SAR2SAR-GRD-test')
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
    from model import denoiser
    tf.reset_default_graph()
    # DEFINE PARAMETERS OF SPECKLE AND NORMALIZATION FACTOR
    M = 10.089038980848645  # max log-intensity used for normalization
    m = -1.429329123112601  # min log-intensity used for normalization
    L = 1  # number of looks
    c = (1 / 2) * (special.psi(L) - np.log(L))
    cn = c / (M - m)  # normalized (0,1) mean of log speckle
    def normalize_sar(im):
        # Map log-intensity into [0, 255] using the (m, M) range above.
        return ((np.log(im + np.spacing(1)) - m) * 255 / (M - m)).astype('float32')
    img = normalize_sar(img) / 255.0
    with tf.Session() as sess:
        model = denoiser(sess)
        model.load('SAR2SAR-GRD-test/checkpoint')
        # NOTE(review): this placeholder is created but never used below.
        Y_ = tf.placeholder(tf.float32, [None, None, None, 1],
                            name='clean_image')
        # assumes img has the (batch, H, W, 1) shape model.Y_ expects —
        # TODO confirm against the SAR2SAR model definition
        pred = sess.run([model.Y], feed_dict={model.Y_: img})
    sys.path.remove('SAR2SAR-GRD-test')
    def denormalize_sar(im):
        # Inverse of normalize_sar. NOTE(review): defined but never
        # called, so the returned prediction stays in the normalized
        # log domain — confirm whether denormalization was intended.
        return np.exp((M - m) * (np.squeeze(im)).astype('float32') + m)
    return pred[0][0, ...]
def test():
    """Open a default-configured session and run the denoiser test phase."""
    with tf.Session() as sess:
        denoiser_test(denoiser(sess))
def main(_):
    """Entry point dispatching on args.phase.

    Supports local CPU testing ('test', 'testcmp'), several distributed
    training modes built on tf.train ('train', 'trainconv',
    'trainconvpatch', 'traincmp'), a PSNR comparison pass ('compare'),
    a single-process patch trainer ('alone_conv_patch') and a shuffle
    demo ('batch_cmp').
    """
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    config = tf.ConfigProto(
        device_count={"CPU": 32},  # limit to num_cpu_core CPU usage
        inter_op_parallelism_threads=16,
        intra_op_parallelism_threads=32)
    # Constant learning rate, divided by 10 from epoch 30 onwards.
    lr = args.lr * np.ones([args.epoch])
    lr[30:] = lr[0] / 10.0
    if args.phase == 'test':
        with tf.device("/cpu:0"):
            with tf.Session(config=config) as sess:
                model = denoiser(sess, sigma=args.sigma, isDenoise=True)
                denoiser_test(model)
    elif args.phase == 'testcmp':
        with tf.device("/cpu:0"):
            with tf.Session(config=config) as sess:
                model = cmpdenoiser(sess)
                denoiser_test(model)
    elif args.phase == 'train':
        # distribution check
        server, cluster = createServer()
        if FLAGS.job_name == "ps":
            # Parameter servers just host variables; block forever.
            server.join()
        elif FLAGS.job_name == "worker":
            worker_device = "/job:worker/task:%d" % FLAGS.task_index
            with tf.device(
                    tf.train.replica_device_setter(worker_device=worker_device,
                                                   ps_device="/job:ps/cpu:0",
                                                   cluster=cluster)):
                model = denoiser(sigma=args.sigma)
                denoiser_train(model, server, FLAGS.task_index, lr=lr)
    elif args.phase == 'trainconv':
        server, cluster = createServer()
        if FLAGS.job_name == "ps":
            server.join()
        elif FLAGS.job_name == "worker":
            worker_device = "/job:worker/task:%d" % FLAGS.task_index
            with tf.device(
                    tf.train.replica_device_setter(worker_device=worker_device,
                                                   ps_device="/job:ps/cpu:0",
                                                   cluster=cluster)):
                model = convdenoiser()
                conv_denoise_train(model, server, FLAGS.task_index, lr=lr)
    elif args.phase == 'trainconvpatch':
        server, cluster = createServer()
        if FLAGS.job_name == "ps":
            server.join()
        elif FLAGS.job_name == "worker":
            worker_device = "/job:worker/task:%d" % FLAGS.task_index
            with tf.device(
                    tf.train.replica_device_setter(worker_device=worker_device,
                                                   ps_device="/job:ps/cpu:0",
                                                   cluster=cluster)):
                # Task 0 acts as chief (is_chief) for this trainer.
                model = convpatchdenoiser(num_workers=len(
                    FLAGS.worker_hosts.split(",")),
                                          is_chief=FLAGS.task_index == 0)
                conv_patch_denoise_train(model, server, FLAGS.task_index)
    elif args.phase == 'traincmp':
        server, cluster = createServer()
        if FLAGS.job_name == "ps":
            server.join()
        elif FLAGS.job_name == "worker":
            worker_device = "/job:worker/task:%d" % FLAGS.task_index
            with tf.device(
                    tf.train.replica_device_setter(worker_device=worker_device,
                                                   ps_device="/job:ps/cpu:0",
                                                   cluster=cluster)):
                cmp_denoise_train(server,
                                  FLAGS.task_index,
                                  num_worker=FLAGS.worker_hosts.split(","))
    elif args.phase == 'compare':
        # Average PSNR between paired denoised/noisy JPEGs.
        denoise_files = glob('./data/test/{}/*.jpg'.format(args.denoise_set))
        noise_files = glob('./data/test/{}/*.jpg'.format(args.denoise_set +
                                                         '_nodenoise'))
        diffs = 0.0
        for file in denoise_files:
            noise_file = find_match_file([file], noise_files)
            noise_image = load_images(noise_file[0]).astype(np.float32) / 255.0
            denoise_image = load_images(file).astype(np.float32) / 255.0
            save_img = np.clip(255 * noise_image, 0, 255).astype('uint8')
            save_images(
                os.path.join(args.test_dir,
                             file.split("/")[-1].split("_")[0] + ".jpg"),
                save_img)
            diff = cal_psnr(denoise_image, noise_image)
            diffs += diff
        # NOTE(review): the mean PSNR is computed but never printed or
        # returned — confirm whether a report step is missing.
        diffs = diffs / len(denoise_files)
    elif args.phase == 'alone_conv_patch':
        with tf.device("/cpu:0"):
            model = convpatchdenoiser()
            conv_patch_denoise_train(model)
    elif args.phase == 'batch_cmp':
        place = [n for n in range(0, 20)]
        print(place)
        np.random.shuffle(place)
        print(place)
        # with load_data(filepath='./data/img_denoise_pats.npy', rand=False) as data_denoise:
        #     with load_data(filepath='./data/img_noise_pats.npy', rand=False) as data_noise:
        #         numBatch = int(data_denoise.shape[0] / 128)
        #         noise_num = int(data_denoise.shape[0] / 128)
        #         print ('%d---%d' % (numBatch, noise_num))
        #         for i in range(0, 10):
        #             num = random.randint(0,numBatch)
        #             print (num)
        #             denoise = data_denoise[num:num + 1, :, :, :]
        #             noise = data_noise[num:num + 1, :, :, :]
        #             save_images(os.path.join(args.test_dir, 'denoised%d.jpg' % i), denoise)
        #             save_images(os.path.join(args.test_dir, 'noise%d.jpg' % i), noise)
    else:
        print('[!]Unknown phase')
        exit(0)
def main(_):
    """Build per-run folder names from cost/eps/sigma/type, load MNIST,
    build the LR schedule, and run the requested phase."""
    # The following string is attached to checkpoint, log and image
    # folder names.
    eps_str = str(int(np.log10(args.eps)))
    name = "NET_" + args.cost + "_eps1e" + eps_str + "_sigma" + str(
        int(args.sigma)) + "_" + str(args.type)
    ckpt_dir = args.ckpt_dir + "/" + name
    sample_dir = args.sample_dir + "/" + name
    test_dir = args.test_dir + "/" + name
    log_dir = args.log_dir + "/" + name
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    # Learning rate decay schedule: constant, /10 after 100 epochs.
    lr = args.lr * np.ones([args.epoch])
    lr[100:] = lr[0] / 10.0

    # Dataset.
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    print("Model: %s" % (name))

    # Build the session config once; the phase dispatch is shared by the
    # GPU and CPU paths.
    if args.use_gpu:  # added to control the gpu memory
        print("GPU\n")
        session_config = tf.ConfigProto(
            gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))
    else:
        print("CPU\n")
        session_config = None

    with tf.Session(config=session_config) as sess:
        model = denoiser(sess,
                         dataset=mnist,
                         sigma=args.sigma,
                         eps=args.eps,
                         cost_str=args.cost,
                         ckpt_dir=ckpt_dir,
                         sample_dir=sample_dir,
                         log_dir=log_dir)
        if args.phase == 'train':
            denoiser_train(model, lr=lr)
        elif args.phase == 'test':
            denoiser_test(model, test_dir)
        else:
            print('[!]Unknown phase')
            exit(0)
# Location of the pretrained SAR2SAR checkpoint (Colab path).
checkpoint_dir = '/content/SAR2SAR-test/checkpoint'
if not os.path.exists(args.test_dir):
    os.makedirs(args.test_dir)
from model import denoiser


def denoiser_test(denoiser):
    """Run the pretrained model on every .npy file under args.test_data
    and store the results in args.test_dir."""
    test_data = args.test_data
    print(
        "[*] Start testing on real data. Working directory: %s. Collecting data from %s and storing test results in %s"
        % (os.getcwd(), test_data, args.test_dir))
    # NOTE(review): .format('float32') on a pattern with no {} placeholder
    # is a no-op; the glob simply matches test_data/*.npy.
    test_files = glob((test_data + '/*.npy').format('float32'))
    denoiser.test(test_files,
                  ckpt_dir=checkpoint_dir,
                  save_dir=args.test_dir,
                  dataset_dir=test_data,
                  stride=args.stride_size)


if __name__ == '__main__':
    if args.use_gpu:
        # Cap per-process GPU memory so other jobs can share the card.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:
            model = denoiser(sess)
            denoiser_test(model)
    else:
        with tf.Session() as sess:
            model = denoiser(sess)
            denoiser_test(model)
# encoding: utf-8
from options import Options
from model import denoiser
from data_loader import load_test_data
import time

if __name__ == '__main__':
    # Parse runtime options and construct the denoiser from them.
    opts = Options().initialize()
    net = denoiser(opts)
    # Optionally switch the model to evaluation mode first.
    if opts.eval:
        net.eval()
    # Run the test routine with its second argument fixed at 64.
    net.test_new(opts, 64)
# NOTE(review): this chunk starts mid-script; `res`, `file`, `sess`,
# `img_in`, `utils` and `args` are defined earlier in the file.
print(res)
file.write(res + '\n')

#---------------------------------------------------------------------------
# experiment 2 : denoise this image with a CNN Gaussian denoiser
# (trained with quantile) for all sigma values.
# Goal: see the influence of the CNN.
#---------------------------------------------------------------------------
# Checkpoint directories and display names for the four denoiser variants.
list_ckp = [
    "./checkpoint", "./checkpoint_sar", "checkpoint_sar_norm",
    "./checkpoint_sar_1"
]
list_name = ["DuCNN25Nat", "DuCNN25Sar_quant", "DuCNN25Sar", "DuCNN13SarQuant"]
list_lamb = [1, 1, 1, 3]
if (True):
    print("eperiment 2:")
    model = denoiser(sess, sigma=25, add_noise=False)
    list_beta = [1, 1, 1, 4]
    for i in range(len(list_beta)):
        ckpt_dir = list_ckp[i]
        model.load(ckpt_dir)
        # Rescale the input image into [0, 1] before feeding the network.
        C = np.max(img_in) - np.min(img_in)
        b = img_in.min() / C
        img_inp = img_in / C - b
        # assumes img_in is 2-D; note the (shape[1], shape[0]) order used
        # here — TODO confirm the transposed reshape is intended
        img_input = img_inp.reshape(1, img_in.shape[1], img_in.shape[0], 1)
        out2, _, _ = model.denoise(img_input)
        out2 = out2.reshape(img_in.shape)
        # Undo the [0, 1] rescaling, then add the debiasing offset.
        out2 = C * (out2 + b)
        out2 = out2 + utils.debiased(args.L)
def test():
    """Run the denoiser test phase with GPU memory capped at 90%."""
    session_config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))
    with tf.Session(config=session_config) as sess:
        denoiser_test(denoiser(sess))
# encoding: utf-8 from options import Options from model import denoiser from data_loader import Data_loader import time from torch.utils.data import DataLoader import torchvision.utils as tv from tensorboardX import SummaryWriter import numpy as np if __name__ == '__main__': args=Options().initialize() model = denoiser(args) writer = SummaryWriter('runs/threshold_16_vflip') start_epoch = 0 start_time = time.time() print("start training....") model.print_networks(args.verbose) niter=0 dataset = Data_loader() dataloader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=0, drop_last=False) batch_num=len(dataloader) for epoch in range(start_epoch, args.epoch): batch_id=0 for labels, inputs in dataloader: batch_id = batch_id + 1 data=[labels, inputs] input_widefield,input_confocal= model.set_input(data) # model.evaluate(iter_num, args) # whether pretrained model.optimize_parameters() loss = model.get_loss()