Example #1
def denoiser_train(denoiser, lr):
    noisy_eval_files = glob('./data/train/noisy/*.png')
    noisy_eval_files = sorted(noisy_eval_files)
    print(noisy_eval_files)
    eval_files = glob('./data/train/original/*.png')
    eval_files = sorted(eval_files)
    denoiser.train(eval_files, noisy_eval_files, batch_size=args.batch_size, ckpt_dir=args.ckpt_dir, epoch=args.epoch, lr=lr)
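Note that this example (like Examples #10 and #19) pairs the clean and noisy sets purely by sorting two independent globs, which silently misaligns pairs if either directory is missing a file. A minimal sanity check along these lines could be run first (the helper name is illustrative, not from the source):

import os

def check_pairing(clean_files, noisy_files):
    # assert the sorted clean/noisy lists line up one-to-one by basename
    assert len(clean_files) == len(noisy_files), "clean/noisy counts differ"
    for c, n in zip(clean_files, noisy_files):
        assert os.path.basename(c) == os.path.basename(n), \
            "pair mismatch: %s vs %s" % (c, n)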
Example #2
def denoiser_train(denoiser, lr):
    #
    # load noisy and clean data
    #
    print("[*] Loading data...")

    #--------------------------------------------------------------------------
    #  apply log(x+1) to the raw value
    #--------------------------------------------------------------------------

    noisy_data = np.load(
        '../../model_input/rand2d/rand2d_noisy_pats_1e+05.npy')
    clean_data = np.load(
        '../../model_input/rand2d/rand2d_clean_pats_1e+05.npy')

    # print(noisy_data.shape, clean_data.shape)

    noisy_data = np.log(noisy_data + 1.)
    clean_data = np.log(clean_data + 1.)

    denoiser.train(noisy_data,
                   clean_data,
                   batch_size=args.batch_size,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr)
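Because training pairs here live in log(x+1) space, predictions have to be mapped back before they are compared against raw values. A minimal sketch of the inverse transform, assuming the network output stays in the same log space:

import numpy as np

def inverse_log1p(pred):
    # undo y = log(x + 1): x = exp(y) - 1
    return np.expm1(pred)  # more accurate than np.exp(pred) - 1 near zero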
Example #3
def denoiser_train(denoiser, lr):
    eval_files = natsort.natsorted(
        glob('./data/test/{}/*.png'.format(args.eval_set)))
    denoiser.train(args.data,
                   eval_files,
                   batch_size=args.batch_size,
                   epoch=args.epoch,
                   lr=lr)
Example #4
def denoiser_train(denoiser, lr):
    with load_data(filepath='./data/img_clean_pats.npy') as data:
        # if memory is limited, comment out this line and uncomment line 99 in model.py
        data = data.astype(np.float32) / 255.0  # normalize the data to 0-1
        eval_files = glob('./data/test/{}/*.png'.format(args.eval_set))
        eval_data = load_images(eval_files)  # list of 4-D arrays of different sizes; pixel values in [0, 255]
        denoiser.train(data, eval_data, batch_size=args.batch_size, ckpt_dir=args.ckpt_dir, epoch=args.epoch, lr=lr,
                       sample_dir=args.sample_dir)
Example #5
def denoiser_train(denoiser, lr):
    input_data = np.load('./data/input_data_pats.npy')
    label_data = np.load('./data/label_data_pats.npy')
    denoiser.train(input_data,
                   label_data,
                   batch_size=args.batch_size,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr)
Example #6
def denoiser_train(denoiser, lr):
    #
    # load noisy and clean data
    #
    print("[*] Loading data...")

    #--------------------------------------------------------------------------
    #  apply log(x+1) to the raw value, and select maxV to normalize
    #--------------------------------------------------------------------------

    noisy_data = np.load(
        '../../model_input/rand2d_homo_partial/rand2d_noisy_pats_1e+04.npy')
    clean_data = np.load(
        '../../model_input/rand2d_homo_partial/rand2d_clean_pats_1e+04.npy')
    print(noisy_data.shape, clean_data.shape)

    #--- max value ---#
    print "\nprev log(x+1)"
    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print max_noisy, max_clean, maxV

    #--- apply log(x + 1) ---#
    noisy_data = np.log(noisy_data + 1.)
    clean_data = np.log(clean_data + 1.)

    print "\nafter log(x+1)"
    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print max_noisy, max_clean, maxV

    maxV = float(int(maxV) + 3)  # add extra 3 for upper bound

    if maxV <= 25.0:
        maxV = 25.0

    print "Using %f for maxV (after log)" % maxV

    print "\nSaving maxV to use in matlab..."
    sio.savemat('maxV.mat', dict(maxV=maxV))

    print "\nDone!"

    #--- normalize with maxV ---#
    noisy_data = noisy_data / maxV
    clean_data = clean_data / maxV

    #--- run training ---#
    denoiser.train(noisy_data,
                   clean_data,
                   batch_size=args.batch_size,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr)
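Saving maxV to maxV.mat makes the normalization reversible downstream (e.g. in MATLAB). The equivalent round trip in Python would look roughly like this, assuming predictions stay in the normalized log space:

import numpy as np
import scipy.io as sio

def denormalize(pred, maxv_file='maxV.mat'):
    # undo x -> log(x + 1) / maxV applied before training
    maxV = sio.loadmat(maxv_file)['maxV'].item()
    return np.expm1(pred * maxV)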
Example #7
def denoiser_train(denoiser, lr):
    #
    # load noisy and clean data
    #
    print("[*] Loading data...")

    #--------------------------------------------------------------------------
    #  apply log(x+1) to the raw value, and select maxV to normalize
    #--------------------------------------------------------------------------

    noisy_data = np.load('../../model_input/osa/osa_noisy_pats.npy')
    clean_data = np.load('../../model_input/osa/osa_clean_pats.npy')
    print(noisy_data.shape, clean_data.shape)


    #--- max value ---#
    print "\nprev log(x+1)"
    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print max_noisy, max_clean, maxV


    #--- apply log(x + 1) ---#
    noisy_data = np.log(noisy_data + 1.)
    clean_data = np.log(clean_data + 1.)

    #print "\nafter log(x+1)"
    #print "noisy_max \t clean_max \t max"
    #max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    ## maxV = max(max_noisy, max_clean)
    #print max_noisy, max_clean


    #maxV = 25.
    #print "Using %f for maxV (after log)" % maxV

    #print "\nSaving maxV to use in matlab..."
    #sio.savemat('maxV.mat', dict(maxV=maxV))

    #print  "\nDone!"


    ##--- normalize with maxV ---#
    #noisy_data = noisy_data / maxV 
    #clean_data = clean_data / maxV 


    #--- run training ---#
    denoiser.train(noisy_data,
                   clean_data,
                   batch_size=args.batch_size,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr)
Example #8
def denoiser_train(denoiser, lr):
    with load_data(filepath='./noisydata/ndct/train/raw_float_pats.npy') as ndct_data, load_data(filepath='./noisydata/sparse/train/raw_float_pats.npy') as ldct_data:
        # if memory is limited, comment out this line and uncomment line 99 in model.py
        ldct_eval_files = sorted(glob('./noisydata/sparse/train/*img.flt'))
        print(ldct_eval_files)
        ldct_eval_data = load_floats(ldct_eval_files)  # list of 4-D arrays of different sizes; pixel values in [0, 255]
        ndct_eval_files = sorted(glob('./noisydata/ndct/train/*img.flt'))
        print(ndct_eval_files)
        ndct_eval_data = load_floats(ndct_eval_files)  # list of 4-D arrays of different sizes; pixel values in [0, 255]
        denoiser.train(ndct_data, ldct_data, ndct_eval_data, ldct_eval_data, batch_size=args.batch_size, ckpt_dir=args.ckpt_dir, epoch=args.epoch, lr=lr,
                       sample_dir=args.sample_dir)
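load_floats is not defined in any of these snippets; for raw .flt files it is plausibly a thin wrapper over np.fromfile, roughly like the sketch below (the 512x512 size and the (1, H, W, 1) shape are assumptions, not from the source):

import numpy as np

def load_floats(file_list, height=512, width=512):
    # read each raw float32 .flt file into a (1, H, W, 1) array
    return [np.fromfile(path, dtype=np.float32).reshape(1, height, width, 1)
            for path in file_list]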
Example #9
def conv_denoise_train(denoiser, server, task_index, lr):
    denoise_files = glob('./data/test/{}/*.jpg'.format(args.denoise_set))
    noise_files = glob('./data/test/{}/*.jpg'.format(args.denoise_set +
                                                     '_nodenoise'))
    denoiser.train(server,
                   denoise_files,
                   noise_files,
                   width=args.img_width,
                   height=args.img_height,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr,
                   task_index=task_index)
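The server and task_index arguments here (and in Examples #15 and #16) follow the TensorFlow 1.x between-graph replication pattern. A minimal driver that would produce them might look like this; the cluster layout and ports are placeholders, not from the source:

import tensorflow as tf

cluster = tf.train.ClusterSpec({
    'ps': ['localhost:2222'],
    'worker': ['localhost:2223', 'localhost:2224'],
})
server = tf.train.Server(cluster, job_name='worker', task_index=0)
# each worker process would then call, e.g.:
# conv_denoise_train(denoiser, server, task_index=0, lr=args.lr)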
Example #10
def denoiser_train(denoiser, lr):
    noisy_eval_files = glob(f'{args.dir}/train/noisy/*.png')
    noisy_eval_files = sorted(noisy_eval_files)
    print(noisy_eval_files)
    eval_files = glob(f'{args.dir}/train/original/*.png')
    eval_files = sorted(eval_files)
    denoiser.train(eval_files,
                   noisy_eval_files,
                   batch_size=args.batch_size,
                   ckpt_dir=os.path.join(args.dir, args.ckpt_dir),
                   epoch=args.epoch,
                   lr=lr,
                   evaluate_files=args.evaluate_files)
Example #11
def denoiser_train(denoiser, lr):

    train_files = glob('./data/{}/*.png'.format('Train400'))
    eval_files = glob('./data/{}/*.png'.format(args.eval_set))
    data = np.asarray(load_images(train_files))
    eval_data = load_images(eval_files)
    denoiser.train(data,
                   eval_data,
                   batch_size=args.batch_size,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr,
                   sample_dir=args.sample_dir)
Example #12
def denoiser_train(denoiser, lr):
    with load_data(filepath='./data/img_clean_pats.npy') as data:
        # if memory is limited, comment out this line and uncomment line 99 in model.py
        data = data.astype(np.float32) / 255.0  # normalize the data to 0-1
        eval_files = glob('./data/test/{}/*.png'.format(args.eval_set))
        eval_data = load_images(
            eval_files
        )  # list of 4-D arrays of different sizes; pixel values in [0, 255]
        denoiser.train(data,
                       eval_data,
                       batch_size=args.batch_size,
                       ckpt_dir=args.ckpt_dir,
                       epoch=args.epoch,
                       lr=lr,
                       sample_dir=args.sample_dir)
Example #13
File: main.py Project: npovey/unet15
def denoiser_train(denoiser, lr):
    ndct_train = sorted(glob(args.ndct_train_file_path))
    ldct_train = sorted(glob(args.ldct_train_file_path))

    ndct_eval_data = sorted(glob(args.ndct_test_file_path))
    ldct_eval_data = sorted(glob(args.ldct_test_file_path))

    denoiser.train(ndct_train,
                   ldct_train,
                   ndct_eval_data,
                   ldct_eval_data,
                   lr,
                   ckpt_dir=args.ckpt_dir,
                   num_epochs=args.num_epochs,
                   sample_dir=args.sample_dir,
                   buffer_size=args.buffer_size)
Example #14
def denoiser_train(denoiser):
    print(args.save_dir + '/' + args.results_clean + '_' + database_output +
          '.npy')
    print(args.save_dir + '/' + args.results_noisy + '_' + database_output +
          '.npy')
    with load_data(filepath=args.save_dir + '/' + args.results_clean + '_' +
                   database_output + '.npy') as data_clean:
        with load_data(filepath=args.save_dir + '/' + args.results_noisy +
                       '_' + database_output + '.npy') as data_noisy:
            # if memory is limited, comment out this line and uncomment line 99 in model.py
            data_clean = data_clean.astype(
                np.float32) / 255.0  # normalize the data to 0-1
            data_noisy = data_noisy.astype(
                np.float32) / 255.0  # normalize the data to 0-1
            eval_noisy_files = sorted(
                glob('./data/test/{}/*.png'.format(args.eval_noisy_set)))
            eval_clean_files = sorted(
                glob('./data/test/{}/*.png'.format(args.eval_clean_set)))
            eval_data_noisy = load_images(
                eval_noisy_files
            )  # list of 4-D arrays of different sizes; pixel values in [0, 255]
            eval_data_clean = load_images(
                eval_clean_files
            )  # list of 4-D arrays of different sizes; pixel values in [0, 255]
            print("work")

            numBatch = int(data_clean.shape[0] / args.batch_size)
            max_iter_number = 51200
            epochs = args.epoch
            if numBatch * epochs < max_iter_number:
                epochs = round(max_iter_number / numBatch)

            lr = args.lr * np.ones([epochs])
            lr[30:] = lr[0] / 10.0

            denoiser.train(data_clean,
                           data_noisy,
                           eval_data_clean,
                           eval_data_noisy,
                           batch_size=args.batch_size,
                           ckpt_dir=checkpoint_dir,
                           epoch=epochs,  # pass the extended count so it matches len(lr)
                           lr=lr,
                           sample_dir=sample_dir,
                           logs_dir=logs_dir)
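The numBatch block above stretches the epoch count so training always runs at least max_iter_number optimizer steps; with illustrative numbers (not from the source):

numBatch = 1600                                 # e.g. 204800 patches / batch_size 128
max_iter_number = 51200
epochs = 10                                     # args.epoch
if numBatch * epochs < max_iter_number:         # 16000 < 51200
    epochs = round(max_iter_number / numBatch)  # -> 32

Note that lr[30:] = lr[0] / 10.0 only takes effect when the extended count exceeds 30 epochs; for shorter runs the slice is a no-op.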
Example #15
def conv_patch_denoise_train(denoiser, server=None, task_index=0):
    with load_data(filepath='./data/img_denoise_pats.npy',
                   rand=False) as data_denoise:
        with load_data(filepath='./data/img_noise_pats.npy',
                       rand=False) as data_noise:
            data_denoise = data_denoise.astype(
                np.float32) / 255.0  # normalize the data to 0-1
            data_noise = data_noise.astype(
                np.float32) / 255.0  # normalize the data to 0-1
            denoiser.train(server,
                           data_denoise,
                           data_noise,
                           './data/test/{}/*.jpg'.format(args.test_set),
                           args.test_dir,
                           128,
                           ckpt_dir=args.ckpt_dir,
                           epoch=args.epoch,
                           task_index=task_index)
Example #16
def cmp_denoise_train(server, task_index, num_worker=7):
    with load_data(filepath='./data/img_denoise_pats.npy',
                   rand=False) as data_denoise:
        with load_data(filepath='./data/img_noise_pats.npy',
                       rand=False) as data_noise:
            denoiser = cmpdenoiser(num_workers=num_worker,
                                   is_chief=FLAGS.task_index == 0)
            print(len(data_denoise))
            data_denoise = data_denoise.astype(
                np.float32) / 255.0  # normalize the data to 0-1
            data_noise = data_noise.astype(
                np.float32) / 255.0  # normalize the data to 0-1
            denoiser.train(server,
                           data_denoise,
                           data_noise,
                           batch_size=args.batch_size,
                           ckpt_dir=args.ckpt_dir,
                           epoch=args.epoch,
                           task_index=task_index)
Example #17
def denoiser_train(denoiser, lr):
    #
    # load noisy and clean data
    #
    print("[*] Loading data...")

    #--------------------------------------------------------------------------
    #  apply log(x+1) to the raw value, and select maxV to normalize
    #--------------------------------------------------------------------------

    #
    # step 2: train DAE with noisy images
    noisy_data = np.load('../../model_input/srcpos_hetero/noisy_pats.npy')
    #noisy_data = np.load('../../model_input/srcpos_hetero/clean_pats.npy')

    clean_data = np.load('../../model_input/srcpos_hetero/clean_pats.npy')

    print(noisy_data.shape, clean_data.shape)

    #--- max value ---#
    print "\nprev log(x+1)"

    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print("{}\t{}\t{}".format(max_noisy, max_clean, maxV))


    print "noisy_min \t clean_min "
    min_noisy, min_clean = np.amin(noisy_data), np.amin(clean_data)
    print("{}\t{}".format(min_noisy, min_clean))


    #--- apply log(x + 1) ---#
    noisy_data = np.log(noisy_data + 1.)
    clean_data = np.log(clean_data + 1.)


    print "\nafter log(x+1)"
    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print max_noisy, max_clean, maxV


    maxV = float(int(maxV) + 3)  # add extra 3 for upper bound

    # if maxV <= 25 use 25, else use maxV
    if maxV <= 25.0:
        maxV = 25.0

    print("Using %f for maxV (after log)" % maxV)

    print("\nSaving maxV to use in MATLAB...")
    sio.savemat('maxV.mat', dict(maxV=maxV))

    print("\nDone!")


    #--- normalize with maxV ---#
    noisy_data = noisy_data / maxV
    clean_data = clean_data / maxV

    print(noisy_data.shape)
    print(clean_data.shape)


    #
    # MCX images are 100x100 by default; resize them to 128x128 here
    #

    noisy_data_resize = transform_resize(noisy_data)
    clean_data_resize = transform_resize(clean_data)

    print(noisy_data_resize.shape)
    print(clean_data_resize.shape)

    #--- run training ---#
    denoiser.train(noisy_data_resize,
                   clean_data_resize,
                   batch_size=args.batch_size,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr)
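transform_resize is not shown in this snippet; one plausible implementation, assuming an (N, H, W, C) stack and bilinear interpolation (both assumptions), is:

import numpy as np
from scipy.ndimage import zoom

def transform_resize(pats, target=128):
    # zoom each image in an (N, H, W, C) stack to (target, target) bilinearly
    n, h, w, c = pats.shape
    out = np.empty((n, target, target, c), dtype=pats.dtype)
    for i in range(n):
        out[i] = zoom(pats[i], (target / h, target / w, 1.0), order=1)
    return out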
Example #18
def denoiser_train(denoiser, lr):
    #
    # load noisy and clean data
    #
    print("[*] Loading data...")

    #--------------------------------------------------------------------------
    #  apply log(x+1) to the raw value, and select maxV to normalize
    #--------------------------------------------------------------------------


    # 1) homo: around 12K images:   osa_img_noisy_pats_1e+05.npy
    #
    #    the original data is located at: data/osa/1e+05
    #
    # 2) hetero: around 8K images:  rand2d/rand2d_noisy_pats_1e+05.npy
    #
    #    to generate rand2d() heterogeneous data, see: prepare_data/rand2d_old
    #
    #    the original data is located at: data/rand2d/1e+05

    noisy_data = np.load('../../model_input/homo_hetero/osa_rand2d_noisy.npy')
    clean_data = np.load('../../model_input/homo_hetero/osa_rand2d_clean.npy')
    print(noisy_data.shape, clean_data.shape)


    #--- max value ---#
    print "\nprev log(x+1)"
    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print max_noisy, max_clean, maxV


    #--- apply log(x + 1) ---#
    noisy_data = np.log(noisy_data + 1.)
    clean_data = np.log(clean_data + 1.)

    print "\nafter log(x+1)"
    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print max_noisy, max_clean, maxV


    #maxV = float(int(maxV) + 3)  # add extra 3 for upper bound
    maxV = 25.
    print("Using %f for maxV (after log)" % maxV)

    print("\nSaving maxV to use in MATLAB...")
    sio.savemat('maxV.mat', dict(maxV=maxV))

    print("\nDone!")


    #--- normalize with maxV ---#
    noisy_data = noisy_data / maxV 
    clean_data = clean_data / maxV 


    #--- run training ---#
    denoiser.train(noisy_data,
                   clean_data,
                   batch_size=args.batch_size,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr)
Example #19
def denoiser_train(denoiser, lr):
    noisy_eval_files = glob('/home/mli/tomograms/pycharm_demos/single_density/without_Gaussian_shapes/normal_test/with_noise/1024*1024_with_oversampling=2/phase_contrast_images/Gaussian_noise_PPSNR=12dB/npy_format/phase_contrast_image_distance_4_in_npy_format/validate/*.npy')
    noisy_eval_files = sorted(noisy_eval_files)
    eval_files = glob('/home/mli/tomograms/pycharm_demos/single_density/without_Gaussian_shapes/normal_test/without_noise/1024*1024_with_oversampling=2/phase_contrast_images_1_without_elliptical_cylinder/nd_zoom_order=1/npy_format/phase_contrast_image_distance_4_in_npy_format/validate/*.npy')
    eval_files = sorted(eval_files)
    denoiser.train(eval_files, noisy_eval_files, batch_size=args.batch_size, ckpt_dir=args.ckpt_dir, epoch=args.epoch, lr=lr)
Example #20
def denoiser_train(denoiser, lr):
    #
    # load noisy and clean data
    #
    print("[*] Loading data...")

    #--------------------------------------------------------------------------
    #  apply log(x+1) to the raw value, and select maxV to normalize
    #--------------------------------------------------------------------------

    #noisy_data = np.load('../../model_input/spie2d/noisy1e4.npy')
    #clean_data = np.load('../../model_input/spie2d/clean1e7.npy')

    #noisy_data = np.load('../../model_input/spie2d/journal2_hom_pack1_p5.npy')
    #clean_data = np.load('../../model_input/spie2d/journal2_hom_pack1_p8.npy')

    noisy_data = np.load('../../model_input/spie_2d_hetgrid_p567/hetgrid_p5.npy')
    clean_data = np.load('../../model_input/spie_2d_hetgrid_p567/hetgrid_p7.npy')

    print(noisy_data.shape)
    print(clean_data.shape)


    #--- max value ---#
    print "\nprev log(x+1)"
    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print max_noisy, max_clean, maxV


    #--- apply log(x + 1) ---#
    noisy_data = np.log(noisy_data + 1.)
    clean_data = np.log(clean_data + 1.)

    print "\nafter log(x+1)"
    print "noisy_max \t clean_max \t max"
    max_noisy, max_clean = np.amax(noisy_data), np.amax(clean_data)
    maxV = max(max_noisy, max_clean)
    print max_noisy, max_clean, maxV


    #maxV = float(int(maxV) + 3)  # add extra 3 for upper bound
    maxV = 25.
    print("Using %f for maxV (after log)" % maxV)

    print("\nSaving maxV to use in MATLAB...")
    sio.savemat('maxV.mat', dict(maxV=maxV))

    print("\nDone!")


    #--- normalize with maxV ---#
    noisy_data = noisy_data / maxV 
    clean_data = clean_data / maxV 


    #--- run training ---#
    denoiser.train(noisy_data,
                   clean_data,
                   batch_size=args.batch_size,
                   ckpt_dir=args.ckpt_dir,
                   epoch=args.epoch,
                   lr=lr)