def main():
    # load data
    options = get_parser().parse_args()
    dataset = get_standard_dataset('lodopab')
    test_data = dataset.get_data_pairs('validation')
    ray_trafo = dataset.ray_trafo
    reduced_dataset = RandomSampleDataset(dataset, size_part=0.1,
                                          seed=options.seed)

    reconstructor = LearnedPDReconstructor(ray_trafo=ray_trafo, num_workers=8)
    reconstructor.load_hyper_params('params')
    reconstructor.save_best_learned_params_path = 'best-model-{}'.format(
        options.seed)
    reconstructor.log_dir = options.log_dir

    # name for the results report (assumption: derived from the seed; the
    # snippet itself does not define full_name)
    full_name = 'lodopab_learnedpd_{}'.format(options.seed)

    # create a Dival task table and run it
    task_table = TaskTable()
    task_table.append(reconstructor=reconstructor,
                      measures=[PSNR, SSIM],
                      test_data=test_data,
                      dataset=reduced_dataset,
                      hyper_param_choices=[reconstructor.hyper_params])
    results = task_table.run()

    # save report
    save_results_table(results, full_name)

def main():
    # load data
    options = get_parser().parse_args()
    dataset = load_standard_dataset(options.dataset, ordered=True)
    ray_trafo = dataset.ray_trafo

    X = ray_trafo.range
    lodopab_cache_file_names = {
        'train': [None, None],
        'validation': [None, None]
    }

    if options.method == 'fbpunet':
        X = ray_trafo.domain
        train_path = 'cache_train_{}_fbp.npy'.format(options.dataset)
        validation_path = 'cache_validation_{}_fbp.npy'.format(
            options.dataset)
        lodopab_cache_file_names = {
            'train': [train_path, None],
            'validation': [validation_path, None]
        }

    cached_dataset = CachedDataset(dataset,
                                   space=(X, ray_trafo.domain),
                                   cache_files=lodopab_cache_file_names,
                                   size_part=options.size_part)
    test_data = dataset.get_data_pairs('validation',
                                       cached_dataset.validation_len)
    print('validation size: %d' % len(test_data))

    reconstructor = get_reconstructor(options.method,
                                      dataset=options.dataset,
                                      size_part=options.size_part,
                                      pretrained=False)
    print(reconstructor.hyper_params)

    full_name = '{}_{}_{}'.format(options.dataset, options.method,
                                  options.size_part)
    print(full_name)

    reconstructor.save_best_learned_params_path = get_weights_path(full_name)
    reconstructor.log_dir = options.log_dir
    reconstructor.num_data_loader_workers = 16

    # create a Dival task table and run it
    task_table = TaskTable()
    task_table.append(
        reconstructor=reconstructor,
        measures=[PSNR, SSIM],
        test_data=test_data,
        dataset=cached_dataset,
        hyper_param_choices=[reconstructor.hyper_params]
    )
    results = task_table.run()

    # save report
    save_results_table(results, full_name)

def main(): """Main function""" options = get_parser().parse_args() # options.dataset = 'lodopab' | 'ellipses' | 'lodopab-sparse' dataset = load_standard_dataset(options.dataset) test_data = dataset.get_data_pairs('test', 100) obs = list(y for y, x in test_data) gt = list(x for y, x in test_data) start = options.start count = options.count if count is None: count = len(test_data) test_data = DataPairs(obs[start:start + count], gt[start:start + count], name='test') # load reconstructor reconstructor = get_reconstructor(options.method, options.dataset, options.size_part) # eval on the test-set print('Reconstructor: %s' % options.method) print('Dataset: %s' % options.dataset) print('Offset: %d' % start) print('Count: %d' % count) task_table = TaskTable() task_table.append(reconstructor=reconstructor, measures=[PSNR, SSIM], test_data=test_data, options={'skip_training': True}) task_table.run() print(task_table.results.to_string(show_columns=['misc'])) if options.size_part is not None: save_path = '{}_{}_{}_eval'.format(options.dataset, options.method, options.size_part) else: save_path = '{}_{}_eval'.format(options.dataset, options.method) save_path += '_offset_%d' % start save_results_table(task_table.results, save_path)
reconstructor = IRadonMapReconstructor(
    ray_trafo=ray_trafo,
    log_dir='ellipses_iradonmap/' + str(size_part),
    save_best_learned_params_path=get_weights_path(
        'ellipses_iradonmap_{}'.format(size_part)))

epochs = min(10 * full_size_epochs,
             int(1. / size_part * full_size_epochs))

# create a Dival task table and run it
task_table = TaskTable()
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  dataset=cached_dataset,
                  hyper_param_choices={'scales': [5],
                                       'skip_channels': [4],
                                       'batch_size': [64],
                                       'epochs': [epochs],
                                       'fully_learned': [True],
                                       'lr': [0.01],
                                       'use_sigmoid': [True]})
results = task_table.run()

# save report
save_results_table(results, 'ellipses_iradonmap_{}'.format(size_part))

# select best parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
params = Params(best_choice)
params.save('ellipses_iradonmap_{}'.format(size_part))
save_weights(reconstructor, 'ellipses_iradonmap_{}'.format(size_part))

reconstructor = FBPUNetReconstructor(
    ray_trafo,
    log_dir='ellipses_fbpunet_{}'.format(size_part),
    save_best_learned_params_path=get_weights_path(
        'ellipses_fbpunet_{}'.format(size_part)))

# create a Dival task table and run it
task_table = TaskTable()
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  dataset=cached_dataset,
                  hyper_param_choices={
                      'scales': [5],
                      'skip_channels': [4],
                      'channels': [channels],
                      'batch_size': [16],
                      'epochs': [epochs],
                      'lr': [0.01],
                      'filter_type': ['Hann'],
                      'frequency_scaling': [1.0]
                  })
results = task_table.run()

# save report
save_results_table(results, 'ellipses_fbpunet_{}'.format(size_part))

# select best parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
params = Params(best_choice)
params.save('ellipses_fbpunet_{}'.format(size_part))

dataset = load_standard_dataset('lodopab_200')
test_data = dataset.get_data_pairs('validation', 4)

task_table = TaskTable()

# create the reconstructor
reconstructor = DeepImagePriorReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.0007],
                      'scales': [6],
                      'gamma': np.linspace(2, 7, 10),
                      'channels': [(128,) * 6],
                      'skip_channels': [(0, 0, 0, 0, 4, 4)],
                      'iterations': [2000, 3000, 4000, 5000, 6000, 7000,
                                     8000, 9000, 10000],
                      'loss_function': ['poisson']
                  })
results = task_table.run()
save_results_table(results, 'lodopab_200_diptv')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))

dataset = load_standard_dataset('lodopab')
test_data = dataset.get_data_pairs('validation', 2)

task_table = TaskTable()

# create the reconstructor
reconstructor = DeepImagePriorReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.001, 0.0005, 0.0001],
                      'scales': [5],
                      'gamma': [0.0],
                      'channels': [(64,) * 5, (128,) * 5],
                      'skip_channels': [(0, 0, 0, 0, 0),
                                        (0, 0, 0, 0, 4),
                                        (0, 0, 0, 4, 4)],
                      'iterations': [3000, 4000, 5000, 6000, 7000, 8000,
                                     9000, 10000],
                      'loss_function': ['poisson']
                  })
results = task_table.run()
save_results_table(results, 'lodopab_dip--5-SCALES')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))

reconstructor = LearnedGDReconstructor(
    ray_trafo,
    log_dir='lodopab_learnedgd_{}'.format(size_part),
    save_best_learned_params_path=get_weights_path(
        'lodopab_learnedgd_{}'.format(size_part)))

# create a Dival task table and run it
task_table = TaskTable()
task_table.append(
    reconstructor=reconstructor,
    measures=[PSNR, SSIM],
    test_data=test_data,
    dataset=cached_dataset,
    hyper_param_choices={
        'batch_size': [20],
        'epochs': [epochs],
        'niter': [10],
        'lr': [0.00001],
        # 'lr_time_decay_rate': [3.2 * size_part],
        'init_frequency_scaling': [0.7],
        'init_weight_xavier_normal': [True],
        'init_weight_gain': [0.001]
    })
results = task_table.run()

# save report
save_results_table(results, 'lodopab_learnedgd_{}'.format(size_part))

# select best parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
params = Params(best_choice)

from dliplib.reconstructors.tv import TVReconstructor
from dliplib.utils.reports import save_results_table


# load data
dataset = load_standard_dataset('lodopab_200')
test_data = dataset.get_data_pairs('validation', 4)

task_table = TaskTable()

# create the reconstructor
reconstructor = TVReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={'gamma': np.logspace(-8, -5, 20)})
results = task_table.run()
save_results_table(results, 'lodopab_200_tv')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('lodopab_200_tv')

from dliplib.utils.reports import save_results_table


# load data
dataset = load_standard_dataset('lodopab')
test_data = dataset.get_data_pairs('validation', 100)

task_table = TaskTable()

# create the reconstructor
reconstructor = FBPReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'filter_type': ['Ram-Lak', 'Hann'],
                      'frequency_scaling': np.linspace(0.5, 1, 40)
                  })
results = task_table.run()
save_results_table(results, 'lodopab_fbp')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

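# Hedged usage sketch (not part of the original script): apply the selected
# hyper-parameters and reconstruct a single validation sample with the FBP
# reconstructor above. This assumes the dival API for hyper_params (a dict),
# Reconstructor.reconstruct(), DataPairs.observations / .ground_truth and
# Measure.apply().
reconstructor.hyper_params.update(best_choice)
obs = test_data.observations[0]
gt = test_data.ground_truth[0]
reco = reconstructor.reconstruct(obs)
print('PSNR of first validation sample: %.2f dB' % PSNR.apply(reco, gt))
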
from dliplib.utils.reports import save_results_table


# load data
dataset = load_standard_dataset('ellipses')
test_data = dataset.get_data_pairs('validation', 35)

task_table = TaskTable()

# create the reconstructor
reconstructor = TVAdamReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'gamma': np.logspace(-4, -2, 10),
                      'iterations': [2000, 2500, 3000, 3500, 4000, 4500,
                                     5000],
                      'loss_function': ['mse']
                  })
results = task_table.run()
save_results_table(results, 'ellipses_tvadam')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

# load data
dataset = load_standard_dataset('ellipses')
test_data = dataset.get_data_pairs('validation', 5)

task_table = TaskTable()

# create the reconstructor
reconstructor = DeepImagePriorReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.001],
                      'scales': [5],
                      'gamma': np.logspace(-4, -3, 5),
                      'channels': [(128,) * 5],
                      'skip_channels': [(0, 0, 0, 4, 4)],
                      'iterations': [5000, 6000, 7000, 8000, 9000, 10000]
                  })
results = task_table.run()
save_results_table(results, 'ellipses_diptv')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))
print(best_choice)

test_data = dataset.get_data_pairs('train', 5)

task_table = TaskTable()
size_part = 0.0001

# load reconstructor
reconstructor = learnedpd_dip_reconstructor('lodopab_200', size_part)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr1': [0.0005],
                      'lr2': [1e-6],
                      'scales': [6],
                      'gamma': [2.5, 3.0, 4.0],
                      'channels': [(128,) * 6],
                      'skip_channels': [(0, 0, 0, 0, 4, 4)],
                      'initial_iterations': [5000, 9750],
                      'iterations': [200, 250, 300, 350, 400, 500],
                      'loss_function': ['poisson']
                  })
results = task_table.run()
save_results_table(results,
                   'lodopab_200_learnedpd_dip_{}'.format(size_part))

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))

# load data
dataset = load_standard_dataset('ellipses')
test_data = dataset.get_data_pairs('validation', 10)

task_table = TaskTable()

# create the reconstructor
reconstructor = DeepImagePriorReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.001],
                      'scales': [5],
                      'gamma': [0],
                      'channels': [(128,) * 5],
                      'skip_channels': [(0,) * 5],
                      'iterations': [5000],
                  })
results = task_table.run()
save_results_table(results, 'ellipses_dip')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))
print(best_choice)

data_size = 0.002

# load reconstructor
reconstructor = learnedpd_dip_reconstructor('ellipses', data_size)

# create a Dival task table and run it
task_table.append(
    reconstructor=reconstructor,
    measures=[PSNR, SSIM],
    test_data=test_data,
    hyper_param_choices={
        'lr1': [0.001],
        'lr2': [1e-6, 1e-7, 1e-8],
        'scales': [5],
        # 'gamma': np.logspace(-5, -3, 10),
        'gamma': [0.0001291549665014884, 0.00021544346900318823,
                  0.0003162277660],
        'channels': [(128,) * 5],
        'skip_channels': [(0, 0, 0, 0, 0)],
        'initial_iterations': [5000],
        'iterations': [100, 300, 500, 1000, 1500, 2000, 2500, 3000, 3500,
                       4000, 5000],
    })
results = task_table.run()
save_results_table(results, 'ellipses_learnedpd_dip_{}'.format(data_size))

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

# continuation of a truncated `epochs = min(...)` expression:
             int(1. / size_part * full_size_epochs))

reconstructor = LearnedGDReconstructor(
    ray_trafo,
    log_dir='ellipses_learnedgd_{}'.format(size_part),
    save_best_learned_params_path=get_weights_path(
        'ellipses_learnedgd_{}'.format(size_part)))

# create a Dival task table and run it
task_table = TaskTable()
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  dataset=cached_dataset,
                  hyper_param_choices={
                      'batch_size': [32],
                      'epochs': [epochs],
                      'niter': [10],
                      'lr': [0.001],
                      'lr_time_decay_rate': [3.2 * size_part]
                  })
results = task_table.run()

# save report
save_results_table(results, 'ellipses_learnedgd_{}'.format(size_part))

# select best parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
params = Params(best_choice)
params.save('ellipses_learnedgd_{}'.format(size_part))

def main():
    # load data
    options = get_parser().parse_args()
    dataset = load_standard_dataset('lodopab', ordered=True)
    ray_trafo = dataset.ray_trafo

    global_results = []

    sizes = [0.001, 0.01, 0.1, 1.00]
    # sizes = [0.0001]
    # sizes = [0.001]
    # sizes = [0.01]
    # sizes = [0.1]

    for size_part in sizes:
        cached_dataset = CachedDataset(dataset,
                                       space=(ray_trafo.range,
                                              ray_trafo.domain),
                                       cache_files={
                                           'train': [None, None],
                                           'validation': [None, None]
                                       },
                                       size_part=size_part)
        test_data = dataset.get_data_pairs('validation',
                                           cached_dataset.validation_len)
        print('validation size: %d' % len(test_data))

        full_size_epochs = 10 if size_part >= 0.10 else 5
        lr = 0.0001 if size_part >= 0.10 else 0.001
        # scale number of epochs by 1/size_part, but use at most 1000 times
        # as many epochs as for the full-size dataset
        epochs = min(1000 * full_size_epochs,
                     int(1. / size_part * full_size_epochs))

        reconstructor = LearnedPDReconstructor(
            ray_trafo,
            log_dir='lodopab_learnedpd_{}'.format(size_part),
            save_best_learned_params_path=get_weights_path(
                'lodopab_learnedpd_{}'.format(size_part)))

        # create a Dival task table and run it
        task_table = TaskTable()
        task_table.append(reconstructor=reconstructor,
                          measures=[PSNR, SSIM],
                          test_data=test_data,
                          dataset=cached_dataset,
                          hyper_param_choices={
                              'batch_size': [1],
                              'epochs': [epochs],
                              'niter': [10],
                              'internal_ch': [64 if size_part >= 0.10
                                              else 32],
                              'lr': [lr],
                              'lr_min': [lr],
                              'init_fbp': [True],
                              'init_frequency_scaling': [0.7]
                          })
        results = task_table.run()

        # save report
        save_results_table(results, 'lodopab_learnedpd_{}'.format(size_part))

        # select best parameters and save them
        best_choice, best_error = select_hyper_best_parameters(results)
        params = Params(best_choice)
        params.save('lodopab_learnedpd_{}'.format(size_part))

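# Illustration (not part of the original scripts) of the epoch-scaling rule
# used in the training loop above: the epoch count grows like 1/size_part and
# is capped at 1000 times the full-size epoch count.
for size_part, full_size_epochs in [(1.0, 10), (0.1, 10), (0.01, 5),
                                    (0.001, 5)]:
    epochs = min(1000 * full_size_epochs,
                 int(1. / size_part * full_size_epochs))
    print('size_part=%g -> epochs=%d' % (size_part, epochs))
# size_part=1     -> epochs=10
# size_part=0.1   -> epochs=100
# size_part=0.01  -> epochs=500
# size_part=0.001 -> epochs=5000 (cap of 1000 * full_size_epochs reached)
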
from dliplib.utils.reports import save_results_table


# load data
dataset = load_standard_dataset('lodopab')
test_data = dataset.get_data_pairs('validation', 4)

task_table = TaskTable()

# create the reconstructor
reconstructor = TVAdamReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'gamma': np.linspace(15, 25, 10),
                      'iterations': [2000, 2500, 3000, 3500],
                      'loss_function': ['poisson']
                  })
results = task_table.run()
save_results_table(results, 'lodopab_tvadam')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)