Example #1

def learnedpd_dip_reconstructor(dataset='ellipses', size_part=1.0, name=None):
    """
    :param dataset: Can be 'ellipses' or 'lodopab'
    :param size_part: Fraction of the training data the Learned PD component
                      was trained on
    :return: The Learned PD + DIP reconstructor for the specified dataset
    """
    try:
        standard_dataset = load_standard_dataset(dataset)
        params = Params.load('{}_learnedpd_{}'.format(dataset, size_part))
        learned_pd = LearnedPDReconstructor(standard_dataset.ray_trafo,
                                            hyper_params=params.dict,
                                            name='Learned PD ({}%)'.format(
                                                size_part * 100))
        load_weights(learned_pd, '{}_learnedpd_{}'.format(dataset, size_part))

        # load hyper-parameters and create reconstructor
        if name is None:
            name = r'Learned PD ({} $\%$) + DIP'.format(100 * size_part)
        params = Params.load('{}_learnedpd_dip_{}'.format(dataset, size_part))
        reconstructor = DeepImagePriorInitialReconstructor(
            standard_dataset.ray_trafo,
            ini_reco=learned_pd,
            hyper_params=params.dict,
            name=name)
        return reconstructor
    except Exception as e:
        raise Exception('The reconstructor does not exist') from e
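
A minimal usage sketch (assuming the dliplib helpers above are importable and the pretrained weights are available):

# build the Learned PD + DIP reconstructor and apply it to a validation pair
reconstructor = learnedpd_dip_reconstructor(dataset='ellipses', size_part=0.1)
dataset = load_standard_dataset('ellipses')
obs, gt = dataset.get_data_pairs('validation', 1)[0]
reco = reconstructor.reconstruct(obs)
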
Example #2

def learnedgd_reconstructor(dataset='ellipses',
                            size_part=1.0,
                            pretrained=True,
                            name=None):
    """
    :param dataset: Can be 'ellipses' or 'lodopab'
    :param size_part: Can be one of: [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]
    :return: The Learned Gradient Descent method trained on the specified dataset and size
    """
    try:
        standard_dataset = load_standard_dataset(dataset)
        params = Params.load('{}_learnedgd_{}'.format(dataset, size_part))
        if name is None:
            name = r'Learned GD ({} $\%$)'.format(100 * size_part)
        reconstructor = LearnedGDReconstructor(standard_dataset.ray_trafo,
                                               hyper_params=params.dict,
                                               name=name)
        if pretrained:
            load_weights(reconstructor,
                         '{}_learnedgd_{}'.format(dataset, size_part))
        return reconstructor
    except Exception as e:
        raise Exception(
            'The reconstructor has not been trained with the selected data_size'
        ) from e
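
Since the factory accepts any of the documented fractions, a sweep is a short loop; a sketch (assuming pretrained weights exist for every size_part):

# build one pretrained Learned GD reconstructor per training-set fraction
for size_part in [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]:
    reconstructor = learnedgd_reconstructor(dataset='lodopab',
                                            size_part=size_part)
    print(reconstructor.name)
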
Example #3

def iradonmap_reconstructor(dataset='ellipses',
                            size_part=1.0,
                            pretrained=True,
                            name=None):
    """
    :param dataset: Can be 'ellipses' or 'lodopab'
    :param size_part: Can be one of: [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]
    :return: The iRadonMap method trained on the specified dataset and size
    """
    try:
        standard_dataset = load_standard_dataset(dataset)
        params = Params.load('{}_iradonmap_{}'.format(dataset, size_part))
        if name is None:
            name = r'iRadonMap ({} $\%$)'.format(100 * size_part)
        coord_mat = None
        try:
            coord_mat = np.load(
                os.path.join(BASE_DIR, 'reconstructors',
                             '{}_iradonmap_coord_mat.npy'.format(dataset)))
        except FileNotFoundError:
            pass
        reconstructor = IRadonMapReconstructor(standard_dataset.ray_trafo,
                                               hyper_params=params.dict,
                                               name=name,
                                               coord_mat=coord_mat)
        if pretrained:
            load_weights(reconstructor,
                         '{}_iradonmap_{}'.format(dataset, size_part))
        return reconstructor
    except Exception as e:
        raise Exception(
            'The reconstructor has not been trained with the selected data_size'
        ) from e
Example #4

def fbpunet_reconstructor(dataset='ellipses',
                          size_part=1.0,
                          pretrained=True,
                          name=None):
    """
    :param dataset: Can be 'ellipses' or 'lodopab'
    :param size_part: Can be one of: [0.001, 0.002, 0.005, 0.01, 0.02, 0.05,
                      0.1, 0.2, 0.5, 1.0]
    :return: The FBP+UNet method trained on the specified dataset and size
    """
    try:
        standard_dataset = load_standard_dataset(dataset)
        params = Params.load('{}_fbpunet_{}'.format(dataset, size_part))
        if name is None:
            name = r'FBP+UNet ({} $\%$)'.format(100 * size_part)

        reconstructor = FBPUNetReconstructor(standard_dataset.ray_trafo,
                                             hyper_params=params.dict,
                                             name=name)
        if pretrained:
            load_weights(reconstructor,
                         '{}_fbpunet_{}'.format(dataset, size_part))
        return reconstructor
    except Exception as e:
        raise Exception(
            'The reconstructor does not exist for the selected data_size'
        ) from e
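
With pretrained=False the factory returns an untrained network; a sketch of training it from scratch (assuming a dival dataset and the save_weights helper used in the training scripts further below):

# create an untrained FBP+UNet reconstructor and train it ourselves
reconstructor = fbpunet_reconstructor(dataset='ellipses', size_part=1.0,
                                      pretrained=False)
dataset = load_standard_dataset('ellipses')
reconstructor.train(dataset)
save_weights(reconstructor, 'ellipses_fbpunet_1.0')
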
Example #5

def tvadam_reconstructor(dataset='ellipses', name=None):
    """
    :param dataset: Can be 'ellipses' or 'lodopab'
    :return: The TV-Adam reconstructor for the specified dataset
    """
    try:
        params = Params.load('{}_tvadam'.format(dataset))
        standard_dataset = load_standard_dataset(dataset)
        if name is None:
            name = 'TV-Adam'
        reconstructor = TVAdamReconstructor(standard_dataset.ray_trafo,
                                            hyper_params=params.dict,
                                            name=name)
        return reconstructor
    except Exception as e:
        raise Exception('The reconstructor does not exist') from e
Example #6

def dip_reconstructor(dataset='ellipses', name=None):
    """
    :param dataset: Can be 'ellipses' or 'lodopab'
    :return: The Deep Image Prior (DIP) method for the specified dataset
    """
    try:
        standard_dataset = load_standard_dataset(dataset)
        params = Params.load('{}_dip'.format(dataset))
        if name is None:
            name = 'DIP'
        reconstructor = DeepImagePriorReconstructor(standard_dataset.ray_trafo,
                                                    hyper_params=params.dict,
                                                    name=name)
        return reconstructor
    except Exception as e:
        raise Exception('The reconstructor does not exist') from e
Example #7

def fbp_reconstructor(dataset='ellipses', name=None):
    """
    :param dataset: Can be 'ellipses' or 'lodopab'
    :return: Filtered back projection reconstructor for the specified dataset
    """
    try:
        params = Params.load('{}_fbp'.format(dataset))
        standard_dataset = load_standard_dataset(dataset)
        if name is None:
            name = 'FBP'
        reconstructor = FBPReconstructor(standard_dataset.ray_trafo,
                                         hyper_params=params.dict,
                                         name=name)
        return reconstructor
    except Exception as e:
        raise Exception('The reconstructor does not exist') from e
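
These factories make side-by-side comparisons short; a sketch (assuming dival's Measure.apply interface for PSNR):

# compare FBP and DIP on a single validation pair
dataset = load_standard_dataset('ellipses')
obs, gt = dataset.get_data_pairs('validation', 1)[0]
for factory in (fbp_reconstructor, dip_reconstructor):
    reconstructor = factory(dataset='ellipses')
    print(reconstructor.name, PSNR.apply(reconstructor.reconstruct(obs), gt))
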
Example #8

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.001, 0.0005, 0.0001],
                      'scales': [5],
                      'gamma': [0.0],
                      'channels': [(64, ) * 5, (128, ) * 5],
                      'skip_channels': [(0, 0, 0, 0, 0), (0, 0, 0, 0, 4),
                                        (0, 0, 0, 4, 4)],
                      'iterations':
                      [3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000],
                      'loss_function': ['poisson']
                  })

results = task_table.run()

save_results_table(results, 'lodopab_dip--5-SCALES')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('lodopab_dip')
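
dival expands hyper_param_choices into the Cartesian product of all listed values, so the grid above is worth sizing before committing to a run:

# number of combinations the task table evaluates: the product of the lengths
# of the value lists (lr, channels, skip_channels, iterations; singletons omitted)
n_runs = 3 * 2 * 3 * 8
print(n_runs)  # 144 separate DIP runs, each up to 10000 iterations
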
Example #9

dataset = load_standard_dataset('lodopab_200')
test_data = dataset.get_data_pairs('validation', 100)

task_table = TaskTable()

# create the reconstructor
reconstructor = FBPReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'filter_type': ['Ram-Lak', 'Hann'],
                      'frequency_scaling': np.linspace(0.5, 1, 40)
                  })

results = task_table.run()

save_results_table(results, 'lodopab_200_fbp')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('lodopab_200_fbp')
Example #10

# hyper-parameter search for the Learned PD + DIP reconstructor; the opening
# of this call was cut off in the source and is reconstructed here from the
# surrounding examples
task_table.append(
    reconstructor=reconstructor,
    measures=[PSNR, SSIM],
    test_data=test_data,
    hyper_param_choices={
        'lr1': [0.001],
        'lr2': [1e-6, 1e-7, 1e-8],
        'scales': [5],
        # 'gamma': np.logspace(-5, -3, 10),
        'gamma':
        [0.0001291549665014884, 0.00021544346900318823, 0.0003162277660],
        'channels': [(128, ) * 5],
        'skip_channels': [(0, 0, 0, 0, 0)],
        'initial_iterations': [5000],
        'iterations':
        [100, 300, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 5000],
    })

results = task_table.run()

save_results_table(results, 'ellipses_learnedpd_dip_{}'.format(data_size))

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('ellipses_learnedpd_dip_{}'.format(data_size))
Example #11

    # create a Dival task table and run it
    task_table = TaskTable()
    task_table.append(reconstructor=reconstructor,
                      measures=[PSNR, SSIM],
                      test_data=test_data,
                      dataset=cached_dataset,
                      hyper_param_choices={
                          'batch_size': [32],
                          'epochs': [epochs],
                          'niter': [10],
                          'lr': [0.001],
                          'lr_time_decay_rate': [3.2 * size_part]
                      })
    results = task_table.run()

    # save report
    save_results_table(results, 'ellipses_learnedgd_{}'.format(size_part))

    # select best parameters and save them
    best_choice, best_error = select_hyper_best_parameters(results)
    params = Params(best_choice)
    params.save('ellipses_learnedgd_{}'.format(size_part))

    # retrain the model with the optimal parameters and save the weights

    # reconstructor = LearnedGDReconstructor(dataset.ray_trafo, hyper_params=params.dict)
    # reconstructor.train(cached_dataset)

    save_weights(reconstructor, 'ellipses_learnedgd_{}'.format(size_part))
Example #12

    # create the reconstructor; the opening of this call was cut off in the
    # source and is reconstructed here per the pattern in Example #15
    reconstructor = IRadonMapReconstructor(
        ray_trafo,
        log_dir='ellipses_iradonmap/' + str(size_part),
        save_best_learned_params_path=get_weights_path(
            'ellipses_iradonmap_{}'.format(size_part)))

    epochs = min(10 * full_size_epochs, int(1./size_part * full_size_epochs))
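    # e.g. with full_size_epochs = 10: size_part 1.0 -> 10 epochs, 0.5 -> 20,
    # 0.05 -> min(100, 200) = 100; the 10x cap binds once size_part < 0.1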

    # create a Dival task table and run it
    task_table = TaskTable()
    task_table.append(reconstructor=reconstructor, measures=[PSNR, SSIM],
                      test_data=test_data, dataset=cached_dataset,
                      hyper_param_choices={'scales': [5],
                                           'skip_channels': [4],
                                           'batch_size': [64],
                                           'epochs': [epochs],
                                           'fully_learned': [True],
                                           'lr': [0.01],
                                           'use_sigmoid': [True]})
    results = task_table.run()

    # save report
    save_results_table(results, 'ellipses_iradonmap_{}'.format(size_part))

    # select best parameters and save them
    best_choice, best_error = select_hyper_best_parameters(results)
    params = Params(best_choice)
    params.save('ellipses_iradonmap_{}'.format(size_part))

    save_weights(reconstructor, 'ellipses_iradonmap_{}'.format(size_part))
    global_results.append(best_error)

print(global_results)
Example #13

    # create a Dival task table and run it; the opening of this call was cut
    # off in the source and is reconstructed here per the surrounding examples
    task_table = TaskTable()
    task_table.append(reconstructor=reconstructor,
                      measures=[PSNR, SSIM],
                      test_data=test_data,
                      dataset=cached_dataset,
                      hyper_param_choices={
                          'scales': [5],
                          'skip_channels': [4],
                          'channels': [channels],
                          'batch_size': [16],
                          'epochs': [epochs],
                          'lr': [0.01],
                          'filter_type': ['Hann'],
                          'frequency_scaling': [1.0]
                      })
    results = task_table.run()

    # save report
    save_results_table(results, 'ellipses_fbpunet_{}'.format(size_part))

    # select best parameters and save them
    best_choice, best_error = select_hyper_best_parameters(results)
    params = Params(best_choice)
    params.save('ellipses_fbpunet_{}'.format(size_part))

    # retrain the model with the optimal parameters and save the weights

    # reconstructor = FBPUNetReconstructor(dataset.ray_trafo, hyper_params=params.dict)
    # reconstructor.train(cached_dataset)
    save_weights(reconstructor, 'ellipses_fbpunet_{}'.format(size_part))
    global_results.append(best_error)

print(global_results)
Example #14

    # create a Dival task table and run it; the opening of this call was cut
    # off in the source and is reconstructed here per the surrounding examples
    task_table = TaskTable()
    task_table.append(
        reconstructor=reconstructor,
        measures=[PSNR, SSIM],
        test_data=test_data,
        dataset=cached_dataset,
        hyper_param_choices={
            'batch_size': [20],
            'epochs': [epochs],
            'niter': [10],
            'lr': [0.00001],
            # 'lr_time_decay_rate': [3.2 *
            # size_part],
            'init_frequency_scaling': [0.7],
            'init_weight_xavier_normal': [True],
            'init_weight_gain': [0.001]
        })
    results = task_table.run()

    # save report
    save_results_table(results, 'lodopab_learnedgd_{}'.format(size_part))

    # select best parameters and save them
    best_choice, best_error = select_hyper_best_parameters(results)
    params = Params(best_choice)
    params.save('lodopab_learnedgd_{}'.format(size_part))

    # retrain the model with the optimal parameters and save the weights

    # reconstructor = LearnedGDReconstructor(dataset.ray_trafo, hyper_params=params.dict)
    # reconstructor.train(cached_dataset)

#    save_weights(reconstructor.model, 'lodopab_learnedgd_{}'.format(size_part))
Example #15

    # create the reconstructor; the opening of this call was cut off in the
    # source and is reconstructed here per the other training scripts
    reconstructor = IRadonMapReconstructor(
        ray_trafo,
        log_dir='lodopab_200_iradonmap/' + str(size_part),
        save_best_learned_params_path=get_weights_path(
            'lodopab_200_iradonmap_{}'.format(size_part)))

    epochs = min(10 * full_size_epochs, int(1. / size_part * full_size_epochs))

    # create a Dival task table and run it
    task_table = TaskTable()
    task_table.append(reconstructor=reconstructor,
                      measures=[PSNR, SSIM],
                      test_data=test_data,
                      dataset=cached_dataset,
                      hyper_param_choices={
                          'scales': [5],
                          'skip_channels': [4],
                          'batch_size': [32],
                          'epochs': [epochs],
                          'fully_learned': [True],
                          'lr': [0.01],
                          'use_sigmoid': [False, True]
                      })
    results = task_table.run()

    # save report
    save_results_table(results, 'lodopab_200_iradonmap_{}'.format(size_part))

    # select best parameters and save them
    best_choice, best_error = select_hyper_best_parameters(results)
    params = Params(best_choice)
    params.save('lodopab_200_iradonmap_{}'.format(size_part))
Example #16

test_data = dataset.get_data_pairs('validation', 4)

task_table = TaskTable()

# create the reconstructor
reconstructor = TVAdamReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'gamma': np.linspace(15, 25, 10),
                      'iterations': [2000, 2500, 3000, 3500],
                      'loss_function': ['poisson']
                  })

results = task_table.run()

save_results_table(results, 'lodopab_tvadam')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('lodopab_tvadam')
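
The saved parameters can later be reloaded through the factory in Example #5, or directly; a minimal sketch (assuming the same dliplib helpers as above):

# rebuild the tuned TV-Adam reconstructor from the saved hyper-parameters
params = Params.load('lodopab_tvadam')
reconstructor = TVAdamReconstructor(load_standard_dataset('lodopab').ray_trafo,
                                    hyper_params=params.dict, name='TV-Adam')
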
Example #17

# load data
dataset = load_standard_dataset('ellipses')
test_data = dataset.get_data_pairs('validation', 100)

task_table = TaskTable()

# create the reconstructor
reconstructor = FBPReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'filter_type': ['Ram-Lak', 'Hann'],
                      'frequency_scaling': np.linspace(0.5, 1, 40)
                  })

results = task_table.run()

save_results_table(results, 'ellipses_fbp')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)
params = Params(best_choice)
params.save('ellipses_fbp')

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)
Example #18

dataset = load_standard_dataset('ellipses')
test_data = dataset.get_data_pairs('validation', 35)

task_table = TaskTable()

# create the reconstructor
reconstructor = TVAdamReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'gamma': np.logspace(-4, -2, 10),
                      'iterations': [2000, 2500, 3000, 3500, 4000, 4500, 5000],
                      'loss_function': ['mse']
                  })

results = task_table.run()

save_results_table(results, 'ellipses_tvadam')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('ellipses_tvadam')
Example #19

# create the reconstructor
reconstructor = DeepImagePriorReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.001],
                      'scales': [5],
                      'gamma': np.logspace(-4, -3, 5),
                      'channels': [(128, ) * 5],
                      'skip_channels': [(0, 0, 0, 4, 4)],
                      'iterations': [5000, 6000, 7000, 8000, 9000, 10000]
                  })

results = task_table.run()

save_results_table(results, 'ellipses_diptv')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('ellipses_diptv')
Example #20

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr1': [0.0005],
                      'lr2': [1e-6],
                      'scales': [6],
                      'gamma': [2.5, 3.0, 4.0],
                      'channels': [(128, ) * 6],
                      'skip_channels': [(0, 0, 0, 0, 4, 4)],
                      'initial_iterations': [5000, 9750],
                      'iterations': [200, 250, 300, 350, 400, 500],
                      "loss_function": ["poisson"]
                  })

results = task_table.run()

save_results_table(results, 'lodopab_200_learnedpd_dip_{}'.format(size_part))

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('lodopab_200_learnedpd_dip_{}'.format(size_part))
Example #21

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.0007],
                      'scales': [6],
                      'gamma':
                      np.linspace(2, 7, 10),
                      'channels': [(128, ) * 6],
                      'skip_channels': [(0, 0, 0, 0, 4, 4)],
                      'iterations':
                      [2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000],
                      'loss_function': ['poisson']
                  })

results = task_table.run()

save_results_table(results, 'lodopab_200_diptv')

# select the best hyper-parameters and save them
best_choice, best_error = select_hyper_best_parameters(results)

print(results.to_string(show_columns=['misc']))
print(best_choice)
print(best_error)

params = Params(best_choice)
params.save('lodopab_200_diptv')
Example #22

from dliplib.utils.helper import load_standard_dataset, set_use_latex
# NOTE: assumed import; DataPairs is used below and lives in dival.data
from dival.data import DataPairs


set_use_latex()

# load data
dataset = load_standard_dataset('ellipses')
test_data = dataset.get_data_pairs('validation', 5)

index = 3
obs, gt = test_data[index]
test_data = DataPairs([obs], [gt], name='test: %d' % index)

# forward operator
ray_trafo = dataset.ray_trafo
params = Params.load('ellipses_dip')

results = []

# Architecture-hyper-parameters
channels = [1, 8, 32, 64, 128, 128, 128, 128]
scales = [5, 5, 5, 5, 5, 4, 3, 2]
iters = [5000] * 8

# compute reconstructions with each of the different architectures
for ch, sc, it in zip(channels, scales, iters):
    print('Channels: %d, Scales: %d' % (ch, sc))
    params.channels = (ch,) * sc
    params.skip_channels = (0,) * sc
    params.scales = sc
    params.iterations = it
    # plausible completion (the loop body is truncated in the source): build
    # the DIP reconstructor for this architecture and store its reconstruction
    reconstructor = DeepImagePriorReconstructor(ray_trafo,
                                                hyper_params=params.dict)
    results.append(reconstructor.reconstruct(obs))

Example #23

def main():
    # load data
    options = get_parser().parse_args()

    dataset = load_standard_dataset('lodopab', ordered=True)
    ray_trafo = dataset.ray_trafo

    global_results = []

    sizes = [0.001, 0.01, 0.1, 1.00]
    #sizes = [0.0001]
    #sizes = [0.001]
    #sizes = [0.01]
    #sizes = [0.1]

    for size_part in sizes:
        cached_dataset = CachedDataset(dataset,
                                       space=(ray_trafo.range,
                                              ray_trafo.domain),
                                       cache_files={
                                           'train': [None, None],
                                           'validation': [None, None]
                                       },
                                       size_part=size_part)

        test_data = dataset.get_data_pairs('validation',
                                           cached_dataset.validation_len)
        print('validation size: %d' % len(test_data))

        full_size_epochs = 10 if size_part >= 0.10 else 5
        lr = 0.0001 if size_part >= 0.10 else 0.001
        # scale number of epochs by 1/size_part, but maximum 1000 times as many
        # epochs as for full size dataset
        epochs = min(1000 * full_size_epochs,
                     int(1. / size_part * full_size_epochs))
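        # e.g. size_part 1.0 -> 10 epochs, 0.1 -> 100, 0.01 -> 500 (with
        # full_size_epochs = 5), 0.001 -> 5000, which is exactly the 1000x cap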

        reconstructor = LearnedPDReconstructor(
            ray_trafo,
            log_dir='lodopab_learnedpd_{}'.format(size_part),
            save_best_learned_params_path=get_weights_path(
                'lodopab_learnedpd_{}'.format(size_part)))

        # create a Dival task table and run it
        task_table = TaskTable()
        task_table.append(reconstructor=reconstructor,
                          measures=[PSNR, SSIM],
                          test_data=test_data,
                          dataset=cached_dataset,
                          hyper_param_choices={
                              'batch_size': [1],
                              'epochs': [epochs],
                              'niter': [10],
                              'internal_ch': [64 if size_part >= 0.10 else 32],
                              'lr': [lr],
                              'lr_min': [lr],
                              'init_fbp': [True],
                              'init_frequency_scaling': [0.7]
                          })
        results = task_table.run()

        # save report
        save_results_table(results, 'lodopab_learnedpd_{}'.format(size_part))

        # select best parameters and save them
        best_choice, best_error = select_hyper_best_parameters(results)
        params = Params(best_choice)
        params.save('lodopab_learnedpd_{}'.format(size_part))
Example #24

    # scale the number of epochs; the opening of this statement was cut off in
    # the source, and the 1000x cap is assumed per the preceding script
    epochs = min(1000 * full_size_epochs,
                 int(1. / size_part * full_size_epochs))

    reconstructor = FBPUNetReconstructor(
        ray_trafo,
        log_dir='lodopab_fbpunet_{}'.format(size_part),
        save_best_learned_params_path=get_weights_path(
            'lodopab_fbpunet_{}'.format(size_part)))

    task_table.append(reconstructor=reconstructor,
                      measures=[PSNR, SSIM],
                      test_data=test_data,
                      dataset=cached_dataset,
                      hyper_param_choices={
                          'scales': [5],
                          'skip_channels': [4],
                          'batch_size': [32],
                          'epochs': [epochs],
                          'lr': [0.01],
                          'filter_type': ['Hann'],
                          'frequency_scaling': [1.0]
                      })
    results = task_table.run()

    # save report
    save_results_table(results, 'lodopab_fbpunet_{}'.format(size_part))

    # select best parameters and save them
    best_choice, best_error = select_hyper_best_parameters(results)
    params = Params(best_choice)
    params.save('lodopab_fbpunet_{}'.format(size_part))