Example #1
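The snippets on this page are shown without their import lines. A plausible header for Example #1 is sketched below; the dival import paths follow the library's layout, while the local module name util and the helpers get_parser, load_standard_dataset, get_reconstructor, get_weights_path and save_results_table are project-specific assumptions.

from dival.evaluation import TaskTable
from dival.measure import PSNR, SSIM
from dival.datasets.cached_dataset import CachedDataset

# Project-specific helpers (assumed to live in a local module named `util`).
from util import (get_parser, load_standard_dataset, get_reconstructor,
                  get_weights_path, save_results_table)
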
def main():
    # load data
    options = get_parser().parse_args()

    dataset = load_standard_dataset(options.dataset, ordered=True)
    ray_trafo = dataset.ray_trafo

    X = ray_trafo.range

    lodopab_cache_file_names = {
        'train': [None, None],
        'validation': [None, None]
    }

    if options.method == 'fbpunet':
        X = ray_trafo.domain
        train_path = 'cache_train_{}_fbp.npy'.format(options.dataset)
        validation_path = 'cache_validation_{}_fbp.npy'.format(options.dataset)
        lodopab_cache_file_names = {
            'train': [train_path, None],
            'validation': [validation_path, None]
        }

    cached_dataset = CachedDataset(dataset,
                                   space=(X, ray_trafo.domain),
                                   cache_files=lodopab_cache_file_names,
                                   size_part=options.size_part)

    test_data = dataset.get_data_pairs('validation',
                                       cached_dataset.validation_len)
    print('validation size: %d' % len(test_data))

    reconstructor = get_reconstructor(options.method,
                                      dataset=options.dataset,
                                      size_part=options.size_part,
                                      pretrained=False)
    print(reconstructor.hyper_params)

    full_name = '{}_{}_{}'.format(
        options.dataset, options.method, options.size_part)
    print(full_name)
    reconstructor.save_best_learned_params_path = get_weights_path(full_name)
    reconstructor.log_dir = options.log_dir
    reconstructor.num_data_loader_workers = 16

    # create a Dival task table and run it
    task_table = TaskTable()
    task_table.append(
        reconstructor=reconstructor,
        measures=[PSNR, SSIM],
        test_data=test_data,
        dataset=cached_dataset,
        hyper_param_choices=[reconstructor.hyper_params]
    )
    results = task_table.run()

    # save report
    save_results_table(results, full_name)
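
Example #1 is driven entirely by command-line options (dataset, method, size_part, log_dir), and the parser itself is not shown here. A minimal sketch of what get_parser() might look like, assuming plain argparse with illustrative defaults:

import argparse

def get_parser():
    # Hypothetical sketch of the CLI assumed by main() above; the option names
    # match the attributes accessed on `options`, the defaults are illustrative.
    parser = argparse.ArgumentParser(description='train and evaluate a reconstructor')
    parser.add_argument('--dataset', default='lodopab')
    parser.add_argument('--method', default='learnedpd')
    parser.add_argument('--size_part', type=float, default=1.0)
    parser.add_argument('--log_dir', default='logs')
    return parser

Passing --method fbpunet then switches X to the image domain and enables the FBP cache files, as in the branch at the top of main().
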
Example #2

def main():
    # load data
    options = get_parser().parse_args()

    dataset = load_standard_dataset('lodopab', ordered=True)
    ray_trafo = dataset.ray_trafo

    global_results = []

    sizes = [0.001, 0.01, 0.1, 1.00]
    #sizes = [0.0001]
    #sizes = [0.001]
    #sizes = [0.01]
    #sizes = [0.1]

    for size_part in sizes:
        cached_dataset = CachedDataset(dataset,
                                       space=(ray_trafo.range,
                                              ray_trafo.domain),
                                       cache_files={
                                           'train': [None, None],
                                           'validation': [None, None]
                                       },
                                       size_part=size_part)

        test_data = dataset.get_data_pairs('validation',
                                           cached_dataset.validation_len)
        print('validation size: %d' % len(test_data))

        full_size_epochs = 10 if size_part >= 0.10 else 5
        lr = 0.0001 if size_part >= 0.10 else 0.001
        # scale the number of epochs by 1/size_part, but at most 1000 times as
        # many epochs as for the full-size dataset
        epochs = min(1000 * full_size_epochs,
                     int(1. / size_part * full_size_epochs))

        reconstructor = LearnedPDReconstructor(
            ray_trafo,
            log_dir='lodopab_learnedpd_{}'.format(size_part),
            save_best_learned_params_path=get_weights_path(
                'lodopab_learnedpd_{}'.format(size_part)))

        # create a Dival task table and run it
        task_table = TaskTable()
        task_table.append(reconstructor=reconstructor,
                          measures=[PSNR, SSIM],
                          test_data=test_data,
                          dataset=cached_dataset,
                          hyper_param_choices={
                              'batch_size': [1],
                              'epochs': [epochs],
                              'niter': [10],
                              'internal_ch': [64 if size_part >= 0.10 else 32],
                              'lr': [lr],
                              'lr_min': [lr],
                              'init_fbp': [True],
                              'init_frequency_scaling': [0.7]
                          })
        results = task_table.run()

        # save report
        save_results_table(results, 'lodopab_learnedpd_{}'.format(size_part))

        # select best parameters and save them
        best_choice, best_error = select_hyper_best_parameters(results)
        params = Params(best_choice)
        params.save('lodopab_learnedpd_{}'.format(size_part))
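
The epoch-scaling rule in this example keeps the total number of training steps roughly constant across subset sizes: the epoch count grows as 1/size_part but is capped at 1000 times the full-size budget. A standalone restatement of that rule, with the cap as a parameter and a few evaluated sample values:

def scaled_epochs(size_part, full_size_epochs, max_factor=1000):
    # epochs grow as full_size_epochs / size_part, capped at max_factor * full_size_epochs
    return min(max_factor * full_size_epochs,
               int(1. / size_part * full_size_epochs))

# full_size_epochs is 10 for size_part >= 0.10 and 5 otherwise, as in the loop above
print(scaled_epochs(1.00, 10))   # 10
print(scaled_epochs(0.10, 10))   # 100
print(scaled_epochs(0.01, 5))    # 500
print(scaled_epochs(0.001, 5))   # 5000
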
Example #3

#sizes = [0.01]
#sizes = [0.02]
#sizes = [0.05]

#sizes = [0.10]
#sizes = [0.25]

#sizes = [0.50]
sizes = [1.00]

# Divide training into several GPUs:
# sizes = [0.05]

for size_part in sizes:
    cached_dataset = CachedDataset(dataset,
                                   space=(ray_trafo.range, ray_trafo.domain),
                                   cache_files=ellipses_cache_file_names,
                                   size_part=size_part)

    test_data = dataset.get_data_pairs('validation',
                                       cached_dataset.validation_len)
    print('validation size: %d' % len(test_data))

    full_size_epochs = 50
    # scale the number of epochs by 1/size_part, but at most 100 times as
    # many epochs as for the full-size dataset
    epochs = min(100 * full_size_epochs,
                 int(1. / size_part * full_size_epochs))

    reconstructor = LearnedGDReconstructor(
        ray_trafo,
        log_dir='ellipses_learnedgd_{}'.format(size_part),
        save_best_learned_params_path=get_weights_path(
            'ellipses_learnedgd_{}'.format(size_part)))
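
The dictionary ellipses_cache_file_names passed to CachedDataset above is defined earlier in the original script and is not part of this fragment. Judging from the lodopab dictionaries in the other examples, it maps each data part to a pair [observation_cache, ground_truth_cache] of .npy paths, with None meaning that component is read from the underlying dataset instead of a cache. The definition below is an assumption in that spirit, with illustrative file names; recent dival versions also provide a generate_cache_files helper in dival.datasets.cached_dataset for writing such caches in advance.

# Assumed structure (file names are illustrative only).
ellipses_cache_file_names = {
    'train': ['cache_train_ellipses.npy', None],
    'validation': ['cache_validation_ellipses.npy', None]
}
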
Example #4
full_size_epochs = 30
sizes = [0.0001, 0.001, 0.01, 0.1, 1.00]
#sizes = [0.0001]
#sizes = [0.001]
#sizes = [0.01]
#sizes = [0.1]
#sizes = [1.00]

for size_part in sizes:
    # Free the previous iteration's objects, if any, before building new ones
    # (a bare `del` would raise NameError on the first pass through the loop).
    task_table = None
    reconstructor = None

    cached_dataset = CachedDataset(dataset,
                                   space=(ray_trafo.range, ray_trafo.domain),
                                   cache_files={
                                       'train': [None, None],
                                       'validation': [None, None]
                                   },
                                   size_part=size_part)

    test_data = dataset.get_data_pairs('validation',
                                       cached_dataset.validation_len)
    print('validation size: %d' % len(test_data))

    reconstructor = IRadonMapReconstructor(
        ray_trafo=ray_trafo,
        log_dir='lodopab_200_iradonmap/' + str(size_part),
        save_best_learned_params_path=get_weights_path(
            'lodopab_200_iradonmap_{}'.format(size_part)))

    epochs = min(10 * full_size_epochs, int(1. / size_part * full_size_epochs))
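    # From here the original example is expected to evaluate the reconstructor in
    # the same way as the LearnedPD loop in Example #2. The block below is only a
    # sketch modeled on that example; the hyper-parameter names and values are
    # placeholders, not the settings actually used for IRadonMapReconstructor.
    task_table = TaskTable()
    task_table.append(reconstructor=reconstructor,
                      measures=[PSNR, SSIM],
                      test_data=test_data,
                      dataset=cached_dataset,
                      hyper_param_choices={
                          'batch_size': [1],
                          'epochs': [epochs],
                          'lr': [0.001]
                      })
    results = task_table.run()
    save_results_table(results, 'lodopab_200_iradonmap_{}'.format(size_part))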
Example #5

lodopab_cache_file_names = {
    'train': ['cache_train_lodopab_fbp.npy', None],
    'validation': ['cache_validation_lodopab_fbp.npy', None]
}

# load data
dataset = load_standard_dataset('lodopab', ordered=True)
ray_trafo = dataset.ray_trafo

sizes = [0.0001, 0.001, 0.01, 0.1, 1.00]
#sizes = [0.0001]
#sizes = [0.001]
#sizes = [0.01]
#sizes = [0.1]

for size_part in sizes:
    cached_dataset = CachedDataset(dataset,
                                   space=(ray_trafo.domain, ray_trafo.domain),
                                   cache_files=lodopab_cache_file_names,
                                   size_part=size_part)

    # create a Dival task table and run it
    task_table = TaskTable()
    test_data = dataset.get_data_pairs('validation',
                                       cached_dataset.validation_len)
    print('validation size: %d' % len(test_data))

    full_size_epochs = 250
    # scale the number of epochs by 1/size_part, but at most 100 times as
    # many epochs as for the full-size dataset
    epochs = min(100 * full_size_epochs,
                 int(1. / size_part * full_size_epochs))

    reconstructor = FBPUNetReconstructor(