Exemplo n.º 1
0
def main():
    """Train a reconstructor on a cached dataset and save the results table."""
    # parse command-line options
    options = get_parser().parse_args()

    dataset = load_standard_dataset(options.dataset, ordered=True)
    ray_trafo = dataset.ray_trafo

    # default: the network input lives in the observation (range) space and
    # no precomputed cache files are used
    input_space = ray_trafo.range
    cache_files = {
        'train': [None, None],
        'validation': [None, None]
    }

    # fbpunet consumes FBP images instead of sinograms, so switch the input
    # space to the image domain and point at the precomputed FBP caches
    if options.method == 'fbpunet':
        input_space = ray_trafo.domain
        cache_files = {
            'train': ['cache_train_{}_fbp.npy'.format(options.dataset), None],
            'validation': [
                'cache_validation_{}_fbp.npy'.format(options.dataset), None]
        }

    cached_dataset = CachedDataset(
        dataset,
        space=(input_space, ray_trafo.domain),
        cache_files=cache_files,
        size_part=options.size_part)

    test_data = dataset.get_data_pairs('validation',
                                       cached_dataset.validation_len)
    print('validation size: %d' % len(test_data))

    reconstructor = get_reconstructor(options.method,
                                      dataset=options.dataset,
                                      size_part=options.size_part,
                                      pretrained=False)
    print(reconstructor.hyper_params)

    full_name = '{}_{}_{}'.format(
        options.dataset, options.method, options.size_part)
    print(full_name)
    reconstructor.save_best_learned_params_path = get_weights_path(full_name)
    reconstructor.log_dir = options.log_dir
    reconstructor.num_data_loader_workers = 16

    # run training/evaluation through a Dival task table
    task_table = TaskTable()
    task_table.append(
        reconstructor=reconstructor,
        measures=[PSNR, SSIM],
        test_data=test_data,
        dataset=cached_dataset,
        hyper_param_choices=[reconstructor.hyper_params]
    )
    results = task_table.run()

    # persist the results under the run's full name
    save_results_table(results, full_name)
Exemplo n.º 2
0
def generate_cache(dataset_name, file_names, frequency_scaling=1.0, only_fbp=False):
    """Precompute cache files for the train and validation parts of a dataset.

    :param dataset_name: Name of the standard dataset to cache
    :param file_names: Mapping from part name to its list of cache file names
    :param frequency_scaling: Frequency scaling passed to the FBP filter
    :param only_fbp: If True, only the FBP images are cached
    """
    dataset = load_standard_dataset(dataset_name, ordered=False)
    ray_trafo = dataset.ray_trafo
    for part in ('train', 'validation'):
        generate_dataset_cache(dataset=dataset,
                               part=part,
                               ray_trafo=ray_trafo,
                               file_names=file_names[part],
                               frequency_scaling=frequency_scaling,
                               filter_type='Hann',
                               only_fbp=only_fbp)
Exemplo n.º 3
0
def tvadam_reconstructor(dataset='ellipses', name=None):
    """
    Build a TV-Adam reconstructor with stored hyper-parameters.

    :param dataset: Can be 'ellipses' or 'lodopab'
    :param name: Display name for the reconstructor; defaults to 'TV-Adam'
    :return: TV reconstructor for the specified dataset
    :raises Exception: if the stored hyper-parameters or dataset cannot be
        loaded; the original error is attached as the cause
    """
    if name is None:
        name = 'TV-Adam'
    try:
        params = Params.load('{}_tvadam'.format(dataset))
        standard_dataset = load_standard_dataset(dataset)
        return TVAdamReconstructor(standard_dataset.ray_trafo,
                                   hyper_params=params.dict,
                                   name=name)
    except Exception as e:
        # chain the underlying error instead of discarding it, so the real
        # cause (missing params file, unknown dataset, ...) stays visible
        raise Exception('The reconstructor doesn\'t exist') from e
Exemplo n.º 4
0
def fbp_reconstructor(dataset='ellipses', name=None):
    """
    Build a filtered back projection reconstructor with stored hyper-parameters.

    :param dataset: Can be 'ellipses' or 'lodopab'
    :param name: Display name for the reconstructor; defaults to 'FBP'
    :return: Filtered back projection reconstructor for the specified dataset
    :raises Exception: if the stored hyper-parameters or dataset cannot be
        loaded; the original error is attached as the cause
    """
    if name is None:
        name = 'FBP'
    try:
        params = Params.load('{}_fbp'.format(dataset))
        standard_dataset = load_standard_dataset(dataset)
        return FBPReconstructor(standard_dataset.ray_trafo,
                                hyper_params=params.dict,
                                name=name)
    except Exception as e:
        # chain the underlying error instead of discarding it, so the real
        # cause (missing params file, unknown dataset, ...) stays visible
        raise Exception('The reconstructor doesn\'t exist') from e
Exemplo n.º 5
0
def dip_reconstructor(dataset='ellipses', name=None):
    """
    Build a Deep Image Prior (DIP) reconstructor with stored hyper-parameters.

    :param dataset: Can be 'ellipses' or 'lodopab'
    :param name: Display name for the reconstructor; defaults to 'DIP'
    :return: The Deep Image Prior (DIP) method for the specified dataset
    :raises Exception: if the stored hyper-parameters or dataset cannot be
        loaded; the original error is attached as the cause
    """
    if name is None:
        name = 'DIP'
    try:
        standard_dataset = load_standard_dataset(dataset)
        params = Params.load('{}_dip'.format(dataset))
        return DeepImagePriorReconstructor(standard_dataset.ray_trafo,
                                           hyper_params=params.dict,
                                           name=name)
    except Exception as e:
        # chain the underlying error instead of discarding it, so the real
        # cause (missing params file, unknown dataset, ...) stays visible
        raise Exception('The reconstructor doesn\'t exist') from e
Exemplo n.º 6
0
def main():
    """Evaluate a pretrained reconstructor on a slice of the test set."""
    options = get_parser().parse_args()
    # options.dataset = 'lodopab' | 'ellipses' | 'lodopab-sparse'

    dataset = load_standard_dataset(options.dataset)
    test_data = dataset.get_data_pairs('test', 100)

    # split pairs into observations and ground truths, then take the
    # requested [start, start + count) window
    observations = [pair[0] for pair in test_data]
    ground_truths = [pair[1] for pair in test_data]
    start = options.start
    count = len(test_data) if options.count is None else options.count
    test_data = DataPairs(observations[start:start + count],
                          ground_truths[start:start + count],
                          name='test')

    # load reconstructor
    reconstructor = get_reconstructor(options.method, options.dataset,
                                      options.size_part)

    # report the evaluation setup
    print('Reconstructor: %s' % options.method)
    print('Dataset: %s' % options.dataset)
    print('Offset: %d' % start)
    print('Count: %d' % count)

    # evaluate via a Dival task table, without retraining
    task_table = TaskTable()
    task_table.append(reconstructor=reconstructor,
                      measures=[PSNR, SSIM],
                      test_data=test_data,
                      options={'skip_training': True})
    task_table.run()

    print(task_table.results.to_string(show_columns=['misc']))

    # include the size_part in the report name only when one was given
    if options.size_part is None:
        save_path = '{}_{}_eval'.format(options.dataset, options.method)
    else:
        save_path = '{}_{}_{}_eval'.format(options.dataset, options.method,
                                           options.size_part)
    save_path += '_offset_%d' % start

    save_results_table(task_table.results, save_path)
Exemplo n.º 7
0
# Timing script: compare wall-clock reconstruction time of
# Learned PD + DIP against DIP+TV on one ellipses test sample.
import time

from dliplib.reconstructors import learnedpd_dip_reconstructor, diptv_reconstructor
from dliplib.utils.helper import load_standard_dataset, set_use_latex

set_use_latex()

# load data: first observation/ground-truth pair of the 100-sample test set
dataset = load_standard_dataset('ellipses')
test_data = dataset.get_data_pairs('test', 100)
obs, gt = test_data[0]

# training-set fraction the Learned PD model was trained on
data_size = 0.001

print('Eval: Learned PD ({}%) + DIP'.format(data_size * 100))
reconstructor = learnedpd_dip_reconstructor('ellipses', data_size)

# time a single Learned PD + DIP reconstruction
t_start = time.time()
reco1 = reconstructor.reconstruct(obs)
t_end = time.time()

print('Elapsed time: %d s' % (t_end - t_start))

print('Eval: DIP+TV')
# load hyper-parameters and create reconstructor
reconstructor = diptv_reconstructor('ellipses')

# time a single DIP+TV reconstruction
# NOTE(review): the elapsed time of this second run is never printed here —
# possibly printed further down, outside this excerpt
t_start = time.time()
reco2 = reconstructor.reconstruct(obs)
t_end = time.time()
def main():
    """Train LearnedPD on lodopab for several training-set sizes and save
    the per-size results and best hyper-parameters."""
    # load data
    options = get_parser().parse_args()

    dataset = load_standard_dataset('lodopab', ordered=True)
    ray_trafo = dataset.ray_trafo

    # NOTE(review): global_results is never appended to or read in this
    # function — appears to be dead code
    global_results = []

    # fractions of the full training set to sweep over
    sizes = [0.001, 0.01, 0.1, 1.00]
    #sizes = [0.0001]
    #sizes = [0.001]
    #sizes = [0.01]
    #sizes = [0.1]

    for size_part in sizes:
        # no cache files: observation/ground-truth pairs are produced on
        # the fly from the underlying dataset
        cached_dataset = CachedDataset(dataset,
                                       space=(ray_trafo.range,
                                              ray_trafo.domain),
                                       cache_files={
                                           'train': [None, None],
                                           'validation': [None, None]
                                       },
                                       size_part=size_part)

        test_data = dataset.get_data_pairs('validation',
                                           cached_dataset.validation_len)
        print('validation size: %d' % len(test_data))

        # larger subsets (>= 10%) get fewer base epochs and a smaller
        # learning rate than the small-data runs
        full_size_epochs = 10 if size_part >= 0.10 else 5
        lr = 0.0001 if size_part >= 0.10 else 0.001
        # scale number of epochs by 1/size_part, but maximum 1000 times as many
        # epochs as for full size dataset
        epochs = min(1000 * full_size_epochs,
                     int(1. / size_part * full_size_epochs))

        reconstructor = LearnedPDReconstructor(
            ray_trafo,
            log_dir='lodopab_learnedpd_{}'.format(size_part),
            save_best_learned_params_path=get_weights_path(
                'lodopab_learnedpd_{}'.format(size_part)))

        # create a Dival task table and run it
        task_table = TaskTable()
        task_table.append(reconstructor=reconstructor,
                          measures=[PSNR, SSIM],
                          test_data=test_data,
                          dataset=cached_dataset,
                          hyper_param_choices={
                              'batch_size': [1],
                              'epochs': [epochs],
                              'niter': [10],
                              'internal_ch': [64 if size_part >= 0.10 else 32],
                              'lr': [lr],
                              'lr_min': [lr],
                              'init_fbp': [True],
                              'init_frequency_scaling': [0.7]
                          })
        results = task_table.run()

        # save report
        save_results_table(results, 'lodopab_learnedpd_{}'.format(size_part))

        # select best parameters and save them
        best_choice, best_error = select_hyper_best_parameters(results)
        params = Params(best_choice)
        params.save('lodopab_learnedpd_{}'.format(size_part))
import numpy as np
from dival import TaskTable
from dival.measure import PSNR, SSIM

from dliplib.utils import Params
from dliplib.utils.helper import select_hyper_best_parameters
from dliplib.utils.helper import load_standard_dataset
from dliplib.reconstructors.dip import DeepImagePriorReconstructor
from dliplib.utils.reports import save_results_table

# load data: 4 validation pairs from the 200-angle lodopab variant
dataset = load_standard_dataset('lodopab_200')
test_data = dataset.get_data_pairs('validation', 4)

task_table = TaskTable()

# create the reconstructor (DIP, hyper-parameters chosen via the grid below)
reconstructor = DeepImagePriorReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.0007],
                      'scales': [6],
                      'gamma':
                      np.linspace(2, 7, 10),
                      'channels': [(128, ) * 6],
                      'skip_channels': [(0, 0, 0, 0, 4, 4)],
                      'iterations':
Exemplo n.º 10
0
import numpy as np
from dival import TaskTable
from dival.measure import PSNR, SSIM

from dliplib.utils import Params
from dliplib.utils.helper import select_hyper_best_parameters, load_standard_dataset
from dliplib.reconstructors.dip import DeepImagePriorReconstructor
from dliplib.utils.reports import save_results_table

# load data: 2 validation pairs from the standard lodopab dataset
dataset = load_standard_dataset('lodopab')
test_data = dataset.get_data_pairs('validation', 2)

task_table = TaskTable()

# create the reconstructor (DIP, hyper-parameters chosen via the grid below)
reconstructor = DeepImagePriorReconstructor(dataset.ray_trafo)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr': [0.001, 0.0005, 0.0001],
                      'scales': [5],
                      'gamma': [0.0],
                      'channels': [(64, ) * 5, (128, ) * 5],
                      'skip_channels': [(0, 0, 0, 0, 0), (0, 0, 0, 0, 4),
                                        (0, 0, 0, 4, 4)],
                      'iterations':
                      [3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000],
import numpy as np
from dival import TaskTable
from dival.measure import PSNR, SSIM

from dliplib.reconstructors import learnedpd_dip_reconstructor
from dliplib.utils import Params
from dliplib.utils.data.datasets import CachedDataset
from dliplib.utils.helper import select_hyper_best_parameters, load_standard_dataset
from dliplib.utils.reports import save_results_table

# load data: 5 training pairs from the 200-angle lodopab variant
dataset = load_standard_dataset('lodopab_200', ordered=True)
test_data = dataset.get_data_pairs('train', 5)

task_table = TaskTable()
# training-set fraction the Learned PD model was trained on
size_part = 0.0001

# load reconstructor
reconstructor = learnedpd_dip_reconstructor('lodopab_200', size_part)

# create a Dival task table and run it
task_table.append(reconstructor=reconstructor,
                  measures=[PSNR, SSIM],
                  test_data=test_data,
                  hyper_param_choices={
                      'lr1': [0.0005],
                      'lr2': [1e-6],
                      'scales': [6],
                      'gamma': [2.5, 3.0, 4.0],
                      'channels': [(128, ) * 6],
                      'skip_channels': [(0, 0, 0, 0, 4, 4)],
                      'initial_iterations': [5000, 9750],