Beispiel #1
0
def eval_loss_metric(model, resultdict=None):
    """Evaluate ``model`` on every dataset in the module-level ``datasets``.

    Relies on the surrounding script's globals: ``datasets`` (iterable of
    dataset names), ``dataset_mode`` ('2afc' or 'jnd'), and the ``dl`` /
    ``dm`` modules.

    Parameters
    ----------
    model : object
        Must expose ``batch_size``, ``model_name`` and a ``forward`` callable.
    resultdict : dict, optional
        Nested mapping ``model_name -> dataset -> {'score', 'std'}``.
        A fresh ``defaultdict`` tree is created when omitted (None default
        avoids the shared-mutable-default pitfall).

    Returns
    -------
    dict
        ``resultdict``, updated in place.

    Raises
    ------
    NotImplementedError
        For ``dataset_mode == 'jnd'`` (not implemented / validated).
    ValueError
        For any other unrecognised ``dataset_mode`` (previously this fell
        through and crashed with a NameError on ``score`` at the print).
    """
    if resultdict is None:
        resultdict = defaultdict(lambda: defaultdict(lambda: {}))

    for dataset in datasets:
        data_loader = dl.CreateDataLoader(dataset,
                                          dataset_mode=dataset_mode,
                                          batch_size=model.batch_size)

        # evaluate model on data
        if dataset_mode == '2afc':
            (score,
             results_verbose) = dm.score_2afc_dataset(data_loader,
                                                      model.forward)
            scores = results_verbose['scores']
            resultdict[model.model_name][dataset]['score'] = scores.mean()
            resultdict[model.model_name][dataset]['std'] = scores.std()

            # Score a human achieves against the aggregate judgement:
            # p**2 + (1 - p)**2 for judgement fraction p.
            human_judgements = results_verbose['gts']
            human_scores = human_judgements**2 + (1 - human_judgements)**2
            resultdict['Human'][dataset]['score'] = human_scores.mean()
            resultdict['Human'][dataset]['std'] = human_scores.std()

        elif dataset_mode == 'jnd':
            # The original raised a bare Exception and then had unreachable
            # scoring code after it; the dead code is removed and the
            # exception narrowed (NotImplementedError is still an Exception,
            # so existing broad handlers keep working).
            raise NotImplementedError('not implemented / validated')

        else:
            raise ValueError('unknown dataset_mode: %s' % dataset_mode)

        # print results
        print(' Model [%s]  Dataset [%s]: %.2f' %
              (model.model_name, dataset, 100. * score))
    return resultdict
Beispiel #2
0
                 model_path=opt.model_path,
                 use_gpu=opt.use_gpu,
                 pnet_rand=opt.from_scratch,
                 pnet_tune=opt.train_trunk,
                 version=opt.version,
                 gpu_ids=opt.gpu_ids)

# Announce which model variant is under test. `opt`, `model`, `dl` and
# `dm` are bound earlier in the script (partially outside this chunk).
if (opt.model in ['net-lin', 'net']):
    print('Testing model [%s]-[%s]' % (opt.model, opt.net))
elif (opt.model in ['l2', 'ssim']):
    # Non-learned metrics are parameterised by colorspace, not network.
    print('Testing model [%s]-[%s]' % (opt.model, opt.colorspace))

# initialize data loader
for dataset in opt.datasets:
    # One loader per evaluation dataset.
    data_loader = dl.CreateDataLoader(dataset,
                                      dataset_mode=opt.dataset_mode,
                                      batch_size=opt.batch_size,
                                      nThreads=opt.nThreads)

    # evaluate model on data
    if (opt.dataset_mode == '2afc'):
        # Two-alternative forced choice scoring.
        (score, results_verbose) = dm.score_2afc_dataset(data_loader,
                                                         model.forward,
                                                         name=dataset)
    elif (opt.dataset_mode == 'jnd'):
        # Just-noticeable-difference scoring.
        (score, results_verbose) = dm.score_jnd_dataset(data_loader,
                                                        model.forward,
                                                        name=dataset)

    # print results
    # NOTE(review): if dataset_mode is neither '2afc' nor 'jnd', `score`
    # is unbound and this line raises NameError — consider an explicit else.
    print('  Dataset [%s]: %.2f' % (dataset, 100. * score))
Beispiel #3
0
    os.mkdir(opt.save_dir)

# initialize model
# DistModel wraps the perceptual-distance network in training mode.
model = dm.DistModel()
model.initialize(model=opt.model,
                 net=opt.net,
                 use_gpu=opt.use_gpu,
                 is_train=True,
                 pnet_rand=opt.from_scratch,
                 pnet_tune=opt.train_trunk,
                 gpu_ids=opt.gpu_ids)

# load data from all training sets
data_loader = dl.CreateDataLoader(opt.datasets,
                                  dataset_mode='2afc',
                                  batch_size=opt.batch_size,
                                  serial_batches=False,
                                  nThreads=opt.nThreads)
dataset = data_loader.load_data()
dataset_size = len(data_loader)  # total instance count (see print below)
# NOTE(review): presumably the number of batches; D is not used in this chunk.
D = len(dataset)
print('Loading %i instances from' % dataset_size, opt.datasets)
visualizer = Visualizer(opt)

total_steps = 0
# Training log file; 'w+' truncates any existing log.
fid = open(os.path.join(opt.checkpoints_dir, opt.name, 'train_log.txt'), 'w+')
# Train for nepoch full-LR epochs plus nepoch_decay further epochs
# (any decay schedule would be applied elsewhere — not visible here).
for epoch in range(1, opt.nepoch + opt.nepoch_decay + 1):
    epoch_start_time = time.time()
    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        total_steps += opt.batch_size
    # NOTE(review): the snippet changes abruptly below — these lines
    # belong to a different example (a "Beispiel #4" header is missing
    # from the scrape), originally the body of an unseen function.
    is_cuda = torch.cuda.is_available()
    device = "cuda" if is_cuda else "cpu"

    # load dataset
    # Validation splits of the BAPPS-style perceptual-similarity benchmark.
    datasets = [
        "val/traditional",
        "val/cnn",
        "val/superres",
        "val/deblur",
        "val/color",
        "val/frameinterp",
    ]
    dataset_mode = "2afc"
    data_loader = dl.CreateDataLoader(
        datasets,
        dataroot="../perceptual_sim_training/dataset",
        dataset_mode=dataset_mode,
        batch_size=1,
    ).load_data()

    # load functions to evaluate
    # Each metric is a loss module moved onto the selected device.
    lp = LossProvider()
    metrics = {}
    metrics["Watson-fft"] = lp.get_loss_function("Watson-fft",
                                                 reduction="sum").to(device)
    metrics["L2"] = lp.get_loss_function("L2", reduction="sum").to(device)
    metrics["SSIM"] = lp.get_loss_function("SSIM", reduction="sum").to(device)
    metrics["Deeploss-vgg"] = lp.get_loss_function("Deeploss-vgg",
                                                   reduction="sum").to(device)

    # set-up output file
    # Accumulator: each key maps to a growing list of values.
    out_data = defaultdict(lambda: [])
Beispiel #5
0
# Checkpoint directory for this run; created if missing.
opt.save_dir = os.path.join(opt.checkpoints_dir,opt.name)
if(not os.path.exists(opt.save_dir)):
    os.mkdir(opt.save_dir)

print(opt)

# Pre-computed PSNR table, loaded as a numpy array (column 0 is the index).
psnr_df = pd.read_csv(os.path.join(opt.csv_path, 'psnr.csv'), index_col=0)
psnrs = psnr_df.to_numpy()

# initialize model
trainer = lpips.Trainer()
trainer.initialize(model=opt.model, net=opt.net, use_gpu=opt.use_gpu, is_train=True, 
    pnet_rand=opt.from_scratch, pnet_tune=opt.train_trunk, gpu_ids=opt.gpu_ids, lr=opt.lr)

# load data from all training sets
data_loader = dl.CreateDataLoader(opt, opt.datasets,dataset_mode=opt.dataset_mode, load_size=opt.load_size, batch_size=opt.batch_size, serial_batches=False, nThreads=opt.nThreads)
dataset = data_loader.load_data()
dataset_size = len(data_loader)  # total instance count (see print below)
# NOTE(review): presumably the number of batches; D is unused in this chunk.
D = len(dataset)
print('Loading %i instances from'%dataset_size,opt.datasets)
visualizer = Visualizer(opt)

if opt.dataset_mode == 'tnn':
    # data_loader_val = dl.CreateDataLoader(opt,'val', dataset_mode=opt.dataset_mode, load_size=opt.load_size, batch_size=opt.batch_size, serial_batches=False, nThreads=opt.nThreads)
    # Separate test-split loader used only for the 'tnn' dataset mode.
    data_loader_test = dl.CreateDataLoader(opt, 'test', dataset_mode=opt.dataset_mode, load_size=opt.load_size, batch_size=opt.batch_size, nThreads=opt.nThreads)

total_steps = 0
# Training log file; 'w+' truncates any existing log.
fid = open(os.path.join(opt.checkpoints_dir,opt.name,'train_log.txt'),'w+')

if opt.dataset_mode == 'tnn':
    # NOTE(review): switching the trainer to eval mode in a training
    # script looks unusual — confirm this is intentional for 'tnn'.
    trainer.set_eval()
Beispiel #6
0
# initialize model
model = dm.DistModel()
# model.initialize(model=opt.model,net=opt.net,use_gpu=opt.use_gpu, is_train=True)
model.initialize(
    model=opt.model,
    net=opt.net,
    use_gpu=opt.use_gpu,
    is_train=True,
    pnet_rand=opt.from_scratch,
    pnet_tune=opt.train_trunk,
)

# load data from all training sets
data_loader = dl.CreateDataLoader(
    opt.datasets, dataset_mode="2afc", batch_size=opt.batch_size, serial_batches=False
)
dataset = data_loader.load_data()
dataset_size = len(data_loader)  # total instance count (see print below)
# NOTE(review): presumably the number of batches; D is unused in this chunk.
D = len(dataset)
print("Loading %i instances from" % dataset_size, opt.datasets)
visualizer = Visualizer(opt)

total_steps = 0
# Training log file; "w+" truncates any existing log.
fid = open(os.path.join(opt.checkpoints_dir, opt.name, "train_log.txt"), "w+")
# nepoch full-LR epochs plus nepoch_decay further epochs (any decay
# schedule is applied elsewhere — not visible in this chunk).
for epoch in range(1, opt.nepoch + opt.nepoch_decay + 1):
    epoch_start_time = time.time()
    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        total_steps += opt.batch_size
        # Steps completed within the current epoch (loop truncated here).
        epoch_iter = total_steps - dataset_size * (epoch - 1)