Example no. 1
import os
import sys
import os.path as osp

import torch
import torch.backends.cudnn as cudnn

# args, Logger, DataManager, Model, and test are provided elsewhere in the project.
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    # route console output to a log file under args.save_dir
    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")
        
    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)
    # load trained weights from the checkpoint given by args.resume
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    print("Loaded checkpoint from '{}'".format(args.resume))

    if use_gpu:
        model = model.cuda()

    test(model, testloader, use_gpu)
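
Example no. 1 never shows how `args` is built. Below is a minimal argparse sketch covering only the attributes the script reads (seed, gpu_devices, save_dir, resume, scale_cls, num_classes); every flag name, type, and default is an assumption rather than the original project's configuration.

import argparse

# Hypothetical argument parser; flags mirror the attributes read in main() above.
parser = argparse.ArgumentParser(description='evaluation script (sketch)')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--gpu-devices', type=str, default='0')
parser.add_argument('--save-dir', type=str, default='./log')
parser.add_argument('--resume', type=str, default='', help='path to a saved checkpoint')
parser.add_argument('--scale-cls', type=int, default=7)
parser.add_argument('--num-classes', type=int, default=64)
args = parser.parse_args()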
Example no. 2
        # Tail of the per-sample loop inside test(): append one "index output label"
        # line to the results file. Note that reopening the file on every iteration
        # leaks file handles; opening it once before the loop is cleaner.
        outputfile = open(
            '/home/xulzee/Documents/IQA/output/TID2013/vr_jpeg_result.txt',
            'a+')
        outputfile.write(
            '{} {:.7f} {:.7f}\r\n'.format(i, output_txt[0], label_txt[0]))

    outputfile.close()


import torch
from torch.utils.data import DataLoader

# Module-level setup; Model and MyDataset are defined elsewhere in the project.
use_gpu = torch.cuda.is_available()
model = Model()
print('Model structure:', model)

if use_gpu:
    model = model.cuda()

model_weights_file = '/home/xulzee/Documents/IQA/output/TID2013/79-0.0015128param.pth'
model.load_state_dict(torch.load(model_weights_file))
print('load weights from', model_weights_file)

test_dataset = MyDataset(
    data_file='/home/xulzee/Documents/IQA/vr_jpeg.h5')  # test dataset
test_dataloader = DataLoader(dataset=test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=0)

if __name__ == '__main__':
    test()
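
The body of test() is truncated in Example no. 2; only the tail of its loop and the final close() survive. Below is a minimal sketch of the kind of loop that tail appears to belong to, reusing the module-level model, test_dataloader, and use_gpu defined above; the variable names inside the loop and the choice to open the results file once before the loop (instead of reopening it each iteration) are assumptions, not the original implementation.

import torch

def test():
    # Evaluate the model on the test set and append one "index output label"
    # line per sample to the results file (batch_size is 1 in the DataLoader).
    model.eval()
    outputfile = open(
        '/home/xulzee/Documents/IQA/output/TID2013/vr_jpeg_result.txt', 'a+')
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(test_dataloader):
            if use_gpu:
                inputs, labels = inputs.cuda(), labels.cuda()
            outputs = model(inputs)
            output_txt = outputs.view(-1).tolist()
            label_txt = labels.view(-1).tolist()
            outputfile.write(
                '{} {:.7f} {:.7f}\r\n'.format(i, output_txt[0], label_txt[0]))
    outputfile.close()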