Example no. 1
0
def load(mode, device=torch.device("cuda")):
    """Build the loss criterion for the given experiment mode.

    Args:
        mode: either "hdr" or "inpainting".
        device: torch device the loss module (and its VGG16 feature
            extractor) is moved to; defaults to CUDA.

    Returns:
        HDRLoss or InpaintingLoss, already moved to *device*.

    Raises:
        ValueError: if *mode* is not one of the two known modes.
    """
    # NOTE: the original assigned an unused local `criterion = None`; removed.
    if mode == "hdr":
        print_('\tExperiment running HDR loss.\n', bold=True)
        return HDRLoss(VGG16FeatureExtractor(), device).to(device)
    elif mode == "inpainting":
        print_('\tExperiment running inpainting loss.\n', bold=True)
        return InpaintingLoss(VGG16FeatureExtractor(), device).to(device)
    else:
        raise ValueError('unknown mode {}'.format(mode))
Example no. 2
0
def load(network, device):
    """Dynamically import and instantiate the model named by *network*.

    *network* is a dotted path "package.module.Name": everything after the
    last dot is the attribute to fetch, the rest is the module to import.
    The instantiated model is moved to *device* and its parameter count
    is printed (in millions).
    """
    last_dot = network.rfind('.')
    module_path = network[:last_dot]
    attr_name = network[last_dot + 1:]

    imported = importlib.import_module(module_path)
    factory = getattr(imported, attr_name)

    net = factory().to(device)

    # Total number of scalar parameters, reported in millions.
    num_params = sum(p.nelement() for p in net.parameters())
    print_('\tModel {} loaded. Model params = {:2.1f}M\n'.format(
        network, num_params / 1000000),
           bold=True)

    return net
Example no. 3
0
def load(args):
    """Build the dataset(s) for the experiment selected by *args.mode*.

    Returns (train_dataset, val_dataset) when args.train is truthy,
    otherwise a single test dataset. Raises ValueError for an unknown mode.
    """
    img_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=opt.MEAN, std=opt.STD)
    ])

    if args.mode == "hdr":
        print_('\tExperiment running on HDR data.\n', bold=True)

        if not args.train:
            im_dir, label_dir = get_data_directory(args.test_dir)
            return HDRDataset(im_dir, label_dir, img_transform, train=False)

        train_set = HDRDataset(os.path.join(args.train_dir, 'jpg'),
                               os.path.join(args.train_dir, 'bin'),
                               img_transform,
                               train=True)
        val_set = HDRDataset(os.path.join(args.val_dir, 'jpg'),
                             os.path.join(args.val_dir, 'bin'),
                             img_transform,
                             train=False)
        return train_set, val_set

    if args.mode == "inpainting":
        print_('\tExperiment running on inpainting data.\n', bold=True)

        if not args.train:
            return Places2(args.test_dir, img_transform, train=False)

        train_set = Places2(args.train_dir, img_transform, True)
        val_set = Places2(args.val_dir, img_transform, False)
        return train_set, val_set

    raise ValueError(
        'Unknown mode {}. Choose either hdr or inpainting.'.format(
            args.mode))
Example no. 4
0
def print_test_args(args):
    """Print a banner summarizing the test-time settings (magenta, 'm')."""
    banner = [
        "\n\n\t-------------------------------------------------------------------\n",
        "\t  HDR image reconstruction from a single exposure using deep CNNs\n\n",
        "\t  Settings\n",
        "\t  -------------------\n",
        "\t  Input image directory/file:     %s\n" % args.test_dir,
        "\t  Output directory:               %s\n" % args.out_dir,
        "\t  CNN weights:                    %s\n" % args.weights,
        "\t-------------------------------------------------------------------\n\n\n",
    ]
    for line in banner:
        print_(line, 'm')
Example no. 5
0
        for ext in ('*.png', '*.jpeg', '*.jpg'):
            images.extend(glob.glob(os.path.join(images_dir, ext)))
        images.sort()

        return images


if __name__ == '__main__':
    # Test-time entry point: parse CLI args and force evaluation mode.
    args = parser.parse_args()
    args.train = False  # inference only; training is disabled here
    print_test_args(args)

    # use GPU if available (unless --cpu was requested).
    device = torch.device(
        'cuda' if torch.cuda.is_available() and not args.cpu else 'cpu')
    print_('\tUsing device: {}.\n'.format(device))

    # create output directory.
    make_dirs(args.out_dir)

    # load test data.
    # NOTE(review): opt.MEAN / opt.STD come from a project config module not
    # visible in this chunk — presumably per-channel normalization stats.
    img_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=opt.MEAN, std=opt.STD)
    ])

    dataset = HDRTestDataset(args.test_dir, img_transform)
    # One image per batch: test images may have differing sizes.
    iterator_test_set = data.DataLoader(dataset, batch_size=1)
    print_('\tLoaded {} test images.\n'.format(len(dataset)))

    # NOTE(review): the script continues past this chunk; args.weights is
    # presumably loaded into the model further down — confirm there.
    model = SoftConvNotLearnedMaskUNet().to(device)