Code example #1
def which_network(network_name, **kwargs):
    # defining mean and std based on the number of input channels
    if kwargs['in_chns'] == 3:
        mean, std = get_preprocessing_function('rgb')
    elif kwargs['in_chns'] == 1:
        mean, std = get_preprocessing_function('grey')
    else:
        mean, std = get_preprocessing_function(kwargs['in_chns'])
    mean_std = [mean, std]
    # checking whether it's an architecture or a network
    if os.path.isfile(network_name):
        checkpoint = torch.load(network_name, map_location='cpu')
        architecture = checkpoint['arch']
        if 'kwargs' in checkpoint:
            kwargs = checkpoint['kwargs']
        network = which_architecture(architecture, **kwargs)
        network.load_state_dict(checkpoint['state_dict'])
        if 'mean_std' in checkpoint:
            mean_std = checkpoint['mean_std']
    else:
        network = which_architecture(network_name, **kwargs)
        architecture = network_name
    return network, architecture, mean_std
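A minimal usage sketch, with hypothetical paths and keyword values ('resnet_basic_custom' is one of the custom architectures named elsewhere in this code):

# instantiating a fresh architecture by name
network, architecture, mean_std = which_network('resnet_basic_custom', in_chns=3)

# restoring a trained network from a checkpoint file; arch, kwargs and
# mean_std are then taken from the checkpoint itself when present
network, architecture, mean_std = which_network('/path/to/checkpoint.pth', in_chns=3)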
Code example #2
def get_transform(train,
                  colour_vision,
                  colour_space,
                  target_size=480,
                  other_tf=None):
    # the original version uses a base_size of 520 instead of target_size
    min_size = int((0.5 if train else 1.0) * target_size)
    max_size = int((2.0 if train else 1.0) * target_size)
    transforms = [T.RandomResize(min_size, max_size)]
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
        transforms.append(T.RandomCrop(target_size))

    if other_tf is not None:
        # extend rather than append: other_tf is a list of transforms
        transforms.extend(other_tf)

    colour_transformation = preprocessing.colour_transformation(
        colour_vision, colour_space)
    # take index 0 because the function returns a list
    transforms.append(T.ColourTransformation(colour_transformation[0]))

    transforms.append(T.ToTensor())

    mean, std = get_preprocessing_function(colour_space, colour_vision)
    transforms.append(T.Normalize(mean=mean, std=std))

    return T.Compose(transforms)
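As a hedged usage example ('trichromat' and 'rgb' are values that appear elsewhere in this code), the same function builds both pipelines:

train_tf = get_transform(train=True, colour_vision='trichromat',
                         colour_space='rgb', target_size=480)
valid_tf = get_transform(train=False, colour_vision='trichromat',
                         colour_space='rgb', target_size=480)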
Code example #3
def main(args):
    args = parse_arguments(args)
    if args.model_path is None:
        args.model_path = args.model_name
    colour_space = args.colour_space
    target_size = args.target_size

    # loading the model
    is_segmentation = False
    if 'deeplabv3_resnet' in args.model_name or 'fcn_resnet' in args.model_name:
        is_segmentation = True
    transfer_weights = [args.model_path, None, is_segmentation]
    model = pretrained_models.get_pretrained_model(args.model_name,
                                                   transfer_weights)

    # selecting the layer
    model = pretrained_models.LayerActivation(
        pretrained_models.get_backbones(args.model_name, model),
        args.activation_layer)
    model = model.eval()
    model.cuda()

    mean, std = model_utils.get_preprocessing_function(colour_space,
                                                       'trichromat')
    transform = torch_transforms.Compose([
        cv2_transforms.ToTensor(),
        cv2_transforms.Normalize(mean, std),
    ])

    if args.task == '2afc':
        default_dist = DISTORTIONS_2AFC
        db_class = image_quality.BAPPS2afc
        run_fun = run_2afc
    else:
        default_dist = DISTORTIONS_JND
        db_class = image_quality.BAPPSjnd
        run_fun = run_jnd
    distortions = default_dist if args.distortion is None else [
        args.distortion
    ]

    eval_results = dict()
    for dist in distortions:
        print('Starting with %s' % dist)
        db = db_class(root=args.db_dir,
                      split=args.split,
                      distortion=dist,
                      transform=transform)
        db_loader = torch.utils.data.DataLoader(db,
                                                batch_size=args.batch_size,
                                                shuffle=False,
                                                num_workers=args.workers,
                                                pin_memory=True)
        print_val = dist if args.print else None
        eval_results[dist] = run_fun(db_loader, model, print_val)
    save_results(eval_results, args.out_file)
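Since main receives the raw argument list and calls parse_arguments itself, it can be invoked argv-style; the flag spellings below are assumptions inferred from the attributes read off args:

# hypothetical invocation; every flag name here is an assumption
main(['--model_name', 'resnet50', '--db_dir', '/path/to/bapps',
      '--task', '2afc', '--batch_size', '16', '--workers', '4',
      '--out_file', 'results_2afc.pkl'])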
Code example #4
def main(args):
    args = parse_arguments(args)
    if args.model_path is None:
        args.model_path = args.model_name
    colour_space = args.colour_space
    target_size = args.target_size

    # loading the model
    is_segmentation = False
    if 'deeplabv3_resnet' in args.model_name or 'fcn_resnet' in args.model_name:
        is_segmentation = True
    transfer_weights = [args.model_path, None, is_segmentation]
    model = pretrained_models.get_pretrained_model(args.model_name,
                                                   transfer_weights)

    # selecting the layer
    model = pretrained_models.LayerActivation(
        pretrained_models.get_backbones(args.model_name, model),
        args.activation_layer)
    model = model.eval()
    model.cuda()

    mean, std = model_utils.get_preprocessing_function(colour_space,
                                                       'trichromat')
    transform = torch_transforms.Compose([
        cv2_transforms.ToTensor(),
        cv2_transforms.Normalize(mean, std),
    ])

    parts = PARTS if args.part is None else [args.part]

    eval_results = dict()
    for part in parts:
        print('Starting with %s' % part)
        db = image_quality.LIVE(root=args.db_dir,
                                part=part,
                                transform=transform)
        db_loader = torch.utils.data.DataLoader(db,
                                                batch_size=args.batch_size,
                                                shuffle=False,
                                                num_workers=args.workers,
                                                pin_memory=True)
        print_val = part if args.print else None
        eval_results[part] = run_live(db_loader, model, print_val)
    save_results(eval_results, args.out_file)
Code example #5
def main_worker(ngpus_per_node, args):
    mean, std = model_utils.get_preprocessing_function(args.colour_space,
                                                       args.vision_type)

    if args.gpus is not None:
        print("Use GPU: {} for training".format(args.gpus))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + args.gpus
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    # create model
    if args.transfer_weights is not None:
        print('Transferred model!')
        (model, _) = model_utils.which_network(args.transfer_weights[0],
                                               args.task_type,
                                               num_classes=args.old_classes)
        which_layer = -1
        if len(args.transfer_weights) == 2:
            which_layer = args.transfer_weights[1]
        model = model_utils.NewClassificationModel(model, which_layer,
                                                   args.num_classes)
    elif args.custom_arch:
        print('Custom model!')
        supported_customs = ['resnet_basic_custom', 'resnet_bottleneck_custom']
        if os.path.isfile(args.network_name):
            checkpoint = torch.load(args.network_name, map_location='cpu')
            customs = None
            if 'customs' in checkpoint:
                customs = checkpoint['customs']
                # TODO: num_classes is just for backward compatibility
                if 'num_classes' not in customs:
                    customs['num_classes'] = 1000
            model = which_architecture(checkpoint['arch'], customs,
                                       args.contrast_head)
            args.network_name = checkpoint['arch']

            model.load_state_dict(checkpoint['state_dict'], strict=False)
        elif args.network_name in supported_customs:
            model = custom_models.__dict__[args.network_name](
                args.blocks,
                contrast_head=args.contrast_head,
                pooling_type=args.pooling_type,
                in_chns=len(mean),
                num_classes=args.num_classes,
                inplanes=args.num_kernels,
                kernel_size=args.kernel_size)
    elif args.pretrained:
        print("=> using pre-trained model '{}'".format(args.network_name))
        model = models.__dict__[args.network_name](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.network_name))
        model = models.__dict__[args.network_name]()

    # TODO: why is load_weights False?
    args.out_dir = prepare_training.prepare_output_directories(
        dataset_name='contrast',
        network_name=args.network_name,
        optimiser='sgd',
        load_weights=False,
        experiment_name=args.experiment_name,
        framework='pytorch')
    # preparing the output folder
    create_dir(args.out_dir)
    json_file_name = os.path.join(args.out_dir, 'args.json')
    with open(json_file_name, 'w') as fp:
        json.dump(dict(args._get_kwargs()), fp, sort_keys=True, indent=4)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpus is not None:
            torch.cuda.set_device(args.gpus)
            model.cuda(args.gpus)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpus])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpus is not None:
        torch.cuda.set_device(args.gpus)
        model = model.cuda(args.gpus)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if (args.network_name.startswith('alexnet')
                or args.network_name.startswith('vgg')):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpus)

    # optimiser
    if args.transfer_weights is None:
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        # for p in model.features.parameters():
        #     p.requires_grad = False
        params_to_optimize = [
            {
                'params': [p for p in model.features.parameters()],
                'lr': 1e-6
            },
            {
                'params': [p for p in model.fc.parameters()]
            },
        ]
        optimizer = torch.optim.SGD(params_to_optimize,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    model_progress = []
    model_progress_path = os.path.join(args.out_dir, 'model_progress.csv')
    # optionally resume from a checkpoint
    # TODO: it would be best if resume loaded the architecture from this file
    # TODO: merge with which_architecture
    best_acc1 = 0
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cpu')
            args.initial_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            if args.gpus is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpus)
                model = model.cuda(args.gpus)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            if os.path.exists(model_progress_path):
                model_progress = np.loadtxt(model_progress_path, delimiter=',')
                model_progress = model_progress.tolist()
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    train_trans = []
    valid_trans = []
    both_trans = []
    if args.mosaic_pattern is not None:
        mosaic_trans = preprocessing.mosaic_transformation(args.mosaic_pattern)
        both_trans.append(mosaic_trans)

    if args.num_augmentations != 0:
        augmentations = preprocessing.random_augmentation(
            args.augmentation_settings, args.num_augmentations)
        train_trans.append(augmentations)

    target_size = default_configs.get_default_target_size(
        args.dataset, args.target_size)

    # loading the training set
    train_trans = [*both_trans, *train_trans]
    db_params = {
        'colour_space': args.colour_space,
        'vision_type': args.vision_type,
        'mask_image': args.mask_image
    }
    if args.dataset in ['imagenet', 'celeba', 'natural']:
        path_or_sample = args.data_dir
    else:
        path_or_sample = args.train_samples
    train_dataset = dataloader.train_set(args.dataset,
                                         target_size,
                                         mean,
                                         std,
                                         extra_transformation=train_trans,
                                         data_dir=path_or_sample,
                                         **db_params)
    if args.dataset == 'natural':
        train_dataset.num_crops = args.batch_size
        args.batch_size = 1

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    # loading validation set
    valid_trans = [*both_trans, *valid_trans]
    validation_dataset = dataloader.validation_set(
        args.dataset,
        target_size,
        mean,
        std,
        extra_transformation=valid_trans,
        data_dir=path_or_sample,
        **db_params)
    if args.dataset == 'natural':
        validation_dataset.num_crops = train_dataset.num_crops
        args.batch_size = 1

    val_loader = torch.utils.data.DataLoader(validation_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # training loop over epochs
    for epoch in range(args.initial_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        misc_utils.adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train_log = train_on_data(train_loader, model, criterion, optimizer,
                                  epoch, args)

        # evaluate on validation set
        validation_log = validate_on_data(val_loader, model, criterion, args)

        model_progress.append([*train_log, *validation_log])

        # remember best acc@1 and save checkpoint
        acc1 = validation_log[2]
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if misc_utils.is_saving_node(args.multiprocessing_distributed,
                                     args.rank, ngpus_per_node):
            misc_utils.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.network_name,
                    'customs': {
                        'pooling_type': args.pooling_type,
                        'in_chns': len(mean),
                        'num_classes': args.num_classes,
                        'blocks': args.blocks,
                        'num_kernels': args.num_kernels,
                        'kernel_size': args.kernel_size
                    },
                    'preprocessing': {
                        'mean': mean,
                        'std': std
                    },
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                    'target_size': target_size,
                },
                is_best,
                out_folder=args.out_dir)
            # TODO: get this header directly as a dictionary keys
            header = 'epoch,t_time,t_loss,t_top1,t_top5,v_time,v_loss,v_top1,v_top5'
            np.savetxt(model_progress_path,
                       np.array(model_progress),
                       delimiter=',',
                       header=header)
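A single-node launch of main_worker might look like the sketch below; the upstream PyTorch ImageNet example, which this worker closely follows, instead spawns one such process per GPU via torch.multiprocessing when args.multiprocessing_distributed is set (args is the parsed namespace, an assumption here):

import torch

ngpus_per_node = torch.cuda.device_count()
main_worker(ngpus_per_node, args)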
Code example #6
def main(args):
    args = parse_arguments(args)
    if args.imagenet_dir is None:
        args.imagenet_dir = '/home/arash/Software/imagenet/raw-data/validation/'
    colour_space = args.colour_space
    target_size = args.target_size
    if args.repeat:
        target_size = int(args.target_size / 2)

    mean, std = model_utils.get_preprocessing_function(colour_space,
                                                       'trichromat')
    extra_transformations = []
    if args.noise is not None:
        noise_kwargs = {'amount': float(args.noise[1])}
        extra_transformations.append(
            cv2_preprocessing.UniqueTransformation(imutils.gaussian_noise,
                                                   **noise_kwargs))
    if args.mosaic_pattern is not None:
        mosaic_trans = cv2_preprocessing.MosaicTransformation(
            args.mosaic_pattern)
        extra_transformations.append(mosaic_trans)

    # testing settings
    freqs = args.freqs
    if freqs is None:
        if args.model_fest:
            test_sfs = [
                107.558006181068, 60.2277887428434, 42.5666842683747,
                30.1176548488805, 21.2841900445655, 15.0589946730047,
                10.6611094334144, 7.5293662203794, 5.33055586478039,
                4.01568430264343
            ]
            args.gabor = 'model_fest'
        else:
            if target_size == 256:
                t4s = [
                    target_size / 2,
                    target_size / 2.5,
                    target_size / 3,
                    target_size / 3.5,
                    target_size / 3.75,
                    target_size / 4,
                ]
            else:
                # otherwise, assuming a target size of 128
                t4s = [64]

            sf_base = ((target_size / 2) / np.pi)
            test_sfs = [
                sf_base / e for e in [
                    *np.arange(1, 21), *np.arange(21, 61, 5),
                    *np.arange(61, t4s[-1], 25), *t4s
                ]
            ]
    else:
        if len(freqs) == 3:
            test_sfs = np.linspace(freqs[0], freqs[1], int(freqs[2]))
        else:
            test_sfs = freqs
    # np.unique also sorts the spatial frequencies
    test_sfs = np.unique(test_sfs)
    contrasts = args.contrasts
    if contrasts is None:
        test_contrasts = [0.5]
    else:
        test_contrasts = contrasts
    test_thetas = np.linspace(0, np.pi, 7)
    test_rhos = np.linspace(0, np.pi, 4)
    test_ps = [0.0, 1.0]

    if args.pretrained:
        if args.side_by_side:
            if args.scale_factor is None:
                scale_factor = (args.target_size / 256)**2
            else:
                scale_factor = args.scale_factor
            model = pretrained_models.NewClassificationModel(
                args.model_path,
                grey_width=args.grey_width == 40,
                scale_factor=scale_factor)
        else:
            if args.scale_factor is None:
                scale_factor = (args.target_size / 128)**2
            else:
                scale_factor = args.scale_factor
            model = models_csf.ContrastDiscrimination(
                args.model_path,
                grey_width=args.grey_width == 40,
                scale_factor=scale_factor)
    else:
        model, _ = model_utils.which_network_classification(args.model_path, 2)
    model.eval()
    model.cuda()

    mean_std = None
    if args.visualise:
        mean_std = (mean, std)

    if args.avg_illuminant < 0:
        max_high = 1 + 2 * args.avg_illuminant
    elif args.avg_illuminant > 0:
        max_high = 1 - 2 * args.avg_illuminant
    else:
        max_high = 1.0
    mid_contrast = (0 + max_high) / 2

    all_results = None
    csf_flags = [mid_contrast for _ in test_sfs]

    if args.db == 'gratings':
        for i in range(len(csf_flags)):
            low = 0
            high = max_high
            j = 0
            while csf_flags[i] is not None:
                print('%.2d %.3d Doing %f - %f %f %f' %
                      (i, j, test_sfs[i], csf_flags[i], low, high))

                test_samples = {
                    'amp': [csf_flags[i]],
                    'lambda_wave': [test_sfs[i]],
                    'theta': test_thetas,
                    'rho': test_rhos,
                    'side': test_ps,
                    'avg_illuminant': args.avg_illuminant
                }
                db_params = {
                    'colour_space': colour_space,
                    'vision_type': args.vision_type,
                    'repeat': args.repeat,
                    'mask_image': args.gabor,
                    'grey_width': args.grey_width,
                    'side_by_side': args.side_by_side
                }

                db = dataloader.validation_set(args.db,
                                               target_size,
                                               mean,
                                               std,
                                               extra_transformations,
                                               data_dir=test_samples,
                                               **db_params)
                db.contrast_space = args.contrast_space

                db_loader = torch.utils.data.DataLoader(
                    db,
                    batch_size=args.batch_size,
                    shuffle=False,
                    num_workers=args.workers,
                    pin_memory=True,
                )

                if args.side_by_side:
                    new_results, all_results = run_gratings(
                        db_loader,
                        model,
                        args.out_file,
                        args.print,
                        mean_std=mean_std,
                        old_results=all_results)
                else:
                    new_results, all_results = run_gratings_separate(
                        db_loader,
                        model,
                        args.out_file,
                        args.print,
                        mean_std=mean_std,
                        old_results=all_results)
                new_contrast, low, high = sensitivity_sf(new_results,
                                                         test_sfs[i],
                                                         varname='all',
                                                         th=0.75,
                                                         low=low,
                                                         high=high)
                if (abs(csf_flags[i] - max_high) < 1e-3
                        or new_contrast == csf_flags[i] or j == 20):
                    print('had to skip', csf_flags[i])
                    csf_flags[i] = None
                else:
                    csf_flags[i] = new_contrast
                j += 1
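The while loop above is, in essence, a bisection search for the weakest contrast the model can still discriminate at each spatial frequency. A minimal standalone sketch of that idea (accuracy_at is a hypothetical stand-in for evaluating the model at one contrast level):

def find_threshold(accuracy_at, low=0.0, high=1.0, th=0.75, max_iters=20):
    # halve the search interval until it converges on the contrast where
    # accuracy crosses the threshold th
    for _ in range(max_iters):
        mid = (low + high) / 2
        if accuracy_at(mid) > th:
            high = mid  # still discriminable: try a lower contrast
        else:
            low = mid   # no longer discriminable: raise the contrast
    return high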
Code example #7
def main_worker(ngpus_per_node, args):
    mean, std = model_utils.get_preprocessing_function(args.colour_space,
                                                       args.vision_type)

    # preparing the output folder
    create_dir(args.out_dir)

    if args.gpus is not None:
        print("Use GPU: {} for training".format(args.gpus))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + args.gpus
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    # create model
    if args.transfer_weights is not None:
        print('Transferred model!')
        model = contrast_utils.AFCModel(args.network_name,
                                        args.transfer_weights)
    elif args.custom_arch:
        print('Custom model!')
        supported_customs = ['resnet_basic_custom', 'resnet_bottleneck_custom']
        if args.network_name in supported_customs:
            model = custom_models.__dict__[args.network_name](
                args.blocks,
                pooling_type=args.pooling_type,
                in_chns=len(mean),
                num_classes=args.num_classes,
                inplanes=args.num_kernels,
                kernel_size=args.kernel_size)
    elif args.pretrained:
        print("=> using pre-trained model '{}'".format(args.network_name))
        model = models.__dict__[args.network_name](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.network_name))
        model = models.__dict__[args.network_name]()

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpus is not None:
            torch.cuda.set_device(args.gpus)
            model.cuda(args.gpus)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpus])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpus is not None:
        torch.cuda.set_device(args.gpus)
        model = model.cuda(args.gpus)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if (args.network_name.startswith('alexnet')
                or args.network_name.startswith('vgg')):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = soft_cross_entropy

    # optimiser
    if args.transfer_weights is None:
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        params_to_optimize = [
            {
                'params': [p for p in model.parameters() if p.requires_grad]
            },
        ]
        optimizer = torch.optim.SGD(params_to_optimize,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    model_progress = []
    model_progress_path = os.path.join(args.out_dir, 'model_progress.csv')
    # optionally resume from a checkpoint
    # TODO: it would be best if resume loaded the architecture from this file
    # TODO: merge with which_architecture
    best_acc1 = 0
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cpu')
            args.initial_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            if args.gpus is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpus)
                model = model.cuda(args.gpus)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            if os.path.exists(model_progress_path):
                model_progress = np.loadtxt(model_progress_path, delimiter=',')
                model_progress = model_progress.tolist()
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    train_trans = []
    valid_trans = []
    both_trans = []
    if args.mosaic_pattern is not None:
        mosaic_trans = preprocessing.mosaic_transformation(args.mosaic_pattern)
        both_trans.append(mosaic_trans)

    if args.num_augmentations != 0:
        augmentations = preprocessing.random_augmentation(
            args.augmentation_settings, args.num_augmentations)
        train_trans.append(augmentations)

    target_size = default_configs.get_default_target_size(
        args.dataset, args.target_size)

    final_trans = [
        cv2_transforms.ToTensor(),
        cv2_transforms.Normalize(mean, std),
    ]

    train_trans.append(
        cv2_transforms.RandomResizedCrop(target_size, scale=(0.08, 1.0)))

    # loading the training set
    train_trans = torch_transforms.Compose(
        [*both_trans, *train_trans, *final_trans])
    train_dataset = image_quality.BAPPS2afc(root=args.data_dir,
                                            split='train',
                                            transform=train_trans,
                                            concat=0.5)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    valid_trans.extend([
        cv2_transforms.Resize(target_size),
        cv2_transforms.CenterCrop(target_size),
    ])

    # loading validation set
    valid_trans = torch_transforms.Compose(
        [*both_trans, *valid_trans, *final_trans])
    validation_dataset = image_quality.BAPPS2afc(root=args.data_dir,
                                                 split='val',
                                                 transform=valid_trans,
                                                 concat=0)

    val_loader = torch.utils.data.DataLoader(validation_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # training loop over epochs
    for epoch in range(args.initial_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        misc_utils.adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train_log = train_on_data(train_loader, model, criterion, optimizer,
                                  epoch, args)

        # evaluate on validation set
        validation_log = validate_on_data(val_loader, model, criterion, args)

        model_progress.append([*train_log, *validation_log])

        # remember best acc@1 and save checkpoint
        acc1 = validation_log[2]
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if misc_utils.is_saving_node(args.multiprocessing_distributed,
                                     args.rank, ngpus_per_node):
            misc_utils.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.network_name,
                    'customs': {
                        'pooling_type': args.pooling_type,
                        'in_chns': len(mean),
                        'num_classes': args.num_classes,
                        'blocks': args.blocks,
                        'num_kernels': args.num_kernels,
                        'kernel_size': args.kernel_size
                    },
                    'transfer_weights': args.transfer_weights,
                    'preprocessing': {
                        'mean': mean,
                        'std': std
                    },
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                    'target_size': target_size,
                },
                is_best,
                out_folder=args.out_dir)
            # TODO: get this header directly as a dictionary keys
            header = 'epoch,t_time,t_loss,t_top5,v_time,v_loss,v_top1'
            np.savetxt(model_progress_path,
                       np.array(model_progress),
                       delimiter=',',
                       header=header)
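The criterion here is soft_cross_entropy, which presumably accepts probabilistic (soft) targets such as the human judgement ratios in BAPPS 2AFC. A minimal sketch of such a loss, as an assumption rather than this project's exact implementation:

import torch.nn.functional as F

def soft_cross_entropy(output, target):
    # target holds per-class probabilities rather than hard class indices
    log_probs = F.log_softmax(output, dim=1)
    return -(target * log_probs).sum(dim=1).mean()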
Code example #8
def main(args):
    args.gpus = set_visible_gpus(args.gpus)
    args.device = prepare_device(args.gpus)

    args.architecture_kwargs = {
        'in_chns': args.in_chns
    }
    # TODO: this is not a nice solution here
    if 'wavenet' in args.architecture:
        args.architecture_kwargs['inplanes'] = args.num_kernels
        args.architecture_kwargs['planes'] = args.blocks

    # creating the model
    model, architecture, mean_std = geetup_net.which_network(
        args.architecture, **args.architecture_kwargs
    )
    model = model.to(args.device)

    args.out_dir = prepare_training.prepare_output_directories(
        dataset_name='geetup_' + args.dataset, network_name=architecture,
        optimiser='sgd', load_weights=False,
        experiment_name=args.experiment_name, framework='pytorch'
    )

    logging.basicConfig(
        filename=args.out_dir + '/experiment_info.log', filemode='w',
        format='%(levelname)s: %(message)s', level=logging.INFO
    )

    validation_pickle = _get_multi_sensory_paths(
        args.data_dir, args.validation_file
    )
    validation_dataset = geetup_db.get_validation_dataset(
        validation_pickle, args.target_size, mean_std
    )
    validation_loader = torch.utils.data.DataLoader(
        validation_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True
    )

    if args.random is not None:
        mean, std = get_preprocessing_function('rgb', 'trichromat')
        normalize_inverse = NormalizeInverse(mean, std)
        process_random_image(model, validation_loader, normalize_inverse, args)
        return

    args.criterion = nn.BCELoss().to(args.device)
    if args.evaluate:
        predict_outs = predict(
            validation_loader, model, args.criterion, args
        )
        for key, item in predict_outs.items():
            file_name = '_'.join(e for e in [key, *args.out_prefix])
            result_file = '%s/%s_%s' % (
                args.out_dir, file_name, args.validation_file
            )
            write_pickle(result_file, item)
        return

    training_pickle = _get_multi_sensory_paths(
        args.data_dir, args.train_file
    )
    train_dataset = geetup_db.get_train_dataset(
        training_pickle, args.target_size, mean_std, args.crop_scale,
        args.gaussian_sigma
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True
    )

    # optimiser
    optimizer = torch.optim.SGD(
        model.parameters(), args.lr,
        momentum=args.momentum, weight_decay=args.weight_decay
    )

    epochs(model, train_loader, validation_loader, optimizer, args)
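NormalizeInverse, used above to undo the input normalisation before visualising random images, is commonly implemented as an inverted torchvision Normalize; a sketch of that common pattern, which may differ from this project's version:

import torch
from torchvision import transforms

class NormalizeInverse(transforms.Normalize):
    def __init__(self, mean, std):
        mean = torch.as_tensor(mean, dtype=torch.float)
        std = torch.as_tensor(std, dtype=torch.float)
        # invert x_norm = (x - mean) / std  =>  x = x_norm * std + mean
        super().__init__(mean=(-mean / std).tolist(), std=(1.0 / std).tolist())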
Code example #9
def main_worker(ngpus_per_node, args):
    global best_acc1

    is_pill_img = 'wcs_xyz_png_1600' in args.data_dir

    mean, std = model_utils.get_preprocessing_function(args.colour_space,
                                                       args.vision_type)

    # preparing the output folder
    create_dir(args.out_dir)

    if args.gpus is not None:
        print("Use GPU: {} for training".format(args.gpus))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + args.gpus
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    if args.prediction:
        checkpoint = torch.load(args.network_weights, map_location='cpu')
        blocks = checkpoint['customs']['blocks']
        pooling_type = checkpoint['customs']['pooling_type']
        num_kernels = checkpoint['customs']['num_kernels']
        outputs = checkpoint['customs']['outputs']
        for key, val in outputs.items():
            if 'area' not in val:
                outputs[key] = None
        model = resnet.__dict__[args.network_name](blocks,
                                                   pooling_type=pooling_type,
                                                   in_chns=len(mean),
                                                   inplanes=num_kernels,
                                                   outputs=outputs)
        model.load_state_dict(checkpoint['state_dict'])
    elif args.transfer_weights is not None:
        print('Transferred model!')
        (model, _) = model_utils.which_network(args.transfer_weights,
                                               args.task_type,
                                               num_classes=args.old_classes)
        model = model_utils.NewClassificationModel(model, args.num_classes)
    elif args.custom_arch:
        print('Custom model!')
        if args.network_name in ('resnet_basic_custom',
                                 'resnet_bottleneck_custom'):
            outputs = {'objects': None, 'munsells': None, 'illuminants': None}
            imagenet_weights = args.imagenet_weights
            if args.object_area is not None:
                outputs['objects'] = {
                    'num_classes': 2100,
                    'area': args.object_area
                }
            if args.munsell_area is not None:
                outputs['munsells'] = {
                    'num_classes': 1600,
                    'area': args.munsell_area
                }
            if args.illuminant_area is not None:
                outputs['illuminants'] = {
                    'num_classes': 280,
                    'area': args.illuminant_area
                }

            model = resnet.__dict__[args.network_name](
                args.blocks,
                pooling_type=args.pooling_type,
                in_chns=len(mean),
                inplanes=args.num_kernels,
                outputs=outputs,
                imagenet_weights=imagenet_weights)
    elif args.pretrained:
        print("=> using pre-trained model '{}'".format(args.network_name))
        model = models.__dict__[args.network_name](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.network_name))
        model = models.__dict__[args.network_name]()

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpus is not None:
            torch.cuda.set_device(args.gpus)
            model.cuda(args.gpus)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpus])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpus is not None:
        torch.cuda.set_device(args.gpus)
        model = model.cuda(args.gpus)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if (args.network_name.startswith('alexnet')
                or args.network_name.startswith('vgg')):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpus)

    # optimiser
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    model_progress = []
    model_progress_path = os.path.join(args.out_dir, 'model_progress.csv')
    # optionally resume from a checkpoint
    # TODO: it would be best if resume loaded the architecture from this file
    # TODO: merge with which_architecture
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cpu')
            args.initial_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            if args.gpus is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpus)
                model = model.cuda(args.gpus)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            if os.path.exists(model_progress_path):
                model_progress = np.loadtxt(model_progress_path, delimiter=',')
                model_progress = model_progress.tolist()
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    normalize = transforms.Normalize(mean=mean, std=std)

    other_transformations = []
    if args.num_augmentations != 0:
        augmentations = preprocessing.random_augmentation(
            args.augmentation_settings, args.num_augmentations)
        other_transformations.append(augmentations)

    target_size = get_default_target_size(args.dataset, args.target_size)

    train_dataset, validation_dataset = get_train_val_dataset(
        args.data_dir, other_transformations, [], normalize,
        args.imagenet_weights)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    if args.prediction:
        manipulation_values = args.parameters['kwargs'][args.manipulation]
        manipulation_name = args.parameters['f_name']

        for j, manipulation_value in enumerate(manipulation_values):
            args.parameters['kwargs'][args.manipulation] = manipulation_value
            prediction_transformation = preprocessing.prediction_transformation(
                args.parameters, args.colour_space,
                tmp_c_space(manipulation_name))
            other_transformations = [prediction_transformation]
            _, validation_dataset = get_train_val_dataset(
                args.data_dir, other_transformations, other_transformations,
                normalize, args.imagenet_weights)

            val_loader = torch.utils.data.DataLoader(
                validation_dataset,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)

            pred_log = predict(val_loader, model, criterion,
                               torch.device(args.gpus))
            from kernelphysiology.dl.utils import prepapre_testing
            prepapre_testing.save_predictions(pred_log, args.experiment_name,
                                              args.pred_name, args.dataset,
                                              manipulation_name,
                                              manipulation_value)
        return

    val_loader = torch.utils.data.DataLoader(validation_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    if args.ill_colour is not None:
        print('Performing with illuminant correction')
        args.ill_colour = np.loadtxt(args.ill_colour, delimiter=',')

    # training loop over epochs
    for epoch in range(args.initial_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        if args.imagenet_weights is None:
            adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train_log = train_on_data(train_loader, model, criterion, optimizer,
                                  epoch, args)

        # evaluate on validation set
        validation_log = validate_on_data(val_loader, model, criterion, args)

        model_progress.append([*train_log, *validation_log])

        # remember best acc@1 and save checkpoint
        acc1 = validation_log[2]
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.network_name,
                    'customs': {
                        'pooling_type': args.pooling_type,
                        'in_chns': len(mean),
                        'num_classes': args.num_classes,
                        'blocks': args.blocks,
                        'num_kernels': args.num_kernels,
                        'outputs': outputs
                    },
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                    'target_size': target_size,
                },
                is_best,
                out_folder=args.out_dir)
        # TODO: get this header directly as a dictionary keys
        header = 'epoch,t_time,t_loss,t_lo,t_lm,t_li,t_ao,t_am,t_ai,' \
                 'v_time,v_loss,v_lo,v_lm,v_li,v_ao,v_am,v_ai'
        np.savetxt(model_progress_path,
                   np.array(model_progress),
                   delimiter=',',
                   header=header)
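The multi-head configuration revolves around the outputs dictionary built above: a head is enabled by a dict holding its class count and the network area it reads from, or disabled with None. For reference, a fully specified example (the class counts come from the code above; the area values are illustrative):

outputs = {
    'objects': {'num_classes': 2100, 'area': 4},   # area value illustrative
    'munsells': {'num_classes': 1600, 'area': 4},
    'illuminants': None,                           # head disabled
}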
Code example #10
def train_on_data(train_loader, model, criterion, optimizer, epoch, args):
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    losses_obj = AverageMeter()
    top1_obj = AverageMeter()
    top5_obj = AverageMeter()
    losses_mun = AverageMeter()
    top1_mun = AverageMeter()
    top5_mun = AverageMeter()
    losses_ill = AverageMeter()
    top1_ill = AverageMeter()
    top5_ill = AverageMeter()

    if args.top_k is None:
        # two accuracy values are unpacked for every head below, so the
        # default must contain two entries (top-1 and top-5)
        topks = (1, 5)
    else:
        topks = (1, args.top_k)

    # switch to train mode
    model.train()

    mean, std = model_utils.get_preprocessing_function(args.colour_space,
                                                       args.vision_type)
    normalise_inverse = cv2_transforms.NormalizeInverse(mean, std)
    normalise_back = transforms.Normalize(mean=mean, std=std)

    end = time.time()
    for i, (input_image, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if args.gpus is not None:
            input_image = input_image.cuda(args.gpus, non_blocking=True)
        targets = targets.cuda(args.gpus, non_blocking=True)

        # compute output
        out_obj, out_mun, out_ill = model(input_image)

        if out_obj is None:
            loss_obj = 0
        else:
            loss_obj = criterion(out_obj, targets[:, 0])
            acc1_obj, acc5_obj = accuracy(out_obj, targets[:, 0], topk=topks)
            losses_obj.update(loss_obj.item(), input_image.size(0))
            top1_obj.update(acc1_obj[0], input_image.size(0))
            top5_obj.update(acc5_obj[0], input_image.size(0))
        if out_mun is None:
            loss_mun = 0
        else:
            loss_mun = criterion(out_mun, targets[:, 1])
            acc1_mun, acc5_mun = accuracy(out_mun, targets[:, 1], topk=topks)
            losses_mun.update(loss_mun.item(), input_image.size(0))
            top1_mun.update(acc1_mun[0], input_image.size(0))
            top5_mun.update(acc5_mun[0], input_image.size(0))
        if out_ill is None:
            loss_ill = 0
        else:
            loss_ill = criterion(out_ill, targets[:, 2])
            acc1_ill, acc5_ill = accuracy(out_ill, targets[:, 2], topk=topks)
            losses_ill.update(loss_ill.item(), input_image.size(0))
            top1_ill.update(acc1_ill[0], input_image.size(0))
            top5_ill.update(acc5_ill[0], input_image.size(0))

        loss = loss_obj + loss_mun + loss_ill
        losses.update(loss.item(), input_image.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if out_mun is None and args.ill_colour is not None:
            input_image2 = correct_image(normalise_inverse, normalise_back,
                                         input_image, out_ill, args.ill_colour)
            out_obj2, out_mun2, _ = model(input_image2)
            loss_mun2 = 0
            loss_obj2 = 0
            if out_mun2 is not None:
                loss_mun2 = criterion(out_mun2, targets[:, 1])
            if out_obj2 is not None:
                loss_obj2 = criterion(out_obj2, targets[:, 0])
            loss2 = loss_obj2 + loss_mun2
            optimizer.zero_grad()
            loss2.backward()
            optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # printing the accuracy at certain intervals
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\t'
                  'Data {data_time.val:.2f} ({data_time.avg:.2f})\t'
                  'Loss {loss.val:.2f} ({loss.avg:.2f})\t'
                  'LO {obj_loss.val:.2f} ({obj_loss.avg:.2f})\t'
                  'LM {mun_loss.val:.2f} ({mun_loss.avg:.2f})\t'
                  'LI {ill_loss.val:.2f} ({ill_loss.avg:.2f})\t'
                  'Ao {obj_acc.val:.2f} ({obj_acc.avg:.2f})\t'
                  'AM {mun_acc.val:.2f} ({mun_acc.avg:.2f})\t'
                  'AI {ill_acc.val:.2f} ({ill_acc.avg:.2f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      obj_loss=losses_obj,
                      mun_loss=losses_mun,
                      ill_loss=losses_ill,
                      obj_acc=top1_obj,
                      mun_acc=top1_mun,
                      ill_acc=top1_ill))
    return [
        epoch, batch_time.avg, losses.avg, losses_obj.avg, losses_mun.avg,
        losses_ill.avg, top1_obj.avg, top1_mun.avg, top1_ill.avg
    ]
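AverageMeter mirrors the helper from the PyTorch ImageNet example that this loop is modelled on; for reference:

class AverageMeter:
    """Tracks the latest value and a running average."""

    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count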
Code example #11
def generic_evaluation(args, fn, save_fn=None, **kwargs):
    manipulation_values = args.parameters['kwargs'][args.manipulation]
    manipulation_name = args.parameters['f_name']
    other_mans = args.parameters['others']
    for j, current_network in enumerate(args.network_files):
        # which architecture
        (model, target_size) = model_utils.which_network(
            current_network,
            args.task_type,
            num_classes=args.num_classes,
            kill_kernels=args.kill_kernels,
            kill_planes=args.kill_planes,
            kill_lines=args.kill_lines)
        model.to(args.device)
        mean, std = model_utils.get_preprocessing_function(
            args.colour_space, args.network_chromaticities[j])
        normalize = transforms.Normalize(mean=mean, std=std)

        for i, manipulation_value in enumerate(manipulation_values):
            args.parameters['kwargs'][args.manipulation] = manipulation_value

            output_file = prepapre_testing._prepare_saving_file(
                args.experiment_name,
                args.network_names[j],
                args.dataset,
                manipulation_name,
                manipulation_value,
                extension='csv')
            if os.path.exists(output_file):
                continue

            if args.task_type == 'segmentation' or 'voc' in args.dataset:
                prediction_transformation = preprocessing.prediction_transformation_seg(
                    args.parameters, args.colour_space,
                    tmp_c_space(manipulation_name))
            else:
                prediction_transformation = preprocessing.prediction_transformation(
                    args.parameters, args.colour_space,
                    tmp_c_space(manipulation_name))
            colour_vision = 'trichromat'
            if _requires_colour_transform(manipulation_name,
                                          args.network_chromaticities[j]):
                colour_vision = args.network_chromaticities[j]

            other_transformations = []
            for oth_man in other_mans:
                if args.task_type == 'segmentation' or 'voc' in args.dataset:
                    other_transformations.append(
                        preprocessing.prediction_transformation_seg(
                            oth_man, args.colour_space,
                            tmp_c_space(oth_man['f_name'])))
                else:
                    other_transformations.append(
                        preprocessing.prediction_transformation(
                            oth_man, args.colour_space,
                            tmp_c_space(oth_man['f_name'])))
            if args.mosaic_pattern is not None:
                other_transformations.append(
                    preprocessing.mosaic_transformation(args.mosaic_pattern))
            if args.sf_filter is not None:
                other_transformations.append(
                    preprocessing.sf_transformation(args.sf_filter,
                                                    args.sf_filter_chn))
            other_transformations.append(prediction_transformation)

            print('Processing network %s and %s %f' %
                  (current_network, manipulation_name, manipulation_value))

            # which dataset: read after the model, because each model might
            # have its own specific input size
            # loading validation set
            target_size = get_default_target_size(args.dataset,
                                                  args.target_size)

            target_transform = utils_db.ImagenetCategoryTransform(
                args.categories, args.cat_dir)

            validation_dataset = utils_db.get_validation_dataset(
                args.dataset,
                args.validation_dir,
                colour_vision,
                args.colour_space,
                other_transformations,
                normalize,
                target_size,
                task=args.task_type,
                target_transform=target_transform)

            # TODO: nicer solution:
            if 'sampler' not in args:
                sampler = None
            else:
                sampler = args.sampler(validation_dataset)
            if 'collate_fn' not in args:
                args.collate_fn = None

            # FIXME: add segmentation datasets
            val_loader = torch.utils.data.DataLoader(
                validation_dataset,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True,
                sampler=sampler,
                collate_fn=args.collate_fn)

            if args.random_images is not None:
                out_folder = prepapre_testing.prepare_saving_dir(
                    args.experiment_name, args.network_names[j], args.dataset,
                    manipulation_name)
                normalize_inverse = NormalizeInverse(mean, std)
                fn(val_loader, out_folder, normalize_inverse,
                   manipulation_value, **kwargs)
            elif args.activation_map is not None:
                model = model_utils.LayerActivation(model, args.activation_map)
                current_results = fn(val_loader, model, **kwargs)
                save_fn(current_results, args.experiment_name,
                        args.network_names[j], args.dataset, manipulation_name,
                        manipulation_value)
            else:
                (_, _, current_results) = fn(val_loader, model, **kwargs)
                save_fn(current_results, args.experiment_name,
                        args.network_names[j], args.dataset, manipulation_name,
                        manipulation_value)