def init_model(self):
        """Initialize the attribute model, loss, optimizer and LR scheduler.

        Populates ``self.model``, ``self.criterion``, ``self.optimizer`` and
        ``self.scheduler``, then wraps each in the corresponding ``*_list``
        attribute expected by the training loop.
        """
        # Consistently use self.args (the original mixed the module-level
        # `args` with self.args; they alias the same namespace here).
        print('Initializing model: {}'.format(self.args.model))
        self.model = models.init_model(name=self.args.model,
                                       num_classes=self.dm.num_attributes,
                                       pretrained=not self.args.no_pretrained,
                                       use_gpu=self.use_gpu)
        print('Model size: {:.3f} M'.format(count_num_param(self.model)))

        # Load pretrained weights if specified in args.
        self.loaded_args = self.args
        if self.args.load_weights:
            # TODO: implement result dict
            # Only build the path when a weights file was actually requested.
            load_file = osp.join(self.args.save_experiment,
                                 self.args.load_weights)
            if check_isfile(load_file):
                cp = load_pretrained_weights([self.model], load_file)
                # Prefer the args stored in the checkpoint so evaluation uses
                # the configuration the weights were trained with.
                if "args" in cp:
                    self.loaded_args = cp["args"]
                else:
                    print("WARNING: Could not load args. ")
            else:
                print("WARNING: Could not load pretrained weights")

        # Load model onto GPU if GPU is used.
        self.model = self.model.cuda() if self.use_gpu else self.model

        # Select the multi-label loss function.
        if self.args.loss_func == "deepmar":
            # DeepMAR weights each attribute by its positive-example ratio.
            pos_ratio = self.dm.dataset.get_positive_attribute_ratio()
            self.criterion = DeepMARLoss(pos_ratio,
                                         self.args.train_batch_size,
                                         use_gpu=self.use_gpu,
                                         sigma=self.args.loss_func_param)
        elif self.args.loss_func == "scel":
            self.criterion = SigmoidCrossEntropyLoss(
                num_classes=self.dm.num_attributes, use_gpu=self.use_gpu)
        elif self.args.loss_func == "sscel":
            attribute_grouping = self.dm.dataset.attribute_grouping
            self.criterion = SplitSoftmaxCrossEntropyLoss(attribute_grouping,
                                                          use_gpu=self.use_gpu)
        else:
            self.criterion = None

        # Computed lazily during evaluation.
        self.f1_calibration_thresholds = None

        self.optimizer = init_optimizer(self.model,
                                        **optimizer_kwargs(self.args))
        self.scheduler = init_lr_scheduler(self.optimizer,
                                           **lr_scheduler_kwargs(self.args))

        self.model = nn.DataParallel(
            self.model) if self.use_gpu else self.model

        # Set default for max_epoch if it was not passed as an argument in the console.
        if self.args.max_epoch < 0:
            self.args.max_epoch = 30

        # Single-model training: the generic loop iterates over these lists.
        self.model_list = [self.model]
        self.optimizer_list = [self.optimizer]
        self.scheduler_list = [self.scheduler]
        self.criterion_list = [self.criterion]
# Beispiel #2 — 0 votes (separator left over from the scraping source;
# commented out so the file parses: the bare name `Beispiel` would raise
# NameError at import time)
    def get_baseline_data(self, filename, key, name):
        """Fetch a stored baseline value from a checkpoint file.

        Looks up ``key`` inside the checkpoint's "result_dict" and returns
        the stored value; returns None (after printing a warning) when the
        file, the result dict, or the entry is missing.
        """
        path = osp.join(self.args.save_experiment, filename)
        if filename and check_isfile(path):
            checkpoint = torch.load(path)
            results = checkpoint.get("result_dict")
            if results is not None:
                value = results.get(key)
                if value is not None:
                    print("Loaded {} from file: {}".format(name, filename))
                    return value

        print("WARNING: Could not load {}. ".format(name))
        return None
def main():
    """Train/evaluate an image re-id model with cross-entropy + triplet loss.

    Reads all configuration from the module-level ``args`` namespace; writes
    logs and checkpoints under ``args.save_dir``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    # Mirror stdout into a log file (separate files for train vs. test runs).
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        # Benchmark mode picks the fastest conv algorithms for fixed shapes.
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
        use_lmdb=args.use_lmdb,
    )

    # Training augmentation; ImageNet normalization statistics.
    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Deterministic test-time preprocessing.
    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    # PK-style sampling: each batch holds num_instances images per identity
    # (required by the triplet loss).
    trainloader = DataLoader(
        ImageDataset(dataset.train,
                     transform=transform_train,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.train_lmdb_path),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    # BUGFIX: query/gallery loaders previously pointed at the *train* LMDB
    # file (copy-paste); use the split-specific LMDB paths instead.
    # Assumes the dataset exposes query_lmdb_path/gallery_lmdb_path alongside
    # train_lmdb_path — confirm against data_manager.
    queryloader = DataLoader(
        ImageDataset(dataset.query,
                     transform=transform_test,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.query_lmdb_path),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery,
                     transform=transform_test,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.gallery_lmdb_path),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.load_weights:
        # Load pretrained weights but ignore layers that don't match in size
        # (partial transfer, e.g. a different classifier head).
        if check_isfile(args.load_weights):
            checkpoint = torch.load(args.load_weights)
            pretrain_dict = checkpoint['state_dict']
            model_dict = model.state_dict()
            pretrain_dict = {
                k: v
                for k, v in pretrain_dict.items()
                if k in model_dict and model_dict[k].size() == v.size()
            }
            model_dict.update(pretrain_dict)
            model.load_state_dict(model_dict)
            print("Loaded pretrained weights from '{}'".format(
                args.load_weights))

    # Resume full training state; weights must match exactly here.
    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(
                args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        distmat = test(model,
                       queryloader,
                       galleryloader,
                       use_gpu,
                       return_distmat=True)
        if args.vis_ranked_res:
            visualize_ranked_results(
                distmat,
                dataset,
                save_dir=osp.join(args.save_dir, 'ranked_results'),
                topk=20,
            )
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        # Evaluate every eval_step epochs (once past start_eval) and always
        # on the final epoch. Parenthesized for clarity — same precedence as
        # the implicit `and`/`or` chain.
        if ((epoch + 1) > args.start_eval and args.eval_step > 0
                and (epoch + 1) % args.eval_step == 0) \
                or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            # Unwrap DataParallel so checkpoint keys are portable.
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            # BUGFIX: store the *next* epoch index (epoch + 1) so that resume
            # (args.start_epoch = checkpoint['epoch']) continues after this
            # epoch rather than repeating it; this also matches the filename.
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch + 1,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
# Beispiel #4 — 0 votes (separator left over from the scraping source;
# commented out so the file parses: the bare name `Beispiel` would raise
# NameError at import time)
def main():
    """Train/evaluate a re-id model that uses hard-split segmentation
    (cross-entropy + triplet + mask + split + cluster losses).

    Reads all configuration from the module-level ``args`` namespace; writes
    append-mode logs and checkpoints under ``args.save_dir``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    # Mirror stdout into an append-mode log file (train vs. test).
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'), mode='a')
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'), mode='a')
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        # Benchmark mode picks the fastest conv algorithms for fixed shapes.
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(name=args.dataset,
                                                dataset_dir=args.root,
                                                fore_dir=args.fore_dir)

    # Training augmentation: scale, flip, normalize, random erasing.
    transform_train = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.RandomHorizontalFlip(),
        ST.ToTensor(),
        ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ST.RandomErasing(0.5)
    ])

    # Deterministic test-time preprocessing (ImageNet statistics).
    transform_test = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToTensor(),
        ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    # PK-style sampling: each batch holds num_instances images per identity
    # (required by the triplet loss).
    trainloader = DataLoader(
        ImageDataset_hardSplit_seg(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids)
    print(model)

    # One criterion per loss term consumed by train() below.
    criterion_xent = CrossEntropyLabelSmooth(use_gpu=use_gpu)
    criterion_htri = TripletLoss()
    criterion_mask = MaskLoss()
    criterion_split = HardSplitLoss()
    criterion_cluster = ClusterLoss()

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    # Resume full training state; weights must match exactly.
    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(
                args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):

        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, criterion_mask,
              criterion_split, criterion_cluster, optimizer, trainloader,
              use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        # NOTE(review): precedence is (A and B) or C, so the epoch-0 test
        # always runs regardless of start_eval; the modulo assumes
        # eval_step > 0 — confirm against the argument parser's default.
        if (epoch + 1) > args.start_eval and (
                epoch + 1) % args.eval_step == 0 or epoch == 0:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            # Unwrap DataParallel before saving so checkpoint keys are
            # portable to single-GPU/CPU loading.
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            # NOTE(review): saves 'epoch': epoch (0-based) while the filename
            # uses epoch + 1; resume above reads this value as start_epoch —
            # resuming therefore repeats the saved epoch. Confirm intended.
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
    print("==========\nArgs:{}\n==========".format(args))
def main():
    """Train/evaluate a re-id model driven by the module-level ``config``
    object (loss variants: xent, xent_triplet, xent_tripletv2).

    Writes logs and checkpoints under ``config.save_dir``; checkpoint
    filenames carry ``config.checkpoint_suffix``.
    """
    torch.manual_seed(1)
    os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_devices
    use_gpu = torch.cuda.is_available()

    sys.stdout = Logger(config.save_dir, config.checkpoint_suffix,
                        config.evaluate)
    print("\n==========\nArgs:")
    config.print_parameter()
    print("==========\n")

    if use_gpu:
        print("Currently using GPU {}".format(config.gpu_devices))
        # Benchmark mode picks the fastest conv algorithms for fixed shapes.
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(1)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(config.dataset))
    dataset = data_manager.init_imgreid_dataset(name=config.dataset,
                                                root=config.data_root)

    transform_train = T.Compose([
        T.Random2DTranslation(config.height, config.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=data_mean, std=data_std),
    ])

    transform_test = T.Compose([
        T.Resize((config.height, config.width)),
        T.ToTensor(),
        T.Normalize(mean=data_mean, std=data_std),
    ])

    pin_memory = True if use_gpu else False

    # Class-balanced batches: sample_cls_cnt classes with each_cls_cnt
    # images each (required by the triplet-style losses below).
    train_batch_sampler = ClassSampler(dataset.train,
                                       sample_cls_cnt=config.sample_cls_cnt,
                                       each_cls_cnt=config.each_cls_cnt)

    trainloader = DataLoader(ImageDatasetWCL(dataset,
                                             data_type='train',
                                             merge_h=256,
                                             merge_w=256,
                                             mean_std=[data_mean, data_std]),
                             batch_sampler=train_batch_sampler,
                             num_workers=config.workers,
                             pin_memory=pin_memory)

    queryloader = DataLoader(
        ImageDatasetWCL(dataset.query,
                        data_type='query',
                        merge_h=256,
                        merge_w=256,
                        mean_std=[data_mean, data_std]),
        batch_size=config.test_batch,
        shuffle=False,
        num_workers=config.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDatasetWCL(dataset.gallery,
                        data_type='gallery',
                        merge_h=256,
                        merge_w=256,
                        mean_std=[data_mean, data_std]),
        batch_size=config.test_batch,
        shuffle=False,
        num_workers=config.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    # vehicleid has no separate train-query/train-gallery evaluation split.
    if config.dataset == 'vehicleid':
        train_query_loader = None
        train_gallery_loader = None
    else:
        train_query_loader = DataLoader(
            ImageDatasetWCL(dataset.train_query,
                            data_type='train_query',
                            merge_h=256,
                            merge_w=256,
                            mean_std=[data_mean, data_std]),
            batch_size=config.test_batch,
            shuffle=False,
            num_workers=config.workers,
            pin_memory=pin_memory,
            drop_last=False,
        )

        train_gallery_loader = DataLoader(
            ImageDatasetWCL(dataset.train_gallery,
                            data_type='train_gallery',
                            merge_h=256,
                            merge_w=256,
                            mean_std=[data_mean, data_std]),
            batch_size=config.test_batch,
            shuffle=False,
            num_workers=config.workers,
            pin_memory=pin_memory,
            drop_last=False,
        )

    print("Initializing model: {}".format(config.arch))
    model = init_model(name=config.arch,
                       num_classes=dataset.num_train_pids,
                       loss_type=config.loss_type)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    # Select the criterion matching the configured loss variant.
    if config.loss_type == 'xent':
        criterion = [nn.CrossEntropyLoss(), nn.CrossEntropyLoss()]
    elif config.loss_type == 'xent_triplet':
        criterion = XentTripletLoss(
            margin=config.margin,
            triplet_selector=RandomNegativeTripletSelector(
                margin=config.margin),
            each_cls_cnt=config.each_cls_cnt,
            n_class=config.sample_cls_cnt)
    elif config.loss_type == 'xent_tripletv2':
        criterion = XentTripletLossV2(
            margin=config.margin,
            triplet_selector=RandomNegativeTripletSelectorV2(
                margin=config.margin),
            each_cls_cnt=config.each_cls_cnt,
            n_class=config.sample_cls_cnt)
    else:
        raise KeyError("Unsupported loss: {}".format(config.loss_type))

    optimizer = init_optim(config.optim, model.parameters(), config.lr,
                           config.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=config.stepsize,
                                         gamma=config.gamma)

    # Partial resume: keep only weights whose name AND shape match the
    # current model, then restore the bookkeeping values.
    if config.resume is not None:
        if check_isfile(config.resume):
            checkpoint = torch.load(config.resume)
            pretrain_dict = checkpoint['state_dict']
            model_dict = model.state_dict()
            pretrain_dict = {
                k: v
                for k, v in pretrain_dict.items()
                if k in model_dict and model_dict[k].size() == v.size()
            }
            model_dict.update(pretrain_dict)
            model.load_state_dict(model_dict)
            config.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            # Older checkpoints may predate the 'mAP' field.
            mAP = checkpoint.get('mAP', 0)
            print("Loaded checkpoint from '{}'".format(config.resume))
            print("- start_epoch: {}\n- rank1: {}\n- mAP: {}".format(
                config.start_epoch, rank1, mAP))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if config.evaluate:
        print("Evaluate only")
        test_model(model, queryloader, galleryloader, train_query_loader,
                   train_gallery_loader, use_gpu, config.test_batch,
                   config.loss_type, config.euclidean_distance_loss)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_map = 0
    best_epoch = 0

    for epoch in range(config.start_epoch, config.max_epoch):
        print("==> Start training")
        start_train_time = time.time()
        # NOTE(review): stepping before train() follows the pre-1.1 PyTorch
        # scheduler convention; on newer PyTorch this shifts the LR schedule
        # by one epoch — confirm the targeted PyTorch version.
        scheduler.step()
        print('epoch:', epoch, 'lr:', scheduler.get_lr())
        train(epoch, model, criterion, optimizer, trainloader,
              config.loss_type, config.print_freq)
        train_time += round(time.time() - start_train_time)

        # BUGFIX: the final-epoch clause used `epoch == config.max_epoch`,
        # which never holds inside range(start_epoch, max_epoch), so the last
        # epoch was never force-evaluated; compare against epoch + 1 instead.
        if (epoch >= config.start_eval and config.eval_step > 0
                and epoch % config.eval_step == 0) \
                or (epoch + 1) == config.max_epoch:
            print("==> Test")
            rank1, mAP = test_model(model, queryloader, galleryloader,
                                    train_query_loader, train_gallery_loader,
                                    use_gpu, config.test_batch,
                                    config.loss_type,
                                    config.euclidean_distance_loss)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_map = mAP
                best_epoch = epoch + 1

            # Unwrap DataParallel so checkpoint keys are portable.
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'mAP': mAP,
                    'epoch': epoch + 1,
                },
                is_best,
                use_gpu_suo=False,
                fpath=osp.join(
                    config.save_dir, 'checkpoint_ep' + str(epoch + 1) +
                    config.checkpoint_suffix + '.pth.tar'))

    print("==> Best Rank-1 {:.2%}, mAP {:.2%}, achieved at epoch {}".format(
        best_rank1, best_map, best_epoch))
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
# Beispiel #6 — 0 votes (separator left over from the scraping source;
# commented out so the file parses: the bare name `Beispiel` would raise
# NameError at import time)
    def init_model(self):
        """Initialize the main attribute model and the hardness-predictor
        (HP) network, plus their rejector, calibrator, losses, optimizers
        and LR schedulers.

        Populates self.model_main / self.model_hp (with self.model,
        self.criterion, self.optimizer and self.scheduler kept as aliases of
        the "main" objects) and the *_list attributes consumed by the
        training loop.
        """
        print('Initializing main model: {}'.format(args.model))
        self.model_main = models.init_model(
            name=self.args.model,
            num_classes=self.dm.num_attributes,
            pretrained=not self.args.no_pretrained,
            use_gpu=self.use_gpu)
        print('Model size: {:.3f} M'.format(count_num_param(self.model_main)))

        print('Initializing HP model: {}'.format(args.hp_model))
        # Determine the size of the output vector for the HP-Net:
        # "simple" mode predicts one global hardness score, otherwise one
        # score per attribute.
        num_hp_net_outputs = 1 if self.args.hp_net_simple else self.dm.num_attributes
        # Init the HP-Net
        self.model_hp = models.init_model(
            name="hp_net_" + self.args.hp_model,
            num_classes=num_hp_net_outputs,
            pretrained=not self.args.no_pretrained)
        print('Model size: {:.3f} M'.format(count_num_param(self.model_hp)))

        # Choose the strategy used to reject (ignore) hard examples.
        if self.args.rejector == "none":
            self.rejector = rejectors.NoneRejector()
        elif self.args.rejector == 'macc':
            self.rejector = rejectors.MeanAccuracyRejector(
                self.args.max_rejection_quantile)
        elif self.args.rejector == "median":
            self.rejector = rejectors.MedianRejector(
                self.args.max_rejection_quantile)
        elif self.args.rejector == "threshold":
            self.rejector = rejectors.ThresholdRejector(
                self.args.rejection_threshold,
                self.args.max_rejection_quantile)
        elif self.args.rejector == "quantile":
            self.rejector = rejectors.QuantileRejector(
                self.args.max_rejection_quantile)
        elif self.args.rejector == 'f1':
            self.rejector = rejectors.F1Rejector(
                self.args.max_rejection_quantile)
        else:
            raise ValueError("Unsupported rejection strategy: '{}'".format(
                self.args.rejector))
        print("Using rejection strategy '{}'".format(self.args.rejector))

        # Calibrator applied in conjunction with the HP loss.
        if self.args.hp_calib == 'none':
            self.hp_calibrator = NoneCalibrator()
        elif self.args.hp_calib == 'linear':
            self.hp_calibrator = LinearCalibrator()
        else:
            raise ValueError("Unsupported calibrator: '{}'".format(
                self.args.hp_calib))
        print("Using calibrator for HP-Loss '{}'".format(self.args.hp_calib))

        # Load pretrained weights if specified in args.
        load_file = osp.join(args.save_experiment, args.load_weights)
        self.loaded_args = self.args
        if args.load_weights:
            if check_isfile(load_file):
                # One checkpoint carries the weights of both networks.
                cp = load_pretrained_weights([self.model_main, self.model_hp],
                                             load_file)
                # Prefer the args stored in the checkpoint so evaluation uses
                # the configuration the weights were trained with.
                if "args" in cp:
                    self.loaded_args = cp["args"]
                else:
                    print("WARNING: Could not load args. ")

                # In evaluation mode, also restore stored results and the
                # rejection thresholds computed at training time.
                if "result_dict" in cp and cp[
                        "result_dict"] is not None and self.args.evaluate:
                    self.result_dict = cp["result_dict"]
                    self.result_manager = ResultManager(self.result_dict)
                    print("Loaded result dict with keys: ")
                    print(sorted(list(self.result_dict.keys())))
                    if "rejection_thresholds" in self.result_dict:
                        self.rejector.load_thresholds(
                            self.result_dict["rejection_thresholds"])
                        if self.rejector.is_initialized():
                            print("Loaded rejection thresholds. ")
                        else:
                            print(
                                "Loaded uninitialized (None) rejection thresholds. "
                            )
                    else:
                        print("WARNING: Could not load rejection thresholds. ")
            else:
                print("WARNING: Could not load pretrained weights")
        # True when evaluating on a different split than the one recorded in
        # the loaded checkpoint's args.
        self.new_eval_split = self.args.eval_split != self.loaded_args.eval_split
        # Load model onto GPU if GPU is used.
        self.model_main = self.model_main.cuda(
        ) if self.use_gpu else self.model_main
        self.model = self.model_main
        self.model_hp = self.model_hp.cuda() if self.use_gpu else self.model_hp

        # Positive-example ratio per attribute; shared by both losses below.
        self.pos_ratio = self.dm.dataset.get_positive_attribute_ratio()
        # Select Loss function.
        if args.loss_func == "deepmar":

            self.criterion = DeepMARLoss(self.pos_ratio,
                                         args.train_batch_size,
                                         use_gpu=self.use_gpu,
                                         sigma=args.loss_func_param)
        elif args.loss_func == "scel":
            self.criterion = SigmoidCrossEntropyLoss(
                num_classes=self.dm.num_attributes, use_gpu=self.use_gpu)
        else:
            self.criterion = None

        self.criterion_main = self.criterion
        # HP loss trains the hardness predictor; optionally weighted by
        # ground-truth visibility (bounding-box annotations).
        self.criterion_hp = HardnessPredictorLoss(
            self.args.use_deepmar_for_hp,
            self.pos_ratio,
            self.dm.num_attributes,
            use_gpu=self.use_gpu,
            sigma=self.args.hp_loss_param,
            use_visibility=self.args.use_bbs_gt,
            visibility_weight=self.args.hp_visibility_weight)
        # Computed lazily during evaluation.
        self.f1_calibration_thresholds = None

        self.optimizer_main = init_optimizer(self.model_main,
                                             **optimizer_kwargs(args))
        self.scheduler_main = init_lr_scheduler(self.optimizer_main,
                                                **lr_scheduler_kwargs(args))

        self.optimizer = self.optimizer_main
        self.scheduler = self.scheduler_main

        # The HP net gets its own optimizer with a scaled learning rate and
        # an LR schedule shifted by hp_epoch_offset epochs.
        op_args = optimizer_kwargs(args)
        sc_args = lr_scheduler_kwargs(args)
        op_args['lr'] *= self.args.hp_net_lr_multiplier
        self.optimizer_hp = init_optimizer(self.model_hp, **op_args)
        sc_args["stepsize"] = [
            i + self.args.hp_epoch_offset for i in sc_args["stepsize"]
        ]
        self.scheduler_hp = init_lr_scheduler(self.optimizer_hp, **sc_args)

        self.model_main = nn.DataParallel(
            self.model_main) if self.use_gpu else self.model_main
        self.model = self.model_main
        self.model_hp = nn.DataParallel(
            self.model_hp) if self.use_gpu else self.model_hp

        if not self.args.evaluate:
            self.init_epochs()

        # The training loop iterates over these parallel lists.
        self.model_list = [self.model_main, self.model_hp]
        self.optimizer_list = [self.optimizer_main, self.optimizer_hp]
        self.scheduler_list = [self.scheduler_main, self.scheduler_hp]
        self.criterion_list = [self.criterion_main, self.criterion_hp]