def __init__(
            self,
            datamanager,
            model,
            optimizer,
            margin=0.3,
            weight_t=1,
            weight_x=1,
            weight_r=0.0001,
            scheduler=None,
            use_gpu=True,
            label_smooth=True
    ):
        super(ImageTripletAEEngine, self).__init__(
            datamanager, model, optimizer, scheduler, use_gpu)

        self.weight_t = weight_t
        self.weight_x = weight_x
        self.weight_r = weight_r

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
        self.criterion_mse = torch.nn.MSELoss()
        self.random = RandomErasing(probability=0.5)
        self.mgn_loss = Loss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
        self.BCE_criterion = torch.nn.BCEWithLogitsLoss()
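
# The engines in this collection all follow one pattern: keep per-term weights
# and criteria, then sum the weighted losses in the training step. A minimal
# sketch of that combination, assuming torchreid-style criteria that take
# (inputs, targets); the real forward_backward in each Engine subclass
# differs in detail:
def combined_engine_loss(outputs, features, recon, imgs, pids,
                         criterion_x, criterion_t, criterion_mse,
                         weight_x=1.0, weight_t=1.0, weight_r=0.0001):
    loss_x = criterion_x(outputs, pids)    # identity classification term
    loss_t = criterion_t(features, pids)   # metric-learning (triplet) term
    loss_r = criterion_mse(recon, imgs)    # autoencoder reconstruction term
    return weight_x * loss_x + weight_t * loss_t + weight_r * loss_r
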
# Example 2
    def __init__(self,
                 datamanager,
                 model,
                 optimizer,
                 margin=0.3,
                 weight_t=1,
                 weight_x=1,
                 scheduler=None,
                 use_gpu=True,
                 label_smooth=True,
                 conf_penalty=0.0):
        super(ImageTripletEngine, self).__init__(datamanager, model, optimizer,
                                                 scheduler, use_gpu)

        self.weight_t = weight_t
        self.weight_x = weight_x

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(use_gpu=self.use_gpu,
                                            label_smooth=label_smooth,
                                            conf_penalty=conf_penalty)

        assert len(self.models) == 1

        self.model = self.models['model']
        self.optimizer = self.optims['model']
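
# For reference, label smoothing with the conf_penalty knob used above can be
# sketched as follows. This is a hypothetical stand-in for the imported
# CrossEntropyLoss, not torchreid's exact implementation:
import torch
import torch.nn as nn
import torch.nn.functional as F

class SmoothedCrossEntropy(nn.Module):
    def __init__(self, num_classes, epsilon=0.1, conf_penalty=0.0):
        super().__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.conf_penalty = conf_penalty

    def forward(self, logits, targets):
        log_probs = F.log_softmax(logits, dim=1)
        # smooth the one-hot targets: 1 - eps on the true class, eps / K elsewhere
        onehot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        soft = (1 - self.epsilon) * onehot + self.epsilon / self.num_classes
        loss = (-soft * log_probs).sum(1)
        if self.conf_penalty > 0:
            # reward high-entropy (less confident) predictions
            entropy = -(log_probs.exp() * log_probs).sum(1)
            loss = loss - self.conf_penalty * entropy
        return loss.mean()
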
# Example 3
    def __init__(self,
                 datamanager,
                 model,
                 optimizer,
                 margin=0.3,
                 weight_x=1,
                 weight_t=1,
                 weight_r=1,
                 weight_a=1,
                 scheduler=None,
                 use_gpu=True,
                 label_smooth=True,
                 swap_size=(8, 4)):
        super(ImageSoftmaxDCLTripletEngine, self).__init__(datamanager, use_gpu)

        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.register_model('model', model, optimizer, scheduler)

        self.weight_t = weight_t
        self.weight_x = weight_x
        self.weight_r = weight_r
        self.weight_a = weight_a

        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth)
        self.criterion_trip = TripletLoss(margin=margin)
        self.criterion_rec = ReconstructionLoss()
        self.criterion_adver = AdversarialLoss()
        self.swap_size = swap_size
        self.swap = transforms.Randomswap(swap_size)
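
# TripletLoss with batch-hard mining, as used throughout these engines, can be
# sketched like this (assumed standard formulation; the imported class may
# differ, e.g. in its distance computation). It assumes each batch contains at
# least two identities and at least two images per identity:
import torch
import torch.nn as nn

class BatchHardTripletLoss(nn.Module):
    def __init__(self, margin=0.3):
        super().__init__()
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, inputs, targets):
        dist = torch.cdist(inputs, inputs, p=2)            # pairwise distances
        same = targets.unsqueeze(0) == targets.unsqueeze(1)
        dist_ap = torch.stack([dist[i][same[i]].max() for i in range(len(inputs))])
        dist_an = torch.stack([dist[i][~same[i]].min() for i in range(len(inputs))])
        # push the hardest negative at least `margin` beyond the hardest positive
        return self.ranking_loss(dist_an, dist_ap, torch.ones_like(dist_an))
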
# Example 4
    def __init__(self,
                 datamanager,
                 model,
                 optimizer,
                 margin=0.3,
                 weight_t=1,
                 weight_x=1,
                 scheduler=None,
                 use_gpu=True,
                 label_smooth=True):
        super(ImageTripletEngine, self).__init__(datamanager, use_gpu)

        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.register_model('model', model, optimizer, scheduler)

        self.weight_t = weight_t
        self.weight_x = weight_x

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth)
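
# Hypothetical usage of such an engine, mirroring the torchreid run()
# convention (exact keyword names vary between versions):
#
#     engine = ImageTripletEngine(datamanager, model, optimizer,
#                                 margin=0.3, scheduler=scheduler)
#     engine.run(save_dir='log/triplet', max_epoch=60, eval_freq=10)
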
# Example 5
    def __init__(
            self,
            datamanager,
            model,
            optimizer,
            margin=0.3,
            weight_t=1,
            weight_x=1,
            scheduler=None,
            use_gpu=True,
            label_smooth=True,
            mmd_only=True,
    ):
        super(ImageMmdEngine, self).__init__(datamanager, model, optimizer, scheduler, use_gpu, mmd_only)

        self.optimizer.zero_grad()
        self.mmd_only = mmd_only
        self.weight_t = weight_t
        self.weight_x = weight_x

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
        self.criterion_mmd = MaximumMeanDiscrepancy(
            instances=self.datamanager.train_loader.sampler.num_instances,
            batch_size=self.datamanager.train_loader.batch_size,
            global_only=False,
            distance_only=True,
            all=False
        )
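
# MaximumMeanDiscrepancy compares source- and target-domain feature
# distributions. A minimal RBF-kernel MMD sketch (the imported class layers
# instance/batch bookkeeping and several variants on top of this idea):
import torch

def rbf_mmd2(x, y, sigma=1.0):
    """Biased squared MMD between two feature batches under a Gaussian kernel."""
    def k(a, b):
        return torch.exp(-torch.cdist(a, b, p=2).pow(2) / (2 * sigma ** 2))
    return k(x, x).mean() + k(y, y).mean() - 2 * k(x, y).mean()
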
# Example 6
    def __init__(
            self,
            datamanager,
            model,
            optimizer,
            margin=0.3,
            weight_t=1,
            weight_x=1,
            weight_r=0.0001,
            scheduler=None,
            use_gpu=True,
            label_smooth=True
    ):
        super(ImageJointReconsVarEngine, self).__init__(
            datamanager, model, optimizer, scheduler, use_gpu)

        self.weight_t = weight_t
        self.weight_x = weight_x
        self.weight_r = weight_r

        self.criterion_t = TripletLoss(margin=margin)
        self.local_triplet = TripletLoss_Local(margin=margin)
        self.done_once = True
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
        self.criterion_mse = torch.nn.MSELoss()
        self.random = RandomErasing(probability=0.5, sl=0.02)
        self.criterion = torch.nn.CrossEntropyLoss()
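
# RandomErasing (with its lower area bound sl) blanks a random rectangle of a
# training image. A compact sketch of the classic algorithm of Zhong et al.;
# the imported class may differ in defaults and fill values:
import random

class SimpleRandomErasing:
    def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3,
                 mean=(0.485, 0.456, 0.406)):
        self.probability, self.sl, self.sh, self.r1 = probability, sl, sh, r1
        self.mean = mean

    def __call__(self, img):  # img: (C, H, W) tensor
        if random.random() > self.probability:
            return img
        _, h, w = img.shape
        for _ in range(100):  # retry until the rectangle fits
            area = random.uniform(self.sl, self.sh) * h * w
            ratio = random.uniform(self.r1, 1.0 / self.r1)
            eh = int(round((area * ratio) ** 0.5))
            ew = int(round((area / ratio) ** 0.5))
            if eh < h and ew < w:
                y = random.randint(0, h - eh)
                x = random.randint(0, w - ew)
                for c in range(img.size(0)):
                    img[c, y:y + eh, x:x + ew] = self.mean[c % len(self.mean)]
                return img
        return img
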
# Example 7
    def __init__(self,
                 datamanager,
                 model,
                 optimizer,
                 margin=0.3,
                 weight_t=1,
                 weight_x=1,
                 weight_db_t=1,
                 weight_db_x=1,
                 weight_b_db_t=1,
                 weight_b_db_x=1,
                 scheduler=None,
                 use_gpu=True,
                 label_smooth=True,
                 top_drop_epoch=-1):
        super(ImageTripletDropBatchDropBotFeaturesEngine,
              self).__init__(datamanager, model, optimizer, scheduler, use_gpu)

        self.weight_t = weight_t
        self.weight_x = weight_x
        self.weight_db_t = weight_db_t
        self.weight_db_x = weight_db_x
        self.weight_b_db_t = weight_b_db_t
        self.weight_b_db_x = weight_b_db_x
        self.top_drop_epoch = top_drop_epoch

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth)
        self.criterion_db_t = TripletLoss(margin=margin)
        self.criterion_db_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth)
        self.criterion_b_db_t = TripletLoss(margin=margin)
        self.criterion_b_db_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth)
# Example 8
    def __init__(self, datamanager, model, optimizer, margin=0.3,
                 weight_t=1, weight_x=1, scheduler=None, use_cpu=False,
                 label_smooth=True, experiment=None, combine_method="mean", save_embed=None):
        super(ImageTripletEngine, self).__init__(datamanager, model, optimizer, scheduler, use_cpu, experiment, combine_method, save_embed)

        self.weight_t = weight_t
        self.weight_x = weight_x
        
        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
# Example 9
    def __init__(self, datamanager, model, optimizer, margin=0.3,
                 weight_t=1, weight_x=1, scheduler=None, use_gpu=True,
                 label_smooth=True):
        super(ImageTripletEngine, self).__init__(datamanager, model, optimizer, scheduler, use_gpu)

        self.weight_t = weight_t
        self.weight_x = weight_x
        
        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
        # num_classes should track the training set; 751 was hard-coded for
        # Market-1501. feat_dim=2048 matches a ResNet-50 backbone.
        self.criterion_c = CenterLoss(
            num_classes=self.datamanager.num_train_pids, feat_dim=2048)
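
# CenterLoss pulls each embedding toward a learnable per-class center. A
# minimal sketch, equivalent in spirit to the imported implementation:
import torch
import torch.nn as nn

class SimpleCenterLoss(nn.Module):
    def __init__(self, num_classes, feat_dim):
        super().__init__()
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))

    def forward(self, features, targets):
        centers = self.centers[targets]                 # (B, feat_dim)
        return ((features - centers) ** 2).sum(dim=1).mean() / 2
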
# Example 10
    def __init__(
            self,
            datamanager,
            model,
            optimizer,
            margin=0.27,
            weight_t=1,
            weight_x=1,
            weight_r=1e-10,  # lambda: reconstruction weight
            scheduler=None,
            use_gpu=True,
            label_smooth=True,
            mmd_only=True,
            datamanager2=None,
    ):
        super(ImageMmdAEEngine, self).__init__(
            datamanager, model, optimizer, scheduler, use_gpu, mmd_only, datamanager2)

        self.optimizer.zero_grad()
        self.mmd_only = mmd_only
        self.weight_t = weight_t
        self.weight_x = weight_x
        self.weight_r = weight_r

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
        self.criterion_mmd = MaximumMeanDiscrepancy(
            instances=self.datamanager.train_loader.sampler.num_instances,
            batch_size=self.datamanager.train_loader.batch_size,
            global_only=False,
            distance_only=True,
            all=False
        )
        self.criterion_mse = torch.nn.MSELoss()
        self.random = RandomErasing(probability=0.5, sl=0.07)
        self.randomt = RandomErasing(probability=0.5, sl=0.01)
        self.mgn_loss = Loss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
        self.mgn_targetPredict = FC_Model().cuda()
        self.BCE_criterion = torch.nn.BCEWithLogitsLoss()
# Example 11
    def __init__(
        self,
        datamanager,
        model1,
        optimizer1,
        scheduler1,
        model2,
        optimizer2,
        scheduler2,
        margin=0.3,
        weight_t=0.5,
        weight_x=1.,
        weight_ml=1.,
        use_gpu=True,
        label_smooth=True,
        deploy='model1'
    ):
        super(ImageDMLEngine, self).__init__(datamanager, use_gpu)

        self.model1 = model1
        self.optimizer1 = optimizer1
        self.scheduler1 = scheduler1
        self.register_model('model1', model1, optimizer1, scheduler1)

        self.model2 = model2
        self.optimizer2 = optimizer2
        self.scheduler2 = scheduler2
        self.register_model('model2', model2, optimizer2, scheduler2)

        self.weight_t = weight_t
        self.weight_x = weight_x
        self.weight_ml = weight_ml

        assert deploy in ['model1', 'model2', 'both']
        self.deploy = deploy

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
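
# The mutual-learning term weighted by weight_ml makes each peer match the
# other's softened predictions. A sketch using a symmetric KL divergence
# (assumption: the actual ImageDMLEngine may use a different formulation):
import torch.nn.functional as F

def mutual_learning_loss(logits1, logits2):
    log_p1 = F.log_softmax(logits1, dim=1)
    log_p2 = F.log_softmax(logits2, dim=1)
    # each model's target distribution is detached so peers teach, not collude
    kl_12 = F.kl_div(log_p1, log_p2.exp().detach(), reduction='batchmean')
    kl_21 = F.kl_div(log_p2, log_p1.exp().detach(), reduction='batchmean')
    return 0.5 * (kl_12 + kl_21)
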
# Example 12
    def __init__(self,
                 datamanager,
                 model,
                 optimizer,
                 margin=0.3,
                 weight_t=1,
                 weight_x=1,
                 scheduler=None,
                 use_cpu=False,
                 label_smooth=True):
        super(PoseTripleEngine, self).__init__(datamanager, model, optimizer,
                                               scheduler, use_cpu)
        self.weight_t = weight_t
        self.weight_x = weight_x

        self.criterion_t = TripletLoss(margin=margin)
        # TODO modify the criterion for pairwise comparison
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth)
        self.att_criterion = Isolate_loss()
# Example 13
    def __init__(
        self,
        datamanager,
        model,
        optimizer,
        margin=0.3,
        weight_t=1,
        weight_x=1,
        weight_r=0.0001,
        scheduler=None,
        use_gpu=True,
        label_smooth=True,
        only_recons=True,
    ):
        super(ImageReconsVarEngine, self).__init__(
            datamanager, model, optimizer, scheduler, use_gpu)

        self.optimizer.zero_grad()

        self.weight_t = weight_t
        self.weight_x = weight_x
        self.weight_r = weight_r

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth)
        self.criterion_mse = torch.nn.MSELoss()
        self.criterion_mmd = MaximumMeanDiscrepancy(
            instances=self.datamanager.train_loader.sampler.num_instances,
            batch_size=self.datamanager.train_loader.batch_size,
            global_only=False,
            distance_only=True,
            all=False)
        self.only_recons = only_recons
        self.random = RandomErasing(probability=0.5)
        self.random2 = RandomErasing(probability=0.65, sl=0.15)
# Example 14
    def __init__(self,
                 datamanager,
                 model,
                 optimizer,
                 margin=0.3,
                 weight_t=1,
                 weight_x=1,
                 weight_r=1,
                 scheduler=None,
                 use_gpu=True,
                 label_smooth=True):
        super(ImageJointReconsEngine, self).__init__(
            datamanager, model, optimizer, scheduler, use_gpu)

        self.weight_t = weight_t
        self.weight_x = weight_x
        self.weight_r = weight_r

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth)
        self.criterion_mse = torch.nn.MSELoss()
# Example 15
    def __init__(self, num_classes, use_gpu, label_smooth):
        super(Loss, self).__init__()
        self.cross_entropy_loss = CrossEntropyLossTorch()
        self.triplet_loss = TripletLoss(margin=1.2)
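
# A plausible forward for this combined Loss (hypothetical: MGN-style heads
# usually sum cross entropy over all classifier outputs and triplet loss over
# all embedding outputs):
#
#     def forward(self, outputs, labels):
#         triplet = sum(self.triplet_loss(f, labels) for f in outputs['features'])
#         xent = sum(self.cross_entropy_loss(y, labels) for y in outputs['logits'])
#         return triplet + xent
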
# Example 16
def main():
    global best_rank1, best_mAP
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(
            osp.join(
                args.save_dir,
                'log_train{}.txt'.format(time.strftime('-%Y-%m-%d-%H-%M-%S'))))
    else:
        sys.stdout = Logger(
            osp.join(
                args.save_dir,
                'log_test{}.txt'.format(time.strftime('-%Y-%m-%d-%H-%M-%S'))))
    writer = SummaryWriter(log_dir=args.save_dir, comment=args.arch)
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = 'resnet3dt' not in args.arch
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_vidreid_dataset(root=args.root,
                                                name=args.dataset,
                                                split_id=args.split_id,
                                                use_pose=args.use_pose)

    transform_train = list()
    print('Transform:')
    if args.misalign_aug:
        print('+ Misalign Augmentation')
        transform_train.append(T.GroupMisAlignAugment())
    if args.rand_crop:
        print('+ Random Crop')
        transform_train.append(T.GroupRandomCrop(size=(240, 120)))
    print('+ Resize to ({} x {})'.format(args.height, args.width))
    transform_train.append(T.GroupResize((args.height, args.width)))
    if args.flip_aug:
        print('+ Random HorizontalFlip')
        transform_train.append(T.GroupRandomHorizontalFlip())
    print('+ ToTensor')
    transform_train.append(T.GroupToTensor())
    print('+ Normalize with mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]')
    transform_train.append(
        T.GroupNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    if args.rand_erase:
        print('+ Random Erasing')
        transform_train.append(T.GroupRandomErasing())
    transform_train = T.Compose(transform_train)

    transform_test = T.Compose([
        T.GroupResize((args.height, args.width)),
        T.GroupToTensor(),
        T.GroupNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = use_gpu

    trainloader = DataLoader(
        VideoDataset(dataset.train,
                     seq_len=args.seq_len,
                     sample=args.train_sample,
                     transform=transform_train,
                     training=True,
                     pose_info=dataset.process_poses,
                     num_split=args.num_split,
                     num_parts=args.num_parts,
                     num_scale=args.num_scale,
                     pyramid_part=args.pyramid_part,
                     enable_pose=args.use_pose),
        # instantiate the sampler class named on the command line
        # (e.g. RandomIdentitySampler)
        sampler=eval(args.train_sampler)(dataset.train,
                                         batch_size=args.train_batch,
                                         num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query,
                     seq_len=args.seq_len,
                     sample=args.test_sample,
                     transform=transform_test,
                     pose_info=dataset.process_poses,
                     num_split=args.num_split,
                     num_parts=args.num_parts,
                     num_scale=args.num_scale,
                     pyramid_part=args.pyramid_part,
                     enable_pose=args.use_pose),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery,
                     seq_len=args.seq_len,
                     sample=args.test_sample,
                     transform=transform_test,
                     pose_info=dataset.process_poses,
                     num_split=args.num_split,
                     num_parts=args.num_parts,
                     num_scale=args.num_scale,
                     pyramid_part=args.pyramid_part,
                     enable_pose=args.use_pose),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'},
                              last_stride=args.last_stride,
                              num_parts=args.num_parts,
                              num_scale=args.num_scale,
                              num_split=args.num_split,
                              pyramid_part=args.pyramid_part,
                              num_gb=args.num_gb,
                              use_pose=args.use_pose,
                              learn_graph=args.learn_graph,
                              consistent_loss=args.consistent_loss,
                              bnneck=args.bnneck,
                              save_dir=args.save_dir)

    input_size = sum(calc_splits(
        args.num_split)) if args.pyramid_part else args.num_split
    input_size *= args.num_scale * args.seq_len
    num_params, flops = compute_model_complexity(
        model,
        input=[
            torch.randn(1, args.seq_len, 3, args.height, args.width),
            torch.ones(1, input_size, input_size)
        ],
        verbose=True,
        only_conv_linear=False)
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if args.label_smooth:
        criterion_xent = CrossEntropyLabelSmooth(
            num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    else:
        criterion_xent = nn.CrossEntropyLoss()
    criterion_htri = TripletLoss(margin=args.margin, soft=args.soft_margin)

    param_groups = model.parameters()
    optimizer = init_optim(args.optim, param_groups, args.lr,
                           args.weight_decay)

    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    if args.warmup:
        scheduler = lr_scheduler.WarmupMultiStepLR(optimizer,
                                                   milestones=args.stepsize,
                                                   gamma=args.gamma,
                                                   warmup_iters=10,
                                                   warmup_factor=0.01)

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        checkpoint = torch.load(args.load_weights)
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume and check_isfile(args.resume):
        print("Loaded checkpoint from '{}'".format(args.resume))
        # checkpoints pickled under Python 2 need latin1 decoding in Python 3
        from functools import partial
        import pickle
        pickle.load = partial(pickle.load, encoding="latin1")
        pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
        checkpoint = torch.load(args.resume,
                                map_location=lambda storage, loc: storage,
                                pickle_module=pickle)

        print('Loaded model weights')
        model.load_state_dict(checkpoint['state_dict'])
        if optimizer is not None and 'optimizer' in checkpoint:
            print('Loaded optimizer')
            optimizer.load_state_dict(checkpoint['optimizer'])
            if use_gpu:
                for state in optimizer.state.values():
                    for k, v in state.items():
                        if isinstance(v, torch.Tensor):
                            state[k] = v.cuda()
        start_epoch = checkpoint['epoch'] + 1
        print('- start_epoch: {}'.format(start_epoch))
        best_rank1 = checkpoint['rank1']
        print("- rank1: {}".format(best_rank1))
        if 'mAP' in checkpoint:
            best_mAP = checkpoint['mAP']
            print("- mAP: {}".format(best_mAP))
    else:
        start_epoch = 0

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        distmat = test(model,
                       queryloader,
                       galleryloader,
                       args.pool,
                       use_gpu,
                       return_distmat=True)
        if args.visualize_ranks:
            visualize_ranked_results(
                distmat,
                dataset,
                save_dir=osp.join(args.save_dir, 'ranked_results'),
                topk=20,
            )
        return

    start_time = time.time()
    train_time = 0
    best_epoch = start_epoch
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch,
              model,
              criterion_xent,
              criterion_htri,
              optimizer,
              trainloader,
              use_gpu,
              writer=writer)
        train_time += round(time.time() - start_train_time)

        # once epoch reaches zero_wd (when zero_wd > 0), disable weight decay
        if epoch >= args.zero_wd > 0:
            set_wd(optimizer, 0)
            for group in optimizer.param_groups:
                assert group['weight_decay'] == 0, '{} is not zero'.format(
                    group['weight_decay'])

        scheduler.step(epoch)

        if ((epoch + 1) > args.start_eval and args.eval_step > 0
                and (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1, mAP = test(model, queryloader, galleryloader, args.pool,
                              use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_mAP = mAP
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'optimizer': optimizer.state_dict(),
                    'rank1': rank1,
                    'mAP': mAP,
                    'epoch': epoch,
                }, False,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

            writer.add_scalar(tag='acc/rank1',
                              scalar_value=rank1,
                              global_step=epoch + 1)
            writer.add_scalar(tag='acc/mAP',
                              scalar_value=mAP,
                              global_step=epoch + 1)

    print("==> Best Rank-1 {:.2%}, mAP: {:.2%}, achieved at epoch {}".format(
        best_rank1, best_mAP, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
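
# The set_wd helper used above is not shown; given the assertion that follows
# its call, a minimal version would simply rewrite every param group (assumption):
def set_wd(optimizer, value):
    for group in optimizer.param_groups:
        group['weight_decay'] = value
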
# Example 17
def main():
    global args
    
    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing image data manager")
    dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
    criterion_htri = TripletLoss(margin=args.margin)
    
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        checkpoint = torch.load(args.load_weights)
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch'] + 1
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, checkpoint['rank1']))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")

        for name in args.target_names:
            print("Evaluating {} ...".format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)
        
            if args.visualize_ranks:
                visualize_ranked_results(
                    distmat, dm.return_testdataset_by_name(name),
                    save_dir=osp.join(args.save_dir, 'ranked_results', name),
                    topk=20
                )
        return

    start_time = time.time()
    ranklogger = RankLogger(args.source_names, args.target_names)
    train_time = 0
    print("=> Start training")

    if args.fixbase_epoch > 0:
        print("Train {} for {} epochs while keeping other layers frozen".format(args.open_layers, args.fixbase_epoch))
        initial_optim_state = optimizer.state_dict()

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu, fixbase=True)
            train_time += round(time.time() - start_train_time)

        print("Done. All layers are open to train for {} epochs".format(args.max_epoch))
        optimizer.load_state_dict(initial_optim_state)

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)
        
        scheduler.step()
        
        if ((epoch + 1) > args.start_eval and args.eval_freq > 0
                and (epoch + 1) % args.eval_freq == 0) or (epoch + 1) == args.max_epoch:
            print("=> Test")
            
            for name in args.target_names:
                print("Evaluating {} ...".format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1 = test(model, queryloader, galleryloader, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, False, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
    ranklogger.show_summary()
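
# WarmupMultiStepLR (used in the Example 16 script above) ramps the learning
# rate from warmup_factor * base_lr up to base_lr over warmup_iters steps,
# then applies the usual MultiStep decay. The LR multiplier can be sketched
# as follows (assumption, not the library implementation):
def warmup_multistep_factor(epoch, milestones, gamma=0.1,
                            warmup_iters=10, warmup_factor=0.01):
    alpha = 1.0
    if epoch < warmup_iters:
        t = epoch / warmup_iters
        alpha = warmup_factor * (1 - t) + t   # linear ramp to 1
    return alpha * gamma ** sum(1 for m in milestones if epoch >= m)
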
# Example 18
def main():
    global args

    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing image data manager")
    dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()

    # ReID-Stream:
    print("Initializing ReID-Stream: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dm.num_train_pids,
                              reid_dim=args.reid_dim,
                              loss={'xent', 'htri'})
    print("ReID Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = CrossEntropyLoss(num_classes=dm.num_train_pids,
                                      use_gpu=use_gpu,
                                      label_smooth=args.label_smooth)
    criterion_htri = TripletLoss(margin=args.margin)

    # 2. Optimizer
    # Main ReID-Stream:
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")

        for name in args.target_names:
            print("Evaluating {} ...".format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            distmat = test(model,
                           queryloader,
                           galleryloader,
                           use_gpu,
                           return_distmat=True)

            if args.visualize_ranks:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(
                                             args.save_dir, 'ranked_results',
                                             name),
                                         topk=20)
        return

    start_time = time.time()
    ranklogger = RankLogger(args.source_names, args.target_names)
    train_time = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri,
              optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if ((epoch + 1) > args.start_eval and args.eval_freq > 0
                and (epoch + 1) % args.eval_freq == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")

            for name in args.target_names:
                print("Evaluating {} ...".format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1 = test(model, queryloader, galleryloader, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, False,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
    ranklogger.show_summary()
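
# The "load pretrained weights but ignore mismatched layers" block repeats in
# several of these scripts; factored into a helper it reads (sketch):
import torch

def load_matching_weights(model, path):
    checkpoint = torch.load(path, map_location='cpu')
    pretrain_dict = checkpoint.get('state_dict', checkpoint)
    model_dict = model.state_dict()
    matched = {k: v for k, v in pretrain_dict.items()
               if k in model_dict and model_dict[k].size() == v.size()}
    model_dict.update(matched)
    model.load_state_dict(model_dict)
    return sorted(set(pretrain_dict) - set(matched))  # names that were skipped
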
# Example 19
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', help='path to config file')
    parser.add_argument('--gpu-devices', type=str, default='')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    print('Show configuration\n{}\n'.format(cfg))
    torch.backends.cudnn.benchmark = True

    datamanager = ImageDataManager(**imagedata_kwargs(cfg))
    trainloader, queryloader, galleryloader = datamanager.return_dataloaders()
    print('Building model: {}'.format(cfg.model.name))
    model = build_model(cfg.model.name, datamanager.num_train_pids, 'triplet', pretrained=cfg.model.pretrained)

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    model = nn.DataParallel(model).cuda()

    criterion_t = TripletLoss(margin=cfg.loss.triplet.margin)
    criterion_x = CrossEntropyLoss(datamanager.num_train_pids, label_smooth=cfg.loss.softmax.label_smooth)
    optimizer = build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))

    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume, model, optimizer=optimizer)

    if cfg.test.evaluate:
        distmat = evaluate(model, queryloader, galleryloader, dist_metric=cfg.test.dist_metric,
                           normalize_feature=cfg.test.normalize_feature, rerank=cfg.test.rerank, return_distmat=True)
        if cfg.test.visrank:
            visualize_ranked_results(distmat, datamanager.return_testdataset(), 'image', width=cfg.data.width,
                                     height=cfg.data.height, save_dir=osp.join(cfg.data.save_dir, 'visrank'))
        return

    time_start = time.time()
    print('=> Start training')
    for epoch in range(cfg.train.start_epoch, cfg.train.max_epoch):
        train(epoch, cfg.train.max_epoch, model, criterion_t, criterion_x, optimizer, trainloader,
              fixbase_epoch=cfg.train.fixbase_epoch, open_layers=cfg.train.open_layers)
        scheduler.step()
        if (epoch + 1) % cfg.test.eval_freq == 0 or (epoch + 1) == cfg.train.max_epoch:
            rank1 = evaluate(model, queryloader, galleryloader, dist_metric=cfg.test.dist_metric,
                             normalize_feature=cfg.test.normalize_feature, rerank=cfg.test.rerank)
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'rank1': rank1,
                'optimizer': optimizer.state_dict(),
            }, cfg.data.save_dir)
    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))
# Example 20
def main():
    ################################# Setting gpu and out file ###################################################################
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False
    if use_gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
        os.environ['PYTHONHASHSEED'] = str(args.seed)
        print("Currently using GPU {}".format(args.gpu_devices))
        if args.is_deterministic == 1:
            print('is deterministic')
            np.random.seed(args.seed)
            torch.manual_seed(args.seed)
            random.seed(args.seed)
            torch.cuda.manual_seed(args.seed)
            torch.cuda.manual_seed_all(args.seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
    else:
        print("Currently using CPU (GPU is highly recommended)")

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))

    print("==========\nArgs:{}\n==========".format(args))
    print("Initializing dataset {}".format(args.dataset))

    dataset, trainloader_reid, queryloader, galleryloader = initialize_loader(use_gpu)

    ################################### Initialize model ###############################################################
    if args.num_classes_attributes == 0:
        num_classes_attributes = (6, 4, 11, 14, 6)

    if args.num_classes_attributes > 1:
        num_classes_attributes = (33, 6, 58, 65, 9)

    if args.dataset == 'pa100K':
        num_classes_attributes = (28, 9, 3, 14, 24, 9)
        if args.num_classes_attributes == 0:
            num_classes_attributes = (5, 3, 3, 6, 9, 10)

    if args.dataset == 'dukemtmcreid':
        num_classes_attributes = (15, 2, 47, 62, 9)

        if args.num_classes_attributes == 0:
            num_classes_attributes = (2, 2, 8, 11, 6)

    num_group_attributes = len(num_classes_attributes)

    print("Initializing model: {}".format(args.arch))
    if args.arch == 'resnetAttW2VText':
        dim_features = 50 * len(num_classes_attributes)

        model = models.init_model(args.arch,
                                  num_classes=dataset.num_train_pids,
                                  loss={'xent', 'htri'},
                                  num_group_attributes=num_group_attributes,
                                  dim_features=50,
                                  glove=True)
        criterion_attributes = LossW2V(
            num_classes_attributes=num_classes_attributes,
            attr_loss_type=args.attr_loss_type)

    if args.arch == 'resnetAttW2VAttributes':
        dim_features = sum(num_classes_attributes)

        model = models.init_model(
            args.arch,
            num_classes=dataset.num_train_pids,
            loss={'xent', 'htri'},
            num_group_attributes=num_group_attributes,
            num_classes_attributes=num_classes_attributes,
            dim_features=dim_features,
            glove=True)

        criterion_attributes = LossW2V(
            num_classes_attributes=num_classes_attributes)

    all_parameters = model.parameters()
    optimizer = init_optim(args.optim, all_parameters, args.lr,
                           args.weight_decay)

    ################################### Loss functions ##############################
    print("Model size: {:.3f} M".format(count_num_param(model)))
    criterion_htri_reid = TripletLoss(margin=0.3)
    criterion_xent_reid = nn.CrossEntropyLoss()

    ################################### Pretrained models ##############################
    if args.load_weights:
        load_weights(model)
    if args.resume:
        resume(model)
    if use_gpu:
        model = nn.DataParallel(model).cuda()

    ################################### Only evaluation ###############################
    writer = SummaryWriter(log_dir=osp.join(args.exp_dir, 'tensorboard'))

    if args.evaluate:
        print("Evaluate by test")
        test_and_evaluate_dist_mat(writer,
                                   model,
                                   queryloader,
                                   galleryloader,
                                   use_gpu=use_gpu,
                                   save_features=True,
                                   load_features=False,
                                   arch=args.arch,
                                   size=dim_features)
        return

    ################################### Training ######################################
    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0

    # Schedule learning rate
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):

        start_train_time = time.time()

        if args.is_warmup:
            adjust_lr(epoch, optimizer, args.lr)

        if args.arch == 'resnetAttW2VText':
            train_w2v_single_batch_text(writer, epoch, model,
                                        criterion_htri_reid,
                                        criterion_attributes, optimizer,
                                        trainloader_reid, use_gpu)

        if args.arch == 'resnetAttW2VAttributes':
            train_w2v_single_batch_attributes(writer, epoch, model,
                                              criterion_htri_reid,
                                              criterion_attributes, optimizer,
                                              trainloader_reid, use_gpu)

        # accumulate training time for every architecture, not just the last one
        train_time += round(time.time() - start_train_time)

        if ((epoch + 1) > args.start_eval and args.eval_step > 0
                and (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))

    state_dict = model.module.state_dict()

    save_checkpoint(state={
        'state_dict': state_dict,
        'epoch': epoch,
    },
                    fpath=osp.join(
                        args.save_dir,
                        'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("Evaluate by test")
    test_and_evaluate_dist_mat(writer,
                               model,
                               queryloader,
                               galleryloader,
                               use_gpu=use_gpu,
                               save_features=True,
                               load_features=False,
                               arch=args.arch,
                               size=dim_features)
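
# adjust_lr (called above when args.is_warmup is set) is not shown; a common
# linear warmup matching its call signature would look like this
# (hypothetical implementation):
def adjust_lr(epoch, optimizer, base_lr, warmup_epochs=10):
    lr = base_lr * min(1.0, (epoch + 1) / warmup_epochs)
    for group in optimizer.param_groups:
        group['lr'] = lr
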
# Example 21
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_vidreid_dataset(root=args.root, name=args.dataset)

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = use_gpu

    # decompose tracklets into images for image-based training
    new_train = []
    for img_paths, pid, camid in dataset.train:
        for img_path in img_paths:
            new_train.append((img_path, pid, camid))

    trainloader = DataLoader(
        ImageDataset(new_train, transform=transform_train),
        sampler=RandomIdentitySampler(new_train, args.train_batch, args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len, sample='evenly', transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len, sample='evenly', transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = nn.CrossEntropyLoss()
    criterion_htri = TripletLoss(margin=args.margin)
    criterion_KA = KALoss(margin=args.margin, same_margin=args.same_margin,
                          use_auto_samemargin=args.use_auto_samemargin)
    criterion_lifted = LiftedLoss(margin=args.margin)
    criterion_batri = BA_TripletLoss(margin=args.margin)
    
    if args.use_auto_samemargin:
        G_params = [{'params': model.parameters(), 'lr': args.lr},
                    {'params': criterion_KA.auto_samemargin, 'lr': args.lr}]
    else:
        G_params = [para for _, para in model.named_parameters()]

    optimizer = init_optim(args.optim, G_params, args.lr, args.weight_decay)

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        if check_isfile(args.load_weights):
            checkpoint = torch.load(args.load_weights)
            pretrain_dict = checkpoint['state_dict']
            model_dict = model.state_dict()
            pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
            model_dict.update(pretrain_dict)
            model.load_state_dict(model_dict)
            print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        distmat = test(model, queryloader, galleryloader, args.pool, use_gpu, return_distmat=True)
        if args.vis_ranked_res:
            visualize_ranked_results(
                distmat, dataset,
                save_dir=osp.join(args.save_dir, 'ranked_results'),
                topk=20,
            )
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        adjust_learning_rate(optimizer, epoch)
        
        train(epoch, model, criterion_batri, criterion_lifted, criterion_xent,
              criterion_htri, criterion_KA, optimizer, trainloader, use_gpu)
        
        train_time += round(time.time() - start_train_time)
        
        if ((epoch + 1) > args.start_eval and args.eval_step > 0
                and (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1
            
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
# Example 22
def main():
    global args, best_rank1

    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = use_gpu

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train, args.train_batch,
                                      args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    if args.label_smooth:
        criterion_xent = CrossEntropyLabelSmooth(
            num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    else:
        criterion_xent = nn.CrossEntropyLoss()
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        checkpoint = torch.load(args.load_weights)
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
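        # keep only the pretrained tensors whose name and shape match this model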
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch'] + 1
        best_rank1 = checkpoint['rank1']
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch,
                                                      best_rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        distmat = test(model,
                       queryloader,
                       galleryloader,
                       use_gpu,
                       return_distmat=True)
        if args.visualize_ranks:
            visualize_ranked_results(
                distmat,
                dataset,
                save_dir=osp.join(args.save_dir, 'ranked_results'),
                topk=20,
            )
        return

    start_time = time.time()
    train_time = 0
    best_epoch = args.start_epoch
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

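        # test every eval_step epochs once past start_eval, and always at the final epoch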
        if (((epoch + 1) > args.start_eval
             and args.eval_step > 0
             and (epoch + 1) % args.eval_step == 0)
                or (epoch + 1) == args.max_epoch):
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
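
The train() helper called above is not part of this listing. A minimal sketch matching the call signature, assuming the model returns (logits, features) when built with loss={'xent', 'htri'}, might look like this:

def train(epoch, model, criterion_xent, criterion_htri, optimizer,
          trainloader, use_gpu):
    # sketch only, not the original implementation; assumes each batch
    # yields (imgs, pids, camids)
    model.train()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        outputs, features = model(imgs)
        # joint objective: softmax cross-entropy on logits + triplet loss on features
        loss = criterion_xent(outputs, pids) + criterion_htri(features, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
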
Example #23
def main():
    global args

    set_random_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print('==========\nArgs:{}\n=========='.format(args))

    if use_gpu:
        print('Currently using GPU {}'.format(args.gpu_devices))
        cudnn.benchmark = True
    else:
        print('Currently using CPU, however, GPU is highly recommended')

    print('Initializing video data manager')
    dm = VideoDataManager(use_gpu, **video_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()
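    # testloader_dict maps each target dataset name to its 'query' and 'gallery' loaders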

    print('Initializing model: {}'.format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dm.num_train_pids,
                              loss={'xent', 'htri'},
                              pretrained=not args.no_pretrained,
                              use_gpu=use_gpu)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch'] + 1
        best_rank1 = checkpoint['rank1']
        print('Loaded checkpoint from "{}"'.format(args.resume))
        print('- start_epoch: {}\n- rank1: {}'.format(args.start_epoch,
                                                      best_rank1))

    model = nn.DataParallel(model).cuda() if use_gpu else model

    criterion_xent = CrossEntropyLoss(num_classes=dm.num_train_pids,
                                      use_gpu=use_gpu,
                                      label_smooth=args.label_smooth)
    criterion_htri = TripletLoss(margin=args.margin)
    optimizer = init_optimizer(model, **optimizer_kwargs(args))
    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs(args))

    if args.evaluate:
        print('Evaluate only')

        for name in args.target_names:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            distmat = test(model,
                           queryloader,
                           galleryloader,
                           args.pool_tracklet_features,
                           use_gpu,
                           return_distmat=True)

            if args.visualize_ranks:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(
                                             args.save_dir, 'ranked_results',
                                             name),
                                         topk=20)
        return

    start_time = time.time()
    ranklogger = RankLogger(args.source_names, args.target_names)
    train_time = 0
    print('=> Start training')

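    # optional warm-up: train only args.open_layers while the rest stays frozen,
    # then restore the optimizer state before opening all layers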
    if args.fixbase_epoch > 0:
        print(
            'Train {} for {} epochs while keeping other layers frozen'.format(
                args.open_layers, args.fixbase_epoch))
        initial_optim_state = optimizer.state_dict()

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch,
                  model,
                  criterion_xent,
                  criterion_htri,
                  optimizer,
                  trainloader,
                  use_gpu,
                  fixbase=True)
            train_time += round(time.time() - start_train_time)

        print('Done. All layers are open to train for {} epochs'.format(
            args.max_epoch))
        optimizer.load_state_dict(initial_optim_state)

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

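        # periodic evaluation on every target, plus a guaranteed final run at max_epoch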
        if (((epoch + 1) > args.start_eval
             and args.eval_freq > 0
             and (epoch + 1) % args.eval_freq == 0)
                or (epoch + 1) == args.max_epoch):
            print('=> Test')

            for name in args.target_names:
                print('Evaluating {} ...'.format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1 = test(model, queryloader, galleryloader,
                             args.pool_tracklet_features, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)

            # model is wrapped in DataParallel when use_gpu, but resume loads the
            # bare model, so save the unwrapped state_dict as the other examples do
            state_dict = model.module.state_dict() if use_gpu else model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, False,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print('Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.'.format(elapsed, train_time))
    ranklogger.show_summary()
Example #24
def main():
    global args

    set_random_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing MultiScale data manager")
    dm = ImageDataManager(use_gpu,
                          scales=[224, 160],
                          **image_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dm.num_train_pids,
                              input_size=args.width,
                              loss={'xent', 'htri'},
                              use_gpu=use_gpu)
    print("Model size: {:.3f} M".format(count_num_param(model)))
    print(model)

    # train() below takes a single criterion, so name it accordingly
    criterion = CrossEntropyLoss(num_classes=dm.num_train_pids,
                                 use_gpu=use_gpu,
                                 label_smooth=args.label_smooth)
    criterion_htri = TripletLoss(margin=args.margin)
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3, verbose=True, threshold=1e-04)

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        checkpoint = torch.load(args.load_weights)
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch'] + 1
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch,
                                                      checkpoint['rank1']))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")

        for name in args.target_names:
            print("Evaluating {} ...".format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            test_set = dm.return_testdataset_by_name(name)
            rank1, mAP = test(model,
                              test_set,
                              name,
                              queryloader,
                              galleryloader,
                              use_gpu,
                              visualize=args.visualize_ranks)

        return

    start_time = time.time()
    ranklogger = RankLogger(args.source_names, args.target_names)
    maplogger = RankLogger(args.source_names, args.target_names)
    train_time = 0

    # Tensorboard
    writer = SummaryWriter(log_dir=osp.join('runs', 'tensorboard'))
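    # training loss/precision and test rank-1/mAP curves are logged here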
    print("=> Start training")

    if args.fixbase_epoch > 0:
        print(
            "Train {} for {} epochs while keeping other layers frozen".format(
                args.open_layers, args.fixbase_epoch))
        initial_optim_state = optimizer.state_dict()

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            loss, prec1 = train(epoch,
                                model,
                                criterion,
                                optimizer,
                                trainloader,
                                writer,
                                use_gpu,
                                fixbase=True)
            writer.add_scalar('train/loss', loss, epoch + 1)
            writer.add_scalar('train/prec1', prec1, epoch + 1)
            print(
                'Epoch: [{:02d}] [Average Loss:] {:.4f}\t [Average Prec.:] {:.2%}'
                .format(epoch + 1, loss, prec1))
            train_time += round(time.time() - start_train_time)

        print("Done. All layers are open to train for {} epochs".format(
            args.max_epoch))
        optimizer.load_state_dict(initial_optim_state)

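    # shift the epoch window so the warm-up epochs do not count against max_epoch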
    args.start_epoch += args.fixbase_epoch
    args.max_epoch += args.fixbase_epoch

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        loss, prec1 = train(epoch, model, criterion, optimizer, trainloader,
                            writer, use_gpu)
        writer.add_scalar('train/loss', loss, epoch + 1)
        writer.add_scalar('train/prec1', prec1, epoch + 1)
        print(
            'Epoch: [{:02d}] [Average Loss:] {:.4f}\t [Average Prec.:] {:.2%}'.
            format(epoch + 1, loss, prec1))
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if (((epoch + 1) > args.start_eval
             and args.eval_freq > 0
             and (epoch + 1) % args.eval_freq == 0)
                or (epoch + 1) == args.max_epoch):
            print("=> Test")

            for name in args.target_names:
                print("Evaluating {} ...".format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']

                test_set = dm.return_testdataset_by_name(name)

                if epoch + 1 == args.max_epoch:
                    rank1, mAP = test(model,
                                      test_set,
                                      name,
                                      queryloader,
                                      galleryloader,
                                      use_gpu,
                                      visualize=True)
                else:
                    rank1, mAP = test(model, test_set, name, queryloader,
                                      galleryloader, use_gpu)

                writer.add_scalar('test/top1', rank1, epoch + 1)
                writer.add_scalar('test/mAP', mAP, epoch + 1)

                ranklogger.write(name, epoch + 1, rank1)
                maplogger.write(name, epoch + 1, mAP)

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, False,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
    ranklogger.show_summary()
    maplogger.show_summary()
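
None of these examples includes the test() helper they call. A minimal sketch matching the signature used in this last example, assuming the script-level imports (torch, numpy as np, os.path as osp), that the model returns embedding features in eval mode, and that an evaluate(distmat, q_pids, q_camids, g_pids, g_camids) helper returning (cmc, mAP) exists elsewhere in the project, might look like this:

@torch.no_grad()
def test(model, test_set, name, queryloader, galleryloader, use_gpu,
         visualize=False):
    model.eval()

    def extract_features(loader):
        # sketch only; assumes each batch is (imgs, pids, camids)
        feats, pids, camids = [], [], []
        for imgs, batch_pids, batch_camids in loader:
            if use_gpu:
                imgs = imgs.cuda()
            feats.append(model(imgs).cpu())
            pids.extend(batch_pids.tolist())
            camids.extend(batch_camids.tolist())
        return torch.cat(feats, 0), np.asarray(pids), np.asarray(camids)

    qf, q_pids, q_camids = extract_features(queryloader)
    gf, g_pids, g_camids = extract_features(galleryloader)
    distmat = torch.cdist(qf, gf).numpy()  # Euclidean distance matrix
    cmc, mAP = evaluate(distmat, q_pids, q_camids, g_pids, g_camids)
    print('{}: mAP {:.1%}, Rank-1 {:.1%}'.format(name, mAP, cmc[0]))
    if visualize:
        visualize_ranked_results(
            distmat, test_set,
            save_dir=osp.join(args.save_dir, 'ranked_results', name),
            topk=20)
    return cmc[0], mAP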