Example #1
def get_data(name,
             data_dir,
             height,
             width,
             batch_size,
             workers,
             combine_trainval,
             crop,
             tracking_icams,
             fps,
             real=True,
             synthetic=True,
             re=0,
             num_instances=0,
             camstyle=0,
             zju=0,
             colorjitter=0):
    root = osp.join(data_dir, name)
    if name == 'duke_tracking':
        if tracking_icams != 0:
            tracking_icams = [tracking_icams]
        else:
            tracking_icams = list(range(1, 9))
        dataset = datasets.create(name,
                                  root,
                                  type='tracking_gt',
                                  iCams=tracking_icams,
                                  fps=fps,
                                  trainval=combine_trainval)
    elif name == 'aic_tracking':
        dataset = datasets.create(name,
                                  root,
                                  type='tracking_gt',
                                  fps=fps,
                                  trainval=combine_trainval)
    else:
        dataset = datasets.create(name, root, real, synthetic)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_train_ids

    train_transformer = T.Compose([
        T.ColorJitter(brightness=0.1 * colorjitter,
                      contrast=0.1 * colorjitter,
                      saturation=0.1 * colorjitter,
                      hue=0),
        T.Resize((height, width)),
        T.RandomHorizontalFlip(),
        T.Pad(10 * crop),
        T.RandomCrop((height, width)),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(probability=re),
    ])
    test_transformer = T.Compose([
        T.Resize((height, width)),
        # T.RectScale(height, width, interpolation=3),
        T.ToTensor(),
        normalizer,
    ])

    if zju:
        train_loader = DataLoader(Preprocessor(dataset.train,
                                               root=dataset.train_path,
                                               transform=train_transformer),
                                  batch_size=batch_size,
                                  num_workers=workers,
                                  sampler=ZJU_RandomIdentitySampler(
                                      dataset.train, batch_size, num_instances)
                                  if num_instances else None,
                                  shuffle=False if num_instances else True,
                                  pin_memory=True,
                                  drop_last=False if num_instances else True)
    else:
        train_loader = DataLoader(
            Preprocessor(dataset.train,
                         root=dataset.train_path,
                         transform=train_transformer),
            batch_size=batch_size,
            num_workers=workers,
            sampler=RandomIdentitySampler(dataset.train, num_instances)
            if num_instances else None,
            shuffle=False if num_instances else True,
            pin_memory=True,
            drop_last=True)
    query_loader = DataLoader(Preprocessor(dataset.query,
                                           root=dataset.query_path,
                                           transform=test_transformer),
                              batch_size=batch_size,
                              num_workers=workers,
                              shuffle=False,
                              pin_memory=True)
    gallery_loader = DataLoader(Preprocessor(dataset.gallery,
                                             root=dataset.gallery_path,
                                             transform=test_transformer),
                                batch_size=batch_size,
                                num_workers=workers,
                                shuffle=False,
                                pin_memory=True)
    if camstyle <= 0:
        camstyle_loader = None
    else:
        camstyle_loader = DataLoader(Preprocessor(dataset.camstyle,
                                                  root=dataset.camstyle_path,
                                                  transform=train_transformer),
                                     batch_size=camstyle,
                                     num_workers=workers,
                                     shuffle=True,
                                     pin_memory=True,
                                     drop_last=True)
    return dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader
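A minimal usage sketch for this loader factory. It assumes the project's modules (datasets, Preprocessor, the samplers) are importable; the dataset name, data root, and every hyperparameter below are illustrative placeholders, not values from the original project.

# Hypothetical call: identity-balanced batches of 8 ids x 4 instances = 32 images.
dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
    get_data('market1501', '/path/to/data', height=256, width=128,
             batch_size=32, workers=4, combine_trainval=True, crop=1,
             tracking_icams=0, fps=1, re=0.5, num_instances=4,
             camstyle=0, zju=0, colorjitter=1)
print('classes:', num_classes, 'train batches:', len(train_loader))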
Example #2
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get_source_data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get_target_data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hack: size the classifier head to match the number of source-domain ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        print(
            'Resuming checkpoint from a model fine-tuned on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        # abort: this pipeline requires a pre-trained checkpoint
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print("Test with the original model trained on source domain:")
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    if args.evaluate:
        return

    # Criterion
    criterion = []
    criterion.append(
        TripletLoss(margin=args.margin,
                    num_instances=args.num_instances).cuda())
    criterion.append(
        TripletLoss(margin=args.margin,
                    num_instances=args.num_instances).cuda())

    # Optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

    # training-stage transforms on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # extract source-domain features
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronize feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # synchronize feature order with tgt_dataset.train
        target_features = torch.cat(
            [target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.train],
            0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)

        # select & cluster images as this epoch's training set
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - 1  # exclude the DBSCAN noise label (-1)
        print('Iteration {} has {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.trainval, labels):
            if label == -1:
                continue
            # appending a dummy camera id 0 keeps trainer.py's _parsing_input and the sampler unchanged
            new_dataset.append((fname, label, 0))
        print('Iteration {} has {} training images'.format(
            iter_n + 1, len(new_dataset)))

        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        # train model with new generated dataset
        trainer = Trainer(model, criterion, print_freq=args.print_freq)
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, optimizer)
        # Evaluate
        rank1 = evaluator.evaluate(test_loader, tgt_dataset.query,
                                   tgt_dataset.gallery)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': iter_n + 1,
                'num_ids': num_ids,
            },
            True,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  rank1: {:5.1%} \n'.format(
            iter_n + 1, rank1))
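The interesting step above is how the DBSCAN radius is chosen: eps is the mean of the smallest rho-fraction of pairwise re-ranked distances, so the density threshold adapts to the feature space. A self-contained sketch of that selection rule, with a random feature matrix standing in for rerank_dist and rho = 1.6e-3 as an assumed value:

import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
n, rho = 200, 1.6e-3
feats = rng.normal(size=(n, 16))
dist = np.linalg.norm(feats[:, None] - feats[None, :], axis=2)  # stand-in for rerank_dist

tri = np.triu(dist, 1)                         # keep each pair once
tri = np.sort(tri[np.nonzero(tri)])            # flatten, drop zeros, sort ascending
top_num = np.round(rho * tri.size).astype(int)
eps = tri[:top_num].mean()                     # mean of the closest rho-fraction of pairs

labels = DBSCAN(eps=eps, min_samples=4, metric='precomputed').fit_predict(dist)
print('eps={:.3f}, clusters={}'.format(eps, len(set(labels)) - (1 if -1 in labels else 0)))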
Example #3
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hack: size the classifier head to match the number of source-domain ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError('Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print('Resuming checkpoint from a model fine-tuned on another dataset...\n')
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        # abort: this pipeline requires a pre-trained checkpoint
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    # if args.evaluate: return

    # Criterion
    criterion = [
        TripletLoss(args.margin, args.num_instances, isAvg=True, use_semi=True).cuda(),
        TripletLoss(args.margin, args.num_instances, isAvg=True, use_semi=True).cuda(),
        None
    ]

    
    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)


    # training-stage transforms on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])


    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # extract source-domain features
            source_features, _ = extract_features(model, src_extfeat_loader, print_freq=args.print_freq)
            # synchronize feature order with src_dataset.train
            source_features = torch.cat([source_features[f].unsqueeze(0) for f, _, _, _ in src_dataset.train], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n+1))
        target_features, tarNames = extract_features(model, tgt_extfeat_loader, print_freq=args.print_freq)
        # synchronize feature order with tgt_dataset.trainval
        target_features = torch.cat([target_features[f].unsqueeze(0) for f, _, _, _ in tgt_dataset.trainval], 0)
        target_real_label = np.asarray([tarNames[f].unsqueeze(0) for f, _, _, _ in tgt_dataset.trainval])
        numTarID = len(set(target_real_label))
        print('n_clusters =', numTarID)
        # cluster target features with K-means
        print('Clustering target features with K-means...')
        target_features = target_features.numpy()
        cluster = KMeans(n_clusters=numTarID, n_jobs=8, n_init=1)

        # select & cluster images as this epoch's training set
        print('Clustering and labeling...')
        time0 = time.time()
        clusterRes = cluster.fit(target_features)
        time1 = time.time()
        print('clustering took {:.1f}s'.format(time1 - time0))
        labels, centers = clusterRes.labels_, clusterRes.cluster_centers_

        criterion[2] = ClassificationLoss(normalize(centers, axis=1))
        # labels = splitLowconfi(target_features,labels,centers)
        # num_ids = len(set(labels))
        # print('Iteration {} have {} training ids'.format(iter_n+1, num_ids))
        # generate new dataset
        new_dataset = []
        for (fname, _, cam, timestamp), label in zip(tgt_dataset.trainval, labels):
            # if label == -1: continue
            # keep the original camera id and timestamp alongside the pseudo label
            new_dataset.append((fname, label, cam, timestamp))
        print('Iteration {} has {} training images'.format(iter_n + 1, len(new_dataset)))
        train_loader = DataLoader(
            Preprocessor(new_dataset, root=tgt_dataset.images_dir, transform=train_transformer),
            batch_size=args.batch_size, num_workers=4,
            sampler=RandomIdentitySampler(new_dataset, args.num_instances),
            pin_memory=True, drop_last=True
        )

        evaluator = Evaluator(model, print_freq=args.print_freq)

        if iter_n % 2 == 0:
            trainer = Trainer(model, train_loader, criterion, optimizer)

            for epoch in range(args.epochs):
                trainer.train(epoch)

            rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
        else:
            classOptimizer = torch.optim.Adam([
                {'params': model.parameters()},
                {'params': criterion[2].classifier.parameters(), 'lr': 1e-3}
            ], lr=args.lr)

            classTrainer = ClassTrainer(model, train_loader, criterion, classOptimizer)

            for epoch in range(args.epochs):
                # trainer.train(epoch, remRate=0.2+(0.6/args.iteration)*(1+iter_n)) # to at most 80%
                classTrainer.train(epoch)

            rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    save_checkpoint({
        'state_dict': model.module.state_dict(),
        'epoch': epoch + 1,
        'best_top1': rank_score.market1501[0],
    }, True, fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return (rank_score.map, rank_score.market1501[0])
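A compact sketch of the pseudo-labeling step this loop repeats: K-means over the extracted target features, with the L2-normalized centroids then reusable as classifier weights, as in the ClassificationLoss above. The feature matrix and cluster count are illustrative stand-ins:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize

features = np.random.randn(500, 128).astype(np.float32)  # stand-in for target_features
k = 50                                                   # stand-in for numTarID

km = KMeans(n_clusters=k, n_init=1).fit(features)
pseudo_labels, centers = km.labels_, km.cluster_centers_
centers = normalize(centers, axis=1)  # L2-normalize centroids before building the classifier
print(pseudo_labels.shape, centers.shape)  # (500,) (50, 128)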
Example #4
def get_data(data_dir,
             source,
             target,
             height,
             width,
             batch_size,
             re=0,
             workers=8):

    dataset = DA(data_dir, source, target)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    num_classes = dataset.num_train_ids

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(EPSILON=re),
    ])

    test_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.ToTensor(),
        normalizer,
    ])

    source_train_loader = DataLoader(Preprocessor(
        dataset.source_train,
        root=osp.join(dataset.source_images_dir, dataset.source_train_path),
        transform=train_transformer),
                                     batch_size=batch_size,
                                     num_workers=workers,
                                     shuffle=True,
                                     pin_memory=True,
                                     drop_last=True)

    target_train_loader = DataLoader(ModifiedTargetPreprocessor(
        dataset.target_train,
        root=osp.join(dataset.target_images_dir, dataset.target_train_path),
        transform=train_transformer),
                                     batch_size=batch_size,
                                     num_workers=workers,
                                     shuffle=True,
                                     pin_memory=True,
                                     drop_last=True)

    # target_train_loader = DataLoader(
    #     UnsupervisedCamStylePreprocessor(dataset.target_train,
    #                                      root=osp.join(dataset.target_images_dir, dataset.target_train_path),
    #                                      camstyle_root=osp.join(dataset.target_images_dir,
    #                                                             dataset.target_train_camstyle_path),
    #                                      num_cam=dataset.target_num_cam, transform=train_transformer),
    #     batch_size=batch_size, num_workers=workers,
    #     shuffle=True, pin_memory=True, drop_last=True)

    query_loader = DataLoader(Preprocessor(dataset.query,
                                           root=osp.join(
                                               dataset.target_images_dir,
                                               dataset.query_path),
                                           transform=test_transformer),
                              batch_size=batch_size,
                              num_workers=workers,
                              shuffle=False,
                              pin_memory=True)

    gallery_loader = DataLoader(Preprocessor(dataset.gallery,
                                             root=osp.join(
                                                 dataset.target_images_dir,
                                                 dataset.gallery_path),
                                             transform=test_transformer),
                                batch_size=batch_size,
                                num_workers=workers,
                                shuffle=False,
                                pin_memory=True)

    return dataset, num_classes, source_train_loader, target_train_loader, query_loader, gallery_loader
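An illustrative invocation of this cross-domain loader, assuming the DA dataset wrapper and the project's preprocessors are importable; the dataset names, path, and batch tuple layout below are assumptions, not confirmed by the original code:

dataset, num_classes, source_train_loader, target_train_loader, \
    query_loader, gallery_loader = get_data(
        '/path/to/data', source='dukemtmc', target='market1501',
        height=256, width=128, batch_size=64, re=0.5, workers=8)

for imgs, fnames, pids, cams in source_train_loader:  # assumed (img, fname, pid, cam) batches
    print(imgs.shape)  # e.g. torch.Size([64, 3, 256, 128])
    break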
Example #5
def get_data(dataname,
             data_dir,
             height,
             width,
             batch_size,
             trainer,
             re=0,
             workers=8):
    root = osp.join(data_dir, dataname)

    dataset = datasets.create(dataname, root, trainer)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(EPSILON=re),
    ])

    test_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.ToTensor(),
        normalizer,
    ])

    if trainer <= 0:
        num_classes = dataset.num_train_ids
        train_loader = DataLoader(Preprocessor(dataset.train,
                                               root=osp.join(
                                                   dataset.images_dir,
                                                   dataset.train_path),
                                               transform=train_transformer),
                                  batch_size=batch_size,
                                  num_workers=workers,
                                  shuffle=True,
                                  pin_memory=True,
                                  drop_last=True)
        query_loader = DataLoader(Preprocessor(dataset.query,
                                               root=osp.join(
                                                   dataset.images_dir,
                                                   dataset.query_path),
                                               transform=test_transformer),
                                  batch_size=batch_size,
                                  num_workers=workers,
                                  shuffle=False,
                                  pin_memory=True)

        gallery_loader = DataLoader(Preprocessor(dataset.gallery,
                                                 root=osp.join(
                                                     dataset.images_dir,
                                                     dataset.gallery_path),
                                                 transform=test_transformer),
                                    batch_size=batch_size,
                                    num_workers=workers,
                                    shuffle=False,
                                    pin_memory=True)
        return dataset, num_classes, train_loader, query_loader, gallery_loader

    elif trainer == 1:
        num_classes = dataset.num_pair_ids
        pair_loader = DataLoader(Preprocessor_Pair(
            dataset.pair,
            root=osp.join(dataset.images_dir, dataset.pair_path),
            transform=train_transformer),
                                 batch_size=batch_size,
                                 num_workers=workers,
                                 shuffle=True,
                                 pin_memory=True,
                                 drop_last=True)
        query_loader = DataLoader(Preprocessor_Pair(
            dataset.query,
            root=osp.join(dataset.images_dir, dataset.query_path),
            transform=test_transformer),
                                  batch_size=batch_size,
                                  num_workers=workers,
                                  shuffle=False,
                                  pin_memory=True)
        gallery_loader = DataLoader(Preprocessor_Pair(
            dataset.gallery,
            root=osp.join(dataset.images_dir, dataset.gallery_path),
            transform=test_transformer),
                                    batch_size=batch_size,
                                    num_workers=workers,
                                    shuffle=False,
                                    pin_memory=True)
        return dataset, num_classes, pair_loader, query_loader, gallery_loader

    elif trainer == 2:
        num_classes = dataset.num_triplet_ids
        triplet_loader = DataLoader(Preprocessor_Triplet(
            dataset.triplet,
            root=osp.join(dataset.images_dir, dataset.triplet_path),
            transform=train_transformer),
                                    batch_size=batch_size,
                                    num_workers=workers,
                                    shuffle=True,
                                    pin_memory=True,
                                    drop_last=True)
        query_loader = DataLoader(Preprocessor_Pair(
            dataset.query,
            root=osp.join(dataset.images_dir, dataset.query_path),
            transform=test_transformer),
                                  batch_size=batch_size,
                                  num_workers=workers,
                                  shuffle=False,
                                  pin_memory=True)
        gallery_loader = DataLoader(Preprocessor_Pair(
            dataset.gallery,
            root=osp.join(dataset.images_dir, dataset.gallery_path),
            transform=test_transformer),
                                    batch_size=batch_size,
                                    num_workers=workers,
                                    shuffle=False,
                                    pin_memory=True)
        return dataset, num_classes, triplet_loader, query_loader, gallery_loader
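The trainer flag selects one of three loader layouts (single images, pairs, triplets); a minimal dispatch sketch under the same assumptions as above, with the dataset name and sizes as placeholders:

for mode in (0, 1, 2):  # 0: single images, 1: pairs, 2: triplets
    outputs = get_data('market1501', '/path/to/data', height=256, width=128,
                       batch_size=32, trainer=mode, re=0.5, workers=4)
    dataset, num_classes, main_loader = outputs[0], outputs[1], outputs[2]
    print('trainer={}: {} classes, {} batches'.format(mode, num_classes, len(main_loader)))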
Example #6
def main(args):
    np.random.seed(args.seed)  # reset the NumPy seed so every run draws the same numbers
    torch.manual_seed(args.seed)  # seed torch's random number generator
    cudnn.benchmark = True  # let the cudnn auto-tuner pick the fastest algorithms for this hardware

    # Redirect print to both console and log file
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print(args)
    shutil.copy(sys.argv[0], osp.join(args.logs_dir,
                                      osp.basename(sys.argv[0])))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (256, 128)
    # num_workers sets how many subprocesses load batches in parallel
    # (see https://deeplizard.com/learn/video/kWVgvsejXsE)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size * 8, args.workers)

    # Create model
    model = models.create("ft_net_inter",
                          num_classes=num_classes, stride=args.stride)

    # Load from checkpoint
    start_epoch = 0
    best_top1 = 0
    top1 = 0
    is_best = False
    if args.checkpoint is not None:
        if args.evaluate:
            checkpoint = load_checkpoint(args.checkpoint)
            param_dict = model.state_dict()  # a state_dict maps each layer name to its parameter tensor
            for k, v in checkpoint['state_dict'].items():
                if 'model' in k:
                    param_dict[k] = v
            model.load_state_dict(param_dict)
        else:
            model.model.load_param(args.checkpoint)
    model = model.cuda()

    # Distance metric
    metric = None

    # Evaluator
    evaluator = Evaluator(model, use_cpu=args.use_cpu)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    train_transformer = [
        T.Resize((args.height, args.width), interpolation=3),
        T.RandomHorizontalFlip(),
        T.Pad(10),
        T.RandomCrop((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        T.RandomErasing(probability=0.5),
    ]
    train_transformer = T.Compose(train_transformer)
    for cluster_epoch in range(args.cluster_epochs):
        # -------------------------Stage 1: intra-camera training--------------------------
        # Cluster and generate a new dataset and model:
        # split the training set into per-camera subsets,
        # cluster each subset by pairwise similarity,
        # give all images within a cluster the same pseudo label,
        # and train on those labels with cross-entropy loss.
        cluster_result = get_intra_cam_cluster_result(model, train_loader,
                                                      args.class_number_stage1,
                                                      args.linkage)
        cluster_datasets = [
            datasets.create("cluster", osp.join(args.data_dir, args.dataset),
                            cluster_result[cam_id], cam_id)
            for cam_id in cluster_result.keys()
        ]

        cluster_dataloaders = [
            DataLoader(Preprocessor(dataset.train_set,
                                    root=dataset.images_dir,
                                    transform=train_transformer),
                       batch_size=args.batch_size,
                       num_workers=args.workers,
                       shuffle=True,
                       pin_memory=False,
                       drop_last=True) for dataset in cluster_datasets
        ]
        param_dict = model.model.state_dict()
        model = models.create("ft_net_intra",
                              num_classes=[
                                  args.class_number_stage1
                                  for cam_id in cluster_result.keys()
                              ],
                              stride=args.stride)

        model_param_dict = model.model.state_dict()
        for k, v in model_param_dict.items():
            if k in param_dict.keys():
                model_param_dict[k] = param_dict[k]
        model.model.load_state_dict(model_param_dict)

        model = model.cuda()
        criterion = nn.CrossEntropyLoss().cuda()

        # Optimizer
        param_groups = make_params(model, args.lr, args.weight_decay)
        optimizer = torch.optim.SGD(param_groups, momentum=0.9)
        # Trainer
        trainer = IntraCameraTrainer(
            model, criterion, warm_up_epoch=args.warm_up)
        print("start training")
        # Start training
        for epoch in range(0, args.epochs_stage1):
            trainer.train(cluster_epoch,
                          epoch,
                          cluster_dataloaders,
                          optimizer,
                          print_freq=args.print_freq,
                          )
        # -------------------------------------------Stage 2: inter-camera training-----------------------------------
        mix_rate = get_mix_rate(
            args.mix_rate, cluster_epoch, args.cluster_epochs, power=args.decay_factor)

        cluster_result = get_inter_cam_cluster_result(
            model,
            train_loader,
            args.class_number_stage2,
            args.linkage,
            mix_rate,
            use_cpu=args.use_cpu)

        cluster_dataset = datasets.create(
            "cluster", osp.join(args.data_dir, args.dataset), cluster_result,
            0)

        cluster_dataloaders = DataLoader(
            Preprocessor(cluster_dataset.train_set,
                         root=cluster_dataset.images_dir,
                         transform=train_transformer),
            batch_size=args.batch_size_stage2,
            num_workers=args.workers,
            sampler=RandomIdentitySampler(cluster_dataset.train_set,
                                          args.batch_size_stage2,
                                          args.instances),
            pin_memory=False,
            drop_last=True)

        param_dict = model.model.state_dict()
        model = models.create("ft_net_inter",
                              num_classes=args.class_number_stage2,
                              stride=args.stride)
        model.model.load_state_dict(param_dict)

        model = model.cuda()
        # Criterion
        criterion_entropy = nn.CrossEntropyLoss().cuda()
        criterion_triple = TripletLoss(margin=args.margin).cuda()

        # Optimizer
        param_groups = make_params(model,
                                   args.lr * args.batch_size_stage2 / 32,
                                   args.weight_decay)

        optimizer = torch.optim.SGD(param_groups, momentum=0.9)
        # Trainer
        trainer = InterCameraTrainer(model,
                                     criterion_entropy,
                                     criterion_triple,
                                     warm_up_epoch=args.warm_up,
                                     )

        print("start training")
        # Start training
        for epoch in range(0, args.epochs_stage2):
            trainer.train(cluster_epoch,
                          epoch,
                          cluster_dataloaders,
                          optimizer,
                          print_freq=args.print_freq)
        if (cluster_epoch + 1) % 5 == 0:  # evaluate every 5 cluster epochs (cluster_epoch 4, 9, 14, ...)

            evaluator = Evaluator(model, use_cpu=args.use_cpu)
            top1, mAP = evaluator.evaluate(
                test_loader, dataset.query, dataset.gallery, metric, return_mAP=True)

            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)

            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': cluster_epoch + 1,
                    'best_top1': best_top1,
                    'cluster_epoch': cluster_epoch + 1,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        if cluster_epoch == (args.cluster_epochs - 1):
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': cluster_epoch + 1,
                    'best_top1': best_top1,
                    'cluster_epoch': cluster_epoch + 1,
                },
                False,
                fpath=osp.join(args.logs_dir, 'latest.pth.tar'))

        print('\n * cluster_epoch: {:3d} top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(cluster_epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    best_rank1, mAP = evaluator.evaluate(
        test_loader, dataset.query, dataset.gallery, metric, return_mAP=True)
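Each stage switch above rebuilds the model with a different head and transplants the shared backbone weights by copying matching state_dict entries. A self-contained sketch of that pattern with toy modules; the shape check is an added safeguard, not in the original:

import torch
import torch.nn as nn

old = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 4))   # stand-in for the stage-1 model
new = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 10))  # same backbone, different head

old_sd, new_sd = old.state_dict(), new.state_dict()
for k, v in new_sd.items():
    if k in old_sd and old_sd[k].shape == v.shape:  # copy only layers that line up
        new_sd[k] = old_sd[k]
new.load_state_dict(new_sd)
print(torch.equal(new[0].weight, old[0].weight))  # True: backbone transferred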
Example #7
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hack: size the classifier head to match the number of source-domain ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print(
            'Resuming checkpoint from a model fine-tuned on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        # abort: this pipeline requires a pre-trained checkpoint
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    # if args.evaluate: return

    # Criterion
    criterion = [SortedTripletLoss(args.margin, isAvg=True).cuda()]

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # training-stage transforms on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    evaluator = Evaluator(model, print_freq=args.print_freq)
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    st_model = ST_Model(tgt_dataset.meta['num_cameras'])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # extract source-domain features
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronize feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, tarNames = extract_features(
            model, tgt_extfeat_loader, print_freq=args.print_freq)
        # synchronize feature order with tgt_dataset.trainval
        target_features = torch.cat([
            target_features[f].unsqueeze(0)
            for f, _, _, _ in tgt_dataset.trainval
        ], 0)
        # target_real_label = np.asarray([tarNames[f].unsqueeze(0) for f, _, _, _ in tgt_dataset.trainval])

        # calculate distance and rerank result
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)

        # if iter_n > 0:
        #     rerank_dist = st_model.apply(rerank_dist, tgt_dataset.trainval, tgt_dataset.trainval)

        cluster = HDBSCAN(metric='precomputed', min_samples=10)
        # select & cluster images as this epoch's training set
        clusterRes = cluster.fit(rerank_dist)
        labels, label_num = clusterRes.labels_, clusterRes.labels_.max() + 1
        # centers = np.zeros((label_num, target_features.shape[1]))
        # nums = [0] * target_features.shape[1]
        print('clusters num =', label_num)

        # generate new dataset
        new_dataset = []
        index = -1
        for (fname, _, cam, timestamp), label in zip(tgt_dataset.trainval,
                                                     labels):
            index += 1
            if label == -1: continue
            # keep the original camera id and timestamp alongside the pseudo label
            new_dataset.append((fname, label, cam, timestamp))
            # centers[label] += target_features[index]
            # nums[label] += 1
        print('Iteration {} has {} training images'.format(
            iter_n + 1, len(new_dataset)))

        # learn ST model
        same, _ = st_model.fit(new_dataset)

        # st_model.fit(tgt_dataset.trainval)

        def filter(i, j):
            _, _, c1, t1 = tgt_dataset.trainval[i]
            _, _, c2, t2 = tgt_dataset.trainval[j]
            return same.in_peak(c1, c2, t1, t2, 0.2)

        ranking = np.argsort(rerank_dist)[:, 1:]  # nearest neighbours per image, self excluded

        cluster_size = 23.535612535612536  # hard-coded expected average cluster size
        must_conn = int(cluster_size / 2)
        might_conn = int(cluster_size * 2)

        length = len(tgt_dataset.trainval)
        pos = [[] for _ in range(length)]
        neg = [[] for _ in range(length)]
        for i in range(length):
            for j_ in range(might_conn):
                j = ranking[i][j_]
                if j_ < must_conn and i in ranking[j][:must_conn]:
                    pos[i].append(j)
                elif i in ranking[j][:might_conn] and filter(i, j):
                    pos[i].append(j)
                # if j_ < must_conn or filter(i, j):
                #     pos[i].append(j)
                else:
                    neg[i].append(j)
            if len(neg[i]) < len(pos[i]):
                neg[i].extend(ranking[i][j_ + 1:j_ + 1 + len(pos[i]) -
                                         len(neg[i])])

        # learn visual model
        # for i in range(label_num):
        #     centers[i] /= nums[i]
        # criterion[3] = ClassificationLoss(normalize(centers, axis=1)).cuda()
        #
        # classOptimizer = torch.optim.Adam([
        #     {'params': model.parameters()},
        #     {'params': criterion[3].classifier.parameters(), 'lr': 1e-3}
        # ], lr=args.lr)

        train_loader = DataLoader(Preprocessor(tgt_dataset.trainval,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=TripletSampler(
                                      tgt_dataset.trainval, pos, neg),
                                  pin_memory=True,
                                  drop_last=True)

        trainer = Trainer(model, train_loader, criterion, optimizer)

        for epoch in range(args.epochs):
            trainer.train(epoch)

        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                        tgt_dataset.gallery)

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    save_checkpoint(
        {
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': rank_score.market1501[0],
        },
        True,
        fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return (rank_score.map, rank_score.market1501[0])
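The mining step above treats a neighbour as a positive only when the ranking is mutual (i is also among j's top neighbours). A stripped-down sketch of that reciprocity test on a toy symmetric distance matrix; the must_conn/might_conn values are stand-ins and the spatio-temporal filter is dropped for brevity:

import numpy as np

rng = np.random.default_rng(1)
dist = rng.random((10, 10))
dist = (dist + dist.T) / 2
np.fill_diagonal(dist, 0)

ranking = np.argsort(dist)[:, 1:]  # neighbours per row, self excluded
must_conn, might_conn = 3, 6       # stand-ins for cluster_size / 2 and cluster_size * 2

pos = [[] for _ in range(len(dist))]
neg = [[] for _ in range(len(dist))]
for i in range(len(dist)):
    for j_ in range(might_conn):
        j = ranking[i][j_]
        if j_ < must_conn and i in ranking[j][:must_conn]:
            pos[i].append(j)  # mutually top-ranked: treat as a positive pair
        else:
            neg[i].append(j)
print(pos[0], neg[0])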