Example 1
def save_model(model, is_best, best_mAP):
    save_checkpoint(
        {
            'state_dict': model.state_dict(),
            'epoch': epoch + 1,
            'best_mAP': best_mAP,
        },
        is_best,
        fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
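Every example here calls a `save_checkpoint(state, is_best, fpath=...)` helper that is not shown in the snippets. A minimal sketch of such a helper, assuming the common open-reid convention of writing the state with `torch.save` and copying the best checkpoint to `model_best.pth.tar` next to it (the exact helper in these repositories may differ):

import os
import os.path as osp
import shutil

import torch


def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
    # Create the target directory if it does not exist yet.
    os.makedirs(osp.dirname(fpath) or '.', exist_ok=True)
    torch.save(state, fpath)
    # Keep a separate copy of the best checkpoint; the final-test code in the
    # later examples reloads it as 'model_best.pth.tar'.
    if is_best:
        shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model_best.pth.tar'))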
Example 2
def save_model(model_ema, is_best, best_mAP, mid):
    save_checkpoint(
        {
            'state_dict': model_ema.state_dict(),
            'epoch': epoch + 1,
            'best_mAP': best_mAP,
        },
        is_best,
        fpath=osp.join(args.logs_dir,
                       'model' + str(mid) + '_checkpoint.pth.tar'))
Example 3
def cotrain(configs, data, iter_step=1, train_ratio=0.2):
    """
    cotrain model:
    params:
    model_names: model configs
    data: dataset include train and untrain data
    save_paths: paths for storing models
    iter_step: maximum iteration steps
    train_ratio: labeled data ratio

    """
    assert iter_step >= 1
    train_data, untrain_data = dp.split_dataset(data.trainval, train_ratio,
                                                args.seed)
    data_dir = data.images_dir

    new_train_data = train_data
    for step in range(iter_step):
        pred_probs = []
        add_ids = []
        for view in range(2):
            configs[view].set_training(True)
            model = mu.train(new_train_data, data_dir, configs[view])
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': step + 1,
                    'train_data': new_train_data
                },
                False,
                fpath=os.path.join(configs[view].logs_dir,
                                   configs[view].model_name,
                                   'cotrain.epoch%d' % step))
            if len(untrain_data) == 0:
                continue
            pred_probs.append(
                mu.predict_prob(model, untrain_data, data_dir, configs[view]))
            add_ids.append(dp.sel_idx(pred_probs[view], train_data))

            # calculate prediction accuracy on all data
            p_b = mu.predict_prob(model, data.trainval, data_dir,
                                  configs[view])
            p_y = np.argmax(p_b, axis=1)
            t_y = [c for (_, c, _, _) in data.trainval]
            print(np.mean(t_y == p_y))

        if len(untrain_data) == 0:
            break

        # update training data
        pred_y = np.argmax(sum(pred_probs), axis=1)
        add_id = sum(add_ids)
        new_train_data, untrain_data = dp.update_train_untrain(
            add_id, new_train_data, untrain_data, pred_y)
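The label-update step at the end of each co-training round adds the two views' class-probability matrices and takes the argmax (`pred_y = np.argmax(sum(pred_probs), axis=1)` above). A tiny self-contained illustration with made-up numbers:

import numpy as np

# Hypothetical class probabilities from the two views for 3 unlabeled samples, 2 classes.
probs_view0 = np.array([[0.70, 0.30], [0.40, 0.60], [0.55, 0.45]])
probs_view1 = np.array([[0.60, 0.40], [0.10, 0.90], [0.30, 0.70]])

# Same fusion as in cotrain(): sum the per-view probabilities, then argmax.
pred_y = np.argmax(probs_view0 + probs_view1, axis=1)
print(pred_y)  # [0 1 1] -- sample 2 is decided by the more confident view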
Example 4
def save_model_when_running(curr_best_map):
    metric.train(model, train_loader)
    top_map = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
    is_best = top_map > curr_best_map
    curr_best_map = max(top_map, curr_best_map)
    save_checkpoint({
        'state_dict': model.module.state_dict(),
        'epoch': epoch + 1,
        'best_map': top_map,
    }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
    return curr_best_map
Example 5
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hack: set the classifier size to the number of source-domain identities
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError('Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print('Resuming from a checkpoint fine-tuned on another dataset...\n')
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    # if args.evaluate: return

    # Criterion
    criterion = [
        # TripletLoss(args.margin, args.num_instances, isAvg=True, use_semi=True).cuda(),
        SortedTripletLoss(args.margin, isAvg=True).cuda(),
        # HoughTripletLoss(args.margin, args.num_instances, isAvg=True, use_semi=True).cuda(),
        # None,
        None, None, None
    ]


    # Optimizer
    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr
    )


    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height,args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    evaluator = Evaluator(model, print_freq=args.print_freq)
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    st_model = ST_Model(tgt_dataset.meta['num_cameras'])
    same = None
    # train_loader2 = None
    best_mAP = 0

    # # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # get source data features
            source_features, _ = extract_features(model, src_extfeat_loader, print_freq=args.print_freq)
            # synchronize feature order with src_dataset.train
            source_features = torch.cat([source_features[f].unsqueeze(0) for f, _, _, _ in src_dataset.train], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n+1))
        target_features, tarNames = extract_features(model, tgt_extfeat_loader, print_freq=args.print_freq)
        # synchronize feature order with tgt_dataset.trainval
        target_features = torch.cat([target_features[f].unsqueeze(0) for f, _, _, _ in tgt_dataset.trainval], 0)
        # target_real_label = np.asarray([tarNames[f].unsqueeze(0) for f, _, _, _ in tgt_dataset.trainval])

        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features, target_features, lambda_value=args.lambda_value)

        ranking = np.argsort(rerank_dist)[:, 1:]

        if iter_n != 0:
            st_dist = np.zeros(rerank_dist.shape)
            for i, (_, _, c1, t1) in enumerate(tgt_dataset.trainval):
                for j, (_, _, c2, t2) in enumerate(tgt_dataset.trainval):
                    if not same.in_peak(c1, c2, t1, t2, 0.25):
                        st_dist[i, j] = 1

            rerank_dist = rerank_dist + st_dist * 10

        # if iter_n > 0:
        #     rerank_dist = st_model.apply(rerank_dist, tgt_dataset.trainval, tgt_dataset.trainval)

        cluster = HDBSCAN(metric='precomputed', min_samples=10)
        # select & cluster images as the training set for this epoch
        clusterRes = cluster.fit(rerank_dist.astype(np.float64))
        labels, label_num = clusterRes.labels_, clusterRes.labels_.max() + 1
        centers = np.zeros((label_num, target_features.shape[1]))
        nums = [0] * label_num  # per-cluster sample counts
        print('clusters num =', label_num)

        # generate new dataset
        new_dataset = []
        index = -1
        for (fname, _, cam, timestamp), label in zip(tgt_dataset.trainval, labels):
            index += 1
            if label == -1: continue
            # no need to change the _parsing_input function in trainer.py or the sampler for this tuple format
            new_dataset.append((fname, label, cam, timestamp))
            centers[label] += target_features[index]
            nums[label] += 1
        print('Iteration {} has {} training images'.format(iter_n+1, len(new_dataset)))

        # learn ST model
        same, _ = st_model.fit(new_dataset)

        train_loader = DataLoader(
            Preprocessor(new_dataset, root=tgt_dataset.images_dir, transform=train_transformer),
            batch_size=args.batch_size, num_workers=4,
            sampler=RandomIdentitySampler(new_dataset, args.num_instances),
            pin_memory=True, drop_last=True
        )

        def filter(i, j):
            _, _, c1, t1 = tgt_dataset.trainval[i]
            _, _, c2, t2 = tgt_dataset.trainval[j]
            return st_model.val(c1, c2, t1, t2) > 0.01

        # if iter_n == 0:
        #     ranking = np.argsort(rerank_dist)[:, 1:]

        # dukemtmc
        # cluster_size = 23.535612535612536

        # market1501
        cluster_size = 17.22503328894807

        # connection thresholds derived from the dataset-specific cluster size
        must_conn = int(cluster_size / 2)
        might_conn = int(cluster_size * 2)

        length = len(tgt_dataset.trainval)
        pos = [[] for _ in range(length)]
        neg = [[] for _ in range(length)]
        for i in range(length):
            for j_ in range(might_conn):
                j = ranking[i][j_]
                if j_ < must_conn and i in ranking[j][:must_conn]:
                    pos[i].append(j)
                elif i in ranking[j][:might_conn] and filter(i, j):
                    pos[i].append(j)
                else:
                    neg[i].append(j)
            # pos[i] = pos[i][-1:]
            # neg[i] = neg[i][:1]

        SP, SF, DP, DF = 0, 0, 0, 0
        for i in range(length):
            for j in pos[i]:
                if tgt_dataset.trainval[i][1] == tgt_dataset.trainval[j][1]:
                    SP += 1
                else:
                    SF += 1
            for j in neg[i]:
                if tgt_dataset.trainval[i][1] == tgt_dataset.trainval[j][1]:
                    DP += 1
                else:
                    DF += 1
        print('stat: %.1f %.1f %.3f, %.3f' % ((SP + SF) / length, (DP + DF) / length, SP / (SP + SF), DF / (DP + DF)))

        train_loader2 = DataLoader(
            Preprocessor(tgt_dataset.trainval, root=tgt_dataset.images_dir, transform=train_transformer),
            batch_size=args.batch_size, num_workers=4,
            # sampler=RandomIdentitySampler(new_dataset, args.num_instances),
            # shuffle=True,
            sampler=TripletSampler(tgt_dataset.trainval, pos, neg),
            pin_memory=True, drop_last=True
        )

        # learn visual model
        for i in range(label_num):
            centers[i] /= nums[i]
        criterion[3] = ClassificationLoss(normalize(centers, axis=1)).cuda()

        classOptimizer = torch.optim.Adam([
            {'params': model.parameters()},
            {'params': criterion[3].classifier.parameters(), 'lr': 1e-3}
        ], lr=args.lr)

        # trainer = HoughTrainer(model, st_model, train_loader, criterion, classOptimizer)
        trainer = ClassificationTrainer(model, train_loader, criterion, classOptimizer)
        trainer2 = Trainer(model, train_loader2, criterion, optimizer)

        for epoch in range(args.epochs):
            trainer.train(epoch)
            if epoch % 8 == 0:
                trainer2.train(epoch)
            # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
        if rank_score.map > best_mAP:
            best_mAP = rank_score.map
            save_checkpoint({
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1, 'best_top1': rank_score.market1501[0],
                }, True, fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1, 'best_top1': rank_score.market1501[0],
        }, False, fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return (rank_score.map, rank_score.market1501[0])
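The adapted weights saved above can later be restored for evaluation, mirroring the final-test code of the other examples. A minimal sketch, assuming this script's own imports (`models`, `load_checkpoint`, `Evaluator`, `nn`, `osp`) and the same architecture and class count used at training time:

def evaluate_adapted(args, test_loader, tgt_dataset, num_classes=676):
    # num_classes must match the classifier used during adaptation
    # (676 for a market1501 source model in this example).
    model = models.create(args.arch, num_classes=num_classes, pretrained=False)
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'adapted.pth.tar'))
    # The checkpoint stores model.module.state_dict(), so it loads into the
    # un-wrapped model before re-wrapping with DataParallel.
    model.load_state_dict(checkpoint['state_dict'])
    model = nn.DataParallel(model).cuda()
    evaluator = Evaluator(model, print_freq=args.print_freq)
    return evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)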
Example 6
    save_pth = os.path.join(config.logs_dir, config.model_name,
                            '%s' % (args.checkpoint))
    if not os.path.exists(save_pth):
        raise ValueError('checkpoint path does not exist: %s' % save_pth)
    checkpoint = load_checkpoint(save_pth)
    state_dict = {
        k: v
        for k, v in checkpoint['state_dict'].items()
        if k in model.state_dict().keys()
    }
    model.load_state_dict(state_dict)
else:
    model = mu.train(data.trainval, data.images_dir, config)

    save_checkpoint(
        {
            'state_dict': model.state_dict(),
            'epoch': 0,
            'train_data': data.trainval
        },
        False,
        fpath=os.path.join(config.logs_dir, config.model_name,
                           'full_supervised'))
mu.evaluate(model, data, config)

if args.evaluate:
    train_data = checkpoint['train_data']
    weight = [w for (_, _, _, w) in train_data]
    print(np.median(weight))
    print(np.mean(weight))
Example 7
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = InceptionNet(num_channels=8,
                         num_features=args.features,
                         dropout=args.dropout,
                         num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=os.path.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(
        os.path.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)

    features, _ = extract_features(evaluator.model, test_loader)
    distmat = pairwise_distance(features,
                                dataset.query,
                                dataset.gallery,
                                metric=metric)
    evaluate_all(distmat,
                 query=dataset.query,
                 gallery=dataset.gallery,
                 cmc_topk=(1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50))

    torch.save(model, os.path.join(args.logs_dir, 'model.pt'))
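The `adjust_lr` helper above applies a simple step decay: the learning rate is divided by 10 every `step_size` epochs. A quick worked example, assuming a base lr of 0.01 and the 40-epoch step used for non-inception architectures (both values are illustrative):

base_lr, step_size = 0.01, 40  # hypothetical values matching the non-inception branch
for epoch in (0, 39, 40, 80, 120):
    print(epoch, round(base_lr * (0.1 ** (epoch // step_size)), 6))
# -> 0 0.01, 39 0.01, 40 0.001, 80 0.0001, 120 1e-05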
Example 8
def spaco(configs, data, iter_step=1, gamma=0.3, train_ratio=0.2):
    """
    self-paced co-training model implementation based on Pytroch
    params:
    model_names: model names for spaco, such as ['resnet50','densenet121']
    data: dataset for spaco model
    save_pathts: save paths for two models
    iter_step: iteration round for spaco
    gamma: spaco hyperparameter
    train_ratio: initiate training dataset ratio
    """
    num_view = len(configs)
    train_data, untrain_data = dp.split_dataset(data.trainval, train_ratio,
                                                args.seed)
    data_dir = data.images_dir
    ###########
    # initialize classifiers to get predictions
    ###########

    add_ratio = 0.5
    pred_probs = []
    add_ids = []
    start_step = 0
    for view in range(num_view):
        if configs[view].checkpoint is None:
            model = mu.train(train_data, data_dir, configs[view])
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': 0,
                    'train_data': train_data
                },
                False,
                fpath=os.path.join(configs[view].logs_dir,
                                   configs[view].model_name, 'spaco.epoch0'))
        else:
            model = models.create(configs[view].model_name,
                                  num_features=configs[view].num_features,
                                  dropout=configs[view].dropout,
                                  num_classes=configs[view].num_classes)
            model = torch.nn.DataParallel(model).cuda()
            checkpoint = load_checkpoint(configs[view].checkpoint)
            model.load_state_dict(checkpoint['state_dict'])
            start_step = checkpoint['epoch']
            configs[view].set_training(False)
            add_ratio += start_step * 0.5
            # mu.evaluate(model, data, configs[view])
        pred_probs.append(
            mu.predict_prob(model, untrain_data, data_dir, configs[view]))
        add_ids.append(dp.sel_idx(pred_probs[view], train_data, add_ratio))
    pred_y = np.argmax(sum(pred_probs), axis=1)
    for step in range(start_step, iter_step):
        for view in range(num_view):
            # update v_view
            ov = add_ids[1 - view]
            pred_probs[view][ov, pred_y[ov]] += gamma
            add_id = dp.sel_idx(pred_probs[view], train_data, add_ratio)

            # update w_view
            new_train_data, _ = dp.update_train_untrain(
                add_id, train_data, untrain_data, pred_y)
            configs[view].set_training(True)
            model = mu.train(new_train_data, data_dir, configs[view])

            # update y
            pred_probs[view] = mu.predict_prob(model, untrain_data, data_dir,
                                               configs[view])
            pred_y = np.argmax(sum(pred_probs), axis=1)

            # update v_view for the next step
            add_ratio += 0.5
            pred_probs[view][ov, pred_y[ov]] += gamma
            add_ids[view] = dp.sel_idx(pred_probs[view], train_data, add_ratio)

            # calculate prediction accuracy on all data
            p_b = mu.predict_prob(model, data.trainval, data_dir,
                                  configs[view])
            p_y = np.argmax(p_b, axis=1)
            t_y = [c for (_, c, _, _) in data.trainval]
            print(np.mean(t_y == p_y))
            # evaluate the current model and save it
            # mu.evaluate(model, data, configs[view])
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': step + 1,
                    'train_data': new_train_data
                },
                False,
                fpath=os.path.join(configs[view].logs_dir,
                                   configs[view].model_name,
                                   'spaco.epoch%d' % (step + 1)))
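Inside the loop, `pred_probs[view][ov, pred_y[ov]] += gamma` raises, by `gamma`, the probability of the currently predicted class for the samples the other view has already selected, so they become easier to pick in the next selection step. A tiny illustration with hypothetical numbers:

import numpy as np

gamma = 0.3
# Hypothetical class probabilities of one view for 4 unlabeled samples, 3 classes.
pred_prob = np.array([[0.5, 0.3, 0.2],
                      [0.2, 0.6, 0.2],
                      [0.4, 0.4, 0.2],
                      [0.1, 0.2, 0.7]])
ov = np.array([0, 2])            # indices selected by the other view
pred_y = np.array([0, 1, 1, 2])  # current fused pseudo-labels for all samples

# Same update as in spaco(): boost the predicted class of the selected samples.
pred_prob[ov, pred_y[ov]] += gamma
print(pred_prob[ov])  # rows 0 and 2 now read [0.8 0.3 0.2] and [0.4 0.7 0.2]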
Example 9
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    print(args)
    # Create data loaders
    dataset, num_classes, source_train_loader, query_loader_2, gallery_loader_2, query_loader_3, gallery_loader_3 = \
        get_data(args.data_dir, args.source, args.target, args.height,
                 args.width, args.batch_size, args.num_instance, args.workers)

    # Create model
    MaskNet, TaskNet = models.create(args.arch,
                                     num_features=args.features,
                                     dropout=args.dropout,
                                     num_classes=num_classes)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        MaskNet.load_state_dict(checkpoint['MaskNet'])
        TaskNet.load_state_dict(checkpoint['TaskNet'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    MaskNet = nn.DataParallel(MaskNet).cuda()
    TaskNet = nn.DataParallel(TaskNet).cuda()

    # Evaluator
    evaluator = Evaluator([MaskNet, TaskNet])
    if args.evaluate:
        print("Test:")
        print("partial-iLIDS:")
        evaluator.evaluate(query_loader_2, gallery_loader_2, dataset.query_2,
                           dataset.gallery_2, args.output_feature)
        print("partial-REID:")
        evaluator.evaluate(query_loader_3, gallery_loader_3, dataset.query_3,
                           dataset.gallery_3, args.output_feature)

        return

    # Criterion
    criterion = []
    criterion.append(nn.CrossEntropyLoss().cuda())
    criterion.append(TripletLoss(margin=args.margin))
    criterion.append(nn.MSELoss(reduce=True, size_average=True).cuda())

    # Optimizer
    param_groups = [
        {
            'params': MaskNet.module.parameters(),
            'lr_mult': 0.1
        },
    ]
    optimizer_Mask = torch.optim.SGD(param_groups,
                                     lr=args.lr,
                                     momentum=args.momentum,
                                     weight_decay=args.weight_decay,
                                     nesterov=True)
    #
    base_param_ids = set(map(id, TaskNet.module.base.parameters()))
    new_params = [
        p for p in TaskNet.parameters() if id(p) not in base_param_ids
    ]
    param_groups = [{
        'params': TaskNet.module.base.parameters(),
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]
    optimizer_Ide = torch.optim.SGD(param_groups,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)

    # Trainer
    trainer = Trainer([MaskNet, TaskNet], criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 10
        if epoch <= 9:
            lr = 0.0008 * (epoch / 10.0)
        elif epoch <= 16:
            lr = 0.1
        elif epoch <= 23:
            lr = 0.001
        else:
            lr = 0.0001

        for g in optimizer_Mask.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        for g in optimizer_Ide.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    tmp_2 = best_2 = 0
    tmp_3 = best_3 = 0
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, [source_train_loader],
                      [optimizer_Mask, optimizer_Ide], args.batch_size)

        save_checkpoint(
            {
                'MaskNet': MaskNet.module.state_dict(),
                'TaskNet': TaskNet.module.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        if epoch == 9:
            save_checkpoint(
                {
                    'MaskNet': MaskNet.module.state_dict(),
                    'TaskNet': TaskNet.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir, 'epoch9_checkpoint.pth.tar'))

        evaluator = Evaluator([MaskNet, TaskNet])
        if epoch > 9 and epoch % 1 == 0:
            tmp_2 = evaluator.evaluate(query_loader_2, gallery_loader_2,
                                       dataset.query_2, dataset.gallery_2,
                                       args.output_feature)
            tmp_3 = evaluator.evaluate(query_loader_3, gallery_loader_3,
                                       dataset.query_3, dataset.gallery_3,
                                       args.output_feature)

        if (tmp_2 > best_2):
            save_checkpoint(
                {
                    'MaskNet': MaskNet.module.state_dict(),
                    'TaskNet': TaskNet.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir,
                               'best_checkpoint_piLIDS.pth.tar'))
            best_2 = tmp_2
        print("iLIDS_best:", best_2)
        #
        if (tmp_3 > best_3):
            save_checkpoint(
                {
                    'MaskNet': MaskNet.module.state_dict(),
                    'TaskNet': TaskNet.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir, 'best_checkpoint_pREID.pth.tar'))
            best_3 = tmp_3
        print("REID_best:", best_3)
        print('\n * Finished epoch {:3d} \n'.format(epoch))
Example 10
        start_epoch = checkpoint['epoch'] - 1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))

    meta_test_loader = IterLoader(meta_test_loader, length=200)
    import random

    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, meta_train_loader, meta_test_loader, optimizer_defense, noise, args)
        meta_test_loader.new_epoch()
        if epoch < args.start_save:
            continue
        if (epoch+1) % 5 == 0:
            # metric.train(model, meta_train_loader)
            print("eval on current attack ")
            _, _, rank_score = test(sourceSet, model, noise, args, evaSrc, epoch, args.source)
            ###eval raw result
            print("eval on new model ")
            s = evaSrc.evaluate(sc_test_loader, sourceSet.query, sourceSet.gallery)
            top1 = rank_score.map
            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, is_best, epoch, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
                  format(epoch, top1, best_top1, ' *' if is_best else ''))
Example 11
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    # assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          num_diff_features=128,
                          dropout=args.dropout,
                          cut_at_pooling=False)
    print(model)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items()
                           if k in model.state_dict()}
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.dataset)>1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name], metric)
            return
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
            return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
    # epoch_num = args.maxstep//(750//18) + 1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            try:
                logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
            except AttributeError:
                pass
        # top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
        # top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        top1 = 1
        # is_best = top1 > best_top1
        # best_top1 = max(top1, best_top1)
        # save_checkpoint({
        #     'state_dict': model.module.state_dict(),
        #     'epoch': epoch + 1,
        #     'best_top1': best_top1,
        # }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        is_best = False
        best_top1 = 1
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    print("Test:")
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                               dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example 12
def main_worker(args):
    global start_epoch, best_mAP
    start_time = time.monotonic()

    cudnn.benchmark = True

    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create datasets
    iters = args.iters if (args.iters > 0) else None
    print("==> Load unlabeled dataset")
    dataset = get_data(args.dataset, args.data_dir)
    test_loader = get_test_loader(dataset, args.height, args.width,
                                  args.batch_size, args.workers)

    # Create model
    model = create_model(args)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        evaluator.evaluate(test_loader,
                           dataset.query,
                           dataset.gallery,
                           cmc_flag=True)
        return

    # Create feature memory
    memory = CamMemory(2048,
                       len(dataset.train),
                       temp=args.temp,
                       momentum=args.momentum).cuda()

    # Initialize target-domain instance features
    print("==> Initialize instance features in the feature memory")
    cluster_loader = get_test_loader(dataset,
                                     args.height,
                                     args.width,
                                     args.batch_size,
                                     args.workers,
                                     testset=sorted(dataset.train))
    features, _ = extract_features(model, cluster_loader, print_freq=50)
    features = torch.cat(
        [features[f].unsqueeze(0) for f, _, _ in sorted(dataset.train)], 0)
    memory.features = F.normalize(features, dim=1).cuda()

    del cluster_loader

    # optimizer for meta models
    params = [{
        "params": [value]
    } for value in model.module.params() if value.requires_grad]
    optimizer = torch.optim.Adam(params,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=args.step_size,
                                                   gamma=0.1)

    # Trainer
    va_trainer = Trainer_USL(model, memory)
    cluster = DBSCAN(eps=args.eps,
                     min_samples=4,
                     metric='precomputed',
                     n_jobs=-1)
    # instance pre-training
    pseudo_labeled_dataset = []
    pseudo_labels = torch.arange(len(dataset.train))
    for i, ((fname, _, cid),
            label) in enumerate(zip(sorted(dataset.train), pseudo_labels)):
        pseudo_labeled_dataset.append((fname, label.item(), cid))
    for epoch in range(args.startE):
        torch.cuda.empty_cache()
        memory.labels = pseudo_labels.cuda()
        train_loader = get_train_loader(dataset.images_dir,
                                        args.height,
                                        args.width,
                                        args.batch_size,
                                        args.workers,
                                        -1,
                                        iters,
                                        trainset=pseudo_labeled_dataset)
        print(f'----- Exemplar pre-training, epoch {epoch} -----')
        va_trainer.train(epoch,
                         train_loader,
                         optimizer,
                         print_freq=args.print_freq,
                         train_iters=args.iters)
    # test pre-train
    evaluator.evaluate(test_loader,
                       dataset.query,
                       dataset.gallery,
                       cmc_flag=False)
    trainer = CamTrainer(model, memory)
    # start training
    for epoch in range(args.epochs):
        # Calculate distance
        torch.cuda.empty_cache()
        features = memory.features.clone()
        rerank_dist = compute_jaccard_distance(features,
                                               k1=args.k1,
                                               k2=args.k2)

        # select & cluster images as the training set for this epoch
        pseudo_labels = cluster.fit_predict(rerank_dist)

        pseudo_labels = generate_pseudo_labels(pseudo_labels, features)
        pseudo_labeled_dataset = []
        for i, ((fname, _, cid),
                label) in enumerate(zip(sorted(dataset.train), pseudo_labels)):
            pseudo_labeled_dataset.append((fname, label.item(), cid))
        # statistics of clusters and un-clustered instances
        memory.labels = pseudo_labels.cuda()
        train_loader = get_train_loader(dataset.images_dir,
                                        args.height,
                                        args.width,
                                        args.batch_size,
                                        args.workers,
                                        args.num_instances,
                                        iters,
                                        trainset=pseudo_labeled_dataset)
        trainer.train(epoch,
                      train_loader,
                      optimizer,
                      print_freq=args.print_freq,
                      train_iters=args.iters,
                      symmetric=args.symmetric)
        if (epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1):
            mAP = evaluator.evaluate(test_loader,
                                     dataset.query,
                                     dataset.gallery,
                                     cmc_flag=False)
            is_best = (mAP > best_mAP)
            best_mAP = max(mAP, best_mAP)
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': epoch + 1,
                    'best_mAP': best_mAP,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print(
                '\n * Finished epoch {:3d}  model mAP: {:5.1%}  best: {:5.1%}{}\n'
                .format(epoch, mAP, best_mAP, ' *' if is_best else ''))

        lr_scheduler.step()
    print('==> Test with the best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(test_loader,
                       dataset.query,
                       dataset.gallery,
                       cmc_flag=True)
    end_time = time.monotonic()
    print('Total running time: ', timedelta(seconds=end_time - start_time))
Example 13
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.source_batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    # Create source data_loader
    source_dataset, source_num_classes, source_train_loader\
        , source_val_loader, source_test_loader = \
        get_source_data(args.source_dataset, args.split, args.data_dir, args.height,
                 args.width, args.source_batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    # Create target data_loader
    target_dataset, target_num_classes, target_train_loader\
        , target_val_loader, target_test_loader = \
        get_target_data(args.target_dataset, args.split, args.data_dir, args.height,
                 args.width, args.target_batch_size, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = ResNet_recon(num_features=1024, dropout=args.dropout)
    if args.evaluate:
        model_evalu = nn.DataParallel(model).cuda()
    model = model.cuda(0)

    # For source triplet-loss
    trip_embedding = Trip_embedding(num_features=1024,
                                    num_diff_features=128, dropout=args.dropout).cuda(1)
    # For target reconstruction-loss
    recon_module = Reconstruct(num_features=1024).cuda(1)
    # Criterion
    criterion = ReconTripLoss().cuda(1)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        model_path = load_checkpoint(args.resume)
        model.load_state_dict(model_path['state_dict'])
        # trip_embedding.load_state_dict(model_path['trip_em'])
        recon_module.load_state_dict(model_path['recon_dict'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    # model = nn.DataParallel(model, device_ids=[0,1]).cuda(1)

    # model.cuda(0)
    # trip_embedding.cuda(1)
    # recon_module.cuda(1)
    # criterion.cuda(1)
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator

    if args.evaluate:
        evaluator = Evaluator(model_evalu)
        metric.train(model_evalu, source_train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.source_dataset) > 1:
            for dataset_name in args.source_dataset:
                print("{} source_test result:".format(dataset_name))
                evaluator.evaluate(source_test_loader[dataset_name],
                                   source_dataset.query[dataset_name],
                                   source_dataset.gallery[dataset_name],
                                   metric)
            return
        else:
            print("source test result")
            evaluator.evaluate(source_test_loader, source_dataset.query,
                               source_dataset.gallery, metric)
            print("target test result")
            evaluator.evaluate(target_test_loader, target_dataset.query,
                               target_dataset.gallery, metric)
            return

    evaluator = Evaluator(model)
    # Optimizer
    optimizer = torch.optim.Adam([{'params': model.parameters()},
                                  # {'params': trip_embedding.parameters()},
                                  {'params': recon_module.parameters()}],
                                  lr=args.lr, weight_decay=args.weight_decay)
    # Trainer
    trainer = Transfer_Trainer(model, recon_module, trip_embedding, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/
                                 float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
    # epoch_num = args.maxstep//(750//18) + 1
    # Start training
    top1 = 0
    is_best = True
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        # for tag, value in model.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(source_train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(source_train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        if epoch % 25 == 0 and epoch != 0:
            top1 = evaluator.evaluate(source_test_loader, source_dataset.query, source_dataset.query)
            target_top1 = evaluator.evaluate(target_test_loader, target_dataset.query, target_dataset.query)
            print('target_top1 = {:5.1%}'.format(target_top1))
            # top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)

        save_checkpoint({
            'state_dict': model.state_dict(),
            'recon_dict': recon_module.state_dict(),
            # 'trip_em': trip_embedding.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, source_train_loader)
    print("Test:")
    if len(args.source_dataset) > 1:
        for dataset_name in args.source_dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(source_test_loader[dataset_name], source_dataset.query[dataset_name],
                               source_dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(source_test_loader, source_dataset.query, source_dataset.gallery, metric)
Example 14
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_features=args.features, norm=True,
                          dropout=args.dropout)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = OIMLoss(model.module.num_features, num_classes,
                        scalar=args.oim_scalar,
                        momentum=args.oim_momentum).cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example 15
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.camstyle, args.re,
                 0 if args.debug else args.workers,
                 camstyle_path = args.camstyle_path)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model, args.logs_dir)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature, args.rerank)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.module.base.parameters(),
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    if args.camstyle == 0:
        trainer = Trainer(model, criterion)
    else:
        trainer = CamStyleTrainer(model, criterion, camstyle_loader)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model, args.logs_dir)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.output_feature, args.rerank)
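
The optimizer setup above attaches an extra 'lr_mult' key to each parameter group so the pretrained backbone learns 10x slower than the new layers, and adjust_lr rescales every group each epoch. A self-contained sketch of the same pattern (the toy model and numbers are illustrative):

import torch
import torch.nn as nn


class ToyReID(nn.Module):
    def __init__(self):
        super().__init__()
        self.base = nn.Linear(128, 64)       # stands in for the pretrained backbone
        self.classifier = nn.Linear(64, 10)  # stands in for the newly added head


model = ToyReID()
param_groups = [
    {'params': model.base.parameters(), 'lr_mult': 0.1},        # backbone: 10x smaller lr
    {'params': model.classifier.parameters(), 'lr_mult': 1.0},  # new layers: full lr
]
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9)


def adjust_lr(epoch, base_lr=0.1, step_size=40):
    # Step decay: divide the learning rate by 10 every step_size epochs,
    # then apply each group's lr_mult on top of the decayed value.
    lr = base_lr * (0.1 ** (epoch // step_size))
    for g in optimizer.param_groups:
        g['lr'] = lr * g.get('lr_mult', 1)


adjust_lr(45)
print([g['lr'] for g in optimizer.param_groups])  # ~[0.001, 0.01] up to float rounding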
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, _, _, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    trans_train_loader, num_classes = get_fake_data(args.trans_name, args.trans_data_txt, args.height,
                                       args.width, args.batch_size, args.workers)
    # Create model
    model = models.create(args.arch,
                          dropout=0, num_classes=num_classes)
    # model = models.create(args.arch, num_features=1024, num_diff_features=args.features,
    #                       dropout=args.dropout, num_classes=num_classes, iden_pretrain=True)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, trans_train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr * 0.1**(epoch//args.lr_change_epochs)
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, trans_train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        # for tag, value in model.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        # for tag, value in criterion.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        #################################
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, trans_train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
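
Several of these scripts redirect stdout with sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt')). The Logger class is not included in the listings; a minimal tee-style implementation along those lines (an assumption about its behaviour, not the project's actual class) could look like:

import os
import sys


class Logger(object):
    """Duplicate everything written to stdout into a log file."""

    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            if os.path.dirname(fpath):
                os.makedirs(os.path.dirname(fpath), exist_ok=True)
            self.file = open(fpath, 'w')

    def write(self, msg):
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()


# After `sys.stdout = Logger('logs/log.txt')`, every print() goes to both
# the console and logs/log.txt.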
def main(args):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.a1 == 'inception' else \
            (256, 128)

    dataset, dataset_test, num_classes, train_loader, query_loader, gallery_loader = get_data(
        args.dataset, args.split, args.data_dir, args.height, args.width,
        args.batch_size, args.workers, args.combine_trainval, args.loss_mode,
        args.instances_num)

    # Create CNN model; generate a 128-dimensional feature vector through a 2-layer fully-connected network
    cnnmodel = models.create(args.a1,
                             num_features=args.features,
                             dropout=args.dropout)

    # Create the score computation model
    classifiermodel = models.create(args.a2, input_num=args.features)

    # Create the crf_mean_field model
    crfmodel = models.create(args.a3, layer_num=args.layernum)

    # Module CUDA acceleration
    cnnmodel = nn.DataParallel(cnnmodel).cuda()
    classifiermodel = classifiermodel.cuda()
    crfmodel = crfmodel.cuda()

    # Criterion1 Identification loss
    criterion_oim = MULOIMLoss(args.features,
                               num_classes,
                               scalar=args.oim_scalar,
                               momentum=args.oim_momentum)

    # Criterion2 Verification loss
    criterion_veri = PairLoss(args.sampling_rate)

    ## Criterion CUDA acceleration
    criterion_oim.cuda()
    criterion_veri.cuda()

    # Optimizer

    base_param_ids = set(map(id, cnnmodel.module.base.parameters()))
    new_params = [
        p for p in cnnmodel.parameters() if id(p) not in base_param_ids
    ]
    param_groups = [{
        'params': cnnmodel.module.base.parameters(),
        'lr_mult': 1
    }, {
        'params': new_params,
        'lr_mult': 1
    }, {
        'params': classifiermodel.parameters(),
        'lr_mult': 1
    }, {
        'params': crfmodel.parameters(),
        'lr_mult': 1
    }]

    # Optimizer
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.cnnlr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Schedule Learning rate
    def adjust_lr(epoch):
        # step_size = 60 if args.arch == 'inception' else 40
        lr = args.cnnlr * (0.1**(epoch // 10))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Trainer
    trainer = MULJOINT_MAN_Trainer(cnnmodel, classifiermodel, crfmodel,
                                   criterion_veri, criterion_oim,
                                   args.instances_num)
    start_epoch = best_top1 = 0

    # Evaluation
    evaluator = MsEvaluator(cnnmodel, classifiermodel, crfmodel)
    if args.evaluate == 1:
        checkpoint = load_checkpoint(
            osp.join('../crf_affinity8_models/model101',
                     'cnncheckpoint.pth.tar'))
        cnnmodel.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(
            osp.join('../crf_affinity8_models/model101',
                     'crfcheckpoint.pth.tar'))
        crfmodel.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(
            osp.join('../crf_affinity8_models/model101',
                     'classifiercheckpoint.pth.tar'))
        classifiermodel.load_state_dict(checkpoint['state_dict'])

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery)
        print(top1)

    else:
        if args.retrain:
            checkpoint = load_checkpoint(
                osp.join('/home/zhangle/crf_affinity/logs/mars',
                         'cnncheckpoint.pth.tar'))
            cnnmodel.load_state_dict(checkpoint['state_dict'])

            checkpoint = load_checkpoint(
                osp.join('/home/zhangle/crf_affinity/logs/mars',
                         'crfcheckpoint.pth.tar'))
            crfmodel.load_state_dict(checkpoint['state_dict'])

            checkpoint = load_checkpoint(
                osp.join('/home/zhangle/crf_affinity/logs/mars',
                         'classifiercheckpoint.pth.tar'))
            classifiermodel.load_state_dict(checkpoint['state_dict'])
        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery)
        for epoch in range(start_epoch, args.epochs):
            adjust_lr(epoch)
            trainer.train(epoch, train_loader, optimizer)

            if True:  #epoch % 6 == 0:

                top1 = evaluator.evaluate(query_loader, gallery_loader,
                                          dataset.query, dataset.gallery)
                print(top1)
                top1 = top1[0]
                is_best = top1 > best_top1
                best_top1 = max(top1, best_top1)

                save_checkpoint(
                    {
                        'state_dict': cnnmodel.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    },
                    is_best,
                    fpath=osp.join(args.logs_dir, 'cnncheckpoint.pth.tar'))

                save_checkpoint(
                    {
                        'state_dict': classifiermodel.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    },
                    is_best,
                    fpath=osp.join(args.logs_dir,
                                   'classifiercheckpoint.pth.tar'))

                save_checkpoint(
                    {
                        'state_dict': crfmodel.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    },
                    is_best,
                    fpath=osp.join(args.logs_dir, 'crfcheckpoint.pth.tar'))

            print(
                '\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%} {}\n'.
                format(epoch, top1, best_top1, '*' if is_best else ''))
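
The evaluate/retrain branches above restore three coupled models (cnnmodel, classifiermodel, crfmodel) from three files with near-identical code. A small hypothetical helper that factors out that bookkeeping (the file-name convention matches the save_checkpoint calls above; the helper itself is not part of the codebase):

import os.path as osp


def load_all(models_by_prefix, ckpt_dir, load_checkpoint):
    # models_by_prefix maps a file prefix to its module, e.g.
    # {'cnn': cnnmodel, 'classifier': classifiermodel, 'crf': crfmodel};
    # load_checkpoint is the same helper used throughout these examples.
    for prefix, module in models_by_prefix.items():
        ckpt = load_checkpoint(osp.join(ckpt_dir, prefix + 'checkpoint.pth.tar'))
        module.load_state_dict(ckpt['state_dict'])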
Esempio n. 18
0
def main(args):
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Log args
    print(args)
    args_dict = vars(args)
    with open(osp.join(args.logs_dir, 'args.json'), 'w') as f:
        json.dump(args_dict, f)

    # Create data loaders
    dataset, num_classes, train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height, \
                 args.width, args.crop_height, args.crop_width, args.batch_size, \
                 args.caffe_sampler, \
                 args.workers)

    # Create model
    valid_args = ['features', 'use_relu', 'dilation']
    model_kwargs = {k:v for k,v in args_dict.items() if k in valid_args}
    model = models.create(args.arch, **model_kwargs)
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    params = []
    params_dict = dict(model.named_parameters())
    for key, value in params_dict.items():
        if not value.requires_grad:
            continue
        if key.endswith('bias'):
            params += [{'params': [value], 'lr': args.lr * 2, 'weight_decay': args.weight_decay * 0.0}]
        else:
            params += [{'params': [value], 'lr': args.lr * 1, 'weight_decay': args.weight_decay * 1.0}]
    optimizer = SGD_caffe(params,
                          lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)

    # Trainer
    trainer = Trainer(model, criterion)

    # Evaluate
    def evaluate(test_model):
        print('Test with model {}:'.format(test_model))
        checkpoint = load_checkpoint(osp.join(args.logs_dir, '{}.pth.tar'.format(test_model)))
        model.module.load(checkpoint)
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, msg='TEST')

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 200 else \
            args.lr * (0.2 ** ((epoch-200)//200 + 1))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(1, args.epochs+1):

        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        # Save
        if epoch%200 == 0 or epoch==args.epochs:
            save_dict = model.module.save_dict()
            save_dict.update({'epoch': epoch})
            save_checkpoint(save_dict, fpath=osp.join(args.logs_dir, 'epoch_{}.pth.tar'.format(epoch)))

        print('\n * Finished epoch {:3d}\n'.format(epoch))

    evaluate('epoch_750')
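
The optimizer setup above follows the classic Caffe convention: bias parameters get twice the learning rate and no weight decay, while all other parameters use the base values. A self-contained sketch of building such parameter groups (plain SGD stands in for SGD_caffe here; the model and numbers are illustrative):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(32, 16), nn.ReLU(), nn.Linear(16, 8))

base_lr, weight_decay = 0.01, 5e-4
params = []
for name, p in model.named_parameters():
    if not p.requires_grad:
        continue
    if name.endswith('bias'):
        # Caffe convention: biases learn twice as fast and are not regularized.
        params.append({'params': [p], 'lr': base_lr * 2, 'weight_decay': 0.0})
    else:
        params.append({'params': [p], 'lr': base_lr, 'weight_decay': weight_decay})

optimizer = torch.optim.SGD(params, lr=base_lr, momentum=0.9)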
Esempio n. 19
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch,
                          num_features=1024,
                          dropout=args.dropout,
                          num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        return lr

    title = args.dataset
    log_filename = 'log.txt'
    if not os.path.isdir(args.logs_dir):
        mkdir_p(args.logs_dir)
    logger = Logger(os.path.join(args.logs_dir, log_filename), title=title)
    logger.set_names([
        'Learning Rate', 'Train Loss', 'Train Prec', 'Test Top1', 'Test Top5',
        'Test Top10'
    ])
    # Start training
    for epoch in range(start_epoch, args.epochs):
        lr = adjust_lr(epoch)
        loss, prec = train_model(train_loader, model, optimizer, criterion,
                                 epoch)
        top1, top5, top10 = test_model(test_loader, model)
        logger.append([lr, loss, prec, top1, top5, top10])
        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch + 1, top1, best_top1, ' *' if is_best else ''))

    logger.close()
    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    top1, top5, top10 = test_model(test_loader, model)
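
For reference, the adjust_lr above keeps the learning rate constant for the first 100 epochs and then decays it exponentially. The tiny helper below only prints sample values so the shape of the schedule is easy to check (the base rate of 2e-4 is illustrative):

def tri_lr(epoch, base_lr=2e-4):
    # Constant for 100 epochs, then lr = base_lr * 0.001 ** ((epoch - 100) / 50).
    return base_lr if epoch <= 100 else base_lr * (0.001 ** ((epoch - 100) / 50.0))


for e in (50, 100, 125, 150, 200):
    print(e, tri_lr(e))
# 125 -> ~6.3e-6 and 150 -> 2e-7: the rate drops 1000x every 50 epochs after epoch 100.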
Esempio n. 20
0
def spaco(configs,
          data,
          iter_steps=1,
          gamma=0,
          train_ratio=0.2,
          regularizer='hard',
          device='cuda:0'):
  """
    self-paced co-training model implementation based on Pytroch
    params:
    model_names: model names for spaco, such as ['resnet50','densenet121']
    data: dataset for spaco model
    save_pathts: save paths for two models
    iter_step: iteration round for spaco
    gamma: spaco hyperparameter
    train_ratio: initiate training dataset ratio
    """
  num_view = len(configs)
  train_data, untrain_data = dp.split_dataset(data.trainval, train_ratio,
                                              args.seed)
  data_dir = data.images_dir
  query_gallery = list(set(data.query) | set(data.gallery))
  save_dir = os.path.join('logs', 'parallel_spaco', regularizer,
                          'seed_%d' % args.seed)
  ###########
  # initialize classifiers to get predictions
  ###########

  add_ratio = 0.5
  pred_probs = []
  sel_ids = []
  weights = []
  features = []
  start_step = 0
  for view in range(num_view):
    net = models.create(configs[view].model_name,
                        num_features=configs[view].num_features,
                        dropout=configs[view].dropout,
                        num_classes=configs[view].num_classes).to(device)
    mu.train(net, train_data, data_dir, configs[view], device)
    pred_probs.append(mu.predict_prob(net, untrain_data, data_dir, configs[view], device))
    predictions = mu.predict_prob(net, data.trainval, data_dir, configs[view], device)
    mAP = mu.evaluate(net, data, configs[view], device)
    save_checkpoint(
        {
            'state_dict': net.state_dict(),
            'epoch': 0,
            'train_data': train_data,
            'trainval': data.trainval,
            'predictions': predictions,
            'performance': mAP
        },
        False,
        fpath=os.path.join(save_dir, '%s.epoch0' % (configs[view].model_name)))
  pred_y = np.argmax(sum(pred_probs), axis=1)

  # initialize weights for unlabeled examples
  for view in range(num_view):
    sel_id, weight = dp.get_ids_weights(pred_probs[view], pred_y, train_data,
                                        add_ratio, gamma, regularizer, num_view)
    sel_ids.append(sel_id)
    weights.append(weight)

  # start iterative training
  for step in range(start_step, iter_steps):
    for view in range(num_view):
      # update v_view
      sel_ids[view], weights[view] = update_ids_weights(view, pred_probs,
                                                        sel_ids, weights,
                                                        pred_y, train_data,
                                                        add_ratio, gamma,
                                                        regularizer)
      # update w_view
      new_train_data, _ = dp.update_train_untrain(sel_ids[view], train_data,
                                                  untrain_data, pred_y,
                                                  weights[view])
      configs[view].set_training(True)
      net = models.create(configs[view].model_name,
                          num_features=configs[view].num_features,
                          dropout=configs[view].dropout,
                          num_classes=configs[view].num_classes).to(device)
      mu.train(net, new_train_data, data_dir, configs[view], device)

      # update y
      pred_probs[view] = mu.predict_prob(net, untrain_data, data_dir,
                                         configs[view], device)

      # calculate prediction probabilities on all data
      test_acc(net, data.trainval, data_dir, configs[view], device)

      # evaluate the current model and save it
      mAP = mu.evaluate(net, data, configs[view], device)
      predictions = mu.predict_prob(net, data.trainval, data_dir,
                                    configs[view], device)
      save_checkpoint(
          {
              'state_dict': net.state_dict(),
              'epoch': step + 1,
              'train_data': new_train_data,
              'trainval': data.trainval,
              'predictions': predictions,
              'performance': mAP
          },
          False,
          fpath=os.path.join(
              save_dir, '%s.epoch%d' % (configs[view].model_name, step + 1)))
      if step + 1 == iter_steps:
        features += [
            mu.get_feature(net, query_gallery, data.images_dir, configs[view], device)
        ]
    add_ratio += 1.2
    #  pred_y = np.argmax(sum(pred_probs), axis=1)
  acc = mu.combine_evaluate(features, data)
  print(acc)
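
The spaco loop above grows the training set by letting dp.get_ids_weights / dp.sel_idx pick confident pseudo-labeled samples each round; those helpers are not shown here. A simplified, self-contained stand-in for "hard" self-paced selection (keep the most confident add_ratio fraction per predicted class) is:

import numpy as np


def select_confident(pred_probs, add_ratio):
    # Toy 'hard' self-paced selection, a stand-in for dp.sel_idx / dp.get_ids_weights:
    # for each predicted class, keep the top add_ratio fraction of samples by confidence.
    pred_y = np.argmax(pred_probs, axis=1)
    conf = pred_probs[np.arange(len(pred_y)), pred_y]
    selected = np.zeros(len(pred_y), dtype=bool)
    for c in np.unique(pred_y):
        idx = np.where(pred_y == c)[0]
        k = max(1, int(len(idx) * add_ratio))
        selected[idx[np.argsort(-conf[idx])[:k]]] = True
    return selected


probs = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8], [0.55, 0.45]])
print(select_confident(probs, add_ratio=0.5))  # [ True False  True False]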
Esempio n. 21
0
def main(args):
    args.step_size = args.step_size.split(',')
    args.step_size = [int(x) for x in args.step_size]
    # seed
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.benchmark = True

    if args.logs_dir is None:
        args.logs_dir = osp.join(f'logs/zju/{args.dataset}', datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S'))
    else:
        args.logs_dir = osp.join(f'logs/zju/{args.dataset}', args.logs_dir)
    if args.train:
        os.makedirs(args.logs_dir, exist_ok=True)
        copy_tree('./reid', args.logs_dir + '/scripts/reid')
        for script in os.listdir('.'):
            if script.split('.')[-1] == 'py':
                dst_file = os.path.join(args.logs_dir, 'scripts', os.path.basename(script))
                shutil.copyfile(script, dst_file)
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'), )
    print('Settings:')
    print(vars(args))
    print('\n')

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                 args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.re, args.num_instances,
                 camstyle=0, zju=1, colorjitter=args.colorjitter)

    # Create model
    model = models.create('ide', feature_dim=args.feature_dim, norm=args.norm,
                          num_classes=num_classes, last_stride=args.last_stride, arch=args.arch)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        resume_fname = osp.join(f'logs/zju/{args.dataset}', args.resume, 'model_best.pth.tar')
        model, start_epoch, best_top1 = checkpoint_loader(model, resume_fname)
        print("=> Last epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))
        start_epoch += 1
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = [LSR_loss().cuda() if args.LSR else nn.CrossEntropyLoss().cuda(),
                 TripletLoss(margin=None if args.softmargin else args.margin).cuda()]

    # Optimizer
    if 'aic' in args.dataset:
        # Optimizer
        if hasattr(model.module, 'base'):  # low learning_rate the base network (aka. DenseNet-121)
            base_param_ids = set(map(id, model.module.base.parameters()))
            new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
            param_groups = [{'params': model.module.base.parameters(), 'lr_mult': 1},
                            {'params': new_params, 'lr_mult': 2}]
        else:
            param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups, lr=args.lr, momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, )

    # Trainer
    trainer = Trainer(model, criterion)

    # Evaluator
    evaluator = Evaluator(model)

    if args.train:
        # Schedule learning rate
        def adjust_lr(epoch):
            if epoch <= args.warmup:
                alpha = epoch / args.warmup
                warmup_factor = 0.01 * (1 - alpha) + alpha
            else:
                warmup_factor = 1
            lr = args.lr * warmup_factor * (0.1 ** bisect_right(args.step_size, epoch))
            print('Current learning rate: {}'.format(lr))
            for g in optimizer.param_groups:
                if 'aic' in args.dataset:
                    g['lr'] = lr * g.get('lr_mult', 1)
                else:
                    g['lr'] = lr

        # Draw Curve
        epoch_s = []
        loss_s = []
        prec_s = []
        eval_epoch_s = []
        eval_top1_s = []

        # Start training
        for epoch in range(start_epoch + 1, args.epochs + 1):
            t0 = time.time()
            adjust_lr(epoch)
            # train_loss, train_prec = 0, 0
            train_loss, train_prec = trainer.train(epoch, train_loader, optimizer, fix_bn=args.fix_bn, print_freq=10)

            if epoch < args.start_save:
                continue

            if epoch % 10 == 0:
                top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
                eval_epoch_s.append(epoch)
                eval_top1_s.append(top1)
            else:
                top1 = 0

            is_best = top1 >= best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch,
                'best_top1': best_top1,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
            epoch_s.append(epoch)
            loss_s.append(train_loss)
            prec_s.append(train_prec)
            draw_curve(os.path.join(args.logs_dir, 'train_curve.jpg'), epoch_s, loss_s, prec_s,
                       eval_epoch_s, None, eval_top1_s)

            t1 = time.time()
            t_epoch = t1 - t0
            print('\n * Finished epoch {:3d}  top1: {:5.1%}  best_eval: {:5.1%} {}\n'.
                  format(epoch, top1, best_top1, ' *' if is_best else ''))
            print('*************** Epoch takes time: {:^10.2f} *********************\n'.format(t_epoch))
            pass

        # Final test
        print('Test with best model:')
        model, start_epoch, best_top1 = checkpoint_loader(model, osp.join(args.logs_dir, 'model_best.pth.tar'))
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))

        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
    else:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
        pass
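
The script above combines a linear warmup with milestone-based 10x decays, using bisect_right to count how many milestones the current epoch has passed. A standalone sketch of the same schedule (base rate, warmup length and milestones are illustrative):

from bisect import bisect_right


def warmup_step_lr(epoch, base_lr=3.5e-4, warmup=10, milestones=(40, 70)):
    # Linear warmup from 1% of base_lr to base_lr over `warmup` epochs,
    # then divide by 10 at every milestone that has been passed.
    if epoch <= warmup:
        alpha = epoch / warmup
        warmup_factor = 0.01 * (1 - alpha) + alpha
    else:
        warmup_factor = 1
    return base_lr * warmup_factor * (0.1 ** bisect_right(list(milestones), epoch))


for e in (1, 5, 10, 41, 71):
    print(e, warmup_step_lr(e))  # ramps up to 3.5e-4, then 3.5e-5 after 40 and 3.5e-6 after 70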
Esempio n. 22
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get_source_data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get_target_data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise (RuntimeError(
            'Please specify the number of classes (ids) of the network.'))

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise (RuntimeWarning('Not using a pre-trained model.'))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print("Test with the original model trained on source domain:")
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    if args.evaluate:
        return

    # Criterion
    criterion = []
    criterion.append(
        TripletLoss(margin=args.margin,
                    num_instances=args.num_instances).cuda())
    criterion.append(
        TripletLoss(margin=args.margin,
                    num_instances=args.num_instances).cuda())

    # Optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronize feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # synchronize feature order with dataset.train
        target_features = torch.cat(
            [target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.train],
            0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - 1
        print('Iteration {} has {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.trainval, labels):
            if label == -1:
                continue
            # appending camid 0 keeps the (fname, pid, camid) format, so trainer.py's _parsing_input and the sampler need no changes
            new_dataset.append((fname, label, 0))
        print('Iteration {} has {} training images'.format(
            iter_n + 1, len(new_dataset)))

        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        # train model with new generated dataset
        trainer = Trainer(model, criterion, print_freq=args.print_freq)
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, optimizer)
        # Evaluate
        rank1 = evaluator.evaluate(test_loader, tgt_dataset.query,
                                   tgt_dataset.gallery)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': iter_n + 1,
                'num_ids': num_ids,
            },
            True,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  rank1: {:5.1%} \n'.format(
            iter_n + 1, rank1))
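
The clustering step above derives DBSCAN's eps from the data itself: it takes the mean of the smallest rho fraction of pairwise (re-ranked) distances, then discards noise points (label -1) when building the new pseudo-labeled set. A self-contained sketch of that eps selection on a plain Euclidean distance matrix (random features and the rho value are illustrative):

import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.RandomState(0)
feats = rng.rand(200, 32)
# Plain Euclidean distances stand in for the re-ranked distances used above.
dist = np.linalg.norm(feats[:, None] - feats[None, :], axis=2)

rho = 1.6e-3
tri = np.triu(dist, 1)                       # upper triangle: each pair counted once
tri = np.sort(tri[np.nonzero(tri)], axis=None)
top_num = np.round(rho * tri.size).astype(int)
eps = tri[:top_num].mean()                   # eps = mean of the closest rho fraction of pairs

labels = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1).fit_predict(dist)
num_ids = len(set(labels)) - (1 if -1 in labels else 0)  # -1 marks noise / discarded images
print('eps = {:.3f}, clusters = {}'.format(eps, num_ids))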
Esempio n. 23
0
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    #device_ids = [0, 1, 2, 3]
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset,  args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 )


    # Create model
    model = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes,cut_at_pooling=False, FCN=True)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_dict = model.state_dict()
        checkpoint_load = {k: v for k, v in (checkpoint['state_dict']).items() if k in model_dict}
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
#        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))

    #model = nn.DataParallel(model)
    model = nn.DataParallel(model).cuda()


    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader,  dataset.query, dataset.gallery)
        return

    # Criterion
    #criterion = nn.CrossEntropyLoss().cuda()
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion, 0, 0, SMLoss_mode=0)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else args.step_size
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)  # if 'lr_mult' is not found, default to 1

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        is_best = True
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
            except AttributeError:
                pass
        #################################
        if epoch % 130 == 0 and epoch != 0:
            # top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
            # top1 = evaluator.evaluate(val_loader, dataset.query, dataset.gallery)
            top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)
            # top1 = 1
            # is_best = False
            # best_top1 = 1
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'adapt_metric': criterion.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        if (epoch + 1) % 50 == 0:
            checkpoint_file = osp.join(args.logs_dir, 'checkpoint.pth.tar')
            stepcheckpoint_file = osp.join(args.logs_dir, 'checkpoint{}.pth.tar'.format(epoch + 1))
            shutil.copy(checkpoint_file, stepcheckpoint_file)
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))
    now = datetime.datetime.now()
    print(now.strftime('%Y-%m-%d %H:%M:%S'))
    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    criterion.load_state_dict(checkpoint['adapt_metric'])
Esempio n. 25
0
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.camstyle, args.re, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature, args.rerank)
        return

    # Criterion
    #criterion = nn.CrossEntropyLoss().cuda()

    class LSROloss(nn.Module):
        def __init__(self):  # change target to range(0,750)
            super(LSROloss, self).__init__()
            #input means the prediction score(torch Variable) 32*752,target means the corresponding label,
        def forward(
            self, input, target, flg
        ):  # while flg means the flag(=0 for true data and 1 for generated data)  batchsize*1
            # print(type(input))
            if input.dim() > 2:  # N images, C channels, K classes in total
                input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
                input = input.transpose(1, 2)  # N,C,H*W => N,H*W,C
                input = input.contiguous().view(-1, input.size(2))  # N,H*W,C => N*H*W,C

            # normalize the logits: subtract each row's maximum for numerical stability
            maxRow, _ = torch.max(input.data, 1)  # torch.max over dim 1 returns (values, indices); keep the per-row max value
            maxRow = maxRow.unsqueeze(1)
            input.data = input.data - maxRow

            target = target.view(-1, 1)  # batchsize*1
            flg = flg.view(-1, 1)
            #len=flg.size()[0]
            flos = F.log_softmax(input, dim=1)  # batchsize*K log-probabilities
            flos = torch.sum(flos, 1) / flos.size(1)  # N*1  average log-probability (term used for generated samples)
            logpt = F.log_softmax(input, dim=1)  # size: batchsize*K
            #print("logpt",logpt.size())
            #print("taarget", target.size())
            logpt = logpt.gather(1, target)  # here is a problem
            logpt = logpt.view(-1)  # N*1     original loss
            flg = flg.view(-1)
            flg = flg.type(torch.cuda.FloatTensor)
            #print("logpt",logpt.size())
            #print("flg", flg.size())
            #print("flos", flos.size())
            loss = -1 * logpt * (1 - flg) - flos * flg
            return loss.mean()

    criterion = LSROloss()

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.module.base.parameters(),
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    if args.camstyle == 0:
        trainer = Trainer(model, criterion)
    else:
        trainer = CamStyleTrainer(model, criterion, camstyle_loader)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test
    print('Test with final model:')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.output_feature, args.rerank)
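
The LSROloss above keeps the usual cross-entropy term for real images (flg = 0) and pushes CamStyle/GAN-generated images (flg = 1) toward a uniform distribution over all identities. A quick usage check with dummy tensors, reusing the class defined above (assumes a CUDA device, since the loss casts flg with torch.cuda.FloatTensor; the batch size and 751 identities are illustrative):

import torch

criterion = LSROloss()
scores = torch.randn(8, 751, requires_grad=True, device='cuda')  # raw classifier scores
targets = torch.randint(0, 751, (8,), device='cuda')             # identity labels
flags = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1], device='cuda')    # 0 = real image, 1 = generated
loss = criterion(scores, targets, flags)
print(loss.item())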
Esempio n. 26
0
def main(args):
    print(args)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.big_height is None or args.big_width is None or args.target_height is None or args.target_width is None:
        args.big_height, args.big_width, args.target_height, args.target_width = (
            256, 256, 224, 224)
    dataset, num_classes, train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.big_height, args.big_width,
                 args.target_height, args.target_width, args.batch_size, args.num_instances,
                 args.workers, args.combine_trainval)
    # Create model
    model = models.create(args.arch,
                          num_classes=num_classes,
                          num_features=args.features)
    # print(model)
    # Load from checkpoint
    start_epoch = best = 0
    if args.weights and args.arch != 'dpn107' and args.arch != 'densenet161':

        checkpoint = load_checkpoint(args.weights)
        if args.arch == 'cross_trihard_senet101' or args.arch == 'cross_trihard_se_resnet152':
            del (checkpoint['last_linear.weight'])
            del (checkpoint['last_linear.bias'])
            model.base.load_state_dict(checkpoint)
            #model.base.load_param(args.weights)
        elif args.arch == 'Hrnet48':
            pass
        elif args.arch == 'se_152_ibn':
            del (checkpoint['last_linear.weight'])
            del (checkpoint['last_linear.bias'])
            model.base.load_state_dict(checkpoint, strict=False)
        elif args.arch == 'densenet169_ibn_a':
            checkpoint = torch.load(args.weights)['state_dict']
            del (checkpoint['module.classifier.weight'])
            del (checkpoint['module.classifier.bias'])
            model.load_state_dict(
                {k.replace('module.', ''): v
                 for k, v in checkpoint.items()},
                strict=False)
        else:
            del (checkpoint['fc.weight'])
            del (checkpoint['fc.bias'])
            model.base.load_state_dict(checkpoint)
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        return

    # Criterion
    ranking_loss = nn.MarginRankingLoss(margin=args.margin).cuda()
    criterion = { 'crossentropy': nn.CrossEntropyLoss().cuda(), \
                  'trihard': TripletLoss(ranking_loss).cuda() }

    # Optimizer
    if hasattr(model.module, 'base'):
        base_params = []
        base_bn_params = []
        for name, p in model.module.base.named_parameters():
            if 'bn' in name:
                base_bn_params.append(p)
            else:
                base_params.append(p)
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': base_params,
            'lr_mult': 0.1
        }, {
            'params': base_bn_params,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': args.lr_mult
        }]
    else:
        param_groups = model.parameters()

    if args.optimizer == 0:
        optimizer = torch.optim.SGD(param_groups,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    else:
        print('Adam')
        optimizer = torch.optim.Adam(params=param_groups,
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)

    # Trainer
    trainer = Cross_Trihard_Trainer(model,
                                    criterion,
                                    metric_loss_weight=args.metric_loss_weight)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size, step_size2, step_size3 = args.step_size, args.step_size2, args.step_size3
        #lr = args.lr * (0.1 ** (epoch // step_size))
        if epoch <= step_size:
            lr = args.lr
        elif epoch <= step_size2:
            lr = args.lr * 0.1
        elif epoch <= step_size3:
            lr = args.lr * 0.01
        else:
            lr = args.lr * 0.001
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        return lr

    # Start training
    for epoch in range(start_epoch + 1, args.epochs + 1):
        lr = adjust_lr(epoch)
        trainer.train(epoch,
                      train_loader,
                      optimizer,
                      lr,
                      warm_up=True,
                      warm_up_ep=args.warm_up_ep)
        if epoch % args.epoch_inter == 0 or epoch >= args.dense_evaluate:
            tmp_res = evaluator.evaluate(test_loader, dataset.query,
                                         dataset.gallery)
            print('tmp_res: ', tmp_res)
            print('best: ', best)
            if tmp_res > best and epoch >= args.start_save:
                best = tmp_res
                save_checkpoint(
                    {
                        'state_dict': model.module.state_dict(),
                        'epoch': epoch,
                    },
                    False,
                    fpath=osp.join(args.logs_dir, 'pass%d.pth.tar' % (epoch)))
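
Cross_Trihard_Trainer above combines the 'crossentropy' and 'trihard' criteria, but the TripletLoss implementation is not included in these listings. A self-contained, hedged sketch of a batch-hard triplet loss in that spirit (built on nn.MarginRankingLoss like the criterion above; the margin and shapes are illustrative, not the project's exact code):

import torch
import torch.nn as nn


class BatchHardTripletLoss(nn.Module):
    def __init__(self, margin=0.3):
        super().__init__()
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, features, targets):
        # Pairwise Euclidean distances between all embeddings in the batch.
        dist = torch.cdist(features, features)
        same_id = targets.unsqueeze(0) == targets.unsqueeze(1)
        # Hardest positive (farthest same-id) and hardest negative (closest other-id) per anchor.
        dist_ap = dist.masked_fill(~same_id, float('-inf')).max(dim=1).values
        dist_an = dist.masked_fill(same_id, float('inf')).min(dim=1).values
        y = torch.ones_like(dist_an)
        # Encourage dist_an > dist_ap + margin.
        return self.ranking_loss(dist_an, dist_ap, y)


feats = torch.randn(8, 128)
ids = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])
print(BatchHardTripletLoss()(feats, ids).item())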
Esempio n. 27
0
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    ## get_source_data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get_target_data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(2048) -> FC(args.features)
    num_class = 0
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=num_class, num_split=args.num_split, cluster=args.dce_loss) #duke
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=num_class, num_split=args.num_split, cluster=args.dce_loss)
    else:
        raise RuntimeError('Please specify the number of classes (ids) of the network.')
    
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        print('Resuming checkpoints from finetuned model on another dataset...\n')
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint, strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model')
    model = nn.DataParallel(model).cuda()
   
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print("Test with the original model trained on source domain:")
    best_top1 = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    if args.evaluate:
        return

    # Criterion
    criterion = []
    criterion.append(TripletLoss(margin=args.margin,num_instances=args.num_instances).cuda())
    criterion.append(TripletLoss(margin=args.margin,num_instances=args.num_instances).cuda())

    #multi lr
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if
                  id(p) not in base_param_ids]
    param_groups = [
        {'params': model.module.base.parameters(), 'lr_mult': 1.0},
        {'params': new_params, 'lr_mult': 1.0}]
    # Optimizer
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=0.9, weight_decay=args.weight_decay)    

    ##### adjust lr
    def adjust_lr(epoch):
        if epoch <= 7:
            lr = args.lr
        elif epoch <=14:
            lr = 0.3 * args.lr
        else:
            lr = 0.1 * args.lr
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    ##### training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        Resize((args.height,args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    iter_nums = args.iteration
    start_epoch = args.start_epoch
    cluster_list = []
    top_percent = args.rho
    EF = 100 // iter_nums + 1
    eug = None
    for iter_n in range(start_epoch, iter_nums):
        #### get source datas' feature
        if args.load_dist and iter_n == 0:
            dist = pickle.load(open('dist' + str(args.num_split) + '.pkl', 'rb'))
            euclidean_dist_list = dist['euclidean']
            rerank_dist_list = dist['rerank']
        else:
            source_features, _ = extract_features(model, src_extfeat_loader, for_eval=False)
            if isinstance(source_features[src_dataset.trainval[0][0]], list):
                len_f = len(source_features[src_dataset.trainval[0][0]])
                source_features = [torch.cat([source_features[f][i].unsqueeze(0) for f, _, _ in src_dataset.trainval], 0) for i in range(len_f)]
            else:
                source_features = torch.cat([source_features[f].unsqueeze(0) for f, _, _ in src_dataset.trainval], 0) # synchronization feature order with s_dataset.trainval
            #### extract training images' features
            print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n+1))
            target_features, _ = extract_features(model, tgt_extfeat_loader, for_eval=False)
            if isinstance(target_features[tgt_dataset.trainval[0][0]], list):
                len_f = len(target_features[tgt_dataset.trainval[0][0]])
                target_features = [torch.cat([target_features[f][i].unsqueeze(0) for f, _, _ in tgt_dataset.trainval], 0) for i in range(len_f)]
            else:
                target_features = torch.cat([target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval], 0) # synchronization feature order with dataset.trainval
            #### calculate distance and rerank result
            print('Calculating feature distances...') 
            # target_features = target_features.numpy()
            euclidean_dist_list, rerank_dist_list = compute_dist(
                source_features, target_features, lambda_value=args.lambda_value, no_rerank=args.no_rerank, num_split=args.num_split) # lambda=1 means only source dist
            del target_features
            del source_features
        
        labels_list, cluster_list = generate_selflabel(
            euclidean_dist_list, rerank_dist_list, iter_n, args, cluster_list)
        #### generate new dataset
        train_loader = generate_dataloader(tgt_dataset, labels_list, train_transformer, iter_n, args)

        if iter_n == 5:
            u_data, l_data = updata_lable(tgt_dataset, labels_list[0], args.tgt_dataset, sample=args.sample)
            eug = EUG(model_name=args.arch, batch_size=args.batch_size, mode=args.mode,
                      num_classes=num_class, data_dir=args.data_dir, l_data=l_data, u_data=u_data,
                      print_freq=args.print_freq, save_path=args.logs_dir,
                      pretrained_model=model, rerank=True)
            eug.model = model

        if eug is not None:
            nums_to_select = int(min((iter_n + 1) * int(len(u_data) // (iter_nums)), len(u_data)))
            pred_y, pred_score = eug.estimate_label()
            
            print('This is running {} with EF= {}%, step {}:\t Nums_to_be_select {}, \t Logs-dir {}'.format(
                args.mode, EF, iter_n+1, nums_to_select, args.logs_dir
            ))
            selected_idx = eug.select_top_data(pred_score, nums_to_select)
            new_train_data = eug.generate_new_train_data(selected_idx, pred_y)
            eug_dataloader = eug.get_dataloader(new_train_data, training=True)

            top1 = iter_trainer(model, tgt_dataset, train_loader, eug_dataloader, test_loader, optimizer, 
                criterion, args.epochs, args.logs_dir, args.print_freq, args.lr)
            eug.model = model
            del train_loader
            # del eug_dataloader
        else:
            top1 = iter_trainer(model, tgt_dataset, train_loader, None, test_loader, optimizer, 
            criterion, args.epochs, args.logs_dir, args.print_freq, args.lr)
            del train_loader

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': iter_n + 1,
            'best_top1': best_top1,
            # 'num_ids': num_ids,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(iter_n+1, top1, best_top1, ' *' if is_best else ''))
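
A minimal, self-contained sketch of the linear selection schedule used above (the EUG class itself is project code and is not reproduced here); only the iteration index, the total number of iterations, and the size of the unlabeled pool are assumed:

def nums_to_select_at(iter_n, iter_nums, num_unlabeled):
    """Grow the number of selected pseudo-labeled samples linearly per iteration."""
    per_step = num_unlabeled // iter_nums
    return min((iter_n + 1) * per_step, num_unlabeled)

# e.g. with 10 iterations and an illustrative pool of 12936 unlabeled images,
# the pool is consumed in roughly equal increments and fully used at the end
for it in range(10):
    print(it + 1, nums_to_select_at(it, 10, 12936))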
Esempio n. 28
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = [
        TripletLoss(args.margin, args.num_instances, use_semi=False).cuda(),
        TripletLoss(args.margin, args.num_instances, use_semi=False).cuda()
    ]
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

            # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # synchronization feature order with dataset.train
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)
        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - 1
        print('Iteration {} have {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset = []
        # assign label for target ones
        newLab = labelNoise(torch.from_numpy(target_features),
                            torch.from_numpy(labels))
        # unknownFeats = target_features[labels==-1,:]
        counter = 0
        from collections import defaultdict
        realIDs, fakeIDs = defaultdict(list), []
        for (fname, realID, cam), label in zip(tgt_dataset.trainval, newLab):
            # no need to change trainer.py's _parsing_input or the sampler after this relabeling
            new_dataset.append((fname, label, cam))
            realIDs[realID].append(counter)
            fakeIDs.append(label)
            counter += 1
        precision, recall, fscore = calScores(realIDs, np.asarray(fakeIDs))
        print('Iteration {} have {} training images'.format(
            iter_n + 1, len(new_dataset)))
        print(
            f'precision:{precision * 100}, recall:{100 * recall}, fscore:{fscore}'
        )
        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        trainer = Trainer(model, criterion)

        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, optimizer)  # to at most 80%
        # test only
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    save_checkpoint(
        {
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': rank_score.market1501[0],
        },
        True,
        fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return rank_score.map, rank_score.market1501[0]
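
A hedged sketch of the eps selection and DBSCAN step above as a standalone function over a precomputed (re-ranked) distance matrix; min_samples=4 follows the listing, while rho and the toy data below are illustrative only:

import numpy as np
from sklearn.cluster import DBSCAN

def cluster_with_rho(dist, rho, min_samples=4):
    """Pick eps as the mean of the smallest rho-fraction of pairwise
    distances, then run DBSCAN on the precomputed matrix."""
    tri = np.triu(dist, 1)
    tri = np.sort(tri[np.nonzero(tri)], axis=None)
    top_num = np.round(rho * tri.size).astype(int)
    eps = tri[:top_num].mean()
    labels = DBSCAN(eps=eps, min_samples=min_samples,
                    metric='precomputed', n_jobs=-1).fit_predict(dist)
    return labels, eps

rng = np.random.RandomState(0)
x = rng.rand(200, 32)
dist = np.linalg.norm(x[:, None] - x[None, :], axis=-1)
labels, eps = cluster_with_rho(dist, rho=0.05)
print('eps={:.3f}  clusters={}'.format(eps, len(set(labels)) - (1 if -1 in labels else 0)))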
Esempio n. 29
0
def main_worker(args):
    global start_epoch, best_mAP

    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    else:
        log_dir = osp.dirname(args.resume)
        sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    iters = args.iters if (args.iters>0) else None
    dataset_source, num_classes, train_loader_source, test_loader_source = \
        get_data(args.dataset_source, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, args.num_instances, iters)

    dataset_target, _, train_loader_target, test_loader_target = \
        get_data(args.dataset_target, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, 0, iters)

    # Create model
    model = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=num_classes)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        copy_state_dict(checkpoint['state_dict'], model)
        start_epoch = checkpoint['epoch']
        best_mAP = checkpoint['best_mAP']
        print("=> Start epoch {}  best mAP {:.1%}"
              .format(start_epoch, best_mAP))

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test on source domain:")
        evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True, rerank=args.rerank)
        print("Test on target domain:")
        evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
        return

    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
    optimizer = torch.optim.Adam(params)
    lr_scheduler = WarmupMultiStepLR(optimizer, args.milestones, gamma=0.1, warmup_factor=0.01, warmup_iters=args.warmup_step)

    # Trainer
    trainer = PreTrainer(model, num_classes, margin=args.margin)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        lr_scheduler.step()
        train_loader_source.new_epoch()
        train_loader_target.new_epoch()

        trainer.train(epoch, train_loader_source, train_loader_target, optimizer,
                    train_iters=len(train_loader_source), print_freq=args.print_freq)

        if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):

            _, mAP = evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True)

            is_best = mAP > best_mAP
            best_mAP = max(mAP, best_mAP)
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_mAP': best_mAP,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d}  source mAP: {:5.1%}  best: {:5.1%}{}\n'.
                  format(epoch, mAP, best_mAP, ' *' if is_best else ''))

    print("Test on target domain:")
    evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
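
WarmupMultiStepLR above is project code; as a rough sketch of the behaviour it is assumed to implement (a linear warm-up to the base learning rate followed by a 0.1x drop at each milestone), the per-epoch multiplier could be written as follows, with milestones and warm-up length chosen for illustration only:

def warmup_multistep_factor(epoch, milestones, gamma=0.1,
                            warmup_factor=0.01, warmup_iters=10):
    """Multiplier applied to the base learning rate at a given epoch."""
    warmup = 1.0
    if epoch < warmup_iters:                      # linear warm-up phase
        alpha = epoch / warmup_iters
        warmup = warmup_factor * (1 - alpha) + alpha
    decay = gamma ** sum(1 for m in milestones if m <= epoch)   # step decay
    return warmup * decay

for e in (0, 5, 10, 39, 40, 70):
    print(e, warmup_multistep_factor(e, milestones=[40, 70]))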
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
        coModel = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
        coModel = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        coModel.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()
    coModel = nn.DataParallel(coModel).cuda()

    # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    # if args.evaluate: return

    # Criterion
    criterion = [
        TripletLoss(args.margin,
                    args.num_instances,
                    isAvg=False,
                    use_semi=False).cuda(),
        TripletLoss(args.margin,
                    args.num_instances,
                    isAvg=False,
                    use_semi=False).cuda(),
    ]

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    coOptimizer = torch.optim.Adam(coModel.parameters(), lr=args.lr)

    optims = [optimizer, coOptimizer]

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, tarNames = extract_features(
            model, tgt_extfeat_loader, print_freq=args.print_freq)
        # synchronization feature order with dataset.train
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        target_real_label = np.asarray(
            [tarNames[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval])
        numTarID = len(set(target_real_label))
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        cluster = KMeans(n_clusters=numTarID, n_jobs=8, n_init=1)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        clusterRes = cluster.fit(target_features)
        labels, centers = clusterRes.labels_, clusterRes.cluster_centers_
        labels = splitLowconfi(target_features, labels, centers)
        # num_ids = len(set(labels))
        # print('Iteration {} have {} training ids'.format(iter_n+1, num_ids))
        # generate new dataset
        new_dataset, unknown_dataset = [], []
        # assign label for target ones
        unknownLab = labelNoise(torch.from_numpy(target_features),
                                torch.from_numpy(labels))
        # unknownFeats = target_features[labels==-1,:]
        unCounter = 0
        for (fname, _, cam), label in zip(tgt_dataset.trainval, labels):
            if label == -1:
                unknown_dataset.append(
                    (fname, int(unknownLab[unCounter]), cam))  # unknown data
                unCounter += 1
                continue
            # no need to change trainer.py's _parsing_input or the sampler after this relabeling
            new_dataset.append((fname, label, cam))
        print('Iteration {} have {} training images'.format(
            iter_n + 1, len(new_dataset)))

        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)
        # hard samples
        unLoader = DataLoader(Preprocessor(unknown_dataset,
                                           root=tgt_dataset.images_dir,
                                           transform=train_transformer),
                              batch_size=args.batch_size,
                              num_workers=4,
                              sampler=RandomIdentitySampler(
                                  unknown_dataset, args.num_instances),
                              pin_memory=True,
                              drop_last=True)

        # train model with new generated dataset
        trainer = CoTrainerAsy(model, coModel, train_loader, unLoader,
                               criterion, optims)
        # trainer = CoTeaching(
        #    model, coModel, train_loader, unLoader, criterion, optims
        # )
        # trainer = CoTrainerAsySep(
        #     model, coModel, train_loader, unLoader, criterion, optims
        # )

        evaluator = Evaluator(model, print_freq=args.print_freq)
        #evaluatorB = Evaluator(coModel, print_freq=args.print_freq)
        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch,
                          remRate=0.2 + (0.6 / args.iteration) *
                          (1 + iter_n))  # to at most 80%
            # trainer.train(epoch, remRate=0.7+(0.3/args.iteration)*(1+iter_n))
        # test only
        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                        tgt_dataset.gallery)
        #print('co-model:\n')
        #rank_score = evaluatorB.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    save_checkpoint(
        {
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': rank_score.market1501[0],
        },
        True,
        fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return (rank_score.map, rank_score.market1501[0])
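
splitLowconfi and labelNoise above are project-specific; a minimal sketch of the underlying idea, assuming "low confidence" simply means a sample lies far from its assigned k-means centroid, might look like this (the threshold and data are illustrative):

import numpy as np
from sklearn.cluster import KMeans

def split_low_confidence(features, n_clusters, keep_ratio=0.8):
    """Cluster with k-means, then mark the (1 - keep_ratio) fraction of
    samples farthest from their centroid as noise (label -1)."""
    km = KMeans(n_clusters=n_clusters, n_init=1, random_state=0).fit(features)
    labels = km.labels_.copy()
    dists = np.linalg.norm(features - km.cluster_centers_[labels], axis=1)
    labels[dists > np.quantile(dists, keep_ratio)] = -1
    return labels

feats = np.random.RandomState(0).rand(500, 64).astype(np.float32)
labels = split_low_confidence(feats, n_clusters=50)
print('kept:', int((labels >= 0).sum()), ' marked unknown:', int((labels == -1).sum()))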
Esempio n. 31
0
def main(args):
    # For fast training.
    cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print('log_dir=', args.logs_dir)

    # Print logs
    print(args)

    # Create data loaders
    dataset, num_classes, source_train_loader, target_train_loader, \
    query_loader, gallery_loader = get_data(args.data_dir, args.source,
                                            args.target, args.height,
                                            args.width, args.batch_size,
                                            args.re, args.workers)

    # Create model
    model = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes)

    # Invariance learning model
    num_tgt = len(dataset.target_train)
    model_inv = InvNet(args.features, num_tgt,
                        beta=args.inv_beta, knn=args.knn,
                        alpha=args.inv_alpha)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        model_inv.load_state_dict(checkpoint['state_dict_inv'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} "
              .format(start_epoch))

    # Set model
    model = nn.DataParallel(model).to(device)
    model_inv = model_inv.to(device)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature)
        return

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))

    base_params_need_for_grad = filter(lambda p: p.requires_grad, model.module.base.parameters())

    new_params = [p for p in model.parameters() if
                    id(p) not in base_param_ids]
    param_groups = [
        {'params': base_params_need_for_grad, 'lr_mult': 0.1},
        {'params': new_params, 'lr_mult': 1.0}]

    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, model_inv, lmd=args.lmd, include_mmd=args.include_mmd)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = args.epochs_decay
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader, optimizer)

        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'state_dict_inv': model_inv.state_dict(),
            'epoch': epoch + 1,
        }, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.
              format(epoch))

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.output_feature)
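
InvNet belongs to the invariance-learning code base and is not shown in this listing; as a loosely hedged sketch, such exemplar modules typically keep one feature slot per target image and update it with a momentum term, roughly along these lines (this is not the InvNet API):

import torch
import torch.nn.functional as F

class FeatureMemory:
    """Illustrative exemplar-style feature bank: one slot per target image,
    updated with momentum alpha."""
    def __init__(self, num_samples, num_features, alpha=0.01):
        self.mem = torch.zeros(num_samples, num_features)
        self.alpha = alpha

    def update(self, features, indices):
        features = F.normalize(features, dim=1)
        blended = (1 - self.alpha) * self.mem[indices] + self.alpha * features
        self.mem[indices] = F.normalize(blended, dim=1)

    def similarities(self, features):
        return F.normalize(features, dim=1) @ self.mem.t()

memory = FeatureMemory(num_samples=100, num_features=32)
feats = torch.randn(8, 32)
memory.update(feats, torch.arange(8))
print(memory.similarities(feats).shape)  # torch.Size([8, 100])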
Esempio n. 32
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset,  args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 )

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
        #        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)
    # optimizer = torch.optim.Adam(param_groups,lr=args.lr)
    # Trainer
    trainer = Trainer(model, criterion, 0, 0, SMLoss_mode=0)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else args.step_size
        lr = args.lr * (0.1**(epoch // step_size))
        # if epoch>70:
        #     lr = 0.01
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        is_best = True
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    results = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                 dataset.gallery)

    # save the parameters and results
    with open('%s/opts.yaml' % args.logs_dir, 'w') as fp:
        yaml.dump(vars(args), fp, default_flow_style=False)

    txtName = osp.join(args.logs_dir, 'results.txt')
    with open(txtName, 'w') as f:
        for key in results:
            f.write(key + ': ' + str(results[key]) + '\n')
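
The resume block above only copies checkpoint entries whose keys exist in the current model, which is the usual way to warm-start a network whose classifier head has changed; a standalone sketch of that pattern (the extra shape check is an assumption added here):

import torch.nn as nn

def load_matching_weights(model, checkpoint_state):
    """Copy only the checkpoint tensors whose name and shape match the model."""
    model_dict = model.state_dict()
    matched = {k: v for k, v in checkpoint_state.items()
               if k in model_dict and v.shape == model_dict[k].shape}
    model_dict.update(matched)
    model.load_state_dict(model_dict)
    return sorted(set(checkpoint_state) - set(matched))  # skipped keys

# Toy usage: the classifier changes size, the shared layers still transfer.
src = nn.Sequential(nn.Linear(16, 8), nn.Linear(8, 10))
dst = nn.Sequential(nn.Linear(16, 8), nn.Linear(8, 20))
print('skipped:', load_matching_weights(dst, src.state_dict()))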
Esempio n. 33
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.loss == 'triplet':
        assert args.num_instances > 1, 'TripletLoss requires num_instances > 1'
        assert args.batch_size % args.num_instances == 0, \
            'num_instances should divide batch_size'
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.workers, args.num_instances,
                 combine_trainval=args.combine_trainval)

    # Create model
    if args.loss == 'xentropy':
        model = InceptionNet(num_classes=num_classes,
                             num_features=args.features,
                             dropout=args.dropout)
    elif args.loss == 'oim':
        model = InceptionNet(num_features=args.features,
                             norm=True,
                             dropout=args.dropout)
    elif args.loss == 'triplet':
        model = InceptionNet(num_features=args.features, dropout=args.dropout)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    model = torch.nn.DataParallel(model).cuda()

    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> start epoch {}  best top1 {:.1%}".format(
            args.start_epoch, best_top1))
    else:
        best_top1 = 0

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    if args.loss == 'xentropy':
        criterion = torch.nn.CrossEntropyLoss()
    elif args.loss == 'oim':
        criterion = OIMLoss(model.module.num_features,
                            num_classes,
                            scalar=args.oim_scalar,
                            momentum=args.oim_momentum)
    elif args.loss == 'triplet':
        criterion = TripletLoss(margin=args.triplet_margin)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    criterion.cuda()

    # Optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
    else:
        raise ValueError("Cannot recognize optimizer type:", args.optimizer)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            lr = args.lr * (0.1**(epoch // 60))
        elif args.optimizer == 'adam':
            lr = args.lr if epoch <= 100 else \
                args.lr * (0.001 ** ((epoch - 100) / 50))
        else:
            raise ValueError("Cannot recognize optimizer type:",
                             args.optimizer)
        for g in optimizer.param_groups:
            g['lr'] = lr

    # Start training
    for epoch in range(args.start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
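
The Adam branch of adjust_lr above keeps the learning rate constant for the first 100 epochs and then decays it by a factor of 1000 over the following 50; a small numeric check of that schedule (the base_lr value is illustrative):

def exp_decay_lr(base_lr, epoch, start=100, span=50.0, factor=0.001):
    """base_lr up to `start`, then base_lr * factor ** ((epoch - start) / span)."""
    if epoch <= start:
        return base_lr
    return base_lr * factor ** ((epoch - start) / span)

for e in (100, 110, 125, 150):
    print(e, exp_decay_lr(3e-4, e))   # 3e-4, ~7.5e-5, ~9.5e-6, 3e-7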
Esempio n. 34
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    
    train, val, trainval = [], [], []
    numbers = [0, 0, 0]

    dataset_cuhk03 = merge('cuhk03', train, val, trainval, numbers, args.data_dir, args.split)
    dataset_market1501 = merge('market1501', train, val, trainval, numbers, args.data_dir, args.split)
    merge('cuhksysu', train, val, trainval, numbers, args.data_dir, args.split)
    merge('mars', train, val, trainval, numbers, args.data_dir, args.split)
    
    num_train_ids, num_val_ids, num_trainval_ids = numbers
    
    assert num_val_ids == dataset_cuhk03.num_val_ids + dataset_market1501.num_val_ids

    print("============================================")
    print("JSTL dataset loaded")
    print("  subset   | # ids | # images")
    print("  ---------------------------")
    print("  train    | {:5d} | {:8d}"
          .format(num_train_ids, len(train)))
    print("  val      | {:5d} | {:8d}"
          .format(num_val_ids, len(val)))
    print("  trainval | {:5d} | {:8d}"
          .format(num_trainval_ids, len(trainval)))

    query_cuhk03, gallery_cuhk03 = dataset_cuhk03.query, dataset_cuhk03.gallery
    query_market1501, gallery_market1501 = dataset_market1501.query, dataset_market1501.gallery

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = trainval if args.combine_trainval else train
    num_classes = (num_trainval_ids if args.combine_trainval
                   else num_train_ids)

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(args.height, args.width),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(train_set, root=args.data_dir,
                     transform=train_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        sampler=RandomIdentitySampler(train_set, args.num_instances),
        pin_memory=True, drop_last=True)

    val_loader = DataLoader(
        Preprocessor(val, root=args.data_dir,
                     transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    test_loader_cuhk03 = DataLoader(
        Preprocessor(list(set(query_cuhk03) | set(gallery_cuhk03)),
                     root=dataset_cuhk03.images_dir, transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    test_loader_market1501 = DataLoader(
        Preprocessor(list(set(query_market1501) | set(gallery_market1501)),
                     root=dataset_market1501.images_dir, transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, val, val, metric)
        print("Test(cuhk03):")
        evaluator.evaluate(test_loader_cuhk03, query_cuhk03, gallery_cuhk03, metric)
        print("Test(market1501):")
        evaluator.evaluate(test_loader_market1501, query_market1501, gallery_market1501, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val, val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)

    print("Test(cuhk03):")
    evaluator.evaluate(test_loader_cuhk03, query_cuhk03, gallery_cuhk03, metric)
    print("Test(market1501):")
    evaluator.evaluate(test_loader_market1501, query_market1501, gallery_market1501, metric)
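
The merge helper above is project code that concatenates several re-ID datasets while keeping person IDs disjoint; a hedged sketch of the usual relabeling, assuming each item is a (filename, pid, camid) tuple (merge_with_offset is a hypothetical name, not the repo's function):

def merge_with_offset(combined, dataset_items, id_offset):
    """Append dataset_items to combined, shifting person IDs by id_offset.
    Returns the new offset (old offset + number of distinct IDs added)."""
    pids = sorted({pid for _, pid, _ in dataset_items})
    relabel = {pid: i + id_offset for i, pid in enumerate(pids)}
    combined.extend((fname, relabel[pid], cam) for fname, pid, cam in dataset_items)
    return id_offset + len(pids)

train, offset = [], 0
offset = merge_with_offset(train, [('a.jpg', 3, 0), ('b.jpg', 7, 1)], offset)
offset = merge_with_offset(train, [('c.jpg', 3, 0)], offset)
print(train)   # the second dataset's IDs no longer collide with the first
print(offset)  # running count of distinct identities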
Esempio n. 35
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval, args.batch_id)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch,
                          num_features=1024,
                          dropout=args.dropout,
                          num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
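
The num_instances assertions above exist because the triplet loss expects each batch to hold P identities with K = num_instances images each, which is what the RandomIdentitySampler used throughout these listings provides; a minimal sketch of one epoch of such sampling over (filename, pid, camid) tuples (not the repo's exact sampler):

import random
from collections import defaultdict

def identity_sample_indices(data, num_instances, seed=0):
    """Return dataset indices where every consecutive block of num_instances
    indices shares one person ID (sampling with replacement when an identity
    has too few images)."""
    rng = random.Random(seed)
    by_pid = defaultdict(list)
    for idx, (_, pid, _) in enumerate(data):
        by_pid[pid].append(idx)
    pids = list(by_pid)
    rng.shuffle(pids)
    indices = []
    for pid in pids:
        pool = by_pid[pid]
        picks = (rng.sample(pool, num_instances) if len(pool) >= num_instances
                 else rng.choices(pool, k=num_instances))
        indices.extend(picks)
    return indices

data = [('img%d.jpg' % i, i % 5, 0) for i in range(20)]
print(identity_sample_indices(data, num_instances=4))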
Esempio n. 36
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
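
The TripletLoss used in these listings is repo code that mines triplets inside each P x K batch; purely for orientation, the underlying margin objective can be exercised with PyTorch's built-in nn.TripletMarginLoss (this is not the repo's implementation):

import torch
import torch.nn as nn

triplet = nn.TripletMarginLoss(margin=0.5)

anchor   = torch.randn(8, 128, requires_grad=True)
positive = anchor + 0.05 * torch.randn(8, 128)   # same identity: close by construction
negative = torch.randn(8, 128)                   # different identity: far on average

loss = triplet(anchor, positive, negative)
loss.backward()
print(float(loss))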