Example #1
def main(args):
    fix(args.seed)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    
    print(args)
    # Create data loaders
    dataset, test_dataset, num_classes, source_train_loader, \
        grid_query_loader, grid_gallery_loader, \
        prid_query_loader, prid_gallery_loader, \
        viper_query_loader, viper_gallery_loader, \
        ilid_query_loader, ilid_gallery_loader = \
        get_data(args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instance, args.re, args.workers)

    # Create model
    Encoder, Transfer, CamDis = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes)

    invNet = InvNet(args.features, num_classes, args.batch_size, beta=args.beta, knn=args.knn, alpha=args.alpha).cuda()

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        Encoder.load_state_dict(checkpoint['Encoder'])
        Transfer.load_state_dict(checkpoint['Transfer'])
        CamDis.load_state_dict(checkpoint['CamDis'])
        invNet.load_state_dict(checkpoint['InvNet'])
        start_epoch = checkpoint['epoch']

    Encoder = Encoder.cuda()
    Transfer = Transfer.cuda()
    CamDis = CamDis.cuda()

    model = [Encoder, Transfer, CamDis]
    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        # -----------------------------
        v = evaluator.eval_viper(viper_query_loader, viper_gallery_loader, test_dataset.viper_query, test_dataset.viper_gallery, args.output_feature, seed=57)
        p = evaluator.eval_prid(prid_query_loader, prid_gallery_loader, test_dataset.prid_query, test_dataset.prid_gallery, args.output_feature, seed=40)
        g = evaluator.eval_grid(grid_query_loader, grid_gallery_loader, test_dataset.grid_query, test_dataset.grid_gallery, args.output_feature, seed=35)
        l = evaluator.eval_ilids(ilid_query_loader, test_dataset.ilid_query, args.output_feature, seed=24)
        # -----------------------------

    criterion = []
    criterion.append(nn.CrossEntropyLoss().cuda())
    criterion.append(TripletLoss(margin=args.margin))


    # Optimizer
    base_param_ids = set(map(id, Encoder.base.parameters()))
    new_params = [p for p in Encoder.parameters() if
                    id(p) not in base_param_ids]
    param_groups = [
        {'params': Encoder.base.parameters(), 'lr_mult': 0.1},
        {'params': new_params, 'lr_mult': 1.0}]

    optimizer_Encoder = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=0.9, weight_decay=5e-4, nesterov=True)
    # ====
    base_param_ids = set(map(id, Transfer.base.parameters()))
    new_params = [p for p in Transfer.parameters() if
                    id(p) not in base_param_ids]
    param_groups = [
        {'params': Transfer.base.parameters(), 'lr_mult': 0.1},
        {'params': new_params, 'lr_mult': 1.0}]

    optimizer_Transfer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=0.9, weight_decay=5e-4, nesterov=True)
    # ====
    param_groups = [
        {'params':CamDis.parameters(), 'lr_mult':1.0},
    ]
    optimizer_Cam = torch.optim.SGD(param_groups, lr=args.lr,momentum=0.9, weight_decay=5e-4, nesterov=True)

    optimizer = [optimizer_Encoder, optimizer_Transfer, optimizer_Cam]

    # Trainer
    trainer = Trainer(model, criterion, InvNet=invNet)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer_Encoder.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        for g in optimizer_Transfer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        for g in optimizer_Cam.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, optimizer, args.tri_weight, args.adv_weight, args.mem_weight)

        save_checkpoint({
            'Encoder': Encoder.state_dict(),
            'Transfer': Transfer.state_dict(),
            'CamDis': CamDis.state_dict(),
            'InvNet': invNet.state_dict(),
            'epoch': epoch + 1,
        }, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        evaluator = Evaluator(model)
        print('\n * Finished epoch {:3d} \n'.
              format(epoch))

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, args.output_feature, args.rerank)
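
Note: nearly all of these examples start by redirecting print output to both the console and a log file via sys.stdout = Logger(...). The Logger class itself is not included in these snippets; a minimal tee-style sketch of what such a helper could look like (an assumption, not the repository's actual implementation):

import os
import sys


class Logger(object):
    """Minimal stdout tee (sketch): mirrors everything printed to a log file."""

    def __init__(self, fpath):
        self.console = sys.stdout
        os.makedirs(os.path.dirname(fpath) or '.', exist_ok=True)
        self.file = open(fpath, 'w')

    def write(self, msg):
        self.console.write(msg)
        self.file.write(msg)

    def flush(self):
        self.console.flush()
        self.file.flush()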
Example #2
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.loss == 'triplet':
        assert args.num_instances > 1, 'TripletLoss requires num_instances > 1'
        assert args.batch_size % args.num_instances == 0, \
            'num_instances should divide batch_size'

    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.workers, args.num_instances,
                 combine_trainval=args.combine_trainval)

    # Create model
    if args.loss == 'xentropy':
        model = ResNetflow(args.depth, pretrained=True,
                       num_classes=num_classes,
                       num_features=args.features, dropout=args.dropout)
    elif args.loss == 'oim':
        model = ResNetflow(args.depth, pretrained=True, num_features=args.features,
                       norm=True, dropout=args.dropout)
    elif args.loss == 'triplet':
        model = ResNetflow(args.depth, pretrained=True,
                       num_features=args.features, dropout=args.dropout)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    model = torch.nn.DataParallel(model).cuda()


    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> start epoch {}  best top1 {:.1%}"
              .format(args.start_epoch, best_top1))
    else:
        best_top1 = 0

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    if args.loss == 'xentropy':
        criterion = torch.nn.CrossEntropyLoss()
    elif args.loss == 'oim':
        criterion = OIMLoss(model.module.num_features, num_classes,
                            scalar=args.oim_scalar, momentum=args.oim_momentum)
    elif args.loss == 'triplet':
        criterion = TripletLoss(margin=args.triplet_margin)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    criterion.cuda()


    # Optimizer
    if args.optimizer == 'sgd':
        if args.loss == 'xentropy':
            base_param_ids = set(map(id, model.module.base.parameters()))
            new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
            param_groups = [
                {'params': model.module.base.parameters(), 'lr_mult': 0.1},
                {'params': new_params, 'lr_mult': 1.0}]
        else:
            param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)

    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                     weight_decay=args.weight_decay)
    else:
        raise ValueError("Cannot recognize optimizer type:", args.optimizer)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate

    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            lr = args.lr * (0.1 ** (epoch // 40))
        elif args.optimizer == 'adam':
            lr = args.lr if epoch <= 100 else \
                args.lr * (0.001 ** ((epoch - 100) / 50))
        else:
            raise ValueError("Cannot recognize optimizer type:", args.optimizer)
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(args.start_epoch, args.epochs):

        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        if epoch % 3  == 0:
            #top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
            top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, multi_shot=True)
            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))
    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, multi_shot=True)
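
The optimizer setup and adjust_lr above illustrate a pattern repeated in most of these examples: pretrained backbone parameters are tagged with 'lr_mult': 0.1, freshly added layers with 1.0, and the scheduler multiplies the decayed base rate by each group's multiplier. A self-contained toy sketch of that interplay (the module names here are placeholders, not the repository's models):

import torch
import torch.nn as nn

# Toy stand-ins for a pretrained backbone ("base") and a freshly initialized head.
base = nn.Linear(8, 8)
head = nn.Linear(8, 4)

param_groups = [
    {'params': base.parameters(), 'lr_mult': 0.1},  # backbone learns 10x slower
    {'params': head.parameters(), 'lr_mult': 1.0},  # new layers use the full rate
]
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9,
                            weight_decay=5e-4, nesterov=True)

def adjust_lr(epoch, base_lr=0.1, step_size=40):
    # Same step decay as above: divide by 10 every step_size epochs,
    # then rescale each group by its lr_mult.
    lr = base_lr * (0.1 ** (epoch // step_size))
    for g in optimizer.param_groups:
        g['lr'] = lr * g.get('lr_mult', 1)

adjust_lr(0)    # backbone lr = 0.01, head lr = 0.1
adjust_lr(40)   # backbone lr = 0.001, head lr = 0.01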
Example #3
    def softmax_train(self, train_data, unselected_data, step, epochs,
                      step_size, init_lr, dropout, loss):
        """ create model and dataloader """
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes,
                              embeding_fea_size=self.embeding_fea_size,
                              classifier=loss,
                              fixed_layer=self.fixed_layer)

        model = nn.DataParallel(model).cuda()

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, model.module.CNN.base.parameters()))
        base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                           model.module.CNN.base.parameters())
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]

        # set the learning rate for backbone to be 0.1 times
        param_groups = [{
            'params': base_params_need_for_grad,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]

        exclusive_criterion = ExLoss(self.embeding_fea_size,
                                     len(unselected_data),
                                     t=10).cuda()

        optimizer = torch.optim.SGD(param_groups,
                                    lr=init_lr,
                                    momentum=self.train_momentum,
                                    weight_decay=5e-4,
                                    nesterov=True)

        # change the learning rate by step
        def adjust_lr(epoch, step_size):

            use_unselected_data = True
            lr = init_lr / (10**(epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)
            if epoch >= step_size:
                use_unselected_data = False
                # print("Epoch {}, CE loss, current lr {}".format(epoch, lr))
            return use_unselected_data

        s_dataloader = self.get_dataloader(train_data,
                                           training=True,
                                           is_ulabeled=False)
        u_dataloader = self.get_dataloader(unselected_data,
                                           training=True,
                                           is_ulabeled=True)
        """ main training process """
        trainer = Trainer(model,
                          exclusive_criterion,
                          fixed_layer=self.fixed_layer,
                          lamda=self.lamda)
        for epoch in range(epochs):
            use_unselected_data = adjust_lr(epoch, step_size)
            trainer.train(epoch,
                          s_dataloader,
                          u_dataloader,
                          optimizer,
                          use_unselected_data,
                          print_freq=len(s_dataloader) // 2)

        ckpt_file = osp.join(self.save_path, "step_{}.ckpt".format(step))
        torch.save(model.state_dict(), ckpt_file)
        self.model = model
Example #4
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    # criterion = TripletLoss(margin=args.margin).cuda()
    criterion = MSMLTripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 50 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        if epoch <= 50:
            lr = args.lr
        elif epoch <= 200:
            lr = 0.1 * args.lr
        else:
            lr = 0.01 * args.lr
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
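
The save_checkpoint helper used by these scripts is not shown, and its signature even varies between examples (some pass is_best, some do not). A plausible minimal sketch, assuming it writes the state to fpath and copies it to model_best.pth.tar when is_best is set, which is how the final "Test with best model" blocks load it:

import os
import os.path as osp
import shutil
import torch


def save_checkpoint(state, is_best=False, fpath='checkpoint.pth.tar'):
    # Sketch only: persist the latest state and keep a copy of the best one.
    dirname = osp.dirname(fpath) or '.'
    os.makedirs(dirname, exist_ok=True)
    torch.save(state, fpath)
    if is_best:
        shutil.copy(fpath, osp.join(dirname, 'model_best.pth.tar'))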
Example #5
def main(args):
    device = torch.device(
        'cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu')
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader, data_path = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval, args.make)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch,
                          num_features=1024,
                          dropout=args.dropout,
                          num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        working_dir = '/export/home/lbereska/projects/reid/'
        # working_dir = '.'
        load_path = osp.join(working_dir, 'logs/triplet', args.resume,
                             'model_best.pth.tar')
        checkpoint = load_checkpoint(load_path)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model, device_ids=[args.gpu]).to(device)

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        # print("Validation:")  # todo only for testonly
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Visualizer
    tsne = Visualize(model)
    if args.tsne:
        # metric.train(model, train_loader)
        perplexity, n_points = args.tsne
        tsne.visualize(test_loader, data_path, perplexity, n_points)
        return

    criterion = TripletLoss(margin=args.margin).to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    trainer = Trainer(model, criterion, device)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch,
                      train_loader,
                      optimizer,
                      print_freq=args.print_freq)
        if epoch < args.start_save:
            continue
        if epoch % 1 == 0:
            top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print(
                '\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
                format(epoch, top1, best_top1, ' *' if is_best else ''))
        # print('epoch {}'.format(epoch))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
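
Example #1 calls a fix(args.seed) helper whose body is not shown, while the later examples (this one included) inline the same seeding steps. A sketch of what such a helper presumably bundles, based on those inlined versions:

import random

import numpy as np
import torch
from torch.backends import cudnn


def fix(seed):
    # Seed every RNG the examples touch, then enable cudnn autotuning.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.benchmark = True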
Example #6
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    detectmodel, ic = getdetection()
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(detectmodel, ic, args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.re, args.workers)

    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)
    model = model.cuda()
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()
    # base_param_ids = set(map(id, model.base.parameters()))
    # new_params = [p for p in model.parameters() if
    #               id(p) not in base_param_ids]
    # param_groups = [
    #     {'params': model.base.parameters(), 'lr_mult': 0.1},
    #     {'params': new_params, 'lr_mult': 0.1}]
    # param_groups = [
    #     {'params': new_params, 'lr_mult': 1}]

    param_groups = model.parameters()

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    print('-----------args.camstyle-------------')
    print(args.camstyle)
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 15
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    #label = torch.load('/media/sdc2/wyf/DDT-master/pcalayer23.pkl')

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join('./resnet18_hd_bn_FC_three_branch.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))
    torch.save(model, './resnet18_hd_bn_FC_three_branch.pkl')
Example #7
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_features=args.features, norm=True,
                          dropout=args.dropout)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = OIMLoss(model.module.num_features, num_classes,
                        scalar=args.oim_scalar,
                        momentum=args.oim_momentum).cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
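
Most of these scripts save model.module.state_dict() so the checkpoint keys carry no 'module.' prefix, while Example #2 saves model.state_dict() from the nn.DataParallel wrapper directly. The difference matters when reloading; a small illustration:

import torch.nn as nn

net = nn.Linear(4, 2)
wrapped = nn.DataParallel(net)

print(list(net.state_dict().keys()))      # ['weight', 'bias']
print(list(wrapped.state_dict().keys()))  # ['module.weight', 'module.bias']

# Saving wrapped.module.state_dict() (as most examples do) keeps the plain keys,
# so the checkpoint can later be loaded into the bare module or, through
# wrapped.module.load_state_dict(...), back into a wrapped model.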
Example #8
def train_model_w(model,
                  dataloader,
                  testdata,
                  data_dir,
                  data_params,
                  epochs=50,
                  weights=None):
    """
    train model given the dataloader the criterion,
    stop when epochs are reached
    params:
        model: model for training
        dataloader: training data
        epochs: training epochs
        criterion
    """
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    # optimizer = torch.optim.Adam(params=param_groups, lr=0.1, weight_decay=5e-4,amsgrad=True)
    optimizer = torch.optim.SGD(param_groups,
                                lr=config.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    def adjust_lr(epoch):
        step_size = config.step_size  #20#40
        #0-150 0.1 150-250 0.01 250-350 0.001
        #resume 0.01
        # if epoch == 100:
        #     step_size = 50
        # if epoch < 60: expand = 0
        # elif epoch < 80: expand = 1
        # else: expand = 2
        lr = config.lr * (0.1 ** (epoch // step_size))  # config.lr * 0.1^(epoch // step_size)
        #if(epoch >= 70):
        #    lr *= 0.1
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    if (weights is not None):
        criterion = nn.CrossEntropyLoss(
            weight=torch.from_numpy(weights).float()).cuda()
    else:
        criterion = nn.CrossEntropyLoss().cuda()
    trainer = Trainer(model, criterion)
    ac_now = 0.0
    ac_max = 0.1
    losses_avg = 0.0
    losses_avg_c = 0
    for epoch in range(epochs):
        adjust_lr(epoch)
        losses_avg = trainer.train(epoch, dataloader, optimizer)
        '''
        if losses_avg <= 0.004:
            print('terminate: losses_avg < 0.003')
            return model
        elif losses_avg < 0.008 and losses_avg > 0.006:
            losses_avg_c = losses_avg_c + 1
            if(losses_avg_c >= 3):
                print('terminate: 0.007 < losses_avg < 0.008')
                return model
        '''
        if (epoch + 1) % 1 == 0 and epoch >= 9:
            pred_, lab_, score = pre_from_feature_ww(model, testdata, data_dir,
                                                     data_params)
            numt = np.where(pred_ == lab_)[0]
            ac_now = numt.size / float(len(lab_))
            print('test data.val %.3f' % (ac_now))
            # print(classification_report_imbalanced_light(lab_, pred_, score, len(np.unique(lab_))))
            #print(numt)
            if (ac_now >= ac_max):
                ac_max = ac_now
                torch.save(model, 'res50_sd260_cross_val_maxtemp.pkl')
            # torch.save(model, 'res50_cv3_epoch%d_%.3f.pkl' % (epoch, ac_now))

    model = torch.load('res50_sd260_cross_val_maxtemp.pkl').module
    model = nn.DataParallel(model).cuda()
    return model
Example #9
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    start = time.time()

    # Redirect print to both console and log file
    if not args.evaluate:
        dt = datetime.datetime.now()
        sys.stdout = Logger(
            osp.join(
                args.logs_dir,
                'log_' + str(dt.month).zfill(2) + str(dt.day).zfill(2) +
                str(dt.hour).zfill(2) + str(dt.minute).zfill(2) + '.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    mt_train_loader, mt_num_classes, test_loader, query_set, gallery_set = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=mt_num_classes,
                          double_loss=True)
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)

    # Criterion
    criterion_1 = nn.CrossEntropyLoss().cuda()
    criterion_2 = PosetLoss_G2G(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    num_task = len(mt_num_classes)  # num_task equals camera number, each camera is a task
    trainer = Trainer(model, criterion_1, criterion_2, num_task)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    start_epoch = best_top1 = 0
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, mt_train_loader, optimizer)
        if (epoch % args.start_save == (args.start_save - 1)):
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                },
                0,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            # Final test
            print('Test with the model after epoch {:d}:'.format(epoch + 1))
            checkpoint = load_checkpoint(
                osp.join(args.logs_dir, 'checkpoint.pth.tar'))
            model.module.load_state_dict(checkpoint['state_dict'])
            metric.train(model, mt_train_loader)
            evaluator.evaluate(test_loader, query_set, gallery_set, metric)
    end = time.time()
    print('Total time: {:.1f}s'.format(end - start))
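
The Adam schedule used here and in Examples #2 and #5 keeps the learning rate flat for the first 100 epochs, then decays it exponentially so that it reaches one thousandth of the base rate at epoch 150. A quick check of the formula (base_lr = 3e-4 is only an illustrative value):

def adam_lr(epoch, base_lr=3e-4):
    # lr = base_lr for epoch <= 100, then base_lr * 0.001 ** ((epoch - 100) / 50)
    return base_lr if epoch <= 100 else base_lr * (0.001 ** ((epoch - 100) / 50.0))

for e in (0, 100, 125, 150):
    print(e, adam_lr(e))
# 0    3e-4
# 100  3e-4
# 125  ~9.5e-6  (3e-4 * 0.001 ** 0.5)
# 150  3e-7     (3e-4 * 0.001)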
Example #10
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed) 

    cudnn.benchmark = True
    # Redirect print to both console and log file
    date_str = '{}'.format(datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S'))
    if (not args.evaluate) and args.log:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_{}.txt'.format(date_str)))
        # save opts
        with open(osp.join(args.logs_dir, 'args_{}.json'.format(date_str)), 'w') as fp:
            json.dump(vars(args), fp, indent=1)

    assert args.real or args.synthetic, "At least one dataset should be used"
    # Create data loaders
    print(args.real, args.synthetic)

    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                    args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.real, args.synthetic, args.re, 0, args.camstyle)

    # Create model
    model = models.create('ide', num_features=args.features, norm=args.norm,
                          dropout=args.dropout, num_classes=num_classes, last_stride=args.last_stride,
                          output_feature=args.output_feature, arch=args.arch)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        if args.evaluate:
            model, start_epoch, best_top1 = checkpoint_loader(model, args.resume, eval_only=True)
        else:
            model, start_epoch, best_top1 = checkpoint_loader(model, args.resume)
        print("=> Start epoch {}  best top1_eval {:.1%}".format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, eval_only=True)
        return
    if args.evaluate_VID:
        print("Test on VID dataset:")
        mAP_list = []
        cmc1_list = []
        cmc5_list = []
        cmc10_list = []
        for i in range(10):
            dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
            get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                    args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.re, 0, args.camstyle)
            mAP, cmc1, cmc5 , cmc10 = evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, eval_only=True)
            mAP_list.append(mAP)
            cmc1_list.append(cmc1)
            cmc5_list.append(cmc5)
            cmc10_list.append(cmc10)
        print('Final VID test [mAP: {:5.2%}], [cmc1: {:5.2%}], [cmc5: {:5.2%}], [cmc10: {:5.2%}]'
          .format(np.mean(mAP_list), np.mean(cmc1_list), np.mean(cmc5_list), np.mean(cmc10_list)))
        return 

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda() if not args.LSR else LSR_loss().cuda()

    if args.train or args.finetune:
        # Optimizer
        if hasattr(model.module, 'base'):  # low learning_rate the base network (aka. ResNet-50)
            base_param_ids = set(map(id, model.module.base.parameters()))
            new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
            param_groups = [{'params': model.module.base.parameters(), 'lr_mult': 0.1},
                            {'params': new_params, 'lr_mult': 1.0}]
        else:
            param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay,
                                    nesterov=True)

        # Trainer
        if args.camstyle == 0:
            trainer = Trainer(model, criterion)
        else:
            trainer = CamStyleTrainer(model, criterion, camstyle_loader)

        # Schedule learning rate
        def adjust_lr(epoch):
            step_size = args.step_size
            lr = args.lr * (0.1 ** (epoch // step_size))
            if args.finetune:
                if args.dataset == "veri":
                    lr = lr / 50
                elif args.dataset == "vihicle_id":
                    lr = lr / 10
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

        # Draw Curve
        epoch_s = []
        loss_s = []
        prec_s = []

        # Start training
        for epoch in range(start_epoch, args.epochs):
            t0 = time.time()
            adjust_lr(epoch)
            # train_loss, train_prec = 0, 0
            train_loss, train_prec = trainer.train(epoch, train_loader, optimizer, fix_bn=args.fix_bn)

            if epoch < args.start_save:
                continue
            
            if epoch % 20 == 0 or args.finetune:
                evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, eval_only=True)
            
            top1_eval = 50

            is_best = top1_eval >= best_top1
            best_top1 = max(top1_eval, best_top1)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint_{}.pth.tar'.format(date_str)))
            epoch_s.append(epoch)
            loss_s.append(train_loss)
            prec_s.append(train_prec)
            draw_curve(os.path.join(args.logs_dir, 'train_{}.jpg'.format(date_str)), epoch_s, loss_s, prec_s)

            t1 = time.time()
            t_epoch = t1 - t0
            print('\n * Finished epoch {:3d}  top1_eval: {:5.1%}  best_eval: {:5.1%}{}\n'.
                  format(epoch, top1_eval, best_top1, ' *' if is_best else ''))
            print('*************** Epoch takes time: {:^10.2f} *********************\n'.format(t_epoch))
            pass

        # Final test
        print('Test with best model:')
        model, start_epoch, best_top1 = checkpoint_loader(model, osp.join(args.logs_dir, 'model_best.pth.tar'),
                                                          eval_only=True)
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))

        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, eval_only=True)
Example #11
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = ResNet(args.depth, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes)
    model = nn.DataParallel(model).cuda()
    # Load from checkpoint
    start_epoch = best_map = prior_best_map = 0
    if args.if_resume:
        print(args.resume)
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        prior_best_map = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, prior_best_map))
    # model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    alpha = args.alpha
    beta = args.beta
    gamma = args.gamma
    criterion = TripletLoss_biu(margin=args.margin, num_instances=args.num_instances,
                                alpha=alpha, beta=beta, gamma=gamma).cuda()

    # Optimizer
    if args.optimizer == 'sgd':
            # base_param_ids = set(map(id, model.module.base.parameters()))
            # new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
            # param_groups = [
            #     {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            #     {'params': new_params, 'lr_mult': 1.0}]
        param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    else :
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            lr = args.lr * (0.1 ** (epoch // 40))
        else :
            lr = args.lr if epoch <= 80 else \
                 args.lr * (0.1 ** ((epoch - 100) / 60.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        if epoch % 3 ==0:
            metric.train(model,train_loader)
            top_map = evaluator.evaluate(test_loader, dataset.query, dataset.gallery) 
            is_best = top_map > prior_best_map
            prior_best_map = max(top_map, prior_best_map)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_map': top_map,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        # print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
        #       format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #12
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    # if args.evaluate: return

    # Criterion
    criterion = [
        TripletLoss(args.margin, args.num_instances, isAvg=True,
                    use_semi=True).cuda(),
        TripletLoss(args.margin, args.num_instances, isAvg=True,
                    use_semi=True).cuda(),
    ]

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, tarNames = extract_features(
            model, tgt_extfeat_loader, print_freq=args.print_freq)
        # synchronization feature order with dataset.train
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        target_real_label = np.asarray(
            [tarNames[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval])
        numTarID = len(set(target_real_label))
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        cluster = KMeans(n_clusters=numTarID, n_jobs=8, n_init=1)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        clusterRes = cluster.fit(target_features)
        labels, centers = clusterRes.labels_, clusterRes.cluster_centers_
        # labels = splitLowconfi(target_features,labels,centers)
        # num_ids = len(set(labels))
        # print('Iteration {} have {} training ids'.format(iter_n+1, num_ids))
        # generate new dataset
        new_dataset = []
        for (fname, _, cam), label in zip(tgt_dataset.trainval, labels):
            # if label==-1: continue
            # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0
            new_dataset.append((fname, label, cam))
        print('Iteration {} have {} training images'.format(
            iter_n + 1, len(new_dataset)))
        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        # train model with new generated dataset
        trainer = Trainer(model, criterion)

        evaluator = Evaluator(model, print_freq=args.print_freq)

        # Start training
        for epoch in range(args.epochs):
            # trainer.train(epoch, remRate=0.2+(0.6/args.iteration)*(1+iter_n)) # to at most 80%
            trainer.train(epoch, train_loader, optimizer)
        # test only
        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                        tgt_dataset.gallery)
        #print('co-model:\n')
        #rank_score = evaluatorB.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    save_checkpoint(
        {
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': rank_score.market1501[0],
        },
        True,
        fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return (rank_score.map, rank_score.market1501[0])
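
Example #12 re-labels the unlabeled target set on every iteration by clustering extracted features and using cluster indices as pseudo-identities. A condensed, self-contained sketch of that relabeling step with dummy features (everything below is illustrative, not the repository's actual helpers):

import numpy as np
from sklearn.cluster import KMeans

# Dummy stand-ins: a 2048-d feature per target image plus its (fname, pid, cam) record.
target_features = np.random.rand(200, 2048).astype(np.float32)
trainval = [('img_{:04d}.jpg'.format(i), -1, i % 6) for i in range(200)]

num_pseudo_ids = 50  # in the example above this is the number of distinct real labels
labels = KMeans(n_clusters=num_pseudo_ids, n_init=1).fit(target_features).labels_

# Replace the unknown identity with the cluster index; the camera id is kept
# so samplers like RandomIdentitySampler keep working on the new dataset.
new_dataset = [(fname, int(label), cam)
               for (fname, _, cam), label in zip(trainval, labels)]
print(len(new_dataset), 'pseudo-labeled images,',
      len(set(lbl for _, lbl, _ in new_dataset)), 'pseudo ids')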
Example #13
def main(args):
    random.seed(args.seed)
    np.random.seed(1)
    torch.manual_seed(1)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # params
    params = {}
    if args.model_type == 'masks':
        params['num_m_features'] = args.num_m_features
        params['masks'] = args.masks
    else:
        print('unknown model type.')
        return

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (256, 256)
    dataset, num_classes, random_train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.logs_dir, args.model_type, params,
                 args.height, args.width, args.crop_height, args.crop_width,
                 args.batch_size, args.workers, args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_classes=num_classes, params=params)

    # Load from checkpoint
    start_epoch = best_top1 = best_mAP = 0
    if args.weights:
        checkpoint = load_checkpoint(args.weights)
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model, [0, 1, 2, 3]).cuda()

    # Criterion
    criterion = TripletLoss().cuda()

    # Optimizer
    base_params = []
    for name, p in model.module.base.named_parameters():
        base_params.append(p)
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    if args.model_type == 'masks':
        param_groups = [{
            'params': base_params,
            'lr_mult': args.lrm
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        print('unknown model type.')
        return

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion, num_classes, args.logs_dir)

    # Evaluator
    evaluator = Evaluator(model)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = args.step_size
        if epoch < step_size:
            lr = args.lr
        elif epoch >= step_size and epoch < args.epochs:
            lr = args.lr * 0.1
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        return lr

    # Start training
    for epoch in range(start_epoch, args.epochs):
        lr = adjust_lr(epoch)

        if epoch < args.warm_up_ep:
            trainer.train(epoch, random_train_loader, optimizer, lr, True,
                          args.warm_up_ep)
        else:
            trainer.train(epoch, random_train_loader, optimizer, lr, False,
                          args.warm_up_ep)

        if epoch < args.start_save:
            continue

        if epoch % 10 == 9:
            print('Epoch: [%d]' % epoch)
            top1, mAP = evaluator.evaluate(test_loader, dataset.query,
                                           dataset.gallery)

            is_best = mAP > best_mAP
            best_mAP = max(mAP, best_mAP)
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                    'best_mAP': mAP,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'model_best.pth.tar'))

            if epoch == args.epochs - 1:
                save_checkpoint(
                    {
                        'state_dict': model.module.state_dict(),
                        'epoch': epoch + 1,
                        'best_mAP': mAP,
                    },
                    True,
                    fpath=osp.join(args.logs_dir, 'last.pth.tar'))
Example #14
def main(args):
    # For fast training.
    cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print('log_dir=', args.logs_dir)

    # Print logs
    print(args)

    # Create data loaders
    dataset, num_classes, source_train_loader, target_train_loader, \
    query_loader, gallery_loader = get_data(args.data_dir, args.source,
                                            args.target, args.height,
                                            args.width, args.batch_size,
                                            args.camstyle_type,
                                            args.re, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Invariance learning model
    num_tgt = len(dataset.target_train)
    model_inv = InvNet(args.features,
                       num_tgt,
                       beta=args.inv_beta,
                       knn=args.knn,
                       alpha=args.inv_alpha)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        model_inv.load_state_dict(checkpoint['state_dict_inv'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    # Set model
    model = nn.DataParallel(model).to(device)
    model_inv = model_inv.to(device)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        query, retrieveds = evaluator.evaluate(query_loader, gallery_loader,
                                               dataset.query, dataset.gallery,
                                               args.output_feature)
        for i in range(len(query)):
            Image.open("data/" + args.target + "/query/" + query[i]).save(
                "q_%d.jpg" % i)
            for j in range(len(retrieveds[i])):
                Image.open("data/" + args.target + "/bounding_box_test/" +
                           retrieveds[i][j]).save("r_%d_%d.jpg" % (i, j))
        return

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))

    base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                       model.module.base.parameters())
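    # only backbone parameters that still require gradients are optimized;
    # layers frozen upstream are skipped automatically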

    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': base_params_need_for_grad,
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model,
                      model_inv,
                      lmd=args.lmd,
                      include_mmd=args.include_mmd,
                      include_coral=args.include_coral,
                      lmd_ext=args.lmd_ext)

    # Schedule learning rate
    def adjust_lr(epoch):
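        # step decay: divide the lr by 10 every args.epochs_decay epochs;
        # lr_mult keeps the backbone at one tenth of the head learning rate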
        step_size = args.epochs_decay
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader,
                      optimizer)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'state_dict_inv': model_inv.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test

    print('Test with best model:')
    evaluator = Evaluator(model)
    query, retrieveds = evaluator.evaluate(query_loader, gallery_loader,
                                           dataset.query, dataset.gallery,
                                           args.output_feature)
    for i in range(len(query)):
        Image.open("data/" + args.target + "/query/" + query[i]).save(
            "q_%d.jpg" % i)
        for j in range(len(retrieveds[i])):
            Image.open("data/" + args.target + "/bounding_box_test/" +
                       retrieveds[i][j]).save("r_%d_%d.jpg" % (i, j))
Пример #15
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.camstyle, args.re, args.workers)

    # Create model
    model = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} "
              .format(start_epoch))

    if args.cuda:
        model = nn.DataParallel(model).cuda()
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        model = nn.DataParallel(model)
        criterion = nn.CrossEntropyLoss()
    # model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, args.output_feature, args.rerank)
        return

    # Criterion
    # criterion = nn.CrossEntropyLoss()
    # criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if
                    id(p) not in base_param_ids]
    param_groups = [
        {'params': model.module.base.parameters(), 'lr_mult': 0.1},
        {'params': new_params, 'lr_mult': 1.0}]

    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    if args.camstyle == 0:
        trainer = Trainer(model, criterion, args.cuda)
    else:
        trainer = CamStyleTrainer(model, criterion, camstyle_loader)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
        }, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.
              format(epoch))

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, args.output_feature, args.rerank)
Пример #16
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hack: size the classifier to the number of source-domain identities
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Distance metric
    # metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print(
        "Test with the original model trained on source domain (direct transfer):"
    )
    rank_score_best = evaluator.evaluate(test_loader, tgt_dataset.query,
                                         tgt_dataset.gallery)
    best_map = rank_score_best.map  # track mAP; rank_score_best.market1501[0] would be rank-1

    if args.evaluate:
        return

    # Criterion
    criterion = [
        TripletLoss(args.margin, args.num_instances).cuda(),
        TripletLoss(args.margin, args.num_instances).cuda(),
        AccumulatedLoss(args.margin, args.num_instances).cuda(),
    ]

    # Optimizer
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=0.9,
    )

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0  # lambda_value == 0 disables the use of source-domain data
        else:
            # extract source-domain features
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # align feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # align feature order with tgt_dataset.trainval
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
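        # rerank_dist holds k-reciprocal re-ranked pairwise distances among target samples;
        # lambda_value weights how much the source-domain features influence those distances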
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
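            # eps (the DBSCAN neighborhood radius) is the mean of the smallest
            # (rho * 100)% of pairwise re-ranked distances, estimated from the data itself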
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)

            # HDBSCAN cluster
            import hdbscan
            cluster_hdbscan = hdbscan.HDBSCAN(min_cluster_size=10,
                                              min_samples=4,
                                              metric='precomputed')

        # cluster the images and select them as the training set for this iteration
        print('Clustering and labeling...')
        if args.use_hdbscan_clustering:
            print('Using HDBSCAN for clustering')
            labels = cluster_hdbscan.fit_predict(rerank_dist)
        else:
            print('Using DBSCAN for clustering')
            labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - 1  # exclude the outlier label (-1)
        print('Iteration {} has {} training ids'.format(iter_n + 1, num_ids))

        # generate new dataset
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.trainval, labels):
            if label == -1:
                continue
            # appending a dummy camera id (0) keeps trainer.py's _parsing_input and the sampler unchanged
            new_dataset.append((fname, label, 0))
        print('Iteration {} has {} training images'.format(
            iter_n + 1, len(new_dataset)))

        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)
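        # RandomIdentitySampler draws num_instances images per pseudo identity,
        # so every batch contains positive pairs for the triplet-style losses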

        # train model with new generated dataset
        trainer = Trainer(model, criterion, print_freq=args.print_freq)
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, optimizer)

        # Evaluate
        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                        tgt_dataset.gallery)

        #Save the best ckpt:
        rank1 = rank_score.market1501[0]
        mAP = rank_score.map
        is_best_mAP = mAP > best_map
        best_map = max(mAP, best_map)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': iter_n + 1,
                'best_mAP': best_map,
                # 'num_ids': num_ids,
            },
            is_best_mAP,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print(
            '\n * Finished epoch {:3d}  top1: {:5.1%}  mAP: {:5.1%}  best_mAP: {:5.1%}{}\n'
            .format(iter_n + 1, rank1, mAP, best_map,
                    ' *' if is_best_mAP else ''))

    return (rank_score.map, rank_score.market1501[0])
Пример #17
    def train(self,
              train_data,
              step,
              loss,
              dropout=0.5):  # train_data is the return value of BuMain.get_new_train_data()
        # adjust training epochs and learning rate
        epochs = self.initial_steps if step == 0 else self.later_steps  # epochs = 20 when step == 0, otherwise 2
        init_lr = 0.1 if step == 0 else 0.01  # lr = 0.1 when step == 0, otherwise 0.01
        step_size = self.step_size if step == 0 else sys.maxsize  # step_size = 16 when step == 0, otherwise sys.maxsize
        """ create model and dataloader """
        dataloader = self.get_dataloader(
            train_data, training=True)  # build the training data loader

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, self.model.module.CNN.base.parameters()))

        # we fixed the first three blocks to save GPU memory, so keep only the
        # backbone parameters that still require gradients
        base_params_need_for_grad = filter(
            lambda p: p.requires_grad, self.model.module.CNN.base.parameters())

        # params of the new layers
        new_params = [
            p for p in self.model.parameters() if id(p) not in base_param_ids
        ]

        # the backbone learning rate is scaled by 0.1, the new layers use the full rate
        param_groups = [
            {
                'params': base_params_need_for_grad,
                'lr_mult': 0.1
            }, {
                'params': new_params,
                'lr_mult': 1.0
            }
        ]
        # SGD optimizer: lr = 0.1 or 0.01, momentum = 0.9, weight decay = 5e-4, Nesterov momentum
        optimizer = torch.optim.SGD(param_groups,
                                    lr=init_lr,
                                    momentum=0.9,
                                    weight_decay=5e-4,
                                    nesterov=True)

        # change the learning rate by step: lr = init_lr / (10 ** (epoch // step_size))
        def adjust_lr(epoch, step_size):
            lr = init_lr / (10**(epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

        """ main training process """
        trainer = Trainer(self.model,
                          self.criterion,
                          fixed_layer=self.fixed_layer)  # fixed_layer = False
        for epoch in range(epochs):
            adjust_lr(epoch, step_size)  # step_size = 16 in the first step
            trainer.train(epoch,
                          dataloader,
                          optimizer,
                          print_freq=max(5, len(dataloader) // 30 * 10))
Пример #18
def main(args):
    cudnn.deterministic = False
    cudnn.benchmark = True

    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create data loaders
    dataset, num_classes, train_loader, _, _ = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.combine_all,
                 args.workers, args.test_fea_batch)

    # Create model
    #model = seTest.resnst50().cuda()
    model = resmap.create(args.arch,
                          ibn_type=args.ibn,
                          final_layer=args.final_layer,
                          neck=args.neck).cuda()
    num_features = model.num_features
    #num_features = 64
    # print(model)
    # print('\n')

    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
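    # hei x wid is the spatial size of the final feature map (input size divided by the stride
    # of the chosen layer); QAConv matches query and gallery feature maps at this resolution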
    matcher = QAConv(num_features, hei, wid).cuda()

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion
    criterion = ClassMemoryLoss(matcher, num_classes, num_features, hei, wid,
                                args.mem_batch_size).cuda()

    # Optimizer
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.base.parameters(),
        'lr': 0.1 * args.lr
    }, {
        'params': new_params,
        'lr': args.lr
    }, {
        'params': criterion.parameters(),
        'lr': args.lr
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    # Load from checkpoint
    start_epoch = 0

    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(
                osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))
    elif args.pre_epochs > 0:
        pre_tr = PreTrainer(model, criterion, optimizer, train_loader,
                            args.pre_epochs, args.max_steps, args.num_trials)
        result_file = osp.join(exp_database_dir, args.method,
                               'pretrain_metric.txt')
        model, criterion, optimizer = pre_tr.train(result_file, args.method,
                                                   args.sub_method)

    # Decay LR by a factor of 0.1 every step_size epochs
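    # (last_epoch=start_epoch - 1 makes the schedule resume at the correct decay step
    # when training restarts from a checkpoint)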
    lr_scheduler = StepLR(optimizer,
                          step_size=args.step_size,
                          gamma=0.1,
                          last_epoch=start_epoch - 1)

    model = nn.DataParallel(model).cuda()
    criterion = nn.DataParallel(criterion).cuda()

    enhance_data_aug = False

    if not args.evaluate:
        # Trainer
        trainer = Trainer(model, criterion)

        t0 = time.time()
        # Start training
        for epoch in range(start_epoch, args.epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)

            lr = list(map(lambda group: group['lr'], optimizer.param_groups))
            lr_scheduler.step()
            train_time = time.time() - t0

            print(
                '* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. Training time: %.0f seconds.                  \n'
                %
                (epoch + 1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            save_checkpoint(
                {
                    'model': model.module.state_dict(),
                    'criterion': criterion.module.state_dict(),
                    'optim': optimizer.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

            if not enhance_data_aug and epoch < args.epochs - 1 and acc > args.acc_thr:
                enhance_data_aug = True
                print('\nAcc = %.2f%% > %.2f%%. Start to Flip and Block.\n' %
                      (acc * 100, args.acc_thr * 100))
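                # once training accuracy passes acc_thr, switch to a stronger augmentation
                # (flips, rotation, color jitter, random occlusion) to keep the matching task hard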

                train_transformer = T.Compose([
                    T.Resize((args.height, args.width), interpolation=3),
                    T.Pad(10),
                    T.RandomCrop((args.height, args.width)),
                    T.RandomHorizontalFlip(0.5),
                    T.RandomRotation(5),
                    T.ColorJitter(brightness=(0.5, 2.0),
                                  contrast=(0.5, 2.0),
                                  saturation=(0.5, 2.0),
                                  hue=(-0.1, 0.1)),
                    T.RandomOcclusion(args.min_size, args.max_size),
                    T.ToTensor(),
                ])

                train_loader = DataLoader(Preprocessor(
                    dataset.train,
                    root=osp.join(dataset.images_dir, dataset.train_path),
                    transform=train_transformer),
                                          batch_size=args.batch_size,
                                          num_workers=args.workers,
                                          shuffle=True,
                                          pin_memory=True,
                                          drop_last=True)

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    avg_rank1 = 0
    avg_mAP = 0
    num_testsets = 0
    results = {}

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width, args.workers, args.test_fea_batch)

        if not args.do_tlift:
            testset.has_time_info = False
        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, test_rank1_tlift, test_mAP_tlift, test_dist, \
        test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(matcher, testset, test_query_loader, test_gallery_loader,
                                args.test_gal_batch, args.test_prob_batch,
                               args.tau, args.sigma, args.K, args.alpha)

        results[test_name] = [test_rank1, test_mAP]
        if test_name != args.dataset:
            avg_rank1 += test_rank1
            avg_mAP += test_mAP
            num_testsets += 1

        if testset.has_time_info:
            print(
                '  %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
                ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n' %
                (test_name, test_rank1 * 100, test_mAP * 100,
                 test_rank1_rerank * 100, test_mAP_rerank * 100,
                 test_rank1_tlift * 100, test_mAP_tlift * 100))
        else:
            print('  %s: rank1=%.1f, mAP=%.1f.\n' %
                  (test_name, test_rank1 * 100, test_mAP * 100))

        result_file = osp.join(exp_database_dir, args.method,
                               test_name + '_results.txt')
        with open(result_file, 'a') as f:
            f.write('%s/%s:\n' % (args.method, args.sub_method))
            if testset.has_time_info:
                f.write(
                    '\t%s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f, rank1_rerank_tlift=%.1f, '
                    'mAP_rerank_tlift=%.1f.\n\n' %
                    (test_name, test_rank1 * 100, test_mAP * 100,
                     test_rank1_rerank * 100, test_mAP_rerank * 100,
                     test_rank1_tlift * 100, test_mAP_tlift * 100))
            else:
                f.write('\t%s: rank1=%.1f, mAP=%.1f.\n\n' %
                        (test_name, test_rank1 * 100, test_mAP * 100))

        if args.save_score:
            test_gal_list = np.array(
                [fname for fname, _, _, _ in testset.gallery], dtype=object)
            test_prob_list = np.array(
                [fname for fname, _, _, _ in testset.query], dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method,
                                       '%s_score.mat' % test_name)
            sio.savemat(test_score_file, {
                'score': 1. - test_dist,
                'score_rerank': 1. - test_dist_rerank,
                'score_tlift': 1. - test_dist_tlift,
                'gal_time': pre_tlift_dict['gal_time'],
                'prob_time': pre_tlift_dict['prob_time'],
                'gal_list': test_gal_list,
                'prob_list': test_prob_list,
                'gal_ids': test_gal_ids,
                'prob_ids': test_prob_ids,
                'gal_cams': test_gal_cams,
                'prob_cams': test_prob_cams
            },
                        oned_as='column',
                        do_compression=True)

    test_time = time.time() - t0
    avg_rank1 /= num_testsets
    avg_mAP /= num_testsets

    for key in results.keys():
        print('%s: rank1=%.1f%%, mAP=%.1f%%.' %
              (key, results[key][0] * 100, results[key][1] * 100))
    print('Average: rank1=%.2f%%, mAP=%.2f%%.\n\n' %
          (avg_rank1 * 100, avg_mAP * 100))

    result_file = osp.join(exp_database_dir, args.method,
                           args.sub_method[:-5] + '_avg_results.txt')
    with open(result_file, 'a') as f:
        f.write('%s/%s:\n' % (args.method, args.sub_method))
        if not args.evaluate:
            f.write('\t Loss: %.3f, acc: %.2f%%. ' % (loss, acc * 100))
            f.write("Train: %.0fs. " % train_time)
        f.write("Test: %.0fs. " % test_time)
        f.write('Rank1: %.2f%%, mAP: %.2f%%.\n' %
                (avg_rank1 * 100, avg_mAP * 100))
        for key in results.keys():
            f.write('\t %s: Rank1: %.1f%%, mAP: %.1f%%.\n' %
                    (key, results[key][0] * 100, results[key][1] * 100))
        f.write('\n')

    if not args.evaluate:
        print('Finished training at epoch %d, loss = %.3f, acc = %.2f%%.\n' %
              (epoch + 1, loss, acc * 100))
        print(
            "Total training time: %.3f sec. Average training time per epoch: %.3f sec."
            % (train_time, train_time / (args.epochs - start_epoch)))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')
Пример #19
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, 'num_instances should divide batch_size'

    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.batch_size, args.num_instances, args.workers)

    # Create model
    print('num_features: %d, num_classes: %d' % (args.num_features, num_classes))
    model = models.create("deepperson",
                          num_features=args.num_features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluate(model, val_loader, dataset.val, dataset.val)
        print("Test:")
        evaluate(model, test_loader, dataset.query, dataset.gallery)
        return

    # Criterion
    criterion = DeepLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, add_soft=args.add_soft)
        if epoch < args.start_save:
            continue
        top1 = evaluate(model, val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with last model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluate(model, test_loader, dataset.query, dataset.gallery)
Пример #20
    # Change the learning rate by step.
    def adjust_lr(epoch, step_size=55):
        lr = 1e-3 * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)  # !!! !!!
        return epoch < step_size  # whether to keep using the unselected (unlabeled) data

    #
    labeled_loader = get_loader(labeled_set, dataset.images_dir, training=True)
    unlabeled_loader = get_loader(unlabeled_set,
                                  dataset.images_dir,
                                  training=True)
    #
    criterion = Exclusive_Loss(len(unlabeled_set), t=10).cuda()
    #
    trainer = Trainer(model, unlabeled_criterion=criterion)
    #
    for epoch in range(epochs):
        #
        trainer.train(epoch,
                      labeled_loader,
                      unlabeled_loader,
                      optimizer,
                      use_unselect_data=adjust_lr(epoch, step_size=55))
    save_checkpoint({
        'step': step,
        'state_dict': model.state_dict()
    }, fpath, 'checkpoint_' + str(step) + '.pt')
    # Get the labeled__features and unlabeled_features.
    labeled__features, _ = extract_features(
        model, get_loader(labeled_data, dataset.images_dir, training=False))
Пример #21
    def train(self,
              train_data,
              step,
              epochs=70,
              step_size=55,
              init_lr=0.1,
              dropout=0.5):
        """ create model and dataloader """
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes,
                              mode=self.mode)
        model = nn.DataParallel(model).cuda()
        dataloader = self.get_dataloader(train_data, training=True)

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, model.module.CNN.base.parameters()))

        # we fixed the first three blocks to save GPU memory
        base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                           model.module.CNN.parameters())

        # params of the new layers
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]

        # set the learning rate for backbone to be 0.1 times
        param_groups = [{
            'params': base_params_need_for_grad,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]

        criterion = nn.CrossEntropyLoss().cuda()
        optimizer = torch.optim.SGD(param_groups,
                                    lr=init_lr,
                                    momentum=0.5,
                                    weight_decay=5e-4,
                                    nesterov=True)

        # change the learning rate by step
        def adjust_lr(epoch, step_size):
            lr = init_lr / (10**(epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

            if epoch % step_size == 0:
                print("Epoch {}, current lr {}".format(epoch, lr))

        """ main training process """
        trainer = Trainer(model, criterion)
        for epoch in range(epochs):
            adjust_lr(epoch, step_size)
            trainer.train(epoch,
                          dataloader,
                          optimizer,
                          print_freq=len(dataloader) // 30 * 10)

        torch.save(
            model.state_dict(),
            osp.join(self.save_path, "{}_step_{}.ckpt".format(self.mode,
                                                              step)))
        self.model = model
Пример #22
def main(args):
    cudnn.deterministic = False
    cudnn.benchmark = True

    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.combine_all,
                 args.min_size, args.max_size, args.workers, args.test_fea_batch)

    # Create model
    model = resmap.create(args.arch,
                          final_layer=args.final_layer,
                          neck=args.neck).cuda()
    num_features = model.num_features
    # print(model)
    print('\n')

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion

    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
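    # hei x wid is the final feature-map size; QAConvLoss keeps a per-class feature-map
    # memory of this size and matches samples against it in chunks of mem_batch_size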
    criterion = QAConvLoss(num_classes, num_features, hei, wid,
                           args.mem_batch_size).cuda()

    # Optimizer
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.base.parameters(),
        'lr': 0.1 * args.lr
    }, {
        'params': new_params,
        'lr': args.lr
    }, {
        'params': criterion.parameters(),
        'lr': args.lr
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    # Decay LR by a factor of 0.1 every step_size epochs
    lr_scheduler = StepLR(optimizer, step_size=args.step_size, gamma=0.1)

    # Load from checkpoint
    start_epoch = 0

    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(
                osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    model = nn.DataParallel(model).cuda()
    criterion = nn.DataParallel(criterion).cuda()

    if not args.evaluate:
        # Trainer
        trainer = Trainer(model, criterion)

        t0 = time.time()
        # Start training
        for epoch in range(start_epoch, args.epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)

            lr = list(map(lambda group: group['lr'], optimizer.param_groups))
            lr_scheduler.step(epoch + 1)
            train_time = time.time() - t0

            print(
                '* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. Training time: %.0f seconds.\n'
                %
                (epoch + 1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            save_checkpoint(
                {
                    'model': model.module.state_dict(),
                    'criterion': criterion.module.state_dict(),
                    'optim': optimizer.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width, args.test_fea_batch)

        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, test_rank1_tlift, test_mAP_tlift, test_dist, \
        test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(test_query_loader, test_gallery_loader, testset, criterion.module,
                               args.test_gal_batch, args.test_prob_batch,
                               args.tau, args.sigma, args.K, args.alpha)

        print('  %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
              ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n' %
              (test_name, test_rank1 * 100, test_mAP * 100,
               test_rank1_rerank * 100, test_mAP_rerank * 100,
               test_rank1_tlift * 100, test_mAP_tlift * 100))

        result_file = osp.join(exp_database_dir, args.method,
                               test_name + '_results.txt')
        with open(result_file, 'a') as f:
            f.write('%s/%s:\n' % (args.method, args.sub_method))
            f.write(
                '\t%s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f, rank1_rerank_tlift=%.1f, '
                'mAP_rerank_tlift=%.1f.\n\n' %
                (test_name, test_rank1 * 100, test_mAP * 100,
                 test_rank1_rerank * 100, test_mAP_rerank * 100,
                 test_rank1_tlift * 100, test_mAP_tlift * 100))

        if args.save_score:
            test_gal_list = np.array(
                [fname for fname, _, _, _ in testset.gallery], dtype=object)
            test_prob_list = np.array(
                [fname for fname, _, _, _ in testset.query], dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method,
                                       '%s_score.mat' % test_name)
            sio.savemat(test_score_file, {
                'score': 1. - test_dist,
                'score_rerank': 1. - test_dist_rerank,
                'score_tlift': 1. - test_dist_tlift,
                'gal_time': pre_tlift_dict['gal_time'],
                'prob_time': pre_tlift_dict['prob_time'],
                'gal_list': test_gal_list,
                'prob_list': test_prob_list,
                'gal_ids': test_gal_ids,
                'prob_ids': test_prob_ids,
                'gal_cams': test_gal_cams,
                'prob_cams': test_prob_cams
            },
                        oned_as='column',
                        do_compression=True)

    test_time = time.time() - t0
    if not args.evaluate:
        print('Finished training at epoch %d, loss %.3f, acc %.2f%%.\n' %
              (epoch + 1, loss, acc * 100))
        print(
            "Total training time: %.3f sec. Average training time per epoch: %.3f sec."
            % (train_time, train_time / (args.epochs - start_epoch)))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')
Пример #23
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print(args)
    # Create data loaders
    if args.big_height is None or args.big_width is None or args.target_height is None or args.target_width is None:
        args.big_height, args.big_width, args.target_height, args.target_width = (
            256, 256, 224, 224)
    dataset, num_classes, train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.big_height, args.big_width,
                 args.target_height, args.target_width, args.batch_size, args.num_instances,
                 args.workers, args.combine_trainval)

    # Create models
    model = models.create(name=args.arch,
                          num_classes=num_classes,
                          num_features=args.features,
                          norm=False)

    # Load from checkpoint
    start_epoch = best = 0
    if args.weights and hasattr(model, 'base'):
        print('loading resnet50')
        checkpoint = load_checkpoint(args.weights)
        del (checkpoint['fc.weight'])
        del (checkpoint['fc.bias'])
        model.base.load_state_dict(checkpoint)
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        return

    # Criterion
    if args.arch == 'ResNet50_mgn_lr' or args.arch == 'ResNet101_mgn_lr':
        criterion = MGN_loss(margin1=1.2,
                             num_instances=4,
                             alpha=1.0,
                             gamma=1.0,
                             theta=0.1,
                             has_trip=False).cuda()
    elif args.arch == 'ResNet_reid_50' or args.arch == 'ResNet_reid_101':
        criterion = XentropyLoss_SAC(theta=0.2, gamma=1).cuda()
    else:
        criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    frozen_layerName = [
        'conv1',
        'bn1',
        'relu',
        'maxpool',
        'layer1',
        'layer2',
    ]
    ##### Optimizer
    if args.frozen_sublayer:
        frozen_Source = None
        if hasattr(model.module, 'base'):
            frozen_Source = 'model.module.base.'
        elif hasattr(model.module, frozen_layerName[0]):
            frozen_Source = 'model.module.'
        else:
            raise RuntimeError(
                'Not freeze layers but frozen_sublayer is True!')

        base_params_set = set()
        for subLayer in frozen_layerName:
            if hasattr(eval(frozen_Source[:-1]), subLayer):
                print('frozen layer: ', subLayer)
                single_module_param = eval(frozen_Source + subLayer +
                                           '.parameters()')
                # base_params.append(single_module_param)
                single_module_param_set = set(map(id, single_module_param))
                base_params_set = base_params_set | single_module_param_set
            else:
                print("current model doesn't have ", subLayer)

        new_params = [
            p for p in model.parameters() if id(p) not in base_params_set
        ]

        base_params = [
            p for p in model.parameters() if id(p) in base_params_set
        ]
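        # the layers listed in frozen_layerName are not literally frozen: they are trained
        # with lr_mult = 0.1, while all remaining parameters use the full learning rate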
        param_groups = [{
            'params': base_params,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)
    # Schedule learning rate
    print(args.step_epoch)
    scheduler = WarmupMultiStepLR(optimizer,
                                  args.step_epoch,
                                  gamma=args.gamma,
                                  warmup_factor=args.warm_up_factor,
                                  warmup_iters=args.warm_up_iter)
    # Start training
    for epoch in range(start_epoch + 1, args.epochs + 1):
        scheduler.step()
        trainer.train(epoch, train_loader, optimizer)

        if epoch % args.epoch_inter == 0 or epoch >= args.dense_evaluate:
            tmp_mAP, tmp_res = evaluator.evaluate(test_loader, dataset.query,
                                                  dataset.gallery)
            if epoch >= args.start_save:
                if tmp_mAP > best:
                    best = tmp_mAP
                    flag = True
                else:
                    flag = False
                save_checkpoint(
                    {
                        'state_dict': model.module.state_dict(),
                        'epoch': epoch,
                        'best_map': tmp_mAP
                    },
                    flag,
                    fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
Пример #24
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (224, 224)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    # Create model
    model, model_discriminator = models.create(args.arch, num_classes=num_classes, num_features=args.features)
    model = model.cuda()
    model_discriminator = model_discriminator.cuda()
    evaluator = Evaluator(model)

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['model'])
        model_discriminator.load_state_dict(checkpoint['model_discriminator'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  "
          .format(start_epoch))
    #model = nn.DataParallel(model).cuda()

    if args.evaluate:
        metric.train(model, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)
        exit()

    # Init
    current_margin = args.margin
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes).cuda()  # nn.CrossEntropyLoss().cuda()
    criterion_I = TripletLoss(margin=current_margin).cuda()
    criterion_D = nn.CrossEntropyLoss().cuda()
    print(args)

    # Observe that all parameters are being optimized
    if args.arch == 'ide':
        ignored_params = list(map(id, model.model.fc.parameters() )) + list(map(id, model.classifier.parameters() ))
    else:
        ignored_params = list(map(id, model.classifier.parameters())) + list(map(id, model.base.fc.parameters() )) 
    base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
    for k, v in model.named_parameters():
        print(k)
    # optimizer_ft = optim.Adam([
    #          {'params': base_params, 'lr': 0.0001},
    #          {'params': model.classifier.parameters(), 'lr': 0.0001},
    #      ], weight_decay=5e-4)

    # optimizer_ft = torch.optim.SGD([
    #          {'params': filter(lambda p: p.requires_grad,base_params), 'lr': 0.0001},
    #          {'params': model.classifier.parameters(), 'lr': 0.0001},
    #     ],
    #     momentum=0.9,
    #     weight_decay=5e-4,
    #     nesterov=True)


    optimizer_ft = torch.optim.Adam([
             {'params': filter(lambda p: p.requires_grad,base_params), 'lr': 1e-4},
             {'params': model.classifier.parameters(), 'lr': 1e-4},
        ],
        weight_decay=5e-4)

    optimizer_discriminator = torch.optim.Adam([
             {'params': model_discriminator.model.parameters(), 'lr': 1e-4},
             {'params': model_discriminator.classifier.parameters(), 'lr': 1e-4}
        ],
        weight_decay=5e-4)

    # optimizer_discriminator = optim.Adam([
    #          {'params': model_discriminator.model.parameters(), 'lr': 0.0001},
    #          {'params': model_discriminator.classifier.parameters(), 'lr': 0.0001}
    #      ], weight_decay=5e-4)

    # Trainer
    trainer = Trainer(model, model_discriminator, criterion_z, criterion_I, criterion_D, trainvallabel, 1, 1, 0.15, 0.05, 5)  # c: 0.15, u: 0.05

    flag = 1
    best_top1 = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        triple_loss, tot_loss = trainer.train(epoch, train_loader, optimizer_ft, optimizer_discriminator)
        '''
        if (flag == 1 and triple_loss < 0.1):
            for g in optimizer_ft.param_groups:
                g['lr'] = 0.001
            flag = 0
        if (flag == 0 and triple_loss > 0.1):
            for g in optimizer_ft.param_groups:
                g['lr'] = 0.0001
            flag = 1
        '''
        save_checkpoint({
            'model': model.state_dict(),
            'model_discriminator': model_discriminator.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, False, epoch, args.logs_dir, fpath='checkpoint.pth.tar')
        print(epoch)
        if epoch < 200:
            continue
        if epoch % 10 != 0:
            continue
        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'model': model.state_dict(),
            'model_discriminator': model_discriminator.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, epoch, args.logs_dir, fpath='checkpoint.pth.tar')

    print('Test with best model:')
    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    checkpoint = load_checkpoint(osp.join(args.logs_dir,'model_best.pth.tar'))
    model.load_state_dict(checkpoint['model'])
    metric.train(model, train_loader)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)
    print(args)
Пример #25
def main(args):
    cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print('log_dir= ', args.logs_dir)

    # Print logs
    print('args= ', args)

    # Create data loaders
    dataset, num_classes, query_loader, gallery_loader, propagate_loader = get_data(
        args.data_dir, args.target, args.height, args.width, args.batch_size,
        args.re, args.workers)

    # Create model
    model = stb_net.MemoryBankModel(out_dim=2048, use_bnneck=args.use_bnneck)

    # Create memory bank
    cap_memory = CAPMemory(beta=args.inv_beta,
                           alpha=args.inv_alpha,
                           all_img_cams=dataset.target_train_all_img_cams)

    # Set model
    model = nn.DataParallel(model.to(device))
    cap_memory = cap_memory.to(device)

    # Load from checkpoint
    if len(args.load_ckpt) > 0:
        print('  Loading pre-trained model: {}'.format(args.load_ckpt))
        trained_dict = torch.load(args.load_ckpt)
        filtered_trained_dict = {
            k: v
            for k, v in trained_dict.items()
            if not k.startswith('module.classifier')
        }
        for k in filtered_trained_dict.keys():
            if 'embeding' in k:
                print('pretrained model has key= {}'.format(k))
        model_dict = model.state_dict()
        model_dict.update(filtered_trained_dict)
        model.load_state_dict(model_dict)

    # Evaluator
    if args.evaluate:
        print("Test:")
        eval_results = test_model(model, query_loader, gallery_loader)
        print(
            'rank1: %.4f, rank5: %.4f, rank10: %.4f, rank20: %.4f, mAP: %.4f' %
            (eval_results[1], eval_results[2], eval_results[3],
             eval_results[4], eval_results[0]))
        return

    # Optimizer
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = args.base_lr
        weight_decay = args.weight_decay
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]

    optimizer = torch.optim.Adam(params)
    lr_scheduler = WarmupMultiStepLR(optimizer,
                                     args.milestones,
                                     gamma=0.1,
                                     warmup_factor=0.01,
                                     warmup_iters=10)

    # Trainer
    trainer = Trainer(model, cap_memory)

    # Start training
    for epoch in range(args.epochs):
        lr_scheduler.step(epoch)

        # image grouping
        print('Epoch {} image grouping:'.format(epoch))
        updated_label, init_intra_id_feat = img_association(
            model,
            propagate_loader,
            min_sample=4,
            eps=args.thresh,
            rerank=True,
            k1=20,
            k2=6,
            intra_id_reinitialize=True)
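        # img_association presumably clusters target-domain features into pseudo
        # labels (the eps / min_sample / re-ranking arguments suggest a DBSCAN-style
        # grouping) and returns initial intra-identity features for the memory bank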

        # update train loader
        new_train_loader, loader_size = update_train_loader(
            dataset,
            dataset.target_train,
            updated_label,
            args.height,
            args.width,
            args.batch_size,
            args.re,
            args.workers,
            dataset.target_train_all_img_cams,
            sample_position=5)
        num_batch = int(float(loader_size) / args.batch_size)

        # train an epoch
        trainer.train(epoch,
                      new_train_loader,
                      optimizer,
                      num_batch=num_batch,
                      all_pseudo_label=torch.from_numpy(updated_label).to(
                          torch.device('cuda')),
                      init_intra_id_feat=init_intra_id_feat)

        # test
        if (epoch + 1) % 10 == 0:
            print('Test with epoch {} model:'.format(epoch))
            eval_results = test_model(model, query_loader, gallery_loader)
            print(
                '    rank1: %.4f, rank5: %.4f, rank10: %.4f, rank20: %.4f, mAP: %.4f'
                % (eval_results[1], eval_results[2], eval_results[3],
                   eval_results[4], eval_results[0]))

        # save final model
        if (epoch + 1) % args.epochs == 0:
            torch.save(
                model.state_dict(),
                osp.join(args.logs_dir,
                         'final_model_epoch_' + str(epoch + 1) + '.pth'))
            print('Final Model saved.')
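Example #25 wraps its Adam optimizer in a WarmupMultiStepLR, and Example #27 further down uses the same scheduler. That scheduler is project-specific and not listed here; as a rough sketch, the same warmup-then-step-decay behaviour can be approximated with PyTorch's built-in LambdaLR (the factory name and defaults below are illustrative assumptions).

from bisect import bisect_right

import torch


def make_warmup_multistep(optimizer, milestones, gamma=0.1,
                          warmup_factor=0.01, warmup_iters=10):
    """Linear warmup for warmup_iters epochs, then step decay at each milestone."""
    milestones = sorted(milestones)

    def lr_lambda(epoch):
        if epoch < warmup_iters:
            alpha = epoch / float(warmup_iters)
            warmup = warmup_factor * (1.0 - alpha) + alpha
        else:
            warmup = 1.0
        return warmup * gamma ** bisect_right(milestones, epoch)

    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)


# Usage sketch:
#   scheduler = make_warmup_multistep(optimizer, milestones=[40, 70])
#   for epoch in range(num_epochs):
#       ...train one epoch...
#       scheduler.step()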
Example #26
def main(args):
    # seed
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.benchmark = True

    if args.logs_dir is None:
        args.logs_dir = osp.join(f'logs/ide/{args.dataset}', datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S'))
    else:
        args.logs_dir = osp.join(f'logs/ide/{args.dataset}', args.logs_dir)
    if args.train:
        os.makedirs(args.logs_dir, exist_ok=True)
        copy_tree('./reid', args.logs_dir + '/scripts/reid')
        for script in os.listdir('.'):
            if script.split('.')[-1] == 'py':
                dst_file = os.path.join(args.logs_dir, 'scripts', os.path.basename(script))
                shutil.copyfile(script, dst_file)
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'), )
    print('Settings:')
    print(vars(args))
    print('\n')

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                 args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.re, 0, args.camstyle)

    # Create model
    model = models.create('ide', feature_dim=args.feature_dim, num_classes=num_classes, norm=args.norm,
                          dropout=args.dropout, last_stride=args.last_stride, arch=args.arch)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        resume_fname = osp.join(f'logs/ide/{args.dataset}', args.resume, 'model_best.pth.tar')
        model, start_epoch, best_top1 = checkpoint_loader(model, resume_fname)
        print("=> Last epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))
        start_epoch += 1
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda() if not args.LSR else LSR_loss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):  # low learning_rate the base network (aka. ResNet-50)
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
        param_groups = [{'params': model.module.base.parameters(), 'lr_mult': 0.1},
                        {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    if args.camstyle == 0:
        trainer = Trainer(model, criterion)
    else:
        trainer = CamStyleTrainer(model, criterion, camstyle_loader)

    # Evaluator
    evaluator = Evaluator(model)

    if args.train:
        # Schedule learning rate
        def adjust_lr(epoch):
            step_size = args.step_size
            lr = args.lr * (0.1 ** (epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

        # Draw Curve
        epoch_s = []
        loss_s = []
        prec_s = []
        eval_epoch_s = []
        eval_top1_s = []

        # Start training
        for epoch in range(start_epoch + 1, args.epochs + 1):
            t0 = time.time()
            adjust_lr(epoch)
            # train_loss, train_prec = 0, 0
            train_loss, train_prec = trainer.train(epoch, train_loader, optimizer, fix_bn=args.fix_bn)

            if epoch < args.start_save:
                continue

            if epoch % 5 == 0:
                top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
                eval_epoch_s.append(epoch)
                eval_top1_s.append(top1)
            else:
                top1 = 0

            is_best = top1 >= best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch,
                'best_top1': best_top1,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
            epoch_s.append(epoch)
            loss_s.append(train_loss)
            prec_s.append(train_prec)
            draw_curve(os.path.join(args.logs_dir, 'train_curve.jpg'), epoch_s, loss_s, prec_s,
                       eval_epoch_s, None, eval_top1_s)

            t1 = time.time()
            t_epoch = t1 - t0
            print('\n * Finished epoch {:3d}  top1: {:5.1%}  best_eval: {:5.1%} {}\n'.
                  format(epoch, top1, best_top1, ' *' if is_best else ''))
            print('*************** Epoch takes time: {:^10.2f} *********************\n'.format(t_epoch))
            pass

        # Final test
        print('Test with best model:')
        model, start_epoch, best_top1 = checkpoint_loader(model, osp.join(args.logs_dir, 'model_best.pth.tar'))
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))

        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
    else:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
        pass
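The save_checkpoint and checkpoint_loader helpers used throughout these examples are not listed on this page. A minimal sketch of the usual pattern, which stores the latest checkpoint and copies it to model_best.pth.tar whenever top-1 improves, is given below; the exact signatures in each repository may differ.

import os.path as osp
import shutil

import torch


def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
    """Save the latest training state and keep a copy of the best one (sketch)."""
    torch.save(state, fpath)
    if is_best:
        shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model_best.pth.tar'))


def checkpoint_loader(model, fpath):
    """Load weights back into model, returning the stored epoch and best top-1."""
    checkpoint = torch.load(fpath, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    return model, checkpoint.get('epoch', 0), checkpoint.get('best_top1', 0.0)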
Example #27
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log'))

    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else (256, 128)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob)
    model, model_discriminator = models.create(args.arch,
                                               num_classes=num_classes,
                                               num_features=args.features,
                                               attention_mode=args.att_mode)
    # print(model)
    model = model.cuda()
    model_discriminator = model_discriminator.cuda()

    evaluator = Evaluator(model)
    metric = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['model'])
        model_discriminator.load_state_dict(checkpoint['model_discriminator'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    if args.evaluate:
        metric.train(model, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        exit()

    current_margin = args.margin
    #criterion_z = nn.CrossEntropyLoss().cuda()
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes,
                                          epsilon=0.5).cuda()
    criterion_I = TripletLoss(margin=current_margin).cuda()
    criterion_D = nn.CrossEntropyLoss().cuda()

    print(args)

    if args.arch == 'ide':
        ignored_params = list(map(id, model.model.fc.parameters())) + list(
            map(id, model.classifier.parameters()))
    else:
        ignored_params = list(map(id, model.classifier.parameters()))

    base_params = filter(lambda p: id(p) not in ignored_params,
                         model.parameters())

    if args.use_adam:
        optimizer_ft = torch.optim.Adam([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': model.classifier.parameters(),
                'lr': args.lr
            },
        ],
                                        weight_decay=5e-4)

        optimizer_discriminator = torch.optim.Adam(
            [{
                'params': model_discriminator.model.parameters(),
                'lr': args.lr
            }, {
                'params': model_discriminator.classifier.parameters(),
                'lr': args.lr
            }],
            weight_decay=5e-4)

    else:
        optimizer_ft = torch.optim.SGD([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': model.classifier.parameters(),
                'lr': args.lr
            },
        ],
                                       momentum=0.9,
                                       weight_decay=5e-4,
                                       nesterov=True)
        optimizer_discriminator = torch.optim.SGD([
            {
                'params': model_discriminator.model.parameters(),
                'lr': args.lr
            },
            {
                'params': model_discriminator.classifier.parameters(),
                'lr': args.lr
            },
        ],
                                                  momentum=0.9,
                                                  weight_decay=5e-4,
                                                  nesterov=True)

    scheduler = WarmupMultiStepLR(optimizer_ft, args.mile_stone, args.gamma,
                                  args.warmup_factor, args.warmup_iters,
                                  args.warmup_methods)

    trainer = Trainer(model, model_discriminator, criterion_z, criterion_I,
                      criterion_D, trainvallabel, 1, 1, 0.3, 0.05, 5)

    flag = 1
    best_top1 = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        scheduler.step()
        triple_loss, tot_loss = trainer.train(epoch, train_loader,
                                              optimizer_ft,
                                              optimizer_discriminator)

        save_checkpoint(
            {
                'model': model.state_dict(),
                'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        if epoch < 100:
            continue
        if epoch % 10 != 0:
            continue

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'model': model.state_dict(),
                'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

    print('Test with best model:')
    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.format(
        epoch, top1, best_top1, ' *' if is_best else ''))

    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['model'])
    metric.train(model, train_loader)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, metric)
    print(args)
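Example #27 trains the identity classifier with CrossEntropyLabelSmooth(epsilon=0.5), and Example #26 selects an LSR_loss when args.LSR is set. Neither implementation is shown on this page; the sketch below is one common label-smoothing variant (spreading epsilon uniformly over the non-target classes) and not necessarily the exact class these repositories use.

import torch
import torch.nn as nn
import torch.nn.functional as F


class LabelSmoothCrossEntropy(nn.Module):
    """Cross-entropy with uniform label smoothing (sketch)."""

    def __init__(self, num_classes, epsilon=0.1):
        super().__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon

    def forward(self, logits, targets):
        # logits: (batch, num_classes), targets: (batch,) integer class ids
        log_probs = F.log_softmax(logits, dim=1)
        with torch.no_grad():
            smooth = torch.full_like(log_probs, self.epsilon / (self.num_classes - 1))
            smooth.scatter_(1, targets.unsqueeze(1), 1.0 - self.epsilon)
        return (-smooth * log_probs).sum(dim=1).mean()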
Example #28
def main(args):
    # For fast training.
    cudnn.benchmark = True
    torch_device = 'cuda:' + str(args.gpuid)
    device = torch.device(torch_device)
    torch.cuda.set_device(device)
    fixRandomSeed(1)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(
            osp.join(args.logs_dir,
                     'log_%s_%s_C.txt' % (args.source, args.target)))
    print('log_dir=', args.logs_dir)

    # Print logs
    print(args)

    # Create data loaders
    dataset, num_classes, source_train_loader, target_train_loader, \
    query_loader, gallery_loader = get_data(args.data_dir, args.source,
                                            args.target, args.height,
                                            args.width, args.batch_size,
                                            args.re, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Invariance learning model
    num_tgt = len(dataset.target_train)
    model_inv = InvNet(args.features,
                       num_tgt,
                       beta=args.inv_beta,
                       knn=args.knn,
                       alpha=args.inv_alpha)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        model_inv.load_state_dict(checkpoint['state_dict_inv'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    # Set model
    model = nn.DataParallel(model, device_ids=[int(args.gpuid)])
    model_inv = model_inv.cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature)
        return

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))

    base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                       model.module.base.parameters())

    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': base_params_need_for_grad,
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, model_inv, lmd=args.lmd)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = args.epochs_decay
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader,
                      optimizer)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'state_dict_inv': model_inv.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))
        if (epoch + 1) % 5 == 0:
            print('Testing after %d epoch :' % (epoch))
            evaluator = Evaluator(model)
            evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                               dataset.gallery, args.output_feature)

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.output_feature)
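Example #28 (like #26 and #30) wraps the network in nn.DataParallel but checkpoints model.module.state_dict(), so the saved keys carry no 'module.' prefix and can later be loaded into an unwrapped model. A small, self-contained sketch of that round trip, using a stand-in module and a hypothetical path, is:

import torch
import torch.nn as nn

net = nn.Linear(10, 5)              # stand-in for the re-id backbone
wrapped = nn.DataParallel(net)

# Save the inner module so the keys are not prefixed with 'module.'
torch.save({'state_dict': wrapped.module.state_dict()}, 'checkpoint.pth.tar')

# Later, a plain (unwrapped) model loads the same weights directly
restored = nn.Linear(10, 5)
restored.load_state_dict(torch.load('checkpoint.pth.tar')['state_dict'])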
Example #29
def main(args):

    args = parser.parse_args()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'Part_log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)

    model = models.create(args.arch,
                          num_features=512,
                          pretrained=True,
                          dropout=args.dropout,
                          num_classes=args.features,
                          embedding=False)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        #start_epoch = checkpoint['epoch']
        start_epoch = 0
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model)
    #model = nn.DataParallel(model).cpu()
    if args.cuda:
        model.cuda()
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    # criterion = TripletLoss(margin=args.margin).cpu()
    criterion = TripletLoss(margin=args.margin)
    if args.cuda:
        criterion.cuda()
    #
    # Optimizer

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    '''
    optimizer = torch.optim.Adam([{'params': model.module.w1.parameters(), 'lr': 1e-6, 'weight_decay': 5e-4},
                                  {'params': model.module.w2.parameters(), 'lr': 1e-6, 'weight_decay': 5e-4},
                                  {'params': model.module.w3.parameters(), 'lr': 1e-6, 'weight_decay': 5e-4},
                                  {'params': model.module.w4.parameters(), 'lr': 1e-6, 'weight_decay': 5e-4},
                                  {'params': model.module.w5.parameters(), 'lr': 1e-6, 'weight_decay': 5e-4}], lr=args.lr,
                                 weight_decay=args.weight_decay)'''

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    accs_market = AverageMeter()
    accs_cuhk03 = AverageMeter()
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1, cuhk03_top1, market_top1 = evaluator.evaluate(
            val_loader, dataset.val, dataset.val)
        accs_market.update(market_top1, args.batch_size * 40)
        accs_cuhk03.update(cuhk03_top1, args.batch_size * 40)

        plotter.plot('acc', 'test-multishot', epoch, market_top1)
        plotter.plot('acc', 'test-singleshot', epoch, cuhk03_top1)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
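This example, like #27 and #30, optimises a TripletLoss(margin=args.margin) over the embedding batch. The loss class is not reproduced on this page; the sketch below shows one common batch-hard formulation (hardest positive and hardest negative per anchor) and is offered as an assumption rather than the repositories' exact implementation.

import torch
import torch.nn as nn


class BatchHardTripletLoss(nn.Module):
    """Batch-hard triplet loss sketch: hardest positive / hardest negative per anchor."""

    def __init__(self, margin=0.3):
        super().__init__()
        self.ranking = nn.MarginRankingLoss(margin=margin)

    def forward(self, embeddings, labels):
        # Assumes each identity occurs more than once and at least two identities
        # are present in the batch (what the num_instances asserts above guarantee).
        dist = torch.cdist(embeddings, embeddings)             # pairwise L2 distances
        same_id = labels.unsqueeze(0) == labels.unsqueeze(1)
        hardest_pos = dist.masked_fill(~same_id, float('-inf')).max(dim=1).values
        hardest_neg = dist.masked_fill(same_id, float('inf')).min(dim=1).values
        target = torch.ones_like(hardest_pos)                  # want neg > pos by margin
        return self.ranking(hardest_neg, hardest_pos, target)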
Example #30
def main(args):
    # seed
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.benchmark = True

    if args.logs_dir is None:
        args.logs_dir = osp.join(
            f'logs/triplet/{args.dataset}',
            datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S'))
    else:
        args.logs_dir = osp.join(f'logs/triplet/{args.dataset}', args.logs_dir)
    if args.train:
        os.makedirs(args.logs_dir, exist_ok=True)
        copy_tree('./reid', args.logs_dir + '/scripts/reid')
        for script in os.listdir('.'):
            if script.split('.')[-1] == 'py':
                dst_file = os.path.join(args.logs_dir, 'scripts',
                                        os.path.basename(script))
                shutil.copyfile(script, dst_file)
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'), )
    print('Settings:')
    print(vars(args))
    print('\n')

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be larger than 1"
    assert args.batch_size % args.num_instances == 0, 'num_instances should divide batch_size'
    dataset, num_classes, train_loader, query_loader, gallery_loader, _ = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                 args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.re, args.num_instances,
                 False)

    # Create model for triplet (num_classes = 0, num_instances > 0)
    model = models.create('ide',
                          feature_dim=args.feature_dim,
                          num_classes=0,
                          norm=args.norm,
                          dropout=args.dropout,
                          last_stride=args.last_stride)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        resume_fname = osp.join(f'logs/triplet/{args.dataset}', args.resume,
                                'model_best.pth.tar')
        model, start_epoch, best_top1 = checkpoint_loader(model, resume_fname)
        print("=> Last epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
        start_epoch += 1
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Evaluator
    evaluator = Evaluator(model)

    if args.train:
        # Schedule learning rate
        def adjust_lr(epoch):
            if epoch <= args.step_size:
                lr = args.lr
            else:
                lr = args.lr * (0.001**(float(epoch - args.step_size) /
                                        (args.epochs - args.step_size)))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

        # Draw Curve
        epoch_s = []
        loss_s = []
        prec_s = []
        eval_epoch_s = []
        eval_top1_s = []

        # Start training
        for epoch in range(start_epoch + 1, args.epochs + 1):
            adjust_lr(epoch)
            # train_loss, train_prec = 0, 0
            train_loss, train_prec = trainer.train(epoch,
                                                   train_loader,
                                                   optimizer,
                                                   fix_bn=args.fix_bn)

            if epoch < args.start_save:
                continue

            if epoch % 25 == 0:
                top1 = evaluator.evaluate(query_loader, gallery_loader,
                                          dataset.query, dataset.gallery)
                eval_epoch_s.append(epoch)
                eval_top1_s.append(top1)
            else:
                top1 = 0

            is_best = top1 >= best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch,
                    'best_top1': best_top1,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
            epoch_s.append(epoch)
            loss_s.append(train_loss)
            prec_s.append(train_prec)
            draw_curve(os.path.join(args.logs_dir, 'train_curve.jpg'), epoch_s,
                       loss_s, prec_s, eval_epoch_s, None, eval_top1_s)
            pass

        # Final test
        print('Test with best model:')
        model, start_epoch, best_top1 = checkpoint_loader(
            model, osp.join(args.logs_dir, 'model_best.pth.tar'))
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
    else:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        pass
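Both triplet-loss examples assert that num_instances > 1 and that it divides batch_size: every mini-batch is assembled as P identities with K = num_instances images each, which is what hard-example mining relies on. The identity sampler itself is not shown on this page; the class below is a simplified illustration of the idea under those assumptions, not the sampler these repositories actually ship.

import random
from collections import defaultdict

from torch.utils.data import Sampler


class PKSampler(Sampler):
    """Yield dataset indices grouped as P identities x K instances per batch (sketch)."""

    def __init__(self, labels, batch_size, num_instances):
        assert num_instances > 1 and batch_size % num_instances == 0
        self.num_instances = num_instances
        self.ids_per_batch = batch_size // num_instances
        self.index_by_id = defaultdict(list)
        for idx, pid in enumerate(labels):
            self.index_by_id[pid].append(idx)
        self.pids = list(self.index_by_id)

    def __iter__(self):
        pids = self.pids[:]
        random.shuffle(pids)
        batch = []
        for pid in pids:
            pool = self.index_by_id[pid]
            if len(pool) >= self.num_instances:
                picks = random.sample(pool, self.num_instances)
            else:
                picks = random.choices(pool, k=self.num_instances)
            batch.extend(picks)
            if len(batch) == self.ids_per_batch * self.num_instances:
                yield from batch
                batch = []

    def __len__(self):
        full_batches = len(self.pids) // self.ids_per_batch
        return full_batches * self.ids_per_batch * self.num_instances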