예제 #1
0
def test(args):
    """Evaluate a checkpointed re-id model and print mAP / CMC metrics.

    Restores weights from ``args.ckpt`` into the model class named by the
    config's ``MODEL`` entry, wraps it for multi-GPU, and runs the evaluator.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    config = load_configs(args.config)
    data = get_vehicle_dataloader(config, quick_check=args.check)

    # Resolve the model class by its configured name, then restore weights.
    net = globals()[config['MODEL']]()
    net.load_param(args.ckpt)
    net = nn.DataParallel(net).cuda()

    mAP, cmc = Evaluator(config, net, data).evaluate()
    # Report rank-1/5/10 from the CMC curve alongside mAP.
    print("(mAP: {:.5f} cmc-1: {:.5f} cmc-5: {:.5f} cmc-10: {:.5f})".format(
        mAP, cmc[0], cmc[4], cmc[9]))
예제 #2
0
def main(args):
    """Train or evaluate the two-branch (image + diff) classification model.

    When ``args.evaluate`` is set, loads a resumed checkpoint and runs a
    one-off test-set evaluation with a confusion matrix. Otherwise, trains
    both branches with SGD, validates and checkpoints every epoch (tracking
    best top-1), then tests with the best saved model.
    """
    # Seed numpy and torch for reproducibility. NOTE(review): CUDA kernels may
    # still be nondeterministic unless cudnn determinism is also configured.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    # Default input resolution: 144x56 for inception, 240x240 otherwise.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (240, 240)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, args.combine_trainval)

    # Create model
    # Two identically-configured branches. Presumably one consumes raw frames
    # and the other frame differences — confirm against Trainer/Evaluator.
    img_branch = models.create(args.arch,
                               cut_layer=args.cut_layer,
                               num_classes=num_classes,
                               num_features=args.features)
    diff_branch = models.create(args.arch,
                                cut_layer=args.cut_layer,
                                num_classes=num_classes,
                                num_features=args.features)

    # Load from checkpoint
    # Both branches are restored from one checkpoint dict keyed per branch;
    # training resumes from the stored epoch and best top-1 accuracy.
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        img_branch.load_state_dict(checkpoint['state_dict_img'])
        diff_branch.load_state_dict(checkpoint['state_dict_diff'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    # Wrap for (multi-)GPU execution; later code accesses the raw modules
    # through .module (e.g. for param groups and state_dict saving).
    img_branch = nn.DataParallel(img_branch).cuda()
    diff_branch = nn.DataParallel(diff_branch).cuda()
    # img_branch = nn.DataParallel(img_branch)
    # diff_branch = nn.DataParallel(diff_branch)

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()
    # criterion = nn.CrossEntropyLoss()

    # Evaluator
    evaluator = Evaluator(img_branch, diff_branch, criterion)
    if args.evaluate:
        # Evaluation-only mode: report test accuracy, plot the confusion
        # matrix, and exit without training.
        # print("Validation:")
        # top1, _ = evaluator.evaluate(val_loader)
        # print("Validation acc: {:.1%}".format(top1))
        print("Test:")
        top1, (gt, pred) = evaluator.evaluate(test_loader)
        print("Test acc: {:.1%}".format(top1))
        from confusion_matrix import plot_confusion_matrix
        plot_confusion_matrix(gt, pred, dataset.classes, args.logs_dir)
        return

    # Per-module learning-rate multipliers: backbone layers train at 0.1x the
    # base LR, the freshly-initialized classifier at the full base LR.
    img_param_groups = [
        {
            'params': img_branch.module.low_level_modules.parameters(),
            'lr_mult': 0.1
        },
        {
            'params': img_branch.module.high_level_modules.parameters(),
            'lr_mult': 0.1
        },
        {
            'params': img_branch.module.classifier.parameters(),
            'lr_mult': 1
        },
    ]

    diff_param_groups = [
        {
            'params': diff_branch.module.low_level_modules.parameters(),
            'lr_mult': 0.1
        },
        {
            'params': diff_branch.module.high_level_modules.parameters(),
            'lr_mult': 0.1
        },
        {
            'params': diff_branch.module.classifier.parameters(),
            'lr_mult': 1
        },
    ]

    # One SGD optimizer per branch, identically configured.
    img_optimizer = torch.optim.SGD(img_param_groups,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    diff_optimizer = torch.optim.SGD(diff_param_groups,
                                     lr=args.lr,
                                     momentum=args.momentum,
                                     weight_decay=args.weight_decay,
                                     nesterov=True)

    # Trainer
    trainer = Trainer(img_branch, diff_branch, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Step decay: LR is scaled by 0.1 every `step_size` epochs, then each
        # param group's custom 'lr_mult' is applied on top.
        step_size = args.step_size
        lr = args.lr * (0.1**(epoch // step_size))
        for g in img_optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        for g in diff_optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, img_optimizer, diff_optimizer)
        # Skip validation/checkpointing during early warm-up epochs.
        if epoch < args.start_save:
            continue
        top1, _ = evaluator.evaluate(val_loader)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        # Checkpoint every evaluated epoch; save_checkpoint presumably also
        # keeps a 'model_best' copy when is_best (see final-test load below).
        save_checkpoint(
            {
                'state_dict_img': img_branch.module.state_dict(),
                'state_dict_diff': diff_branch.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    # Reload the best validation checkpoint and report test accuracy.
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    img_branch.module.load_state_dict(checkpoint['state_dict_img'])
    diff_branch.module.load_state_dict(checkpoint['state_dict_diff'])
    top1, (gt, pred) = evaluator.evaluate(test_loader)
    from confusion_matrix import plot_confusion_matrix
    plot_confusion_matrix(gt, pred, dataset.classes, args.logs_dir)
    print('\n * Test Accuarcy: {:5.1%}\n'.format(top1))
예제 #3
0
# Build train/val/test loaders. NOTE(review): validation and test use a fixed
# batch size of 64 rather than args.batch_size — confirm this is intentional.
train_dataloader = TextDataLoader(dataset=train_dataset, dictionary=dictionary, batch_size=args.batch_size)
val_dataset = TextDataset(val_data, dictionary, args.sort_dataset, args.min_length, args.max_length)
val_dataloader = TextDataLoader(dataset=val_dataset, dictionary=dictionary, batch_size=64)
test_dataset = TextDataset(test_data, dictionary, args.sort_dataset, args.min_length, args.max_length)
test_dataloader = TextDataLoader(dataset=test_dataset, dictionary=dictionary, batch_size=64)

logger.info("Constructing model...")
model = args.model(n_classes=preprocessor.n_classes, dictionary=dictionary, args=args)
if args.use_gpu:
    model = model.cuda()

logger.info("Training...")
# Optimize only parameters that require gradients (frozen layers stay fixed).
trainable_params = [p for p in model.parameters() if p.requires_grad]
if args.optimizer == 'Adam':
    optimizer = Adam(params=trainable_params, lr=args.initial_lr)
elif args.optimizer == 'Adadelta':
    # NOTE(review): weight_decay=0.95 is an unusually large L2 penalty; it
    # looks like it may have been intended as Adadelta's rho — TODO confirm.
    optimizer = Adadelta(params=trainable_params, lr=args.initial_lr, weight_decay=0.95)
else:
    # Fail fast with a clear message instead of hitting a NameError on the
    # undefined `optimizer` a few lines below.
    raise ValueError("Unsupported optimizer: {}".format(args.optimizer))
lr_plateau = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.7, patience=5, min_lr=0.0001)
# BUG FIX: CrossEntropyLoss must be instantiated. Previously the class object
# itself was assigned, so criterion(logits, targets) would have constructed a
# loss module with bogus arguments instead of computing a loss.
criterion = nn.CrossEntropyLoss()
trainer = Trainer(model, train_dataloader, val_dataloader,
                  criterion=criterion, optimizer=optimizer,
                  lr_schedule=args.lr_schedule, lr_scheduler=lr_plateau,
                  use_gpu=args.use_gpu, logger=logger)
trainer.run(epochs=args.epochs)

logger.info("Evaluating...")
logger.info('Best Model: {}'.format(trainer.best_checkpoint_filepath))
model.load_state_dict(torch.load(trainer.best_checkpoint_filepath))  # load best model
evaluator = Evaluator(model, test_dataloader, use_gpu=args.use_gpu, logger=logger)
evaluator.evaluate()
예제 #4
0
class BaseTrainer(object):
    def __init__(self, cfg, model, dataset):
        """Set up data loaders, model (optionally warm-started from a
        pretrained checkpoint), optimizer, and warmup LR scheduler.

        Args:
            cfg: dict-like config (keys used here: 'PRETRAIN', 'MILESTONES',
                 'GAMMA', 'WARMUP_FACTOR', plus whatever Logger/make_optimizer read).
            model: the backbone model; wrapped in DataParallel and moved to GPU.
            dataset: composite tuple from the data pipeline, unpacked below.
        """
        super(BaseTrainer, self).__init__()
        self.cfg = cfg
        self.logger = Logger(cfg)
        #self.debugger = SummaryWriter(os.path.join('debug', cfg['NAME'], 'loss' ))
        #self.mAP_marker = SummaryWriter(os.path.join('debug', cfg['NAME'], 'mAP'))
        # Unpack the composite dataset tuple produced by the data pipeline.
        (self.source_loader, self.target_loader, self.test_loader, self.query,
         self.gallery, self.train_transformer, self.source_train,
         self.target_train, self.target_cluster_loader) = dataset
        self.best_mAP = 0
        # Previously assigned three separate times; once is enough.
        self.num_gpus = torch.cuda.device_count()

        # Warm-start from a pretrained checkpoint if the configured path exists.
        if os.path.exists(self.cfg['PRETRAIN']):
            model.load_param(self.cfg['PRETRAIN'])
            print("load checkpoint from {}".format(self.cfg['PRETRAIN']))
        self.model = nn.DataParallel(model).cuda()
        self.evaluator = Evaluator(self.cfg, self.model, dataset)

        # NOTE(review): train() replaces these with plain SGD + MultiStepLR —
        # confirm which optimizer/scheduler pair is actually intended.
        self.optimizer = make_optimizer(self.cfg, self.model, num_gpus=self.num_gpus)
        self.scheduler = WarmupMultiStepLR(self.optimizer, self.cfg['MILESTONES'], self.cfg['GAMMA'], self.cfg['WARMUP_FACTOR'])
        #self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.cfg['LR'], momentum=0.9, weight_decay=0, nesterov=True)
        #self.scheduler = MultiStepLR(self.optimizer, milestones=self.cfg['MILESTONES'])

        self.logger.write('num gpus:{} \n'.format(self.num_gpus))

    def train(self):
        """Supervised cross-entropy training loop over the source loader.

        Rebuilds the optimizer/scheduler (overwriting the ones from __init__ —
        presumably intentional for this phase), then for each epoch trains,
        logs averaged losses, steps the scheduler, and evaluates.
        """
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.cfg['LR'], momentum=0.9, weight_decay=0, nesterov=True)
        self.scheduler = MultiStepLR(self.optimizer, milestones=self.cfg['MILESTONES'])
        CE = nn.CrossEntropyLoss().cuda()

        for epoch in range(self.cfg['EPOCHS']):
            self.model.train()
            stats = ('ce_loss', 'total_loss')
            meters_trn = {stat: AverageMeter() for stat in stats}

            for i, inputs in enumerate(self.source_loader):
                imgs = Variable(inputs[0])
                labels = Variable(inputs[1]).cuda()
                scores = self.model(imgs, state='web stream')

                ce_loss = CE(scores, labels)
                total_loss = ce_loss

                self.optimizer.zero_grad()
                total_loss.backward()
                self.optimizer.step()

                # Explicit mapping instead of the fragile locals()[k] lookup,
                # which breaks under renaming and is CPython-specific.
                batch_stats = {'ce_loss': ce_loss, 'total_loss': total_loss}
                for k in stats:
                    meters_trn[k].update(batch_stats[k].item(), self.cfg['BATCHSIZE'])

            self.logger.write("epoch: %d | lr: %.5f | loss: %.5f | \n"%(
                epoch+1,
                self.scheduler.get_lr()[0],
                meters_trn['ce_loss'].avg,
            ))
            self.scheduler.step()

            self.evaluate(epoch, stats)

    def evaluate(self, epoch, stats=None):
        """Evaluate on the target benchmark, log metrics, and checkpoint.

        Tracks the best mAP seen so far and passes is_best to save_checkpoint
        so the best model can be kept separately.
        """
        if self.cfg['TARGET'] == 'VehicleID':
            # evaluate_VeID already returns the unpacked CMC ranks.
            mAP, cmc1, cmc5, cmc10 = self.evaluator.evaluate_VeID()
        else:
            mAP, cmc = self.evaluator.evaluate(eval_cls=True)
            # BUG FIX: this unpacking used to run unconditionally after the
            # if/else, raising a NameError on the VehicleID branch (where
            # `cmc` is never defined) and clobbering its already-unpacked
            # cmc1/cmc5/cmc10 values.
            cmc1, cmc5, cmc10 = cmc[0], cmc[4], cmc[9]

        is_best = mAP > self.best_mAP
        self.best_mAP = max(mAP, self.best_mAP)
        self.logger.write("mAP: {:.1f}% | cmc-1: {:.1f}% | cmc-5: {:.1f}% | cmc-10: {:.1f}% | Best mAP: {:.1f}% |\n".format(mAP * 100, cmc1 * 100, cmc5 * 100, cmc10 * 100, self.best_mAP * 100))
        self.logger.write("==========================================\n")
        save_checkpoint({
            'state_dict':self.model.module.state_dict(),
            'epoch':epoch+1,
            'best_mAP': self.best_mAP,
        }, is_best=is_best, fpath=os.path.join("ckpt", self.cfg['NAME'], 'checkpoint.pth'))

    def cls_visualization(self):
        """Classify target-domain images and copy each image into a folder
        named after its predicted class, for visual inspection.
        """
        # Switch to eval mode once, instead of re-calling it on every batch.
        self.model.eval()
        for i, inputs in enumerate(self.target_loader):
            imgs, _, fnames = inputs[0], inputs[1], inputs[-1]
            cls_score, _ = self.model(imgs, 'auxiliary')
            # argmax over class scores -> predicted class index per image.
            predict = torch.max(cls_score, dim=1)[1].data.squeeze()
            for p, fname in zip(predict, fnames):
                dir_ = os.path.join('vis', self.cfg['CLS_PATH'])
                mkdir_if_missing(os.path.join(dir_, '%d'%(p.item())))
                dst = os.path.join(dir_, '%d'%(p.item()), fname+'.jpg')
                # NOTE(review): source image root is hard-coded — consider
                # moving it into cfg.
                src = os.path.join('/home/share/zhihui/VeRi/image_train/', fname+'.jpg')
                shutil.copyfile(src, dst)

    def tSNE(self, img_path='tSNE.jpg'):
        """Collect features from the source and auxiliary loaders and embed
        them in 2-D with t-SNE for domain-gap visualization.

        NOTE(review): reads self.auxiliary_loader, which is not assigned in
        the visible __init__ — verify it is set elsewhere before calling.
        The img_path argument is unused in the visible portion of the body.
        """
        source_feats, aux_feats = [], []
        source_labels, aux_labels = [], []

        # Source-domain features; every sample is tagged 'VehicleID'.
        for i,inputs in enumerate(self.source_loader):
            imgs, vids = Variable(inputs[0]).cuda(), inputs[1]
            outputs = self.model(imgs)
            for output, vid in zip(outputs, vids):
                source_feats.append(output.data.cpu().numpy().tolist())
                source_labels.append('VehicleID')
            '''
            source_feats = np.array(source_feats)
            source_labels = np.array(source_labels)
            '''

        # Auxiliary-domain features, tagged 'CompCars'.
        for i,inputs in enumerate(self.auxiliary_loader):
            imgs, tids = Variable(inputs[0]).cuda(), inputs[1]
            outputs = self.model(imgs)
            for output, tid in zip(outputs, tids):
                aux_feats.append(output.data.cpu().numpy().tolist())
                aux_labels.append('CompCars')

        tsne = TSNE(n_components=2, init='pca', random_state=501)

        # Cap each domain at 1000 samples to keep t-SNE tractable.
        source_feats = np.array(source_feats[:1000])
        aux_feats = np.array(aux_feats[:1000])
        feats = np.concatenate((source_feats,aux_feats), axis=0)
        labels = source_labels[:1000] + aux_labels[:1000]

        # Persist raw features/labels so the plot can be regenerated offline.
        pickle.dump(feats, open('feat.pkl', 'wb'))
        pickle.dump(labels, open('labels.pkl', 'wb'))

        '''