Example #1
    def train(self, train_data, step, loss, dropout=0.5):
        # adjust training epochs and learning rate
        epochs = self.initial_steps if step==0 else self.later_steps
        init_lr = 0.1 if step==0 else 0.01 
        step_size = self.step_size if step==0 else sys.maxsize

        """ create model and dataloader """
        dataloader = self.get_dataloader(train_data, training=True)

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, self.model.module.CNN.base.parameters()))

        # the first three blocks are fixed (frozen) to save GPU memory, so keep only params requiring grad
        base_params_need_for_grad = filter(lambda p: p.requires_grad, self.model.module.CNN.base.parameters())

        # params of the new layers
        new_params = [p for p in self.model.parameters() if id(p) not in base_param_ids]

        # set the backbone learning rate to 0.1x that of the new layers
        param_groups = [
            {'params': base_params_need_for_grad, 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]

        optimizer = torch.optim.SGD(param_groups, lr=init_lr, momentum=0.9, weight_decay=5e-4, nesterov=True)

        def adjust_lr(epoch, step_size):
            lr = init_lr / (10 ** (epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

        """ main training process """
        trainer = Trainer(self.model, self.criterion, fixed_layer=self.fixed_layer)
        for epoch in range(epochs):
            adjust_lr(epoch, step_size)
            trainer.train(epoch, dataloader, optimizer, print_freq=max(5, len(dataloader) // 30 * 10))
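A note on the pattern above, which recurs in nearly every example below: 'lr_mult' is not a key that torch.optim.SGD itself interprets. It simply rides along inside each param-group dict, and the hand-written adjust_lr reads it back to scale the decayed base rate, so the pretrained backbone trains at one tenth the rate of the new layers. A minimal self-contained sketch of the idea (toy modules, not the example's ResNet):

import torch
import torch.nn as nn

# toy model: a pretrained "backbone" plus a freshly initialized head
backbone = nn.Linear(8, 8)
head = nn.Linear(8, 2)

param_groups = [
    {'params': backbone.parameters(), 'lr_mult': 0.1},  # 10x smaller lr
    {'params': head.parameters(), 'lr_mult': 1.0},      # full lr
]
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9)

def adjust_lr(epoch, init_lr=0.1, step_size=40):
    # divide the base rate by 10 every step_size epochs,
    # then apply each group's multiplier
    lr = init_lr / (10 ** (epoch // step_size))
    for g in optimizer.param_groups:
        g['lr'] = lr * g.get('lr_mult', 1)

adjust_lr(0)    # backbone lr = 0.01, head lr = 0.1
adjust_lr(40)   # backbone lr = 0.001, head lr = 0.01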
Example #2
    def train(self,
              train_data,
              epochs=70,
              step_size=55,
              init_lr=0.1,
              dropout=0.5):
        """ create model and dataloader """
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes)
        model = nn.DataParallel(model).cuda()
        dataloader = self.get_dataloader(train_data, training=True)

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, model.module.CNN.base.parameters()))

        # the first three blocks are fixed (frozen) to save GPU memory, so keep only params requiring grad
        base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                           model.module.CNN.base.parameters())

        # params of the new layers
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]

        # set the backbone learning rate to 0.1x that of the new layers
        param_groups = [{
            'params': base_params_need_for_grad,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]

        criterion = nn.CrossEntropyLoss().cuda()
        optimizer = torch.optim.SGD(param_groups,
                                    lr=init_lr,
                                    momentum=0.5,
                                    weight_decay=5e-4,
                                    nesterov=True)

        # change the learning rate by step
        def adjust_lr(epoch, step_size):
            lr = init_lr / (10**(epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

            if epoch % step_size == 0:
                print("Epoch {}, current lr {}".format(epoch, lr))

        """ main training process """
        trainer = Trainer(model, criterion)
        for epoch in range(epochs):
            adjust_lr(epoch, step_size)
            trainer.train(epoch, dataloader, optimizer, print_freq=10)

        torch.save(model.state_dict(),
                   osp.join(self.save_path, "model_{}.ckpt".format(epoch)))
        self.model = model
Example #3
    def softmax_train(self, train_data, unselected_data, step, epochs, step_size, init_lr, dropout, loss):

        """ create model and dataloader """
        model = models.create(self.model_name, dropout=self.dropout, num_classes=self.num_classes,
                              embeding_fea_size=self.embeding_fea_size, classifier=loss, fixed_layer=self.fixed_layer)

        model = nn.DataParallel(model).cuda()

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, model.module.CNN.base.parameters())) 
        base_params_need_for_grad = filter(lambda p: p.requires_grad, model.module.CNN.base.parameters()) 
        new_params = [p for p in model.parameters() if id(p) not in base_param_ids]

        # set the backbone learning rate to 0.1x that of the new layers
        param_groups = [
            {'params': base_params_need_for_grad, 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]


        exclusive_criterion = ExLoss(self.embeding_fea_size, len(unselected_data), t=10).cuda()

        optimizer = torch.optim.SGD(param_groups, lr=init_lr, momentum=self.train_momentum, weight_decay=5e-4, nesterov=True)

        # change the learning rate by step
        def adjust_lr(epoch, step_size):
            use_unselected_data = True
            lr = init_lr / (10 ** (epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)
            if epoch >= step_size:
                use_unselected_data = False
                # print("Epoch {}, CE loss, current lr {}".format(epoch, lr))
            return use_unselected_data


        s_dataloader = self.get_dataloader(train_data, training=True, is_ulabeled=False)
        u_dataloader = self.get_dataloader(unselected_data, training=True, is_ulabeled=True)


        """ main training process """
        trainer = Trainer(model, exclusive_criterion, fixed_layer=self.fixed_layer, lamda=self.lamda)
        for epoch in range(epochs):
            use_unselected_data = adjust_lr(epoch, step_size)
            trainer.train(epoch, s_dataloader, u_dataloader, optimizer, use_unselected_data, print_freq=len(s_dataloader)//2)

        ckpt_file = osp.join(self.save_path,  "step_{}.ckpt".format(step))
        torch.save(model.state_dict(), ckpt_file)
        self.model = model
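The backbone/new-layer split used above (and in most of the other examples) keys on object identity: the id() of every backbone parameter goes into a set, and any parameter whose id is not in the set is treated as new. Note also that in Python 3, filter(lambda p: p.requires_grad, ...) returns a one-shot iterator; that is safe here only because the optimizer consumes it exactly once. A small sketch of the split with toy modules (illustrative, not the examples' ResNet):

import torch.nn as nn

# toy model with a 'base' (backbone) submodule and a new classifier
model = nn.ModuleDict({'base': nn.Linear(8, 8), 'classifier': nn.Linear(8, 2)})

# record the identity of every backbone parameter
base_param_ids = set(map(id, model['base'].parameters()))

# every parameter not in the backbone is a "new" parameter
new_params = [p for p in model.parameters() if id(p) not in base_param_ids]

assert len(new_params) == 2  # the classifier's weight and bias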
Example #4
def train_model(model, dataloader, epochs=50):
    """
    train model given the dataloader the criterion,
    stop when epochs are reached
    params:
        model: model for training
        dataloader: training data
        epochs: training epochs
        criterion
    """
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    def adjust_lr(epoch):
        step_size = 40
        lr = 0.1 * (0.1 ** (epoch // step_size))  # 0.1 * 0.1^(epoch // step_size)
        # lr *= 0.1  # 0.0001 ?
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    criterion = nn.CrossEntropyLoss().cuda()
    trainer = Trainer(model, criterion)
    ac_now = 0.0
    ac_max = 0.1
    losses_avg = 0.0
    losses_avg_c = 0

    for epoch in range(epochs):
        adjust_lr(epoch)
        losses_avg = trainer.train(epoch, dataloader, optimizer)
        '''
        if losses_avg < 0.003:
            print('terminate: losses_avg < 0.003')
            return
        elif losses_avg < 0.008 and losses_avg > 0.007:
            losses_avg_c = losses_avg_c + 1
            if(losses_avg_c >= 10):
                print('terminate: 0.007 < losses_avg < 0.008')
                return
        '''
Example #5
def train_model(model, dataloader, config):
    """
    train model given the dataloader the criterion,
    stop when epochs are reached
    params:
        model: model for training
        dataloader: training data
        config: training configuration
        criterion
    """
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr': 0.01
        }, {
            'params': new_params,
            'lr': 0.1
        }]
    else:
        param_groups = model.parameters()

    criterion, optimizer = get_optim_params(config, param_groups)

    trainer = Trainer(model, criterion)

    # schedule learning rate
    def adjust_lr(epoch):
        step_size = 40
        lr = config.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    scheduler = lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)
    for epoch in range(config.epochs):
        #adjust_lr(epoch)
        scheduler.step()
        trainer.train(epoch,
                      dataloader,
                      optimizer,
                      print_freq=config.print_freq)
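Example #5 replaces the hand-rolled adjust_lr with lr_scheduler.StepLR, which multiplies each param group's own initial lr by gamma every step_size epochs, so the 0.01/0.1 backbone-to-head ratio set in param_groups is preserved automatically. One caveat: since PyTorch 1.1, scheduler.step() is expected after the epoch's optimizer updates, whereas the loop above steps it first (the pre-1.1 convention). A minimal sketch with toy modules:

import torch
import torch.nn as nn
from torch.optim import lr_scheduler

param_groups = [
    {'params': nn.Linear(8, 8).parameters(), 'lr': 0.01},  # backbone
    {'params': nn.Linear(8, 2).parameters(), 'lr': 0.1},   # new layers
]
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)

for epoch in range(50):
    # ... one epoch of training with optimizer.step() per batch ...
    scheduler.step()  # post-1.1 ordering: step the scheduler after training

# both groups decayed 10x at epoch 40, keeping their 1:10 ratio
print([g['lr'] for g in optimizer.param_groups])  # ~[0.001, 0.01]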
Example #6
def train_model(model, dataloader, epochs=50):
    """
    train model given the dataloader the criterion,
    stop when epochs are reached
    params:
        model: model for training
        dataloader: training data
        epochs: training epochs
        criterion
    """
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    def adjust_lr(epoch):
        step_size = 40
        lr = 0.1 * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    criterion = nn.CrossEntropyLoss().cuda()
    trainer = Trainer(model, criterion)
    for epoch in range(epochs):
        adjust_lr(epoch)
        trainer.train(epoch, dataloader, optimizer)
Example #7
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.camstyle, args.re, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature, args.rerank)
        return

    # Criterion
    #criterion = nn.CrossEntropyLoss().cuda()

    class LSROloss(nn.Module):
        # input is the prediction score (a batchsize x num_classes Variable, e.g. 32 x 752),
        # target is the corresponding label, and flg is a batchsize x 1 flag
        # (0 for real data, 1 for GAN-generated data)
        def __init__(self):
            super(LSROloss, self).__init__()

        def forward(self, input, target, flg):
            # N images, C channels, K classes in total
            if input.dim() > 2:
                input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
                input = input.transpose(1, 2)  # N,C,H*W => N,H*W,C
                input = input.contiguous().view(-1, input.size(2))  # N,H*W,C => N*H*W,C

            # normalize input: subtract each row's maximum for numerical stability
            maxRow, _ = torch.max(input.data, 1)
            maxRow = maxRow.unsqueeze(1)
            input.data = input.data - maxRow

            target = target.view(-1, 1)  # batchsize x 1
            flg = flg.view(-1).type(torch.cuda.FloatTensor)

            logpt = F.log_softmax(input, dim=1)  # batchsize x num_classes
            # uniform-label (GAN) term: average log-probability per sample
            flos = torch.sum(logpt, 1) / logpt.size(1)
            # original cross-entropy term on the true label
            logpt = logpt.gather(1, target).view(-1)

            loss = -1 * logpt * (1 - flg) - flos * flg
            return loss.mean()

    criterion = LSROloss()

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.module.base.parameters(),
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    if args.camstyle == 0:
        trainer = Trainer(model, criterion)
    else:
        trainer = CamStyleTrainer(model, criterion, camstyle_loader)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.output_feature, args.rerank)
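The inline LSROloss above appears to be a label-smoothing regularization for outliers: real images (flg = 0) pay the usual cross-entropy on their label, while GAN-generated images (flg = 1) are pushed toward a uniform distribution over all classes via the mean log-probability term. A CPU-only sketch of the same computation on toy scores:

import torch
import torch.nn.functional as F

# toy scores for 2 samples over 4 classes
scores = torch.tensor([[2.0, 0.5, 0.1, 0.0],
                       [0.3, 0.2, 1.5, 0.0]])
target = torch.tensor([0, 2])
flg = torch.tensor([0.0, 1.0])   # sample 0 is real, sample 1 is generated

logp = F.log_softmax(scores, dim=1)
real_term = -logp.gather(1, target.view(-1, 1)).view(-1)  # cross-entropy part
gen_term = -logp.mean(dim=1)                              # uniform-label part
loss = ((1 - flg) * real_term + flg * gen_term).mean()
print(loss)  # scalar LSRO-style loss over the batch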
Example #8
def main(args):
    args.step_size = [int(x) for x in args.step_size.split(',')]
    # seed
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.benchmark = True

    if args.logs_dir is None:
        args.logs_dir = osp.join(f'logs/zju/{args.dataset}', datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S'))
    else:
        args.logs_dir = osp.join(f'logs/zju/{args.dataset}', args.logs_dir)
    if args.train:
        os.makedirs(args.logs_dir, exist_ok=True)
        copy_tree('./reid', args.logs_dir + '/scripts/reid')
        for script in os.listdir('.'):
            if script.split('.')[-1] == 'py':
                dst_file = os.path.join(args.logs_dir, 'scripts', os.path.basename(script))
                shutil.copyfile(script, dst_file)
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'), )
    print('Settings:')
    print(vars(args))
    print('\n')

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                 args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.re, args.num_instances,
                 camstyle=0, zju=1, colorjitter=args.colorjitter)

    # Create model
    model = models.create('ide', feature_dim=args.feature_dim, norm=args.norm,
                          num_classes=num_classes, last_stride=args.last_stride, arch=args.arch)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        resume_fname = osp.join(f'logs/zju/{args.dataset}', args.resume, 'model_best.pth.tar')
        model, start_epoch, best_top1 = checkpoint_loader(model, resume_fname)
        print("=> Last epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))
        start_epoch += 1
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = [LSR_loss().cuda() if args.LSR else nn.CrossEntropyLoss().cuda(),
                 TripletLoss(margin=None if args.softmargin else args.margin).cuda()]

    # Optimizer
    if 'aic' in args.dataset:
        # Optimizer
        if hasattr(model.module, 'base'):  # lower learning rate for the base network (e.g. DenseNet-121)
            base_param_ids = set(map(id, model.module.base.parameters()))
            new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
            param_groups = [{'params': model.module.base.parameters(), 'lr_mult': 1},
                            {'params': new_params, 'lr_mult': 2}]
        else:
            param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups, lr=args.lr, momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, )

    # Trainer
    trainer = Trainer(model, criterion)

    # Evaluator
    evaluator = Evaluator(model)

    if args.train:
        # Schedule learning rate
        def adjust_lr(epoch):
            if epoch <= args.warmup:
                alpha = epoch / args.warmup
                warmup_factor = 0.01 * (1 - alpha) + alpha
            else:
                warmup_factor = 1
            lr = args.lr * warmup_factor * (0.1 ** bisect_right(args.step_size, epoch))
            print('Current learning rate: {}'.format(lr))
            for g in optimizer.param_groups:
                if 'aic' in args.dataset:
                    g['lr'] = lr * g.get('lr_mult', 1)
                else:
                    g['lr'] = lr

        # Draw Curve
        epoch_s = []
        loss_s = []
        prec_s = []
        eval_epoch_s = []
        eval_top1_s = []

        # Start training
        for epoch in range(start_epoch + 1, args.epochs + 1):
            t0 = time.time()
            adjust_lr(epoch)
            # train_loss, train_prec = 0, 0
            train_loss, train_prec = trainer.train(epoch, train_loader, optimizer, fix_bn=args.fix_bn, print_freq=10)

            if epoch < args.start_save:
                continue

            if epoch % 10 == 0:
                top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
                eval_epoch_s.append(epoch)
                eval_top1_s.append(top1)
            else:
                top1 = 0

            is_best = top1 >= best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch,
                'best_top1': best_top1,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
            epoch_s.append(epoch)
            loss_s.append(train_loss)
            prec_s.append(train_prec)
            draw_curve(os.path.join(args.logs_dir, 'train_curve.jpg'), epoch_s, loss_s, prec_s,
                       eval_epoch_s, None, eval_top1_s)

            t1 = time.time()
            t_epoch = t1 - t0
            print('\n * Finished epoch {:3d}  top1: {:5.1%}  best_eval: {:5.1%} {}\n'.
                  format(epoch, top1, best_top1, ' *' if is_best else ''))
            print('*************** Epoch takes time: {:^10.2f} *********************\n'.format(t_epoch))
            pass

        # Final test
        print('Test with best model:')
        model, start_epoch, best_top1 = checkpoint_loader(model, osp.join(args.logs_dir, 'model_best.pth.tar'))
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))

        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
    else:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
        pass
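Example #8's adjust_lr combines a linear warmup (ramping from 1% of the base rate to the full rate over args.warmup epochs) with multi-step decay, where bisect_right(args.step_size, epoch) counts how many milestones the current epoch has passed. The schedule in isolation, with illustrative values:

from bisect import bisect_right

def warmup_multistep_lr(epoch, base_lr=0.1, warmup=10, milestones=(40, 70)):
    # linear warmup from 1% of base_lr, then divide by 10 at each milestone
    if epoch <= warmup:
        alpha = epoch / warmup
        warmup_factor = 0.01 * (1 - alpha) + alpha
    else:
        warmup_factor = 1
    return base_lr * warmup_factor * (0.1 ** bisect_right(milestones, epoch))

print(warmup_multistep_lr(0))    # 0.001 (1% of the base rate)
print(warmup_multistep_lr(10))   # 0.1   (warmup finished)
print(warmup_multistep_lr(45))   # 0.01  (past the first milestone)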
Example #9
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print(
        "Test with the original model trained on target domain (direct transfer):"
    )
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    if args.evaluate:
        return

    # Criterion
    criterion = [
        TripletLoss(args.margin, args.num_instances).cuda(),
        TripletLoss(args.margin, args.num_instances).cuda(),
    ]

    # Optimizer
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=0.9,
    )

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # get source data features
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronize feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # synchronize feature order with dataset.train
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)

        # select & cluster images as the training set for this iteration
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - 1
        print('Iteration {} has {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.trainval, labels):
            if label == -1:
                continue
            # append camera id 0 so trainer.py's _parsing_input and the sampler need no changes
            new_dataset.append((fname, label, 0))
        print('Iteration {} has {} training images'.format(
            iter_n + 1, len(new_dataset)))

        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        # train model with new generated dataset
        trainer = Trainer(model, criterion, print_freq=args.print_freq)
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, optimizer)

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    return (rank_score.map, rank_score.market1501[0])
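Example #9 builds pseudo-labels by clustering the re-ranked distance matrix with DBSCAN, choosing eps adaptively as the mean of the smallest rho-fraction of pairwise distances so the density threshold tracks the feature scale; num_ids = len(set(labels)) - 1 then counts clusters assuming DBSCAN produced at least one noise label (-1). A self-contained sketch of the eps selection on synthetic distances (rho here is illustrative):

import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.RandomState(0)
points = rng.rand(100, 2)
# toy precomputed (symmetric, zero-diagonal) distance matrix
dist = np.sqrt(((points[:, None] - points[None]) ** 2).sum(-1))

rho = 0.05  # fraction of smallest pairwise distances to average
tri = np.triu(dist, 1)          # upper triangle, each pair once
tri = np.sort(tri[np.nonzero(tri)], axis=None)
top_num = np.round(rho * tri.size).astype(int)
eps = tri[:top_num].mean()

labels = DBSCAN(eps=eps, min_samples=4, metric='precomputed').fit_predict(dist)
num_ids = len(set(labels)) - (1 if -1 in labels else 0)  # -1 marks noise
print('eps = {:.3f}, clusters = {}'.format(eps, num_ids))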
Example #10
def main(args):
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log5.txt'))
    # sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    now = datetime.datetime.now()
    print(now.strftime('%Y-%m-%d %H:%M:%S'))
    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_diff_features=args.features, \
                          pretrained=True)

    # model_path = load_checkpoint(args.model_path)
    # model.load_state_dict(model_path['state_dict'])

    # Criterion
    criterion = AdaptTripletLoss(margin=args.margin, num_feature=args.features).cuda()
    # criterion = TripletLoss(margin=args.margin,\
    #                          metric_embedding=args.metric_embedding).cuda()

    start_epoch = best_top1 = top1 = 0
    is_best = False
    if args.resume_from_trip:
        model_path = load_checkpoint(args.resume_from_trip)
        model.load_state_dict(model_path['state_dict'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    if args.resume:
        model_path = load_checkpoint(args.resume)
        model.load_state_dict(model_path['state_dict'])
        criterion.load_state_dict(model_path['adapt_metric'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    # Load from checkpoint

    # if args.resume:
    #     checkpoint = load_checkpoint(args.resume)
    #     model.load_state_dict(checkpoint['state_dict'])
    #     start_epoch = checkpoint['epoch']
    #     best_top1 = checkpoint['best_top1']
    #     print("=> Start epoch {}  best top1 {:.1%}"
    #           .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = ADP_Evaluator(model, criterion)
    # evaluator = Evaluator(model)
    if args.evaluate:
        # metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.dataset) > 1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name])
            return
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
            return

    # Optimizer
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
    #                              weight_decay=args.weight_decay)

    if args.only_train_metric:
        optimizer = torch.optim.Adam(criterion.parameters(), lr=args.lr,
                                     weight_decay = args.weight_decay)

        for param in model.parameters():
            param.requires_grad = False
        only_metric_train = True
    else:
        optimizer = torch.optim.Adam([{'params': model.parameters(), 'lr': 0.1*args.lr},
                                      {'params': criterion.parameters()}], lr=args.lr,
                                     weight_decay = args.weight_decay)
        only_metric_train = False

    # def part_param(model,str):
    #     for name,param in model.named_parameters():
    #         if str not in name:
    #             yield param
    #
    # new_param = part_param(model,'base')
    # optimizer = torch.optim.Adam([
    #                             {'params': model.module.base.parameters()},
    #                             {'params':new_param, 'weight_decay':1.5*args.weight_decay}
    #                             ]
    #                             ,lr=args.lr, weight_decay=args.weight_decay,)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.01 ** ((epoch - args.lr_change_epochs)/float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lambda(epoch):
        w_lambda = 0.001 if epoch <= 30 else 0.001 * (0.01 ** ((epoch - 30) / 70))
        return w_lambda
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        weight_constraint_lambda = 0.0001
        trainer.train(epoch, train_loader, optimizer, logger, weight_constraint_lambda)

        #######Tensorboard-logs##########
        if not only_metric_train:
            for tag, value in model.named_parameters():
                tag = tag.replace('.', '/')
                try:
                    logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                    logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
                except AttributeError:
                    pass
        for tag, value in criterion.named_parameters():
            tag = tag.replace('.', '/')
            try:
                logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
            except AttributeError:
                pass
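Examples #10 and #14 use a smooth exponential schedule instead of step decay: the rate is held at args.lr until args.lr_change_epochs, then decays geometrically so that it reaches 1% (0.1% in Example #14) of the base rate exactly on the final epoch. The schedule in isolation, with illustrative values:

def smooth_exp_lr(epoch, base_lr=3e-4, change=100, total=200):
    # hold base_lr until `change`, then decay geometrically so the
    # rate hits base_lr * 0.01 exactly at epoch `total`
    if epoch <= change:
        return base_lr
    return base_lr * (0.01 ** ((epoch - change) / float(total - change)))

print(smooth_exp_lr(100))  # 3e-4 (still flat)
print(smooth_exp_lr(150))  # 3e-5 (halfway: 0.01 ** 0.5 = 0.1)
print(smooth_exp_lr(200))  # 3e-6 (fully decayed)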
Example #11
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, _, _, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    trans_train_loader, num_classes = get_fake_data(args.trans_name, args.trans_data_txt, args.height,
                                       args.width, args.batch_size, args.workers)
    # Create model
    model = models.create(args.arch,
                          dropout=0, num_classes=num_classes)
    # model = models.create(args.arch, num_features=1024, num_diff_features=args.features,
    #                       dropout=args.dropout, num_classes=num_classes, iden_pretrain=True)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, trans_train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr * 0.1**(epoch//args.lr_change_epochs)
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, trans_train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        # for tag, value in model.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        # for tag, value in criterion.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        #################################
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, trans_train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #12
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
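The asserts at the top of this and several other examples (num_instances > 1, batch_size divisible by num_instances) come from PK-style identity sampling for the triplet loss: each batch holds P identities with K instances each, so every anchor has positives in the batch. RandomIdentitySampler is the project's own class; the following is only a rough sketch of the core sampling step:

import random
from collections import defaultdict

def pk_batch(labels, p, k):
    # group sample indices by identity, then draw P identities x K instances
    by_id = defaultdict(list)
    for idx, pid in enumerate(labels):
        by_id[pid].append(idx)
    ids = [pid for pid, idxs in by_id.items() if len(idxs) >= k]
    chosen = random.sample(ids, p)
    return [idx for pid in chosen for idx in random.sample(by_id[pid], k)]

# batch_size = p * k, which is why num_instances must divide batch_size
batch = pk_batch([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], p=2, k=3)
print(batch)  # 6 indices: 3 instances each for 2 identities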
Example #13
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_features=args.features, norm=True,
                          dropout=args.dropout)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = OIMLoss(model.module.num_features, num_classes,
                        scalar=args.oim_scalar,
                        momentum=args.oim_momentum).cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
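OIMLoss in Example #13 appears to be the Online Instance Matching loss: instead of a parametric classifier it keeps a lookup table with one feature vector per identity, scores a batch by inner product against that table (scaled by `scalar`, an inverse temperature), and updates the table with `momentum` rather than by gradients. The repo's implementation is not shown here, so the following is only a rough sketch of the mechanism under those assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyOIM(nn.Module):
    # sketch of an OIM-style loss: a non-parametric lookup table (LUT)
    # of one normalized feature per identity, updated with momentum
    def __init__(self, num_features, num_classes, scalar=30.0, momentum=0.5):
        super(TinyOIM, self).__init__()
        self.scalar, self.momentum = scalar, momentum
        self.register_buffer('lut', torch.zeros(num_classes, num_features))

    def forward(self, feats, targets):
        feats = F.normalize(feats, dim=1)
        logits = feats.mm(self.lut.t()) * self.scalar  # cosine scores
        loss = F.cross_entropy(logits, targets)
        with torch.no_grad():  # momentum update of the table, no gradients
            for f, y in zip(feats, targets):
                self.lut[y] = F.normalize(
                    self.momentum * self.lut[y] + (1 - self.momentum) * f, dim=0)
        return loss

criterion = TinyOIM(num_features=128, num_classes=751)
loss = criterion(torch.randn(16, 128), torch.randint(0, 751, (16,)))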
Example #14
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    # assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          num_diff_features=128,
                          dropout=args.dropout,
                          cut_at_pooling=False)
    print(model)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items()
                           if k in model.state_dict()}
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.dataset)>1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name], metric)
            return
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
            return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
    # epoch_num = args.maxstep//(750//18) + 1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            try:
                logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
            except AttributeError:
                pass
        # top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
        # top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        top1 = 1
        # is_best = top1 > best_top1
        # best_top1 = max(top1, best_top1)
        # save_checkpoint({
        #     'state_dict': model.module.state_dict(),
        #     'epoch': epoch + 1,
        #     'best_top1': best_top1,
        # }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        is_best = False
        best_top1 = 1
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    print("Test:")
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                               dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #15
def main(args):
    # For fast training.
    cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print('log_dir=', args.logs_dir)

    # Print logs
    print(args)

    # Create data loaders
    dataset, num_classes, source_train_loader, target_train_loader, \
    query_loader, gallery_loader = get_data(args.data_dir, args.source,
                                            args.target, args.height,
                                            args.width, args.batch_size,
                                            args.re, args.workers)

    # Create model
    model = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes)

    # Invariance learning model
    num_tgt = len(dataset.target_train)
    model_inv = InvNet(args.features, num_tgt,
                        beta=args.inv_beta, knn=args.knn,
                        alpha=args.inv_alpha)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        model_inv.load_state_dict(checkpoint['state_dict_inv'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} "
              .format(start_epoch))

    # Set model
    model = nn.DataParallel(model).to(device)
    model_inv = model_inv.to(device)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature)
        return

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))

    base_params_need_for_grad = filter(lambda p: p.requires_grad, model.module.base.parameters())

    new_params = [p for p in model.parameters() if
                    id(p) not in base_param_ids]
    param_groups = [
        {'params': base_params_need_for_grad, 'lr_mult': 0.1},
        {'params': new_params, 'lr_mult': 1.0}]

    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, model_inv, lmd=args.lmd, include_mmd=args.include_mmd)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = args.epochs_decay
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader, optimizer)

        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'state_dict_inv': model_inv.state_dict(),
            'epoch': epoch + 1,
        }, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.
              format(epoch))

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.output_feature)
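A recurring pattern across these examples is the two-group optimizer plus an adjust_lr closure: the pretrained backbone carries lr_mult=0.1 and the new layers lr_mult=1.0, so every step decay rescales both groups consistently. A minimal, self-contained sketch (toy two-layer model and made-up hyperparameters, not the repo's classes):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))  # stand-ins for base / new layers
param_groups = [
    {'params': model[0].parameters(), 'lr_mult': 0.1},   # pretrained backbone
    {'params': model[1].parameters(), 'lr_mult': 1.0},   # freshly added layers
]
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9)

def adjust_lr(epoch, init_lr=0.1, step_size=40):
    lr = init_lr * (0.1 ** (epoch // step_size))
    for g in optimizer.param_groups:
        g['lr'] = lr * g.get('lr_mult', 1)

adjust_lr(0)    # backbone 0.01, new layers 0.1
adjust_lr(40)   # backbone 0.001, new layers 0.01
print([g['lr'] for g in optimizer.param_groups])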
Example #16
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.loss == 'triplet':
        assert args.num_instances > 1, 'TripletLoss requires num_instances > 1'
        assert args.batch_size % args.num_instances == 0, \
            'num_instances should divide batch_size'
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.workers, args.num_instances,
                 combine_trainval=args.combine_trainval)

    # Create model
    if args.loss == 'xentropy':
        model = InceptionNet(num_classes=num_classes,
                             num_features=args.features,
                             dropout=args.dropout)
    elif args.loss == 'oim':
        model = InceptionNet(num_features=args.features,
                             norm=True,
                             dropout=args.dropout)
    elif args.loss == 'triplet':
        model = InceptionNet(num_features=args.features, dropout=args.dropout)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    model = torch.nn.DataParallel(model).cuda()

    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> start epoch {}  best top1 {:.1%}".format(
            args.start_epoch, best_top1))
    else:
        best_top1 = 0

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    if args.loss == 'xentropy':
        criterion = torch.nn.CrossEntropyLoss()
    elif args.loss == 'oim':
        criterion = OIMLoss(model.module.num_features,
                            num_classes,
                            scalar=args.oim_scalar,
                            momentum=args.oim_momentum)
    elif args.loss == 'triplet':
        criterion = TripletLoss(margin=args.triplet_margin)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    criterion.cuda()

    # Optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
    else:
        raise ValueError("Cannot recognize optimizer type:", args.optimizer)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            lr = args.lr * (0.1**(epoch // 60))
        elif args.optimizer == 'adam':
            lr = args.lr if epoch <= 100 else \
                args.lr * (0.001 ** ((epoch - 100) / 50))
        else:
            raise ValueError("Cannot recognize optimizer type:",
                             args.optimizer)
        for g in optimizer.param_groups:
            g['lr'] = lr

    # Start training
    for epoch in range(args.start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
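A quick numeric check of the two decay branches in the adjust_lr above (using the corrected Adam formula): SGD drops 10x every 60 epochs, while the Adam schedule stays flat until epoch 100 and then decays by a factor of 1000 per 50 epochs. Plain-Python sketch with an illustrative base lr:

base_lr = 0.1
for epoch in (0, 59, 60, 100, 125, 150):
    sgd_lr = base_lr * (0.1 ** (epoch // 60))
    adam_lr = base_lr if epoch <= 100 else base_lr * (0.001 ** ((epoch - 100) / 50))
    print('epoch {:3d}: sgd {:.2e}  adam {:.2e}'.format(epoch, sgd_lr, adam_lr))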
Example #17
def main(args):
    cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print('log_dir= ', args.logs_dir)

    # Print logs
    print('args= ', args)

    # Create data loaders
    dataset, num_classes, query_loader, gallery_loader, propagate_loader = get_data(
        args.data_dir, args.target, args.height, args.width, args.batch_size,
        args.re, args.workers)

    # Create model
    model = stb_net.MemoryBankModel(out_dim=2048, use_bnneck=args.use_bnneck)

    # Create memory bank
    cap_memory = CAPMemory(beta=args.inv_beta,
                           alpha=args.inv_alpha,
                           all_img_cams=dataset.target_train_all_img_cams)

    # Set model
    model = nn.DataParallel(model.to(device))
    cap_memory = cap_memory.to(device)

    # Load from checkpoint
    if len(args.load_ckpt) > 0:
        print('  Loading pre-trained model: {}'.format(args.load_ckpt))
        trained_dict = torch.load(args.load_ckpt)
        filtered_trained_dict = {
            k: v
            for k, v in trained_dict.items()
            if not k.startswith('module.classifier')
        }
        for k in filtered_trained_dict.keys():
            if 'embeding' in k:  # 'embeding' (sic) matches the checkpoint's key spelling
                print('pretrained model has key= {}'.format(k))
        model_dict = model.state_dict()
        model_dict.update(filtered_trained_dict)
        model.load_state_dict(model_dict)

    # Evaluator
    if args.evaluate:
        print("Test:")
        eval_results = test_model(model, query_loader, gallery_loader)
        print(
            'rank1: %.4f, rank5: %.4f, rank10: %.4f, rank20: %.4f, mAP: %.4f' %
            (eval_results[1], eval_results[2], eval_results[3],
             eval_results[4], eval_results[0]))
        return

    # Optimizer
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = args.base_lr
        weight_decay = args.weight_decay
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]

    optimizer = torch.optim.Adam(params)
    lr_scheduler = WarmupMultiStepLR(optimizer,
                                     args.milestones,
                                     gamma=0.1,
                                     warmup_factor=0.01,
                                     warmup_iters=10)

    # Trainer
    trainer = Trainer(model, cap_memory)

    # Start training
    for epoch in range(args.epochs):
        lr_scheduler.step(epoch)

        # image grouping
        print('Epoch {} image grouping:'.format(epoch))
        updated_label, init_intra_id_feat = img_association(
            model,
            propagate_loader,
            min_sample=4,
            eps=args.thresh,
            rerank=True,
            k1=20,
            k2=6,
            intra_id_reinitialize=True)

        # update train loader
        new_train_loader, loader_size = update_train_loader(
            dataset,
            dataset.target_train,
            updated_label,
            args.height,
            args.width,
            args.batch_size,
            args.re,
            args.workers,
            dataset.target_train_all_img_cams,
            sample_position=5)
        num_batch = int(float(loader_size) / args.batch_size)

        # train an epoch
        trainer.train(epoch,
                      new_train_loader,
                      optimizer,
                      num_batch=num_batch,
                      all_pseudo_label=torch.from_numpy(updated_label).to(
                          torch.device('cuda')),
                      init_intra_id_feat=init_intra_id_feat)

        # test
        if (epoch + 1) % 10 == 0:
            print('Test with epoch {} model:'.format(epoch))
            eval_results = test_model(model, query_loader, gallery_loader)
            print(
                '    rank1: %.4f, rank5: %.4f, rank10: %.4f, rank20: %.4f, mAP: %.4f'
                % (eval_results[1], eval_results[2], eval_results[3],
                   eval_results[4], eval_results[0]))

        # save final model
        if (epoch + 1) % args.epochs == 0:
            torch.save(
                model.state_dict(),
                osp.join(args.logs_dir,
                         'final_model_epoch_' + str(epoch + 1) + '.pth'))
            print('Final Model saved.')
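The checkpoint filtering above (dropping the module.classifier keys before loading) generalizes to any partial reload where the classifier head changes size. A generic sketch with a toy module; the names Net, backbone, and classifier are illustrative, not this repo's:

import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self, num_classes):
        super().__init__()
        self.backbone = nn.Linear(16, 8)
        self.classifier = nn.Linear(8, num_classes)

pretrained = Net(num_classes=751)   # e.g. trained on a source dataset
target = Net(num_classes=702)       # different label space on the target

filtered = {k: v for k, v in pretrained.state_dict().items()
            if not k.startswith('classifier')}
model_dict = target.state_dict()
model_dict.update(filtered)         # keep the target's own classifier head
target.load_state_dict(model_dict)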
Example #18
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.camstyle, args.re, args.num_instances, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_features=2048,
                          dropout=args.dropout,
                          num_classes=args.features)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature, args.rerank)
        return

    # Criterion
    # criterion = TripletLoss(margin=args.margin).cuda()
    criterion_cro = nn.CrossEntropyLoss().cuda()
    criterion_tri = TripletLoss(margin=args.margin).cuda()
    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion_cro, criterion_tri)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test
    print('Test with final model:')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.output_feature, args.rerank)
Example #19
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log'))

    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob)
    model, model_discriminator = models.create(args.arch,
                                               num_classes=num_classes,
                                               num_features=args.features,
                                               attention_mode=args.att_mode)
    # print(model)
    model = model.cuda()
    model_discriminator = model_discriminator.cuda()

    evaluator = Evaluator(model)
    metric = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['model'])
        model_discriminator.load_state_dict(checkpoint['model_discriminator'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    if args.evaluate:
        metric.train(model, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        exit()

    current_margin = args.margin
    #criterion_z = nn.CrossEntropyLoss().cuda()
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes,
                                          epsilon=0.5).cuda()
    criterion_I = TripletLoss(margin=current_margin).cuda()
    criterion_D = nn.CrossEntropyLoss().cuda()

    print(args)

    if args.arch == 'ide':
        ignored_params = list(map(id, model.model.fc.parameters())) + list(
            map(id, model.classifier.parameters()))
    else:
        ignored_params = list(map(id, model.classifier.parameters()))

    base_params = filter(lambda p: id(p) not in ignored_params,
                         model.parameters())

    if args.use_adam:
        optimizer_ft = torch.optim.Adam([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': model.classifier.parameters(),
                'lr': args.lr
            },
        ],
                                        weight_decay=5e-4)

        optimizer_discriminator = torch.optim.Adam(
            [{
                'params': model_discriminator.model.parameters(),
                'lr': args.lr
            }, {
                'params': model_discriminator.classifier.parameters(),
                'lr': args.lr
            }],
            weight_decay=5e-4)

    else:
        optimizer_ft = torch.optim.SGD([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': model.classifier.parameters(),
                'lr': args.lr
            },
        ],
                                       momentum=0.9,
                                       weight_decay=5e-4,
                                       nesterov=True)
        optimizer_discriminator = torch.optim.SGD([
            {
                'params': model_discriminator.model.parameters(),
                'lr': args.lr
            },
            {
                'params': model_discriminator.classifier.parameters(),
                'lr': args.lr
            },
        ],
                                                  momentum=0.9,
                                                  weight_decay=5e-4,
                                                  nesterov=True)

    scheduler = WarmupMultiStepLR(optimizer_ft, args.mile_stone, args.gamma,
                                  args.warmup_factor, args.warmup_iters,
                                  args.warmup_methods)

    trainer = Trainer(model, model_discriminator, criterion_z, criterion_I,
                      criterion_D, trainvallabel, 1, 1, 0.3, 0.05, 5)

    best_top1 = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        scheduler.step()
        triple_loss, tot_loss = trainer.train(epoch, train_loader,
                                              optimizer_ft,
                                              optimizer_discriminator)

        save_checkpoint(
            {
                'model': model.state_dict(),
                'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        if epoch < 100 or epoch % 10 != 0:
            continue

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'model': model.state_dict(),
                'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

    print('Test with best model:')
    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.format(
        epoch, top1, best_top1, ' *' if is_best else ''))

    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['model'])
    metric.train(model, train_loader)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, metric)
    print(args)
Example #20
def main(args):
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Log args
    print(args)
    args_dict = vars(args)
    with open(osp.join(args.logs_dir, 'args.json'), 'w') as f:
        json.dump(args_dict, f)

    # Create data loaders
    dataset, num_classes, train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height, \
                 args.width, args.crop_height, args.crop_width, args.batch_size, \
                 args.caffe_sampler, \
                 args.workers)

    # Create model
    valid_args = ['features', 'use_relu', 'dilation']
    model_kwargs = {k:v for k,v in args_dict.items() if k in valid_args}
    model = models.create(args.arch, **model_kwargs)
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        if key.endswith('bias'):
            # Caffe convention: biases get 2x lr and no weight decay
            params += [{'params': [value], 'lr': args.lr * 2, 'weight_decay': 0.0}]
        else:
            params += [{'params': [value], 'lr': args.lr, 'weight_decay': args.weight_decay}]
    optimizer = SGD_caffe(params,
                          lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)

    # Trainer
    trainer = Trainer(model, criterion)

    # Evaluate
    def evaluate(test_model):
        print('Test with model {}:'.format(test_model))
        checkpoint = load_checkpoint(osp.join(args.logs_dir, '{}.pth.tar'.format(test_model)))
        model.module.load(checkpoint)
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, msg='TEST')

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 200 else \
            args.lr * (0.2 ** ((epoch-200)//200 + 1))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(1, args.epochs+1):

        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        # Save
        if epoch%200 == 0 or epoch==args.epochs:
            save_dict = model.module.save_dict()
            save_dict.update({'epoch': epoch})
            save_checkpoint(save_dict, fpath=osp.join(args.logs_dir, 'epoch_{}.pth.tar'.format(epoch)))

        print('\n * Finished epoch {:3d}\n'.format(epoch))

    evaluate('epoch_750')  # assumes args.epochs == 750 so 'epoch_750.pth.tar' exists
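The bias/weight split above follows the Caffe convention: biases train at twice the learning rate and without weight decay. A condensed sketch of the same grouping, with torch.optim.SGD standing in for the repo's SGD_caffe:

import torch
import torch.nn as nn

model = nn.Linear(8, 4)             # toy stand-in for the real network
lr, weight_decay = 0.01, 5e-4
params = []
for name, p in model.named_parameters():
    if not p.requires_grad:
        continue
    if name.endswith('bias'):
        params.append({'params': [p], 'lr': lr * 2, 'weight_decay': 0.0})
    else:
        params.append({'params': [p], 'lr': lr, 'weight_decay': weight_decay})
optimizer = torch.optim.SGD(params, lr=lr, momentum=0.9)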
Example #21
def main(args):
    fix(args.seed)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    
    print(args)
    # Create data loaders
    dataset, test_dataset, num_classes, source_train_loader, \
        grid_query_loader, grid_gallery_loader, prid_query_loader, prid_gallery_loader, \
        viper_query_loader, viper_gallery_loader, ilid_query_loader, ilid_gallery_loader = \
        get_data(args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instance, args.re, args.workers)

    # Create model
    Encoder, Transfer, CamDis = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes)

    invNet = InvNet(args.features, num_classes, args.batch_size, beta=args.beta, knn=args.knn, alpha=args.alpha).cuda()

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        Encoder.load_state_dict(checkpoint['Encoder'])
        Transfer.load_state_dict(checkpoint['Transfer'])
        CamDis.load_state_dict(checkpoint['CamDis'])
        invNet.load_state_dict(checkpoint['InvNet'])
        start_epoch = checkpoint['epoch']

    Encoder = Encoder.cuda()
    Transfer = Transfer.cuda()
    CamDis = CamDis.cuda()

    model = [Encoder, Transfer, CamDis]
    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        # -----------------------------
        v = evaluator.eval_viper(viper_query_loader, viper_gallery_loader, test_dataset.viper_query, test_dataset.viper_gallery, args.output_feature, seed=97)
        p = evaluator.eval_prid(prid_query_loader, prid_gallery_loader, test_dataset.prid_query, test_dataset.prid_gallery, args.output_feature, seed=40)
        g = evaluator.eval_grid(grid_query_loader, grid_gallery_loader, test_dataset.grid_query, test_dataset.grid_gallery, args.output_feature, seed=28)
        l = evaluator.eval_ilids(ilid_query_loader, test_dataset.ilid_query, args.output_feature, seed=24)
        # -----------------------------
        return

    criterion = []
    criterion.append(nn.CrossEntropyLoss().cuda())
    criterion.append(TripletLoss(margin=args.margin))


    # Optimizer
    base_param_ids = set(map(id, Encoder.base.parameters()))
    new_params = [p for p in Encoder.parameters() if
                    id(p) not in base_param_ids]
    param_groups = [
        {'params': Encoder.base.parameters(), 'lr_mult': 0.1},
        {'params': new_params, 'lr_mult': 1.0}]

    optimizer_Encoder = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=0.9, weight_decay=5e-4, nesterov=True)
    # ====
    base_param_ids = set(map(id, Transfer.base.parameters()))
    new_params = [p for p in Transfer.parameters() if
                    id(p) not in base_param_ids]
    param_groups = [
        {'params': Transfer.base.parameters(), 'lr_mult': 0.1},
        {'params': new_params, 'lr_mult': 1.0}]

    optimizer_Transfer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=0.9, weight_decay=5e-4, nesterov=True)
    # ====
    param_groups = [
        {'params':CamDis.parameters(), 'lr_mult':1.0},
    ]
    optimizer_Cam = torch.optim.SGD(param_groups, lr=args.lr,momentum=0.9, weight_decay=5e-4, nesterov=True)

    optimizer = [optimizer_Encoder, optimizer_Transfer, optimizer_Cam]

    # Trainer
    trainer = Trainer(model, criterion, InvNet=invNet)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 40
        lr = args.lr * (0.1 ** ((epoch) // step_size))
        for g in optimizer_Encoder.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        for g in optimizer_Transfer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        for g in optimizer_Cam.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, optimizer, args.tri_weight, args.adv_weight, args.mem_weight)

        save_checkpoint({
            'Encoder': Encoder.state_dict(),
            'Transfer': Transfer.state_dict(),
            'CamDis': CamDis.state_dict(),
            'InvNet': invNet.state_dict(),
            'epoch': epoch + 1,
        }, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test on the four target sets (this script builds no combined
    # query/gallery loader, so reuse the per-dataset evaluators from above)
    print('Test with final model:')
    evaluator = Evaluator(model)
    evaluator.eval_viper(viper_query_loader, viper_gallery_loader, test_dataset.viper_query, test_dataset.viper_gallery, args.output_feature, seed=97)
    evaluator.eval_prid(prid_query_loader, prid_gallery_loader, test_dataset.prid_query, test_dataset.prid_gallery, args.output_feature, seed=40)
    evaluator.eval_grid(grid_query_loader, grid_gallery_loader, test_dataset.grid_query, test_dataset.grid_gallery, args.output_feature, seed=28)
    evaluator.eval_ilids(ilid_query_loader, test_dataset.ilid_query, args.output_feature, seed=24)
Example #22
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print('Resuming checkpoints from finetuned model on another dataset...\n')
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = [
        TripletLoss(args.margin, args.num_instances, use_semi=False).cuda(),
        TripletLoss(args.margin, args.num_instances, use_semi=False).cuda()
    ]
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # extract source-domain features
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # align feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract target training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # align feature order with tgt_dataset.trainval
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)
        # select & cluster images as the training set for this iteration
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - 1
        print('Iteration {} has {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset = []
        # assign label for target ones
        newLab = labelNoise(torch.from_numpy(target_features),
                            torch.from_numpy(labels))
        # unknownFeats = target_features[labels==-1,:]
        counter = 0
        from collections import defaultdict
        realIDs, fakeIDs = defaultdict(list), []
        for (fname, realID, cam), label in zip(tgt_dataset.trainval, newLab):
            # no change needed in trainer.py's _parsing_input or the sampler after this relabeling
            new_dataset.append((fname, label, cam))
            realIDs[realID].append(counter)
            fakeIDs.append(label)
            counter += 1
        precision, recall, fscore = calScores(realIDs, np.asarray(fakeIDs))
        print('Iteration {} has {} training images'.format(
            iter_n + 1, len(new_dataset)))
        print(
            f'precision:{precision * 100}, recall:{100 * recall}, fscore:{fscore}'
        )
        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        trainer = Trainer(model, criterion)

        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, optimizer)  # to at most 80%
        # test only
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    save_checkpoint(
        {
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': rank_score.market1501[0],
        },
        True,
        fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return rank_score.map, rank_score.market1501[0]
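The eps heuristic above (mean of the smallest rho-fraction of pairwise distances) can be exercised in isolation. A sketch with random placeholder distances in place of the re-ranked features; rho plays the role of args.rho here:

import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
n, rho = 200, 1.6e-3
dist = rng.random((n, n))
dist = (dist + dist.T) / 2          # symmetrize the placeholder matrix
np.fill_diagonal(dist, 0)

tri = np.sort(dist[np.triu_indices(n, k=1)])
top_num = max(int(np.round(rho * tri.size)), 1)
eps = tri[:top_num].mean()
labels = DBSCAN(eps=eps, min_samples=4, metric='precomputed').fit_predict(dist)
num_ids = len(set(labels)) - (1 if -1 in labels else 0)
print('eps={:.4f}, clusters={}'.format(eps, num_ids))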
Example #23
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    name = f'{args.dataset}-{args.arch}'
    logs_dir = f'logs/amsoftmax-loss/{name}'

    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, class_weight, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes,
                          cos_output=True)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = model.cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, normalize_features=True, only_top1=True)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    if args.class_weight:
        criterion = AMSoftmax(weight=class_weight).cuda()
    else:
        criterion = AMSoftmax().cuda()

    # Optimizer
    if hasattr(model, 'base'):
        base_param_ids = set(map(id, model.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.base.parameters(), 'lr_mult': 0.01},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion, name=name)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
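The AMSoftmax criterion here comes from the repo, but the additive-margin idea is compact enough to sketch: subtract a margin m from the target-class cosine, scale by s, and apply ordinary cross-entropy. This assumes the cos_output=True head already returns cosine similarities; the s and m values are illustrative defaults:

import torch
import torch.nn as nn
import torch.nn.functional as F

class AMSoftmaxSketch(nn.Module):
    def __init__(self, s=30.0, m=0.35, weight=None):
        super().__init__()
        self.s, self.m = s, m
        self.ce = nn.CrossEntropyLoss(weight=weight)

    def forward(self, cosine, target):
        # cosine: (N, C) similarities from a weight-normalized classifier
        margin = F.one_hot(target, cosine.size(1)).float() * self.m
        return self.ce(self.s * (cosine - margin), target)

loss = AMSoftmaxSketch()(torch.randn(4, 10).clamp(-1, 1), torch.tensor([0, 1, 2, 3]))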
Example #24
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    
    train, val, trainval = [], [], []
    numbers = [0, 0, 0]

    dataset_cuhk03 = merge('cuhk03', train, val, trainval, numbers, args.data_dir, args.split)
    dataset_market1501 = merge('market1501', train, val, trainval, numbers, args.data_dir, args.split)
    merge('cuhksysu', train, val, trainval, numbers, args.data_dir, args.split)
    merge('mars', train, val, trainval, numbers, args.data_dir, args.split)
    
    num_train_ids, num_val_ids, num_trainval_ids = numbers
    
    assert num_val_ids == dataset_cuhk03.num_val_ids + dataset_market1501.num_val_ids

    print("============================================")
    print("JSTL dataset loaded")
    print("  subset   | # ids | # images")
    print("  ---------------------------")
    print("  train    | {:5d} | {:8d}"
          .format(num_train_ids, len(train)))
    print("  val      | {:5d} | {:8d}"
          .format(num_val_ids, len(val)))
    print("  trainval | {:5d} | {:8d}"
          .format(num_trainval_ids, len(trainval)))

    query_cuhk03, gallery_cuhk03 = dataset_cuhk03.query, dataset_cuhk03.gallery
    query_market1501, gallery_market1501 = dataset_market1501.query, dataset_market1501.gallery

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = trainval if args.combine_trainval else train
    num_classes = (num_trainval_ids if args.combine_trainval
                   else num_train_ids)

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(args.height, args.width),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(train_set, root=args.data_dir,
                     transform=train_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        sampler=RandomIdentitySampler(train_set, args.num_instances),
        pin_memory=True, drop_last=True)

    val_loader = DataLoader(
        Preprocessor(val, root=args.data_dir,
                     transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    test_loader_cuhk03 = DataLoader(
        Preprocessor(list(set(query_cuhk03) | set(gallery_cuhk03)),
                     root=dataset_cuhk03.images_dir, transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    test_loader_market1501 = DataLoader(
        Preprocessor(list(set(query_market1501) | set(gallery_market1501)),
                     root=dataset_market1501.images_dir, transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, val, val, metric)
        print("Test(cuhk03):")
        evaluator.evaluate(test_loader_cuhk03, query_cuhk03, gallery_cuhk03, metric)
        print("Test(market1501):")
        evaluator.evaluate(test_loader_market1501, query_market1501, gallery_market1501, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val, val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)

    print("Test(cuhk03):")
    evaluator.evaluate(test_loader_cuhk03, query_cuhk03, gallery_cuhk03, metric)
    print("Test(market1501):")
    evaluator.evaluate(test_loader_market1501, query_market1501, gallery_market1501, metric)
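Several of these examples hand num_instances to a RandomIdentitySampler so that every batch holds P identities with K images each, which is why they assert batch_size % num_instances == 0. A simplified sketch of that P x K sampling idea (not the library's implementation):

import random
from collections import defaultdict

def pk_batches(dataset, batch_size, num_instances):
    # dataset: list of (fname, pid, camid) triples
    assert batch_size % num_instances == 0, 'num_instances should divide batch_size'
    by_pid = defaultdict(list)
    for idx, (_, pid, _) in enumerate(dataset):
        by_pid[pid].append(idx)
    pids = list(by_pid)
    random.shuffle(pids)
    p = batch_size // num_instances
    for i in range(0, len(pids) - p + 1, p):
        batch = []
        for pid in pids[i:i + p]:
            pool = by_pid[pid]
            # sample with replacement when an identity has fewer than K images
            take = (random.sample(pool, num_instances) if len(pool) >= num_instances
                    else random.choices(pool, k=num_instances))
            batch.extend(take)
        yield batch

dataset = [('img_{}.jpg'.format(i), i % 8, 0) for i in range(64)]
for batch in pk_batches(dataset, batch_size=16, num_instances=4):
    pass  # indices like these could feed a DataLoader via its batch_sampler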
Example #25
File: main.py  Project: ZJULearning/DeAda
def main(args):
    setproctitle.setproctitle(args.project_name)
    logs_dir = osp.join(args.root_dir, 'logs/', args.project_name)
    if not osp.exists(logs_dir):
        os.makedirs(logs_dir)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    data_dir = osp.join(args.data_dir, args.dataset)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))
    print('{}'.format(vars(args)))

    # Create data loaders
    def readlist(path):
        lines = []
        with open(path, 'r') as f:
            data = f.readlines()

        for line in data:
            name, pid, camid = line.split()
            lines.append((name, int(pid), int(camid)))
        return lines

    def require_list(name):
        path = osp.join(data_dir, name)
        if not osp.exists(path):
            raise FileNotFoundError("{} doesn't exist".format(path))
        return readlist(path)

    train_list = require_list('train.txt')
    val_list = require_list('val.txt')
    query_list = require_list('query.txt')
    gallery_list = require_list('gallery.txt')

    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    train_loader, val_loader, test_loader = \
        get_data(data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval, train_list, val_list, query_list,
                 gallery_list, dataset_type=args.dataset)
    # Create model
    num_classes = args.ncls
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    cnt = sum(p.numel() for p in model.parameters())
    print('Parameter number: {}\n'.format(cnt))
    # Load from checkpoint
    start_epoch = best_top1 = 0
    model = nn.DataParallel(model).cuda()
    #model = model.cuda()
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.module.load_state_dict(checkpoint['state_dict'])

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        with torch.no_grad():
            print('Test with latest model:')
            checkpoint = load_checkpoint(
                osp.join(logs_dir, 'checkpoint.pth.tar'))
            model.module.load_state_dict(checkpoint['state_dict'])
            print('best epoch: ', checkpoint['epoch'])
            metric.train(model, train_loader)
            evaluator.evaluate(test_loader,
                               query_list,
                               gallery_list,
                               metric=metric)

            print('Test with best model:')
            checkpoint = load_checkpoint(
                osp.join(logs_dir, 'model_best.pth.tar'))
            model.module.load_state_dict(checkpoint['state_dict'])
            print('best epoch: ', checkpoint['epoch'])
            metric.train(model, train_loader)
            evaluator.evaluate(test_loader,
                               query_list,
                               gallery_list,
                               metric=metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if args.training_method == 'plain':
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    elif args.training_method == 'deada':
        param_class_ids = set(map(id, model.module.classifier.parameters()))
        param_extrac = [
            p for p in model.parameters() if id(p) not in param_class_ids
        ]
        param_groups = [{
            'params': param_extrac,
            'lr_mult': 0.1
        }, {
            'params': model.module.classifier.parameters(),
            'lr': args.lr_classifier
        }]
    else:
        raise KeyError('Unknown training method: {}'.format(args.training_method))

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch, args):
        step_size = args.step_size
        lr = args.lr if epoch <= step_size else \
             args.lr * (0.1 ** ((epoch - step_size) // step_size + 1))
        if args.training_method == 'plain':
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)
        elif args.training_method == 'deada':
            for g in optimizer.param_groups[:1]:
                # only update lr of feature extractor, keep lr of classifier constant
                g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    waits = 0
    for epoch in range(start_epoch, args.epochs):
        print('Project Name:{}'.format(args.project_name))
        if waits >= args.patience:
            print('Patience is exceeded\n')
            break
        print('\nWaits: {}'.format(waits))
        adjust_lr(epoch, args)
        if args.training_method == 'deada':
            lr_extrac = optimizer.param_groups[0]['lr']
            lr_class = optimizer.param_groups[1]['lr']
            print('feature extractor lr: ', lr_extrac, ' classifier lr: ',
                  lr_class)
            init.normal_(trainer.model.module.classifier.weight, std=0.001)
            init.constant_(trainer.model.module.classifier.bias, 0)

        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val_list, val_list)
        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))
        if (epoch + 1) % 5 == 0:
            print('Test model: \n')
            model_name = 'epoch_' + str(epoch) + '.pth.tar'
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                },
                False,
                fpath=osp.join(logs_dir, model_name))
        if is_best:
            waits = 0
        else:
            waits += 1
    # Final test
    with torch.no_grad():
        print('Test with latest model:')
        checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
        model.module.load_state_dict(checkpoint['state_dict'])
        print('best epoch: ', checkpoint['epoch'])
        metric.train(model, train_loader)
        evaluator.evaluate(test_loader,
                           query_list,
                           gallery_list,
                           metric=metric)

        print('Test with best model:')
        checkpoint = load_checkpoint(osp.join(logs_dir, 'model_best.pth.tar'))
        model.module.load_state_dict(checkpoint['state_dict'])
        print('best epoch: ', checkpoint['epoch'])
        metric.train(model, train_loader)
        evaluator.evaluate(test_loader,
                           query_list,
                           gallery_list,
                           metric=metric)
Example #26
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    print(args)
    # Create data loaders
    dataset, num_classes, source_train_loader, query_loader_2, gallery_loader_2, query_loader_3, gallery_loader_3 = \
        get_data(args.data_dir, args.source, args.target, args.height,
                 args.width, args.batch_size, args.num_instance, args.workers)

    # Create model
    MaskNet, TaskNet = models.create(args.arch,
                                     num_features=args.features,
                                     dropout=args.dropout,
                                     num_classes=num_classes)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        MaskNet.load_state_dict(checkpoint['MaskNet'])
        TaskNet.load_state_dict(checkpoint['TaskNet'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    MaskNet = nn.DataParallel(MaskNet).cuda()
    TaskNet = nn.DataParallel(TaskNet).cuda()

    # Evaluator
    evaluator = Evaluator([MaskNet, TaskNet])
    if args.evaluate:
        print("Test:")
        print("partial-iLIDS:")
        evaluator.evaluate(query_loader_2, gallery_loader_2, dataset.query_2,
                           dataset.gallery_2, args.output_feature)
        print("partial-REID:")
        evaluator.evaluate(query_loader_3, gallery_loader_3, dataset.query_3,
                           dataset.gallery_3, args.output_feature)

        return

    # Criterion
    criterion = []
    criterion.append(nn.CrossEntropyLoss().cuda())
    criterion.append(TripletLoss(margin=args.margin))
    criterion.append(nn.MSELoss(reduction='mean').cuda())  # 'reduce'/'size_average' are deprecated

    # Optimizer
    param_groups = [
        {
            'params': MaskNet.module.parameters(),
            'lr_mult': 0.1
        },
    ]
    optimizer_Mask = torch.optim.SGD(param_groups,
                                     lr=args.lr,
                                     momentum=args.momentum,
                                     weight_decay=args.weight_decay,
                                     nesterov=True)
    #
    base_param_ids = set(map(id, TaskNet.module.base.parameters()))
    new_params = [
        p for p in TaskNet.parameters() if id(p) not in base_param_ids
    ]
    param_groups = [{
        'params': TaskNet.module.base.parameters(),
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]
    optimizer_Ide = torch.optim.SGD(param_groups,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)

    # Trainer
    trainer = Trainer([MaskNet, TaskNet], criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        if epoch <= 9:
            lr = 0.0008 * (epoch / 10.0)  # linear warmup
        elif epoch <= 16:
            lr = 0.1
        elif epoch <= 23:
            lr = 0.001
        else:
            lr = 0.0001

        for g in optimizer_Mask.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        for g in optimizer_Ide.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    tmp_2 = best_2 = 0
    tmp_3 = best_3 = 0
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, [source_train_loader],
                      [optimizer_Mask, optimizer_Ide], args.batch_size)

        save_checkpoint(
            {
                'MaskNet': MaskNet.module.state_dict(),
                'TaskNet': TaskNet.module.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        if epoch == 9:
            save_checkpoint(
                {
                    'MaskNet': MaskNet.module.state_dict(),
                    'TaskNet': TaskNet.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir, 'epoch9_checkpoint.pth.tar'))

        evaluator = Evaluator([MaskNet, TaskNet])
        if epoch > 9:
            tmp_2 = evaluator.evaluate(query_loader_2, gallery_loader_2,
                                       dataset.query_2, dataset.gallery_2,
                                       args.output_feature)
            tmp_3 = evaluator.evaluate(query_loader_3, gallery_loader_3,
                                       dataset.query_3, dataset.gallery_3,
                                       args.output_feature)

        if tmp_2 > best_2:
            save_checkpoint(
                {
                    'MaskNet': MaskNet.module.state_dict(),
                    'TaskNet': TaskNet.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir,
                               'best_checkpoint_piLIDS.pth.tar'))
            best_2 = tmp_2
        print("iLIDS_best:", best_2)
        #
        if (tmp_3 > best_3):
            save_checkpoint(
                {
                    'MaskNet': MaskNet.module.state_dict(),
                    'TaskNet': TaskNet.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir, 'best_checkpoint_pREID.pth.tar'))
            best_3 = tmp_3
        print("REID_best:", best_3)
        print('\n * Finished epoch {:3d} \n'.format(epoch))
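
A note on the recurring `lr_mult` pattern in these examples: it is not a built-in optimizer feature. The key is simply stored in each parameter group, and the hand-written `adjust_lr` reads it back to scale a shared base rate, so the backbone follows the same schedule at a tenth of the step size. A minimal self-contained sketch of the convention (the toy model and rates are assumptions for illustration):

import torch
import torch.nn as nn

# toy two-part model standing in for backbone + new layers (assumption)
model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))

param_groups = [
    {'params': model[0].parameters(), 'lr_mult': 0.1},  # "backbone"
    {'params': model[1].parameters(), 'lr_mult': 1.0},  # "new layers"
]
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9)

def adjust_lr(epoch, init_lr=0.1, step_size=40):
    # decay the shared base rate, then rescale each group by its lr_mult
    lr = init_lr * (0.1 ** (epoch // step_size))
    for g in optimizer.param_groups:
        g['lr'] = lr * g.get('lr_mult', 1)

adjust_lr(0)   # backbone lr = 0.01, new-layer lr = 0.1
adjust_lr(40)  # backbone lr = 0.001, new-layer lr = 0.01
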
Example #27
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.trainer, args.re, args.workers)
    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))
    model = nn.DataParallel(model).cuda()

    # Evaluator (trainers 1 and 2 share the relation-based evaluator)
    if args.trainer == 0:
        evaluator = Evaluator(model)
    else:
        evaluator = EvaluatorRelations(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature,
                           args.rerank)
        return
    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.module.base.parameters(),
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    #optimizer = torch.optim.Adam(param_groups, lr=args.lr, betas=(0.9, 0.999), weight_decay=args.weight_decay)

    # Trainer
    if args.trainer == 0:
        trainer = Trainer(model, criterion)
    if args.trainer == 1:
        #checkpoint = load_checkpoint('/home/nvlab/CamStyle/logs/market1501road40/checkpoint.pth.tar')
        #model_dict=model.state_dict()
        #checkpoint = {k:v for k,v in checkpoint.items() if k in model_dict}
        #model_dict.update(checkpoint)
        #model.load_state_dict(model_dict)
        trainer = PairTrainer(model,
                              criterion,
                              train_loader,
                              num_classes=num_classes)
    if args.trainer == 2:
        checkpoint = load_checkpoint(
            '/home/nvlab/groupre-id/CamStyle/logs/market1501duke20/checkpoint.pth.tar'
        )
        model_dict = model.state_dict()
        checkpoint = {k: v for k, v in checkpoint.items() if k in model_dict}
        model_dict.update(checkpoint)
        #model.load_state_dict(model_dict)
        #criterion = nn.TripletMarginLoss(margin=1.0, p=2).cuda()
        trainer = TripletTrainer(model,
                                 criterion,
                                 train_loader,
                                 num_classes=num_classes)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 10
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        if args.trainer == 0:
            trainer.train(epoch, train_loader, optimizer)
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d} \n'.format(epoch))
            if epoch == args.epochs - 1:
                evaluator = Evaluator(model)
                print('Test with best model:')
                evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                   dataset.gallery, args.output_feature,
                                   args.rerank)

        if args.trainer == 1:
            trainer.train(epoch, train_loader, optimizer)
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d} \n'.format(epoch))
            evaluator = EvaluatorRelations(model)
            print('Test with best model:')
            evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                               dataset.gallery, args.output_feature,
                               args.rerank)

        if args.trainer == 2:
            trainer.train(epoch, train_loader, optimizer)
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d} \n'.format(epoch))
            if epoch == args.epochs - 1:
                evaluator = EvaluatorRelations(model)
                print('Test with best model:')
                evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                   dataset.gallery, args.output_feature,
                                   args.rerank)
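
`save_checkpoint` and `load_checkpoint` are project helpers that these examples import rather than define. A minimal sketch of what such helpers typically wrap around `torch.save`/`torch.load` (the `model_best.pth.tar` copy and the CPU mapping are assumptions, not the projects' exact implementations):

import os
import shutil
import torch

def save_checkpoint(state, is_best=False, fpath='checkpoint.pth.tar'):
    # persist the state dict; optionally keep a separate copy of the best model
    os.makedirs(os.path.dirname(fpath) or '.', exist_ok=True)
    torch.save(state, fpath)
    if is_best:
        shutil.copy(fpath, os.path.join(os.path.dirname(fpath) or '.',
                                        'model_best.pth.tar'))

def load_checkpoint(fpath):
    if not os.path.isfile(fpath):
        raise FileNotFoundError(fpath)
    # map_location='cpu' keeps loading independent of the saving device
    return torch.load(fpath, map_location='cpu')
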
Example #28
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = InceptionNet(num_channels=8,
                         num_features=args.features,
                         dropout=args.dropout,
                         num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=os.path.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(
        os.path.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)

    features, _ = extract_features(evaluator.model, test_loader)
    distmat = pairwise_distance(features,
                                dataset.query,
                                dataset.gallery,
                                metric=metric)
    evaluate_all(distmat,
                 query=dataset.query,
                 gallery=dataset.gallery,
                 cmc_topk=(1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50))

    torch.save(model, os.path.join(args.logs_dir, 'model.pt'))
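
The final block above scores the test set from the feature dict returned by `extract_features`. For reference, a minimal sketch of a `pairwise_distance`-style computation over such a dict (the (fname, pid, cam) tuple layout is assumed from the surrounding calls; the project's signature also accepts a metric):

import torch

def pairwise_distance(features, query, gallery):
    # stack features in query/gallery order; entries are (fname, pid, cam) tuples
    x = torch.stack([features[f] for f, _, _ in query])
    y = torch.stack([features[f] for f, _, _ in gallery])
    # squared Euclidean distance via ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y>
    dist = x.pow(2).sum(1, keepdim=True) + y.pow(2).sum(1) - 2 * x @ y.t()
    return dist.clamp(min=0)
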
Example #29
def main(args):
    cudnn.deterministic = False
    cudnn.benchmark = True

    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create model
    ibn_type = args.ibn
    if ibn_type == 'none':
        ibn_type = None
    model = resmap.create(args.arch,
                          ibn_type=ibn_type,
                          final_layer=args.final_layer,
                          neck=args.neck).cuda()
    num_features = model.num_features
    # print(model)
    # print('\n')

    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
    matcher = QAConv(num_features, hei, wid).cuda()

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion
    criterion = TripletLoss(matcher, args.margin).cuda()

    # Optimizer
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.base.parameters(),
        'lr': 0.1 * args.lr
    }, {
        'params': new_params,
        'lr': args.lr
    }, {
        'params': matcher.parameters(),
        'lr': args.lr
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    # Load from checkpoint
    start_epoch = 0
    base_loss = None
    final_epochs = args.max_epochs
    lr_stepped = False

    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(
                osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        base_loss = checkpoint['base_loss']
        final_epochs = checkpoint['final_epochs']
        lr_stepped = checkpoint['lr_stepped']

        if lr_stepped:
            print('Decay the learning rate by a factor of 0.1.')
            for group in optimizer.param_groups:
                group['lr'] *= 0.1

        print("=> Start epoch {} ".format(start_epoch))

    model = nn.DataParallel(model).cuda()

    # Create data loaders
    save_path = None
    if args.gs_save:
        save_path = output_dir
    dataset, num_classes, train_loader, _, _ = get_data(
        args.dataset, args.data_dir, model, matcher, save_path, args)

    if not args.evaluate:
        # Trainer
        trainer = Trainer(model, criterion, args.clip_value)
        t0 = time.time()

        # Start training
        for epoch in range(start_epoch, args.max_epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)

            if epoch == 1:
                base_loss = loss  # reference loss for the decay trigger below

            lr = list(map(lambda group: group['lr'], optimizer.param_groups))

            train_time = time.time() - t0
            epoch1 = epoch + 1

            print(
                '* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. Training time: %.0f seconds.                  \n'
                % (epoch1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            if (not lr_stepped) and (base_loss is not None) and (
                    loss < base_loss * args.step_factor):
                lr_stepped = True
                final_epochs = min(args.max_epochs, epoch1 + epoch1 // 2)
                print(
                    'Decay the learning rate by a factor of 0.1. Final epochs: %d.\n'
                    % final_epochs)
                for group in optimizer.param_groups:
                    group['lr'] *= 0.1

            save_checkpoint(
                {
                    'model': model.module.state_dict(),
                    'criterion': criterion.state_dict(),
                    'optim': optimizer.state_dict(),
                    'epoch': epoch1,
                    'final_epochs': final_epochs,
                    'base_loss': base_loss,
                    'lr_stepped': lr_stepped,
                },
                fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

            if epoch1 == final_epochs:
                print('The learning converges at epoch %d.\n' % epoch1)
                break

    json_file = osp.join(output_dir, 'results.json')

    if not args.evaluate:
        arg_dict = {
            'train_dataset': args.dataset,
            'exp_dir': args.exp_dir,
            'method': args.method,
            'sub_method': args.sub_method
        }
        with open(json_file, 'a') as f:
            json.dump(arg_dict, f)
            f.write('\n')
        train_dict = {
            'train_dataset': args.dataset,
            'loss': loss,
            'acc': acc,
            'epochs': epoch1,
            'train_time': train_time
        }
        with open(json_file, 'a') as f:
            json.dump(train_dict, f)
            f.write('\n')

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        t1 = time.time()
        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width, args.workers, args.test_fea_batch)

        if not args.do_tlift:
            testset.has_time_info = False

        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, test_rank1_tlift, test_mAP_tlift, test_dist, \
        test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(matcher, testset, test_query_loader, test_gallery_loader,
                               args.test_gal_batch, args.test_prob_batch,
                               args.tau, args.sigma, args.K, args.alpha)

        test_time = time.time() - t1

        if testset.has_time_info:
            test_dict = {
                'test_dataset': test_name,
                'rank1': test_rank1,
                'mAP': test_mAP,
                'rank1_rerank': test_rank1_rerank,
                'mAP_rerank': test_mAP_rerank,
                'rank1_tlift': test_rank1_tlift,
                'mAP_tlift': test_mAP_tlift,
                'test_time': test_time
            }
            print(
                '  %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
                ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n' %
                (test_name, test_rank1 * 100, test_mAP * 100,
                 test_rank1_rerank * 100, test_mAP_rerank * 100,
                 test_rank1_tlift * 100, test_mAP_tlift * 100))
        else:
            test_dict = {
                'test_dataset': test_name,
                'rank1': test_rank1,
                'mAP': test_mAP,
                'test_time': test_time
            }
            print('  %s: rank1=%.1f, mAP=%.1f.\n' %
                  (test_name, test_rank1 * 100, test_mAP * 100))

        with open(json_file, 'a') as f:
            json.dump(test_dict, f)
            f.write('\n')

        if args.save_score:
            test_gal_list = np.array(
                [fname for fname, _, _, _ in testset.gallery], dtype=object)
            test_prob_list = np.array(
                [fname for fname, _, _, _ in testset.query], dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method,
                                       '%s_score.mat' % test_name)
            sio.savemat(test_score_file, {
                'score': 1. - test_dist,
                'score_rerank': 1. - test_dist_rerank,
                'score_tlift': 1. - test_dist_tlift,
                'gal_time': pre_tlift_dict['gal_time'],
                'prob_time': pre_tlift_dict['prob_time'],
                'gal_list': test_gal_list,
                'prob_list': test_prob_list,
                'gal_ids': test_gal_ids,
                'prob_ids': test_prob_ids,
                'gal_cams': test_gal_cams,
                'prob_cams': test_prob_cams
            },
                        oned_as='column',
                        do_compression=True)

    test_time = time.time() - t0

    if not args.evaluate:
        print('Finished training at epoch %d, loss = %.3f, acc = %.2f%%.\n' %
              (epoch1, loss, acc * 100))
        print(
            "Total training time: %.3f sec. Average training time per epoch: %.3f sec."
            % (train_time, train_time / (epoch1 - start_epoch)))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')
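
Unlike the fixed-step schedules elsewhere in this collection, this example decays adaptively: the loss after epoch 1 becomes `base_loss`, the learning rate drops 10x the first time the loss falls below `base_loss * step_factor`, and training is then allowed half again as many epochs before stopping. A minimal sketch of that decision rule in isolation (the default values are assumptions):

def step_decision(epoch1, loss, base_loss, lr_stepped, final_epochs,
                  step_factor=0.5, max_epochs=60):
    # decay once, the first time the loss falls below base_loss * step_factor
    if (not lr_stepped) and base_loss is not None \
            and loss < base_loss * step_factor:
        lr_stepped = True
        # allow half again as many epochs as it took to reach this point
        final_epochs = min(max_epochs, epoch1 + epoch1 // 2)
    return lr_stepped, final_epochs
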
Example #30
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError('Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print('Resuming from a checkpoint fine-tuned on another dataset...\n')
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    # if args.evaluate: return

    # Criterion
    criterion = [
        # TripletLoss(args.margin, args.num_instances, isAvg=True, use_semi=True).cuda(),
        SortedTripletLoss(args.margin, isAvg=True).cuda(),
        # HoughTripletLoss(args.margin, args.num_instances, isAvg=True, use_semi=True).cuda(),
        # None,
        None, None, None
    ]


    # Optimizer
    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr
    )


    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height,args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    evaluator = Evaluator(model, print_freq=args.print_freq)
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    st_model = ST_Model(tgt_dataset.meta['num_cameras'])
    same = None
    # train_loader2 = None
    best_mAP = 0

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # extract the source data's features
            source_features, _ = extract_features(model, src_extfeat_loader, print_freq=args.print_freq)
            # align feature order with src_dataset.train
            source_features = torch.cat([source_features[f].unsqueeze(0) for f, _, _, _ in src_dataset.train], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n+1))
        target_features, tarNames = extract_features(model, tgt_extfeat_loader, print_freq=args.print_freq)
        # align feature order with tgt_dataset.trainval
        target_features = torch.cat([target_features[f].unsqueeze(0) for f, _, _, _ in tgt_dataset.trainval], 0)
        # target_real_label = np.asarray([tarNames[f].unsqueeze(0) for f, _, _, _ in tgt_dataset.trainval])

        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features, target_features, lambda_value=args.lambda_value)

        ranking = np.argsort(rerank_dist)[:, 1:]

        if iter_n != 0:
            st_dist = np.zeros(rerank_dist.shape)
            for i, (_, _, c1, t1) in enumerate(tgt_dataset.trainval):
                for j, (_, _, c2, t2) in enumerate(tgt_dataset.trainval):
                    if not same.in_peak(c1, c2, t1, t2, 0.25):
                        st_dist[i, j] = 1

            rerank_dist = rerank_dist + st_dist * 10

        # if iter_n > 0:
        #     rerank_dist = st_model.apply(rerank_dist, tgt_dataset.trainval, tgt_dataset.trainval)

        cluster = HDBSCAN(metric='precomputed', min_samples=10)
        # cluster images to build the training set for this iteration
        clusterRes = cluster.fit(rerank_dist.astype(np.float64))
        labels, label_num = clusterRes.labels_, clusterRes.labels_.max() + 1
        centers = np.zeros((label_num, target_features.shape[1]))
        nums = [0] * label_num  # per-cluster sample counts
        print('clusters num =', label_num)

        # generate new dataset
        new_dataset = []
        index = -1
        for (fname, _, cam, timestamp), label in zip(tgt_dataset.trainval, labels):
            index += 1
            if label == -1: continue
            # with this tuple layout, trainer.py's _parsing_input and the sampler need no changes
            new_dataset.append((fname, label, cam, timestamp))
            centers[label] += target_features[index]
            nums[label] += 1
        print('Iteration {} has {} training images'.format(iter_n+1, len(new_dataset)))

        # learn ST model
        # if iter_n % 2 == 0:
        # if iter_n == 0:
            # cluster = HDBSCAN(metric='precomputed', min_samples=10)
            # # select & cluster images as training set of this epochs
            # clusterRes = cluster.fit(rerank_dist.astype(np.float64))
            # labels, label_num = clusterRes.labels_, clusterRes.labels_.max() + 1
            # centers = np.zeros((label_num, target_features.shape[1]))
            # nums = [0] * target_features.shape[1]
            # print('clusters num =', label_num)
            #
            # # generate new dataset
            # new_dataset = []
            # index = -1
            # for (fname, _, cam, timestamp), label in zip(tgt_dataset.trainval, labels):
            #     index += 1
            #     if label == -1: continue
            #     # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0
            #     new_dataset.append((fname, label, cam, timestamp))
            #     centers[label] += target_features[index]
            #     nums[label] += 1
            # print('Iteration {} have {} training images'.format(iter_n + 1, len(new_dataset)))

            # same, _ = st_model.fit(new_dataset)
        # st_model.fit(tgt_dataset.trainval)
        same, _ = st_model.fit(new_dataset)

        train_loader = DataLoader(
            Preprocessor(new_dataset, root=tgt_dataset.images_dir, transform=train_transformer),
            batch_size=args.batch_size, num_workers=4,
            sampler=RandomIdentitySampler(new_dataset, args.num_instances),
            pin_memory=True, drop_last=True
        )

        # spatio-temporal plausibility check between samples i and j
        def st_filter(i, j):
            _, _, c1, t1 = tgt_dataset.trainval[i]
            _, _, c2, t2 = tgt_dataset.trainval[j]
            return st_model.val(c1, c2, t1, t2) > 0.01

        # if iter_n == 0:
        #     ranking = np.argsort(rerank_dist)[:, 1:]

        # dukemtmc: cluster_size = 23.535612535612536
        # market1501:
        cluster_size = 17.22503328894807

        must_conn = int(cluster_size / 2)
        might_conn = int(cluster_size * 2)

        length = len(tgt_dataset.trainval)
        pos = [[] for _ in range(length)]
        neg = [[] for _ in range(length)]
        for i in range(length):
            for j_ in range(might_conn):
                j = ranking[i][j_]
                if j_ < must_conn and i in ranking[j][:must_conn]:
                    pos[i].append(j)
                elif i in ranking[j][:might_conn] and st_filter(i, j):
                    pos[i].append(j)
                else:
                    neg[i].append(j)
            # pos[i] = pos[i][-1:]
            # neg[i] = neg[i][:1]

        SP, SF, DP, DF = 0, 0, 0, 0
        for i in range(length):
            for j in pos[i]:
                if tgt_dataset.trainval[i][1] == tgt_dataset.trainval[j][1]:
                    SP += 1
                else:
                    SF += 1
            for j in neg[i]:
                if tgt_dataset.trainval[i][1] == tgt_dataset.trainval[j][1]:
                    DP += 1
                else:
                    DF += 1
        print('stat: %.1f %.1f %.3f, %.3f' % ((SP + SF) / length, (DP + DF) / length, SP / (SP + SF), DF / (DP + DF)))

        train_loader2 = DataLoader(
            Preprocessor(tgt_dataset.trainval, root=tgt_dataset.images_dir, transform=train_transformer),
            batch_size=args.batch_size, num_workers=4,
            # sampler=RandomIdentitySampler(new_dataset, args.num_instances),
            # shuffle=True,
            sampler=TripletSampler(tgt_dataset.trainval, pos, neg),
            pin_memory=True, drop_last=True
        )

        # learn visual model
        for i in range(label_num):
            centers[i] /= nums[i]
        criterion[3] = ClassificationLoss(normalize(centers, axis=1)).cuda()

        classOptimizer = torch.optim.Adam([
            {'params': model.parameters()},
            {'params': criterion[3].classifier.parameters(), 'lr': 1e-3}
        ], lr=args.lr)

        # trainer = HoughTrainer(model, st_model, train_loader, criterion, classOptimizer)
        trainer = ClassificationTrainer(model, train_loader, criterion, classOptimizer)
        trainer2 = Trainer(model, train_loader2, criterion, optimizer)

        for epoch in range(args.epochs):
            trainer.train(epoch)
            if epoch % 8 == 0:
                trainer2.train(epoch)
            # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
        if rank_score.map > best_mAP:
            best_mAP = rank_score.map
            save_checkpoint({
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1, 'best_top1': rank_score.market1501[0],
                }, True, fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1, 'best_top1': rank_score.market1501[0],
        }, False, fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return (rank_score.map, rank_score.market1501[0])
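
The adaptation loop above turns unlabeled target images into a training set by clustering re-ranked distances with HDBSCAN and treating cluster ids as pseudo identities. A stripped-down sketch of just that step, using plain Euclidean distances (the real loop layers `re_ranking` and the spatio-temporal filter on top of this):

import numpy as np
from hdbscan import HDBSCAN
from sklearn.metrics import pairwise_distances

def pseudo_label(features, samples, min_samples=10):
    """features: (N, D) array; samples: list of (fname, _, cam, t) tuples."""
    dist = pairwise_distances(features).astype(np.float64)
    labels = HDBSCAN(metric='precomputed',
                     min_samples=min_samples).fit(dist).labels_
    # keep only clustered samples; label -1 marks noise in HDBSCAN
    return [(f, int(l), c, t)
            for (f, _, c, t), l in zip(samples, labels) if l != -1]
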
Example #31
File: train.py Project: miraclebiu/reid5
def main(args):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    #### load configuration from the yml file into args, then merge from args into loss_config
    cfg_file = osp.join('.','cfgs', args.model, 'stage'+str(args.stage)+'.yml')
    if os.path.exists(cfg_file):
        print('cfg_file :',cfg_file)
        with open(cfg_file, 'r') as f:
            yaml_cfg = edict(yaml.safe_load(f))
        merge_a_into_b(yaml_cfg, args.__dict__)
    else:
        print('cfg file does not exist, using default params')
    check_cfg(args)

    #### create logs dir and save log.txt
    logs_base_dir = osp.join('.','logs',args.dataset,args.model)
    logs_dir = osp.join(logs_base_dir,'stage'+str(args.stage))
    print("log_dir : ",logs_dir)

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))
        print('log start!')
    print("Current experiments parameters")
    pprint.pprint(args)

    

    ##### Create data loaders
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval, args.sample_instance)

    ##### Create model
    model = model_creator(args.model, args.depth, pretrained=True,
                          num_features=args.num_features, norm=args.norm,
                          dropout=args.dropout, num_classes=num_classes,
                          add_l3_softmax=False, test_stage=False)
    #### resume models
    start_epoch = curr_best_map = 0
    if args.resume:
        if args.resume_path:
            ckpt_file = args.resume_path
        else:
            ckpt_dir = osp.join(logs_base_dir,'stage'+str(args.stage-1))
            ckpt_file = osp.join(ckpt_dir, 'model_best.pth.tar')


        if not os.path.exists(ckpt_file):
            raise RuntimeError(ckpt_file + ': resume model does not exist!')
        else:
            print("resume model from", ckpt_file)
            checkpoint = load_checkpoint(ckpt_file)
            model.load_state_dict(checkpoint['state_dict'])
            curr_best_map = checkpoint['best_map']
            print("=> Start epoch {}  best map {:.1%}"
                  .format(start_epoch, curr_best_map))
        args.frozen_sublayer = False

    model = nn.DataParallel(model).cuda()

    ##### Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    ##### Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Before Test the model:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)

    merge_a_into_b(args,loss_config)


    print("Current loss config")
    print(loss_config)

    print("\n manual check configs to continue experiments !!!\n")
    pdb.set_trace()


    ##### create loss
    criterion = loss_creator(loss_config).cuda()

    # frozen_layerName = ['conv1', 'bn1', 'relu','maxpool', 'layer1', 'layer2',
    #                     'layer3','layer3_0','layer3_1','layer3_2',
    #                     'layer4','layer4_0','layer4_1','layer4_2']
    # frozen_layerName = ['conv1', 'bn1', 'relu','maxpool', 'layer1', 'layer2',
    #                     'layer3','layer3_0','layer3_1','layer3_2',]
    frozen_layerName = ['conv1', 'bn1', 'relu','maxpool', 'layer1', 'layer2',]
    ##### Optimizer
    if args.frozen_sublayer:
        # locate the module that owns the layers to freeze
        if hasattr(model.module, 'base'):
            frozen_root = model.module.base
        elif hasattr(model.module, frozen_layerName[0]):
            frozen_root = model.module
        else:
            raise RuntimeError('frozen_sublayer is True, but no layers to freeze were found!')

        base_params_set = set()
        for subLayer in frozen_layerName:
            if hasattr(frozen_root, subLayer):
                print('frozen layer:', subLayer)
                single_module_param = getattr(frozen_root, subLayer).parameters()
                base_params_set |= set(map(id, single_module_param))
            else:
                print("current model doesn't have", subLayer)

        new_params = [p for p in model.parameters() if
                      id(p) not in base_params_set]

        base_params = [p for p in model.parameters() if
                       id(p) in base_params_set]
        param_groups = [
            {'params': base_params, 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}
        ]
    else:
        param_groups = model.parameters()

    if args.optimizer =="sgd":
        optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    else :
        optimizer = torch.optim.Adam(param_groups, lr=args.lr ,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    if args.optimizer == 'sgd':
        step_epoch = 80
        scheduler = StepLR(optimizer, step_size=step_epoch, gamma=0.1)
    else:
        #### the original lr is multiplied by 10 at the start, when creating the optimizer
        step_epoch = 90
        lambda1 = lambda epoch: 0.1 * 0.1 ** ((epoch - step_epoch) / float(step_epoch))
        scheduler = LambdaLR(optimizer, lr_lambda=lambda1)

    def save_model_when_running(curr_best_map):
        metric.train(model,train_loader)
        top_map = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        is_best = top_map > curr_best_map
        curr_best_map = max(top_map, curr_best_map)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_map': top_map,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        return curr_best_map



    evaluate_steps = [i for i in range(1, step_epoch * 2) if i % 10 == 0] + \
                     [i for i in range(step_epoch * 2, args.epochs + 1, 3)]
    # Start training
    for epoch in range(start_epoch, args.epochs):
        scheduler.step()
        print('Current epoch lr:', optimizer.param_groups[0].get('lr'))
        trainer.train(epoch, train_loader, optimizer)
        if epoch in evaluate_steps:
            curr_best_map = save_model_when_running(curr_best_map)


    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
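
The two scheduler branches above behave quite differently: `StepLR` drops the rate 10x every `step_epoch` epochs, while the `LambdaLR` branch decays smoothly and its 0.1 prefactor undoes the 10x-inflated Adam starting rate mentioned in the comment. A minimal sketch comparing them on a dummy parameter (the rates and epoch counts are assumptions):

import torch
from torch.optim.lr_scheduler import StepLR, LambdaLR

p = [torch.nn.Parameter(torch.zeros(1))]
opt_sgd = torch.optim.SGD(p, lr=0.1)
opt_adam = torch.optim.Adam(p, lr=0.1)  # started at 10x the intended rate

step_epoch = 90
sched_sgd = StepLR(opt_sgd, step_size=80, gamma=0.1)
sched_adam = LambdaLR(
    opt_adam, lr_lambda=lambda e: 0.1 * 0.1 ** ((e - step_epoch) / step_epoch))

for epoch in range(3):
    # ... one training epoch, including optimizer.step(), would run here ...
    sched_sgd.step()
    sched_adam.step()
    print(epoch, opt_sgd.param_groups[0]['lr'], opt_adam.param_groups[0]['lr'])
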
Example #32
File: main.py Project: Bodomit/CamStyle
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.camstyle, args.re,
                 0 if args.debug else args.workers,
                 camstyle_path = args.camstyle_path)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model, args.logs_dir)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.output_feature, args.rerank)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.module.base.parameters(),
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    if args.camstyle == 0:
        trainer = Trainer(model, criterion)
    else:
        trainer = CamStyleTrainer(model, criterion, camstyle_loader)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model, args.logs_dir)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.output_feature, args.rerank)
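
`CamStyleTrainer` consumes the extra `camstyle_loader` alongside the real training loader; its internals live in the project. Purely as a hypothetical sketch of the mechanics such a trainer needs, here is a helper that yields one auxiliary batch per real batch and restarts the auxiliary loader when it runs out (not the project's actual implementation):

class AuxBatchSource:
    """Hypothetical helper: endless batches from a finite DataLoader."""

    def __init__(self, loader):
        self.loader = loader
        self.it = iter(loader)

    def next(self):
        try:
            return next(self.it)
        except StopIteration:
            self.it = iter(self.loader)  # restart after each full pass
            return next(self.it)
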
Example #33
    def train(self,
              train_data,
              step,
              epochs=70,
              step_size=55,
              init_lr=0.1,
              dropout=0.5):
        """ create model and dataloader """
        if self.model:
            model = self.model
        else:
            model = models.create(self.model_name,
                                  dropout=self.dropout,
                                  num_classes=self.num_classes,
                                  mode=self.mode)
            model = nn.DataParallel(model).cuda()
        dataloader = self.get_dataloader(train_data, training=True)

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, model.module.base.parameters()))

        # we fixed the first three blocks to save GPU memory
        # base_params_need_for_grad = filter(lambda p: p.requires_grad, model.module.base.parameters())

        # params of the new layers
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        # here the backbone keeps the full learning rate (lr_mult left at 1.0)
        param_groups = [
            {
                'params': model.module.base.parameters(),
                'lr_mult': 1.0
            },
            # {'params': base_params_need_for_grad, 'lr_mult': 0.1},
            {
                'params': new_params,
                'lr_mult': 1.0
            }
        ]

        criterion = []
        if self.num_classes == 0:
            criterion.append(
                TripletLoss(margin=0.3,
                            num_instances=self.num_instances).cuda())
            criterion.append(
                TripletLoss(margin=0.3,
                            num_instances=self.num_instances).cuda())
            trainer = Trainer(model, criterion)
        else:
            criterion.append(nn.CrossEntropyLoss().cuda())
            criterion.append(nn.MSELoss().cuda())
            trainer = DistillTrainer(model, self.model_distill, criterion)
        optimizer = torch.optim.SGD(param_groups,
                                    lr=init_lr,
                                    momentum=0.9,
                                    weight_decay=5e-4,
                                    nesterov=True)

        # change the learning rate by step
        def adjust_lr(epoch, step_size):
            lr = init_lr / (10**(epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

            if epoch % step_size == 0:
                print("Epoch {}, current lr {}".format(epoch, lr))

        # def adjust_lr(epoch):
        #     if epoch <=7:
        #         lr = args.lr
        #     elif epoch <= 14:
        #         lr = 0.3 * args.lr
        #     else:
        #         lr = 0.1 * args.lr
        #     for g in optimizer.param_groups:
        #         g['lr'] = lr * g.get('lr_mult', 1)
        """ main training process """
        for epoch in range(epochs):
            adjust_lr(epoch, step_size)
            trainer.train(epoch, dataloader, optimizer, print_freq=20)
        self.model = model
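
In the distillation branch above, the two criteria divide the work: cross-entropy supervises the student's logits while MSE pulls its features toward the frozen teacher (`self.model_distill`). A minimal sketch of one such training step (the assumption that the student returns a (feature, logits) pair is for illustration; `DistillTrainer` may differ):

import torch
import torch.nn as nn

def distill_step(student, teacher, images, labels, optimizer, alpha=1.0):
    ce, mse = nn.CrossEntropyLoss(), nn.MSELoss()
    teacher.eval()
    with torch.no_grad():
        t_feat = teacher(images)        # teacher features (assumed interface)
    s_feat, s_logits = student(images)  # assumed: (feature, logits) pair
    loss = ce(s_logits, labels) + alpha * mse(s_feat, t_feat)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
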
Example #34
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval, args.batch_id)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch,
                          num_features=1024,
                          dropout=args.dropout,
                          num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
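
The `TripletLoss(margin=args.margin)` used here is the codebase's batch-hard variant, which mines positives and negatives inside the identity-balanced batches produced with `num_instances` images per identity. As a point of reference, torch also ships a plain triplet loss for pre-formed triplets; a minimal usage sketch:

import torch
import torch.nn as nn

criterion = nn.TripletMarginLoss(margin=0.5, p=2)

anchor = torch.randn(32, 128, requires_grad=True)  # anchor embeddings
positive = torch.randn(32, 128)  # same identity as each anchor
negative = torch.randn(32, 128)  # different identity

loss = criterion(anchor, positive, negative)
loss.backward()
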
Example #35
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, train_loader_head, train_loader_upper, train_loader_lower,\
    val_loader, val_loader_head, val_loader_upper, val_loader_lower,\
    test_loader, test_loader_head, test_loader_upper, test_loader_lower= \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)
    # create the full/head/upper/lower models, then adjust the optimizers accordingly
    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)
    model_head = models.create(args.arch,
                               num_features=args.features,
                               dropout=args.dropout,
                               num_classes=num_classes)
    model_upper = models.create(args.arch,
                                num_features=args.features,
                                dropout=args.dropout,
                                num_classes=num_classes)
    model_lower = models.create(args.arch,
                                num_features=args.features,
                                dropout=args.dropout,
                                num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    # if args.resume:
    #     checkpoint = load_checkpoint(args.resume)
    #     model.load_state_dict(checkpoint['state_dict'])
    #     start_epoch = checkpoint['epoch']
    #     best_top1 = checkpoint['best_top1']
    #     print("=> Start epoch {}  best top1 {:.1%}"
    #           .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()
    model_head = nn.DataParallel(model_head).cuda()
    model_upper = nn.DataParallel(model_upper).cuda()
    model_lower = nn.DataParallel(model_lower).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, model_head, model_upper, model_lower)

    #    if args.evaluate:
    #        metric.train(model, train_loader)
    #        print("Validation:")
    #        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
    #        print("Test:")
    #        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
    #        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()
    criterion_head = nn.CrossEntropyLoss().cuda()
    criterion_upper = nn.CrossEntropyLoss().cuda()
    criterion_lower = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    if hasattr(model_head.module, 'base'):
        base_param_ids_head = set(map(id, model_head.module.base.parameters()))
        new_params_head = [
            p for p in model_head.parameters()
            if id(p) not in base_param_ids_head
        ]
        param_groups_head = [{
            'params': model_head.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params_head,
            'lr_mult': 1.0
        }]
    else:
        param_groups_head = model_head.parameters()
    optimizer_head = torch.optim.SGD(param_groups_head,
                                     lr=args.lr,
                                     momentum=args.momentum,
                                     weight_decay=args.weight_decay,
                                     nesterov=True)

    if hasattr(model_upper.module, 'base'):
        base_param_ids_upper = set(
            map(id, model_upper.module.base.parameters()))
        new_params_upper = [
            p for p in model_upper.parameters()
            if id(p) not in base_param_ids_upper
        ]
        param_groups_upper = [{
            'params': model_upper.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params_upper,
            'lr_mult': 1.0
        }]
    else:
        param_groups_upper = model_upper.parameters()
    optimizer_upper = torch.optim.SGD(param_groups_upper,
                                      lr=args.lr,
                                      momentum=args.momentum,
                                      weight_decay=args.weight_decay,
                                      nesterov=True)

    if hasattr(model_lower.module, 'base'):
        base_param_ids_lower = set(
            map(id, model_lower.module.base.parameters()))
        new_params_lower = [
            p for p in model_lower.parameters()
            if id(p) not in base_param_ids_lower
        ]
        param_groups_lower = [{
            'params': model_lower.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params_lower,
            'lr_mult': 1.0
        }]
    else:
        param_groups_lower = model_lower.parameters()
    optimizer_lower = torch.optim.SGD(param_groups_lower,
                                      lr=args.lr,
                                      momentum=args.momentum,
                                      weight_decay=args.weight_decay,
                                      nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)
    trainer_head = Trainer(model_head, criterion_head)
    trainer_upper = Trainer(model_upper, criterion_upper)
    trainer_lower = Trainer(model_lower, criterion_lower)

    # Schedule learning rate: the same step decay, applied to each optimizer
    def adjust_lr(epoch, opt):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in opt.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        for opt in (optimizer, optimizer_head, optimizer_upper, optimizer_lower):
            adjust_lr(epoch, opt)
        trainer.train(epoch, train_loader, optimizer)
        trainer_head.train(epoch, train_loader_head, optimizer_head)
        trainer_upper.train(epoch, train_loader_upper, optimizer_upper)
        trainer_lower.train(epoch, train_loader_lower, optimizer_lower)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val_loader_head,
                                  val_loader_upper, val_loader_lower,
                                  dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'),
            opath='model_best.pth.tar')

        save_checkpoint(
            {
                'state_dict': model_head.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint_head.pth.tar'),
            opath='model_head_best.pth.tar')

        save_checkpoint(
            {
                'state_dict': model_upper.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint_upper.pth.tar'),
            opath='model_upper_best.pth.tar')

        save_checkpoint(
            {
                'state_dict': model_lower.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint_lower.pth.tar'),
            opath='model_lower_best.pth.tar')

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    checkpoint_head = load_checkpoint(
        osp.join(args.logs_dir, 'model_head_best.pth.tar'))
    checkpoint_upper = load_checkpoint(
        osp.join(args.logs_dir, 'model_upper_best.pth.tar'))
    checkpoint_lower = load_checkpoint(
        osp.join(args.logs_dir, 'model_lower_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    model_head.module.load_state_dict(checkpoint_head['state_dict'])
    model_upper.module.load_state_dict(checkpoint_upper['state_dict'])
    model_lower.module.load_state_dict(checkpoint_lower['state_dict'])
    metric.train(model, train_loader)
    metric.train(model_head, train_loader_head)
    metric.train(model_upper, train_loader_upper)
    metric.train(model_lower, train_loader_lower)

    evaluator.evaluate(test_loader, test_loader_head, test_loader_upper,
                       test_loader_lower, dataset.query, dataset.gallery,
                       metric)
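
Example #35 wires up the same model/optimizer/trainer stack four times for the full-body, head, upper, and lower streams. A hedged sketch of how that duplication could be factored into one helper, reusing the example's own names (`models.create`, `Trainer`, and the args object are assumed from its imports):

import torch
import torch.nn as nn

def build_stream(args, num_classes):
    # one stream = model + lr_mult param groups + SGD optimizer + trainer
    model = nn.DataParallel(models.create(
        args.arch, num_features=args.features, dropout=args.dropout,
        num_classes=num_classes)).cuda()
    if hasattr(model.module, 'base'):
        base_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if id(p) not in base_ids]
        groups = [{'params': model.module.base.parameters(), 'lr_mult': 0.1},
                  {'params': new_params, 'lr_mult': 1.0}]
    else:
        groups = model.parameters()
    optimizer = torch.optim.SGD(groups, lr=args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay, nesterov=True)
    return model, optimizer, Trainer(model, nn.CrossEntropyLoss().cuda())

# streams = {name: build_stream(args, num_classes)
#            for name in ('full', 'head', 'upper', 'lower')}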