Beispiel #1
0
def main_worker(args):
    """Evaluate a checkpointed model on the target-domain test split."""
    cudnn.benchmark = True

    # Mirror stdout into a log file stored next to the checkpoint.
    output_dir = osp.dirname(args.resume)
    sys.stdout = Logger(osp.join(output_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Build the target-domain test loader.
    dataset_target, test_loader_target = get_data(
        args.dataset_target, args.data_dir, args.height,
        args.width, args.batch_size, args.workers)

    # Build the backbone as a pure feature extractor (num_classes=0).
    net = models.create(
        args.arch, pretrained=False, cut_at_pooling=args.cut_at_pooling,
        num_features=args.features, dropout=args.dropout, num_classes=0)
    net.cuda()
    net = nn.DataParallel(net)

    # Restore weights from the checkpoint given via --resume.
    ckpt = load_checkpoint(args.resume)
    copy_state_dict(ckpt['state_dict'], net)

    # Run CMC/mAP evaluation on the target domain.
    evaluator = Evaluator(net)
    print("Test on the target domain of {}:".format(args.dataset_target))
    evaluator.evaluate(test_loader_target, dataset_target.query,
                       dataset_target.gallery, cmc_flag=True,
                       rerank=args.rerank)
    return
Beispiel #2
0
def main_worker(args):
    """Load a checkpoint and run query/gallery evaluation on the test set."""
    cudnn.benchmark = True

    # Tee stdout into a log file placed beside the checkpoint being tested.
    ckpt_dir = osp.dirname(args.resume)
    sys.stdout = Logger(osp.join(ckpt_dir, 'log_test.txt'))

    # Data loader for the evaluation dataset.
    dataset_target, test_loader_target = get_data(
        args.dataset, args.data_dir, args.height,
        args.width, args.batch_size, args.workers)

    # Feature extractor without a classification head (num_classes=0).
    net = models.create(args.arch, num_features=args.features, num_classes=0)
    net.cuda()
    net = nn.DataParallel(net)

    # Here the checkpoint is copied directly — presumably a raw state dict
    # rather than a {'state_dict': ...} wrapper; verify against the saver.
    state = load_checkpoint(args.resume)
    copy_state_dict(state, net)

    # Evaluate on the query/gallery splits.
    evaluator = Evaluator(net)
    print("Testing...")
    evaluator.evaluate(test_loader_target,
                       dataset_target.query,
                       dataset_target.gallery,
                       cmc_flag=True,
                       args=args,
                       rerank=args.rerank)
    return
Beispiel #3
0
def create_model(args, classes):
    """Build a DataParallel backbone and seed it with weights from args.init."""
    net = models.create(args.arch, num_features=args.features, norm=False,
                        dropout=args.dropout, num_classes=classes)
    net.cuda()
    net = nn.DataParallel(net)

    # Initialize from a pretrained checkpoint (e.g. source-domain pre-training).
    weights = load_checkpoint(args.init)
    copy_state_dict(weights['state_dict'], net)

    return net
def main_worker(args):
    """Pre-train a ReID backbone on the source domain.

    Trains on the source dataset (the target loader is also advanced each
    epoch and passed to the trainer), evaluates on the source test split
    every ``args.eval_step`` epochs, checkpoints the best-mAP model, and
    finally reports target-domain performance. With ``--evaluate`` it only
    evaluates on both domains and returns.
    """
    # start_epoch / best_mAP are module-level state, overwritten on --resume.
    global start_epoch, best_mAP

    cudnn.benchmark = True

    # Log to logs_dir while training; next to the checkpoint when evaluating.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    else:
        log_dir = osp.dirname(args.resume)
        sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    iters = args.iters if (args.iters>0) else None
    dataset_source, num_classes, train_loader_source, test_loader_source = \
        get_data(args.dataset_source, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, args.num_instances, iters)

    # Target loader gets 0 where num_instances goes — presumably disables
    # identity-balanced sampling; get_data is defined elsewhere, confirm there.
    dataset_target, _, train_loader_target, test_loader_target = \
        get_data(args.dataset_target, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, 0, iters)

    # Create model with a source-domain classification head.
    model = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=num_classes)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint: restores weights plus training progress.
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        copy_state_dict(checkpoint['state_dict'], model)
        start_epoch = checkpoint['epoch']
        best_mAP = checkpoint['best_mAP']
        print("=> Start epoch {}  best mAP {:.1%}"
              .format(start_epoch, best_mAP))

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test on source domain:")
        evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True, rerank=args.rerank)
        print("Test on target domain:")
        evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
        return

    # One optimizer param group per trainable tensor, all with identical
    # lr / weight_decay; frozen parameters are skipped.
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
    optimizer = torch.optim.Adam(params)
    lr_scheduler = WarmupMultiStepLR(optimizer, args.milestones, gamma=0.1, warmup_factor=0.01, warmup_iters=args.warmup_step)

    # Trainer
    trainer = PreTrainer(model, num_classes, margin=args.margin)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        # NOTE(review): scheduler stepped at epoch start (pre-1.1 PyTorch
        # convention); reordering it would shift the effective LR schedule.
        lr_scheduler.step()
        train_loader_source.new_epoch()
        train_loader_target.new_epoch()

        trainer.train(epoch, train_loader_source, train_loader_target, optimizer,
                    train_iters=len(train_loader_source), print_freq=args.print_freq)

        # Periodic evaluation on the SOURCE test split; best-mAP checkpointing.
        if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):

            _, mAP = evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True)

            is_best = mAP > best_mAP
            best_mAP = max(mAP, best_mAP)
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_mAP': best_mAP,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d}  source mAP: {:5.1%}  best: {:5.1%}{}\n'.
                  format(epoch, mAP, best_mAP, ' *' if is_best else ''))

    # Final report on the target domain with the last (not best) weights.
    print("Test on target domain:")
    evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
Beispiel #5
0
def main(args):
    """Train/evaluate a cascaded verification model for person re-ID.

    The model is a base CNN feature extractor plus ``args.grp_num`` per-group
    embedding heads (``VNetEmbed``) wrapped in ``VNetExtension``. Trains with
    cross-entropy + triplet losses, validates each epoch, checkpoints the
    best top-1 model, and finally tests the best checkpoint.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (384, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    base_model = models.create(args.arch,
                               num_features=1024,
                               cut_at_pooling=True,
                               dropout=args.dropout,
                               num_classes=args.features)

    grp_num = args.grp_num
    # One binary-verification embedding head per feature group.
    # BUGFIX: use integer division — on Python 3, 2048 / grp_num is a float,
    # which would hand VNetEmbed a non-integer feature dimension.
    embed_model = [
        VNetEmbed(instances_num=args.num_instances,
                  feat_num=(2048 // grp_num),
                  num_classes=2,
                  drop_ratio=args.dropout).cuda() for i in range(grp_num)
    ]

    base_model = nn.DataParallel(base_model).cuda()

    model = VNetExtension(
        instances_num=args.num_instances,
        base_model=base_model,
        embed_model=embed_model,
        alpha=args.alpha)

    if args.retrain:
        if args.evaluate_from:
            print('loading trained model...')
            checkpoint = load_checkpoint(args.evaluate_from)
            model.load_state_dict(checkpoint['state_dict'])

        else:
            # Load base and embed parts separately, remapping key prefixes
            # from the pretrained checkpoint's naming to this model's.
            print('loading base part of pretrained model...')
            checkpoint = load_checkpoint(args.retrain)
            copy_state_dict(checkpoint['state_dict'],
                            base_model,
                            strip='base_model.',
                            replace='')
            print('loading embed part of pretrained model...')
            if grp_num > 1:
                # Grouped checkpoints store per-group bn_i / classifier_i keys.
                for i in range(grp_num):
                    copy_state_dict(checkpoint['state_dict'],
                                    embed_model[i],
                                    strip='embed_model.bn_' + str(i) + '.',
                                    replace='bn.')
                    copy_state_dict(checkpoint['state_dict'],
                                    embed_model[i],
                                    strip='embed_model.classifier_' + str(i) +
                                    '.',
                                    replace='classifier.')
            else:
                copy_state_dict(checkpoint['state_dict'],
                                embed_model[0],
                                strip='module.embed_model.',
                                replace='')

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    best_mAP = 0

    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    # Evaluator: the embed heads output (N, 2) logits; softmax column 0 is
    # used as the pairwise distance score.
    evaluator = CascadeEvaluator(
        base_model,
        embed_model,
        embed_dist_fn=lambda x: F.softmax(x, dim=1).data[:, 0])

    if args.evaluate:
        metric.train(model, train_loader)
        if args.evaluate_from:
            print('loading trained model...')
            checkpoint = load_checkpoint(args.evaluate_from)
            model.load_state_dict(checkpoint['state_dict'])
        print("Test:")
        evaluator.evaluate(test_loader,
                           dataset.query,
                           dataset.gallery,
                           args.alpha,
                           metric,
                           rerank_topk=args.rerank,
                           dataset=args.dataset)
        return

    # Criterion: identity cross-entropy plus triplet loss.
    criterion = nn.CrossEntropyLoss().cuda()
    criterion2 = TripletLoss(margin=args.margin).cuda()

    # Base net trains at 1x lr, embedding heads at 10x (lr_mult is consumed
    # by adjust_lr below).
    new_params = [z for z in model.embed]
    param_groups = [
                    {'params': model.base.module.base.parameters(), 'lr_mult': 1.0}] + \
                   [{'params': new_params[i].parameters(), 'lr_mult': 10.0} for i in range(grp_num)]

    # Optimizer
    optimizer = torch.optim.Adam(param_groups,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = DCDSBase(model, criterion, criterion2, args.alpha, grp_num)

    # Schedule learning rate: step decay by 0.1 every `step_size` epochs,
    # scaled per group by lr_mult.
    def adjust_lr(epoch):
        step_size = args.ss if args.arch == 'inception' else 20
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        return lr

    # Start training
    for epoch in range(start_epoch, args.epochs):
        lr = adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, lr, warm_up=False)
        top1, mAP = evaluator.evaluate(val_loader,
                                       dataset.val,
                                       dataset.val,
                                       args.alpha,
                                       rerank_topk=args.rerank,
                                       second_stage=True,
                                       dataset=args.dataset)

        is_best = top1 > best_top1
        # BUGFIX: best_top1 was never updated, so is_best fired every epoch
        # and the checkpoint recorded a stale best score.
        best_top1 = max(top1, best_top1)
        best_mAP = max(mAP, best_mAP)
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  mAP: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, mAP, best_mAP, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader,
                       dataset.query,
                       dataset.gallery,
                       args.alpha,
                       metric,
                       rerank_topk=args.rerank,
                       dataset=args.dataset)
Beispiel #6
0
    def __init__(self, opt, source_classes):
        """Build a CycleGAN-style image-translation model paired with two
        ReID backbones: net_A classifies source identities, net_B classifies
        source identities plus opt.num_clusters target clusters.

        Args:
            opt: parsed option namespace (consumed by BaseModel and networks.*).
            source_classes: number of identity classes in the source domain.
        """
        BaseModel.__init__(self, opt)
        self.opt = opt
        self.source_classes = source_classes
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = [
            'D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B',
            'rc', 'reid_ce_B', 'reid_tri_B'
        ]
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        visual_names_A = ['real_A', 'fake_B']
        visual_names_B = ['real_B', 'fake_A']
        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            # '_A'/'_B'/'_B_ema' presumably resolve to net_A/net_B/net_B_ema
            # via BaseModel's name-prefixing — confirm in BaseModel.
            self.model_names = [
                'G_A', 'G_B', 'D_A', 'D_B', '_A', '_B', '_B_ema'
            ]
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # Generators for both translation directions (A->B and B->A).
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                        opt.netG, opt.norm, not opt.no_dropout,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf,
                                        opt.netG, opt.norm, not opt.no_dropout,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)

        # ReID backbones: net_B's head is widened by opt.num_clusters to also
        # cover pseudo-labeled target clusters.
        self.net_A = models.create(opt.arch,
                                   num_features=opt.features,
                                   dropout=opt.dropout,
                                   num_classes=source_classes)
        self.net_B = models.create(opt.arch,
                                   num_features=opt.features,
                                   dropout=opt.dropout,
                                   num_classes=source_classes +
                                   opt.num_clusters)
        self.net_A.cuda()
        self.net_B.cuda()
        self.net_A = nn.DataParallel(self.net_A)
        self.net_B = nn.DataParallel(self.net_B)

        # Optionally initialize both ReID nets from source-pretrained weights.
        # net_B uses copy_state_dict since its classifier shape differs.
        if (opt.init_s):
            initial_weights = load_checkpoint(opt.init_s)
            self.net_A.load_state_dict(initial_weights['state_dict'])
            copy_state_dict(initial_weights['state_dict'], self.net_B)

        # EMA copy of net_B (deep copy; presumably updated elsewhere as a
        # mean teacher — confirm in the update step).
        self.net_B_ema = copy.deepcopy(self.net_B)

        if self.isTrain:  # define discriminators
            if (opt.netD == 'n_layers_proj'):
                # Projection discriminator is shared between both directions
                # and requires hinge GAN loss.
                assert (opt.gan_mode == 'hinge')
                self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
                                                opt.netD, opt.n_layers_D,
                                                opt.norm, opt.init_type,
                                                opt.init_gain, self.gpu_ids)
                self.netD_B = self.netD_A
            else:
                self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
                                                opt.netD, opt.n_layers_D,
                                                opt.norm, opt.init_type,
                                                opt.init_gain, self.gpu_ids)
                self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
                                                opt.netD, opt.n_layers_D,
                                                opt.norm, opt.init_type,
                                                opt.init_gain, self.gpu_ids)

            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert (opt.input_nc == opt.output_nc)
            self.fake_A_pool = ImagePool(
                opt.pool_size
            )  # create image buffer to store previously generated images
            self.fake_B_pool = ImagePool(
                opt.pool_size
            )  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(
                opt.gan_mode).cuda()  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss().cuda()
            self.criterionIdt = torch.nn.L1Loss().cuda()
            self.criterionMMD = networks.MMD_loss().cuda()
            ### [IMPORTANT] define relation consistency loss ###
            self.criterion_rc = SoftTripletLoss(margin=None, drop=0).cuda()

            # loss functions for ReID
            self.criterion_tri = SoftTripletLoss(margin=0.0,
                                                 drop=opt.dropout_tri).cuda()

            self.set_optimizer()