def main_worker(args):
    cudnn.benchmark = True

    # log_dir = osp.dirname(args.resume[0])
    log_dir = "logs/"
    if args.dataset != 'target_test':
        sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    dataset, test_loader = get_data(args.dataset,
                                    args.data_dir,
                                    args.height,
                                    args.width,
                                    args.batch_size,
                                    args.workers,
                                    flip=args.flip)
    print("Test on {}:".format(args.dataset))

    # Create camera model
    if args.camera:
        cam_model = models.create(args.arch_c,
                                  pretrained=False,
                                  num_features=args.features,
                                  dropout=args.dropout,
                                  num_classes=0)
        checkpoint = load_checkpoint(args.camera)
        copy_state_dict(checkpoint['state_dict'], cam_model, strip='module.')
        cam_model.cuda()
        cam_model = nn.DataParallel(cam_model)
    else:
        cam_model = None

    # Features are extracted separately per model below, so no model is
    # attached to the evaluator here
    evaluator = Evaluator(None,
                          cam_model=cam_model,
                          cam_weight=0.1,
                          flip=args.flip)

    all_features = collections.defaultdict(list)

    for arch, resume in zip(args.arch, args.resume):
        print("extract features from {}".format(arch))
        # Create model
        model = models.create(arch,
                              pretrained=False,
                              num_features=args.features,
                              dropout=args.dropout,
                              num_classes=0)
        if args.dsbn and 'source' not in resume:
            print("==> Load the model with domain-specific BNs")
            convert_dsbn(model)

        # Load from checkpoint
        checkpoint = load_checkpoint(resume)
        copy_state_dict(checkpoint['state_dict'], model, strip='module.')

        if args.dsbn and 'source' not in resume:
            print("==> Test with {}-domain BNs".format(
                "source" if args.test_source else "target"))
            convert_bn(model, use_target=(not args.test_source))

        model.cuda()
        model = nn.DataParallel(model)

        features, _ = extract_features(model, test_loader, flip=args.flip)

        for fname in features.keys():
            all_features[fname].append(features[fname])

    # Use every model in the ensemble (indices into the all_features lists)
    all_best_comb_list = list(range(len(args.arch)))
    comb_features = collections.OrderedDict()
    for fname in all_features.keys():
        comb_features[fname] = torch.cat(
            [all_features[fname][i] for i in all_best_comb_list], dim=0)
        comb_features[fname] = nn.functional.normalize(comb_features[fname],
                                                       p=2,
                                                       dim=0)

    evaluator.evaluate(
        test_loader,
        dataset.query,
        dataset.gallery,
        features=comb_features,
        rerank=args.rerank,
        k1=args.k1,
        k2=args.k2,
        lambda_value=args.lambda_value,
        submit_file=osp.join(log_dir, 'result.txt'),
        only_submit=(args.dataset == 'target_test'))

    return
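
A quick aside on the ensembling step above: each backbone contributes one embedding per image, the embeddings are concatenated end to end, and the fused descriptor is L2-normalized along dim=0 (features are 1-D per image, so that is the only dimension). A minimal self-contained sketch; the 2048-dim size is an assumption:

import torch
import torch.nn.functional as F

def fuse(per_model_feats):
    # per_model_feats: list of 1-D embeddings, one per backbone
    fused = torch.cat(per_model_feats, dim=0)  # [k * D]
    return F.normalize(fused, p=2, dim=0)      # unit length for cosine search

feats = [torch.randn(2048), torch.randn(2048)]
print(fuse(feats).norm())  # ~1.0
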
Example #2
def main_worker(args):
    global start_epoch, best_mAP
    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    else:
        log_dir = osp.dirname(args.resume)
        sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    iters = args.iters if (args.iters > 0) else None
    print("==> Load source-domain trainset")
    dataset_source = get_data(args.dataset_source, args.data_dir)
    source_classes = dataset_source.num_train_pids
    print("==> Load target-domain trainset")
    dataset_target = get_data('target_train', args.data_dir)
    print("==> Load target-domain valset")
    dataset_target_val = get_data('target_val', args.data_dir)

    test_loader_target = get_test_loader(args, dataset_target_val, args.height,
                                         args.width, args.batch_size,
                                         args.workers)
    train_loader_source = get_train_loader(args, dataset_source, args.height,
                                           args.width, args.batch_size,
                                           args.workers, args.num_instances,
                                           iters, args.epochs)
    train_loader_target = get_train_loader(args, dataset_target, args.height,
                                           args.width, args.batch_size,
                                           args.workers, 0, iters, args.epochs)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          norm=False,
                          dropout=args.dropout,
                          num_classes=source_classes)
    model.cuda()

    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        params += [{
            "params": [value],
            "lr": args.lr,
            "weight_decay": args.weight_decay
        }]
    optimizer = torch.optim.Adam(params)

    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

    model = nn.DataParallel(model)

    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        copy_state_dict(checkpoint['state_dict'], model)
        start_epoch = checkpoint['epoch']
        best_mAP = checkpoint['best_mAP']
        print("=> Start epoch {}  best mAP {:.1%}".format(
            start_epoch, best_mAP))

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test on target domain:")
        evaluator.evaluate(test_loader_target,
                           dataset_target_val.query,
                           dataset_target_val.gallery,
                           rerank=args.rerank)
        return

    lr_scheduler = WarmupMultiStepLR(optimizer,
                                     args.milestones,
                                     gamma=0.1,
                                     warmup_factor=0.01,
                                     warmup_iters=args.warmup_step)

    # Trainer
    trainer = PreTrainer(model,
                         source_classes,
                         margin=args.margin,
                         fp16=args.fp16)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        train_loader_source.new_epoch()
        train_loader_target.new_epoch()

        trainer.train(epoch,
                      train_loader_source,
                      train_loader_target,
                      optimizer,
                      train_iters=len(train_loader_source),
                      print_freq=args.print_freq)

        if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
            _, mAP = evaluator.evaluate(test_loader_target,
                                        dataset_target_val.query,
                                        dataset_target_val.gallery)

            is_best = mAP > best_mAP
            best_mAP = max(mAP, best_mAP)
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': epoch + 1,
                    'best_mAP': best_mAP,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d} mAP: {:5.1%}  best: {:5.1%}{}\n'.
                  format(epoch, mAP, best_mAP, ' *' if is_best else ''))

        lr_scheduler.step()

    print("Test on target domain:")
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(test_loader_target,
                       dataset_target_val.query,
                       dataset_target_val.gallery,
                       rerank=args.rerank)
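
WarmupMultiStepLR itself is not shown here; a common formulation (an assumption about this repo, consistent with the warmup_factor=0.01 / gamma=0.1 arguments above) ramps the learning rate linearly from warmup_factor up to 1 over warmup_iters epochs, then multiplies by gamma at each milestone. A sketch built on LambdaLR:

import bisect
import torch

def warmup_multistep(optimizer, milestones, gamma=0.1,
                     warmup_factor=0.01, warmup_iters=10):
    # milestones must be a sorted list of epoch indices
    def factor(epoch):
        if epoch < warmup_iters:
            alpha = epoch / warmup_iters
            return warmup_factor * (1 - alpha) + alpha  # linear warmup to 1
        return gamma ** bisect.bisect_right(milestones, epoch)
    return torch.optim.lr_scheduler.LambdaLR(optimizer, factor)
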
Example #3
def main_worker(args):
    cudnn.benchmark = True

    log_dir = osp.dirname(args.resume)
    if args.dataset != 'target_test':
        sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    dataset, test_loader = get_data(args.dataset,
                                    args.data_dir,
                                    args.height,
                                    args.width,
                                    args.batch_size,
                                    args.workers,
                                    flip=args.flip)

    # Create model
    model = models.create(args.arch,
                          pretrained=False,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=0)
    if args.dsbn:
        print("==> Load the model with domain-specific BNs")
        convert_dsbn(model)

    # Create camera model
    if args.camera:
        cam_model = models.create(args.arch_c,
                                  pretrained=False,
                                  num_features=args.features,
                                  dropout=args.dropout,
                                  num_classes=0)
        checkpoint = load_checkpoint(args.camera)
        copy_state_dict(checkpoint['state_dict'], cam_model, strip='module.')
        cam_model.cuda()
        cam_model = nn.DataParallel(cam_model)
    else:
        cam_model = None

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    copy_state_dict(checkpoint['state_dict'], model, strip='module.')

    if args.dsbn:
        print("==> Test with {}-domain BNs".format(
            "source" if args.test_source else "target"))
        convert_bn(model, use_target=(not args.test_source))

    model.cuda()
    model = nn.DataParallel(model)

    # Evaluator
    evaluator = Evaluator(model,
                          cam_model=cam_model,
                          cam_weight=0.1,
                          flip=args.flip)
    print("Test on {}:".format(args.dataset))
    evaluator.evaluate(
        test_loader,
        dataset.query,
        dataset.gallery,
        rerank=args.rerank,
        k1=args.k1,
        k2=args.k2,
        lambda_value=args.lambda_value,
        submit_file=osp.join(log_dir, 'result.txt'),
        qe=False,
        only_submit=(args.dataset == 'target_test'))

    return
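
extract_features and evaluate both take flip=args.flip in these scripts. One common implementation of flip-time augmentation (an assumption here, not this repo's verified code) sums the embeddings of the image and its horizontal mirror before normalizing, assuming the model returns a [B, D] embedding tensor:

import torch
import torch.nn.functional as F

@torch.no_grad()
def flip_feature(model, imgs):
    f = model(imgs)                         # [B, D]
    f += model(torch.flip(imgs, dims=[3]))  # mirror along width (NCHW)
    return F.normalize(f, p=2, dim=1)
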
Example #4
    def __init__(self, opt, source_classes):
        BaseModel.__init__(self, opt)
        self.opt = opt
        self.source_classes = source_classes
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = [
            'D_A',
            'G_A',
            'cycle_A',
            'idt_A',
            'D_B',
            'G_B',
            'cycle_B',
            'idt_B',
            'rc_A',
        ]
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        visual_names_A = ['real_A', 'fake_B']
        visual_names_B = ['real_B', 'fake_A']

        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            # '_A'/'_B' resolve to self.net_A/self.net_B via BaseModel's
            # 'net' + name attribute lookup when saving/loading
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B', '_A', '_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                        opt.netG, opt.norm, not opt.no_dropout,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf,
                                        opt.netG, opt.norm, not opt.no_dropout,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)

        self.net_A = models.create(opt.arch,
                                   num_features=opt.features,
                                   dropout=opt.dropout,
                                   num_classes=source_classes)
        self.net_B = models.create(opt.arch,
                                   num_features=opt.features,
                                   dropout=opt.dropout,
                                   num_classes=source_classes)

        if (opt.init_s):
            initial_weights = load_checkpoint(opt.init_s)
            copy_state_dict(initial_weights['state_dict'],
                            self.net_A,
                            strip='module.')
            copy_state_dict(initial_weights['state_dict'],
                            self.net_B,
                            strip='module.')

        if (opt.init_t):
            convert_dsbn(self.net_B)
            initial_weights = load_checkpoint(opt.init_t)
            copy_state_dict(initial_weights['state_dict'],
                            self.net_B,
                            strip='module.')
            convert_bn(self.net_B, use_target=True)

        self.net_A.cuda()
        self.net_B.cuda()
        self.net_A = nn.DataParallel(self.net_A)
        self.net_B = nn.DataParallel(self.net_B)

        if self.isTrain:  # define discriminators
            if (opt.netD == 'n_layers_proj'):
                assert (opt.gan_mode == 'hinge')
                self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
                                                opt.netD, opt.n_layers_D,
                                                opt.norm, opt.init_type,
                                                opt.init_gain, self.gpu_ids)
                self.netD_B = self.netD_A
            else:
                self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
                                                opt.netD, opt.n_layers_D,
                                                opt.norm, opt.init_type,
                                                opt.init_gain, self.gpu_ids)
                self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
                                                opt.netD, opt.n_layers_D,
                                                opt.norm, opt.init_type,
                                                opt.init_gain, self.gpu_ids)

        if self.isTrain:
            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert (opt.input_nc == opt.output_nc)
            self.fake_A_pool = ImagePool(
                opt.pool_size
            )  # create image buffer to store previously generated images
            self.fake_B_pool = ImagePool(
                opt.pool_size
            )  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(
                opt.gan_mode).cuda()  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss().cuda()
            self.criterionIdt = torch.nn.L1Loss().cuda()
            self.criterion_rc = SoftTripletLoss(margin=None).cuda()
            self.loss_rc = AverageMeter()

            self.set_optimizer()
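
The cycle and identity terms named in loss_names follow the standard CycleGAN formulation, using the L1 criteria defined above. A minimal sketch of the forward-cycle term (the lambda value is an assumption; G_A, G_B, real_A stand in for this class's attributes):

import torch

def cycle_loss(G_A, G_B, real_A, criterion=torch.nn.L1Loss(), lam=10.0):
    # A -> B -> back to A; penalize the L1 reconstruction error
    rec_A = G_B(G_A(real_A))
    return criterion(rec_A, real_A) * lam

# identity generators give zero loss, a handy sanity check:
x = torch.randn(1, 3, 64, 64)
print(cycle_loss(lambda t: t, lambda t: t, x))  # tensor(0.)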

def create_model(args, num_classes):
    model_kwargs = {
        'num_features': args.features,
        'dropout': args.dropout,
        'num_classes': num_classes,
        'metric': args.metric,
        's': args.metric_s,
        'm': args.metric_m
    }
    model_1 = models.create(args.arch, **model_kwargs)
    model_1_ema = models.create(args.arch, **model_kwargs)
    model_2 = models.create(args.arch, **model_kwargs)
    model_2_ema = models.create(args.arch, **model_kwargs)

    initial_weights = load_checkpoint(args.init_1)
    copy_state_dict(initial_weights['state_dict'], model_1, strip='module.')
    copy_state_dict(initial_weights['state_dict'],
                    model_1_ema,
                    strip='module.')
    model_1_ema.classifier.weight.data.copy_(model_1.classifier.weight.data)

    initial_weights = load_checkpoint(args.init_2)
    copy_state_dict(initial_weights['state_dict'], model_2, strip='module.')
    copy_state_dict(initial_weights['state_dict'],
                    model_2_ema,
                    strip='module.')
    model_2_ema.classifier.weight.data.copy_(model_2.classifier.weight.data)

    # adopt domain-specific BN
    convert_dsbn(model_1)
    convert_dsbn(model_2)
    convert_dsbn(model_1_ema)
    convert_dsbn(model_2_ema)

    # use CUDA
    model_1.cuda()
    model_2.cuda()
    model_1_ema.cuda()
    model_2_ema.cuda()

    # Optimizer is only created here when fp16 is on, since amp.initialize
    # must wrap it; otherwise it is returned as None
    optimizer = None
    if args.fp16:
        params = [{
            "params": [value]
        } for _, value in model_1.named_parameters() if value.requires_grad]
        params += [{
            "params": [value]
        } for _, value in model_2.named_parameters() if value.requires_grad]
        optimizer = torch.optim.Adam(params,
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
        # fp16
        [model_1, model_2], optimizer = amp.initialize([model_1, model_2],
                                                       optimizer,
                                                       opt_level="O1")

    # multi-gpu
    model_1 = nn.DataParallel(model_1)
    model_2 = nn.DataParallel(model_2)
    model_1_ema = nn.DataParallel(model_1_ema)
    model_2_ema = nn.DataParallel(model_2_ema)

    for param in model_1_ema.parameters():
        param.detach_()
    for param in model_2_ema.parameters():
        param.detach_()

    return model_1, model_2, model_1_ema, model_2_ema, optimizer
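
The *_ema models are created with detached parameters, which matches the mean-teacher pattern (as in MMT): after each optimizer step on a student, its teacher is updated as an exponential moving average of the student weights rather than by gradients. A sketch of that update; alpha is an assumed hyperparameter:

import torch

@torch.no_grad()
def ema_update(student, teacher, alpha=0.999):
    for p_s, p_t in zip(student.parameters(), teacher.parameters()):
        p_t.mul_(alpha).add_(p_s, alpha=1.0 - alpha)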

def main_worker(args):
    global start_epoch, best_mAP
    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    else:
        log_dir = osp.dirname(args.resume)
        sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    iters = args.iters if (args.iters > 0) else None
    print("==> Load target-domain trainset")
    dataset_target = get_data('target_train', args.data_dir)
    print("==> Load target-domain valset")
    dataset_target_val = get_data('target_val', args.data_dir)

    test_loader_target = get_test_loader(args, dataset_target_val, args.height,
                                         args.width, args.batch_size,
                                         args.workers)
    train_loader_target = get_train_loader(args, dataset_target, args.height,
                                           args.width, args.batch_size,
                                           args.workers, 0, iters, args.epochs)

    # Create model
    model_kwargs = {
        'num_features': args.features,
        'norm': False,
        'dropout': args.dropout,
        'num_classes': dataset_target.num_train_cams,
        # 'metric': args.metric, 's': args.metric_s, 'm': args.metric_m,
    }
    model = models.create(args.arch, **model_kwargs)
    model.cuda()

    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
    optimizer = torch.optim.Adam(params)
    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

    model = nn.DataParallel(model)

    lr_scheduler = WarmupMultiStepLR(optimizer,
                                     args.milestones,
                                     gamma=0.1,
                                     warmup_factor=0.01,
                                     warmup_iters=args.warmup_step)

    # Trainer
    trainer = CameraTrainer(model,
                            dataset_target.num_train_cams,
                            margin=args.margin,
                            fp16=args.fp16)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        train_loader_target.new_epoch()

        trainer.train(epoch,
                      train_loader_target,
                      optimizer,
                      train_iters=len(train_loader_target),
                      print_freq=args.print_freq)

        if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
            mAP = validate(model, test_loader_target)

            is_best = mAP > best_mAP
            best_mAP = max(mAP, best_mAP)
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': epoch + 1,
                    'best_mAP': best_mAP,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d}  accuracy: {:5.1%}  best: {:5.1%}{}\n'.
                  format(epoch, mAP, best_mAP, ' *' if is_best else ''))

        lr_scheduler.step()
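
validate() is not shown in this snippet. Since the model here is a camera-ID classifier (num_classes=dataset_target.num_train_cams) and the log line reports "accuracy", a plausible, purely hypothetical implementation is top-1 accuracy over the val loader; the batch layout and the model returning class logits in eval mode are both assumptions:

import torch

@torch.no_grad()
def validate(model, loader):
    model.eval()
    correct, total = 0, 0
    for imgs, _, camids, _ in loader:  # (imgs, fnames, camids, idx) assumed
        logits = model(imgs.cuda())    # assumes eval mode yields logits
        correct += (logits.argmax(dim=1).cpu() == camids).sum().item()
        total += camids.numel()
    model.train()
    return correct / total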