Exemplo n.º 1
0
def test_with_open_reid(args):
    """Evaluate the best saved re-ID checkpoint on validation and test splits.

    Loads ``model_best.pth.tar`` from ``args.logs_dir``, fits the distance
    metric on the training loader, then reports validation and test results.
    """
    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    # Create model
    # NOTE(review): num_features is hard-coded to 1024 while num_classes is
    # set from args.features — these look swapped relative to the other
    # scripts in this file (which use num_features=args.features,
    # num_classes=num_classes). Confirm against the checkpoint's shapes.
    model = models.create(args.arch,
                          num_features=1024,
                          dropout=args.dropout,
                          num_classes=args.features)

    model = nn.DataParallel(model).cuda()
    print('Test with best model:')
    # checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)
    # Evaluator
    evaluator = Evaluator(model)
    metric.train(model, train_loader)
    print("Validation:")
    evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
    print("Test:")
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 2
0
def evaluate(model, dataset, params, metric=None):
    """Evaluate *model* over the union of a dataset's query and gallery sets.

    Args:
        model: network producing embeddings, consumed by ``Evaluator``.
        dataset: object exposing ``.query``, ``.gallery`` and ``.images_dir``.
        params: keyword arguments forwarded to ``dp.get_dataloader``.
        metric: optional distance metric. A Euclidean ``DistanceMetric`` is
            created only when none is supplied.
    """
    query, gallery = dataset.query, dataset.gallery
    # One loader covers every distinct query/gallery item.
    dataloader = dp.get_dataloader(
        list(set(dataset.query) | set(dataset.gallery)), dataset.images_dir,
        **params)
    # BUG FIX: the original unconditionally rebuilt a Euclidean metric here,
    # silently discarding any metric the caller passed in.
    if metric is None:
        metric = DistanceMetric(algorithm='euclidean')
    metric.train(model, dataloader)
    evaluator = Evaluator(model)
    evaluator.evaluate(dataloader, query, gallery, metric)
Exemplo n.º 3
0
def evaluate(model, dataset, config):
    """Run test-time evaluation of *model* on the dataset's query/gallery.

    Switches the config into evaluation mode, builds a single loader over
    all distinct probe and gallery images, fits the configured distance
    metric, and prints evaluation results.
    """
    config.set_training(False)
    query, gallery = dataset.query, dataset.gallery
    # A single loader serves both probe and gallery images.
    combined = list(set(dataset.query) | set(dataset.gallery))
    loader = dp.get_dataloader(combined, dataset.images_dir, config)
    distance = DistanceMetric(algorithm=config.dist_metric)
    distance.train(model, loader)
    Evaluator(model).evaluate(loader,
                              query,
                              gallery,
                              distance,
                              print_freq=config.batch_size)
Exemplo n.º 4
0
def main(args):
    """Optionally resume a re-ID model from a raw state dict and evaluate it.

    Seeds numpy/torch, builds loaders and the model, loads weights from
    ``args.resume`` (filtered through ``get_state_dict``), and when
    ``args.evaluate`` is set reports validation and test results.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)
    start_epoch = best_top1 = 0
    model = nn.DataParallel(model).cuda()
    if args.resume:
        #checkpoint = load_checkpoint(args.resume)
        #state_dict = get_state_dict(checkpoint['state_dict'],model.state_dict())
        #model.load_state_dict(state_dict)
        #start_epoch = checkpoint['epoch']
        #best_top1 = checkpoint['best_top1']
        #print("=> Start epoch {}  best top1 {:.1%}"
        #      .format(start_epoch, best_top1))
        # NOTE: args.resume is loaded as a bare state dict (torch.load),
        # not via load_checkpoint — presumably get_state_dict reconciles
        # key names/shapes against this model; confirm against the helper.
        state_dict = torch.load(args.resume)
        state_dict = get_state_dict(state_dict, model.state_dict())
        model.load_state_dict(state_dict)
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)

    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 5
0
def combine_evaluate(features, dataset):
    """Fuse several feature sets by summing their pairwise distance matrices.

    Computes one query-to-gallery distance matrix per feature set with a
    Euclidean metric, sums the matrices element-wise, and evaluates the
    fused distances.
    """
    metric = DistanceMetric(algorithm='euclidean')
    per_feature = []
    for feature in features:
        per_feature.append(
            pairwise_distance(feature, dataset.query, dataset.gallery, metric))
    # Stack the torch distance matrices as a numpy array, then fuse by sum.
    stacked = np.array([mat.numpy() for mat in per_feature])
    fused = stacked.sum(axis=0)
    evaluate_all(fused, dataset.query, dataset.gallery)
Exemplo n.º 6
0
def main(args):
    """Benchmark saved checkpoints from several training batches on test sets.

    For each test set and each batch id, loads the matching
    ``model_best.pth.tar``, computes CMC results with a Euclidean metric,
    and plots the collected curves per test set.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True  # Not sure about this one
    metric = DistanceMetric(algorithm='euclidean')

    for test_set in args.test_sets:
        dataset, _, loader = load_dataset(args.architecture, dataset=test_set)
        cmcs = {}
        for batch_id in args.batch_ids:
            benchmark_dir = os.path.join(working_dir, 'benchmarks', batch_id,
                                         args.loss, args.architecture)
            # Only the synthetic training set's class count is needed here,
            # to rebuild the classifier head with the right output size.
            _, num_classes, _ = load_dataset(args.architecture,
                                             dataset='synthetic',
                                             batch_id=batch_id)

            model = setup_model(args.loss, args.architecture, num_classes)
            model = nn.DataParallel(model).cuda()
            checkpoint = load_checkpoint(
                os.path.join(benchmark_dir, 'model_best.pth.tar'))
            model.module.load_state_dict(checkpoint['state_dict'])
            evaluator = Evaluator(model)

            cmcs[batch_id] = evaluator.test(loader, dataset.query,
                                            dataset.gallery, metric)

        plot(args, os.path.join(working_dir, 'plots'), cmcs, test_set)
Exemplo n.º 7
0
def main(args):
    """Evaluate a softmax-loss re-ID model restored from a checkpoint.

    Seeds the RNGs, builds the data loaders, loads model weights from
    ``args.resume`` and reports validation and test accuracy with
    normalized features.
    """
    # BUG FIX: np.random.seed(args.seed) was called twice in the original;
    # the redundant second call has been removed.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # NOTE(review): name/logs_dir are computed but never used (no Logger is
    # installed in this evaluation-only script) — kept for parity, confirm
    # whether log redirection was intended.
    name = f'{args.dataset}-{args.arch}'
    logs_dir = f'logs/softmax-loss/{name}'

    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          cos_output=args.cos_output)

    # Load from checkpoint; strict=False tolerates missing/extra keys.
    start_epoch = best_top1 = 0
    checkpoint = load_checkpoint(args.resume)
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    start_epoch = checkpoint['epoch']
    best_top1 = checkpoint['best_top1']
    print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))
    model = model.cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, normalize_features=True)  # args.cos_output)
    metric.train(model, train_loader)
    print("Validation:")
    evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
    print("Test:")
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 8
0
def main(args):
    """Train a cross-domain re-ID model with triplet + reconstruction losses.

    Builds labelled source and unlabelled target data loaders, trains
    ``ResNet_recon`` jointly with a reconstruction module, periodically
    evaluates and checkpoints, then tests the final model. With
    ``args.evaluate`` set, only runs evaluation on the source (and target)
    test sets.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.source_batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    # Create source data_loader
    source_dataset, source_num_classes, source_train_loader\
        , source_val_loader, source_test_loader = \
        get_source_data(args.source_dataset, args.split, args.data_dir, args.height,
                 args.width, args.source_batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    # Create target data_loader
    target_dataset, target_num_classes, target_train_loader\
        , target_val_loader, target_test_loader = \
        get_target_data(args.target_dataset, args.split, args.data_dir, args.height,
                 args.width, args.target_batch_size, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = ResNet_recon(num_features=1024, dropout=args.dropout)
    if args.evaluate:
        model_evalu = nn.DataParallel(model).cuda()
    # Training path keeps the bare (non-DataParallel) model on GPU 0; the
    # auxiliary modules live on GPU 1.
    model = model.cuda(0)

    # For source triplet-loss
    trip_embedding = Trip_embedding(num_features=1024,
                                    num_diff_features=128, dropout=args.dropout).cuda(1)
    # For target reconstruction-loss
    recon_module = Reconstruct(num_features=1024).cuda(1)
    # Criterion
    criterion = ReconTripLoss().cuda(1)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        model_path = load_checkpoint(args.resume)
        model.load_state_dict(model_path['state_dict'])
        # trip_embedding.load_state_dict(model_path['trip_em'])
        recon_module.load_state_dict(model_path['recon_dict'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    if args.evaluate:
        evaluator = Evaluator(model_evalu)
        metric.train(model_evalu, source_train_loader)
        print("Test:")
        if len(args.source_dataset) > 1:
            for dataset_name in args.source_dataset:
                print("{} source_test result:".format(dataset_name))
                evaluator.evaluate(source_test_loader[dataset_name],
                                   source_dataset.query[dataset_name],
                                   source_dataset.gallery[dataset_name],
                                   metric)
            return
        else:
            print("source test result")
            evaluator.evaluate(source_test_loader, source_dataset.query,
                               source_dataset.gallery, metric)
            print("target test result")
            evaluator.evaluate(target_test_loader, target_dataset.query,
                               target_dataset.gallery, metric)
            return

    evaluator = Evaluator(model)
    # Optimizer
    optimizer = torch.optim.Adam([{'params': model.parameters()},
                                  # {'params': trip_embedding.parameters()},
                                  {'params': recon_module.parameters()}],
                                  lr=args.lr, weight_decay=args.weight_decay)
    # Trainer
    trainer = Transfer_Trainer(model, recon_module, trip_embedding, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Constant lr until lr_change_epochs, then exponential decay
        # towards lr * 1e-3 at the final epoch.
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/
                                 float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    top1 = 0
    is_best = True
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader, optimizer, logger)
        if epoch % 25 == 0 and epoch != 0:
            # NOTE(review): both evaluate calls pass .query as the gallery
            # argument; evaluating query-vs-query looks unintended — confirm
            # whether .gallery was meant.
            top1 = evaluator.evaluate(source_test_loader, source_dataset.query, source_dataset.query)
            target_top1 = evaluator.evaluate(target_test_loader, target_dataset.query, target_dataset.query)
            print('target_top1 = {:5.1%}'.format(target_top1))
            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)

        save_checkpoint({
            'state_dict': model.state_dict(),
            'recon_dict': recon_module.state_dict(),
            # 'trip_em': trip_embedding.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    # BUG FIX: on the training path `model` is never wrapped in
    # nn.DataParallel, so it has no `.module` attribute; load the state
    # dict into the model directly.
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, source_train_loader)
    print("Test:")
    # BUG FIX: this script has no `args.dataset`; the source datasets are
    # held in `args.source_dataset`, matching the evaluate branch above.
    if len(args.source_dataset) > 1:
        for dataset_name in args.source_dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(source_test_loader[dataset_name], source_dataset.query[dataset_name],
                               source_dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(source_test_loader, source_dataset.query, source_dataset.gallery, metric)
Exemplo n.º 9
0
def main(args):
    """Train an attention-based target model with a frozen source model.

    Loads a pretrained source network, trains the target model and a
    domain discriminator with label-smoothed cross-entropy, triplet,
    attention-MSE and discriminator losses, evaluates every 10 epochs
    after epoch 100, and finally tests the best checkpoint.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir + '/log'))

    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144,
                                   56) if args.arch == 'inception' else (256,
                                                                         128)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob)
    print(num_classes)
    model_t, model_discriminator = models.create(args.arch,
                                                 num_classes=num_classes,
                                                 num_features=args.features,
                                                 attention_mode=args.att_mode)
    # Source model has a fixed 295-class head matching its pretraining set.
    model_s = models.create('baseline_wo_D',
                            num_classes=295,
                            num_features=args.features,
                            attention_mode=args.att_mode)
    # Load source network.
    # NOTE(review): hard-coded absolute checkpoint path — should come from
    # a command-line argument.
    checkpoint_s = load_checkpoint(
        '/home/fan/cross_reid/2000_mode1/model_best.pth.tar')
    # Keep only the checkpoint entries whose keys exist in this model.
    model_dict = model_s.state_dict()
    state_dict = {
        k: v
        for k, v in checkpoint_s.items() if k in model_dict.keys()
    }
    model_dict.update(state_dict)
    model_s.load_state_dict(model_dict)

    model_s = model_s.cuda()
    model_t = model_t.cuda()
    model_discriminator = model_discriminator.cuda()

    evaluator = Evaluator(model_t)
    metric = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_t.load_state_dict(checkpoint['model'])
        model_discriminator.load_state_dict(checkpoint['model_discriminator'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    if args.evaluate:
        metric.train(model_t, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        exit()

    current_margin = args.margin
    #criterion_z = nn.CrossEntropyLoss().cuda()
    criterion_att = nn.MSELoss().cuda()
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes,
                                          epsilon=0.5).cuda()
    criterion_I = TripletLoss(margin=current_margin).cuda()
    criterion_D = nn.CrossEntropyLoss().cuda()

    print(args)

    # Classifier (and attention) parameters are excluded from the backbone
    # group so every group can carry its own learning rate.
    if args.arch == 'ide':
        ignored_params = list(map(id, model_t.model.fc.parameters())) + list(
            map(id, model_t.classifier.parameters()))
    else:
        ignored_params = list(map(id, model_t.classifier.parameters())) + list(
            map(id, model_t.attention_module.parameters()))

    base_params = filter(lambda p: id(p) not in ignored_params,
                         model_t.parameters())

    if args.use_adam:
        optimizer_ft = torch.optim.Adam([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': model_t.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.attention_module.parameters(),
                'lr': args.lr
            },
        ],
                                        weight_decay=5e-4)

        optimizer_discriminator = torch.optim.Adam(
            [{
                'params': model_discriminator.model.parameters(),
                'lr': args.lr
            }, {
                'params': model_discriminator.classifier.parameters(),
                'lr': args.lr
            }],
            weight_decay=5e-4)

    else:
        optimizer_ft = torch.optim.SGD(
            [{
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            }, {
                'params': model_t.classifier.parameters(),
                'lr': args.lr
            }, {
                'params': model_t.attention_module.parameters(),
                'lr': args.lr
            }],
            momentum=0.9,
            weight_decay=5e-4,
            nesterov=True)
        optimizer_discriminator = torch.optim.SGD([
            {
                'params': model_discriminator.model.parameters(),
                'lr': args.lr
            },
            {
                'params': model_discriminator.classifier.parameters(),
                'lr': args.lr
            },
        ],
                                                  momentum=0.9,
                                                  weight_decay=5e-4,
                                                  nesterov=True)

    scheduler = WarmupMultiStepLR(optimizer_ft, args.mile_stone, args.gamma,
                                  args.warmup_factor, args.warmup_iters,
                                  args.warmup_methods)

    trainer = Trainer(model_s, model_t, model_discriminator, criterion_z,
                      criterion_I, criterion_att, criterion_D, trainvallabel,
                      1, 1, 0.15, 0.05, 5)

    # BUG FIX: top1/is_best were only assigned inside the evaluation branch
    # below, so the final summary print raised NameError whenever no eval
    # epoch ran (e.g. args.epochs <= 100). Initialize them up front.
    # (The unused `flag = 1` local has also been removed.)
    best_top1 = -1
    top1 = 0
    is_best = False
    # Start training
    for epoch in range(start_epoch, args.epochs):
        scheduler.step()
        triple_loss, tot_loss, att_loss, D_loss = trainer.train(
            epoch, train_loader, optimizer_ft, optimizer_discriminator)

        save_checkpoint(
            {
                'model': model_t.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        # Evaluate only every 10th epoch after epoch 100.
        if epoch < 100:
            continue
        if not epoch % 10 == 0:
            continue

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)
        print(top1)
        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'model': model_t.state_dict(),
                # 'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

    print('Test with best model:')
    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.format(
        epoch, top1, best_top1, ' *' if is_best else ''))

    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model_t.load_state_dict(checkpoint['model'])
    metric.train(model_t, train_loader)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, metric)
    print(args)
Exemplo n.º 10
0
def main(args):
    """Train a multi-task re-ID model with cross-entropy and poset losses.

    Builds multi-task loaders (one task per camera), trains with Adam and
    a step-decayed learning rate, and periodically checkpoints and
    evaluates the saved model. Prints total wall-clock time at the end.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    start = time.time()

    # Redirect print to both console and log file
    if not args.evaluate:
        dt = datetime.datetime.now()
        # Log file name encodes month/day/hour/minute, e.g. log_06011230.txt.
        sys.stdout = Logger(
            osp.join(
                args.logs_dir,
                'log_' + str(dt.month).zfill(2) + str(dt.day).zfill(2) +
                str(dt.hour).zfill(2) + str(dt.minute).zfill(2) + '.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    mt_train_loader, mt_num_classes, test_loader, query_set, gallery_set = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=mt_num_classes,
                          double_loss=True)
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)

    # Criterion
    criterion_1 = nn.CrossEntropyLoss().cuda()
    criterion_2 = PosetLoss_G2G(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    num_task = len(
        mt_num_classes)  # num_task equals camera number, each camera is a task
    trainer = Trainer(model, criterion_1, criterion_2, num_task)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Constant lr for the first 100 epochs, then exponential decay
        # by a factor of 0.001 every further 50 epochs.
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    start_epoch = best_top1 = 0
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, mt_train_loader, optimizer)
        # Save and evaluate every args.start_save epochs.
        if (epoch % args.start_save == (args.start_save - 1)):
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                },
                0,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            # Final test
            print('Test with the model after epoch {:d}:'.format(epoch + 1))
            checkpoint = load_checkpoint(
                osp.join(args.logs_dir, 'checkpoint.pth.tar'))
            model.module.load_state_dict(checkpoint['state_dict'])
            metric.train(model, mt_train_loader)
            evaluator.evaluate(test_loader, query_set, gallery_set, metric)
    end = time.time()
    print('Total time: {:.1f}s'.format(end - start))
Exemplo n.º 11
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, train_loader_head, train_loader_upper, train_loader_lower,\
    val_loader, val_loader_head, val_loader_upper, val_loader_lower,\
    test_loader, test_loader_head, test_loader_upper, test_loader_lower= \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)
    # create model1 model2 model3  然后修改optimizer?
    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)
    model_head = models.create(args.arch,
                               num_features=args.features,
                               dropout=args.dropout,
                               num_classes=num_classes)
    model_upper = models.create(args.arch,
                                num_features=args.features,
                                dropout=args.dropout,
                                num_classes=num_classes)
    model_lower = models.create(args.arch,
                                num_features=args.features,
                                dropout=args.dropout,
                                num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    #    if args.resume:
    #       checkpoint = load_checkpoint(args.resume)
    #        model.load_state_dict(checkpoint['state_dict'])
    #        start_epoch = checkpoint['epoch']
    #        best_top1 = checkpoint['best_top1']
    #        print("=> Start epoch {}  best top1 {:.1%}"
    #              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()
    model_head = nn.DataParallel(model_head).cuda()
    model_upper = nn.DataParallel(model_upper).cuda()
    model_lower = nn.DataParallel(model_lower).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, model_head, model_upper, model_lower)

    #    if args.evaluate:
    #        metric.train(model, train_loader)
    #        print("Validation:")
    #        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
    #        print("Test:")
    #        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
    #        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()
    criterion_head = nn.CrossEntropyLoss().cuda()
    criterion_upper = nn.CrossEntropyLoss().cuda()
    criterion_lower = nn.CrossEntropyLoss().cuda()

    # Optimizer

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    if hasattr(model_head.module, 'base'):
        base_param_ids_head = set(map(id, model_head.module.base.parameters()))
        new_params_head = [
            p for p in model_head.parameters()
            if id(p) not in base_param_ids_head
        ]
        param_groups_head = [{
            'params': model_head.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params_head,
            'lr_mult': 1.0
        }]
    else:
        param_groups_head = model_head.parameters()
    optimizer_head = torch.optim.SGD(param_groups_head,
                                     lr=args.lr,
                                     momentum=args.momentum,
                                     weight_decay=args.weight_decay,
                                     nesterov=True)

    if hasattr(model_head.module, 'base'):
        base_param_ids_upper = set(
            map(id, model_upper.module.base.parameters()))
        new_params_upper = [
            p for p in model_upper.parameters()
            if id(p) not in base_param_ids_upper
        ]
        param_groups_upper = [{
            'params': model_upper.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params_upper,
            'lr_mult': 1.0
        }]
    else:
        param_groups_upper = model_upper.parameters()
    optimizer_upper = torch.optim.SGD(param_groups_upper,
                                      lr=args.lr,
                                      momentum=args.momentum,
                                      weight_decay=args.weight_decay,
                                      nesterov=True)

    if hasattr(model_lower.module, 'base'):
        base_param_ids_lower = set(
            map(id, model_lower.module.base.parameters()))
        new_params_lower = [
            p for p in model_lower.parameters()
            if id(p) not in base_param_ids_lower
        ]
        param_groups_lower = [{
            'params': model_lower.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params_lower,
            'lr_mult': 1.0
        }]
    else:
        param_groups_lower = model_lower.parameters()
    optimizer_lower = torch.optim.SGD(param_groups_lower,
                                      lr=args.lr,
                                      momentum=args.momentum,
                                      weight_decay=args.weight_decay,
                                      nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)
    trainer_head = Trainer(model_head, criterion_head)
    trainer_upper = Trainer(model_upper, criterion_upper)
    trainer_lower = Trainer(model_lower, criterion_lower)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr_head(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer_head.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr_upper(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer_upper.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr_lower(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer_lower.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        adjust_lr_head(epoch)
        adjust_lr_upper(epoch)
        adjust_lr_lower(epoch)
        trainer.train(epoch, train_loader, optimizer)
        trainer_head.train(epoch, train_loader_head, optimizer_head)
        trainer_upper.train(epoch, train_loader_upper, optimizer_upper)
        trainer_lower.train(epoch, train_loader_lower, optimizer_lower)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val_loader_head,
                                  val_loader_upper, val_loader_lower,
                                  dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'),
            opath='model_best.pth.tar')

        save_checkpoint(
            {
                'state_dict': model_head.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint_head.pth.tar'),
            opath='model_head_best.pth.tar')

        save_checkpoint(
            {
                'state_dict': model_upper.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint_upper.pth.tar'),
            opath='model_upper_best.pth.tar')

        save_checkpoint(
            {
                'state_dict': model_lower.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint_lower.pth.tar'),
            opath='model_lower_best.pth.tar')

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    checkpoint_head = load_checkpoint(
        osp.join(args.logs_dir, 'model_head_best.pth.tar'))
    checkpoint_upper = load_checkpoint(
        osp.join(args.logs_dir, 'model_upper_best.pth.tar'))
    checkpoint_lower = load_checkpoint(
        osp.join(args.logs_dir, 'model_lower_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    model_head.module.load_state_dict(checkpoint_head['state_dict'])
    model_upper.module.load_state_dict(checkpoint_upper['state_dict'])
    model_lower.module.load_state_dict(checkpoint_lower['state_dict'])
    metric.train(model, train_loader)
    metric.train(model_head, train_loader_head)
    metric.train(model_upper, train_loader_upper)
    metric.train(model_lower, train_loader_lower)

    evaluator.evaluate(test_loader, test_loader_head, test_loader_upper,
                       test_loader_lower, dataset.query, dataset.gallery,
                       metric)
Exemplo n.º 12
0
def main(args):
    """Train a triplet-loss re-id model end-to-end and test the best checkpoint.

    Side effects: seeds numpy/torch, enables cudnn benchmark autotuning,
    may redirect stdout to ``args.logs_dir/log.txt``, and writes
    checkpoints under ``args.logs_dir``.
    """
    # Deterministic seeding plus cudnn autotuning.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Mirror stdout into a log file unless we are only evaluating.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Sanity-check the identity-sampling configuration.
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        if args.arch == 'inception':
            args.height, args.width = 144, 56
        else:
            args.height, args.width = 256, 128

    loaded = get_data(args.dataset, args.split, args.data_dir, args.height,
                      args.width, args.batch_size, args.num_instances,
                      args.workers, args.combine_trainval)
    dataset, _num_classes, train_loader, val_loader, test_loader = loaded

    # Reuse the classifier head as the final feature embedding:
    # avgpool -> FC(1024) -> FC(args.features).
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Optionally resume training state from a saved checkpoint.
    start_epoch = best_top1 = 0
    if args.resume:
        ckpt = load_checkpoint(args.resume)
        model.load_state_dict(ckpt['state_dict'])
        start_epoch, best_top1 = ckpt['epoch'], ckpt['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric learned from training features at evaluation time.
    metric = DistanceMetric(algorithm=args.dist_metric)

    evaluator = Evaluator(model)
    if args.evaluate:
        # Evaluation-only path: fit the metric, report, and stop.
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Loss and optimizer.
    criterion = TripletLoss(margin=args.margin).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    trainer = Trainer(model, criterion)

    def adjust_lr(epoch):
        # Constant LR for the first 100 epochs, then exponential decay.
        if epoch <= 100:
            lr = args.lr
        else:
            lr = args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for group in optimizer.param_groups:
            group['lr'] = lr * group.get('lr_mult', 1)

    # Main training loop; validation and checkpointing start at start_save.
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Reload the best checkpoint and run the final test.
    print('Test with best model:')
    best_ckpt = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(best_ckpt['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 13
0
def main(args):
    """Self-training domain adaptation for person re-id.

    Starting from a model pretrained on a source dataset, each iteration:
    extracts source/target features, computes (re-ranked) cross-domain
    distances, clusters target samples into pseudo-labels, and fine-tunes
    on the pseudo-labelled data. From iteration 5 on, an EUG sampler
    progressively adds confidently pseudo-labelled target samples.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    ## get_source_data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get_target_data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(2048) -> FC(args.features)
    # num_class = 0 disables the classification head; training relies on
    # the triplet criteria over pseudo-labels instead.
    num_class = 0
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=num_class, num_split=args.num_split, cluster=args.dce_loss) #duke
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=num_class, num_split=args.num_split, cluster=args.dce_loss)
    else:
        raise RuntimeError('Please specify the number of classes (ids) of the network.')
    
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        print('Resuming checkpoints from finetuned model on another dataset...\n')
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint, strict=False)
    else:
        # NOTE(review): raising RuntimeWarning aborts execution here; a
        # warnings.warn call would be the non-fatal alternative.
        raise RuntimeWarning('Not using a pre-trained model')
    model = nn.DataParallel(model).cuda()
   
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print("Test with the original model trained on source domain:")
    best_top1 = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    if args.evaluate:
        return

    # Criterion
    # Two triplet losses: index 0 for pseudo-labelled target data and
    # index 1 for the EUG-selected subset.
    criterion = []
    criterion.append(TripletLoss(margin=args.margin,num_instances=args.num_instances).cuda())
    criterion.append(TripletLoss(margin=args.margin,num_instances=args.num_instances).cuda())

    #multi lr
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if
                  id(p) not in base_param_ids]
    # Both groups use lr_mult=1.0, so backbone and new layers currently
    # share the same learning rate.
    param_groups = [
        {'params': model.module.base.parameters(), 'lr_mult': 1.0},
        {'params': new_params, 'lr_mult': 1.0}]
    # Optimizer
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=0.9, weight_decay=args.weight_decay)    

    ##### adjust lr
    # NOTE(review): adjust_lr is defined but never called in this function.
    def adjust_lr(epoch):
        # Step decay: full lr through epoch 7, 0.3x through 14, then 0.1x.
        if epoch <= 7:
            lr = args.lr
        elif epoch <=14:
            lr = 0.3 * args.lr
        else:
            lr = 0.1 * args.lr
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    ##### training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        Resize((args.height,args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    iter_nums = args.iteration
    start_epoch = args.start_epoch
    cluster_list = []
    # NOTE(review): top_percent is assigned but not used in this function.
    top_percent = args.rho
    # EF only feeds the progress log message below.
    EF = 100 // iter_nums + 1
    eug = None
    for iter_n in range(start_epoch, iter_nums):
        #### get source datas' feature
        if args.load_dist and iter_n == 0:
            # Reuse precomputed distances on the first iteration.
            dist = pickle.load(open('dist' + str(args.num_split) + '.pkl', 'rb'))
            euclidean_dist_list = dist['euclidean']
            rerank_dist_list = dist['rerank']
        else:
            source_features, _ = extract_features(model, src_extfeat_loader, for_eval=False)
            if isinstance(source_features[src_dataset.trainval[0][0]], list):
                # Multi-part (split) features: concatenate each part separately.
                len_f = len(source_features[src_dataset.trainval[0][0]])
                source_features = [torch.cat([source_features[f][i].unsqueeze(0) for f, _, _ in src_dataset.trainval], 0) for i in range(len_f)]
            else:
                source_features = torch.cat([source_features[f].unsqueeze(0) for f, _, _ in src_dataset.trainval], 0) # synchronization feature order with s_dataset.trainval
            #### extract training images' features
            print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n+1))
            target_features, _ = extract_features(model, tgt_extfeat_loader, for_eval=False)
            if isinstance(target_features[tgt_dataset.trainval[0][0]], list):
                len_f = len(target_features[tgt_dataset.trainval[0][0]])
                target_features = [torch.cat([target_features[f][i].unsqueeze(0) for f, _, _ in tgt_dataset.trainval], 0) for i in range(len_f)]
            else:
                target_features = torch.cat([target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval], 0) # synchronization feature order with dataset.trainval
            #### calculate distance and rerank result
            print('Calculating feature distances...') 
            # target_features = target_features.numpy()
            euclidean_dist_list, rerank_dist_list = compute_dist(
                source_features, target_features, lambda_value=args.lambda_value, no_rerank=args.no_rerank, num_split=args.num_split) # lambda=1 means only source dist
            # Free the large feature tensors before clustering.
            del target_features
            del source_features
        
        labels_list, cluster_list = generate_selflabel(
            euclidean_dist_list, rerank_dist_list, iter_n, args, cluster_list)
        #### generate new dataset
        train_loader = generate_dataloader(tgt_dataset, labels_list, train_transformer, iter_n, args)

        # NOTE(review): EUG is instantiated only when iter_n == 5 exactly;
        # with start_epoch > 5 it is never created.
        if iter_n == 5:
            u_data, l_data = updata_lable(tgt_dataset, labels_list[0], args.tgt_dataset, sample=args.sample)
            eug = EUG(model_name=args.arch, batch_size=args.batch_size, mode=args.mode, num_classes=num_class, 
            data_dir=args.data_dir, l_data=l_data, u_data=u_data, print_freq=args.print_freq, 
            save_path=args.logs_dir, pretrained_model=model, rerank=True)
            eug.model = model

        if eug is not None:
            # Linearly grow the number of selected unlabelled samples, capped
            # at the full unlabelled set.
            nums_to_select = int(min((iter_n + 1) * int(len(u_data) // (iter_nums)), len(u_data)))
            pred_y, pred_score = eug.estimate_label()
            
            print('This is running {} with EF= {}%, step {}:\t Nums_to_be_select {}, \t Logs-dir {}'.format(
                args.mode, EF, iter_n+1, nums_to_select, args.logs_dir
            ))
            selected_idx = eug.select_top_data(pred_score, nums_to_select)
            new_train_data = eug.generate_new_train_data(selected_idx, pred_y)
            eug_dataloader = eug.get_dataloader(new_train_data, training=True)

            top1 = iter_trainer(model, tgt_dataset, train_loader, eug_dataloader, test_loader, optimizer, 
                criterion, args.epochs, args.logs_dir, args.print_freq, args.lr)
            eug.model = model
            del train_loader
            # del eug_dataloader
        else:
            top1 = iter_trainer(model, tgt_dataset, train_loader, None, test_loader, optimizer, 
            criterion, args.epochs, args.logs_dir, args.print_freq, args.lr)
            del train_loader

        # Checkpoint after every iteration; mark the best top-1 so far.
        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': iter_n + 1,
            'best_top1': best_top1,
            # 'num_ids': num_ids,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(iter_n+1, top1, best_top1, ' *' if is_best else ''))
Exemplo n.º 14
0
def main(args):
    """Train a re-id model with AM-Softmax loss and test the best checkpoint.

    Side effects: seeds numpy/torch, enables cudnn benchmark autotuning,
    may redirect stdout to ``logs/amsoftmax-loss/<dataset>-<arch>/log.txt``,
    and writes checkpoints under that directory.
    """
    # Seed both RNGs once (the original seeded numpy a second time after
    # torch.manual_seed, which was redundant) and enable cudnn autotuning.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    name = f'{args.dataset}-{args.arch}'
    logs_dir = f'logs/amsoftmax-loss/{name}'

    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, class_weight, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model with a cosine output layer for the AM-Softmax loss.
    model = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes, cos_output=True)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = model.cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator (features are L2-normalized before comparison).
    evaluator = Evaluator(model, normalize_features=True, only_top1=True)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion: optionally class-weighted AM-Softmax.
    if args.class_weight:
        criterion = AMSoftmax(weight=class_weight).cuda()
    else:
        criterion = AMSoftmax().cuda()

    # Optimizer: the pretrained backbone gets a 100x smaller lr than the
    # newly added layers.
    if hasattr(model, 'base'):
        base_param_ids = set(map(id, model.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.base.parameters(), 'lr_mult': 0.01},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion, name=name)

    # Schedule learning rate: decay by 10x every step_size epochs.
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training; validation and checkpointing start at start_save.
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test with the best checkpoint.
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 15
0
def main(args):
    """Train a triplet-loss re-id model on a merged multi-dataset corpus.

    Merges cuhk03, market1501, cuhksysu and mars into joint train/val/
    trainval splits (JSTL-style), trains with a triplet loss, and tests
    the best checkpoint on the cuhk03 and market1501 query/gallery sets.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    
    # merge() appends each dataset's samples into train/val/trainval and
    # accumulates id counts in numbers = [train, val, trainval].
    train, val, trainval = [], [], []
    numbers = [0, 0, 0]

    dataset_cuhk03 = merge('cuhk03', train, val, trainval, numbers, args.data_dir, args.split)
    dataset_market1501 = merge('market1501', train, val, trainval, numbers, args.data_dir, args.split)
    merge('cuhksysu', train, val, trainval, numbers, args.data_dir, args.split)
    merge('mars', train, val, trainval, numbers, args.data_dir, args.split)
    
    num_train_ids, num_val_ids, num_trainval_ids = numbers
    
    # Only cuhk03 and market1501 are expected to contribute validation ids.
    assert num_val_ids == dataset_cuhk03.num_val_ids + dataset_market1501.num_val_ids

    print("============================================")
    print("JSTL dataset loaded")
    print("  subset   | # ids | # images")
    print("  ---------------------------")
    print("  train    | {:5d} | {:8d}"
          .format(num_train_ids, len(train)))
    print("  val      | {:5d} | {:8d}"
          .format(num_val_ids, len(val)))
    print("  trainval | {:5d} | {:8d}"
          .format(num_trainval_ids, len(trainval)))

    query_cuhk03, gallery_cuhk03 = dataset_cuhk03.query, dataset_cuhk03.gallery
    query_market1501, gallery_market1501 = dataset_market1501.query, dataset_market1501.gallery

    # ImageNet normalization statistics.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = trainval if args.combine_trainval else train
    num_classes = (num_trainval_ids if args.combine_trainval
                   else num_train_ids)

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(args.height, args.width),
        T.ToTensor(),
        normalizer,
    ])

    # Identity-balanced sampling for the triplet loss.
    train_loader = DataLoader(
        Preprocessor(train_set, root=args.data_dir,
                     transform=train_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        sampler=RandomIdentitySampler(train_set, args.num_instances),
        pin_memory=True, drop_last=True)

    val_loader = DataLoader(
        Preprocessor(val, root=args.data_dir,
                     transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    # Per-dataset test loaders over the union of query and gallery images.
    test_loader_cuhk03 = DataLoader(
        Preprocessor(list(set(query_cuhk03) | set(gallery_cuhk03)),
                     root=dataset_cuhk03.images_dir, transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    test_loader_market1501 = DataLoader(
        Preprocessor(list(set(query_market1501) | set(gallery_market1501)),
                     root=dataset_market1501.images_dir, transform=test_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, val, val, metric)
        print("Test(cuhk03):")
        evaluator.evaluate(test_loader_cuhk03, query_cuhk03, gallery_cuhk03, metric)
        print("Test(market1501):")
        evaluator.evaluate(test_loader_market1501, query_market1501, gallery_market1501, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Constant lr for the first 100 epochs, then exponential decay.
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training; validation and checkpointing start at start_save.
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val, val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)

    print("Test(cuhk03):")
    evaluator.evaluate(test_loader_cuhk03, query_cuhk03, gallery_cuhk03, metric)
    print("Test(market1501):")
    evaluator.evaluate(test_loader_market1501, query_market1501, gallery_market1501, metric)
Exemplo n.º 16
0
def main(args):
    """Train paired AdaIN generators/discriminators for cross-domain re-id.

    Builds two AdaINGen generators and two multi-scale discriminators,
    trains them jointly (identity, triplet and attention losses wired
    through Trainer), evaluates the content encoders as the re-id feature
    extractor every epoch, and checkpoints both per-epoch and best-top1
    states.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir + '/log'))

    if args.height is None or args.width is None:
        args.height, args.width = (144,
                                   56) if args.arch == 'inception' else (256,
                                                                         128)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader, query_loader_s, gallery_loader_s = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob)
    print(num_classes)
    # One generator/discriminator pair per image domain.
    gen_a = AdaINGen(3, num_classes)
    gen_b = AdaINGen(3, num_classes)
    # id_a = ft_netAB(num_classes, stride=1, norm="no",  pool="max")
    # id_b = ft_netAB(num_classes, stride=1, norm="no",  pool="max")
    dis_a = MsImageDis(3)  # discriminator for domain a
    dis_b = MsImageDis(3)  # discriminator for domain b
    gen_a = gen_a.cuda()
    gen_b = gen_b.cuda()
    # id_a = id_a.cuda()
    # id_b = id_b.cuda()
    dis_a = dis_a.cuda()
    dis_b = dis_b.cuda()

    # Re-id evaluation runs over the two content encoders.
    evaluator = Evaluator(gen_a.enc_content, gen_b.enc_content)
    metric = DistanceMetric(algorithm=args.dist_metric)

    # evaluator_s = Evaluator(model_s)
    # metric_s = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        # NOTE(review): `model_t` is never defined in this function, so this
        # resume path raises NameError as written.
        checkpoint = load_checkpoint(args.resume)
        model_t.load_state_dict(checkpoint['model'])
        # model_discriminator.load_state_dict(checkpoint['model_discriminator'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    if args.evaluate:
        # NOTE(review): same undefined `model_t` problem on this path.
        metric.train(model_t, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        exit()

    # Loss functions. NOTE(review): criterion_z_s and criterion_I_s are
    # constructed but not used in the visible body.
    current_margin = args.margin
    criterion_z_s = CrossEntropyLabelSmooth(num_classes=num_classes,
                                            epsilon=0.5).cuda()
    criterion_att = nn.MSELoss().cuda()
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes,
                                          epsilon=0.5).cuda()
    criterion_I = TripletLoss(margin=current_margin).cuda()
    criterion_I_s = TripletLoss_s(margin=current_margin).cuda()
    # criterion_D = nn.CrossEntropyLoss().cuda()

    # print(args)
    # # setup id opt
    # # if args.arch == 'ide':
    # #     ignored_params = list(map(id, model_t.model.fc.parameters() )) + list(map(id, model_t.classifier.parameters() ))
    # # else:
    # #     ignored_params = list(map(id, id_a.classifier1.parameters())) + list(map(id, id_a.classifier2.parameters()))
    # #     ignored_params_s = list(map(id, id_b.classifier1.parameters())) + list(map(id, id_b.classifier2.parameters()))
    # #
    # # base_params = filter(lambda p: id(p) not in ignored_params, id_a.parameters())
    # # base_params_s = filter(lambda p: id(p) not in ignored_params_s, id_b.parameters())
    # #
    # # if args.use_adam:
    # #     optimizer_ft = torch.optim.Adam([
    # #         {'params': filter(lambda p: p.requires_grad,base_params), 'lr': args.lr},
    # #         {'params': filter(lambda p: p.requires_grad, base_params_s), 'lr': args.lr},
    # #         {'params': id_a.classifier1.parameters(), 'lr': args.lr},
    # #         {'params': id_a.classifier2.parameters(), 'lr': args.lr},
    # #         {'params': id_b.classifier1.parameters(), 'lr': args.lr},
    # #         {'params': id_b.classifier2.parameters(), 'lr': args.lr},
    # #         ],
    # #         weight_decay=5e-4)
    # # else:
    # #     optimizer_ft = torch.optim.SGD([
    # #         {'params': filter(lambda p: p.requires_grad, base_params), 'lr': args.lr},
    # #         {'params': filter(lambda p: p.requires_grad, base_params_s), 'lr': args.lr},
    # #         {'params': model_s.classifier.parameters(), 'lr': args.lr},
    # #         {'params': model_s.attention_module.parameters(), 'lr': args.lr},
    # #         {'params': model_t.classifier.parameters(), 'lr': args.lr},
    # #         {'params': model_t.attention_module.parameters(), 'lr': args.lr},
    # #         ],
    # #         momentum=0.9,
    # #         weight_decay=5e-4,
    # #         nesterov=True)
    # #
    # # id_scheduler = WarmupMultiStepLR(optimizer_ft, args.mile_stone, args.gamma, args.warmup_factor,
    # #                                       args.warmup_iters, args.warmup_methods)
    # setup dis and gen
    # Separate Adam optimizers for the discriminators and generators.
    dis_opt = torch.optim.Adam([
        {
            'params': dis_a.parameters(),
            'lr': args.dis_lr
        },
        {
            'params': dis_b.parameters(),
            'lr': args.dis_lr
        },
    ],
                               weight_decay=5e-4)
    gen_opt = torch.optim.Adam([
        {
            'params': gen_a.parameters(),
            'lr': args.gen_lr
        },
        {
            'params': gen_b.parameters(),
            'lr': args.gen_lr
        },
    ],
                               weight_decay=5e-4)
    dis_scheduler = WarmupMultiStepLR(dis_opt, args.mile_stone, args.gamma,
                                      args.warmup_factor, args.warmup_iters,
                                      args.warmup_methods)
    gen_scheduler = WarmupMultiStepLR(gen_opt, args.mile_stone, args.gamma,
                                      args.warmup_factor, args.warmup_iters,
                                      args.warmup_methods)
    trainer = Trainer(gen_a, gen_b, dis_a, dis_b, criterion_z, criterion_I,
                      criterion_att, trainvallabel, 1, 1, 0.15, 0.05, 5)

    # NOTE(review): flag and best_top1_s are assigned but never used below.
    flag = 1
    best_top1 = -1
    best_top1_s = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        print("Begin Train")
        # id_scheduler.step()
        gen_scheduler.step()
        dis_scheduler.step()
        trainer.train(epoch, train_loader, dis_opt, gen_opt)
        #
        # Unconditionally checkpoint the encoder weights each epoch.
        save_checkpoint(
            {
                'content_a': gen_a.enc_content.state_dict(),
                'content_b': gen_b.enc_content.state_dict(),
                'style_a': gen_a.enc_style.state_dict(),
                'style_b': gen_b.enc_style.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        # if epoch < 200:
        #     continue
        # Evaluate every epoch (the modulus-1 test always passes).
        if not epoch % 1 == 0:
            continue

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'content_a': gen_a.enc_content.state_dict(),
                'content_b': gen_b.enc_content.state_dict(),
                'style_a': gen_a.enc_style.state_dict(),
                'style_b': gen_b.enc_style.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')
    #
    #
    #
    #
    # print('Test with best model_t:')
    # print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
    #           format(epoch, top1, best_top1, ' *' if is_best else ''))
    #
    # print('Test with best model_s:')
    # print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
    #       format(epoch, top1_s, best_top1_s, ' *' if is_best else ''))
    #
    # checkpoint = load_checkpoint(osp.join(args.logs_dir,'model_best.pth.tar'))
    # model_t.load_state_dict(checkpoint['model'])
    # metric.train(model_t, train_loader)
    # evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)
    #
    # checkpoint_s = load_checkpoint(osp.join(args.logs_dir, 's_model_best.pth.tar'))
    # model_s.load_state_dict(checkpoint_s['model'])
    # evaluator_s.evaluate(query_loader_s, gallery_loader_s, dataset.query, dataset.gallery, metric)

    print(args)
Exemplo n.º 17
0
def main(args):
    """Train a re-ID model with OIM loss, or evaluate a saved checkpoint.

    Seeds the RNGs, builds data loaders and the model, then either runs
    evaluation only (``args.evaluate``) or trains with SGD + step LR decay,
    checkpointing after each epoch and keeping the best top-1 model.
    """
    # Seed for repeatability (cudnn.benchmark may still introduce
    # nondeterminism in exchange for speed).
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_features=args.features, norm=True,
                          dropout=args.dropout)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = OIMLoss(model.module.num_features, num_classes,
                        scalar=args.oim_scalar,
                        momentum=args.oim_momentum).cuda()

    # Optimizer
    # Pretrained backbone ('base') gets a 10x smaller LR than new layers.
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate: decay 10x every step_size epochs.
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        # NOTE(review): no `metric` is passed here, so validation presumably
        # uses the evaluator's default distance — confirm this is intended.
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 18
0
def main(args):
    """Train the 'deepperson' re-ID model with DeepLoss, or evaluate.

    Seeds the RNGs, builds identity-sampled data loaders, optionally
    resumes from a checkpoint, then either evaluates (``args.evaluate``)
    or trains with Adam under a steep post-epoch-100 LR decay. The final
    test uses the last (not best) checkpoint.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    # The identity sampler draws num_instances images per identity, so it
    # must evenly divide the batch size.
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, 'num_instances should divide batch_size'

    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.batch_size, args.num_instances, args.workers)

    # Create model
    print('num_features: %d, features:%d ' % (args.num_features, num_classes))
    model = models.create("deepperson",
                          num_features=args.num_features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        # NOTE(review): `metric` is trained but never passed to evaluate()
        # here — confirm whether the learned metric should be used.
        evaluate(model, val_loader, dataset.val, dataset.val)
        print("Test:")
        evaluate(model, test_loader, dataset.query, dataset.gallery)
        return

    # Criterion
    criterion = DeepLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate: constant through epoch 100, then an
    # exponential decay (x0.001 every further 50 epochs).
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, add_soft=args.add_soft)
        # Skip validation/checkpointing during early warm-up epochs.
        if epoch < args.start_save:
            continue
        top1 = evaluate(model, val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with last model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluate(model, test_loader, dataset.query, dataset.gallery)
Exemplo n.º 19
0
def main(args):
    """Train an adversarial re-ID pair (feature net + discriminator).

    Builds data loaders and the model pair, optionally resumes both
    networks from one checkpoint, then either evaluates (``args.evaluate``)
    or trains with label-smoothed cross-entropy + triplet loss for the
    feature model and cross-entropy for the discriminator. A rolling
    checkpoint is written every epoch; from epoch 200 on, every 10th epoch
    is evaluated and the best top-1 model is tracked.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    # The identity sampler draws num_instances images per identity, so it
    # must evenly divide the batch size.
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (224, 224)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model: feature extractor plus a discriminator network.
    model, model_discriminator = models.create(
        args.arch, num_classes=num_classes, num_features=args.features)
    model = model.cuda()
    model_discriminator = model_discriminator.cuda()
    evaluator = Evaluator(model)

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Optionally resume both networks from a single checkpoint.
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['model'])
        model_discriminator.load_state_dict(checkpoint['model_discriminator'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  "
          .format(start_epoch))

    if args.evaluate:
        metric.train(model, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, metric)
        exit()

    # Losses: label smoothing for ID classification, triplet for the
    # embedding, plain cross-entropy for the discriminator.
    current_margin = args.margin
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes).cuda()
    criterion_I = TripletLoss(margin=current_margin).cuda()
    criterion_D = nn.CrossEntropyLoss().cuda()
    print(args)

    # Separate the classifier/fc heads from the backbone parameters.
    if args.arch == 'ide':
        ignored_params = (list(map(id, model.model.fc.parameters())) +
                          list(map(id, model.classifier.parameters())))
    else:
        ignored_params = (list(map(id, model.classifier.parameters())) +
                          list(map(id, model.base.fc.parameters())))
    base_params = filter(lambda p: id(p) not in ignored_params,
                         model.parameters())
    # Log the names of all parameters being optimized.
    # (Fixed: the original body line was indented with a tab after spaces,
    # which is a TabError under Python 3.)
    for name, _ in model.named_parameters():
        print(name)

    optimizer_ft = torch.optim.Adam(
        [
            {'params': filter(lambda p: p.requires_grad, base_params),
             'lr': 1e-4},
            {'params': model.classifier.parameters(), 'lr': 1e-4},
        ],
        weight_decay=5e-4)

    optimizer_discriminator = torch.optim.Adam(
        [
            {'params': model_discriminator.model.parameters(), 'lr': 1e-4},
            {'params': model_discriminator.classifier.parameters(),
             'lr': 1e-4},
        ],
        weight_decay=5e-4)

    # Trainer
    trainer = Trainer(model, model_discriminator, criterion_z, criterion_I,
                      criterion_D, trainvallabel, 1, 1, 0.15, 0.05, 5)  # c: 0.15, u: 0.05

    best_top1 = -1
    # Initialize here so the post-loop report is well-defined even when
    # training never reaches an evaluation epoch (epochs <= 200). The
    # original referenced top1/is_best unconditionally and raised
    # NameError in that case.
    top1 = best_top1
    is_best = False
    # Start training
    for epoch in range(start_epoch, args.epochs):
        triple_loss, tot_loss = trainer.train(
            epoch, train_loader, optimizer_ft, optimizer_discriminator)
        # Always keep a rolling (non-best) checkpoint of both networks.
        save_checkpoint({
            'model': model.state_dict(),
            'model_discriminator': model_discriminator.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, False, epoch, args.logs_dir, fpath='checkpoint.pth.tar')
        print(epoch)
        # Evaluation is expensive: skip the first 200 epochs entirely,
        # then only evaluate every 10th epoch.
        if epoch < 200:
            continue
        if epoch % 10 != 0:
            continue
        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'model': model.state_dict(),
            'model_discriminator': model_discriminator.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, epoch, args.logs_dir, fpath='checkpoint.pth.tar')

    print('Test with best model:')
    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['model'])
    metric.train(model, train_loader)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, metric)
    print(args)
Exemplo n.º 20
0
def main(args):
    """Load a re-ID checkpoint, extract features, and dump them to disk.

    Despite the training-style setup, this entry point only restores a
    model from ``args.resume``, records embedding weights into an h5
    database, runs feature extraction over the test loader, and writes the
    features to a msgpack file. No training occurs.
    """
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    sys.stderr = Logger(osp.join(args.logs_dir, 'err.txt'))
    lz.init_dev(args.gpu)
    print('config is {}'.format(vars(args)))
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'

    test_loader = get_data(args)

    # Create model
    # NOTE(review): num_features is fed from args.num_classes while
    # num_classes is hard-coded to 100 — confirm this swap is intentional.
    model = models.create(
        args.arch,
        dropout=args.dropout,
        pretrained=args.pretrained,
        block_name=args.block_name,
        block_name2=args.block_name2,
        num_features=args.num_classes,
        num_classes=100,
        num_deform=args.num_deform,
        fusion=args.fusion,
        last_conv_stride=args.last_conv_stride,
        last_conv_dilation=args.last_conv_dilation,
    )

    print(model)
    param_mb = sum(p.numel() for p in model.parameters()) / 1000000.0
    print('    Total params: %.2fM' % (param_mb))

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        # Poll until the checkpoint file appears (e.g. written by another
        # concurrently running training job).
        while not osp.exists(args.resume):
            lz.logging.warning(' no chkpoint {} '.format(args.resume))
            time.sleep(20)
        if torch.cuda.is_available():
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(args.resume, map_location='cpu')
        # model.load_state_dict(checkpoint['state_dict'])
        db_name = args.logs_dir + '/' + args.logs_dir.split('/')[-1] + '.h5'
        load_state_dict(model, checkpoint['state_dict'])
        # Persist center/softmax embedding weights for later analysis.
        with lz.Database(db_name) as db:
            if 'cent' in checkpoint:
                db['cent'] = to_numpy(checkpoint['cent'])
            db['xent'] = to_numpy(checkpoint['state_dict']['embed2.weight'])
        if args.restart:
            # Restart: report the checkpoint's progress but keep epoch 0.
            start_epoch_ = checkpoint['epoch']
            best_top1_ = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}".format(
                start_epoch_, best_top1_))
        else:
            start_epoch = checkpoint['epoch']
            best_top1 = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}".format(
                start_epoch, best_top1))
    # Wrap for the available devices (CPU-only, single- or multi-GPU).
    if args.gpu is None or len(args.gpu) == 0:
        model = nn.DataParallel(model)
    elif len(args.gpu) == 1:
        model = nn.DataParallel(model).cuda()
    else:
        model = nn.DataParallel(model, device_ids=range(len(args.gpu))).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Extract features and dump them as a msgpack archive.
    features, _ = extract_features(model, test_loader)
    for k in features.keys():
        features[k] = features[k].numpy()
    lz.msgpack_dump(features, work_path + '/reid.person/fea.mp', allow_np=True)
Exemplo n.º 21
0
def main(args):
    """Fine-tune a classifier on translated ("fake") data, or evaluate.

    Builds the real-data loaders for validation/test and a translated
    training loader (``get_fake_data``), then either evaluates
    (``args.evaluate``) or trains with cross-entropy and step LR decay,
    logging to TensorBoard and keeping the best top-1 checkpoint.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, _, _, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Training data comes from the translated/generated image list.
    trans_train_loader, num_classes = get_fake_data(args.trans_name, args.trans_data_txt, args.height,
                                       args.width, args.batch_size, args.workers)
    # Create model
    model = models.create(args.arch,
                          dropout=0, num_classes=num_classes)
    # model = models.create(args.arch, num_features=1024, num_diff_features=args.features,
    #                       dropout=args.dropout, num_classes=num_classes, iden_pretrain=True)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        # The metric is trained on the translated data, matching training.
        metric.train(model, trans_train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    # Pretrained backbone ('base') gets a 10x smaller LR than new layers.
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate: decay 10x every lr_change_epochs epochs.
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr * 0.1**(epoch//args.lr_change_epochs)
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, trans_train_loader, optimizer, logger)
        # (Disabled) per-parameter TensorBoard histograms; note the
        # commented code uses Python 2 `except AttributeError, e` syntax.
        # for tag, value in model.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        # for tag, value in criterion.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        #################################
        # NOTE(review): validation runs without `metric` — presumably the
        # evaluator's default distance; confirm intended.
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, trans_train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 22
0
def main(args):
    """Train an ID-classification re-ID model from list files, or evaluate.

    Reads train/val/query/gallery image lists from the dataset directory,
    builds loaders and the model, then either evaluates saved checkpoints
    (``args.evaluate``) or trains with cross-entropy, early-stopping after
    ``args.patience`` epochs without a new best top-1.
    """
    setproctitle.setproctitle(args.project_name)
    logs_dir = osp.join(args.root_dir, 'logs/', args.project_name)
    if osp.exists(logs_dir) is False:
        os.makedirs(logs_dir)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    data_dir = osp.join(args.data_dir, args.dataset)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))
    print('{}'.format(vars(parser.parse_args())))

    # Create data loaders
    def readlist(path):
        """Parse a list file of '<name> <pid> <camid>' lines into tuples."""
        lines = []
        with open(path, 'r') as f:
            data = f.readlines()

        for line in data:
            name, pid, camid = line.split()
            lines.append((name, int(pid), int(camid)))
        return lines

    # NOTE(review): if any list file is missing, only a warning is printed
    # and the corresponding name stays unbound, so get_data() below raises
    # NameError. Kept as-is to preserve behavior, but consider failing fast.
    if osp.exists(osp.join(data_dir, 'train.txt')):
        train_list = readlist(osp.join(data_dir, 'train.txt'))
    else:
        print("The training list doesn't exist")

    if osp.exists(osp.join(data_dir, 'val.txt')):
        val_list = readlist(osp.join(data_dir, 'val.txt'))
    else:
        print("The validation list doesn't exist")

    if osp.exists(osp.join(data_dir, 'query.txt')):
        query_list = readlist(osp.join(data_dir, 'query.txt'))
    else:
        print("The query.txt doesn't exist")

    if osp.exists(osp.join(data_dir, 'gallery.txt')):
        gallery_list = readlist(osp.join(data_dir, 'gallery.txt'))
    else:
        print("The gallery.txt doesn't exist")

    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    train_loader, val_loader, test_loader = \
        get_data(data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval, train_list, val_list, query_list, gallery_list, dataset_type=args.dataset)
    # Create model
    num_classes = args.ncls
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    cnt = 0
    for p in model.parameters():
        cnt += p.numel()
    print('Parameter number:{}\n'.format(cnt))
    # Load from checkpoint
    start_epoch = best_top1 = 0
    model = nn.DataParallel(model).cuda()
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.module.load_state_dict(checkpoint['state_dict'])

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        with torch.no_grad():
            print('Test with latest model:')
            checkpoint = load_checkpoint(
                osp.join(logs_dir, 'checkpoint.pth.tar'))
            model.module.load_state_dict(checkpoint['state_dict'])
            print('best epoch: ', checkpoint['epoch'])
            metric.train(model, train_loader)
            # Fixed: the original passed `clist=clist`, a name never defined
            # in this function, so every --evaluate run died with NameError.
            # The final-test calls below omit it; do the same here.
            evaluator.evaluate(test_loader,
                               query_list,
                               gallery_list,
                               metric=metric)

            print('Test with best model:')
            checkpoint = load_checkpoint(
                osp.join(logs_dir, 'model_best.pth.tar'))
            model.module.load_state_dict(checkpoint['state_dict'])
            print('best epoch: ', checkpoint['epoch'])
            metric.train(model, train_loader)
            evaluator.evaluate(test_loader,
                               query_list,
                               gallery_list,
                               metric=metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    # 'plain': small LR on the pretrained backbone, full LR on new layers.
    # 'deada': small LR on the extractor, fixed LR on the classifier
    #          (which is re-initialized each epoch below).
    if args.training_method == 'plain':
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    elif args.training_method == 'deada':
        param_class_ids = set(map(id, model.module.classifier.parameters()))
        param_extrac = [
            p for p in model.parameters() if id(p) not in param_class_ids
        ]
        param_groups = [{
            'params': param_extrac,
            'lr_mult': 0.1
        }, {
            'params': model.module.classifier.parameters(),
            'lr': args.lr_classifier
        }]
    else:
        raise KeyError('Unknown training method: ', args.training_method)

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate: decay 10x every step_size epochs.
    def adjust_lr(epoch, args):
        step_size = args.step_size
        lr = args.lr if epoch <= step_size else \
             args.lr * (0.1 ** ((epoch - step_size) // step_size + 1))
        if args.training_method == 'plain':
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)
        elif args.training_method == 'deada':
            for g in optimizer.param_groups[:1]:
                # only update lr of feature extractor, keep lr of classifier constant
                g['lr'] = lr * g.get('lr_mult', 1)

    # Start training (early-stopped after `args.patience` epochs without a
    # new best top-1).
    waits = 0
    for epoch in range(start_epoch, args.epochs):
        print('Project Name:{}'.format(args.project_name))
        if waits >= args.patience:
            print('Patience is exceeded\n')
            break
        print('\nWaits: {}'.format(waits))
        adjust_lr(epoch, args)
        if args.training_method == 'deada':
            lr_extrac = optimizer.param_groups[0]['lr']
            lr_class = optimizer.param_groups[1]['lr']
            print('feature extractor lr: ', lr_extrac, ' classifier lr: ',
                  lr_class)
            # Re-initialize the classifier head every epoch ('deada').
            init.normal_(trainer.model.module.classifier.weight, std=0.001)
            init.constant_(trainer.model.module.classifier.bias, 0)

        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val_list, val_list)
        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))
        # Keep a per-epoch snapshot every 5 epochs.
        if (epoch + 1) % 5 == 0:
            print('Test model: \n')
            model_name = 'epoch_' + str(epoch) + '.pth.tar'
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                },
                False,
                fpath=osp.join(logs_dir, model_name))
        if is_best:
            waits = 0
        else:
            waits += 1
    # Final test
    with torch.no_grad():
        print('Test with latest model:')
        checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
        model.module.load_state_dict(checkpoint['state_dict'])
        print('best epoch: ', checkpoint['epoch'])
        metric.train(model, train_loader)
        evaluator.evaluate(test_loader,
                           query_list,
                           gallery_list,
                           metric=metric)

        print('Test with best model:')
        checkpoint = load_checkpoint(osp.join(logs_dir, 'model_best.pth.tar'))
        model.module.load_state_dict(checkpoint['state_dict'])
        print('best epoch: ', checkpoint['epoch'])
        metric.train(model, train_loader)
        evaluator.evaluate(test_loader,
                           query_list,
                           gallery_list,
                           metric=metric)
Exemplo n.º 23
0
def main(args):
    """Unsupervised domain-adaptive re-ID via clustering-based pseudo-labels.

    Repeatedly: extract target-domain features, compute re-ranked distances
    (optionally mixed with source-domain features via ``args.lambda_value``),
    cluster with DBSCAN to produce pseudo identity labels, and fine-tune the
    model on the clustered subset. Runs ``args.iteration`` rounds.

    Returns:
        tuple: (mAP, rank-1) from the final evaluation on the target test set.
    """
    # Seed RNGs for reproducible sampling/clustering.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        # Default input resolution depends on the backbone.
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data (used only for feature mixing when lambda_value != 0)
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    # (hard-coded id counts match the checkpoints trained on each source set).
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        # strict=False: classifier head sizes may differ from the checkpoint.
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        # NOTE: raising here makes a source-pretrained checkpoint mandatory;
        # despite the "Warning" name this aborts the run.
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print(
        "Test with the original model trained on target domain (direct transfer):"
    )
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    if args.evaluate:
        return

    # Criterion: two triplet losses (presumably one per branch/view of the
    # trainer — TODO confirm against Trainer implementation).
    criterion = [
        TripletLoss(args.margin, args.num_instances).cuda(),
        TripletLoss(args.margin, args.num_instances).cuda(),
    ]

    # Optimizer
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=0.9,
    )

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            # Placeholder: re_ranking presumably ignores source features when
            # lambda_value == 0 — TODO confirm.
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # synchronization feature order with dataset.train
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster: eps is estimated once, from the rho-quantile of
            # pairwise distances at iteration 0, and reused in later rounds.
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        # DBSCAN labels noise as -1; subtract it from the id count.
        num_ids = len(set(labels)) - 1
        print('Iteration {} have {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset: keep only clustered samples, pseudo-labelled.
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.trainval, labels):
            if label == -1:
                continue
            # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0
            new_dataset.append((fname, label, 0))
        print('Iteration {} have {} training images'.format(
            iter_n + 1, len(new_dataset)))

        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        # train model with new generated dataset
        # NOTE(review): Trainer/Evaluator are re-created every iteration;
        # presumably stateless wrappers around `model` — confirm.
        trainer = Trainer(model, criterion, print_freq=args.print_freq)
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, optimizer)

    # Evaluate once after the final self-training round.
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    return (rank_score.map, rank_score.market1501[0])
Exemplo n.º 24
0
def main(args):
    """Train a ResNet re-ID model with a triplet-style loss and evaluate it.

    Seeds RNGs, builds data loaders, optionally resumes from a checkpoint,
    trains for ``args.epochs`` epochs (evaluating every 3 epochs and tracking
    the best mAP), then runs a final test with the best saved model.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = ResNet(args.depth, num_features=args.features,
                   dropout=args.dropout, num_classes=num_classes)
    model = nn.DataParallel(model).cuda()

    # Load from checkpoint.
    # BUG FIX: prior_best_map was previously assigned only inside the resume
    # branch, raising NameError at the first evaluation when not resuming.
    start_epoch = prior_best_map = 0
    if args.if_resume:
        print(args.resume)
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        prior_best_map = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, prior_best_map))

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion: triplet-loss variant with extra weighting terms.
    criterion = TripletLoss_biu(margin=args.margin,
                                num_instances=args.num_instances,
                                alpha=args.alpha, beta=args.beta,
                                gamma=args.gamma).cuda()

    # Optimizer
    if args.optimizer == 'sgd':
        param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                     weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate:
    # SGD -> step decay by 10x every 40 epochs; Adam -> flat, then exp decay.
    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            lr = args.lr * (0.1 ** (epoch // 40))
        else:
            lr = args.lr if epoch <= 80 else \
                 args.lr * (0.1 ** ((epoch - 100) / 60.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        # Evaluate and checkpoint every 3 epochs.
        if epoch % 3 == 0:
            metric.train(model, train_loader)
            top_map = evaluator.evaluate(test_loader, dataset.query,
                                         dataset.gallery)
            is_best = top_map > prior_best_map
            prior_best_map = max(top_map, prior_best_map)
            # BUG FIX: also store the running best under 'best_top1', since
            # the resume path above reads checkpoint['best_top1'] (previously
            # only 'best_map' was written, so resuming raised KeyError).
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_map': top_map,
                'best_top1': prior_best_map,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 25
0
def main(args):
    """Train a re-ID model with triplet + center + cross-entropy losses.

    Sets up logging, data loaders, the model, the three-part criterion, and
    two optimizers (one for model parameters with per-group lr multipliers,
    one for the center-loss centers), then trains with periodic checkpointing
    and TensorBoard logging. Returns the final evaluation result dict.
    """
    # Mirror stdout/stderr into log files under logs_dir.
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    sys.stderr = Logger(osp.join(args.logs_dir, 'err.txt'))
    # lz: project utility library; presumably selects/initializes GPU devices.
    lz.init_dev(args.gpu)
    print('config is {}'.format(vars(args)))
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'

    dataset = datasets.create(args.dataset, root=args.root + '/' + args.dataset,
                              split_id=args.split_id, mode=args.dataset_mode)

    # ImageNet channel statistics.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.trainval
    num_classes = dataset.num_trainval_ids

    train_transformer = T.Compose([
        T.RandomCropFlip(args.height, args.width, area=args.area),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(args.height, args.width),
        T.ToTensor(),
        normalizer,
    ])

    # Identity-balanced sampling with a random-sample ratio.
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer,
                     ),
        batch_size=args.batch_size, num_workers=args.workers,
        sampler=RandomIdentityWeightedSampler(
            train_set, args.num_instances,
            batch_size=args.batch_size,
            rand_ratio=args.rand_ratio,
        ),
        # shuffle=True,
        pin_memory=args.pin_memory, drop_last=True)

    # NOTE(review): "test_loader" is built from dataset.val, not the test
    # split — confirm this is intentional.
    test_loader = DataLoader(
        Preprocessor(dataset.val,
                     root=dataset.images_dir,
                     transform=test_transformer,
                     ),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=False)

    # Create model
    # NOTE(review): args.num_classes is passed as num_features (embedding
    # dim), while the classifier size comes from num_classes — the arg name
    # is misleading; confirm against the model factory.
    model = models.create(args.arch,
                          dropout=args.dropout,
                          pretrained=args.pretrained,
                          block_name=args.block_name,
                          block_name2=args.block_name2,
                          num_features=args.num_classes,
                          num_classes=num_classes,
                          num_deform=args.num_deform,
                          fusion=args.fusion,
                          )

    print(model)
    # Parameter count in millions.
    param_mb = sum(p.numel() for p in model.parameters()) / 1000000.0
    logging.info('    Total params: %.2fM' % (param_mb))

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        # Poll until the checkpoint file appears (supports producer/consumer
        # workflows where another job writes it).
        while not osp.exists(args.resume):
            lz.logging.warning(' no chkpoint {} '.format(args.resume))
            time.sleep(20)
        if torch.cuda.is_available():
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(args.resume, map_location='cpu')
        # model.load_state_dict(checkpoint['state_dict'])
        # Dump center/xent weights to an HDF5 db for offline inspection.
        db_name = args.logs_dir + '/' + args.logs_dir.split('/')[-1] + '.h5'
        load_state_dict(model, checkpoint['state_dict'])
        with lz.Database(db_name) as db:
            if 'cent' in checkpoint:
                db['cent'] = to_numpy(checkpoint['cent'])
            db['xent'] = to_numpy(checkpoint['state_dict']['embed2.weight'])
        if args.restart:
            # Restart from epoch 0: only report the checkpoint's counters.
            start_epoch_ = checkpoint['epoch']
            best_top1_ = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}"
                  .format(start_epoch_, best_top1_))
        else:
            start_epoch = checkpoint['epoch']
            best_top1 = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}"
                  .format(start_epoch, best_top1))
    if args.gpu is None:
        model = nn.DataParallel(model)
    elif len(args.gpu) == 1:
        model = nn.DataParallel(model).cuda()
    else:
        model = nn.DataParallel(model, device_ids=range(len(args.gpu))).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, gpu=args.gpu, conf=args.eval_conf, args=args)
    if args.evaluate:
        res = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric,
                                 final=True, prefix='test')

        lz.logging.info('eval {}'.format(res))
        return res
    # Criterion: [triplet, center, cross-entropy], applied by the trainer.
    if not args.xent_smooth:
        xent = nn.CrossEntropyLoss()
    else:
        xent = CrossEntropyLabelSmooth(num_classes=num_classes)
    setattr(xent, 'name', 'xent')

    criterion = [TripletLoss(margin=args.margin, mode='hard', args=args),
                 CenterLoss(num_classes=num_classes, feat_dim=args.num_classes,
                            margin2=args.margin2,
                            margin3=args.margin3, mode=args.mode,
                            push_scale=args.push_scale,
                            args=args),
                 xent
                 ]
    if args.gpu is not None:
        criterion = [c.cuda() for c in criterion]
    # Optimizer: embedding weights get a separate lr multiplier.
    # NOTE(review): `fid` is presumably an alias of builtin id() — confirm.
    fast_params = []
    for name, param in model.named_parameters():
        if name == 'module.embed1.weight' or name == 'module.embed2.weight':
            fast_params.append(param)
    fast_params_ids = set(map(fid, fast_params))
    normal_params = [p for p in model.parameters() if fid(p) not in fast_params_ids]
    param_groups = [
        {'params': fast_params, 'lr_mult': args.lr_mult},
        {'params': normal_params, 'lr_mult': 1.},
    ]
    # Separate optimizer for the center-loss centers.
    if args.optimizer_cent == 'sgd':
        optimizer_cent = torch.optim.SGD(criterion[1].parameters(), lr=args.lr_cent, )
    else:
        optimizer_cent = torch.optim.Adam(criterion[1].parameters(), lr=args.lr_cent, )
    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(
            # model.parameters(),
            param_groups,
            lr=args.lr,
            betas=args.adam_betas,
            eps=args.adam_eps,  # adam hyperparameter
            weight_decay=args.weight_decay)
    elif args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(
            # filter(lambda p: p.requires_grad, model.parameters()),
            param_groups,
            lr=args.lr,
            weight_decay=args.weight_decay, momentum=0.9,
            nesterov=True)
    else:
        raise NotImplementedError

    # Optional warm-up phase: train with cross-entropy only (tri_weight=0).
    if args.cls_pretrain:
        args_cp = copy.deepcopy(args)
        args_cp.cls_weight = 1
        args_cp.tri_weight = 0
        trainer = XentTrainer(model, criterion, dbg=False,
                              logs_at=args_cp.logs_dir + '/vis', args=args_cp)
        for epoch in range(start_epoch, args_cp.epochs):
            hist = trainer.train(epoch, train_loader, optimizer)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'cent': criterion[1].centers,
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, True, fpath=osp.join(args.logs_dir, 'checkpoint.{}.pth'.format(epoch)))  #
            print('Finished epoch {:3d} hist {}'.
                  format(epoch, hist))
    # Trainer
    # NOTE(review): dop_info is not defined in this function — presumably a
    # module-level global; verify it exists at import time.
    trainer = TCXTrainer(model, criterion, dbg=True,
                         logs_at=args.logs_dir + '/vis', args=args, dop_info=dop_info)

    # Schedule learning rate: multiply base lr by decay**k after each step
    # boundary in `steps`, scaled per group by its lr_mult.
    def adjust_lr(epoch, optimizer=optimizer, base_lr=args.lr, steps=args.steps, decay=args.decay):

        exp = len(steps)
        for i, step in enumerate(steps):
            if epoch < step:
                exp = i
                break
        lr = base_lr * decay ** exp

        lz.logging.info('use lr {}'.format(lr))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr * param_group.get('lr_mult', 1)

    # Schedule batch size / num_instances over bs_steps (no-op when
    # batch_size_l is empty).
    def adjust_bs(epoch, args):
        if args.batch_size_l == []:
            return args
        res = 0
        for i, step in enumerate(args.bs_steps):
            if epoch > step:
                res = i + 1
        print(epoch, res)
        if res >= len(args.num_instances_l):
            res = -1
        args.batch_size = args.batch_size_l[res]
        args.num_instances = args.num_instances_l[res]
        return args

    writer = SummaryWriter(args.logs_dir)
    writer.add_scalar('param', param_mb, global_step=0)

    # schedule = CyclicLR(optimizer)
    schedule = None
    # Start training
    for epoch in range(start_epoch, args.epochs):
        # warm up
        # mAP, acc,rank5 = evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)

        adjust_lr(epoch=epoch)
        args = adjust_bs(epoch, args)

        hist = trainer.train(epoch, train_loader, optimizer, print_freq=args.print_freq, schedule=schedule,
                             optimizer_cent=optimizer_cent)
        for k, v in hist.items():
            writer.add_scalar('train/' + k, v, epoch)
        writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        writer.add_scalar('bs', args.batch_size, epoch)
        writer.add_scalar('num_instances', args.num_instances, epoch)

        # Skip mid-training logging/eval unless enabled and past start_save.
        if not args.log_middle:
            continue
        if epoch < args.start_save:
            continue
        if epoch % 15 == 0:
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'cent': criterion[1].centers,
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, False, fpath=osp.join(args.logs_dir, 'checkpoint.{}.pth'.format(epoch)))

        # Full evaluation only at epochs explicitly listed in args.log_at.
        if epoch not in args.log_at:
            continue

        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'cent': criterion[1].centers,
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, False, fpath=osp.join(args.logs_dir, 'checkpoint.{}.pth'.format(epoch)))

        # res = evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        # for n, v in res.items():
        #     writer.add_scalar('train/'+n, v, epoch)

        res = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric, epoch=epoch)
        for n, v in res.items():
            writer.add_scalar('test/' + n, v, epoch)

        top1 = res['top-1']
        is_best = top1 > best_top1

        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'cent': criterion[1].centers,
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.{}.pth'.format(epoch)))  #

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    res = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
    for n, v in res.items():
        writer.add_scalar('test/' + n, v, args.epochs)

    # Optionally re-test with the best checkpoint if one was ever saved.
    if osp.exists(osp.join(args.logs_dir, 'model_best.pth')) and args.test_best:
        print('Test with best model:')
        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth'))
        model.module.load_state_dict(checkpoint['state_dict'])
        metric.train(model, train_loader)
        res = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric, final=True)
        for n, v in res.items():
            writer.add_scalar('test/' + n, v, args.epochs + 1)
        lz.logging.info('final eval is {}'.format(res))

    writer.close()
    json_dump(res, args.logs_dir + '/res.json', 'w')
    return res
Exemplo n.º 26
0
def main(args):
    """Cross-modality (RGB/IR) re-ID training over 9 dataset trials.

    For each trial index ``ii`` (0..8) it builds a fresh split, three models
    (model_t, model_s, model_ir), a warm-up LR scheduler, and trains with a
    combined label-smoothed CE + rank + triplet + attention-MSE loss,
    checkpointing per trial under ``logs_dir/ii``.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    for ii in range(0,9):
        #if ii == 5 or ii == 6: ii = ii - 4
        #print(ii)
        #continue
        # One log/checkpoint subdirectory per trial.
        if not osp.exists(args.logs_dir+'/{}'.format(ii)):
            os.mkdir(args.logs_dir+'/{}'.format(ii))
        sys.stdout = Logger(osp.join(args.logs_dir+'/{}/log'.format(ii)))
        dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader = \
            get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob,ii+1)
        if not args.evaluate:
            sys.stdout = Logger(osp.join(args.logs_dir+'/log'))

        # NOTE(review): this default is applied AFTER get_data already used
        # args.height/args.width — if they were None, get_data saw None.
        if args.height is None or args.width is None:
            args.height, args.width = (144, 56) if args.arch == 'inception' else (256, 128)
        # Three parallel models: teacher(?), visible-light, and infrared
        # branches — exact roles defined by Trainer; confirm there.
        model_t = models.create(args.arch, num_classes=num_classes, num_features=args.features, attention_mode=args.att_mode)
        model_s = models.create(args.arch, num_classes=num_classes, num_features=args.features, attention_mode=args.att_mode)
        model_ir = models.create(args.arch, num_classes=num_classes, num_features=args.features, attention_mode=args.att_mode)
    #    print(model)
        USE_CUDA = torch.cuda.is_available()
        device = torch.device("cuda:0" if USE_CUDA else "cpu")
        model_t = nn.DataParallel(model_t, device_ids=[0,1,2])
        model_t.to(device)
        model_s = nn.DataParallel(model_s, device_ids=[0,1,2])
        model_s.to(device)
        model_ir = nn.DataParallel(model_ir, device_ids=[0,1,2])
        model_ir.to(device)
        print(num_classes)
        #model = model.cuda()
        #model_discriminator = model_discriminator.cuda()
        #model_discriminator = nn.DataParallel(model_discriminator, device_ids=[0,1,2])
        #model_discriminator.to(device)

        evaluator = Evaluator(model_t)
        metric = DistanceMetric(algorithm=args.dist_metric)
        evaluator_s = Evaluator(model_s)
        metric_s = DistanceMetric(algorithm=args.dist_metric)
        evaluator_ir = Evaluator(model_ir)
        metric_ir = DistanceMetric(algorithm=args.dist_metric)

        start_epoch = 0
        # NOTE(review): BUG — `model` and `model_discriminator` are never
        # defined in this function (only model_t/model_s/model_ir exist), so
        # this resume branch raises NameError if args.resume is set.
        if args.resume:
            checkpoint = load_checkpoint(args.resume)
            model.load_state_dict(checkpoint['model'])
            model_discriminator.load_state_dict(checkpoint['model_discriminator'])
            start_epoch = checkpoint['epoch']
            print("=> Start epoch {}".format(start_epoch))

        # NOTE(review): BUG — same undefined `model` here; the evaluate-only
        # path raises NameError. Probably model_t was intended.
        if args.evaluate:
            metric.train(model, train_loader)
            evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
            exit()

        current_margin = args.margin
        #criterion_z = nn.CrossEntropyLoss().cuda()

        # Losses: label-smoothed CE (identity), MSE (attention alignment),
        # rank loss (cross-modality), and triplet loss.
        criterion_z = CrossEntropyLabelSmooth(num_classes= num_classes, epsilon=0.3).cuda()
        criterion_att = nn.MSELoss().cuda()
        #criterion_I = TripletLoss(margin= current_margin).cuda()
        #criterion_I = Circle_Rank_loss(margin_1=args.margin_1, margin_2=args.margin_2, alpha_1=args.alpha_1, alpha_2=args.alpha_2).cuda()
        criterion_I = Rank_loss(margin_1= args.margin_1, margin_2 =args.margin_2, alpha_1 =args.alpha_1, alpha_2= args.alpha_2).cuda()
        criterion_t = Triplet(margin=current_margin).cuda()

        print(args)

        # NOTE(review): BUG — the 'ide' branch references undefined `model`
        # and leaves ignored_params_s/ignored_params_ir unset, so the filters
        # below would raise NameError for arch == 'ide'.
        if args.arch == 'ide':
            ignored_params = list(map(id, model.model.fc.parameters() )) + list(map(id, model.classifier.parameters() ))
        else:
            ignored_params = list(map(id, model_t.module.classifier.parameters())) + list(map(id, model_t.module.attention_module.parameters()))
            ignored_params_s = list(map(id, model_s.module.classifier.parameters())) + list(map(id, model_s.module.attention_module.parameters()))
            ignored_params_ir = list(map(id, model_ir.module.classifier.parameters())) + list(map(id, model_ir.module.attention_module.parameters()))

        # Backbone parameters (everything except classifier/attention heads).
        base_params = filter(lambda p: id(p) not in ignored_params, model_t.parameters())
        base_params_s = filter(lambda p: id(p) not in ignored_params_s, model_s.parameters())
        base_params_ir = filter(lambda p: id(p) not in ignored_params_ir, model_ir.parameters())


        if args.use_adam:
            optimizer_ft = torch.optim.Adam([
            #print("Ranger")
            #optimizer_ft = Ranger([
                {'params': filter(lambda p: p.requires_grad,base_params), 'lr': args.lr},
                {'params': filter(lambda p: p.requires_grad,base_params_s), 'lr':args.lr},
                {'params': filter(lambda p: p.requires_grad,base_params_ir), 'lr':args.lr},
                {'params': model_t.module.classifier.parameters(), 'lr': args.lr},
                {'params': model_t.module.attention_module.parameters(), 'lr': args.lr},
                {'params': model_s.module.classifier.parameters(), 'lr': args.lr},
                {'params': model_s.module.attention_module.parameters(), 'lr': args.lr},
                {'params': model_ir.module.classifier.parameters(), 'lr': args.lr},
                {'params': model_ir.module.attention_module.parameters(), 'lr': args.lr},
                ],
                weight_decay=5e-4)

            #optimizer_discriminator = torch.optim.Adam([
            #    {'params': model_discriminator.module.model.parameters(), 'lr': args.lr},
            #    {'params': model_discriminator.module.classifier.parameters(), 'lr': args.lr}
            #    ],
            #    weight_decay=5e-4)


        # NOTE(review): BUG — the SGD branch references undefined `model` and
        # `model_discriminator`; it raises NameError if use_adam is False.
        else:
            optimizer_ft = torch.optim.SGD([
                {'params': filter(lambda p: p.requires_grad,base_params), 'lr': args.lr},
                {'params': model.classifier.parameters(), 'lr': args.lr},
                ],
                momentum=0.9,
                weight_decay=5e-4,
                nesterov=True)
            optimizer_discriminator = torch.optim.SGD([
                {'params': model_discriminator.model.parameters(), 'lr': args.lr},
                {'params': model_discriminator.classifier.parameters(), 'lr': args.lr},
                ],
                momentum=0.9,
                weight_decay=5e-4,
                nesterov=True)

        scheduler = WarmupMultiStepLR(optimizer_ft, args.mile_stone, args.gamma, args.warmup_factor,
                                            args.warmup_iters, args.warmup_methods)

        trainer = Trainer(model_t, model_s, model_ir, criterion_z, criterion_I, criterion_att, criterion_t, trainvallabel, 1, 1 ,args.rgb_w, args.ir_w, 1000)

        flag = 1
        best_top1 = -1
        # best_top1_s = -1
        # best_top1_ir = -1
        # Start training
        for epoch in range(start_epoch, args.epochs):
            # NOTE(review): scheduler.step() before optimizer.step() is the
            # pre-PyTorch-1.1 ordering; on newer versions this skips the
            # first LR value — confirm the intended torch version.
            scheduler.step()
            triple_loss, tot_loss = trainer.train(epoch, train_loader, optimizer_ft)

            save_checkpoint({
                'model': model_t.module.state_dict(),
                #'model_discriminator': model_discriminator.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, False, epoch, (args.logs_dir+'/{}'.format(ii)), fpath='checkpoint.pth.tar')

            # Only evaluate every 10th epoch after the first.
            if epoch < 1:
                continue
            if not epoch % 10 ==0:
                continue


            top1, cmc, mAP = evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)

            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint({
                'model': model_t.module.state_dict(),
                #'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, is_best, epoch, (args.logs_dir+'/{}'.format(ii)), fpath='checkpoint.pth.tar')
       # print('Test with best model:')
       # print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
        #        format(epoch, top1, best_top1, ' *' if is_best else ''))

        #checkpoint = load_checkpoint(osp.join((args.logs_dir+'/{}'.format(ii)),'model_best.pth.tar'))
       # model_t.load_state_dict(checkpoint['model'])
       # metric.train(model, train_loader)
       # _, best_cmc, best_mAP= evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)
       # if ii == 0:
       #     all_cmc = best_cmc
       #     all_mAP = best_mAP

        #else:
        #    all_cmc = all_cmc + best_cmc
        #    all_mAP = all_mAP + best_mAP
        #del model, metric, evaluator, scheduler, criterion_z, criterion_I, criterion_D
   # print('------------------Final-Results------------------')
   # print('Mean AP: {:4.2%}'.format(all_mAP/10.0))
   # print('CMC Scores{:>12}'.format('RegDB')
   # )
   # for k in [1,10,20]:
   #     print('  top-{:<4}{:12.2%}'
   #           .format(k, all_cmc[k -1]/10.0)
   #           )

    #print('Test with best model:')
    #print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
    #          format(epoch, top1, best_top1, ' *' if is_best else ''))

    #checkpoint = load_checkpoint(osp.join(args.logs_dir,'model_best.pth.tar'))
    #model.load_state_dict(checkpoint['model'])
    #metric.train(model, train_loader)
    #evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)
    print(args)
Exemplo n.º 27
0
def main(args):
    """Train an 8-channel InceptionNet re-id model with cross-entropy loss,
    evaluate on the validation split each epoch, and run a final test with
    the best checkpoint plus a detailed CMC report.

    Args:
        args: parsed command-line namespace (dataset, model, optimizer and
            logging options).
    """
    # Fix RNG seeds for reproducibility; cudnn.benchmark auto-tunes
    # convolution algorithms for speed (non-deterministic).
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        # Default input resolution depends on the backbone.
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = InceptionNet(num_channels=8,
                         num_features=args.features,
                         dropout=args.dropout,
                         num_classes=num_classes)

    # Load from checkpoint (resume restores epoch counter and best score)
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        # Evaluation-only mode: fit the metric on training data, report
        # validation and test scores, then exit.
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer: pretrained base layers get a 10x smaller learning rate
    # than the newly added layers (via lr_mult applied in adjust_lr).
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate: step decay, /10 every `step_size` epochs.
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            # Skip evaluation/checkpointing during early epochs.
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=os.path.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test with the best validation checkpoint
    print('Test with best model:')
    checkpoint = load_checkpoint(
        os.path.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)

    # Extra detailed CMC report with a wide top-k range.
    features, _ = extract_features(evaluator.model, test_loader)
    distmat = pairwise_distance(features,
                                dataset.query,
                                dataset.gallery,
                                metric=metric)
    evaluate_all(distmat,
                 query=dataset.query,
                 gallery=dataset.gallery,
                 cmc_topk=(1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50))

    # NOTE(review): this pickles the whole DataParallel wrapper; saving
    # model.module.state_dict() is the usual convention — confirm intent.
    torch.save(model, os.path.join(args.logs_dir, 'model.pt'))
Exemplo n.º 28
0
Arquivo: main.py Projeto: leule/DCDS
def main(args):
    """Train/evaluate a DCDS cascade person re-id model.

    Builds a pooled base CNN plus ``args.grp_num`` pairwise verification
    embedding heads (VNetEmbed), optionally warm-starts them from a
    pretrained checkpoint, then trains with cross-entropy + triplet loss
    and evaluates with a two-stage cascade evaluator.

    Args:
        args: parsed command-line namespace.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (384, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    base_model = models.create(args.arch,
                               num_features=1024,
                               cut_at_pooling=True,
                               dropout=args.dropout,
                               num_classes=args.features)

    grp_num = args.grp_num
    # One 2-way verification head per feature group.
    # BUG FIX: use integer division — `2048 / grp_num` yields a float under
    # Python 3, which is not a valid layer size.
    embed_model = [
        VNetEmbed(instances_num=args.num_instances,
                  feat_num=(2048 // grp_num),
                  num_classes=2,
                  drop_ratio=args.dropout).cuda() for i in range(grp_num)
    ]

    base_model = nn.DataParallel(base_model).cuda()

    model = VNetExtension(
        instances_num=args.num_instances,
        base_model=base_model,
        embed_model=embed_model,
        alpha=args.alpha)

    if args.retrain:
        if args.evaluate_from:
            print('loading trained model...')
            checkpoint = load_checkpoint(args.evaluate_from)
            model.load_state_dict(checkpoint['state_dict'])

        else:
            # Warm-start the base and embed parts separately from a
            # pretrained checkpoint whose parameter names are prefixed.
            print('loading base part of pretrained model...')
            checkpoint = load_checkpoint(args.retrain)
            copy_state_dict(checkpoint['state_dict'],
                            base_model,
                            strip='base_model.',
                            replace='')
            print('loading embed part of pretrained model...')
            if grp_num > 1:
                for i in range(grp_num):
                    copy_state_dict(checkpoint['state_dict'],
                                    embed_model[i],
                                    strip='embed_model.bn_' + str(i) + '.',
                                    replace='bn.')
                    copy_state_dict(checkpoint['state_dict'],
                                    embed_model[i],
                                    strip='embed_model.classifier_' + str(i) +
                                    '.',
                                    replace='classifier.')
            else:
                copy_state_dict(checkpoint['state_dict'],
                                embed_model[0],
                                strip='module.embed_model.',
                                replace='')

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    best_mAP = 0

    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    # Evaluator: softmax-normalize the (N, 2) verification logits and use
    # column 0 as the pairwise distance.
    evaluator = CascadeEvaluator(
        base_model,
        embed_model,
        embed_dist_fn=lambda x: F.softmax(x, dim=1).data[:, 0])

    if args.evaluate:
        metric.train(model, train_loader)
        if args.evaluate_from:
            print('loading trained model...')
            checkpoint = load_checkpoint(args.evaluate_from)
            model.load_state_dict(checkpoint['state_dict'])
        print("Test:")
        evaluator.evaluate(test_loader,
                           dataset.query,
                           dataset.gallery,
                           args.alpha,
                           metric,
                           rerank_topk=args.rerank,
                           dataset=args.dataset)
        return

    # Criterion: identity cross-entropy plus triplet ranking loss.
    criterion = nn.CrossEntropyLoss().cuda()

    criterion2 = TripletLoss(margin=args.margin).cuda()

    # base lr rate and embed lr rate: the freshly initialized embedding
    # heads train 10x faster than the pretrained base.
    new_params = [z for z in model.embed]
    param_groups = [
                    {'params': model.base.module.base.parameters(), 'lr_mult': 1.0}] + \
                   [{'params': new_params[i].parameters(), 'lr_mult': 10.0} for i in range(grp_num)]

    # Optimizer
    optimizer = torch.optim.Adam(param_groups,
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = DCDSBase(model, criterion, criterion2, args.alpha, grp_num)

    # Schedule learning rate: step decay by 10x every `step_size` epochs,
    # honoring each group's lr_mult.
    def adjust_lr(epoch):
        step_size = args.ss if args.arch == 'inception' else 20
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        return lr

    # Start training
    for epoch in range(start_epoch, args.epochs):
        lr = adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, lr, warm_up=False)
        top1, mAP = evaluator.evaluate(val_loader,
                                       dataset.val,
                                       dataset.val,
                                       args.alpha,
                                       rerank_topk=args.rerank,
                                       second_stage=True,
                                       dataset=args.dataset)

        is_best = top1 > best_top1
        # BUG FIX: best_top1 was never updated, so `is_best` compared every
        # epoch against the stale initial value and 'model_best' was
        # overwritten on every epoch with top1 > 0.
        best_top1 = max(top1, best_top1)
        best_mAP = max(mAP, best_mAP)
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  mAP: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, mAP, best_mAP, ' *' if is_best else ''))

    # Final test with the best validation checkpoint
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader,
                       dataset.query,
                       dataset.gallery,
                       args.alpha,
                       metric,
                       rerank_topk=args.rerank,
                       dataset=args.dataset)
Exemplo n.º 29
0
def main(args):
    """Train an InceptionNet re-id model with a selectable loss
    (cross-entropy / OIM / triplet) and optimizer (SGD / Adam), evaluating
    on the validation split each epoch.

    Args:
        args: parsed command-line namespace.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.loss == 'triplet':
        assert args.num_instances > 1, 'TripletLoss requires num_instances > 1'
        assert args.batch_size % args.num_instances == 0, \
            'num_instances should divide batch_size'
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.workers, args.num_instances,
                 combine_trainval=args.combine_trainval)

    # Create model: the head configuration depends on the chosen loss.
    if args.loss == 'xentropy':
        model = InceptionNet(num_classes=num_classes,
                             num_features=args.features,
                             dropout=args.dropout)
    elif args.loss == 'oim':
        model = InceptionNet(num_features=args.features,
                             norm=True,
                             dropout=args.dropout)
    elif args.loss == 'triplet':
        model = InceptionNet(num_features=args.features, dropout=args.dropout)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    model = torch.nn.DataParallel(model).cuda()

    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> start epoch {}  best top1 {:.1%}".format(
            args.start_epoch, best_top1))
    else:
        best_top1 = 0

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        # Evaluation-only mode: report validation and test scores and exit.
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion (must match the model head built above)
    if args.loss == 'xentropy':
        criterion = torch.nn.CrossEntropyLoss()
    elif args.loss == 'oim':
        criterion = OIMLoss(model.module.num_features,
                            num_classes,
                            scalar=args.oim_scalar,
                            momentum=args.oim_momentum)
    elif args.loss == 'triplet':
        criterion = TripletLoss(margin=args.triplet_margin)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    criterion.cuda()

    # Optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
    else:
        raise ValueError("Cannot recognize optimizer type:", args.optimizer)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            # Step decay: divide by 10 every 60 epochs.
            lr = args.lr * (0.1**(epoch // 60))
        elif args.optimizer == 'adam':
            # Exponential decay from epoch 100 spread over 50 epochs.
            # BUG FIX: the exponent must be ((epoch - 100) / 50); the
            # original `0.001 ** (epoch - 100) / 50` applied `**` first and
            # then divided the result by 50, collapsing the lr immediately.
            lr = args.lr if epoch <= 100 else \
                args.lr * (0.001 ** ((epoch - 100) / 50))
        else:
            raise ValueError("Cannot recognize optimizer type:",
                             args.optimizer)
        for g in optimizer.param_groups:
            g['lr'] = lr

    # Start training
    for epoch in range(args.start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test with the best validation checkpoint
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 30
0
def main(args):
    """Train a triplet-loss re-id model while logging parameter/gradient
    histograms to TensorBoard; supports evaluating several datasets at once.

    When ``args.dataset`` names more than one dataset, ``test_loader``,
    ``dataset.query`` and ``dataset.gallery`` are dicts keyed by dataset
    name — presumably produced by ``get_data``; confirm against its code.

    Args:
        args: parsed command-line namespace.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    # assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        # Default input resolution depends on the backbone.
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          num_diff_features=128,
                          dropout=args.dropout,
                          cut_at_pooling=False)
    print(model)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        # Partial restore: keep only checkpoint entries whose keys exist in
        # the freshly built model (tolerates head/architecture differences).
        pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items()
                           if k in model.state_dict()}
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        # Evaluation-only mode: one report per dataset when several are
        # configured, then exit.
        metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.dataset)>1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name], metric)
            return
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
            return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate: constant until lr_change_epochs, then an
    # exponential decay spread over the remaining epochs.
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
    # epoch_num = args.maxstep//(750//18) + 1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        # Log a histogram of every parameter and its gradient; parameters
        # without a gradient yet raise AttributeError and are skipped.
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            try:
                logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
            except AttributeError:
                pass
        # top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
        # top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        # Per-epoch evaluation is disabled; top1/best_top1 are placeholders
        # so the checkpoint format and the log line stay unchanged.
        top1 = 1
        # is_best = top1 > best_top1
        # best_top1 = max(top1, best_top1)
        # save_checkpoint({
        #     'state_dict': model.module.state_dict(),
        #     'epoch': epoch + 1,
        #     'best_top1': best_top1,
        # }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        is_best = False
        best_top1 = 1
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    # NOTE(review): loads the last checkpoint, not 'model_best.pth.tar' —
    # consistent with is_best always being False above, so no best-model
    # file is ever written.
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    print("Test:")
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                               dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 31
0
def main(args):
    """Jointly train three attention-based branch networks (target `t`,
    source `s`, infrared `ir`) for cross-modality person re-identification,
    checkpointing each branch every epoch and evaluating the target branch.

    Much of the original pipeline (source-branch pretraining, discriminator,
    per-branch evaluation, final best-model test) is commented out; the live
    behavior is exactly what the uncommented code shows.

    Args:
        args: parsed command-line namespace.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir + '/log'))

    if args.height is None or args.width is None:
        # Default input resolution depends on the backbone.
        args.height, args.width = (144,
                                   56) if args.arch == 'inception' else (256,
                                                                         128)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader, query_loader_s, gallery_loader_s, query_loader_ir, gallery_loader_ir = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob)
    print(num_classes)
    # model_s, model_t, model_discriminator = models.create(args.arch, num_classes=num_classes, num_features=args.features)
    # Three branches sharing the same architecture and attention mode.
    model_t = models.create(args.arch,
                            num_classes=num_classes,
                            num_features=args.features,
                            attention_mode=args.att_mode)
    model_s = models.create(args.arch,
                            num_classes=num_classes,
                            num_features=args.features,
                            attention_mode=args.att_mode)
    model_ir = models.create(args.arch,
                             num_classes=num_classes,
                             num_features=args.features,
                             attention_mode=args.att_mode)

    # load source network
    # checkpoint_s = load_checkpoint('/home/fan/cross_reid/source_net/model_best.pth.tar')
    #
    # model_dict = model_s.state_dict()
    # state_dict = {k:v for k,v in checkpoint_s.items() if k in model_dict.keys()}
    # model_dict.update(state_dict)

    # print(model_s)
    # print(checkpoint_s['model'])
    # print(model_dict.keys())
    # model_s.load_state_dict(model_dict)
    # print(model_s)
    # Wrap each branch in DataParallel across three fixed GPU ids.
    USE_CUDA = torch.cuda.is_available()
    device = torch.device("cuda:0" if USE_CUDA else "cpu")
    model_s = nn.DataParallel(model_s, device_ids=[0, 1, 2])
    model_s.to(device)
    # model_s = model_s.cuda()
    model_t = nn.DataParallel(model_t, device_ids=[0, 1, 2])
    model_t.to(device)

    model_ir = nn.DataParallel(model_ir, device_ids=[0, 1, 2])
    model_ir.to(device)
    # model_discriminator = model_discriminator.cuda()

    # One evaluator/metric per branch (only the target one is used below).
    evaluator = Evaluator(model_t)
    metric = DistanceMetric(algorithm=args.dist_metric)
    evaluator_s = Evaluator(model_s)
    metric_s = DistanceMetric(algorithm=args.dist_metric)
    evaluator_ir = Evaluator(model_ir)
    metric_ir = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        # Resume restores only the target branch weights and the epoch.
        checkpoint = load_checkpoint(args.resume)
        model_t.load_state_dict(checkpoint['model'])
        # model_discriminator.load_state_dict(checkpoint['model_discriminator'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    if args.evaluate:
        # Evaluation-only mode for the target branch, then hard exit.
        metric.train(model_t, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        exit()

    # Losses: label-smoothed cross-entropy per branch, a ranking loss for
    # cross-modality alignment, MSE between attention maps, and a triplet
    # loss (some are unused by the Trainer configuration below).
    current_margin = args.margin
    criterion_z_s = CrossEntropyLabelSmooth(num_classes=num_classes,
                                            epsilon=0.3).cuda()
    criterion_att = nn.MSELoss().cuda()
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes,
                                          epsilon=0.3).cuda()
    # criterion_I = TripletLoss(margin = current_margin).cuda()
    criterion_I = Rank_loss(margin_1=args.margin_1,
                            margin_2=args.margin_2,
                            alpha_1=args.alpha_1,
                            alpha_2=args.alpha_2).cuda()
    criterion_t = Triplet(margin=current_margin).cuda()
    # criterion_D = nn.CrossEntropyLoss().cuda()

    print(args)

    # Separate classifier/attention parameters from backbone parameters so
    # they can be put in different optimizer groups.
    # NOTE(review): for arch == 'ide' this reads model_t.model.fc on the
    # DataParallel wrapper (no .module) and leaves ignored_params_s/_ir
    # undefined for the filters below — that branch looks broken; confirm.
    if args.arch == 'ide':
        ignored_params = list(map(id, model_t.model.fc.parameters())) + list(
            map(id, model_t.classifier.parameters()))
    else:
        ignored_params = list(map(
            id, model_t.module.classifier.parameters())) + list(
                map(id, model_t.module.attention_module.parameters()))
        ignored_params_s = list(map(
            id, model_s.module.classifier.parameters())) + list(
                map(id, model_s.module.attention_module.parameters()))
        ignored_params_ir = list(
            map(id, model_ir.module.classifier.parameters())) + list(
                map(id, model_ir.module.attention_module.parameters()))

    base_params = filter(lambda p: id(p) not in ignored_params,
                         model_t.parameters())
    base_params_s = filter(lambda p: id(p) not in ignored_params_s,
                           model_s.parameters())
    base_params_ir = filter(lambda p: id(p) not in ignored_params_ir,
                            model_ir.parameters())

    if args.use_adam:
        # Adam: every group gets the same base lr; per-group lr scaling is
        # handled by the WarmupMultiStepLR scheduler below.
        optimizer_ft = torch.optim.Adam([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': filter(lambda p: p.requires_grad, base_params_s),
                'lr': args.lr
            },
            {
                'params': filter(lambda p: p.requires_grad, base_params_ir),
                'lr': args.lr
            },
            {
                'params': model_s.module.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_s.module.attention_module.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.module.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.module.attention_module.parameters(),
                'lr': args.lr
            },
            {
                'params': model_ir.module.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_ir.module.attention_module.parameters(),
                'lr': args.lr
            },
        ],
                                        weight_decay=5e-4)

        # optimizer_discriminator = torch.optim.Adam([
        #     {'params': model_discriminator.model.parameters(), 'lr': args.lr},
        #     {'params': model_discriminator.classifier.parameters(), 'lr': args.lr}
        #     ],
        #     weight_decay=5e-4)

    else:
        # NOTE(review): this SGD branch accesses model_s.classifier /
        # model_t.classifier without .module (unlike the Adam branch) and
        # omits the ir-branch groups — verify before relying on it.
        optimizer_ft = torch.optim.SGD([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': filter(lambda p: p.requires_grad, base_params_s),
                'lr': args.lr
            },
            {
                'params': model_s.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_s.attention_module.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.attention_module.parameters(),
                'lr': args.lr
            },
        ],
                                       momentum=0.9,
                                       weight_decay=5e-4,
                                       nesterov=True)
        # optimizer_discriminator = torch.optim.SGD([
        #      {'params': model_discriminator.model.parameters(), 'lr': args.lr},
        #      {'params': model_discriminator.classifier.parameters(), 'lr': args.lr},
        #     ],
        #     momentum=0.9,
        #     weight_decay=5e-4,
        #     nesterov=True)

    scheduler = WarmupMultiStepLR(optimizer_ft, args.mile_stone, args.gamma,
                                  args.warmup_factor, args.warmup_iters,
                                  args.warmup_methods)

    # trainer = Trainer(model_s, model_ir, model_t, criterion_z, criterion_z_s, criterion_I ,criterion_att, criterion_t, trainvallabel, 1, 1 ,0.15 , 0.05, 5)
    trainer = Trainer(model_s, model_ir, model_t, criterion_z, criterion_I,
                      criterion_att, trainvallabel, 1, 1, 0.0, 0.0, 1000)
    flag = 1
    best_top1 = -1
    best_top1_s = -1
    best_top1_ir = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        # NOTE(review): scheduler.step() is called before trainer.train();
        # PyTorch >= 1.1 expects it after the optimizer step — confirm the
        # intended lr sequence.
        scheduler.step()
        triple_loss, tot_loss = trainer.train(epoch, train_loader,
                                              optimizer_ft)

        # Unconditional per-epoch snapshot of each branch (is_best=False).
        save_checkpoint(
            {
                'model': model_t.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        save_checkpoint_s(
            {
                'model': model_s.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1_s,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='s_checkpoint.pth.tar')

        save_checkpoint_ir(
            {
                'model': model_ir.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1_ir,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='ir_checkpoint.pth.tar')
        # if epoch < 200:
        #     continue
        # NOTE(review): `epoch % 1 == 0` is always true, so this guard never
        # skips — evaluation runs every epoch as written.
        if not epoch % 1 == 0:
            continue

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)
        # top1_s = evaluator_s.evaluate(query_loader_s, gallery_loader_s, dataset.query, dataset.gallery, metric_s)
        #top1_ir = evaluator_ir.evaluate(query_loader_ir, gallery_loader_ir, dataset.query, dataset.gallery, metric_ir)
        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'model': model_t.module.state_dict(),
                # 'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        #is_best_s = max(top1_s, best_top1_s)
        #save_checkpoint_s({
        #    'model': model_s.module.state_dict(),
        #    'epoch': epoch + 1,
        #    'best_top1' : best_top1_s,
        #}, is_best_s, epoch, args.logs_dir, fpath='s_checkpoint.pth.tar')

        #is_best_ir = max(top1_ir, best_top1_ir)
        #save_checkpoint_ir({
        #    'model': model_ir.module.state_dict(),
        #    'epoch': epoch + 1,
        #    'best_top1': best_top1_ir,
        #}, is_best_ir, epoch, args.logs_dir, fpath='ir_checkpoint.pth.tar')


#    print('Test with best model:')
#    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
#              format(epoch, top1, best_top1, ' *' if is_best else ''))

#    checkpoint = load_checkpoint(osp.join(args.logs_dir,'model_best.pth.tar'))
#    model_t.load_state_dict(checkpoint['model'])
#    metric.train(model_t, train_loader)
#    evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)
    print(args)
Exemplo n.º 32
0
def run():
    """Train a DenseNet-121 re-id classifier jointly with a generator `netG`.

    The DenseNet produces (logits, dense features, _); `netG` reconstructs an
    image from the dense features.  L2 and perception reconstruction losses
    are computed each critic interval but are weighted 0 in the total loss as
    written, so only the classification loss drives the gradients.  Periodic
    evaluation, image dumps and checkpointing happen inside the epoch loop.
    Relies on module-level `opt` (argparse namespace) and helpers such as
    `get_data`, `show_info`, `mult_lr`, `adjust_lr`, `save_checkpoint`.
    """
    # Seed RNGs for reproducibility; cudnn.benchmark trades exact
    # determinism for autotuned conv speed.
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.benchmark = True
    data_dir = opt.data_dir

    # Redirect print to both console and log file
    #if not opt.evaluate:
    #    sys.stdout = Logger(osp.join(opt.logs_dir, 'log_l2_per.txt'))
    # Create data loaders
    def readlist(path):
        # Parse a split file where each line is
        # "<image_name> <person_id> <camera_id>" (whitespace separated);
        # returns a list of (name, pid, cam) tuples with ids as ints.
        lines = []
        with open(path, 'r') as f:
            data = f.readlines()

        #pdb.set_trace()
        for line in data:
            name, pid, cam = line.split()
            lines.append((name, int(pid), int(cam)))
        return lines

    # Load data list for wuzhen
    # NOTE(review): on a missing file only a message is printed; the later
    # get_data(...) call would then raise NameError on the unset *_list.
    if osp.exists(osp.join(data_dir, 'train.txt')):
        train_list = readlist(osp.join(data_dir, 'train.txt'))
    else:
        print("The training list doesn't exist")

    if osp.exists(osp.join(data_dir, 'val.txt')):
        val_list = readlist(osp.join(data_dir, 'val.txt'))
    else:
        print("The validation list doesn't exist")

    if osp.exists(osp.join(data_dir, 'query.txt')):
        query_list = readlist(osp.join(data_dir, 'query.txt'))
    else:
        print("The query.txt doesn't exist")

    if osp.exists(osp.join(data_dir, 'gallery.txt')):
        gallery_list = readlist(osp.join(data_dir, 'gallery.txt'))
    else:
        print("The gallery.txt doesn't exist")

    # Default input resolution: inception backbones use a smaller canvas.
    if opt.height is None or opt.width is None:
        opt.height, opt.width = (144, 56) if opt.arch == 'inception' else \
                                  (256, 128)

    train_loader,val_loader, test_loader = \
        get_data(opt.split, data_dir, opt.height,
                 opt.width, opt.batchSize, opt.workers,
                 opt.combine_trainval, train_list, val_list, query_list, gallery_list)
    # Create model
    # ori 14514; clear 12654,  16645
    # NOTE(review): num_classes=20330 is hard-coded for this dataset (see the
    # identity counts in the comment above) — verify against the train list.
    densenet = densenet121(num_classes=20330, num_features=256)
    start_epoch = best_top1 = 0
    if opt.resume:
        # Resume from a raw state_dict file; epoch counter comes from opt,
        # not from the checkpoint.
        #checkpoint = load_checkpoint(opt.resume)
        #densenet.load_state_dict(checkpoint['state_dict'])
        densenet.load_state_dict(torch.load(opt.resume))
        start_epoch = opt.resume_epoch
        print("=> Finetune Start epoch {} ".format(start_epoch))
    if opt.pretrained_model:
        print('Start load params...')
        load_params(densenet, opt.pretrained_model)
    # Load from checkpoint
    #densenet = nn.DataParallel(densenet).cuda()
    metric = DistanceMetric(algorithm=opt.dist_metric)
    print('densenet')
    show_info(densenet, with_arch=True, with_grad=False)
    netG = netg()
    print('netG')
    show_info(netG, with_arch=True, with_grad=False)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
        #load_params(netG,opt.netG)
    if opt.cuda:
        netG = netG.cuda()
        densenet = densenet.cuda()
    # Reconstruction losses between generated and real images.
    perceptionloss = perception_loss(cuda=opt.cuda)
    l2loss = l2_loss(cuda=opt.cuda)
    #    discriloss=discri_loss(cuda = opt.cuda,batchsize = opt.batchSize,height = \
    #                           opt.height,width = opt.width,lr = opt.lr,step_size = \
    #                           opt.step_size,decay_step = opt.decay_step )
    # Evaluator
    evaluator = Evaluator(densenet)
    #    if opt.evaluate:
    # Pre-training evaluation pass (runs unconditionally as written).
    metric.train(densenet, train_loader)
    print("Validation:")
    evaluator.evaluate(val_loader, val_list, val_list, metric)
    print("Test:")
    evaluator.evaluate(test_loader, query_list, gallery_list, metric)
    #    return
    # Criterion
    #    criterion = nn.CrossEntropyLoss(ignore_index=-100).cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    # Optimizer
    # mult_lr fills param_groups with per-layer lr multipliers for SGD.
    param_groups = []
    mult_lr(densenet, param_groups)
    optimizer = optim.SGD(param_groups,
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)
    #    optimizer = optim.Adam(param_groups, lr=opt.lr, betas=(opt.beta1, 0.9))

    # Generator gets its own Adam optimizer.
    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.9))

    # Start training
    for epoch in range(start_epoch, opt.epochs):
        adjust_lr(optimizer, epoch)
        adjust_lr(optimizerG, epoch)
        #discriloss.adjust_lr(epoch)
        losses = AverageMeter()
        precisions = AverageMeter()
        densenet.train()
        for i, data in enumerate(train_loader):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            real_cpu, _, pids, _ = data
            if opt.cuda:
                real_cpu = real_cpu.cuda()
                targets = Variable(pids.cuda())
                # NOTE(review): `input` is not defined in this function —
                # presumably a module-level tensor buffer declared elsewhere
                # in the file; as visible here this would hit the `input`
                # builtin and fail. Verify. Also note `targets`/`inputv` are
                # only set when opt.cuda is true.
                input.resize_as_(real_cpu).copy_(real_cpu)
            inputv = Variable(input)
            # Forward: logits for classification + dense features for netG.
            outputs, output_dense, _ = densenet(inputv)
            fake = netG(output_dense)
            # NOTE(review): fixed *3 scaling of the generated image —
            # presumably undoing input normalization; confirm against netG.
            fake = fake * 3
            #discriloss(fake = fake, inputv = inputv, i = i)
            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            # Both networks step only every CRITIC_ITERS batches.
            if i % opt.CRITIC_ITERS == 0:
                netG.zero_grad()
                optimizer.zero_grad()
                #loss_discri = discriloss.gloss(fake = fake)
                loss_l2 = l2loss(fake=fake, inputv=inputv)
                loss_perception = perceptionloss(fake=fake, inputv=inputv)
                loss_classify = criterion(outputs, targets)
                prec, = accuracy(outputs.data, targets.data)
                prec = prec[0]
                # `.data[0]` is the pre-0.4 PyTorch scalar access
                # (would need `.item()` on modern PyTorch).
                losses.update(loss_classify.data[0], targets.size(0))
                precisions.update(prec, targets.size(0))
                # Reconstruction losses are zero-weighted: only the
                # classification term contributes gradients here.
                loss = loss_classify + 0 * loss_l2 + 0 * loss_perception
                #                loss = loss_discri
                loss.backward()
                optimizerG.step()
                optimizer.step()
            #print(precisions.val)
            #print(precisions.avg)
#           print('[%d/%d][%d/%d] '%(epoch, opt.epochs, i, len(train_loader)))


#            print('[%d/%d][%d/%d] Loss_discri: %.4f '%(epoch, opt.epochs, i, \
#                  len(train_loader),loss_discri.data[0]))
            # NOTE(review): these prints run every batch but loss_l2 /
            # loss_perception are only refreshed every CRITIC_ITERS batches
            # (and are unbound on the very first batches if CRITIC_ITERS > 1
            # ever skips i == 0 — here i == 0 always computes them).
            print('[%d/%d][%d/%d] Loss_l2: %.4f Loss_perception: %.4f '%(epoch, opt.epochs, i, \
                  len(train_loader),loss_l2.data[0],loss_perception.data[0]))
            print('Loss {}({})\t'
                  'Prec {}({})\t'.format(losses.val, losses.avg,
                                         precisions.val, precisions.avg))
            # Dump real/generated image grids every 100 batches.
            if i % 100 == 0:
                vutils.save_image(real_cpu,
                                  '%s/real_samples.png' % opt.outf,
                                  normalize=True)
                outputs, output_dense, _ = densenet(x=inputv)
                fake = netG(output_dense)
                fake = fake * 3
                vutils.save_image(fake.data,
                                  '%s/fake_samples_epoch_%03d.png' %
                                  (opt.outf, epoch),
                                  normalize=True)
        show_info(densenet, with_arch=False, with_grad=True)
        show_info(netG, with_arch=False, with_grad=True)
        # Raw weight snapshots every 5 epochs.
        if epoch % 5 == 0:
            torch.save(densenet.state_dict(),
                       '%s/densenet_epoch_%d.pth' % (opt.outf, epoch))
            torch.save(netG.state_dict(),
                       '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
        # Skip evaluation/checkpointing during warm-up epochs.
        if epoch < opt.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val_list, val_list)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': densenet.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(opt.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))
        # Full test-set evaluation and an epoch-tagged snapshot every 5 epochs.
        if (epoch + 1) % 5 == 0:
            print('Test model: \n')
            evaluator.evaluate(test_loader, query_list, gallery_list)
            model_name = 'epoch_' + str(epoch) + '.pth.tar'
            torch.save({'state_dict': densenet.state_dict()},
                       osp.join(opt.logs_dir, model_name))
    # Final test
    # Reload the best checkpoint (written by save_checkpoint when is_best)
    # and evaluate on the test split with the trained distance metric.
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(opt.logs_dir, 'model_best.pth.tar'))
    densenet.load_state_dict(checkpoint['state_dict'])
    print('best epoch: ', checkpoint['epoch'])
    metric.train(densenet, train_loader)
    evaluator.evaluate(test_loader, query_list, gallery_list, metric)
Exemplo n.º 33
0
def main(args):
    """Train (or, with ``args.evaluate``, only evaluate) a re-id model.

    Builds a PK-sampled triplet-loss training pipeline: data loaders,
    backbone with a 1024-d embedding, Adam optimizer with a staged
    learning-rate schedule, per-epoch validation with best-checkpoint
    saving, and a final test pass using the best saved weights.
    """
    # Seed RNGs for reproducibility; cudnn.benchmark enables conv autotuning.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Mirror stdout into a log file, except in pure-evaluation runs.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # PK sampling sanity checks: each batch holds K instances per identity.
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'

    # Default input resolution depends on the backbone family.
    if args.height is None or args.width is None:
        default_size = (144, 56) if args.arch == 'inception' else (256, 128)
        args.height, args.width = default_size

    (dataset, num_classes, train_loader,
     val_loader, test_loader) = get_data(
        args.dataset, args.split, args.data_dir, args.height,
        args.width, args.batch_size, args.num_instances, args.workers,
        args.combine_trainval, args.batch_id)

    # Create model.
    # Hack: the classifier layer doubles as the last feature embedding,
    # i.e. avgpool -> FC(1024) -> FC(args.features).
    model = models.create(args.arch,
                          num_features=1024,
                          dropout=args.dropout,
                          num_classes=args.features)

    # Optionally restore weights, epoch counter and best score.
    start_epoch, best_top1 = 0, 0
    if args.resume:
        state = load_checkpoint(args.resume)
        model.load_state_dict(state['state_dict'])
        start_epoch = state['epoch']
        best_top1 = state['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric learned on train features; evaluator wraps the model.
    metric = DistanceMetric(algorithm=args.dist_metric)
    evaluator = Evaluator(model)

    # Evaluation-only mode: score val and test splits, then stop.
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Loss, optimizer, trainer.
    criterion = TripletLoss(margin=args.margin).cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    trainer = Trainer(model, criterion)

    def adjust_lr(epoch):
        # Constant lr through epoch 100, then exponential decay; per-group
        # 'lr_mult' lets sub-networks scale the base rate.
        if epoch <= 100:
            lr = args.lr
        else:
            lr = args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for group in optimizer.param_groups:
            group['lr'] = lr * group.get('lr_mult', 1)

    # Training loop: validate and checkpoint after the warm-up window.
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        snapshot = {
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }
        save_checkpoint(snapshot, is_best,
                        fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test with the best checkpoint.
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Exemplo n.º 34
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    cudnn.benchmark = True

    # Redirect print to both console and log file
    # All the print infomration are stored in the logs_dir

    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.loss == 'triplet':
        assert args.num_instances > 1, 'TripletLoss requires num_instances > 1'
        assert args.batch_size % args.num_instances == 0, \
            'num_instances should divide batch_size'

    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir,
             args.batch_size, args.seq_len, args.seq_srd,
                 args.workers, args.num_instances,
                 combine_trainval=True)

    # Create model
    if args.loss == 'xentropy':
        model = ResNetLSTM_btfu(args.depth,
                                pretrained=True,
                                num_features=args.features,
                                dropout=args.dropout)

    elif args.loss == 'oim':
        model = ResNetLSTM_btfu(args.depth,
                                pretrained=True,
                                num_features=args.features,
                                norm=True,
                                dropout=args.dropout)

    elif args.loss == 'triplet':
        model = ResNetLSTM_btfu(args.depth,
                                pretrained=True,
                                num_features=args.features,
                                dropout=args.dropout)

    else:
        raise ValueError("cannot recognize loss type:", args.loss)

    model = torch.nn.DataParallel(model).cuda()

    # Load from checkpoint
    # TODO is not necessary currently

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)

    # Criterion
    if args.loss == 'xentropy':
        criterion = torch.nn.CrossEntropyLoss()
    elif args.loss == 'oim':
        criterion = OIMLoss(model.module.num_features,
                            num_classes,
                            scalar=args.oim_scalar,
                            momentum=args.oim_momentum)
    elif args.loss == 'triplet':
        criterion = TripletLoss(margin=args.triplet_margin)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    criterion.cuda()

    # Optimizer
    if args.optimizer == 'sgd':
        if args.loss == 'xentropy':
            base_param_ids = set(map(id, model.module.base.parameters()))
            new_params = [
                p for p in model.parameters() if id(p) not in base_param_ids
            ]
            param_groups = [{
                'params': model.module.base.parameters(),
                'lr_mult': 0.1
            }, {
                'params': new_params,
                'lr_mult': 1.0
            }]
        else:
            param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)

    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)

    else:
        raise ValueError("Cannot recognize optimizer type:", args.optimizer)

    # Trainer
    trainer = SeqTrainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            lr = args.lr * (0.1**(epoch // 40))
        elif args.optimizer == 'adam':
            lr = args.lr if epoch <= 100 else \
                args.lr * (0.001 ** (epoch - 100) / 50)
        else:
            raise ValueError("Cannot recognize optimizer type:",
                             args.optimizer)
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Starting training
    for epoch in range(args.start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        top1 = evaluator.evaluate(test_loader,
                                  dataset.query,
                                  dataset.gallery,
                                  multi_shot=True)