Example #1
def create_model(args):
    model = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)

    model.cuda()
    model = nn.DataParallel(model)

    initial_weights = load_checkpoint(args.init)
    copy_state_dict(initial_weights['state_dict'], model)

    return model
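`load_checkpoint` and `copy_state_dict` are project helpers that are not shown in these examples. A minimal sketch of what `copy_state_dict` presumably does, assuming it copies only checkpoint entries whose names and shapes match the target model (the `strip` prefix handling and the warning about missing keys are assumptions):

def copy_state_dict(state_dict, model, strip=None):
    # Hypothetical sketch: copy only entries whose (possibly prefix-stripped)
    # names and shapes match the target model's state dict.
    tgt_state = model.state_dict()
    copied = set()
    for name, param in state_dict.items():
        if strip is not None and name.startswith(strip):
            name = name[len(strip):]
        if name not in tgt_state or param.shape != tgt_state[name].shape:
            continue
        tgt_state[name].copy_(param)
        copied.add(name)
    missing = set(tgt_state.keys()) - copied
    if missing:
        print("=> Missing keys in checkpoint: {}".format(sorted(missing)))
    return model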
Example #2
def create_model(args):
    model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
    model_2 = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)

    model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)
    model_2_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=args.num_clusters)

    model_1.cuda()
    model_2.cuda()
    model_1_ema.cuda()
    model_2_ema.cuda()
    model_1 = nn.DataParallel(model_1)
    model_2 = nn.DataParallel(model_2)
    model_1_ema = nn.DataParallel(model_1_ema)
    model_2_ema = nn.DataParallel(model_2_ema)

    initial_weights = load_checkpoint(args.init_1)
    copy_state_dict(initial_weights['state_dict'], model_1)
    copy_state_dict(initial_weights['state_dict'], model_1_ema)
    model_1_ema.module.classifier.weight.data.copy_(model_1.module.classifier.weight.data)

    initial_weights = load_checkpoint(args.init_2)
    copy_state_dict(initial_weights['state_dict'], model_2)
    copy_state_dict(initial_weights['state_dict'], model_2_ema)
    model_2_ema.module.classifier.weight.data.copy_(model_2.module.classifier.weight.data)

    for param in model_1_ema.parameters():
        param.detach_()
    for param in model_2_ema.parameters():
        param.detach_()

    return model_1, model_2, model_1_ema, model_2_ema
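The `detach_()` loops take the `*_ema` ("mean teacher") parameters out of the autograd graph, so they are presumably updated by an exponential moving average of the corresponding student model rather than by the optimizer. A minimal sketch of that update, assuming the usual mean-teacher rule (the function name, the `alpha` momentum, and the warm-up ramp are assumptions):

def update_ema_variables(model, ema_model, alpha, global_step):
    # Hypothetical EMA step: ema = alpha * ema + (1 - alpha) * student.
    # Ramp alpha up from 0 so the teacher tracks the student closely at first.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)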
Example #3
def main_worker(args):
    cudnn.benchmark = True

    log_dir = osp.dirname(args.resume)
    sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    query_loader, gallery_loader = \
        get_data(args.dataset_target, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch, pretrained=False, num_features=args.features, dropout=args.dropout, num_classes=0)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    copy_state_dict(checkpoint['state_dict'], model)
    start_epoch = checkpoint['epoch']
    best_mAP = checkpoint['best_mAP']
    print("=> Checkpoint of epoch {}  best mAP {:.1%}".format(start_epoch, best_mAP))

    # Evaluate
    q_f, q_id, q_cam = test(model, query_loader)
    g_f, g_id, g_cam = test(model, gallery_loader)

    q_g_dist = np.dot(q_f, np.transpose(g_f))
    q_g_dist = 2. - 2 * q_g_dist  # convert cosine similarity to squared Euclidean distance (valid when features are L2-normalized)
    all_cmc, mAP = eval_func(q_g_dist, q_id, g_id, q_cam, g_cam)
    all_cmc = all_cmc * 100
    print('rank-1: {:.4f} rank-5: {:.4f} rank-10: {:.4f} rank-20: {:.4f} rank-50: {:.4f} mAP: {:.4f}'.format(
        all_cmc[0], all_cmc[4], all_cmc[9], all_cmc[19], all_cmc[49], mAP * 100))

    indices = np.argsort(q_g_dist, axis=1)
    np.savetxt("answer.txt", indices[:, :100], fmt="%04d")
    return
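The `2. - 2 * q_g_dist` step relies on the features being L2-normalized, in which case squared Euclidean distance and cosine similarity are related by ||q - g||^2 = ||q||^2 + ||g||^2 - 2(q . g) = 2 - 2(q . g). A quick self-contained check of that identity:

import numpy as np

# Verify: for unit-norm rows, 2 - 2 * (q . g) equals the squared L2 distance.
q = np.random.randn(4, 8)
g = np.random.randn(6, 8)
q /= np.linalg.norm(q, axis=1, keepdims=True)
g /= np.linalg.norm(g, axis=1, keepdims=True)

dist_from_dot = 2. - 2. * np.dot(q, g.T)
dist_direct = ((q[:, None, :] - g[None, :, :]) ** 2).sum(axis=-1)
assert np.allclose(dist_from_dot, dist_direct)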
Example #4
def main_worker(args):
    cudnn.benchmark = True

    log_dir = osp.dirname(args.resume)
    sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    dataset_target, test_loader_target = \
        get_data(args.dataset_target, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch,
                          pretrained=False,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=0)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    copy_state_dict(checkpoint['state_dict'], model)
    start_epoch = checkpoint['epoch']
    best_mAP = checkpoint['best_mAP']
    print("=> Checkpoint of epoch {}  best mAP {:.1%}".format(
        start_epoch, best_mAP))

    # Evaluator
    evaluator = Evaluator(model)
    print("Test on the target domain of {}:".format(args.dataset_target))
    evaluator.evaluate(test_loader_target,
                       dataset_target.query,
                       dataset_target.gallery,
                       cmc_flag=True,
                       rerank=args.rerank)
    return
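Compared with Example #3, this version delegates distance computation and ranking to `Evaluator`. Judging from the `_, mAP = evaluator.evaluate(...)` call in Example #5, `evaluate` returns the CMC scores together with the mAP, so the metrics can also be captured for further use rather than only printed; a minimal sketch of that pattern:

# Capture the returned metrics (the two-value return shape is inferred
# from Example #5's usage, not confirmed by this snippet).
cmc_scores, mAP = evaluator.evaluate(test_loader_target,
                                     dataset_target.query,
                                     dataset_target.gallery,
                                     cmc_flag=True,
                                     rerank=args.rerank)
print("Target-domain mAP: {:.1%}".format(mAP))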
Example #5
def main_worker(args):
    global start_epoch, best_mAP

    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    else:
        log_dir = osp.dirname(args.resume)
        sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    iters = args.iters if (args.iters > 0) else None
    dataset_source, num_classes, train_loader_source = \
        get_data(args.dataset_source, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, args.num_instances, iters)
    dataset_target, _, train_loader_target = \
        get_data(args.dataset_target, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, args.num_instances, iters)
    dataset_validation, test_loader_target = \
        get_test_data(args.dataset_validation, args.data_dir, args.height,
                      args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch,
                          dropout=args.dropout,
                          num_classes=num_classes,
                          circle=args.circle)
    # print(model)
    print("Model size: {:.3f} M".format(count_num_param(model)))
    model.cuda()
    model = nn.DataParallel(model)  # parallelize across multiple GPUs

    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        copy_state_dict(checkpoint['state_dict'], model)
        start_epoch = checkpoint['epoch']
        best_mAP = checkpoint['best_mAP']
        print("=> Start epoch {}  best mAP {:.1%}".format(
            start_epoch, best_mAP))

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        # print("Test on source domain:")
        # evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True,
        #                    rerank=args.rerank)
        print("Test on target domain:")
        evaluator.evaluate(test_loader_target,
                           dataset_target.query,
                           dataset_target.gallery,
                           cmc_flag=True,
                           rerank=args.rerank)
        return

    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        params += [{
            "params": [value],
            "lr": args.lr,
            "weight_decay": args.weight_decay
        }]
    optimizer = torch.optim.Adam(params)
    lr_scheduler = WarmupMultiStepLR(optimizer,
                                     args.milestones,
                                     gamma=0.1,
                                     warmup_factor=0.01,
                                     warmup_iters=args.warmup_step)

    # Trainer
    trainer = PreTrainer(model, num_classes, args, margin=args.margin)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        train_loader_source.new_epoch()
        # train_loader_target.new_epoch()

        trainer.train(epoch,
                      train_loader_source,
                      optimizer,
                      train_iters=len(train_loader_source),
                      print_freq=args.print_freq,
                      balance=args.balance)

        # Step the scheduler after the epoch's optimizer updates
        # (PyTorch >= 1.1 expects scheduler.step() to follow optimizer.step()).
        lr_scheduler.step()

        if (epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1):
            _, mAP = evaluator.evaluate(test_loader_target,
                                        dataset_validation.query,
                                        dataset_validation.gallery,
                                        cmc_flag=True)

            is_best = mAP > best_mAP
            best_mAP = max(mAP, best_mAP)
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': epoch + 1,
                    'best_mAP': best_mAP,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print(
                '\n * Finished epoch {:3d}  validation mAP: {:5.1%}  best: {:5.1%}{}\n'
                .format(epoch, mAP, best_mAP, ' *' if is_best else ''))

    print("Test on target domain:")
    evaluator.evaluate(test_loader_target,
                       dataset_validation.query,
                       dataset_validation.gallery,
                       cmc_flag=True,
                       rerank=args.rerank)
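`WarmupMultiStepLR` is not defined in these examples. A sketch of the schedule it presumably implements, assuming a linear warmup from `warmup_factor * lr` up to the base `lr` over `warmup_iters` epochs, followed by a multiplicative decay of `gamma` at each milestone (mirroring the common detectron2-style scheduler; the exact ramp shape is an assumption):

import bisect

def lr_at_epoch(base_lr, epoch, milestones, gamma=0.1,
                warmup_factor=0.01, warmup_iters=10):
    # Hypothetical schedule: linear ramp from warmup_factor to 1.0, then a
    # gamma decay for every milestone that has been passed.
    if epoch < warmup_iters:
        alpha = epoch / warmup_iters
        scale = warmup_factor * (1 - alpha) + alpha
    else:
        scale = 1.0
    return base_lr * scale * gamma ** bisect.bisect_right(milestones, epoch)

For instance, with `milestones=[40, 70]` this gives `base_lr * 0.01` at epoch 0, the full `base_lr` from epoch 10 to 39, and a tenfold drop at epochs 40 and 70.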