Code example #1
# Imports reconstructed for this excerpt; Market1501, RandomIdSampler, MGN,
# TripletSemihardLoss, cmc, mean_ap, DEVICE and args come from the
# surrounding project and are not shown here.
import numpy as np
import torch.nn as nn
from torch import optim
from torch.utils.data import dataloader
from torchvision import transforms
from torchvision.transforms import functional
from scipy.spatial.distance import cdist


def run():
    batch_id = 16
    batch_image = 4
    batch_test = 32

    train_transform = transforms.Compose([
        transforms.Resize((384, 128)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    train_dataset = Market1501(args.root + '/bounding_box_train', transform=train_transform)
    train_loader = dataloader.DataLoader(train_dataset,
                                         sampler=RandomIdSampler(train_dataset, batch_image=batch_image),
                                         batch_size=batch_id * batch_image,
                                         num_workers=args.workers)

    test_transform = transforms.Compose([
        transforms.Resize((384, 128)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    test_flip_transform = transforms.Compose([
        transforms.Resize((384, 128)),
        functional.hflip,
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    query_dataset = Market1501(args.root + '/query', transform=test_transform)
    query_flip_dataset = Market1501(args.root + '/query', transform=test_flip_transform)
    query_loader = dataloader.DataLoader(query_dataset, batch_size=batch_test, num_workers=args.workers)
    query_flip_loader = dataloader.DataLoader(query_flip_dataset, batch_size=batch_test, num_workers=args.workers)

    test_dataset = Market1501(args.root + '/bounding_box_test', transform=test_transform)
    test_flip_dataset = Market1501(args.root + '/bounding_box_test', transform=test_flip_transform)
    test_loader = dataloader.DataLoader(test_dataset, batch_size=batch_test, num_workers=args.workers)
    test_flip_loader = dataloader.DataLoader(test_flip_dataset, batch_size=batch_test, num_workers=args.workers)

    mgn = MGN(num_classes=len(train_dataset.unique_ids)).to(DEVICE)

    cross_entropy_loss = nn.CrossEntropyLoss()
    triplet_semihard_loss = TripletSemihardLoss(margin=1.2)

    optimizer = optim.SGD(mgn.parameters(), lr=1e-2, momentum=0.9, weight_decay=5e-4)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [40, 60], gamma=0.1)

    epochs = 80
    for epoch in range(epochs):
        mgn.train()
        # NOTE: in PyTorch >= 1.1 the scheduler should be stepped *after* the
        # epoch's optimizer updates; stepping it here (pre-1.1 style) shifts
        # the LR schedule forward by one epoch.
        scheduler.step()

        running_loss = 0.0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)

            optimizer.zero_grad()

            outputs = mgn(inputs)
            losses = [triplet_semihard_loss(output, labels) for output in outputs[1]] + \
                     [cross_entropy_loss(output, labels) for output in outputs[2]]
            loss = sum(losses) / len(losses)
            loss.backward()

            optimizer.step()

            running_loss += loss.item()
            print('%d/%d - %d/%d - loss: %f' % (epoch + 1, epochs, i, len(train_loader), loss.item()))
        print('epoch: %d/%d - loss: %f' % (epoch + 1, epochs, running_loss / len(train_loader)))

        if epoch % 10 == 9:
            mgn.eval()
            # The feature extraction below could be wrapped in torch.no_grad()
            # to save memory; .detach() keeps it correct as written.

            query = np.concatenate([mgn(inputs.to(DEVICE))[0].detach().cpu().numpy()
                                    for inputs, _ in query_loader])
            query_flip = np.concatenate([mgn(inputs.to(DEVICE))[0].detach().cpu().numpy()
                                         for inputs, _ in query_flip_loader])

            test = np.concatenate([mgn(inputs.to(DEVICE))[0].detach().cpu().numpy()
                                   for inputs, _ in test_loader])
            test_flip = np.concatenate([mgn(inputs.to(DEVICE))[0].detach().cpu().numpy()
                                        for inputs, _ in test_flip_loader])

            dist = cdist((query + query_flip) / 2., (test + test_flip) / 2.)
            # dist = cdist(normalize(query + query_flip), normalize(test + test_flip))
            r = cmc(dist, query_dataset.ids, test_dataset.ids, query_dataset.cameras, test_dataset.cameras,
                    separate_camera_set=False,
                    single_gallery_shot=False,
                    first_match_break=True)
            m_ap = mean_ap(dist, query_dataset.ids, test_dataset.ids, query_dataset.cameras, test_dataset.cameras)
            print('epoch[%d]: mAP=%f, r@1=%f, r@3=%f, r@5=%f, r@10=%f' % (epoch + 1, m_ap, r[0], r[2], r[4], r[9]))
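
The P×K batches above come from RandomIdSampler, which is project code not shown in the excerpt. A minimal sketch of such an identity-grouped sampler, assuming the dataset exposes an ids list the way Market1501 does, could look like this (an illustration, not the repo's implementation):

import random
from collections import defaultdict
from torch.utils.data import Sampler

class RandomIdSamplerSketch(Sampler):
    """Yield batch_image indices per identity; with DataLoader's
    batch_size = batch_id * batch_image each batch covers batch_id people."""
    def __init__(self, dataset, batch_image):
        self.batch_image = batch_image
        self.index_by_id = defaultdict(list)
        for index, pid in enumerate(dataset.ids):
            self.index_by_id[pid].append(index)

    def __iter__(self):
        pids = list(self.index_by_id)
        random.shuffle(pids)
        for pid in pids:
            # sample with replacement so ids with few images still fill a slot
            yield from random.choices(self.index_by_id[pid], k=self.batch_image)

    def __len__(self):
        return len(self.index_by_id) * self.batch_image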
Code example #2
# Excerpt: Dataset, RandomIdSampler, RandomErasing, PCB_plus_dropout_pyramid,
# TripletLoss, save_ckpt, cmc, mean_ap, DEVICE, GPUID, root and args come from
# the surrounding project; standard imports as in code example #1.
def run():
    batch_id = args.batch_id  # 8
    batch_image = args.batch_image  # 8
    batch_train = args.batch_train  # 64
    batch_test = args.batch_test  # 32

    # ==============================================================
    trple_margin = args.trple_margin  # 0.1 increment
    para_balance = args.para_balance  # 0.01 increment
    # ==============================================================

    print("trple_margin:{}".format(trple_margin))
    print("para_balance:{}".format(para_balance))

    train_transform = transforms.Compose([
        transforms.Resize(args.transform_imsize, interpolation=3),
        #transforms.RandomCrop((256, 128)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=args.transform_norm_mean,
                             std=args.transform_norm_std),
        RandomErasing(probability=args.transform_random_erase_p,
                      mean=args.transform_random_erase_mean)
    ])

    test_transform = transforms.Compose([
        transforms.Resize(args.transform_imsize, interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=args.transform_norm_mean,
                             std=args.transform_norm_std)
    ])

    train_dataset = Dataset(root + '/bounding_box_train',
                            transform=train_transform)
    train_loader_tri = DataLoader(train_dataset,
                                  sampler=RandomIdSampler(
                                      train_dataset, batch_image=batch_image),
                                  batch_size=batch_id * batch_image)
    # num_workers=num_workers)
    train_loader_all = DataLoader(train_dataset,
                                  batch_size=batch_train,
                                  shuffle=True,
                                  drop_last=True)

    query_dataset = Dataset(root + '/query', transform=test_transform)
    query_loader = DataLoader(query_dataset,
                              batch_size=batch_test,
                              shuffle=False)

    test_dataset = Dataset(root + '/bounding_box_test',
                           transform=test_transform)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_test,
                             shuffle=False)

    model = PCB_plus_dropout_pyramid(num_classes=len(train_dataset.unique_ids))
    model_w = nn.DataParallel(model).to(DEVICE)
    criterion = nn.CrossEntropyLoss()
    triplet_loss = TripletLoss(margin=trple_margin)  # original margin: 1.2

    finetuned_params = list(model.base.parameters())
    # To train from scratch
    new_params = [
        p for n, p in model.named_parameters() if not n.startswith('base.')
    ]
    param_groups = [{
        'params': finetuned_params,
        'lr': args.lr_finetune
    }, {
        'params': new_params,
        'lr': args.lr_new
    }]
    optimizer = optim.SGD(param_groups, momentum=0.9, weight_decay=5e-4)

    modules_optims = [model, optimizer]

    #resume_ep, scores = load_ckpt(modules_optims, 'logs/pcb/ckpt_ep59.pth')
    #print('Resume from EP: {}'.format(resume_ep))
    print(optimizer)

    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=args.lr_schedule,
                                               gamma=0.5)

    refine_ep = 10
    epochs = args.n_epoch

    # ==============================================================
    print("epochs:{}".format(epochs))
    print("refine_ep:{}".format(refine_ep))
    # ==============================================================

    max_mAP = 0
    m_ap = 0

    for epoch in range(epochs):
        model_w.train()
        scheduler.step()  # see the note in code example #1: step after the epoch in PyTorch >= 1.1

        if epoch > refine_ep and epoch % 2 == 1:
            train_loader = train_loader_tri
        else:
            train_loader = train_loader_all

        running_loss = 0.0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            # print(inputs)
            # print(labels)
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)

            feats_list, logits_list = model_w(inputs)

            loss1 = torch.stack(
                [criterion(logits, labels) for logits in logits_list],
                dim=0).sum()
            #sum([criterion(logits, labels) for logits in logits_list])
            #loss2 = sum([triplet_semihard_loss(feats, labels) for feats in feats_list])

            loss2 = torch.stack(
                [triplet_loss(feats, labels) for feats in feats_list]).sum()
            #sum([triplet_loss(feats, labels) for feats in feats_list[6:]])

            optimizer.zero_grad()
            if epoch > refine_ep and epoch % 2 == 1:
                loss = loss1 + para_balance * loss2  # loss1 # + 0.1* loss2
            else:
                loss = loss1
            #loss = loss2
            loss.backward()

            optimizer.step()

            running_loss += loss.item()
            if i % 20 == 0:
                print('%d/%d - %d/%d - loss: %f (%f, %f)' %
                      (epoch, epochs, i, len(train_loader), loss.item(),
                       loss1.item(), loss2.item()))
        print('epoch: %d/%d - loss: %f' %
              (epoch, epochs, running_loss / len(train_loader)))

        if (epoch == 0 or epoch > 95) and ((epoch % 4 == 0) or
                                           (epoch == epochs - 1)):
            model_w.eval()
            query = np.concatenate([
                torch.cat(model_w(inputs.to(DEVICE))[0],
                          dim=1).detach().cpu().numpy()
                for inputs, _ in query_loader
            ])

            test = np.concatenate([
                torch.cat(model_w(inputs.to(DEVICE))[0],
                          dim=1).detach().cpu().numpy()
                for inputs, _ in test_loader
            ])

            dist = cdist(query, test)
            r = cmc(dist,
                    query_dataset.ids,
                    test_dataset.ids,
                    query_dataset.cameras,
                    test_dataset.cameras,
                    separate_camera_set=False,
                    single_gallery_shot=False,
                    first_match_break=True)
            m_ap = mean_ap(dist, query_dataset.ids, test_dataset.ids,
                           query_dataset.cameras, test_dataset.cameras)
            print('epoch[%d]: mAP=%f, r@1=%f, r@5=%f, r@10=%f' %
                  (epoch + 1, m_ap, r[0], r[4], r[9]))

        if epoch > 50 and max_mAP < m_ap:
            max_mAP = m_ap
            save_ckpt(
                modules_optims, epoch, 0,
                'logs/ckpt_ep{}_re02_bs64_dropout02_GPU{}_mAP{}_market.pth'.
                format(epoch, GPUID, m_ap))
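
TripletLoss above is project code; a common batch-hard formulation with the same call signature triplet_loss(feats, labels) is sketched below as an assumption, not necessarily the repo's exact loss:

import torch
import torch.nn as nn
import torch.nn.functional as F

class BatchHardTripletLoss(nn.Module):
    """Sketch: for each anchor, use the hardest in-batch positive and the
    hardest in-batch negative (the batch-hard mining of Hermans et al.,
    'In Defense of the Triplet Loss')."""
    def __init__(self, margin=0.1):
        super().__init__()
        self.margin = margin

    def forward(self, feats, labels):
        dist = torch.cdist(feats, feats)                   # (N, N) pairwise distances
        same = labels.unsqueeze(0) == labels.unsqueeze(1)  # positive-pair mask
        hardest_pos = (dist * same.float()).max(dim=1).values
        masked = dist.masked_fill(same, float('inf'))      # hide positives
        hardest_neg = masked.min(dim=1).values
        return F.relu(hardest_pos - hardest_neg + self.margin).mean()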
Code example #3
    # Excerpt: a nested helper from the project's train/test script; exp_dir,
    # args, load_pickle, save_pickle, extract_loader_feat, normalize,
    # compute_dist_with_visibility, cmc and mean_ap are defined elsewhere.
    def test(query_loader,
             query_flip_loader,
             test_loader,
             test_flip_loader,
             trainset_name,
             testset_name,
             epoch,
             verbose=False):
        cache_file = '{}/feat_cache-{}_to_{}.pkl'.format(
            exp_dir, trainset_name, testset_name)
        if args.use_feat_cache:
            assert os.path.exists(
                cache_file), "Feature cache file {} does not exist!".format(
                    cache_file)
            # q_vis and test_vis are each unpacked twice: the flipped pass
            # shares the same visibility arrays as the original pass.
            query_2, q_vis, query_flip_2, q_vis, test_2, test_vis, test_flip_2, test_vis, q_ids, q_cams, g_ids, g_cams = load_pickle(
                cache_file)
        else:
            query_2, q_vis = extract_loader_feat(query_loader, verbose=verbose)
            query_flip_2, q_vis = extract_loader_feat(query_flip_loader,
                                                      verbose=verbose)

            test_2, test_vis = extract_loader_feat(test_loader,
                                                   verbose=verbose)
            test_flip_2, test_vis = extract_loader_feat(test_flip_loader,
                                                        verbose=verbose)

            q_ids = query_loader.dataset.ids
            q_cams = query_loader.dataset.cameras
            g_ids = test_loader.dataset.ids
            g_cams = test_loader.dataset.cameras
            save_pickle([
                query_2, q_vis, query_flip_2, q_vis, test_2, test_vis,
                test_flip_2, test_vis, q_ids, q_cams, g_ids, g_cams
            ], cache_file)

        if args.test_which_feat > 0:
            # TODO: implement for pap
            idx = args.test_which_feat
            query_2 = query_2[:, 256 * idx - 256:256 * idx]
            query_flip_2 = query_flip_2[:, 256 * idx - 256:256 * idx]
            test_2 = test_2[:, 256 * idx - 256:256 * idx]
            test_flip_2 = test_flip_2[:, 256 * idx - 256:256 * idx]

        query = normalize(query_2 + query_flip_2)
        test = normalize(test_2 + test_flip_2)

        if verbose:
            print('query.shape:', query.shape)
            print('test.shape:', test.shape)
            if args.pap:
                print('q_vis.shape:', q_vis.shape)
                print('test_vis.shape:', test_vis.shape)

        if args.pap:
            dist_1 = compute_dist_with_visibility(query,
                                                  test,
                                                  q_vis,
                                                  test_vis,
                                                  dist_type='euclidean',
                                                  avg_by_vis_num=False)
        else:
            dist_1 = cdist(query, test)
        r_1 = cmc(dist_1,
                  q_ids,
                  g_ids,
                  q_cams,
                  g_cams,
                  separate_camera_set=False,
                  single_gallery_shot=False,
                  first_match_break=True)
        m_ap_1 = mean_ap(dist_1, q_ids, g_ids, q_cams, g_cams)
        print('EPOCH [%d] %s -> %s: mAP=%f, r@1=%f, r@3=%f, r@5=%f, r@10=%f' %
              (epoch + 1, trainset_name, testset_name, m_ap_1, r_1[0], r_1[2],
               r_1[4], r_1[9]))
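
normalize here is presumably sklearn.preprocessing.normalize (L2 row normalization); after it, the Euclidean distances from cdist rank candidates exactly as cosine similarity would, since ||a - b||^2 = 2 - 2 a·b for unit vectors. A toy sketch of the flip-fusion step under that assumption:

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.preprocessing import normalize

def fuse_flip(feat, feat_flip):
    # sum original + flipped features, then L2-normalize each row
    return normalize(feat + feat_flip)

q = fuse_flip(np.random.rand(4, 8), np.random.rand(4, 8))
g = fuse_flip(np.random.rand(6, 8), np.random.rand(6, 8))
dist = cdist(q, g)  # ranking equivalent to cosine distance on fused features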
Code example #4
File: ide.py  Project: jiangqy/reid-mgn
# Excerpt: Market1501, IDE, cmc, mean_ap, DEVICE and root come from the
# project; normalize is presumably sklearn.preprocessing.normalize.
def run():
    batch_size = 32

    train_transform = transforms.Compose([
        # Resize(144) scales the shorter side to 144 (Market-1501's 128x64
        # images become 288x144), leaving room for the 256x128 crop below.
        transforms.Resize(144, interpolation=3),
        transforms.RandomCrop((256, 128)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    test_transform = transforms.Compose([
        transforms.Resize((288, 144), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    test_flip_transform = transforms.Compose([
        transforms.Resize((288, 144), interpolation=3),
        functional.hflip,
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    train_dataset = Market1501(root + '/bounding_box_train', transform=train_transform)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    query_dataset = Market1501(root + '/query', transform=test_transform)
    query_flip_dataset = Market1501(root + '/query', transform=test_flip_transform)
    query_loader = DataLoader(query_dataset, batch_size=batch_size, shuffle=False)
    query_flip_loader = DataLoader(query_flip_dataset, batch_size=batch_size, shuffle=False)

    test_dataset = Market1501(root + '/bounding_box_test', transform=test_transform)
    test_flip_dataset = Market1501(root + '/bounding_box_test', transform=test_flip_transform)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    test_flip_loader = DataLoader(test_flip_dataset, batch_size=batch_size, shuffle=False)

    ide = IDE(num_classes=len(train_dataset.unique_ids)).to(DEVICE)
    criterion = nn.CrossEntropyLoss()

    params = [
        {'params': ide.backbone.parameters(), 'lr': 0.01},
        {'params': ide.classifier.parameters(), 'lr': 0.1},
    ]
    optimizer = optim.SGD(params, momentum=0.9, weight_decay=5e-4, nesterov=True)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

    epochs = 50
    for epoch in range(epochs):
        ide.train()
        scheduler.step()  # see the note in code example #1: step after the epoch in PyTorch >= 1.1

        running_loss = 0.0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)

            optimizer.zero_grad()

            outputs = ide(inputs)
            loss = criterion(outputs[1], labels)
            loss.backward()

            optimizer.step()

            running_loss += loss.item()
            print('%d/%d - %d/%d - loss: %f' % (epoch, epochs, i, len(train_loader), loss.item()))
        print('epoch: %d/%d - loss: %f' % (epoch, epochs, running_loss / len(train_loader)))

        if epoch % 10 == 9:
            ide.eval()

            query = np.concatenate([ide(inputs.to(DEVICE))[0].detach().cpu().numpy()
                                    for inputs, _ in query_loader])
            query_flip = np.concatenate([ide(inputs.to(DEVICE))[0].detach().cpu().numpy()
                                         for inputs, _ in query_flip_loader])

            test = np.concatenate([ide(inputs.to(DEVICE))[0].detach().cpu().numpy()
                                   for inputs, _ in test_loader])
            test_flip = np.concatenate([ide(inputs.to(DEVICE))[0].detach().cpu().numpy()
                                        for inputs, _ in test_flip_loader])

            # dist = cdist((query + query_flip) / 2., (test + test_flip) / 2.)
            dist = cdist(normalize(query + query_flip), normalize(test + test_flip))
            r = cmc(dist, query_dataset.ids, test_dataset.ids, query_dataset.cameras, test_dataset.cameras,
                    separate_camera_set=False,
                    single_gallery_shot=False,
                    first_match_break=True)
            m_ap = mean_ap(dist, query_dataset.ids, test_dataset.ids, query_dataset.cameras, test_dataset.cameras)
            print('epoch[%d]: mAP=%f, r@1=%f, r@3=%f, r@5=%f, r@10=%f' % (epoch + 1, m_ap, r[0], r[2], r[4], r[9]))
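
IDE itself is project code; from the usage above (outputs[0] is the retrieval feature fed to cdist, outputs[1] the id logits fed to cross-entropy, plus the ide.backbone / ide.classifier parameter groups) a minimal compatible skeleton might be:

import torch.nn as nn
from torchvision import models

class IDESketch(nn.Module):
    """Hypothetical minimal IDE: ResNet-50 backbone -> pooled feature
    (retrieval) and a linear id classifier (training loss)."""
    def __init__(self, num_classes):
        super().__init__()
        resnet = models.resnet50(weights='IMAGENET1K_V1')
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])  # drop fc
        self.classifier = nn.Linear(2048, num_classes)

    def forward(self, x):
        feat = self.backbone(x).flatten(1)  # (N, 2048)
        return feat, self.classifier(feat)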
Code example #5
    # Excerpt: model, model_w, model_path, finetuned_params, new_params and the
    # loaders/datasets used below are defined earlier in the original file.
    param_groups = [{'params': finetuned_params, 'lr': 0.01},
                    {'params': new_params, 'lr': 0.1}]
    optimizer = optim.SGD(param_groups, momentum=0.9, weight_decay=5e-4)

    modules_optims = [model, optimizer]

    resume_ep, scores = load_ckpt(modules_optims,
                                  model_path)

    print(optimizer)
    print('Resume from EP: {}'.format(resume_ep))

    model_w.eval()

    query = np.concatenate([torch.cat(model_w(inputs.to(DEVICE))[0], dim=1).detach().cpu().numpy()
                            for inputs, _ in query_loader])

    test = np.concatenate([torch.cat(model_w(inputs.to(DEVICE))[0], dim=1).detach().cpu().numpy()
                           for inputs, _ in test_loader])

    dist = cdist(query, test)

    r = cmc(dist, query_dataset.ids, test_dataset.ids,
            query_dataset.cameras, test_dataset.cameras,
            separate_camera_set=False,
            single_gallery_shot=False,
            first_match_break=True)

    m_ap = mean_ap(dist, query_dataset.ids, test_dataset.ids, query_dataset.cameras, test_dataset.cameras)

    print('evaluate_model: mAP=%f, r@1=%f, r@5=%f, r@10=%f' % (m_ap, r[0], r[4], r[9]))
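
cmc and mean_ap follow the Market-1501 protocol, where gallery images sharing both the query's id and camera are excluded before scoring. A simplified mean_ap, written as an illustration rather than the repo's implementation:

import numpy as np

def mean_ap_sketch(dist, q_ids, g_ids, q_cams, g_cams):
    q_ids, g_ids = np.asarray(q_ids), np.asarray(g_ids)
    q_cams, g_cams = np.asarray(q_cams), np.asarray(g_cams)
    aps = []
    for i in range(dist.shape[0]):
        order = np.argsort(dist[i])
        # drop same-id, same-camera gallery entries for this query
        keep = ~((g_ids[order] == q_ids[i]) & (g_cams[order] == q_cams[i]))
        matches = (g_ids[order] == q_ids[i])[keep]
        if not matches.any():
            continue  # no valid gallery match for this query
        precision = np.cumsum(matches) / (np.arange(matches.size) + 1)
        aps.append((precision * matches).sum() / matches.sum())
    return float(np.mean(aps))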
Code example #6
File: ide.py  Project: zjj-2015/reid-mgn
# Near-identical to code example #4 (another fork of the same ide.py); the
# notes there apply here as well.
def run():
    batch_size = 32

    train_transform = transforms.Compose([
        transforms.Resize(144, interpolation=3),
        transforms.RandomCrop((256, 128)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    test_transform = transforms.Compose([
        transforms.Resize((288, 144), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    test_flip_transform = transforms.Compose([
        transforms.Resize((288, 144), interpolation=3),
        functional.hflip,
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    train_dataset = Market1501(root + '/bounding_box_train',
                               transform=train_transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True)

    query_dataset = Market1501(root + '/query', transform=test_transform)
    query_flip_dataset = Market1501(root + '/query',
                                    transform=test_flip_transform)
    query_loader = DataLoader(query_dataset,
                              batch_size=batch_size,
                              shuffle=False)
    query_flip_loader = DataLoader(query_flip_dataset,
                                   batch_size=batch_size,
                                   shuffle=False)

    test_dataset = Market1501(root + '/bounding_box_test',
                              transform=test_transform)
    test_flip_dataset = Market1501(root + '/bounding_box_test',
                                   transform=test_flip_transform)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=False)
    test_flip_loader = DataLoader(test_flip_dataset,
                                  batch_size=batch_size,
                                  shuffle=False)

    ide = IDE(num_classes=len(train_dataset.unique_ids)).to(DEVICE)
    criterion = nn.CrossEntropyLoss()

    params = [
        {
            'params': ide.backbone.parameters(),
            'lr': 0.01
        },
        {
            'params': ide.classifier.parameters(),
            'lr': 0.1
        },
    ]
    optimizer = optim.SGD(params,
                          momentum=0.9,
                          weight_decay=5e-4,
                          nesterov=True)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

    epochs = 50
    for epoch in range(epochs):
        ide.train()
        scheduler.step()  # see the note in code example #1: step after the epoch in PyTorch >= 1.1

        running_loss = 0.0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)

            optimizer.zero_grad()

            outputs = ide(inputs)
            loss = criterion(outputs[1], labels)
            loss.backward()

            optimizer.step()

            running_loss += loss.item()
            print('%d/%d - %d/%d - loss: %f' %
                  (epoch, epochs, i, len(train_loader), loss.item()))
        print('epoch: %d/%d - loss: %f' %
              (epoch, epochs, running_loss / len(train_loader)))

        if epoch % 10 == 9:
            ide.eval()

            query = np.concatenate([
                ide(inputs.to(DEVICE))[0].detach().cpu().numpy()
                for inputs, _ in query_loader
            ])
            query_flip = np.concatenate([
                ide(inputs.to(DEVICE))[0].detach().cpu().numpy()
                for inputs, _ in query_flip_loader
            ])

            test = np.concatenate([
                ide(inputs.to(DEVICE))[0].detach().cpu().numpy()
                for inputs, _ in test_loader
            ])
            test_flip = np.concatenate([
                ide(inputs.to(DEVICE))[0].detach().cpu().numpy()
                for inputs, _ in test_flip_loader
            ])

            # dist = cdist((query + query_flip) / 2., (test + test_flip) / 2.)
            dist = cdist(normalize(query + query_flip),
                         normalize(test + test_flip))
            r = cmc(dist,
                    query_dataset.ids,
                    test_dataset.ids,
                    query_dataset.cameras,
                    test_dataset.cameras,
                    separate_camera_set=False,
                    single_gallery_shot=False,
                    first_match_break=True)
            m_ap = mean_ap(dist, query_dataset.ids, test_dataset.ids,
                           query_dataset.cameras, test_dataset.cameras)
            print('epoch[%d]: mAP=%f, r@1=%f, r@3=%f, r@5=%f, r@10=%f' %
                  (epoch + 1, m_ap, r[0], r[2], r[4], r[9]))
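
All six examples expose a module-level run() entry point; in the original files it is presumably invoked behind a main guard:

if __name__ == '__main__':
    run()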