Пример #1
0
def _get_feat_data_loader(cfg, source_name, feat):
    """Build a non-shuffled loader that pairs each training image with a
    pre-computed feature row from ``feat``; the pid slot is set to -1."""
    dataset = init_dataset(source_name,
                           root=cfg.DATASET.ROOT_DIR,
                           verbose=False)
    # Rebuild the train list as (img_path, feature_vector, -1) tuples,
    # one per row of `feat`.
    paired_train = [(dataset.train[idx][0], feat[idx], -1)
                    for idx in range(feat.size(0))]
    dataset.train = paired_train
    dataset.print_dataset_statistics(dataset.train, dataset.query,
                                     dataset.gallery)

    train_transforms = build_transforms(cfg, is_train=False)
    train_set = ImageDataset(dataset.train, train_transforms)

    def train_collate_fn_by_feat(batch):
        # Each sample is a 4-tuple; only the image and feature fields are
        # kept (the trailing two fields are discarded).
        imgs, feats, _, _, = zip(*batch)
        return torch.stack(imgs, dim=0), torch.stack(feats, dim=0)

    train_loader = DataLoader(train_set,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              shuffle=False,
                              num_workers=cfg.DATALOADER.NUM_WORKERS,
                              collate_fn=train_collate_fn_by_feat)
    return train_loader, dataset.num_train_pids
Пример #2
0
def make_train_data_loader_with_expand(cfg, data_set_names):
    """Merge the train splits of several datasets into one training set,
    offsetting person ids and camera ids so they stay globally unique."""
    datasets = [init_dataset(name, root=cfg.DATASET.ROOT_DIR)
                for name in data_set_names]

    all_dataset_train = []
    num_classes = 0
    num_train_imgs = 0
    num_train_cams = 0
    for ds in datasets:
        # Shift each sample's pid/camid by the totals accumulated so far,
        # then advance the totals by this dataset's counts.
        for sample in ds.train:
            all_dataset_train.append((sample[0],
                                      sample[1] + num_classes,
                                      sample[2] + num_train_cams))
        num_classes += ds.num_train_pids
        num_train_imgs += ds.num_train_imgs
        num_train_cams += ds.num_train_cams

    logger.info("  ----------------------------------------")
    logger.info("  subset   | # ids | # images | # cameras")
    logger.info("  ----------------------------------------")
    logger.info("  train    | {:5d} | {:8d} | {:9d}".format(
        num_classes, num_train_imgs, num_train_cams))

    batch_size, sampler, shuffle = _get_train_sampler(cfg, all_dataset_train)
    train_loader = _get_train_loader(cfg, batch_size, all_dataset_train,
                                     sampler, shuffle)
    return train_loader, num_classes
Пример #3
0
def make_train_data_loader_for_extract(cfg, dataset_name, is_train=False):
    """Build a training-split loader configured for feature extraction
    (sampler requested with ``extract=True``)."""
    dataset = init_dataset(dataset_name, root=cfg.DATASET.ROOT_DIR)
    batch_size, sampler, shuffle = _get_train_sampler(
        cfg, dataset.train, extract=True)
    loader = _get_train_loader(cfg, batch_size, dataset.train, sampler,
                               shuffle, is_train)
    return loader, dataset.num_train_pids
Пример #4
0
def init_extractor(cfg, num_classes=8368):
    """Build the extraction model, load cfg.TEST.WEIGHT and switch to eval.

    When ``num_classes`` is None it is looked up from the configured
    dataset's number of training identities.
    """
    cudnn.benchmark = True
    if num_classes is None:
        num_classes = init_dataset(
            cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR).num_train_pids

    extractor = build_model(cfg, num_classes)
    extractor.load_param(cfg.TEST.WEIGHT)
    extractor.eval()
    return extractor
Пример #5
0
def make_data_loader_for_val_data(cfg):
    """Create the query+gallery validation loader.

    Returns (val_loader, number of query images, num_train_pids).
    """
    val_transforms = build_transforms(cfg, is_train=False)
    num_workers = cfg.DATALOADER.NUM_WORKERS
    # TODO: add multi dataset to train -- single- and multi-name configs
    # were previously two identical branches; they are handled the same.
    dataset = init_dataset(cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR)
    num_classes = dataset.num_train_pids

    val_set = ImageDataset(dataset=dataset.query + dataset.gallery,
                           rap_data_=dataset.rap_data,
                           transform=val_transforms,
                           is_train=False,
                           swap_roi_rou=False)
    val_loader = DataLoader(val_set,
                            batch_size=cfg.TEST.IMS_PER_BATCH,
                            shuffle=False,
                            num_workers=num_workers,
                            collate_fn=val_collate_fn)
    return val_loader, len(dataset.query), num_classes
def train(cfg):
    """Top-level training driver: prepares loaders, model, optimizer,
    scheduler and camera model, then delegates to ``do_train``.

    Raises:
        ValueError: if cfg.MODEL.PRETRAIN_CHOICE is not one of
            'resume', 'self' or 'imagenet'.
    """
    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # prepare dataset
    val_data_loader, num_query = make_val_data_loader(cfg)
    # One class count per sampler slot; -1 marks "not set yet".
    num_classes = np.zeros(len(cfg.DATALOADER.SAMPLER_PROB)).astype(int) - 1
    source_dataset = init_dataset(cfg.SRC_DATA.NAMES,
                                  root_train=cfg.SRC_DATA.TRAIN_DIR,
                                  transfered=cfg.SRC_DATA.TRANSFERED)
    num_classes[0] = source_dataset.num_train_pids
    num_classes[1] = cfg.TGT_UNSUPDATA.CLUSTER_TOPK
    if cfg.MODEL.FINETUNE:
        # Extra head capacity for fine-tuning -- TODO confirm the 200 margin.
        num_classes[1] += 200

    # prepare model
    model = build_model(cfg, num_classes)

    optimizer, fixed_lr_idxs = make_optimizer(cfg, model)
    loss_fn = make_loss(cfg, num_classes)

    # BUG FIX: camera_model was only created on the 'self'/'imagenet' path,
    # so resuming crashed with NameError when it was passed to do_train().
    camera_model = None

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'resume':
        # The epoch index is encoded at the end of the checkpoint name,
        # e.g. ".../resnet_model_120.pth" -> 120. int() replaces the old
        # eval() call, which executed arbitrary text from the path.
        start_epoch = int(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        logger.info('Start epoch:%d' % start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
        logger.info('Path to the checkpoint of optimizer:%s' % path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD,
                                      start_epoch, fixed_lr_idxs)
        # NOTE(review): the resume path still loads no camera model, so
        # do_train receives None here -- confirm whether resume should also
        # load cfg.TEST.CAMERA_WEIGHT.
    elif cfg.MODEL.PRETRAIN_CHOICE in ('self', 'imagenet'):
        start_epoch = 0
        model.load_param(cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.PRETRAIN_CHOICE)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD,
                                      -1, fixed_lr_idxs)
        camera_model = build_camera_model(cfg, num_classes=5)
        camera_model.load_param(cfg.TEST.CAMERA_WEIGHT, cfg.MODEL.PRETRAIN_CHOICE)
    else:
        # BUG FIX: this branch used to log and fall through, crashing later
        # with NameError on the undefined scheduler/start_epoch; fail fast.
        raise ValueError(
            'Only support pretrain_choice for imagenet and self, but got {}'.format(
                cfg.MODEL.PRETRAIN_CHOICE))

    do_train(cfg,
             model,
             camera_model,
             val_data_loader,
             optimizer,
             scheduler,      # modify for using self trained model
             loss_fn,
             num_query,
             start_epoch,     # add for using self trained model
             0)
Пример #7
0
def get_train_dataloader(cfg):
    """Assemble the training items from every dataset in cfg.DATASETS.NAMES
    and wrap them in a DataLoader. Returns (loader, number of classes)."""
    print('prepare training set ...')
    tng_tfms = build_transforms(cfg, is_train=False)
    num_workers = cfg.DATALOADER.NUM_WORKERS

    train_img_items = []
    for name in cfg.DATASETS.NAMES:
        train_img_items.extend(init_dataset(name).train)

    tng_set = ImageDataset(train_img_items, tng_tfms, relabel=True)
    # NOTE(review): the batch size comes from cfg.TEST.IMS_PER_BATCH even
    # though this is the training loader -- confirm this is intentional.
    tng_dataloader = DataLoader(tng_set, cfg.TEST.IMS_PER_BATCH,
                                num_workers=num_workers,
                                collate_fn=fast_collate_fn,
                                pin_memory=True)
    return tng_dataloader, tng_set.c
Пример #8
0
def make_train_data_loader(cfg, dataset_name):
    """Standard single-dataset training loader.

    Returns (train_loader, number of training identities).
    """
    dataset = init_dataset(dataset_name, root=cfg.DATASET.ROOT_DIR)
    batch_size, sampler, shuffle = _get_train_sampler(cfg, dataset.train)
    loader = _get_train_loader(cfg, batch_size, dataset.train, sampler,
                               shuffle)
    return loader, dataset.num_train_pids
Пример #9
0
def make_multi_valid_data_loader(cfg, data_set_names, verbose=False):
    """Build one validation loader per dataset name.

    Returns an OrderedDict mapping name -> (val_loader, num_query), so
    iteration order follows ``data_set_names``.
    """
    # Hoisted out of the loop: the transform pipeline does not depend on
    # the dataset being processed (it was rebuilt per iteration before).
    val_transforms = build_transforms(cfg, is_train=False)

    valid = OrderedDict()
    for name in data_set_names:
        dataset = init_dataset(name,
                               root=cfg.DATASET.ROOT_DIR,
                               verbose=verbose)
        val_set = ImageDataset(dataset.query + dataset.gallery, val_transforms)
        val_loader = DataLoader(val_set,
                                batch_size=cfg.TEST.BATCH_SIZE,
                                shuffle=False,
                                num_workers=cfg.DATALOADER.NUM_WORKERS,
                                collate_fn=val_collate_fn)
        valid[name] = (val_loader, len(dataset.query))

    return valid
Пример #10
0
def main():
    """Smoke-test the market1501 train loader: iterate 10 epochs and print
    the shape of each image batch."""
    flist = init_dataset('market1501').train

    loader = get_loader(flist)
    for epoch in range(10):
        print(f'{epoch}')
        for _, batch in enumerate(loader):
            for item in batch:
                print(item['images'].shape)
Пример #11
0
def get_test_dataloader(cfg):
    """Build one test loader per name in cfg.DATASETS.TEST_NAMES.

    Returns three parallel lists: loaders, query lengths, and the combined
    query+gallery item lists.
    """
    print('prepare test set ...')
    val_tfms = build_transforms(cfg, is_train=False)
    num_workers = cfg.DATALOADER.NUM_WORKERS

    loaders, query_lens, name_lists = [], [], []
    for name in cfg.DATASETS.TEST_NAMES:
        dataset = init_dataset(name)
        query_names = dataset.query
        gallery_names = dataset.gallery
        combined = query_names + gallery_names

        test_set = ImageDataset(combined, val_tfms, relabel=False)
        loaders.append(DataLoader(test_set, cfg.TEST.IMS_PER_BATCH,
                                  num_workers=num_workers,
                                  collate_fn=fast_collate_fn,
                                  pin_memory=True))
        query_lens.append(len(query_names))
        name_lists.append(combined)

    return loaders, query_lens, name_lists
Пример #12
0
def make_train_data_loader_with_labels(cfg, dataset_name, labels):
    """Rebuild the train split from cluster assignments: sample i keeps its
    image path, takes ``labels[i]`` as pid and -1 as camid; samples labelled
    -1 (noise) are dropped."""
    dataset = init_dataset(dataset_name,
                           root=cfg.DATASET.ROOT_DIR,
                           verbose=False)
    relabelled = [(dataset.train[idx][0], lab, -1)
                  for idx, lab in enumerate(labels) if lab != -1]
    dataset.train = relabelled
    dataset.print_dataset_statistics(dataset.train, dataset.query,
                                     dataset.gallery)

    batch_size, sampler, shuffle = _get_train_sampler(cfg, dataset.train)
    train_loader = _get_train_loader(cfg, batch_size, dataset.train, sampler,
                                     shuffle)
    # NOTE(review): num_train_pids reflects the original split, not the
    # relabelled one -- confirm callers expect that.
    return train_loader, dataset.num_train_pids
Пример #13
0
def _get_target_data_loader(cfg, target_name):
    """Loader over the target-domain train split with train-time
    augmentation; batches carry (images, pids) only."""
    dataset = init_dataset(target_name,
                           root=cfg.DATASET.ROOT_DIR,
                           verbose=False)
    batch_size, sampler, shuffle = _get_train_sampler(cfg, dataset.train)

    train_set = ImageDataset(dataset.train,
                             build_transforms(cfg, is_train=True))

    def train_collate_fn_add_feat(batch):
        # Keep images and pids; the trailing two fields of each sample
        # are discarded.
        imgs, pids, _, _, = zip(*batch)
        return (torch.stack(imgs, dim=0),
                torch.tensor(pids, dtype=torch.int64))

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              sampler=sampler,
                              shuffle=shuffle,
                              num_workers=cfg.DATALOADER.NUM_WORKERS,
                              collate_fn=train_collate_fn_add_feat)
    return train_loader, dataset.num_train_pids
Пример #14
0
def inference(cfg, model, val_loader, num_query):
    """Set up an evaluator according to cfg.TEST.RE_RANKING, then render a
    fixed set of qualitative cases from the query+gallery images."""
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")

    re_ranking = cfg.TEST.RE_RANKING
    if re_ranking == 'no':
        print("Create evaluator")
        metric = R1_mAP(num_query,
                        max_rank=50,
                        feat_norm=cfg.TEST.FEAT_NORM,
                        fun=eval_func_with_plot)
        evaluator = create_supervised_evaluator(model,
                                                metrics={'r1_mAP': metric},
                                                device=device)
    elif re_ranking == 'yes':
        print("Create evaluator for reranking")
        metric = R1_mAP_reranking(num_query,
                                  max_rank=50,
                                  feat_norm=cfg.TEST.FEAT_NORM)
        evaluator = create_supervised_evaluator(model,
                                                metrics={'r1_mAP': metric},
                                                device=device)
    else:
        print(
            "Unsupported re_ranking config. Only support for no or yes, but got {}."
            .format(cfg.TEST.RE_RANKING))

    # NOTE(review): the evaluator built above is never run here -- only the
    # qualitative plot below is produced. Confirm this is intentional.
    dataset = init_dataset(cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR)
    transform = T.Compose([T.ToTensor()])
    val_dataset = ImageDataset(dataset.query + dataset.gallery, transform)
    plot(val_dataset, 'good_case', [[16843, 3918, 6980], [7005, 4783, 15962]])
Пример #15
0
def _rank_and_dump(cfg, dataset, txt_dir, raw_feats, pids, camids, num_query,
                   all_mAP, statistic_name):
    """L2-normalize ``raw_feats`` (a numpy array), build the query-vs-gallery
    squared-euclidean distance matrix and write ranking output via
    ``generate_image_dir_and_txt``."""
    feats = torch.from_numpy(raw_feats).float().to('cuda')
    feats_normed = torch.nn.functional.normalize(feats, dim=1, p=2)
    # query split
    qf = feats_normed[:num_query]
    q_pids = np.asarray(pids[:num_query])
    q_camids = np.asarray(camids[:num_query])
    # gallery split
    gf = feats_normed[num_query:]
    g_pids = np.asarray(pids[num_query:])
    g_camids = np.asarray(camids[num_query:])
    m, n = qf.shape[0], gf.shape[0]
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # NOTE(review): legacy positional addmm_(beta, alpha, mat1, mat2)
    # signature, deprecated in modern torch; kept for behavioural parity.
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.cpu().numpy()
    # Gallery ids are deliberately blanked with ones before ranking,
    # as in the original copy-pasted blocks.
    g_camids = np.ones_like(g_camids)
    g_pids = np.ones_like(g_pids)
    generate_image_dir_and_txt(cfg, dataset, txt_dir, distmat, g_pids, q_pids,
                               g_camids, q_camids, all_mAP,
                               statistic_name=statistic_name, max_rank=100)


def main():
    """Extract orientation features/scores with a wandb-configured model and
    either save per-track means (train mode) or save and rank query/gallery
    features per statistic (test mode)."""
    parser = argparse.ArgumentParser(description='Vehicle orientation')
    parser.add_argument('-u', '--user', help='username', default='corner')
    parser.add_argument('-p', '--project', help='project name', default='cityai2020Orientation')
    parser.add_argument('-r', '--run_id', help='run id', default='pe5y029c')
    # Hard-coded mode switches (not CLI-driven).
    traindata = False
    is_synthetic = False
    is_track = True

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    print('wandb api')
    api = wandb.Api()
    run = api.run(args.user + '/' + args.project + '/' + args.run_id)
    print('copy wandb configs')
    cfg = copy.deepcopy(run.config)

    if cfg['MODEL.DEVICE'] == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg['MODEL.DEVICE_ID']
    cfg['DATASETS.TRACKS_FILE'] = '/net/merkur/storage/deeplearning/users/eckvik/WorkspaceNeu/VReID/ImageFolderStatistics/Track3Files'
    cudnn.benchmark = True
    print('load dataset')
    if is_synthetic and traindata:
        cfg['DATASETS.SYNTHETIC'] = True
        cfg['DATASETS.SYNTHETIC_LOADER'] = 0
        cfg['DATASETS.SYNTHETIC_DIR'] = 'ai_city_challenge/2020/Track2/AIC20_track2_reid_simulation/AIC20_track2/AIC20_ReID_Simulation'
        dataset = init_dataset('AI_CITY2020_TEST_VAL', cfg=cfg, fold=1, eval_mode=False)
    else:
        if is_track:
            dataset = init_dataset('AI_CITY2020_TRACKS', cfg=cfg, fold=1, eval_mode=False)
        else:
            dataset = init_dataset('AI_CITY2020_TEST_VAL', cfg=cfg, fold=1, eval_mode=False)

    if traindata:
        if is_synthetic and traindata:
            # Synthetic subset: keep image paths from index 36935 on,
            # sorted, with dummy pid/camid.
            paths = [item[0] for item in dataset.train][36935:]
            paths.sort()
            dataset = [[p, 0, 0] for p in paths]
            val_set = ImageDatasetOrientation(dataset, cfg, is_train=False, test=True)
        else:
            val_set = ImageDatasetOrientation(dataset.train, cfg, is_train=False, test=True)
    else:
        val_set = ImageDatasetOrientation(dataset.query + dataset.gallery, cfg, is_train=False, test=True)

    val_loader = DataLoader(
        val_set, batch_size=cfg['TEST.IMS_PER_BATCH'], shuffle=False,
        num_workers=cfg['DATALOADER.NUM_WORKERS'], collate_fn=val_collate_fn)

    print('build model')
    model = build_regression_model(cfg)
    print('get last epoch')
    epoch_best = 10  # run.summary['epoch']
    weights_path = os.path.join(cfg['OUTPUT_DIR'], cfg['MODEL.NAME'] + '_model_' + str(epoch_best) + '.pth')
    print('load pretrained weights')
    model.load_param(weights_path)
    model.eval()

    evaluator = create_supervised_evaluator(model, metrics={'score_feat': Score_feats()}, device=cfg['MODEL.DEVICE'])
    print('run')
    evaluator.run(val_loader)
    scores, feats, pids, camids = evaluator.state.metrics['score_feat']
    feats = np.array(feats)
    scores = np.array(scores)
    print('save')
    if traindata:
        if is_track:
            # Track-level aggregation: mean feature/score per track; each
            # image name encodes its 1-based index in its first six chars.
            feats_mean = []
            for item in dataset.train_tracks_vID:
                indis = np.array([int(jitem[:6]) - 1 for jitem in item[0]])
                feats_mean.append(np.mean(feats[indis], axis=0))
            feats_mean = np.array(feats_mean)

            scores_mean = []
            for item in dataset.train_tracks_vID:
                indis = np.array([int(jitem[:6]) - 1 for jitem in item[0]])
                scores_mean.append(np.mean(scores[indis], axis=0))

            np.save(os.path.join(cfg['OUTPUT_DIR'], 'feats_train_track.npy'), np.array(feats_mean))
            np.save(os.path.join(cfg['OUTPUT_DIR'], 'scores_train_track.npy'), np.array(scores_mean))
        else:
            if is_synthetic and traindata:
                np.save(os.path.join(cfg['OUTPUT_DIR'], 'feats_train_synthetic.npy'), np.array(feats))
                np.save(os.path.join(cfg['OUTPUT_DIR'], 'scores_train_synthetic.npy'), np.array(scores))
            else:
                np.save(os.path.join(cfg['OUTPUT_DIR'], 'feats_train.npy'), np.array(feats))
                np.save(os.path.join(cfg['OUTPUT_DIR'], 'scores_train.npy'), np.array(scores))
    else:
        if is_track:
            # First 1052 entries are single query images; the remainder are
            # gallery images aggregated per test track.
            feats_mean = list(feats[:1052])
            for item in dataset.test_tracks_vID:
                indis = np.array([int(jitem[:6]) - 1 for jitem in item[0]])
                feats_mean.append(np.mean(feats[1052:][indis], axis=0))
            feats_mean = np.array(feats_mean)

            scores_mean = list(scores[:1052])
            for item in dataset.test_tracks_vID:
                indis = np.array([int(jitem[:6]) - 1 for jitem in item[0]])
                scores_mean.append(np.mean(scores[1052:][indis], axis=0))
            np.save(os.path.join(cfg['OUTPUT_DIR'], 'feats_query_gal_track.npy'), np.array(feats_mean))
            np.save(os.path.join(cfg['OUTPUT_DIR'], 'scores_query_gal_track.npy'), np.array(scores_mean))
        else:
            np.save(os.path.join(cfg['OUTPUT_DIR'], 'feats_query_gal.npy'), np.array(feats))
            np.save(os.path.join(cfg['OUTPUT_DIR'], 'scores_query_gal.npy'), np.array(scores))
        print(cfg['OUTPUT_DIR'])
        print()
        txt_dir = 'dist_orient'
        num_query = 1052
        all_mAP = np.zeros(num_query)

        # Rank query vs gallery once per statistic: the full feature
        # vectors, the full xyz scores, and each coordinate subset.
        # Previously six copy-pasted blocks; factored into _rank_and_dump.
        for statistic_name, raw in (('feats', feats),
                                    ('xyz', scores),
                                    ('xy', scores[:, 0:2]),
                                    ('x', scores[:, 0:1]),
                                    ('y', scores[:, 1:2]),
                                    ('z', scores[:, 2:3])):
            _rank_and_dump(cfg, dataset, txt_dir, raw, pids, camids,
                           num_query, all_mAP, statistic_name)
Пример #16
0
def do_train(cfg, model, train_loader, val_loader, optimizer, scheduler,
             loss_fn, num_query, start_epoch):
    """Run the ignite training loop with checkpointing and per-query validation.

    Args:
        cfg: experiment config (SOLVER/MODEL/TEST/OUTPUT_DIR fields used).
        model: network to train.
        train_loader: training DataLoader passed to ``trainer.run``.
        val_loader: not used directly -- validation loaders are rebuilt per
            query via ``make_data_loader_val`` below.
        optimizer: optimizer stepped by the trainer.
        scheduler: LR scheduler, stepped once per epoch.
        loss_fn: loss callable handed to the trainer factory.
        num_query: number of query images, each evaluated in its own pass.
        start_epoch: epoch to resume from (engine state is overridden).
    """
    log_period = cfg.SOLVER.LOG_PERIOD
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = cfg.MODEL.DEVICE
    epochs = cfg.SOLVER.MAX_EPOCHS

    logger = logging.getLogger("reid_baseline.train")
    logger.info("Start training")
    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        loss_fn,
                                        device=device,
                                        gamma=cfg.MODEL.GAMMA,
                                        margin=cfg.SOLVER.MARGIN,
                                        beta=cfg.MODEL.BETA)
    # Evaluator variant depends on cfg.TEST.PAIR; both score a single query
    # (R1_mAP is constructed with num_query=1 -- validation runs one query
    # at a time, see log_validation_results below).
    # NOTE(review): any value other than "no"/"yes" leaves `evaluator`
    # unbound and validation would raise NameError -- confirm upstream
    # config validation guarantees one of the two values.
    if cfg.TEST.PAIR == "no":
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP': R1_mAP(1, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    elif cfg.TEST.PAIR == "yes":
        evaluator = create_supervised_evaluator(
            model,
            metrics={
                'r1_mAP': R1_mAP_pair(1,
                                      max_rank=50,
                                      feat_norm=cfg.TEST.FEAT_NORM)
            },
            device=device)
    # Persist model + optimizer every `checkpoint_period` epochs, keeping
    # at most 10 checkpoint files.
    checkpointer = ModelCheckpoint(output_dir,
                                   cfg.MODEL.NAME,
                                   checkpoint_period,
                                   n_saved=10,
                                   require_empty=False)
    # checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, n_saved=10, require_empty=False)
    timer = Timer(average=True)

    trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {
        'model': model,
        'optimizer': optimizer
    })
    # Time each iteration; `timer.value()` is the running average per batch.
    timer.attach(trainer,
                 start=Events.EPOCH_STARTED,
                 resume=Events.ITERATION_STARTED,
                 pause=Events.ITERATION_COMPLETED,
                 step=Events.ITERATION_COMPLETED)

    # average metric to attach on trainer
    RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')
    RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')

    dataset = init_dataset(cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR)

    @trainer.on(Events.STARTED)
    def start_training(engine):
        # Resume support: override ignite's epoch counter.
        engine.state.epoch = start_epoch

    @trainer.on(Events.EPOCH_STARTED)
    def adjust_learning_rate(engine):
        scheduler.step()

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        # ITER is a module-level counter defined elsewhere in this file;
        # it is reset at the end of each epoch.
        global ITER
        ITER += 1
        if ITER % log_period == 0:
            logger.info(
                "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
                .format(engine.state.epoch, ITER, len(train_loader),
                        engine.state.metrics['avg_loss'],
                        engine.state.metrics['avg_acc'],
                        scheduler.get_lr()[0]))
        if len(train_loader) == ITER:
            ITER = 0

    # adding handlers using `trainer.on` decorator API
    @trainer.on(Events.EPOCH_COMPLETED)
    def print_times(engine):
        # multi_person_training_info2()
        # NOTE(review): the loaders rebuilt here are only used for the log
        # line below; they do not replace the loader the trainer iterates.
        train_loader, val_loader, num_query, num_classes = make_data_loader_train(
            cfg)
        logger.info(
            'Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]'
            .format(engine.state.epoch,
                    timer.value() * timer.step_count,
                    train_loader.batch_size / timer.value()))
        logger.info('-' * 10)
        timer.reset()

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        # if engine.state.epoch % eval_period == 0:
        # Validate every epoch once `eval_period` has been reached, running
        # the evaluator once per query and aggregating CMC/AP manually.
        if engine.state.epoch >= eval_period:
            all_cmc = []
            all_AP = []
            num_valid_q = 0
            q_pids = []
            for query_index in tqdm(range(num_query)):

                val_loader = make_data_loader_val(cfg, query_index, dataset)
                evaluator.run(val_loader)
                cmc, AP, q_pid = evaluator.state.metrics['r1_mAP']

                if AP >= 0:
                    # Queries with fewer than 50 ranked matches are skipped.
                    if cmc.shape[0] < 50:
                        continue
                    num_valid_q += 1

                    all_cmc.append(cmc)
                    all_AP.append(AP)
                    q_pids.append(int(q_pid))
                else:
                    continue

            all_cmc = np.asarray(all_cmc).astype(np.float32)
            cmc = all_cmc.sum(0) / num_valid_q
            mAP = np.mean(all_AP)
            logger.info("Validation Results - Epoch: {}".format(
                engine.state.epoch))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))

    trainer.run(train_loader, max_epochs=epochs)
# --- top-level extraction script (executes on import) ---
# Selects the split whose orientation labels are collected below.
train_test_query = 'image_train'

# NOTE(review): '...' is a placeholder path prefix -- must be filled in
# before this script can run.
label_dest_dir = '.../OrientationReal/' + train_test_query
label_paths = glob.glob(os.path.join(label_dest_dir, '*'))
label_names = [os.path.basename(path).split('.')[0] for path in label_paths]

# Distributed world size from the launcher, defaulting to single-GPU.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
wandb.init()
# cfg is a wandb config object accessed with flat 'SECTION.KEY' string keys.
cfg = wandb.config

if cfg['MODEL.DEVICE'] == "cuda":
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg['MODEL.DEVICE_ID']
cudnn.benchmark = True
print('load dataset')
dataset = init_dataset('AI_CITY2020_TEST_VAL',
                       cfg=cfg,
                       fold=1,
                       eval_mode=False)
# Train split wrapped for inference (no augmentation, test mode).
val_set = ImageDatasetOrientation(dataset.train,
                                  cfg,
                                  is_train=False,
                                  test=True)

print('build model')
model = build_regression_model(cfg)
# NOTE(review): `args` is not defined in this scope -- presumably parsed
# elsewhere in the file; verify before running this script standalone.
weights_path = os.path.join(args.weights_path)
print('load pretrained weights')
model.load_param(weights_path)
model.eval()

dataset_synthetic = init_dataset('AI_CITY2020_ORIENTATION',
                                 cfg=cfg,