Example #1
def test(config_file, **kwargs):
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()
    
    use_re_ranking = cfg.RE_RANKING  # renamed so it does not shadow the re_ranking() function called below
    
    PersonReID_Dataset_Downloader('./datasets', cfg.DATASETS.NAMES)
    if not use_re_ranking:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR, 'result')
        logger.info("Test Results:")
    else:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR, 'result_re-ranking')
        logger.info("Re-Ranking Test Results:")
    
    device = torch.device(cfg.DEVICE)
    
    _, val_loader, num_query, num_classes = data_loader(cfg, cfg.DATASETS.NAMES)

    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    model.load(cfg.OUTPUT_DIR, cfg.TEST.LOAD_EPOCH)
    if device:
        model.to(device)
    model = model.eval()

    all_feats = []
    all_pids = []
    all_camids = []
    all_imgs = []
    
    for data in tqdm(val_loader, desc='Feature Extraction', leave=False):
        with torch.no_grad():
            images, pids, camids = data
            all_imgs.extend(images.numpy())
            if device:
                images = images.to(device)  # the model was already moved to the device above
            
            feats = model(images)

        all_feats.append(feats)
        all_pids.extend(np.asarray(pids))
        all_camids.extend(np.asarray(camids))

    all_feats = torch.cat(all_feats, dim=0)
    # query
    qf = all_feats[:num_query]
    q_pids = np.asarray(all_pids[:num_query])
    q_camids = np.asarray(all_camids[:num_query])
    q_imgs = all_imgs[:num_query]
    # gallery
    gf = all_feats[num_query:]
    g_pids = np.asarray(all_pids[num_query:])
    g_camids = np.asarray(all_camids[num_query:])
    g_imgs = all_imgs[num_query:]

    if not use_re_ranking:
        m, n = qf.shape[0], gf.shape[0]
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # distmat -= 2 * qf @ gf.t()
        distmat = distmat.cpu().numpy()
    else:
        print('Calculating Distance')
        q_g_dist = np.dot(qf.data.cpu(), np.transpose(gf.data.cpu()))
        q_q_dist = np.dot(qf.data.cpu(), np.transpose(qf.data.cpu()))
        g_g_dist = np.dot(gf.data.cpu(), np.transpose(gf.data.cpu()))
        print('Re-ranking:')
        distmat = re_ranking(q_g_dist, q_q_dist, g_g_dist)

    indices = np.argsort(distmat, axis=1)

    mean = cfg.INPUT.PIXEL_MEAN
    std = cfg.INPUT.PIXEL_STD
    top_k = 7
    for i in range(num_query):
        # get query pid and camid
        q_pid = q_pids[i]
        q_camid = q_camids[i]

        # remove gallery samples that have the same pid and camid with query
        order = indices[i]
        remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
        keep = np.invert(remove)
        # binary vector, positions with value 1 are correct matches
        true_index = indices[i][keep]

        plt.title("top5 query",fontsize=15)
        plt.subplot(181)
        img = np.clip(q_imgs[i].transpose(1,2,0)*std+mean,0.0,1.0)
        plt.imshow(img)
        for j in range(top_k):
            plt.subplot(182+j)
            img = np.clip(g_imgs[true_index[j]].transpose(1,2,0)*std+mean,0.0,1.0)
            plt.imshow(img)
        plt.savefig("./show/{}.jpg".format(i))
            
    logger.info('Testing complete')
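
A side note on the no-re-ranking branch above: the expand/addmm_ pattern computes squared Euclidean distances as ||q||^2 + ||g||^2 - 2*q.g^T. A minimal standalone sketch (random toy tensors, not part of the example) verifying it against torch.cdist:

import torch

qf = torch.randn(4, 16)  # toy query features
gf = torch.randn(6, 16)  # toy gallery features
m, n = qf.shape[0], gf.shape[0]
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
          torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # distmat -= 2 * qf @ gf.t()
reference = torch.cdist(qf, gf, p=2).pow(2)   # direct ||q - g||^2
print(torch.allclose(distmat, reference, atol=1e-5))  # True up to float error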
Example #2
def train(config_file, **kwargs):
    # 1. config
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log')
    logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    # 2. datasets
    # Load the original dataset
    dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES +
                                     '_origin')  #'Market1501_origin'
    train_set_reference = ImageDataset(dataset_reference.train,
                                       train_transforms)
    train_loader_reference = DataLoader(train_set_reference,
                                        batch_size=128,
                                        shuffle=False,
                                        num_workers=cfg.DATALOADER.NUM_WORKERS,
                                        collate_fn=train_collate_fn)

    # Load the one-shot dataset
    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    # 3. load the model and optimizer
    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    loss_fn = make_loss(cfg)
    logger.info("Start training")
    since = time.time()

    top = 0  # the choice of the nearest sample
    top_update = 0  # the first iteration trains 80 steps and the following ones train 40

    # 4. Train and test
    for epoch in range(epochs):
        running_loss = 0.0
        running_acc = 0
        count = 1

        # get nearest samples and reset the model
        if top_update < 80:
            train_step = 80
        else:
            train_step = 40
        if top_update % train_step == 0:
            print("top: ", top)
            A, path_labeled = PSP(model, train_loader_reference, train_loader,
                                  top, cfg)
            top += cfg.DATALOADER.NUM_JUMP
            model = getattr(models, cfg.MODEL.NAME)(num_classes)
            optimizer = make_optimizer(cfg, model)
            scheduler = make_scheduler(cfg, optimizer)
            A_store = A.clone()
        top_update += 1

        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels_batch, img_path = data
            index, index_labeled = find_index_by_path(img_path,
                                                      dataset_reference.train,
                                                      path_labeled)
            images_relevant, GCN_index, choose_from_nodes, labels = load_relevant(
                cfg, dataset_reference.train, index, A_store, labels_batch,
                index_labeled)
            # if device:
            model.to(device)
            images = images_relevant.to(device)

            scores, feat = model(images)
            del images
            loss = loss_fn(scores, feat, labels.to(device), choose_from_nodes)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (scores[choose_from_nodes].max(1)[1].cpu() ==
                            labels_batch).float().mean().item()

        scheduler.step()

        # for model save if you need
        # if (epoch+1) % checkpoint_period == 0:
        #     model.cpu()
        #     model.save(output_dir,epoch+1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader,
                             desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data

                    model.to(device)
                    images = images.to(device)

                    feats = model(images)
                    del images
                all_feats.append(feats.cpu())
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))

            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10, 20]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
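
evaluation() above is project code; what follows is a hedged sketch of the standard single-shot CMC/mAP computation it presumably performs (simplified: it skips the same-pid/same-camera filtering a full ReID evaluation applies):

import numpy as np

def simple_cmc_map(distmat, q_pids, g_pids, max_rank=20):
    # Toy CMC/mAP over a query x gallery distance matrix; no camera filtering.
    indices = np.argsort(distmat, axis=1)
    matches = (g_pids[indices] == q_pids[:, None]).astype(np.int32)
    all_cmc, all_ap = [], []
    for row in matches:
        if not row.any():
            continue  # this query has no gallery match at all
        cmc = row.cumsum()
        cmc[cmc > 1] = 1
        all_cmc.append(cmc[:max_rank])
        precision = row.cumsum() / (np.arange(len(row)) + 1.0)
        all_ap.append((precision * row).sum() / row.sum())
    return np.mean(all_cmc, axis=0), np.mean(all_ap)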
Example #3
def test_cross_dataset(config_file, test_dataset, **kwargs):
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    PersonReID_Dataset_Downloader('./datasets', cfg.DATASETS.NAMES)
    _, _, _, num_classes = data_loader(cfg, cfg.DATASETS.NAMES)

    PersonReID_Dataset_Downloader('./datasets', test_dataset)
    _, val_loader, num_query, _ = data_loader(cfg, test_dataset)

    re_ranking = cfg.RE_RANKING

    if not re_ranking:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR,
                             cfg.DATASETS.NAMES + '->' + test_dataset)
        logger.info("Test Results:")
    else:
        logger = make_logger(
            "Reid_Baseline", cfg.OUTPUT_DIR,
            cfg.DATASETS.NAMES + '->' + test_dataset + '_re-ranking')
        logger.info("Re-Ranking Test Results:")

    device = torch.device(cfg.DEVICE)

    model = getattr(models, cfg.MODEL.NAME)(num_classes, cfg.MODEL.LAST_STRIDE)
    model.load(cfg.OUTPUT_DIR, cfg.TEST.LOAD_EPOCH)
    model = model.eval()

    all_feats = []
    all_pids = []
    all_camids = []

    since = time.time()
    for data in tqdm(val_loader, desc='Feature Extraction', leave=False):
        model.eval()
        with torch.no_grad():
            images, pids, camids = data
            if device:
                model.to(device)
                images = images.to(device)

            feats = model(images)

        all_feats.append(feats)
        all_pids.extend(np.asarray(pids))
        all_camids.extend(np.asarray(camids))

    cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query,
                          re_ranking)

    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    test_time = time.time() - since
    logger.info('Testing complete in {:.0f}m {:.0f}s'.format(
        test_time // 60, test_time % 60))
def main():
    # load config
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("--imgs_path",
                        default="",
                        help="path to images file",
                        type=str)
    parser.add_argument("--save_feats_fname",
                        default="",
                        help="shortname of image",
                        type=str)
    parser.add_argument("--fresh_feats",
                        default=False,
                        help="whether to refresh feat",
                        action='store_true')
    parser.add_argument("--save_json_fname",
                        default="",
                        help="shortname of json",
                        type=str)
    parser.add_argument("--aug_ms",
                        default=False,
                        help="whether to aug",
                        action='store_true')
    parser.add_argument("--aug_flip",
                        default=False,
                        help="whether to aug",
                        action='store_true')
    parser.add_argument("--aug_centercrop",
                        default=False,
                        help="whether to aug",
                        action='store_true')
    parser.add_argument("--aug_by_mean",
                        default=False,
                        help="whether to aug",
                        action='store_true')
    parser.add_argument("--dist_metric",
                        default='euclidean',
                        help="whether to aug",
                        type=str)
    parser.add_argument("--sub",
                        default=False,
                        help="whether to aug",
                        action='store_true')

    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("feature extract", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    print("Running with config:\n{}".format(cfg))
    # ------------------- extract features
    if args.fresh_feats:
        extractor = Extractor(cfg)
        img_fnames = glob.glob(args.imgs_path + '*.jpg')
        img_fnames = sorted(img_fnames)
        print(img_fnames[:10])

        rescales = [1.0]
        flip_num = 1
        crop_scales = [1.0]
        if args.aug_ms:
            rescales = [0.7, 1.0, 1.4]
        if args.aug_flip:
            flip_num = 2
        if args.aug_centercrop:
            crop_scales = [1.0, 0.8]
        aug_features = []

        for i in range(flip_num):
            for crop_scale in crop_scales:
                for rescale in rescales:
                    # build transform
                    normalize_transform = T.Normalize(
                        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
                    h, w = cfg.INPUT.SIZE_TEST
                    if i == 0:
                        transform = T.Compose([
                            T.Resize((int(h * rescale), int(w * rescale)),
                                     interpolation=Image.LANCZOS),
                            T.CenterCrop(
                                (int(h * crop_scale), int(w * crop_scale))),
                            T.ToTensor(), normalize_transform
                        ])
                    else:
                        transform = T.Compose([
                            T.Resize((int(h * rescale), int(w * rescale)),
                                     interpolation=Image.LANCZOS),  #
                            T.CenterCrop(
                                (int(h * crop_scale), int(w * crop_scale))),
                            T.RandomVerticalFlip(1.0),  # always flip; T.RandomHorizontalFlip(1.0) is the more usual choice
                            T.ToTensor(),
                            normalize_transform
                        ])
                    logger.info(transform)
                    image_set = ImageDataset(img_fnames, transform)
                    image_loader = DataLoader(
                        image_set,
                        sampler=SequentialSampler(image_set),
                        batch_size=cfg.TEST.IMS_PER_BATCH,
                        num_workers=4)
                    features = []
                    with tqdm(total=len(image_loader)) as pbar:
                        for idx, batchs in enumerate(image_loader):
                            features.append(
                                extractor.apply_batch(
                                    batchs.cuda()).cpu().numpy())
                            pbar.update(1)
                    features = np.vstack(features)  # (N, F)
                    aug_features.append(features)
        features = np.hstack(aug_features)
        np.save(
            os.path.join(output_dir,
                         args.save_feats_fname.replace('.npy', '_cat.npy')),
            features)

        features = aug_features[0]
        for i in range(1, len(aug_features)):
            features += aug_features[i]
        features /= len(aug_features)
        np.save(
            os.path.join(output_dir,
                         args.save_feats_fname.replace('.npy', '_mean.npy')),
            features)
    if args.sub:
        if args.aug_by_mean:
            save_feats_fname = args.save_feats_fname.replace(
                '.npy', '_mean.npy')
        else:
            save_feats_fname = args.save_feats_fname.replace(
                '.npy', '_cat.npy')
        # ------------------- compute dist and get top n

        img_fnames = glob.glob(args.imgs_path + '*.jpg')
        img_fnames = sorted(img_fnames)
        print(img_fnames[:10])
        map2sub_id = {
            idx: int(fname.split('/')[-1].split('.')[0])
            for idx, fname in enumerate(img_fnames)
        }
        features = np.load(os.path.join(output_dir, save_feats_fname))
        # fname2query_id
        query_ids = [idx for idx, fname in enumerate(img_fnames)]
        features = torch.from_numpy(features).cuda()
        results = retrieval_reranking_gpu(features,
                                          features,
                                          len(query_ids),
                                          k1=20,
                                          k2=4,
                                          lambda_value=0.4,
                                          dist_metric=args.dist_metric)
        outputs = []
        for result in results:
            query_id, query_results = result['query_id'], result['top_n']

            match_inds = []

            for query_result in query_results:
                gallery_id = query_result[0]
                if gallery_id != query_id:
                    match_inds.append(map2sub_id[int(gallery_id)])
            outputs.append({
                'query_id': map2sub_id[int(query_id)],
                'ans_ids': match_inds
            })
        with open(os.path.join(output_dir, args.save_json_fname), 'w') as fid:
            json.dump(outputs, fid)
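
The '_cat' and '_mean' suffixes above correspond to two ways of fusing the test-time-augmented features; a small sketch with made-up shapes:

import numpy as np

aug_features = [np.random.rand(5, 8) for _ in range(3)]   # 3 augmented passes, (N, F) each
cat_feats = np.hstack(aug_features)                       # (5, 24): concatenate along the feature axis
mean_feats = np.mean(np.stack(aug_features), axis=0)      # (5, 8): element-wise average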
Example #5
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument("--config_file", default="", help="path to config file", type=str)
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,nargs=argparse.REMAINDER)
    args = parser.parse_args()
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    num_gpus = torch.cuda.device_count()
    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info('Running with config:\n{}'.format(cfg))
    if cfg.INPUT.SEPNORM.USE:
        train_dl, val_dl, num_query, num_classes = make_sepnorm_dataloader(cfg, num_gpus)
    elif cfg.DATASETS.EXEMPLAR.USE:
        train_dl, val_dl, num_query, num_classes, exemplar_dl = make_dataloader(cfg, num_gpus)
    else:
        train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus)

    model = build_model(cfg, num_classes)
    loss = make_loss(cfg, num_classes)
    if cfg.SOLVER.CENTER_LOSS.USE:
        trainer = CenterTrainer(cfg, model, train_dl, val_dl, loss, num_query,
                                num_gpus)
    else:
        if cfg.SOLVER.MIXUP.USE:
            trainer = NegMixupTrainer(cfg, model, train_dl, val_dl, loss,
                                      num_query, num_gpus)
        elif cfg.DATASETS.EXEMPLAR.USE:
            if cfg.DATASETS.EXEMPLAR.MEMORY.USE:
                trainer = ExemplarMemoryTrainer(cfg, model, train_dl, val_dl,
                                                exemplar_dl, loss, num_query,
                                                num_gpus)
            else:
                trainer = UIRLTrainer(cfg, model, train_dl, val_dl,
                                      exemplar_dl, loss, num_query, num_gpus)
        elif cfg.DATASETS.HIST_LABEL.USE:
            trainer = HistLabelTrainer(cfg, model, train_dl, val_dl, loss,
                                       num_query, num_gpus)
        else:
            trainer = BaseTrainer(cfg, model, train_dl, val_dl, loss,
                                  num_query, num_gpus)
    if cfg.INPUT.SEPNORM.USE:
        logger.info('train transform0: \n{}'.format(train_dl.dataset.transform0))
        logger.info('train transform1: \n{}'.format(train_dl.dataset.transform1))

        logger.info('valid transform0: \n{}'.format(val_dl.dataset.transform0))
        logger.info('valid transform1: \n{}'.format(val_dl.dataset.transform1))

    else:
        logger.info('train transform: \n{}'.format(train_dl.dataset.transform))
        logger.info('valid transform: \n{}'.format(val_dl.dataset.transform))
    logger.info(type(model))
    logger.info(loss)
    logger.info(trainer)
    for epoch in range(trainer.epochs):
        for batch in trainer.train_dl:
            trainer.step(batch)
            trainer.handle_new_batch()
        trainer.handle_new_epoch()
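
The epoch loop above assumes a trainer exposing step / handle_new_batch / handle_new_epoch. A hedged skeleton of that interface (the real trainers are this repo's classes; these names only mirror the calls in main()):

class MinimalTrainer:
    # Interface sketch only; not the repo's BaseTrainer.
    def __init__(self, epochs, train_dl):
        self.epochs = epochs
        self.train_dl = train_dl
        self.batch_cnt = 0

    def step(self, batch):
        pass  # forward/backward/optimizer update for one batch

    def handle_new_batch(self):
        self.batch_cnt += 1  # e.g. periodic logging

    def handle_new_epoch(self):
        self.batch_cnt = 0  # e.g. LR scheduling, checkpointing, validation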
Example #6
def test(args):
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    logger = setup_logger('reid_baseline.eval', cfg.OUTPUT_DIR, 0, train=False)

    logger.info('Running with config:\n{}'.format(cfg))

    _, val_dl, num_query, num_classes = make_dataloader(cfg)

    model = build_model(cfg, num_classes)
    if cfg.TEST.MULTI_GPU:
        model = nn.DataParallel(model)
        model = convert_model(model)
        logger.info('Use multi gpu to inference')
    para_dict = torch.load(cfg.TEST.WEIGHT)
    model.load_state_dict(para_dict)
    model.cuda()
    model.eval()

    feats, pids, camids, paths = [], [], [], []
    with torch.no_grad():
        for batch in tqdm(val_dl, total=len(val_dl), leave=False):
            data, pid, camid, path = batch
            paths.extend(list(path))
            data = data.cuda()
            feat = model(data).detach().cpu()
            feats.append(feat)
            pids.append(pid)
            camids.append(camid)
    feats = torch.cat(feats, dim=0)
    pids = torch.cat(pids, dim=0)
    camids = torch.cat(camids, dim=0)

    query_feat = feats[:num_query]
    query_pid = pids[:num_query]
    query_camid = camids[:num_query]
    query_path = np.array(paths[:num_query])

    gallery_feat = feats[num_query:]
    gallery_pid = pids[num_query:]
    gallery_camid = camids[num_query:]
    gallery_path = np.array(paths[num_query:])

    distmat = euclidean_dist(query_feat, gallery_feat)

    cmc, mAP, all_AP = eval_func(distmat.numpy(),
                                 query_pid.numpy(),
                                 gallery_pid.numpy(),
                                 query_camid.numpy(),
                                 gallery_camid.numpy(),
                                 use_cython=True)

    if cfg.TEST.VIS:
        worst_q = np.argsort(all_AP)[:cfg.TEST.VIS_Q_NUM]
        qid = query_pid[worst_q]
        q_im = query_path[worst_q]

        ind = np.argsort(distmat, axis=1)
        gid = gallery_pid[ind[worst_q]][..., :cfg.TEST.VIS_G_NUM]
        g_im = gallery_path[ind[worst_q]][..., :cfg.TEST.VIS_G_NUM]

        for idx in range(cfg.TEST.VIS_Q_NUM):
            sid = qid[idx] == gid[idx]
            im = rank_list_to_im(range(len(g_im[idx])), sid, q_im[idx],
                                 g_im[idx])

            im.save(
                osp.join(cfg.OUTPUT_DIR,
                         'worst_query_{}.jpg'.format(str(idx).zfill(2))))

    logger.info('Validation Result:')
    for r in cfg.TEST.CMC:
        logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    logger.info('mAP: {:.2%}'.format(mAP))
    logger.info('-' * 20)

    if not cfg.TEST.RERANK:
        return

    distmat = re_rank(query_feat, gallery_feat)
    cmc, mAP, all_AP = eval_func(distmat,
                                 query_pid.numpy(),
                                 gallery_pid.numpy(),
                                 query_camid.numpy(),
                                 gallery_camid.numpy(),
                                 use_cython=True)

    logger.info('ReRanking Result:')
    for r in cfg.TEST.CMC:
        logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    logger.info('mAP: {:.2%}'.format(mAP))
    logger.info('-' * 20)
    parser = argparse.ArgumentParser(
        description='Load a trained network and plot its loss')
    parser.add_argument('config_file', help='config file path')
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)

    plt.figure(1, figsize=(9, 9))
    models = [
        f[:-4] for f in os.listdir('models')
        if 'maoetal_finetune' in f or 'maoetal_sunspot' in f
    ]
    for model_name in models:
        cfg.merge_from_list(['OUTPUT.CHECKPOINT_PREFIX', model_name])
        model = networkFactory(cfg)

        plt.plot(range(len(model.total_loss)),
                 model.total_loss,
                 label="Training Loss: {}".format(model_name))
    #plt.plot(range(0, len(model.total_loss), cfg.TRAINING.VALIDATION_FREQ), model.val_loss, label="Validation Loss: {}".format(model_name))

    plt.legend(loc='best')

    plt.show()
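
euclidean_dist() in Example 6 is project code; a hedged sketch with the semantics the call assumes ((m, d) query features vs (n, d) gallery features -> an (m, n) distance matrix):

import torch

def euclidean_dist(x, y):
    # Pairwise Euclidean distance; the clamp avoids sqrt of tiny negatives.
    m, n = x.size(0), y.size(0)
    xx = x.pow(2).sum(1, keepdim=True).expand(m, n)
    yy = y.pow(2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy - 2 * x @ y.t()
    return dist.clamp(min=1e-12).sqrt()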
Example #8
def train(config_file, **kwargs):
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    PersonReID_Dataset_Downloader(cfg.DATASETS.STORE_DIR, cfg.DATASETS.NAMES)

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log')
    logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    model = getattr(models, cfg.MODEL.NAME)(num_classes, cfg.MODEL.LAST_STRIDE,
                                            cfg.MODEL.POOL)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    loss_fn = make_loss(cfg)

    logger.info("Start training")
    since = time.time()
    for epoch in range(epochs):
        count = 0
        running_loss = 0.0
        running_acc = 0
        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels = data
            if device:
                model.to(device)
                images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()

            scores, feats = model(images)
            loss = loss_fn(scores, feats, labels)

            loss.backward()
            optimizer.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (
                scores[0].max(1)[1] == labels).float().mean().item()

        logger.info(
            "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
            .format(epoch + 1, count, len(train_loader), running_loss / count,
                    running_acc / count,
                    scheduler.get_lr()[0]))
        scheduler.step()

        if (epoch + 1) % checkpoint_period == 0:
            model.cpu()
            model.save(output_dir, epoch + 1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader,
                             desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data
                    if device:
                        model.to(device)
                        images = images.to(device)

                    feats = model(images)

                all_feats.append(feats)
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))

            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
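
make_optimizer(cfg, model) is a factory from this repo; a hedged sketch of what such factories typically do in ReID baselines (per-parameter learning-rate and weight-decay groups; the numeric defaults here are illustrative, not taken from the config):

import torch

def make_optimizer_sketch(model, base_lr=3.5e-4, weight_decay=5e-4, bias_lr_factor=2.0):
    params = []
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue
        lr = base_lr * bias_lr_factor if "bias" in name else base_lr
        wd = 0.0 if "bias" in name else weight_decay
        params.append({"params": [p], "lr": lr, "weight_decay": wd})
    return torch.optim.Adam(params)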
Example #9
    def __init__(self, config_file, epoch_label, **kwargs):
        """
        Validation set is split into two parts - query (probe) and gallery (to be searched), based on num_query.

        ::Return: Initialize a file 'model_epoch.mtch':
                matching matrix M of num_query x num_gallery. M_ij is 1 <=> ith query is matched at rank j.
        """

        cfg.merge_from_file(config_file)

        if kwargs:
            opts = []
            for k, v in kwargs.items():
                opts.append(k)
                opts.append(v)
            cfg.merge_from_list(opts)
        cfg.freeze()
        self.cfg = cfg

        device = torch.device(cfg.DEVICE)
        output_dir = cfg.OUTPUT_DIR
        epoch = epoch_label
        re_ranking = cfg.RE_RANKING
        if not os.path.exists(output_dir):
            raise OSError('Output directory does not exist.')
        save_filename = cfg.MODEL.NAME + ('_epo%s.mtch' % epoch_label)
        self._filepath = os.path.join(output_dir, save_filename)

        if os.path.exists(self._filepath):
            print('Loading matches file...')
            self.data = np.load(self._filepath)
            train_loader, val_loader, num_query, num_classes = data_loader(
                cfg, cfg.DATASETS.NAMES)
            self.dataset = val_loader.dataset
            print('Matches loaded.')
        else:
            print('Creating matches file...')
            PersonReID_Dataset_Downloader(cfg.DATASETS.STORE_DIR,
                                          cfg.DATASETS.NAMES)

            train_loader, val_loader, num_query, num_classes = data_loader(
                cfg, cfg.DATASETS.NAMES)

            # load model
            model = getattr(models, cfg.MODEL.NAME)(num_classes)
            model.load(output_dir, epoch)
            model.eval()

            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader,
                             desc='Feature Extraction',
                             leave=False):
                with torch.no_grad():

                    images, pids, camids = data

                    if device:
                        model.to(device)
                        images = images.to(device)

                    feats = model(images)

                all_feats.append(feats)
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))

            all_feats = torch.cat(all_feats, dim=0)
            # query
            qf = all_feats[:num_query]
            q_pids = np.asarray(all_pids[:num_query])
            q_camids = np.asarray(all_camids[:num_query])

            # gallery
            gf = all_feats[num_query:]
            g_pids = np.asarray(all_pids[num_query:])
            g_camids = np.asarray(all_camids[num_query:])

            if re_ranking:
                raise NotImplementedError()
            else:
                m, n = qf.shape[0], gf.shape[0]
                distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                            torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
                distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # distmat -= 2 * qf @ gf.t()
                distmat = distmat.cpu().numpy()

            indices = np.argsort(distmat, axis=1)
            # matches = np.repeat(g_pids.reshape([1, n]), m, axis=0) == q_pids[:, np.newaxis]
            ranked_matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(
                np.int32)

            data = {
                'q_pids': q_pids,
                'g_pids': g_pids,
                'q_camids': q_camids,
                'g_camids': g_camids,
                'ranked_matches': ranked_matches,
                # 'matches': matches,
                'indices': indices,
            }

            # save as .mtch
            with open(self._filepath, 'wb') as f:
                np.savez(f, **data)

            print('Matches created.')

            self.data = data
            self.dataset = val_loader.dataset
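
The '.mtch' file above is an ordinary npz archive under a different extension; a minimal round-trip sketch:

import numpy as np

data = {"indices": np.arange(6).reshape(2, 3),
        "ranked_matches": np.eye(2, 3, dtype=np.int32)}
with open("demo.mtch", "wb") as f:
    np.savez(f, **data)           # np.savez writes zip format regardless of extension
loaded = np.load("demo.mtch")     # np.load detects the zip header
print(loaded["ranked_matches"])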
Example #10
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    feature_dim = 2048

    def extract_gall_feat(gall_loader):
        print('Extracting Gallery Feature...')
        ptr = 0
        gall_feat = np.zeros((ngall, feature_dim))
        for batch_idx, (input, label) in enumerate(gall_loader):
            batch_num = input.size(0)
            model.eval()
            with torch.no_grad():
                input = input.to('cuda')
                feat = model(input)
                feat = norm(feat, axis=-1)
                gall_feat[ptr:ptr + batch_num, :] = feat.detach().cpu().numpy()
                ptr = ptr + batch_num
        return gall_feat

    def extract_query_feat(query_loader):
        print('Extracting Query Feature...')
        ptr = 0
        query_feat = np.zeros((nquery, feature_dim))
        for batch_idx, (input, label) in enumerate(query_loader):
            batch_num = input.size(0)
            model.eval()
            with torch.no_grad():
                input = input.to('cuda')
                feat = model(input)
                feat = norm(feat, axis=-1)
                query_feat[ptr:ptr +
                           batch_num, :] = feat.detach().cpu().numpy()
                ptr = ptr + batch_num
        return query_feat

    # ===========================================================================================
    model = build_model(cfg, 395)
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT))
    model.to('cuda')
    # ===========================================================================================
    data_path = '/data1/lidg/reid_dataset/IV-ReID/SYSU'
    query_img, query_label, query_cam = process_query_sysu(data_path,
                                                           mode='all')
    gall_img, gall_label, gall_cam = process_gallery_sysu(data_path,
                                                          mode='all',
                                                          trial=0)

    nquery = len(query_label)
    ngall = len(gall_label)
    print("Dataset statistics:")
    print("  ------------------------------")
    print("  subset   | # ids | # images")
    print("  ------------------------------")
    print("  query    | {:5d} | {:8d}".format(len(np.unique(query_label)),
                                              nquery))
    print("  gallery  | {:5d} | {:8d}".format(len(np.unique(gall_label)),
                                              ngall))
    print("  ------------------------------")

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    height, width = 256, 128  # (384, 256), (256, 128), (224, 224)
    transform_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((height, width)),
        transforms.ToTensor(),
        normalize,
    ])

    # ===========================================================================================
    queryset = TestData(query_img, query_label, transform=transform_test)
    query_loader = data.DataLoader(queryset,
                                   batch_size=29,
                                   shuffle=False,
                                   num_workers=4)
    query_feat = extract_query_feat(query_loader)
    # ===========================================================================================

    all_cmc = 0
    all_mAP = 0
    acc = np.zeros((10, 4))
    for trial in range(10):
        gall_img, gall_label, gall_cam = process_gallery_sysu(data_path,
                                                              mode='all',
                                                              trial=trial)

        # =======================================================================================
        trial_gallset = TestData(gall_img,
                                 gall_label,
                                 transform=transform_test)
        trial_gall_loader = data.DataLoader(trial_gallset,
                                            batch_size=29,
                                            shuffle=False,
                                            num_workers=4)
        gall_feat = extract_gall_feat(trial_gall_loader)
        # =======================================================================================

        distmat = np.matmul(query_feat, np.transpose(gall_feat))

        cmc, mAP = eval_sysu(-distmat, query_label, gall_label, query_cam,
                             gall_cam)

        if trial == 0:
            all_cmc = cmc
            all_mAP = mAP
        else:
            all_cmc = all_cmc + cmc
            all_mAP = all_mAP + mAP
        print('Test Trial: {}'.format(trial))
        print(
            'FC: top-1: {:.2%} | top-5: {:.2%} | top-10: {:.2%}| top-20: {:.2%}'
            .format(cmc[0], cmc[4], cmc[9], cmc[19]))
        print('mAP: {:.2%}'.format(mAP))
        acc[trial][0] = float('%.4f' % cmc[0])
        acc[trial][1] = float('%.4f' % cmc[9])
        acc[trial][2] = float('%.4f' % cmc[19])
        acc[trial][3] = float('%.4f' % mAP)
    cmc = all_cmc / 10
    mAP = all_mAP / 10
    print('All Average:')
    print('FC: top-1: {:.2%} | top-5: {:.2%} | top-10: {:.2%}| top-20: {:.2%}'.
          format(cmc[0], cmc[4], cmc[9], cmc[19]))
    print('mAP: {:.2%}'.format(mAP))
    print(np.mean(acc, 0))
    print(np.std(acc, 0))
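
Because the features are L2-normalized (norm(feat, axis=-1)), the matmul above yields cosine similarity, and eval_sysu receives -distmat so that higher similarity reads as lower distance. A toy illustration:

import numpy as np

q = np.random.rand(4, 8)
g = np.random.rand(6, 8)
q /= np.linalg.norm(q, axis=1, keepdims=True)  # unit-length rows
g /= np.linalg.norm(g, axis=1, keepdims=True)
sim = q @ g.T   # (4, 6) cosine similarities
dist = -sim     # negate: higher similarity -> lower "distance"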
Example #11
def main():
    parser = argparse.ArgumentParser(description="ReID Model Training")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        metavar="FILE",
                        help="path to config file",
                        type=str)
    # parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    cfg.freeze()

    log_save_dir = os.path.join(cfg.OUTPUT_DIR, cfg.DATASETS.TEST_NAMES,
                                cfg.MODEL.VERSION)
    if not os.path.exists(log_save_dir):
        os.makedirs(log_save_dir)

    logger = setup_logger("reid_baseline.train", log_save_dir, 0)
    logger.info("Using {} GPUs.".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    logger.info('start training')
    cudnn.benchmark = True

    # Create data loaders
    tng_dataloader, val_dataloader, num_classes, num_query, tng_set, val_set = get_dataloader(
        cfg, return_sets=True)

    # Start training
    writer = None
    reid_system = ReidSystem(cfg, logger, writer)
    model = reid_system.model
    model.cuda()

    iter_nums = 1
    cluster_list = []
    #top_percent = args.rho
    for iter_n in range(iter_nums):
        #### get the source data features
        source_features = inference_ssg(cfg, model, tng_dataloader)
        target_features = inference_ssg(cfg, model, val_dataloader)

        #### calculate distance and rerank result
        distmat = compute_distmat_using_gpu(source_features, target_features)
        print('source_features', source_features.shape, 'target_features',
              target_features.shape, 'distmat', distmat.shape)

        labels_list, cluster_list = generate_self_label(
            distmat, iter_n, cluster_list)
        #### generate new dataset
        val_set = update_dataset(val_set, labels_list)
        del labels_list
        # del cluster_list
        reid_system.train()
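
generate_self_label() is project code; a hedged sketch of one common way to turn a precomputed distance matrix into pseudo-labels, using sklearn's DBSCAN (not necessarily what this repo does):

import numpy as np
from sklearn.cluster import DBSCAN

dist = np.random.rand(10, 10)
dist = (dist + dist.T) / 2     # symmetrize the toy distance matrix
np.fill_diagonal(dist, 0.0)
labels = DBSCAN(eps=0.5, min_samples=2,
                metric="precomputed").fit_predict(dist)  # -1 marks outliers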
Example #12
def train(config_file1, config_file2, **kwargs):
    # 1. config
    cfg.merge_from_file(config_file1)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    #cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log')
    #logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file1))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    device = torch.device(cfg.DEVICE)  # needed below for model.to(device)
    epochs = cfg.SOLVER.MAX_EPOCHS

    # 2. datasets
    # Load the original dataset
    #dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES )
    dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES +
                                     '_origin')  #'Market1501_origin'
    train_set_reference = ImageDataset(dataset_reference.train,
                                       train_transforms)
    train_loader_reference = DataLoader(train_set_reference,
                                        batch_size=128,
                                        shuffle=False,
                                        num_workers=cfg.DATALOADER.NUM_WORKERS,
                                        collate_fn=train_collate_fn)
    # does not need to go through the network, so no transform is needed

    # Load the one-shot dataset
    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    # 3. load the model and optimizer
    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    loss_fn = make_loss(cfg)
    logger.info("Start training")
    since = time.time()
    if torch.cuda.device_count() > 1:
        print("Use", torch.cuda.device_count(), 'gpus')
    elif torch.cuda.device_count() == 1:
        print("Use", torch.cuda.device_count(), 'gpu')
    model = nn.DataParallel(model)
    top = 0  # the choice of the nearest sample
    top_update = 0  # the first iteration trains 80 steps and the following ones train 40
    train_time = 0  # how many times the GAN has been trained
    bound = 1  # how many rounds of GAN training in total; revisit when training multiple rounds
    lock = False
    train_compen = 0
    # 4. Train and test
    for epoch in range(epochs):
        running_loss = 0.0
        running_acc = 0
        count = 1
        # get nearest samples and reset the model
        if top_update < 80:
            train_step = 80
            # whether the first pass over newly GAN-generated images needs 80 steps;
            # check whether the next pass receives fewer input images
        else:
            train_step = 40
        #if top_update % train_step == 0:
        if top_update % train_step == 0 and train_compen == 0:
            print("top: ", top)
            #作者原来的实验top取到41,这里折中(是否要折中也是个实验测试的点)
            #if 1==1:
            if top >= 8 and train_time < bound:
                train_compen = (top - 1) * 40 + 80
                #build_image(A,train_loader_reference,train_loader)
                train_time += 1
                # GAN training mode
                mode = 'train'
                retrain(mode)
                # add the GAN-generated images back into the original dataset
                produce()
                cfg.merge_from_file(config_file2)
                output_dir = cfg.OUTPUT_DIR
                if output_dir and not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                logger = make_logger("Reid_Baseline", output_dir, 'log')
                logger.info(
                    "Loaded configuration file {}".format(config_file2))
                logger.info("Running with config:\n{}".format(cfg))
                dataset_reference = init_dataset(
                    cfg, cfg.DATASETS.NAMES + '_origin')  #'Market1501_origin'
                train_set_reference = ImageDataset(dataset_reference.train,
                                                   train_transforms)
                train_loader_reference = DataLoader(
                    train_set_reference,
                    batch_size=128,
                    shuffle=False,
                    num_workers=cfg.DATALOADER.NUM_WORKERS,
                    collate_fn=train_collate_fn)
                dataset_ref = init_dataset(cfg, cfg.DATASETS.NAMES +
                                           '_ref')  #'Market1501_origin'
                train_set_ref = ImageDataset(dataset_ref.train,
                                             train_transforms)
                train_loader_ref = DataLoader(
                    train_set_ref,
                    batch_size=128,
                    shuffle=False,
                    num_workers=cfg.DATALOADER.NUM_WORKERS,
                    collate_fn=train_collate_fn)
                lock = True
            if lock:
                A, path_labeled = PSP2(model, train_loader_reference,
                                       train_loader, train_loader_ref, top,
                                       logger, cfg)
                lock = False
            else:
                A, path_labeled = PSP(model, train_loader_reference,
                                      train_loader, top, logger, cfg)

            #vis = len(train_loader_reference.dataset)
            #A= torch.ones(vis, len(train_loader_reference.dataset))
            #build_image(A,train_loader_reference,train_loader)
            top += cfg.DATALOADER.NUM_JUMP
            model = getattr(models, cfg.MODEL.NAME)(num_classes)
            model = nn.DataParallel(model)
            optimizer = make_optimizer(cfg, model)
            scheduler = make_scheduler(cfg, optimizer)
            A_store = A.clone()
        top_update += 1

        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels_batch, img_path = data
            index, index_labeled = find_index_by_path(img_path,
                                                      dataset_reference.train,
                                                      path_labeled)
            images_relevant, GCN_index, choose_from_nodes, labels = load_relevant(
                cfg, dataset_reference.train, index, A_store, labels_batch,
                index_labeled)
            # if device:
            model.to(device)
            images = images_relevant.to(device)

            scores, feat = model(images)
            del images
            loss = loss_fn(scores, feat, labels.to(device), choose_from_nodes)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (scores[choose_from_nodes].max(1)[1].cpu() ==
                            labels_batch).float().mean().item()

        scheduler.step()

        # for model save if you need
        # if (epoch+1) % checkpoint_period == 0:
        #     model.cpu()
        #     model.save(output_dir,epoch+1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader,
                             desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data

                    model.to(device)
                    images = images.to(device)

                    feats = model(images)
                    del images
                all_feats.append(feats.cpu())
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))

            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10, 20]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))
        if train_compen > 0:
            train_compen -= 1

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
Example #13
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="./configs/softmax_triplet.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]
                   ) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.DATASETS.NAMES = 'mvb'
    cfg.DATASETS.ROOT_DIR = './data'
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    # init dataloader
    gallery_loader, query_loader, num_query, num_classes = make_test_data_loader(
        cfg)
    # build model and load checkpoint param
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    model.eval().cuda()
    feats = []
    g_pids = []
    g_camids = []
    g_names = []
    q_names = []
    # ===== extract feats =====
    with torch.no_grad():
        print('extract query feats...')
        for batch in query_loader:
            data, _, _, paths = batch
            feat = model(data.cuda())
            feats.append(feat.cpu())
            q_names.extend(paths)
        print('extract gallery feats...')
        for batch in gallery_loader:
            data, pids, camids, paths = batch
            g_pids.extend(pids)
            g_camids.extend(camids)
            feat = model(data.cuda())
            feats.append(feat.cpu())
            g_names.extend(paths)

    # ===== init vars =====
    feats = torch.cat(feats, dim=0) # concatenate: feats was collected batch-wise
    feats = torch.nn.functional.normalize(feats, dim=1, p=2) # L2-normalize feats
    qf = feats[:num_query]          # query feats
    gf = feats[num_query:]          # gallery feats
    g_pids = np.array(g_pids)       # gallery pids
    g_camids = np.array(g_camids)   # gallery camids
    # ===== calc euclidean distance between gallery feat and query feat =====
    m, n = qf.shape[0], gf.shape[0]
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # distmat -= 2 * qf @ gf.t()
    distmat = distmat.cpu().numpy()

    # ===== find the minimum distance per query, id-wise =====
    num_q, num_g = distmat.shape
    indices = np.argsort(distmat, axis=1)   # sort distmat from low to high (distance)
    q_nameinds = np.zeros((num_q), dtype=int)  # query ids for sorting, since the query files are not ordered
    distmat_id_wised = np.ones((num_q, num_classes), dtype=np.float32) * 100
    def get_id(x):
        return int(x.split('_')[0])
    for q_idx in range(num_q):
        order = indices[q_idx]
        q_nameinds[q_idx] = int(q_names[q_idx].split('.')[0])  # get the query id from the query img filename
        names = np.array(g_names)[order].tolist()
        pids = map(get_id, names)
        dists = distmat[q_idx][order]
        # find the min distance of the current query, id-wise
        for pid, dist in zip(pids, dists):
            if distmat_id_wised[q_idx, pid] > dist:
                distmat_id_wised[q_idx, pid] = dist
    
    # ===== sort query id from 0 to query nums =====
    orders = np.argsort(q_nameinds, axis=0)
    q_nameinds = q_nameinds[orders]
    distmat_id_wised = distmat_id_wised[orders]

    # ===== write result to csv =====
    with open('../024_bag_result.csv', 'w') as f:
        for q_idx in range(num_q):
            order = np.argsort(distmat_id_wised[q_idx])
            max_dist = distmat_id_wised[q_idx].max()
            buf = '%05d,' % q_nameinds[q_idx]
            for ind in order:
                score = (max_dist-distmat_id_wised[q_idx][ind])/max_dist
                buf += '%04d,%.6f,' % (ind, score)
            buf = buf[:-1]
            f.write(buf+'\n')
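
The id-wise reduction above keeps, for each query, the minimum distance over all gallery images sharing a class id; a compact sketch of the same reduction on toy data:

import numpy as np

distmat = np.random.rand(2, 5)      # 2 queries x 5 gallery images
g_ids = np.array([0, 1, 1, 2, 0])   # gallery class id per image (toy values)
num_classes = 3
id_wise = np.full((2, num_classes), np.inf)
for g_idx, pid in enumerate(g_ids):
    id_wise[:, pid] = np.minimum(id_wise[:, pid], distmat[:, g_idx])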
def main():
    # argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file',
                        help='path of config file',
                        default=None,
                        type=str)
    parser.add_argument('--clean_run',
                        help='run from scratch',
                        action='store_true')
    parser.add_argument('opts',
                        help='modify arguments',
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # config setup
    if args.config_file is not None:
        cfg.merge_from_file(args.config_file)
    if args.opts is not None:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    if args.clean_run:
        if os.path.exists(f'../experiments/{cfg.SYSTEM.EXP_NAME}'):
            shutil.rmtree(f'../experiments/{cfg.SYSTEM.EXP_NAME}')
        if os.path.exists(f'../experiments/runs/{cfg.SYSTEM.EXP_NAME}'):
            shutil.rmtree(f'../experiments/runs/{cfg.SYSTEM.EXP_NAME}')
            # Note!: Sleeping to make tensorboard delete its cache.
            time.sleep(5)

    search = {key: True for key in ('lr', 'momentum', 'factor', 'step_size')}
    set_seeds(cfg)
    logdir, chk_dir = save_config(cfg.SAVE_ROOT, cfg)
    writer = SummaryWriter(log_dir=logdir)
    # setup logger
    logger_dir = Path(chk_dir).parent
    logger = setup_logger(cfg.SYSTEM.EXP_NAME, save_dir=logger_dir)
    # Model
    prediction_model = BaseModule(cfg)
    noise_model = NoiseModule(cfg)
    model = [prediction_model, noise_model]
    device = cfg.SYSTEM.DEVICE if torch.cuda.is_available() else 'cpu'
    # load the data
    train_loader = get_loader(cfg, 'train')
    val_loader = get_loader(cfg, 'val')
    prediction_model, noise_model = model
    prediction_model.to(device)
    lr = cfg.SOLVER.LR
    momentum = cfg.SOLVER.MOMENTUM
    weight_decay = cfg.SOLVER.WEIGHT_DECAY
    betas = cfg.SOLVER.BETAS
    step_size = cfg.SOLVER.STEP_SIZE
    decay_factor = cfg.SOLVER.FACTOR

    # Optimizer
    if cfg.SOLVER.OPTIMIZER == 'Adam':
        optimizer = optim.Adam(prediction_model.parameters(),
                               lr=lr,
                               weight_decay=weight_decay,
                               betas=betas)
    elif cfg.SOLVER.OPTIMIZER == 'SGD':
        optimizer = optim.SGD(prediction_model.parameters(),
                              lr=lr,
                              weight_decay=weight_decay,
                              momentum=momentum)
    if cfg.SOLVER.SCHEDULER == 'StepLR':
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=step_size,
                                              gamma=decay_factor)
    elif cfg.SOLVER.SCHEDULER == 'ReduceLROnPlateau':
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            factor=cfg.SOLVER.FACTOR,
            min_lr=cfg.SOLVER.MIN_LR,
            patience=cfg.SOLVER.PAITENCE,
            cooldown=cfg.SOLVER.COOLDOWN,
            threshold=cfg.SOLVER.THRESHOLD,
            eps=1e-24)
    # checkpointer
    chkpt = Checkpointer(prediction_model,
                         optimizer,
                         scheduler=scheduler,
                         save_dir=chk_dir,
                         logger=logger,
                         save_to_disk=True)
    offset = 0
    checkpointer = chkpt.load()
    if checkpointer:
        offset = checkpointer.pop('epoch')
    loader = [train_loader, val_loader]
    print(f'Same optimizer, {scheduler.optimizer == optimizer}')
    print(cfg)
    model = [prediction_model, noise_model]
    train(cfg, model, optimizer, scheduler, loader, chkpt, writer, offset)
    test_loader = get_loader(cfg, 'test')
    test(cfg, prediction_model, test_loader, writer, logger)
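
One caveat for the scheduler branch above: StepLR is stepped per epoch with scheduler.step(), while ReduceLROnPlateau must be stepped with the monitored metric. A minimal sketch of how train() is assumed to drive both kinds (step_scheduler and val_loss are illustrative names, not from the original):

import torch.optim as optim

def step_scheduler(scheduler, val_loss):
    # ReduceLROnPlateau decides from the metric; other schedulers step unconditionally
    if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
        scheduler.step(val_loss)
    else:
        scheduler.step()
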
Example no. 15
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument('--test_phase', action='store_true', help="run in test phase")
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.DATASETS.PRELOAD_IMAGE = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model(cfg, 0)
    model = model.cuda()
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

    test_dataloader, num_query, dataset = get_test_dataloader(cfg,
                                                              test_phase=True)

    use_local_feature = True
    use_rerank = True
    use_cross_feature = False

    distmat, index, distmat1, distmat2 = inference_aligned_flipped(
        cfg, model, test_dataloader, num_query, use_local_feature, use_rerank,
        use_cross_feature)

    suffix = 'flip'
    if use_local_feature:
        suffix += '_aligned'
    if use_rerank:
        suffix += '_rerank'
    if use_cross_feature:
        suffix += '_cross'

    # saving results
    if args.test_phase:
        query_path = [t[0] for t in dataset.query]
        gallery_path = [t[0] for t in dataset.gallery]
        logger.info("-------------Write resutls to json file----------")

        results = {}
        top_k = 200
        for i in range(len(query_path)):
            topk_res = []
            for j in range(top_k):
                img_path = gallery_path[index[i, j]]
                # print(img_path)
                topk_res.append(img_path.split('/')[-1].split('_')[-1])
            results[query_path[i].split('/')[-1].split('_')[-1]] = topk_res

        # write results
        if not os.path.isdir('submit'):
            os.mkdir('submit')

        strtime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        json.dump(
            results,
            open(
                'submit/reid_%s_%s_%s.json' %
                (cfg.MODEL.NAME, strtime, suffix), 'w'))

        # saving dist_mats
        mat_path = 'dist_mats'
        if not os.path.isdir(mat_path):
            os.mkdir(mat_path)
        with h5py.File(
                '%s/test_%s_%s_%s.h5' %
                (mat_path, cfg.MODEL.NAME, strtime, suffix), 'w') as f:
            f.create_dataset('dist_mat', data=distmat, compression='gzip')

            if distmat1 is not None:
                f.create_dataset('dist_mat1', data=distmat1, compression='gzip')
            if distmat2 is not None:
                f.create_dataset('dist_mat2', data=distmat2, compression='gzip')
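
The .h5 files written above can be read back the way the ensembling snippet later in this collection does; a minimal round-trip sketch (the file name is illustrative):

import h5py
import numpy as np

with h5py.File('dist_mats/test_example.h5', 'r') as f:
    dist_mat = f['dist_mat'][()]           # load the full dataset into memory
index = np.argsort(dist_mat, axis=1)       # per-query gallery ranking, smallest distance first
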
Example no. 16
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    query_dataset, test_dataset, query_loader, test_loader = make_data_loader(cfg)
    model = build_model(cfg, 4768)  # 4768: hardcoded class count

    model.load_param(cfg.TEST.WEIGHT)

    model.to(DEVICE)
    model.eval()


    with torch.no_grad():
        query = np.concatenate([model(img.to(DEVICE)).detach().cpu().numpy()
                                for img, pid, camid in query_loader])
        test = np.concatenate([model(img.to(DEVICE)).detach().cpu().numpy()
                               for img, pid, camid in test_loader])

    # note: this iterates the loaders a second time just to collect the pids
    query_ids = [pid1 for _, pid1, _ in query_loader]
    queryid = []
    for qid in query_ids:
        queryid.extend(list(qid))
    test_ids = [pid2 for _, pid2, _ in test_loader]

    testid = []
    for tid in test_ids:
        testid.extend(list(tid))
    feat_norm = cfg.TEST.FEAT_NORM
    if feat_norm == 'yes':
        print("The test feature is normalized")
        feat = np.concatenate((query,test))
        feat = torch.from_numpy(feat)
        feat = torch.nn.functional.normalize(feat, dim=1, p=2).numpy()  # along channel
        query = feat[:3147]   # 3147: hardcoded number of query images
        test = feat[3147:]

    print(len(query))
    re_rank = cfg.TEST.RE_RANKING == 'yes'  # assumed flag; re_rank was undefined in the original snippet
    if re_rank:
        query = torch.from_numpy(query)
        test = torch.from_numpy(test)
        dist = re_ranking(query, test, k1=8, k2=3, lambda_value=0.6)
    else:
        #dist = cdist(query,test,metric='mahalanobis')
        dist = cosine_similarity(query, test)
    #dist = normalize(dist, axis=1, norm='l2')
    #jsonData = json.dumps(dist.tolist())
    #fileObject = open('./distresult1.json', 'w')
    #fileObject.write(jsonData)
    #fileObject.close()
    rank = get_result(dist, queryid, testid)
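
Note that the two branches above rank in opposite directions: re_ranking returns a distance (smaller is better) while cosine_similarity returns a similarity (larger is better), so get_result is assumed to handle the sign. A toy illustration:

import numpy as np

dist = np.array([[0.2, 0.9]])             # distance: gallery 0 is the better match
sim = np.array([[0.9, 0.2]])              # similarity: gallery 0 is the better match
print(np.argsort(dist, axis=1))           # ascending  -> [[0 1]]
print(np.argsort(-sim, axis=1))           # descending -> [[0 1]]
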
Example no. 17
def main():
    parser = argparse.ArgumentParser(description="AGW Re-ID Baseline")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ[
            'CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # added by gu
    cudnn.benchmark = True

    data_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)

    if 'cpu' not in cfg.MODEL.DEVICE:
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        model.to(device=cfg.MODEL.DEVICE)

    if cfg.TEST.EVALUATE_ONLY == 'on':
        logger.info("Evaluate Only")
        model.load_param(cfg.TEST.WEIGHT)
        do_test(cfg, model, data_loader, num_query)
        return

    criterion = model.get_creterion(cfg, num_classes)
    optimizer = model.get_optimizer(cfg, criterion)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'self':
        start_epoch = int(
            cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')
            [-1])  # parse the epoch number from the checkpoint filename
        print('Start epoch:', start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
            'model', 'optimizer')
        print('Path to the checkpoint of optimizer:', path_to_optimizer)
        path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace(
            'model', 'center_param')
        print('Path to the checkpoint of center_param:', path_to_center_param)
        path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace(
            'model', 'optimizer_center')
        print('Path to the checkpoint of optimizer_center:',
              path_to_optimizer_center)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer['model'].load_state_dict(torch.load(path_to_optimizer))
        criterion['center'].load_state_dict(torch.load(path_to_center_param))
        optimizer['center'].load_state_dict(
            torch.load(path_to_optimizer_center))
        scheduler = WarmupMultiStepLR(optimizer['model'], cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch)
    elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer['model'], cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)

    else:
        raise ValueError(
            'Only support pretrain_choice for imagenet and self, but got {}'.
            format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(cfg, model, data_loader, optimizer, scheduler, criterion,
             num_query, start_epoch)
Example no. 18
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True


    model = build_model(cfg, 412)
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT))
    model.to('cuda')

    # ===============================================================================================

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((256, 128)),  #  384, 256
        transforms.ToTensor(),
        #normalize,
    ])

    data_path = '/data1/lidg/reid_dataset/IV-ReID/RegDB/'

    query_img, query_label = process_test_regdb(
        data_path, trial=0, modal='thermal')  #  thermal    visible
    gall_img, gall_label = process_test_regdb(data_path,
                                              trial=0,
                                              modal='visible')

    gallset = TestData(gall_img,
                       gall_label,
                       transform=transform_test,
                       img_size=(384, 256))
    gall_loader = data.DataLoader(gallset,
                                  batch_size=1,
                                  shuffle=False,
                                  num_workers=4)

    nquery = len(query_label)
    ngall = len(gall_label)
    print("Dataset statistics:")
    print("  ------------------------------")
    print("  subset   | # ids | # images")
    print("  ------------------------------")
    print("  query    | {:5d} | {:8d}".format(len(np.unique(query_label)),
                                              nquery))
    print("  gallery  | {:5d} | {:8d}".format(len(np.unique(gall_label)),
                                              ngall))
    print("  ------------------------------")

    queryset = TestData(query_img,
                        query_label,
                        transform=transform_test,
                        img_size=(384, 256))
    query_loader = data.DataLoader(queryset,
                                   batch_size=1,
                                   shuffle=False,
                                   num_workers=4)

    feature_dim = 2048

    def extract_gall_feat(gall_loader):
        print('Extracting Gallery Feature...')
        ptr = 0
        gall_feat = np.zeros((ngall, feature_dim))

        rgbs = np.zeros((ngall, 256, 128, 3))
        learns = np.zeros((ngall, 256, 128, 3))

        for batch_idx, (input, label) in enumerate(gall_loader):
            batch_num = input.size(0)

            model.eval()
            with torch.no_grad():
                input = input.to('cuda')

                #feat = model(input)
                feat, rgb, x = model(input)

                rgbs[ptr:ptr + batch_num, :] = rgb.detach().cpu().numpy().transpose(0, 2, 3, 1)    # assumes NCHW model output
                learns[ptr:ptr + batch_num, :] = x.detach().cpu().numpy().transpose(0, 2, 3, 1)    # assumes NCHW model output

                gall_feat[ptr:ptr + batch_num, :] = feat.detach().cpu().numpy()
                ptr = ptr + batch_num

        from torchvision.transforms import ToPILImage
        img1 = np.mean(rgbs, axis=0)
        img2 = np.mean(learns, axis=0)

        img1 = ToPILImage()(img1.astype(np.uint8))
        img2 = ToPILImage()(img2.astype(np.uint8))

        img1.save('regdb_RGB.jpg')
        img2.save('regdb_X.jpg')

        return gall_feat

    def extract_query_feat(query_loader):
        print('Extracting Query Feature...')
        ptr = 0
        query_feat = np.zeros((nquery, feature_dim))
        for batch_idx, (input, label) in enumerate(query_loader):
            batch_num = input.size(0)
            model.eval()
            with torch.no_grad():
                input = input.to('cuda')
                feat = model(input)
                query_feat[ptr:ptr +
                           batch_num, :] = feat.detach().cpu().numpy()
                ptr = ptr + batch_num
        return query_feat

    gall_feat = extract_gall_feat(gall_loader)

    query_feat = extract_query_feat(query_loader)

    # fc feature
    distmat = np.matmul(query_feat, np.transpose(gall_feat))
    cmc, mAP = eval_regdb(-distmat, query_label, gall_label)

    print('FC: top-1: {:.2%} | top-5: {:.2%} | top-10: {:.2%}| top-20: {:.2%}'.
          format(cmc[0], cmc[4], cmc[9], cmc[19]))
    print('mAP: {:.2%}'.format(mAP))
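
The retrieval above scores matches with a plain inner product and negates it so that eval_regdb can treat the matrix as a distance. A self-contained sketch of the same pattern on toy features:

import numpy as np

q = np.random.rand(3, 8)                  # toy query features
g = np.random.rand(5, 8)                  # toy gallery features
sim = q @ g.T                             # inner product: higher = more similar
distmat = -sim                            # negate so ascending argsort ranks best first
top1 = np.argsort(distmat, axis=1)[:, 0]  # best gallery index per query
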
Example no. 19
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument('--test_phase', action='store_true', help="run in test phase")
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    test_dataloader, num_query, dataset = get_test_dataloader(cfg,
                                                              test_phase=True)

    distmat_paths = [
        cfg.TEST.DISTMAT1,
        cfg.TEST.DISTMAT2,
        cfg.TEST.DISTMAT3,
        cfg.TEST.DISTMAT4,
        cfg.TEST.DISTMAT5,
        cfg.TEST.DISTMAT6,
        cfg.TEST.DISTMAT7,
        cfg.TEST.DISTMAT8,
        cfg.TEST.DISTMAT9,
        cfg.TEST.DISTMAT10,
        cfg.TEST.DISTMAT11,
        cfg.TEST.DISTMAT12,
        cfg.TEST.DISTMAT13,
        cfg.TEST.DISTMAT14,
        cfg.TEST.DISTMAT15,
        cfg.TEST.DISTMAT16,
        cfg.TEST.DISTMAT17,
        cfg.TEST.DISTMAT18,
    ]
    # load the dist_mats
    dist_mats = []

    cnt = 0
    for distmat_path in distmat_paths:
        if os.path.isfile(distmat_path):
            f = h5py.File(distmat_path, 'r')
            mat = f['dist_mat'][()]
            mat = mat[np.newaxis, ...]
            dist_mats.append(mat)
            f.close()
            cnt += 1
        else:
            logger.info(f'Invalid checkpoint path {distmat_path}')

    dist_mat = np.concatenate(dist_mats, axis=0).mean(axis=0)
    score = dist_mat
    index = np.argsort(score, axis=1)  # from small to large

    logger.info(f'Average {cnt} results')
    # saving results
    if args.test_phase:
        query_path = [t[0] for t in dataset.query]
        gallery_path = [t[0] for t in dataset.gallery]
        logger.info("-------------Write resutls to json file----------")

        results = {}
        top_k = 200
        for i in range(len(query_path)):
            topk_res = []
            for j in range(top_k):
                img_path = gallery_path[index[i, j]]
                # print(img_path)
                topk_res.append(img_path.split('/')[-1].split('_')[-1])
            results[query_path[i].split('/')[-1].split('_')[-1]] = topk_res

        # write the results (make sure the submit directory exists first)
        if not os.path.isdir('submit'):
            os.mkdir('submit')
        strtime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        json.dump(results,
                  open('submit/ensemble_%s_%dm.json' % (strtime, cnt), 'w'))
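
The ensemble above is a plain mean over the stacked distance matrices. If the individual matrices live on different scales, min-max normalizing each one before averaging is a common refinement (a hedged sketch, not part of the original):

import numpy as np

def minmax(mat):
    # squash a distance matrix into [0, 1] so no single model dominates the mean
    return (mat - mat.min()) / (mat.max() - mat.min() + 1e-12)

# dist_mats holds (num_query, num_gallery) arrays as loaded above
# dist_mat = np.mean([minmax(m) for m in dist_mats], axis=0)
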
Example no. 20
def arg_from_ui(imgs,
                progress,
                gpu_flag=None,
                config_path=None,
                dir=None,
                checkpoint=None,
                result=None):
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
        'PyTorch>=0.4.0 is required'
    # args = {'cfg': 'config/ade20k-resnet50dilated-ppm_deepsup.yaml', 'gpu': 0, 'opts': None, 'gpu_flag': False,
    #         'dir': 'ade20k-resnet50dilated-ppm_deepsup', 'result': 'segmentation', 'checkpoint': 'epoch_20.pth'}
    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Testing")
    parser.add_argument("--imgs",
                        default=imgs,
                        type=str,
                        help="an image paths, or a directory name")
    parser.add_argument(
        "--config_path",
        default="config/ade20k-resnet50dilated-ppm_deepsup.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--gpu",
                        default=0,
                        type=int,
                        help="gpu id for evaluation")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    parser.add_argument(
        "--gpu_flag",
        help="open and close gpu",
        default=True,
    )
    parser.add_argument(
        "--dir",
        help="model dir",
        default="ade20k-resnet50dilated-ppm_deepsup",
    )
    parser.add_argument(
        "--result",
        help="segmentation result dir",
        default="segmentation",
    )
    parser.add_argument(
        "--checkpoint",
        help="pretrained model checkpoint",
        default="epoch_20.pth",
    )
    args = parser.parse_args()
    if gpu_flag is not None:
        args.gpu_flag = gpu_flag
    if config_path:
        args.config_path = config_path
    if dir:
        args.dir = dir
    if checkpoint:
        args.checkpoint = checkpoint
    if result:
        args.result = result

    cfg.merge_from_file(args.config_path)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()

    logger = setup_logger(distributed_rank=0)  # TODO
    logger.info("Loaded configuration file {}".format(args.config_path))
    logger.info("Running with config:\n{}".format(cfg))

    cfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()
    cfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()

    # absolute paths of model weights
    cfg.MODEL.weights_encoder = os.path.join(args.dir,
                                             'encoder_' + args.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(args.dir,
                                             'decoder_' + args.checkpoint)
    print(cfg.MODEL.weights_encoder)

    assert os.path.exists(cfg.MODEL.weights_encoder) and \
           os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"

    # generate testing image list
    print('-----imgs:', args.imgs)
    if os.path.isdir(args.imgs):
        imgs = find_recursive(args.imgs)
    else:
        imgs = [args.imgs]
    assert len(imgs), "imgs should be a path to image (.jpg) or directory."
    cfg.list_test = [{'fpath_img': x} for x in imgs]

    if not os.path.isdir(args.result):
        os.makedirs(args.result)

    main(cfg, args.gpu, args, progress)
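
find_recursive is not shown in this snippet; a hypothetical sketch of what it is assumed to do (collect image files under a directory, recursively):

import glob
import os

def find_recursive(root_dir, ext='.jpg'):
    # walk root_dir and return every file with the given extension, sorted
    return sorted(glob.glob(os.path.join(root_dir, '**', '*' + ext), recursive=True))
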
Example no. 21
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    # Merge config file.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Print experimental infos.
    save_dir = ""
    logger = setup_logger("AlphAction", save_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(cfg)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + get_pretty_env_info())

    # Build the model.
    model = build_detection_model(cfg)
    model.to("cuda")

    # load weight.
    output_dir = cfg.OUTPUT_DIR
    checkpointer = ActionCheckpointer(cfg, model, save_dir=output_dir)
    checkpointer.load(cfg.MODEL.WEIGHT)

    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    mem_active = has_memory(cfg.IA_STRUCTURE)
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            os.makedirs(output_folder, exist_ok=True)
            output_folders[idx] = output_folder

    # Do inference.
    data_loaders_test = make_data_loader(cfg,
                                         is_train=False,
                                         is_distributed=distributed)
    for output_folder, dataset_name, data_loader_test in zip(
            output_folders, dataset_names, data_loaders_test):
        inference(
            model,
            data_loader_test,
            dataset_name,
            mem_active=mem_active,
            output_folder=output_folder,
        )
        synchronize()
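
synchronize() is assumed to be the usual distributed barrier helper: a no-op unless torch.distributed is initialized with more than one process. A minimal sketch:

import torch.distributed as dist

def synchronize():
    # barrier across all ranks; safe to call in single-GPU runs
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
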
Example no. 22

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--start_loc", type=tuple, default=(0, 0))
    parser.add_argument("--clean_run", type=bool, default=True)
    parser.add_argument("--config_file",
                        type=str,
                        default='../configs/exp1.yaml')
    parser.add_argument("--opts", nargs='*')
    args = parser.parse_args()
    opts = args.opts
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    if opts:
        cfg.merge_from_list(opts)

    cfg.freeze()

    chkpt_dir, log_dir, tb_dir = setup_exp(cfg.SYSTEM.SAVE_ROOT,
                                           cfg.SYSTEM.EXP_NAME, args.clean_run)
    print(f'chkpt_dir:{chkpt_dir}, log_dir:{log_dir}, tb_dir:{tb_dir}')

    writer = tensorboard.SummaryWriter(log_dir=tb_dir)
    logger = setup_logger(cfg.SYSTEM.EXP_NAME, log_dir, 0)

    logger.info(f'cfg: {str(cfg)}')

    glimpse_network = GlimpseNetwork(cfg)
    model = CoreNetwork(cfg, glimpse_network)
    optimizer = optim.Adam(model.parameters(),
Example no. 23
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument("--config_file", type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ[
            'CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # added by gu
    cudnn.benchmark = True

    _1, _2, _3, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    # gpu_device
    device = cfg.MODEL.DEVICE

    if device:
        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
        model.to(device)

    # test data-loader
    test_transforms = build_transforms(cfg, is_train=False)

    query_name = os.listdir(Q_ROOT)
    gallery_name = os.listdir(G_ROOT)

    dataset = [os.path.join(Q_ROOT, x) for x in query_name] + \
              [os.path.join(G_ROOT, x) for x in gallery_name]

    test_set = TestImageDataset(dataset=dataset, transform=test_transforms)

    test_loader = DataLoader(test_set,
                             batch_size=cfg.TEST.IMS_PER_BATCH,
                             shuffle=False,
                             num_workers=12,
                             collate_fn=test_collate_fn)

    result = []

    # _inference
    def _inference(batch):
        model.eval()
        with torch.no_grad():
            data = batch
            data = data.to(device) if torch.cuda.device_count() >= 1 else data
            feat = model(data)
            feat = feat.data.cpu().numpy()
            return feat

    count = 0
    for batch in test_loader:
        count += 1
        feat = _inference(batch)
        result.append(feat)

        if count % 100 == 0:
            print(count)

    result = np.concatenate(result, axis=0)

    query_num = len(query_name)
    query_feat = result[:query_num]
    gallery_feat = result[query_num:]

    pickle.dump([query_feat, query_name],
                open(cfg.OUTPUT_DIR + '/query_feature.feat', 'wb'))
    pickle.dump([gallery_feat, gallery_name],
                open(cfg.OUTPUT_DIR + '/gallery_feature.feat', 'wb'))
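
The two .feat dumps above can be reloaded to build a distance matrix offline; a small sketch (paths are illustrative, and the broadcasted norm is fine for modest gallery sizes):

import pickle
import numpy as np

with open('output/query_feature.feat', 'rb') as f:
    query_feat, query_name = pickle.load(f)
with open('output/gallery_feature.feat', 'rb') as f:
    gallery_feat, gallery_name = pickle.load(f)

dist = np.linalg.norm(query_feat[:, None, :] - gallery_feat[None, :, :], axis=2)
ranks = np.argsort(dist, axis=1)   # gallery indices per query, best match first
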
Example no. 24
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.TRAIN.DATALOADER.IMS_PER_BATCH = (
        cfg.TRAIN.DATALOADER.CATEGORIES_PER_BATCH *
        cfg.TRAIN.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH)
    cfg.VAL.DATALOADER.IMS_PER_BATCH = (
        cfg.VAL.DATALOADER.CATEGORIES_PER_BATCH *
        cfg.VAL.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH)
    cfg.TEST.DATALOADER.IMS_PER_BATCH = (
        cfg.TEST.DATALOADER.CATEGORIES_PER_BATCH *
        cfg.TEST.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH)
    cfg.freeze()

    output_dir = cfg.SOLVER.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("fundus_prediction", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    # build data loaders
    train_loader, val_loader, test_loader, classes_list = make_data_loader(cfg)
    num_classes = len(classes_list)

    model = build_model(cfg, num_classes)
    model.load_param("Overall", cfg.TEST.WEIGHT)

    # CJY: visualize the connections between densenet layers
    #showWeight(model)

    #loss_fn = make_loss(cfg, num_classes)  # modified by gu
    # loss function
    #loss_func = make_loss(cfg, num_classes)  # modified by gu
    #g_loss_func = make_G_loss(cfg, num_classes)
    d_loss_func, lossClasses = make_D_loss(cfg, num_classes)
    loss_funcs = {}
    loss_funcs["G"] = d_loss_func
    loss_funcs["D"] = d_loss_func
    print('Test with the loss type is', cfg.LOSS.TYPE)

    writer_test = SummaryWriter(cfg.SOLVER.OUTPUT_DIR + "/summary/test")

    model_save_epoch = cfg.TEST.WEIGHT.split('/')[-1].split('.')[0].split(
        '_')[-1]

    if model_save_epoch.isdigit():
        step = len(train_loader) * int(model_save_epoch)
    else:
        step = 0

    metrics = do_inference(cfg,
                           model,
                           train_loader,
                           classes_list,
                           loss_funcs,
                           plotFlag=True)

    for preKey in metrics['precision'].keys():
        writer_test.add_scalar("Precision/" + str(preKey),
                               metrics['precision'][preKey], step)

    for recKey in metrics['recall'].keys():
        writer_test.add_scalar("Recall/" + str(recKey),
                               metrics['recall'][recKey], step)

    for aucKey in metrics['roc_auc'].keys():
        writer_test.add_scalar("ROC_AUC/" + str(aucKey),
                               metrics['roc_auc'][aucKey], step)

    writer_test.add_scalar("OverallAccuracy", metrics["overall_accuracy"],
                           step)

    # writer.add_scalar("Val/"+"confusion_matrix", metrics['confusion_matrix'], step)

    # the confusion matrix and ROC curve can be stored as figures
    roc_numpy = metrics["roc_figure"]
    writer_test.add_image("ROC", roc_numpy, step, dataformats='HWC')

    confusion_matrix_numpy = metrics["confusion_matrix_numpy"]
    writer_test.add_image("ConfusionMatrix",
                          confusion_matrix_numpy,
                          step,
                          dataformats='HWC')

    writer_test.close()
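
writer_test.add_image with dataformats='HWC' expects an H x W x C array, so metrics["roc_figure"] is assumed to be a rendered matplotlib figure. One common way to produce such an array (a sketch; tostring_rgb is the Agg-canvas API):

import numpy as np

def figure_to_hwc(fig):
    # rasterize a matplotlib figure into an H x W x 3 uint8 array
    fig.canvas.draw()
    buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    w, h = fig.canvas.get_width_height()
    return buf.reshape(h, w, 3)
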
Example no. 25
def main():
    parser = argparse.ArgumentParser(description="DFDGAN Showing G pic")
    parser.add_argument("--config_file",
                        default="./configs/show_pic.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    datasets_dir = '-'.join(cfg.DATASETS.NAMES)
    output_dir = os.path.join(output_dir, datasets_dir)
    time_string = 'show_pic[{}]'.format(
        time.strftime('%Y-%m-%d-%X', time.localtime(time.time())))
    output_dir = os.path.join(output_dir, time_string)
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    device = cfg.TEST.DEVICE
    if device == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.TEST.DEVICE_ID
    cudnn.benchmark = True
    logger = setup_logger("DFDGAN", output_dir, 0)
    logger.info("Running with config:\n{}".format(cfg))

    data_loader, num_classes = make_dataloaders(cfg)
    E = Encoder(num_classes, cfg.E.LAST_STRIDE, cfg.E.PRETRAIN_PATH,
                cfg.E.NECK, cfg.TEST.NECK_FEAT, cfg.E.NAME,
                cfg.E.PRETRAIN_CHOICE).to(device)
    Ed = Encoder(num_classes, cfg.ED.LAST_STRIDE, cfg.ED.PRETRAIN_PATH,
                 cfg.ED.NECK, cfg.TEST.NECK_FEAT, cfg.ED.NAME,
                 cfg.ED.PRETRAIN_CHOICE).to(device)
    G = DFDGenerator(cfg.G.PRETRAIN_PATH,
                     cfg.G.PRETRAIN_CHOICE,
                     noise_size=cfg.TRAIN.NOISE_SIZE).to(device)
    for _, batch in enumerate(data_loader):
        img_x1, img_x2, img_y1, img_y2, target_pid, target_setid = batch
        img_x1, img_x2, img_y1, img_y2, target_pid, target_setid = img_x1.to(
            device), img_x2.to(device), img_y1.to(device), img_y2.to(
                device), target_pid.to(device), target_setid.to(device)
        g_img = G(E(img_x1)[0], Ed(img_y1)[0])
        img_x1_PIL = transforms.ToPILImage()(img_x1[0].cpu()).convert('RGB')
        img_x1_PIL.save(os.path.join(output_dir, 'img_x1.jpg'))
        img_y1_PIL = transforms.ToPILImage()(img_y1[0].cpu()).convert('RGB')
        img_y1_PIL.save(os.path.join(output_dir, 'img_y1.jpg'))
        g_img_PIL = transforms.ToPILImage()(g_img[0].cpu()).convert('RGB')
        g_img_PIL.save(os.path.join(output_dir, 'g_img.jpg'))
        break
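
One assumption in the image saving above: ToPILImage expects float tensors in [0, 1] (or uint8), so if the data loader normalized the images they should be de-normalized first. A hedged sketch:

import torch

def denorm(img, mean, std):
    # invert transforms.Normalize so ToPILImage renders sensible colors
    mean = torch.tensor(mean).view(-1, 1, 1)
    std = torch.tensor(std).view(-1, 1, 1)
    return (img * std + mean).clamp(0.0, 1.0)
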
Example no. 26
def train(config_file, resume=False, **kwargs):
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    # [PersonReID_Dataset_Downloader(cfg.DATASETS.STORE_DIR,dataset) for dataset in cfg.DATASETS.SOURCE]
    # [PersonReID_Dataset_Downloader(cfg.DATASETS.STORE_DIR,dataset) for dataset in cfg.DATASETS.TARGET]
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log', resume)
    if not resume:
        logger.info("Using {} GPUS".format(1))
        logger.info("Loaded configuration file {}".format(config_file))
        logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    train_loader, _, _, num_classes = data_loader(cfg,
                                                  cfg.DATASETS.SOURCE,
                                                  merge=cfg.DATASETS.MERGE)

    model = getattr(models, cfg.MODEL.NAME)(num_classes, cfg.MODEL.LAST_STRIDE,
                                            cfg.MODEL.POOL)
    if resume:
        checkpoints = get_last_stats(output_dir)
        try:
            model_dict = torch.load(checkpoints[cfg.MODEL.NAME])
        except KeyError:
            model_dict = torch.load(checkpoints[str(type(model))])
        model.load_state_dict(model_dict)
        if device:
            model.to(device)  # must be done before the optimizer generation
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    base_epo = 0
    if resume:
        optimizer.load_state_dict(torch.load(checkpoints['opt']))
        sch_dict = torch.load(checkpoints['sch'])
        scheduler.load_state_dict(sch_dict)
        base_epo = checkpoints['epo']

    loss_fn = make_loss(cfg)

    if not resume:
        logger.info("Start training")
    since = time.time()
    for epoch in range(epochs):
        count = 0
        running_loss = 0.0
        running_acc = 0
        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels, domains = data
            if device:
                model.to(device)
                images, labels, domains = images.to(device), labels.to(
                    device), domains.to(device)

            optimizer.zero_grad()

            scores, feats = model(images)
            loss = loss_fn(scores, feats, labels)

            loss.backward()
            optimizer.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (
                scores[0].max(1)[1] == labels).float().mean().item()

        logger.info(
            "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
            .format(epoch + 1 + base_epo, count, len(train_loader),
                    running_loss / count, running_acc / count,
                    scheduler.get_lr()[0]))
        scheduler.step()

        if (epoch + 1 + base_epo) % checkpoint_period == 0:
            model.cpu()
            model.save(output_dir, epoch + 1 + base_epo)
            torch.save(
                optimizer.state_dict(),
                os.path.join(output_dir,
                             'opt_epo' + str(epoch + 1 + base_epo) + '.pth'))
            torch.save(
                scheduler.state_dict(),
                os.path.join(output_dir,
                             'sch_epo' + str(epoch + 1 + base_epo) + '.pth'))

        # Validation
        if (epoch + base_epo + 1) % eval_period == 0:
            # Validation on Target Dataset
            for target in cfg.DATASETS.TARGET:
                mAPs = []
                cmcs = []
                for i in range(iteration):

                    set_seeds(i)

                    _, val_loader, num_query, _ = data_loader(cfg, (target, ),
                                                              merge=False)

                    all_feats = []
                    all_pids = []
                    all_camids = []

                    for data in tqdm(val_loader,
                                     desc='Feature Extraction',
                                     leave=False):
                        model.eval()
                        with torch.no_grad():
                            images, pids, camids = data
                            if device:
                                model.to(device)
                                images = images.to(device)

                            feats = model(images)
                            feats /= feats.norm(dim=-1, keepdim=True)

                        all_feats.append(feats)
                        all_pids.extend(np.asarray(pids))
                        all_camids.extend(np.asarray(camids))

                    cmc, mAP = evaluation(all_feats, all_pids, all_camids,
                                          num_query)
                    mAPs.append(mAP)
                    cmcs.append(cmc)

                mAP = np.mean(np.array(mAPs))
                cmc = np.mean(np.array(cmcs), axis=0)

                mAP_std = np.std(np.array(mAPs))
                cmc_std = np.std(np.array(cmcs), axis=0)

                logger.info("Validation Results: {} - Epoch: {}".format(
                    target, epoch + 1 + base_epo))
                logger.info("mAP: {:.1%} (std: {:.3%})".format(mAP, mAP_std))
                for r in [1, 5, 10]:
                    logger.info(
                        "CMC curve, Rank-{:<3}:{:.1%} (std: {:.3%})".format(
                            r, cmc[r - 1], cmc_std[r - 1]))

            reset()

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
Example no. 27
def merge_from_dict(cfg, arg_dict):
    for key in arg_dict:
        if arg_dict[key] is not None:
            cfg.merge_from_list([key, arg_dict[key]])
    return cfg
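
A short usage sketch for merge_from_dict: keys with None values are skipped, so only the options a caller actually set get merged (the flag names are illustrative):

# hypothetical CLI flags; OUTPUT_DIR stays untouched because its value is None
arg_dict = {'SOLVER.MAX_EPOCHS': 120, 'OUTPUT_DIR': None}
cfg = merge_from_dict(cfg, arg_dict)
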
Example no. 28
def inv_root_lr(step):
    return 1 / np.sqrt(1 + step // 50_000)
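
For reference, the schedule above is a step function of the inverse square root, verified by hand:

# inv_root_lr(0)       == 1.0           (step // 50_000 == 0)
# inv_root_lr(50_000)  == 1 / sqrt(2)   (~0.7071)
# inv_root_lr(150_000) == 1 / sqrt(4)   (== 0.5)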


def get_lr(optimizer):
    lrs = []
    for param_group in optimizer.param_groups:
        lrs.append(param_group['lr'])
    assert len(set(lrs)) == 1
    return lrs[0]


if __name__ == '__main__':

    cfg.merge_from_list(['MODEL.W', int(sys.argv[1])])

    _add_hparams({**cfg.TRAIN, **cfg.MODEL, **cfg.DATA}, {})

    x_train, x_test, y_train, y_test = get_data()
    train_dataloader = inf_data_gen(x_train, y_train, cfg.TRAIN.BATCH_SIZE)
    X_test = torch.Tensor(x_test).to(cfg.SYSTEM.DEVICE)
    Y_test = torch.Tensor(y_test).to(cfg.SYSTEM.DEVICE)

    net = Net(D=cfg.MODEL.D, W=cfg.MODEL.W)
    net.to(cfg.SYSTEM.DEVICE)

    optimizer = torch.optim.SGD(net.parameters(), lr=cfg.TRAIN.LEARNING_RATE)
    scheduler = LambdaLR(optimizer, lr_lambda=inv_root_lr)
    pbar = tqdm(train_dataloader, total=cfg.TRAIN.STEPS)
    for n_iter, (X, T) in enumerate(pbar, start=1):
Example no. 29
                    metavar='DIR',
                    help='path to datasets',
                    default=os.getenv('PT_DATA_DIR', './datasets'))
parser.add_argument('--output_dir',
                    type=str,
                    default=os.getenv('PT_OUTPUT_DIR', '/tmp'))
parser.add_argument(
    "opts",
    help="Modify config options using the command-line",
    default=None,
    nargs=argparse.REMAINDER,
)
args = parser.parse_args()

cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
assert (device == 'cuda')
# Setup CUDA, GPU & distributed training
args.num_gpus = int(
    os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
cfg.distributed = args.num_gpus > 1

if args.local_rank == -1:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args.n_gpu = torch.cuda.device_count()
else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    torch.distributed.init_process_group(backend=cfg.DISTRIBUTED_BACKEND,
Example no. 30
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Template MNIST Inference")
    parser.add_argument("--config-file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = os.path.join(cfg.OUTPUT_ROOT, cfg.PROJECT_NAME,
                              cfg.EXPERIMENT_NAME)
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("template_model", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    model = build_model(cfg).eval().to(cfg.MODEL.DEVICE)
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT)['model'])
    # load train data
    i_transform = build_input_transforms(cfg, is_train=False)
    train_dataset = ImageFolder(cfg.DATASETS.TRAIN_ROOT, i_transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.TEST.IMS_PER_BATCH,
                              shuffle=False,
                              num_workers=cfg.DATALOADER.NUM_WORKERS)
    # load val data
    val_dataset = ImageFolder(cfg.DATASETS.TEST_ROOT, i_transform)
    val_loader = DataLoader(val_dataset,
                            batch_size=cfg.TEST.IMS_PER_BATCH,
                            shuffle=False,
                            num_workers=cfg.DATALOADER.NUM_WORKERS)

    # extract latent features for the train and val splits
    train_zs, train_labels = extract_features(model, train_loader)
    val_zs, val_labels = extract_features(model, val_loader)

    logreg = LogisticRegression(solver='liblinear').fit(train_zs, train_labels)
    train_acc = logreg.score(train_zs, train_labels)
    val_acc = logreg.score(val_zs, val_labels)
    print('Train acc: {0:3.3f}, Val acc: {1:3.3f}'.format(train_acc, val_acc))