Example no. 1
def main():
    # Get config for this run
    hparams = parse_args()

    # Setup logger
    config = {
        "handlers": [
            {
                "sink": sys.stdout,
                "format": "{time:[MM-DD HH:mm]} - {message}"
            },
            {
                "sink": f"{hparams.outdir}/logs.txt",
                "format": "{time:[MM-DD HH:mm]} - {message}"
            },
        ],
    }
    logger.configure(**config)
    logger.info(f"Parameters used for training: {hparams}")

    # Fix seeds for reproducibility
    pt.utils.misc.set_random_seed(hparams.seed)

    # Save config
    os.makedirs(hparams.outdir, exist_ok=True)
    with open(os.path.join(hparams.outdir, "config.yaml"), "w") as f:
        yaml.dump(vars(hparams), f)

    # Get model
    model = Model(arch=hparams.arch,
                  model_params=hparams.model_params,
                  embedding_size=hparams.embedding_size,
                  pooling=hparams.pooling).cuda()

    # Get loss
    # loss = LOSS_FROM_NAME[hparams.criterion](in_features=hparams.embedding_size, **hparams.criterion_params).cuda()
    loss = LOSS_FROM_NAME["cross_entropy"].cuda()
    logger.info(f"Loss for this run is: {loss}")

    if hparams.resume:
        checkpoint = torch.load(
            hparams.resume, map_location=lambda storage, loc: storage.cuda())
        model.load_state_dict(checkpoint["state_dict"], strict=True)
        loss.load_state_dict(checkpoint["loss"], strict=True)

    if hparams.freeze_bn:
        freeze_batch_norm(model)

    # Get optimizer
    # optim_params = pt.utils.misc.filter_bn_from_wd(model)
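    # Metric-learning criteria (e.g. ArcFace-style heads) typically carry trainable weights of their own, so the loss parameters are optimized jointly with the model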
    optim_params = list(loss.parameters()) + list(model.parameters())  # add loss params
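    # NOTE: lr=0 is a placeholder; the learning rate is expected to be set per phase by the PhasesScheduler below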
    optimizer = optimizer_from_name(hparams.optim)(
        optim_params, lr=0, weight_decay=hparams.weight_decay, amsgrad=True)

    num_params = pt.utils.misc.count_parameters(model)[0]
    logger.info(f"Model size: {num_params / 1e6:.02f}M")
    # logger.info(model)

    # The scheduler is an advanced way of planning the experiment
    scheduler = pt.fit_wrapper.callbacks.PhasesScheduler(hparams.phases)

    # Save logs
    TB_callback = pt_clb.TensorBoard(hparams.outdir, log_every=20)

    # Get dataloaders
    train_loader, val_loader, val_indexes = get_dataloaders(
        root=hparams.root,
        augmentation=hparams.augmentation,
        size=hparams.size,
        val_size=hparams.val_size,
        batch_size=hparams.batch_size,
        workers=hparams.workers,
    )

    # Load validation query / gallery split and resort it according to indexes from sampler
    df_val = pd.read_csv(os.path.join(hparams.root, "train_val.csv"))
    df_val = df_val[df_val["is_train"].astype(bool) == False]
    val_is_query = df_val.is_query.values[val_indexes].astype(bool)

    logger.info(f"Start training")
    # Init runner
    runner = pt.fit_wrapper.Runner(
        model,
        optimizer,
        criterion=loss,
        callbacks=[
            # pt_clb.BatchMetrics([pt.metrics.Accuracy(topk=1)]),
            ContestMetricsCallback(
                is_query=val_is_query[:1280] if hparams.debug else val_is_query
            ),
            pt_clb.Timer(),
            pt_clb.ConsoleLogger(),
            pt_clb.FileLogger(),
            TB_callback,
            CheckpointSaver(hparams.outdir,
                            save_name="model.chpn",
                            monitor="target",
                            mode="max"),
            CheckpointSaver(hparams.outdir,
                            save_name="model_mapr.chpn",
                            monitor="mAP@R",
                            mode="max"),
            CheckpointSaver(hparams.outdir, save_name="model_loss.chpn"),
            scheduler,
            # EMA must go after other checkpoints
            pt_clb.ModelEma(model, hparams.ema_decay)
            if hparams.ema_decay else pt_clb.Callback(),
        ],
        use_fp16=hparams.use_fp16,  # use mixed precision by default (previously: hparams.opt_level != "O0")
    )

    if hparams.head_warmup_epochs > 0:
        # Freeze model
        for p in model.parameters():
            p.requires_grad = False
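        # With every model parameter frozen, only the criterion's trainable parameters are updated during warmup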

        runner.fit(
            train_loader,
            # val_loader=val_loader,
            epochs=hparams.head_warmup_epochs,
            steps_per_epoch=20 if hparams.debug else None,
            # val_steps=20 if hparams.debug else None,
        )

        # Unfreeze model
        for p in model.parameters():
            p.requires_grad = True

        if hparams.freeze_bn:
            freeze_batch_norm(model)

        # Re-init optimizer params to avoid NaNs in the loss
        optim_params = list(loss.parameters()) + list(model.parameters())

        optimizer = optimizer_from_name(hparams.optim)(
            optim_params,
            lr=0,
            weight_decay=hparams.weight_decay,
            amsgrad=True)

        runner.state.model = model
        runner.state.optimizer = optimizer
        runner.state.criterion = loss

    # Train
    runner.fit(
        train_loader,
        # val_loader=val_loader,
        start_epoch=hparams.head_warmup_epochs,
        epochs=scheduler.tot_epochs,
        steps_per_epoch=20 if hparams.debug else None,
        # val_steps=20 if hparams.debug else None,
    )

    logger.info(f"Loading best model")
    checkpoint = torch.load(os.path.join(hparams.outdir, f"model.chpn"))
    model.load_state_dict(checkpoint["state_dict"], strict=True)
    # runner.state.model = model
    # loss.load_state_dict(checkpoint["loss"], strict=True)

    # Evaluate
    _, [acc1, map10, target, mapR] = runner.evaluate(
        val_loader,
        steps=20 if hparams.debug else None,
    )

    logger.info(
        f"Val: Acc@1 {acc1:0.5f}, mAP@10 {map10:0.5f}, Target {target:0.5f}, mAP@R {mapR:0.5f}"
    )

    # Save params used for training and final metrics into separate TensorBoard file
    metric_dict = {
        "hparam/Acc@1": acc1,
        "hparam/mAP@10": map10,
        "hparam/mAP@R": target,
        "hparam/Target": mapR,
    }

    # Convert all lists / dicts to avoid TB error
    hparams.phases = str(hparams.phases)
    hparams.model_params = str(hparams.model_params)
    hparams.criterion_params = str(hparams.criterion_params)

    with pt.utils.tensorboard.CorrectedSummaryWriter(hparams.outdir) as writer:
        writer.add_hparams(hparam_dict=vars(hparams), metric_dict=metric_dict)
Example no. 2
def test(hparams):
    # Check that folder exists
    assert hparams.config_path.exists()

    # Read config
    with open(hparams.config_path / "config.yaml", "r") as file:
        model_configs = yaml.load(file, Loader=yaml.FullLoader)
    model_configs.update(vars(hparams))
    hparams = argparse.Namespace(**model_configs)

    # Get model
    model = Model(
        arch=hparams.arch,
        model_params=hparams.model_params,
        embedding_size=hparams.embedding_size,
        pooling=hparams.pooling).cuda()
    # logger.info(model)

    # Init
    checkpoint = torch.load(hparams.config_path / "model.chpn")
    model.load_state_dict(checkpoint["state_dict"], strict=False)

    # -------------- Get embeddings for val and test data --------------
    if hparams.extract_embeddings:
        if hparams.validation:
            print(f"Using size {hparams.val_size}")
            loader, indexes = get_val_dataloader(
                root=hparams.root,
                augmentation="val",
                batch_size=hparams.batch_size,
                size=hparams.val_size,
                workers=hparams.workers,
            )

            # Load validation query / gallery split and sort it according to indexes from sampler
            df_val = pd.read_csv(os.path.join(hparams.root, "train_val.csv"))
            df_val = df_val[df_val["is_train"].astype(bool) == False].iloc[indexes]

            val_embeddings = predict_from_loader(model, loader)

            # Hack to save torch.Tensor into pd.DataFrame
            df_val["embeddings"] = list(map(lambda r: np.array(r).tolist(), val_embeddings))
            # Save results into folder with logs
            df_val.to_csv(hparams.config_path / "train_val.csv", index=False)
            del val_embeddings
            logger.info("Finished extracting validation embeddings")

        if hparams.test:
            loader, indexes = get_test_dataloader(
                root=hparams.root,
                augmentation="test",
                batch_size=hparams.batch_size,
                size=hparams.val_size,
                workers=hparams.workers,
            )
            # Load test DF and sort it according to indexes from sampler
            df_test = pd.read_csv(os.path.join(hparams.root, "test_A.csv")).iloc[indexes]
            test_embeddings = predict_from_loader(model, loader)

            # Hack to save torch.Tensor into pd.DataFrame
            df_test["embeddings"] = list(map(lambda r: np.array(r).tolist(), test_embeddings))

            # Save results into folder with logs
            df_test.to_csv(hparams.config_path / "test_A.csv", index=False)
            del test_embeddings
            logger.info("Finished extracting test embeddings")

    # -------------- Test model on validation dataset --------------
    if hparams.validation:
        # Read DF
        df_val = pd.read_csv(hparams.config_path / "train_val.csv")
        val_embeddings = torch.tensor(list(map(eval, df_val["embeddings"].values)))
        query_mask = df_val["is_query"].values.astype(bool)
        val_labels = df_val["label"].values

        # Shape (n_embeddings, embedding_dim)
        query_embeddings, gallery_embeddings = val_embeddings[query_mask], val_embeddings[~query_mask]
        query_labels, gallery_labels = val_labels[query_mask], val_labels[~query_mask]
        logger.info(f"Validation query size - {len(query_embeddings)}, gallery size - {len(gallery_embeddings)}")
        del val_embeddings

        if hparams.dba:
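            # DBA: gallery embeddings are expanded with their own top-10 neighbours (database-side augmentation)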
            gallery_embeddings = query_expansion(gallery_embeddings, gallery_embeddings, topk=10, alpha=None)

        if hparams.aqe:
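            # aQE: each query is expanded with its top-3 gallery neighbours using alpha=3 (alpha query expansion)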
            query_embeddings = query_expansion(query_embeddings, gallery_embeddings, topk=3, alpha=3)

        # Shape (query_size x gallery_size)
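        # True where the gallery item shares the query's label (ground truth for the retrieval metrics)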
        conformity_matrix = torch.tensor(query_labels.reshape(-1, 1) == gallery_labels)

        # Matrix of pairwise distances (Euclidean; for L2-normalized embeddings this gives the same ranking as cosine distance)
        distances = torch.cdist(query_embeddings, gallery_embeddings)

        acc1 = cmc_score_count(distances, conformity_matrix, topk=1)
        map10 = map_at_k(distances, conformity_matrix, topk=10)
        mapR = map_at_k(distances, conformity_matrix, topk=None)

        logger.info(
            f"Val: Acc@1 {acc1:0.5f}, mAP@10 {map10:0.5f}, Target {0.5 * acc1 + 0.5 * map10:0.5f}, mAP@R {mapR:0.5f}")

    # -------------- Predict on test dataset --------------
    if hparams.test:
        df_test = pd.read_csv(hparams.config_path / "test_A.csv")
        test_embeddings = torch.tensor(list(map(eval, df_test["embeddings"].values)))
        query_mask = df_test["is_query"].values.astype(bool)

        # Shape (n_embeddings, embedding_dim)
        query_embeddings, gallery_embeddings = test_embeddings[query_mask], test_embeddings[~query_mask]
        query_files, gallery_files = df_test["file_path"].values[query_mask], df_test["file_path"].values[~query_mask]
        logger.info(f"Test query size - {len(query_embeddings)}, gallery size - {len(gallery_embeddings)}")
        del test_embeddings

        if hparams.dba:
            gallery_embeddings = query_expansion(gallery_embeddings, gallery_embeddings, topk=10, alpha=None)

        if hparams.aqe:
            query_embeddings = query_expansion(query_embeddings, gallery_embeddings, topk=3, alpha=3)

        # Matrix of pairwise distances between query and gallery embeddings
        distances = torch.cdist(query_embeddings, gallery_embeddings)
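        # For each query row, gallery indices sorted by increasing distance (nearest first)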
        perm_matrix = torch.argsort(distances)

        logger.info(f"Creating submission{'_dba' if hparams.dba else ''}{'_aqe' if hparams.aqe else ''}_{hparams.val_size}.csv")
        data = {
            "image_id": [],
            "gallery_img_list": []
        }

        for idx in tqdm(range(len(query_files))):
            query_file = query_files[idx].split("/")[1]
            predictions = gallery_files[perm_matrix[:, : 10][idx]]
            predictions = [p.split("/")[1] for p in predictions]
            data["image_id"].append(query_file)
            data["gallery_img_list"].append(predictions)

        df = pd.DataFrame(data=data)
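        # Serialize each gallery list as "{img1,img2,...}" (the format the submission file apparently expects)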
        df["gallery_img_list"] = df["gallery_img_list"].apply(lambda x: '{{{}}}'.format(",".join(x))).astype(str)
        lines = [f"{x},{y}" for x, y in zip(data["image_id"], df["gallery_img_list"])]
        submission_name = f"submission{'_dba' if hparams.dba else ''}{'_aqe' if hparams.aqe else ''}_{hparams.val_size}.csv"
        with open(hparams.config_path / submission_name, "w") as f:
            for line in lines:
                f.write(line + '\n')