Code Example #1
def process_eval_image(
    cfg,
    fname_in,
    roi,
    fname_out,
    spatial_levels,
    image_helper,
    model,
    pca,
    eval_dataset_name,
    verbose=False,
):
    if is_revisited_dataset(eval_dataset_name):
        img = image_helper.load_and_prepare_revisited_image(fname_in, roi=roi)
    elif is_instre_dataset(eval_dataset_name):
        img = image_helper.load_and_prepare_instre_image(fname_in)
    else:
        img = image_helper.load_and_prepare_image(fname_in, roi=roi)

    # The model output is always a list; take the first element as the activation map.
    vc = img.unsqueeze(0).cuda()
    activation_map = model(vc)[0].cpu()

    if verbose:
        print(f"Eval image raw activation map shape: { activation_map.shape }")

    # process the features: rmac | gem pooling | l2 norm
    if cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "rmac":
        descriptors = get_rmac_descriptors(
            activation_map,
            spatial_levels,
            pca=pca,
            normalize=cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES,
        )
    elif cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "gem":
        descriptors = gem(
            activation_map,
            p=cfg.IMG_RETRIEVAL.GEM_POOL_POWER,
            add_bias=True,
        )
    else:
        descriptors = activation_map

    # Optionally l2 normalize the features.
    if (cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES
            and cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE != "rmac"):
        # RMAC performs normalization within the algorithm, hence we skip it here.
        descriptors = l2n(descriptors, dim=1)

    # Optionally apply pca.
    if pca and cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE != "rmac":
        # RMAC performs pca within the algorithm, hence we skip it here.
        descriptors = pca.apply(descriptors)

    if fname_out:
        save_file(descriptors.data.numpy(), fname_out, verbose=False)
    return descriptors.data.numpy()
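
The l2n helper used above comes from VISSL's retrieval utilities and is not defined in this snippet. As a minimal standalone sketch (an assumption about its behavior, not the library's actual implementation), L2-normalizing descriptors along a dimension looks like this:

import torch

def l2n_sketch(x: torch.Tensor, dim: int = 1, eps: float = 1e-6) -> torch.Tensor:
    # Divide each descriptor by its L2 norm along dim; eps guards against
    # division by zero for all-zero descriptors.
    return x / (x.norm(p=2, dim=dim, keepdim=True) + eps)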
Code Example #2
    def process_train_image(i, out_dir, verbose=False):
        if i % LOG_FREQUENCY == 0:
            logging.info(f"Train Image: {i}"),

        fname_out = None
        if out_dir:
            fname_out = f"{out_dir}/{i}.npy"

        if fname_out and PathManager.exists(fname_out):
            feat = load_file(fname_out)
            train_features.append(feat)
        else:
            fname_in = train_dataset.get_filename(i)
            if is_revisited_dataset(train_dataset_name):
                img = image_helper.load_and_prepare_revisited_image(fname_in,
                                                                    roi=None)
            elif is_whiten_dataset(train_dataset_name):
                img = image_helper.load_and_prepare_whitening_image(fname_in)
            else:
                img = image_helper.load_and_prepare_image(fname_in, roi=None)
            # The model output is always a list; take the first element as the activation map.
            vc = img.unsqueeze(0).cuda()
            activation_map = model(vc)[0].cpu()

            if verbose:
                print(
                    f"Train Image raw activation map shape: { activation_map.shape }"
                )

            # once we have the features,
            # we can perform: rmac | gem pooling | l2 norm
            if cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "rmac":
                descriptors = get_rmac_descriptors(
                    activation_map,
                    spatial_levels,
                    normalize=cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES,
                )
            elif cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "gem":
                descriptors = gem(
                    activation_map,
                    p=cfg.IMG_RETRIEVAL.GEM_POOL_POWER,
                    add_bias=True,
                )
            else:
                descriptors = activation_map

            # Optionally l2 normalize the features.
            if (cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES
                    and cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE != "rmac"):
                # RMAC performs normalization within the algorithm, hence we skip it here.
                descriptors = l2n(descriptors, dim=1)

            if fname_out:
                save_file(descriptors.data.numpy(), fname_out, verbose=False)
            train_features.append(descriptors.data.numpy())
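
The load_file / save_file helpers and PathManager are VISSL and fvcore utilities. The caching pattern they implement here (reuse a per-image .npy file if it exists, otherwise compute the features and persist them) can be illustrated with a plain-numpy sketch; the helper name and the local-filesystem assumption are mine:

import os
import numpy as np

def load_or_compute(fname_out, compute_fn):
    # Reuse the cached .npy file when present; otherwise compute the
    # features, write them to disk, and return them.
    if fname_out and os.path.exists(fname_out):
        return np.load(fname_out)
    feat = compute_fn()
    if fname_out:
        np.save(fname_out, feat)
    return feat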
Code Example #3
def gem_pool_and_save_features(features, p, add_bias, gem_out_fname):
    if PathManager.exists(gem_out_fname):
        logging.info("Loading train GeM features...")
        features = load_file(gem_out_fname)
    else:
        logging.info(f"GeM pooling features: {features.shape}")
        features = l2n(gem(features, p=p, add_bias=add_bias))
        save_file(features, gem_out_fname)
        logging.info(f"Saved GeM features to: {gem_out_fname}")
    return features
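
For context, generalized-mean (GeM) pooling reduces an N x C x H x W activation map to one C-dimensional descriptor per image by taking the p-th power mean over the spatial grid. A minimal sketch of the standard formulation (not VISSL's gem function, which also supports an add_bias option) is:

import torch
import torch.nn.functional as F

def gem_pool(x: torch.Tensor, p: float = 3.0, eps: float = 1e-6) -> torch.Tensor:
    # x: (N, C, H, W). Clamp to avoid negative values, raise to the power p,
    # average over the spatial grid, then take the p-th root, giving (N, C).
    pooled = F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1)))
    return pooled.pow(1.0 / p).squeeze(-1).squeeze(-1)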
Code Example #4
def post_process_image(
    cfg,
    model_output,
    pca=None,
):
    # Convert the raw model output into a numpy array, add a batch dimension,
    # and wrap it in a one-element list of tensors so the pooling branches
    # below can treat it like a list of activation maps.
    train_feature = np.array(
        [m if isinstance(m, list) else m.tolist() for m in model_output])
    train_feature = [torch.from_numpy(np.expand_dims(train_feature, axis=0))]

    if cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "rmac":
        descriptor = get_rmac_descriptors(
            train_feature[0],
            cfg.IMG_RETRIEVAL.SPATIAL_LEVELS,
            normalize=cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES,
            pca=pca,
        )
    elif cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "gem":
        descriptor = get_average_gem(
            train_feature,
            p=cfg.IMG_RETRIEVAL.GEM_POOL_POWER,
            add_bias=True,
        )
    else:
        descriptor = torch.mean(torch.stack(train_feature), dim=0)
        descriptor = descriptor.reshape(descriptor.shape[0], -1)

    # Optionally l2 normalize the features.
    if (cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES
            and cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE != "rmac"):
        # RMAC performs normalization within the algorithm, hence we skip it here.
        descriptor = l2n(descriptor, dim=1)

    # Optionally apply pca.
    if pca and cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE != "rmac":
        # RMAC performs pca within the algorithm, hence we skip it here.
        descriptor = pca.apply(descriptor)

    return descriptor.data.numpy()
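
pca here is a fitted PCA object whose apply method projects and whitens descriptors; the class itself is not shown in these snippets. Under the assumption that it performs standard PCA whitening (center, project onto the principal directions, divide by the square root of the eigenvalues), a sketch could look like this, with all parameter names hypothetical:

import torch

def apply_pca_whitening(descriptors, mean, eigvecs, eigvals, eps=1e-6):
    # descriptors: (N, D); mean: (D,); eigvecs: (D, K); eigvals: (K,).
    # Center, project onto the top-K principal directions, and whiten.
    projected = (descriptors - mean) @ eigvecs
    return projected / (eigvals.sqrt() + eps)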
Code Example #5
def process_eval_image(
    cfg,
    fname_in,
    roi,
    fname_out,
    spatial_levels,
    image_helper,
    model,
    pca,
    eval_dataset_name,
    verbose=False,
):
    with PerfTimer("read_sample", PERF_STATS):
        if is_revisited_dataset(eval_dataset_name):
            img = image_helper.load_and_prepare_revisited_image(fname_in,
                                                                roi=roi)
        elif is_instre_dataset(eval_dataset_name):
            img = image_helper.load_and_prepare_instre_image(fname_in)
        else:
            img = image_helper.load_and_prepare_image(fname_in, roi=roi)

    with PerfTimer("extract_features", PERF_STATS):
        # Extract activation maps for each image scaling; IMG_SCALINGS falls back to [1] (the original scale).
        img_scalings = cfg.IMG_RETRIEVAL.IMG_SCALINGS or [1]
        activation_maps = extract_activation_maps(img, model, img_scalings)

    if verbose:
        print(
            f"Example eval image raw activation map shape: { activation_maps[0].shape }"  # NOQA
        )
    with PerfTimer("post_process_features", PERF_STATS):
        # process the features: rmac | gem pooling | l2 norm
        if cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "rmac":
            descriptors = get_rmac_descriptors(
                activation_maps[0],
                spatial_levels,
                pca=pca,
                normalize=cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES,
            )
        elif cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "gem":
            descriptors = get_average_gem(
                activation_maps,
                p=cfg.IMG_RETRIEVAL.GEM_POOL_POWER,
                add_bias=True,
            )
        else:
            descriptors = torch.mean(torch.stack(activation_maps), dim=0)
            descriptors = descriptors.reshape(descriptors.shape[0], -1)

        # Optionally l2 normalize the features.
        if (cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES
                and cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE != "rmac"):
            # RMAC performs normalization within the algorithm, hence we skip it here.
            descriptors = l2n(descriptors, dim=1)

        # Optionally apply pca.
        if pca and cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE != "rmac":
            # RMAC performs pca within the algorithm, hence we skip it here.
            descriptors = pca.apply(descriptors)

    if fname_out:
        save_file(descriptors.data.numpy(), fname_out, verbose=False)

    return descriptors.data.numpy()
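
extract_activation_maps is not shown in these snippets. Judging from its use, it runs the model once per configured image scaling and returns a list of activation maps, one per scale. A plausible sketch under that assumption (not VISSL's actual implementation):

import torch
import torch.nn.functional as F

def extract_activation_maps_sketch(img, model, img_scalings):
    # img: (C, H, W) prepared image tensor. For each scale factor, resize the
    # batched image, run the model, and collect the first output as the map.
    activation_maps = []
    with torch.no_grad():
        for scale in img_scalings:
            scaled = img.unsqueeze(0)
            if scale != 1:
                scaled = F.interpolate(
                    scaled, scale_factor=scale, mode="bilinear", align_corners=False
                )
            activation_maps.append(model(scaled.cuda())[0].cpu())
    return activation_maps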
Code Example #6
    def process_train_image(i, out_dir, verbose=False):
        if i % LOG_FREQUENCY == 0:
            logging.info(f"Train Image: {i}"),

        fname_out = None
        if out_dir:
            fname_out = f"{out_dir}/{i}.npy"

        if fname_out and g_pathmgr.exists(fname_out):
            feat = load_file(fname_out)
            train_features.append(feat)
        else:
            with PerfTimer("read_sample", PERF_STATS):
                fname_in = train_dataset.get_filename(i)
                if is_revisited_dataset(train_dataset_name):
                    img = image_helper.load_and_prepare_revisited_image(
                        fname_in, roi=None)
                elif is_whiten_dataset(train_dataset_name):
                    img = image_helper.load_and_prepare_whitening_image(
                        fname_in)
                else:
                    img = image_helper.load_and_prepare_image(fname_in,
                                                              roi=None)

            with PerfTimer("extract_features", PERF_STATS):
                img_scalings = cfg.IMG_RETRIEVAL.IMG_SCALINGS or [1]
                activation_maps = extract_activation_maps(
                    img, model, img_scalings)

            if verbose:
                print(
                    f"Example train Image raw activation map shape: { activation_maps[0].shape }"  # NOQA
                )

            with PerfTimer("post_process_features", PERF_STATS):
                # once we have the features,
                # we can perform: rmac | gem pooling | l2 norm
                if cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "rmac":
                    descriptors = get_rmac_descriptors(
                        activation_maps[0],
                        spatial_levels,
                        normalize=cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES,
                    )
                elif cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == "gem":
                    descriptors = get_average_gem(
                        activation_maps,
                        p=cfg.IMG_RETRIEVAL.GEM_POOL_POWER,
                        add_bias=True,
                    )
                else:
                    descriptors = torch.mean(torch.stack(activation_maps),
                                             dim=0)
                    descriptors = descriptors.reshape(descriptors.shape[0], -1)

                # Optionally l2 normalize the features.
                if (cfg.IMG_RETRIEVAL.NORMALIZE_FEATURES
                        and cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE != "rmac"):
                    # RMAC performs normalization within the algorithm, hence we skip it here.
                    descriptors = l2n(descriptors, dim=1)

            if fname_out:
                save_file(descriptors.data.numpy(), fname_out, verbose=False)
            train_features.append(descriptors.data.numpy())