Example #1
import os

import numpy as np
import torch
from tqdm import tqdm

# Project-specific helpers (TripletNet, create_embedding_net, QueryExtractor,
# get_query_embedding, perform_pca_on_single_vector, get_preds,
# get_preds_and_visualize, ap_per_query, get_gt_web) are assumed to be
# importable from the surrounding repository.

def inference_on_single_labelled_image_pca(
    query_img_file,
    labels_dir,
    img_dir,
    img_fts_dir,
    weights_file,
    top_k=1000,
    plot=True,
):
    """
    Returns the average precision for a given query image and optionally plots the top 20 results

    Args:
        query_img_file  : path of the query image file
        labels_dir  : directory holding the ground truth labels
        img_dir     : directory holding the images
        img_fts_dir : directory holding the PCA-reduced features generated by the create_db.py script
        weights_file: path of the trained weights file
        top_k       : number of top-ranked results used to compute the average precision
        plot        : if True, the top 20 results are plotted

    Returns:
        Average precision for the query image file
    """
    # Set up the compute device and seed the RNGs for reproducibility
    use_cuda = torch.cuda.is_available()
    np.random.seed(2019)
    torch.manual_seed(2019)
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Available device = ", device)

    # Create embedding network
    resnet_model = create_embedding_net()
    model = TripletNet(resnet_model)
    model.load_state_dict(torch.load(weights_file, map_location=device))
    model.to(device)
    model.eval()

    # Get query name
    query_img_name = os.path.basename(query_img_file)
    query_img_path = os.path.join(img_dir, query_img_name)

    # Create Query extractor object
    QUERY_EXTRACTOR = QueryExtractor(labels_dir, img_dir, subset="inference")

    # Create query ground truth dictionary
    query_gt_dict = QUERY_EXTRACTOR.get_query_map()[query_img_name]

    # Create the image database
    QUERY_IMAGES_FTS = [
        os.path.join(img_fts_dir, file)
        for file in sorted(os.listdir(img_fts_dir))
    ]
    QUERY_IMAGES = [
        os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))
    ]

    # Compute the query embedding and reduce it with PCA
    query_fts = get_query_embedding(model, device,
                                    query_img_file).detach().cpu().numpy()
    query_fts = perform_pca_on_single_vector(query_fts)

    # Compute the cosine similarity between the query and every database feature vector
    similarity = []
    for file in tqdm(QUERY_IMAGES_FTS):
        file_fts = np.squeeze(np.load(file))
        cos_sim = np.dot(query_fts, file_fts) / (np.linalg.norm(query_fts) *
                                                 np.linalg.norm(file_fts))
        similarity.append(cos_sim)

    # Get best matches using similarity
    similarity = np.asarray(similarity)
    indexes = (-similarity).argsort()[:top_k]
    best_matches = [QUERY_IMAGES[index] for index in indexes]

    # Get preds
    if plot:
        preds = get_preds_and_visualize(best_matches, query_gt_dict, img_dir,
                                        20)
    else:
        preds = get_preds(best_matches, query_gt_dict)

    # Get average precision
    ap = ap_per_query(best_matches, query_gt_dict)

    return ap
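
A minimal usage sketch, assuming an Oxford Buildings directory layout; every path below is an illustrative placeholder rather than a file shipped with the repository:

ap = inference_on_single_labelled_image_pca(
    query_img_file="./data/oxbuild/images/all_souls_000013.jpg",
    labels_dir="./data/oxbuild/gt_files/",
    img_dir="./data/oxbuild/images/",
    img_fts_dir="./fts_pca/oxbuild/",
    weights_file="./weights/oxbuild_triplet.pth",
    top_k=1000,
    plot=False,
)
print("Average precision:", ap)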
Example #2
def inference_on_single_labelled_image_pca_web(
    model,
    query_img_file,
    labels_dir="./static/data/oxbuild/gt_files/",
    img_dir="./static/data/oxbuild/images/",
    img_fts_dir="./static/fts_pca/oxbuild/",
    top_k=60,
    plot=False,
):
    """
    Variant of inference_on_single_labelled_image_pca whose return values are adapted for web deployment

    Args:
        model       : trained model to use (either the Paris or the Oxford model)
        query_img_file  : path of the query image file
        labels_dir  : directory holding the ground truth labels
        img_dir     : directory holding the images
        img_fts_dir : directory holding the PCA-reduced features generated by the create_db.py script
        top_k       : number of top-ranked results returned; defaults to 60 for web deployment
        plot        : if True, the top 20 results are plotted (unused in this variant)

    Returns:
        List of top k similar images; list of ground truth labels for top k images
    """
    # Set up the compute device and seed the RNGs for reproducibility
    use_cuda = torch.cuda.is_available()
    np.random.seed(2019)
    torch.manual_seed(2019)
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Available device = ", device)

    # Get query name
    query_img_name = os.path.basename(query_img_file)
    query_img_path = os.path.join(img_dir, query_img_name)

    # Create Query extractor object
    QUERY_EXTRACTOR = QueryExtractor(labels_dir, img_dir, subset="inference")

    # Create the image database
    QUERY_IMAGES_FTS = [
        os.path.join(img_fts_dir, file)
        for file in sorted(os.listdir(img_fts_dir))
    ]
    QUERY_IMAGES = [
        os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))
    ]

    # Compute the query embedding; the web front end passes paths rooted at
    # "/static/...", so "." is prefixed to make them relative to the working directory
    query_fts = get_query_embedding(model, device, "." +
                                    query_img_file).detach().cpu().numpy()
    query_fts = perform_pca_on_single_vector(query_fts)

    # Compute the cosine similarity between the query and every database feature vector
    similarity = []
    for file in tqdm(QUERY_IMAGES_FTS):
        file_fts = np.squeeze(np.load(file))
        cos_sim = np.dot(query_fts, file_fts) / (np.linalg.norm(query_fts) *
                                                 np.linalg.norm(file_fts))
        similarity.append(cos_sim)

    # Get best matches using similarity
    similarity = np.asarray(similarity)
    indexes = (-similarity).argsort()[:top_k]
    best_matches = [QUERY_IMAGES[index] for index in indexes]
    print(best_matches)

    # Build the ground-truth map for the query; fall back to all zeros when the
    # query has no ground-truth entry (e.g. a user-uploaded image)
    gt_map = [0] * top_k
    try:
        query_gt_dict = QUERY_EXTRACTOR.get_query_map()[query_img_name]
        gt_map = get_gt_web(best_matches, query_gt_dict)
    except KeyError:
        pass
    print(gt_map)
    # Strip the leading "." so the returned paths are rooted at "/static/..."
    for i in range(len(best_matches)):
        best_matches[i] = best_matches[i][1:]
    return best_matches, gt_map
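
A minimal usage sketch for the web variant, assuming the model is built and loaded elsewhere in the app and that the front end supplies a "/static/..." query path; the weights path is an illustrative placeholder:

resnet_model = create_embedding_net()
model = TripletNet(resnet_model)
model.load_state_dict(torch.load("./weights/oxbuild_triplet.pth", map_location="cpu"))
model.eval()

best_matches, gt_map = inference_on_single_labelled_image_pca_web(
    model,
    query_img_file="/static/data/oxbuild/images/all_souls_000013.jpg",
)
for path, is_relevant in zip(best_matches, gt_map):
    print(is_relevant, path)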