Example #1
0
def find_similar(request, pk):
    """Render the similarity-search page for the image identified by *pk*.

    Looks up precomputed pairwise comparisons for the image, keeps the
    strongest ones by absolute similarity score, and renders them as an
    HTML gallery. Thresholded images cannot be compared, so those get an
    error page instead.
    """
    image1 = get_image(pk, None, request)
    pk = int(pk)

    # Similarity search is only available for unthresholded images.
    if image1.is_thresholded == False:

        # Cap the number of results at 100, or at however many
        # precomputed comparisons actually exist for this image.
        number_comparisons = count_existing_comparisons(pk)
        max_results = min(number_comparisons, 100)

        # Top max_results comparisons, strongest |similarity_score| first.
        comparisons = (
            get_existing_comparisons(pk)
            .extra(select={"abs_score": "abs(similarity_score)"})
            .order_by("-abs_score")[:max_results]  # "-" = descending
        )

        images = [image1]
        scores = [1]  # pearsonr of the query with itself
        for comparison in comparisons:
            # Each comparison pairs the query with one other image; keep
            # the other one, provided it has a map type and a thumbnail.
            other = (comparison.image2
                     if comparison.image1.id == pk
                     else comparison.image1)
            if hasattr(other, "map_type") and other.thumbnail:
                images.append(other)
                scores.append(comparison.similarity_score)

        # Parallel per-image lists consumed by the search template.
        image_ids = [img.pk for img in images]
        png_img_paths = [img.get_thumbnail_url() for img in images]
        tags = [[str(img.map_type)] for img in images]

        # Collection name on top, image name on the bottom of each tile.
        bottom_text = ["%s" % (img.name) for img in images]
        top_text = ["%s" % (img.collection.name) for img in images]

        compare_url = "/images/compare"  # format will be prefix/[query_id]/[other_id]
        image_url = "/images"  # format will be prefix/[other_id]
        image_title = format_image_collection_names(
            image_name=image1.name,
            collection_name=image1.collection.name,
            map_type=image1.map_type,
            total_length=50)

        # Thumbnail of the query image itself.
        query_png = image1.thumbnail.url

        # Build the gallery HTML; scores are ranked by absolute value.
        html_snippet = search.similarity_search(
            image_scores=scores,
            tags=tags,
            png_paths=png_img_paths,
            button_url=compare_url,
            image_url=image_url,
            query_png=query_png,
            query_id=pk,
            top_text=top_text,
            image_ids=image_ids,
            bottom_text=bottom_text,
            max_results=max_results,
            absolute_value=True)

        html = [snippet.strip("\n") for snippet in html_snippet]

        # Comparisons for this image that are still being computed.
        images_processing = count_processing_comparisons(pk)

        context = {
            'html': html,
            'images_processing': images_processing,
            'image_title': image_title,
            'image_url': '/images/%s' % (image1.pk)
        }
        return render(request, 'statmaps/compare_search.html', context)

    error_message = "Image comparison is not enabled for thresholded images."
    context = {'error_message': error_message}
    return render(request, 'statmaps/error_message.html', context)
Example #2
0
def find_similar(request,pk):
    """Render the similarity-search page for the image identified by *pk*.

    Variant of the view with the slow comparison-count queries disabled
    (see TODOs below): the result cap is hard-coded to 100 and the
    "images still processing" count is omitted from the context.
    Thresholded images cannot be compared and get an error page instead.
    """
    image1 = get_image(pk,None,request)
    pk = int(pk)

    # Search only enabled if the image is not thresholded
    if image1.is_thresholded == False:

        # Count the number of comparisons that we have to determine max that we can return
        # TODO: optimize this slow query
        #number_comparisons = count_existing_comparisons(pk)

        # Hard-coded cap while the count query above is disabled.
        max_results = 100
        #if number_comparisons < 100:
        #    max_results = number_comparisons

        # Get only # max_results similarity calculations for this image, and ids of other images
        # abs_score lets the DB rank by |similarity_score| in one query.
        comparisons = get_existing_comparisons(pk).extra(select={"abs_score": "abs(similarity_score)"}).order_by("-abs_score")[0:max_results] # "-" indicates descending

        images = [image1]
        scores = [1] # pearsonr
        for comp in comparisons:
            # pick the image we are comparing with
            image = [image for image in [comp.image1, comp.image2] if image.id != pk][0]
            # Only show images that have a map type and a thumbnail.
            if hasattr(image, "map_type") and image.thumbnail:
                images.append(image)
                scores.append(comp.similarity_score)

        # We will need lists of image ids, png paths, query id, query path, tags, names, scores
        image_ids = [image.pk for image in images]
        png_img_paths = [image.get_thumbnail_url() for image in images]
        tags = [[str(image.map_type)] for image in images]

        # The top text will be the collection name, the bottom text the image name
        bottom_text = ["%s" % (image.name) for image in images]
        top_text = ["%s" % (image.collection.name) for image in images]
        compare_url = "/images/compare"  # format will be prefix/[query_id]/[other_id]
        image_url = "/images"  # format will be prefix/[other_id]
        image_title = format_image_collection_names(image_name=image1.name,
                                                    collection_name=image1.collection.name,
                                                    map_type=image1.map_type,total_length=50)

        # Here is the query image
        query_png = image1.thumbnail.url

        # Do similarity search and return html to put in page, specify 100 max results, take absolute value of scores
        html_snippet = search.similarity_search(image_scores=scores,tags=tags,png_paths=png_img_paths,
                                    button_url=compare_url,image_url=image_url,query_png=query_png,
                                    query_id=pk,top_text=top_text,image_ids=image_ids,
                                    bottom_text=bottom_text,max_results=max_results,absolute_value=True,
                                    remove_scripts=["BOOTSTRAP","BOOTSTRAP_MIN"],container_width=1200)

        html = [h.strip("\n") for h in html_snippet]

        # Get the number of images still processing
        # TODO: improve performance of this calculation
        # images_processing = count_processing_comparisons(pk)

        context = {'html': html,
                   #'images_processing':images_processing,
                   'image_title':image_title, 'image_url': '/images/%s' % (image1.pk) }
        return render(request, 'statmaps/compare_search.html', context)
    else:
        error_message = "Image comparison is not enabled for thresholded images."
        context = {'error_message': error_message}
        return render(request, 'statmaps/error_message.html', context)
Example #3
0
def compare_images(request, pk1, pk2):
    """Render a voxel-wise scatterplot comparison of two images.

    Loads each image's precomputed 4mm reduced representation, builds an
    interactive Pearson-correlation scatterplot against a fixed MNI atlas
    (labels/colors loaded from pickled files under neurovault/static/atlas),
    and renders the result. A warning is added for each thresholded image.

    Fix over the previous revision: warnings were emitted for BOTH images
    whenever either was thresholded, mislabeling the unthresholded one as
    "Thresholded image"; now only actually-thresholded images are warned
    about.
    """
    import numpy as np
    image1 = get_image(pk1, None, request)
    image2 = get_image(pk2, None, request)
    images = [image1, image2]

    # Get image: collection: [map_type] names no longer than ~125 characters
    image_names = [
        format_image_collection_names(
            image_name=image.name,
            collection_name=image.collection.name,
            map_type=image.map_type,
            total_length=125) for image in images
    ]

    # Create custom links for the visualization
    custom = {
        "IMAGE_1_LINK": "/images/%s" % (image1.pk),
        "IMAGE_2_LINK": "/images/%s" % (image2.pk)
    }

    # Load image vectors from npy files
    image_vector1 = np.load(image1.reduced_representation.file)
    image_vector2 = np.load(image2.reduced_representation.file)

    # Load atlas pickle, containing vectors of atlas labels, colors, and
    # values for the same voxel dimension (4mm)
    neurovault_root = os.path.dirname(
        os.path.dirname(os.path.realpath(neurovault.__file__)))
    atlas_pkl_path = os.path.join(neurovault_root,
                                  'neurovault/static/atlas/atlas_mni_4mm.pkl')
    atlas = joblib.load(atlas_pkl_path)

    # Load the pre-rendered atlas svg, so we don't need to generate it
    # dynamically
    atlas_svg = os.path.join(neurovault_root,
                             'neurovault/static/atlas/atlas_mni_2mm_svg.pkl')
    atlas_svg = joblib.load(atlas_svg)

    # Generate html for similarity search, do not specify atlas
    html_snippet, _ = scatterplot.scatterplot_compare_vector(
        image_vector1=image_vector1,
        image_vector2=image_vector2,
        image_names=image_names,
        atlas_vector=atlas["atlas_vector"],
        atlas_labels=atlas["atlas_labels"],
        atlas_colors=atlas["atlas_colors"],
        corr_type="pearson",
        subsample_every=10,  # subsample every 10th voxel
        custom=custom,
        remove_scripts="D3_MIN_JS")

    # Add atlas svg to the image, and prepare html for rendering: the
    # snippet contains [coronal]/[axial]/[sagittal] placeholder markers.
    html = [h.replace("[coronal]", atlas_svg) for h in html_snippet]
    html = [
        h.strip("\n").replace("[axial]", "").replace("[sagittal]", "")
        for h in html
    ]
    context = {'html': html}

    # Warn about each thresholded image (and only those).
    warnings = [
        'Warning: Thresholded image: %s (%.4g%% of voxels are zeros),'
        % (image_names[i], images[i].perc_bad_voxels)
        for i in range(len(images)) if images[i].is_thresholded
    ]
    if warnings:
        context["warnings"] = warnings

    return render(request, 'statmaps/compare_images.html', context)
Example #4
0
def compare_images(request,pk1,pk2):
    """Render a voxel-wise scatterplot comparison of two images.

    Loads (creating on demand) each image's 4mm reduced representation,
    builds an interactive Pearson-correlation scatterplot against a fixed
    MNI atlas, and renders the result. A warning is added for each
    thresholded image.

    Fixes over the previous revision:
    - image2's missing reduced representation was regenerated from
      ``image1.id`` (copy-paste bug), so image2 was never actually
      transformed; it now uses ``image2.id``.
    - warnings were emitted for BOTH images whenever either was
      thresholded, mislabeling the unthresholded one; now only
      actually-thresholded images are warned about.
    """
    import numpy as np
    image1 = get_image(pk1,None,request)
    image2 = get_image(pk2,None,request)
    images = [image1,image2]

    # Get image: collection: [map_type] names no longer than ~125 characters
    image1_custom_name = format_image_collection_names(image_name=image1.name,
                                                       collection_name=image1.collection.name,
                                                       map_type=image1.map_type,total_length=125)
    image2_custom_name = format_image_collection_names(image_name=image2.name,
                                                       collection_name=image2.collection.name,
                                                       map_type=image2.map_type,total_length=125)

    image_names = [image1_custom_name,image2_custom_name]

    # Create custom links for the visualization
    custom = {
            "IMAGE_1_LINK":"/images/%s" % (image1.pk),
            "IMAGE_2_LINK":"/images/%s" % (image2.pk)
    }

    # create reduced representation in case it's not there
    if not image1.reduced_representation:
        image1 = save_resampled_transformation_single(image1.id) # cannot run this async
    if not image2.reduced_representation:
        # BUGFIX: previously passed image1.id here, leaving image2 untransformed
        image2 = save_resampled_transformation_single(image2.id) # cannot run this async

    # Load image vectors from npy files
    image_vector1 = np.load(image1.reduced_representation.file)
    image_vector2 = np.load(image2.reduced_representation.file)

    # Load atlas pickle, containing vectors of atlas labels, colors, and values for same voxel dimension (4mm)
    this_path = os.path.abspath(os.path.dirname(__file__))
    atlas_pkl_path = os.path.join(this_path, 'static/atlas/atlas_mni_4mm.pkl')
    atlas = joblib.load(atlas_pkl_path)

    # Load the pre-rendered atlas svg, so we don't need to dynamically generate it
    atlas_svg = os.path.join(this_path, 'static/atlas/atlas_mni_2mm_svg.pkl')
    atlas_svg = joblib.load(atlas_svg)

    # Generate html for similarity search, do not specify atlas
    html_snippet, _ = scatterplot.scatterplot_compare_vector(image_vector1=image_vector1,
                                                                 image_vector2=image_vector2,
                                                                 image_names=image_names,
                                                                 atlas_vector=atlas["atlas_vector"],
                                                                 atlas_labels=atlas["atlas_labels"],
                                                                 atlas_colors=atlas["atlas_colors"],
                                                                 corr_type="pearson",
                                                                 subsample_every=10, # subsample every 10th voxel
                                                                 custom=custom,
                                                                 remove_scripts="D3_MIN_JS",
                                                                 width=1000)

    # Add atlas svg to the image, and prepare html for rendering: the snippet
    # contains [coronal]/[axial]/[sagittal] placeholder markers.
    html = [h.replace("[coronal]",atlas_svg) for h in html_snippet]
    html = [h.strip("\n").replace("[axial]","").replace("[sagittal]","") for h in html]
    context = {'html': html}

    # Warn about each thresholded image (and only those).
    warnings = ['Warning: Thresholded image: %s (%.4g%% of voxels are zeros),'
                % (image_names[i], images[i].perc_bad_voxels)
                for i in range(len(images)) if images[i].is_thresholded]
    if warnings:
        context["warnings"] = warnings

    return render(request, 'statmaps/compare_images.html', context)