Example #1
import numpy
import pandas

# get_images_df and calculate_pairwise_correlation are helpers assumed to be
# importable from the surrounding package; they are not defined in this snippet.
def calculate_reverse_inference_distance(query_image, in_images, out_images,
                                         standard_mask, equal_priors=True):
    '''calculate_reverse_inference_distance

    Return a reverse inference score for a query image, with likelihood scores
    derived from the similarity (distance) of the query image to each group.

    .. note::

        Reverse Inference Calculation
        -----------------------------
        P(mental process|activation) = P(activation|mental process) * P(mental process)
        divided by
        P(activation|mental process) * P(mental process) + P(activation|~mental process) * P(~mental process)
        P(activation|mental process): the voxelwise prior map

    :param query_image: nifti image path
        image that we want to calculate the reverse inference score for

    :param in_images: list of nifti files
        brain maps that are defined for the concept

    :param out_images: list of nifti files
        the rest of the brain maps (not defined for the concept)

    :param standard_mask: standard brain mask
        mask used to extract voxel data from all images

    :param equal_priors: boolean
        use 0.5 as the prior for each group [default True]. If set to False, the
        frequency of the concept in the total set is used instead. "True" is
        recommended for small sets.

    '''
    if len(numpy.intersect1d(in_images,out_images)) > 0:
        raise ValueError("ERROR: in_images and out_images should not share images!")
    all_images = in_images + out_images
    mr = get_images_df(file_paths=all_images,mask=standard_mask)
    mr.index = all_images
    in_subset = mr.loc[in_images]
    out_subset = mr.loc[out_images] 
    if equal_priors:
        p_process_in = 0.5
        p_process_out = 0.5
    else:
        in_count = len(in_images)
        out_count = len(out_images) 
        total = in_count + out_count              # total number of nifti images
        p_process_in = float(in_count) / total    # percentage of niftis in
        p_process_out = float(out_count) / total  # percentage out
    # Read in the query image
    query = get_images_df(file_paths=query_image,mask=standard_mask)
    # Generate a mean image for each group
    mean_image_in = pandas.DataFrame(in_subset.mean())
    mean_image_out = pandas.DataFrame(out_subset.mean())
    # p in/out is similarity between query image and groups
    p_in = numpy.power(calculate_pairwise_correlation(mean_image_in[0],query[0]),2)
    p_out = numpy.power(calculate_pairwise_correlation(mean_image_out[0],query[0]),2)
    # Calculate inference
    numerators = p_in * p_process_in
    denominators = (p_in * p_process_in) + (p_out * p_process_out)
    return (numerators / denominators)
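
To make the final Bayes step concrete, here is a toy numeric walkthrough; the similarity values 0.6 and 0.2 are made up for illustration and do not come from real images:

# Hypothetical squared-correlation likelihoods for a query image (illustrative only)
p_in = 0.6    # similarity of the query to the mean "in" (concept) image
p_out = 0.2   # similarity of the query to the mean "out" image

# Equal priors, as when equal_priors=True
p_process_in = p_process_out = 0.5

numerator = p_in * p_process_in                              # 0.30
denominator = p_in * p_process_in + p_out * p_process_out    # 0.30 + 0.10 = 0.40
print(numerator / denominator)                               # 0.75

Because the likelihoods are squared correlations (non-negative), the score stays between 0 and 1 and moves toward 1 as the query resembles the concept group more than the rest.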
Example #2
def save_voxelwise_pearson_similarity_reduced_representation(pk1, pk2):
    from neurovault.apps.statmaps.models import Similarity, Comparison
    from django.http import Http404
    import numpy as np
    import os
    # get_images_by_ordered_id, save_resampled_transformation_single,
    # make_binary_deletion_vector and calculate_pairwise_correlation are
    # assumed to come from the surrounding tasks module and its imports.

    # We will always calculate Comparison 1 vs 2, never 2 vs 1
    if pk1 != pk2:
        try:
            sorted_images = get_images_by_ordered_id(pk1, pk2)
        except Http404:
            # files have been deleted in the meantime
            return
        image1 = sorted_images[0]
        image2 = sorted_images[1]
        pearson_metric = Similarity.objects.get(
            similarity_metric="pearson product-moment correlation coefficient",
            transformation="voxelwise")

        # Make sure we have transforms for the pks in question
        if not image1.reduced_representation or not os.path.exists(
                image1.reduced_representation.path):
            image1 = save_resampled_transformation_single(
                pk1)  # cannot run this async

        if not image2.reduced_representation or not os.path.exists(
                image2.reduced_representation.path):
            image2 = save_resampled_transformation_single(
                pk2)  # cannot run this async

        # Load image pickles
        image_vector1 = np.load(image1.reduced_representation.file)
        image_vector2 = np.load(image2.reduced_representation.file)

        # Calculate binary deletion vector mask (find 0s and nans)
        mask = make_binary_deletion_vector([image_vector1, image_vector2])

        # Calculate pearson
        pearson_score = calculate_pairwise_correlation(
            image_vector1[mask == 1],
            image_vector2[mask == 1],
            corr_type="pearson")

        # Only save the comparison if the score is not NaN
        if not np.isnan(pearson_score):
            Comparison.objects.update_or_create(image1=image1,
                                                image2=image2,
                                                defaults={
                                                    'similarity_metric':
                                                    pearson_metric,
                                                    'similarity_score':
                                                    pearson_score
                                                })
            return image1.pk, image2.pk, pearson_score
        else:
            print "Comparison returned NaN."
    else:
        raise Exception("You are trying to compare an image with itself!")
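
For readers unfamiliar with the masking step, below is a minimal standalone sketch of the idea, assuming make_binary_deletion_vector keeps only voxels that are nonzero and non-NaN in both vectors; this is an assumption about its behavior, not the actual library implementation:

import numpy as np

def binary_deletion_vector(vectors):
    # Hypothetical equivalent: mark voxels that are nonzero and non-NaN
    # in every input vector (an assumption, not the library code)
    stacked = np.vstack(vectors)
    valid = np.all((stacked != 0) & ~np.isnan(stacked), axis=0)
    return valid.astype(int)

v1 = np.array([1.0, 0.0, 2.5, np.nan, 3.0])
v2 = np.array([0.5, 1.0, np.nan, 2.0, 1.5])
mask = binary_deletion_vector([v1, v2])                # [1, 0, 0, 0, 1]
r = np.corrcoef(v1[mask == 1], v2[mask == 1])[0, 1]    # Pearson on shared voxels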
Example #3
File: tasks.py  Project: rwblair/NeuroVault
def save_voxelwise_pearson_similarity_reduced_representation(pk1, pk2):
    from neurovault.apps.statmaps.models import Similarity, Comparison
    from django.http import Http404
    import numpy as np
    import os
    # Helper functions (get_images_by_ordered_id, save_resampled_transformation_single,
    # make_binary_deletion_vector, calculate_pairwise_correlation) are assumed
    # to come from the surrounding tasks module.

    # We will always calculate Comparison 1 vs 2, never 2 vs 1
    if pk1 != pk2:
        try:
            sorted_images = get_images_by_ordered_id(pk1, pk2)
        except Http404:
            # files have been deleted in the meantime
            return
        image1 = sorted_images[0]
        image2 = sorted_images[1]
        pearson_metric = Similarity.objects.get(similarity_metric="pearson product-moment correlation coefficient",
                                                transformation="voxelwise")
    
        # Make sure we have transforms for the pks in question
        if not image1.reduced_representation or not os.path.exists(image1.reduced_representation.path):
            image1 = save_resampled_transformation_single(pk1) # cannot run this async

        if not image2.reduced_representation or not os.path.exists(image2.reduced_representation.path):
            image2 = save_resampled_transformation_single(pk2) # cannot run this async

        # Load image pickles
        image_vector1 = np.load(image1.reduced_representation.file)
        image_vector2 = np.load(image2.reduced_representation.file)

        # Calculate binary deletion vector mask (find 0s and nans)
        mask = make_binary_deletion_vector([image_vector1,image_vector2])

        # Calculate pearson
        pearson_score = calculate_pairwise_correlation(image_vector1[mask==1],
                                                       image_vector2[mask==1],
                                                       corr_type="pearson")   

        # Only save the comparison if the score is not NaN
        if not np.isnan(pearson_score):
            # Metric and score go in defaults so an existing comparison row is updated
            Comparison.objects.update_or_create(image1=image1, image2=image2,
                                                defaults={'similarity_metric': pearson_metric,
                                                          'similarity_score': pearson_score})
            return image1.pk,image2.pk,pearson_score
        else:
            print "Comparison returned NaN."
    else:
        raise Exception("You are trying to compare an image with itself!")
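
Given the tasks.py filename and the "cannot run this async" comments, this function is presumably dispatched as a background task; a minimal, hypothetical sketch of Celery-style wiring (the decorator, task name, and primary keys below are placeholders, not taken from the project):

from celery import shared_task

@shared_task
def run_pearson_comparison(pk1, pk2):
    # Thin wrapper so the comparison can be queued asynchronously
    return save_voxelwise_pearson_similarity_reduced_representation(pk1, pk2)

# Queue a comparison for a hypothetical pair of image primary keys:
# run_pearson_comparison.delay(12, 34)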
Example #4
import numpy
import pandas

# get_images_df and calculate_pairwise_correlation are helpers assumed to be
# importable from the surrounding package; they are not defined in this snippet.
def calculate_reverse_inference_distance(query_image,
                                         in_images,
                                         out_images,
                                         standard_mask,
                                         equal_priors=True):
    '''calculate_reverse_inference_distance

    Return a reverse inference score for a query image, with likelihood scores
    derived from the similarity (distance) of the query image to each group.

    .. note::

        Reverse Inference Calculation
        -----------------------------
        P(mental process|activation) = P(activation|mental process) * P(mental process)
        divided by
        P(activation|mental process) * P(mental process) + P(activation|~mental process) * P(~mental process)
        P(activation|mental process): the voxelwise prior map

    :param query_image: nifti image path
        image that we want to calculate the reverse inference score for

    :param in_images: list of nifti files
        brain maps that are defined for the concept

    :param out_images: list of nifti files
        the rest of the brain maps (not defined for the concept)

    :param standard_mask: standard brain mask
        mask used to extract voxel data from all images

    :param equal_priors: boolean
        use 0.5 as the prior for each group [default True]. If set to False, the
        frequency of the concept in the total set is used instead. "True" is
        recommended for small sets.

    '''
    if len(numpy.intersect1d(in_images, out_images)) > 0:
        raise ValueError(
            "ERROR: in_images and out_images should not share images!")
    all_images = in_images + out_images
    mr = get_images_df(file_paths=all_images, mask=standard_mask)
    mr.index = all_images
    in_subset = mr.loc[in_images]
    out_subset = mr.loc[out_images]
    if equal_priors:
        p_process_in = 0.5
        p_process_out = 0.5
    else:
        in_count = len(in_images)
        out_count = len(out_images)
        total = in_count + out_count  # total number of nifti images
        p_process_in = float(in_count) / total  # percentage of niftis in
        p_process_out = float(out_count) / total  # percentage out
    # Read in the query image
    query = get_images_df(file_paths=query_image, mask=standard_mask)
    # Generate a mean image for each group
    mean_image_in = pandas.DataFrame(in_subset.mean())
    mean_image_out = pandas.DataFrame(out_subset.mean())
    # p in/out is similarity between query image and groups
    p_in = numpy.power(
        calculate_pairwise_correlation(mean_image_in[0], query[0]), 2)
    p_out = numpy.power(
        calculate_pairwise_correlation(mean_image_out[0], query[0]), 2)
    # Calculate inference
    numerators = p_in * p_process_in
    denominators = (p_in * p_process_in) + (p_out * p_process_out)
    return (numerators / denominators)
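
Finally, a hedged usage sketch for the function above; every filename below is a placeholder used only to illustrate the call:

# Hypothetical inputs -- placeholder filenames, only to illustrate the call;
# depending on get_images_df, standard_mask may need to be a loaded nibabel
# image rather than a file path.
standard_mask = "MNI152_T1_2mm_brain_mask.nii.gz"
in_images = ["concept_map_01.nii.gz", "concept_map_02.nii.gz"]
out_images = ["other_map_01.nii.gz", "other_map_02.nii.gz"]

# Reverse inference score (0..1) for how strongly the query supports the concept
ri_score = calculate_reverse_inference_distance(query_image="query_map.nii.gz",
                                                in_images=in_images,
                                                out_images=out_images,
                                                standard_mask=standard_mask,
                                                equal_priors=True)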