def test_binary_deletion_vector():

  mr_directory = get_data_directory()
  
  # We will generate data with the following overlap percentages
  overlap_percents = [0.0,0.25,0.5,0.75,1.0]
  for overlap in overlap_percents:
    vector_length = 10000
    image_vector1 = numpy.zeros((vector_length))
    image_vector2 = numpy.zeros((vector_length))
    number_overlap_voxels = int(numpy.floor(overlap*vector_length))
    remaining_voxels = int(vector_length - number_overlap_voxels)
    idx = range(0,vector_length)
    # We break the remaining voxels into 4 groups:
    # - nans that will overlap
    # - zeros that will overlap (no change to images here, already zeros)
    # - nans in image1, with a block of nonzero values in image2
    # - zeros in image2, with a block of nonzero values in image1
    group_size = remaining_voxels // 4
    if overlap != 0.0:
      # Here are the overlapping voxels for each image
      overlap_idx = range(0,number_overlap_voxels)
      image_vector1[overlap_idx] = 1
      image_vector2[overlap_idx] = 1 
    if overlap != 1.0: 
      # Nans that will overlap
      nans_overlap_idx = range(number_overlap_voxels,(number_overlap_voxels+group_size))
      image_vector1[nans_overlap_idx] = numpy.nan
      image_vector2[nans_overlap_idx] = numpy.nan
      # Nans in image1, with a block of nonzero values in image2
      start = number_overlap_voxels+group_size
      end = number_overlap_voxels+2*group_size
      nans_image1 = idx[start:end]
      values_image2 = range(nans_image1[-1],(nans_image1[-1] + int(group_size/2)))
      image_vector1[nans_image1] = numpy.nan
      image_vector2[values_image2] = 0.5
      # Zeros in image2, with a block of nonzero values in image1
      start = number_overlap_voxels+2*group_size
      end = number_overlap_voxels+3*group_size
      zeros_image2 = idx[start:end]
      values_image1 = range(zeros_image2[-1],(zeros_image2[-1] + int(group_size/2)))
      image_vector1[values_image1] = 0.75
    # Create the binary deletion vector (pdmask)
    pdmask = make_binary_deletion_vector([image_vector1,image_vector2]) 
    actual_overlap = len(numpy.where(pdmask!=0)[0])
    print "Overlap %s percent: should have %s, actual %s" %(overlap,number_overlap_voxels,actual_overlap)
    assert_equal(actual_overlap,number_overlap_voxels)
   
    # Also check that the mask is binary
    if overlap != 0 and overlap != 1:
      assert_equal(numpy.unique(pdmask)[0],0)
      assert_equal(numpy.unique(pdmask)[1],1)

    if overlap == 0:
      assert_equal(numpy.unique(pdmask)[0],0)

    if overlap == 1:
      assert_equal(numpy.unique(pdmask)[0],1)
Example #2
def test_binary_deletion_vector():

  mr_directory = get_data_directory()
  
  # We will generate data with the following overlap percentages
  overlap_percents = [0.0,0.25,0.5,0.75,1.0]
  for overlap in overlap_percents:
    vector_length = 10000
    image_vector1 = numpy.zeros((vector_length))
    image_vector2 = numpy.zeros((vector_length))
    number_overlap_voxels = int(numpy.floor(overlap*vector_length))
    remaining_voxels = int(vector_length - number_overlap_voxels)
    idx = list(range(0,vector_length))
    # We break the remaining voxels into 4 groups:
    # - nans that will overlap
    # - zeros that will overlap (no change to images here, already zeros)
    # - nans in image1, with a block of nonzero values in image2
    # - zeros in image2, with a block of nonzero values in image1
    group_size = remaining_voxels // 4
    if overlap != 0.0:
      # Here are the overlapping voxels for each image
      overlap_idx = list(range(0,number_overlap_voxels))
      image_vector1[overlap_idx] = 1
      image_vector2[overlap_idx] = 1 
    if overlap != 1.0: 
      # Nans that will overlap
      nans_overlap_idx = list(range(number_overlap_voxels,(number_overlap_voxels+group_size)))
      image_vector1[nans_overlap_idx] = numpy.nan
      image_vector2[nans_overlap_idx] = numpy.nan
      # Nans in image1, with a block of nonzero values in image2
      start = number_overlap_voxels+group_size
      end = number_overlap_voxels+2*group_size
      nans_image1 = idx[start:end]
      values_image2 = list(range(nans_image1[-1],(nans_image1[-1] + group_size // 2)))
      image_vector1[nans_image1] = numpy.nan
      image_vector2[values_image2] = 0.5
      # Zeros in image2, with a block of nonzero values in image1
      start = number_overlap_voxels+2*group_size
      end = number_overlap_voxels+3*group_size
      zeros_image2 = idx[start:end]
      values_image1 = list(range(zeros_image2[-1],(zeros_image2[-1] + group_size // 2)))
      image_vector1[values_image1] = 0.75
    # Create the binary deletion vector (pdmask)
    pdmask = make_binary_deletion_vector([image_vector1,image_vector2]) 
    actual_overlap = len(numpy.where(pdmask!=0)[0])
    print("Overlap %s percent: should have %s, actual %s" %(overlap,number_overlap_voxels,actual_overlap))
    assert_equal(actual_overlap,number_overlap_voxels)
   
    # Also check that the mask is binary
    if overlap != 0 and overlap != 1:
      assert_equal(numpy.unique(pdmask)[0],0)
      assert_equal(numpy.unique(pdmask)[1],1)

    if overlap == 0:
      assert_equal(numpy.unique(pdmask)[0],0)

    if overlap == 1:
      assert_equal(numpy.unique(pdmask)[0],1)
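
The tests above pin down make_binary_deletion_vector only through its output. As a reference, here is a minimal sketch of the behavior they imply (the mask is 1 exactly where every input vector has a finite, nonzero value); the body is an assumption derived from the tests, not the library's implementation, and the _sketch suffix marks it as such.

import numpy

def make_binary_deletion_vector_sketch(image_vectors):
    # Assumed behavior: stack the input vectors and keep a position only if
    # every vector is finite (not nan/inf) and nonzero there.
    stacked = numpy.vstack(image_vectors)
    valid = numpy.isfinite(stacked) & (stacked != 0)
    # Return an integer 0/1 vector, matching the binary checks in the tests.
    return valid.all(axis=0).astype(int)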
Example #3
def save_voxelwise_pearson_similarity_reduced_representation(pk1, pk2):
    from neurovault.apps.statmaps.models import Similarity, Comparison
    import numpy as np

    # We will always calculate Comparison 1 vs 2, never 2 vs 1
    if pk1 != pk2:
        try:
            sorted_images = get_images_by_ordered_id(pk1, pk2)
        except Http404:
            # files have been deleted in the meantime
            return
        image1 = sorted_images[0]
        image2 = sorted_images[1]
        pearson_metric = Similarity.objects.get(
            similarity_metric="pearson product-moment correlation coefficient",
            transformation="voxelwise")

        # Make sure we have a resampled transformation for each pk in question
        if not image1.reduced_representation or not os.path.exists(
                image1.reduced_representation.path):
            image1 = save_resampled_transformation_single(
                pk1)  # cannot run this async

        if not image2.reduced_representation or not os.path.exists(
                image2.reduced_representation.path):
            image2 = save_resampled_transformation_single(
                pk2)  # cannot run this async

        # Load image pickles
        image_vector1 = np.load(image1.reduced_representation.file)
        image_vector2 = np.load(image2.reduced_representation.file)

        # Calculate binary deletion vector mask (find 0s and nans)
        mask = make_binary_deletion_vector([image_vector1, image_vector2])

        # Calculate pearson
        pearson_score = calculate_pairwise_correlation(
            image_vector1[mask == 1],
            image_vector2[mask == 1],
            corr_type="pearson")

        # Only save the comparison if it is not NaN
        if not np.isnan(pearson_score):
            Comparison.objects.update_or_create(image1=image1,
                                                image2=image2,
                                                defaults={
                                                    'similarity_metric':
                                                    pearson_metric,
                                                    'similarity_score':
                                                    pearson_score
                                                })
            return image1.pk, image2.pk, pearson_score
        else:
            print "Comparison returned NaN."
    else:
        raise Exception("You are trying to compare an image with itself!")
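
The correlation step above delegates to calculate_pairwise_correlation with corr_type="pearson" on the masked vectors. A minimal sketch of what that step is assumed to compute, using numpy.corrcoef on the positions where the deletion mask is 1 (the helper name is hypothetical):

import numpy as np

def masked_pearson_sketch(image_vector1, image_vector2, mask):
    # Keep only positions that survived the binary deletion mask, then take
    # the off-diagonal entry of the 2x2 Pearson correlation matrix.
    v1 = image_vector1[mask == 1]
    v2 = image_vector2[mask == 1]
    return np.corrcoef(v1, v2)[0, 1]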
Example #4
def save_voxelwise_pearson_similarity_reduced_representation(pk1, pk2):
    from neurovault.apps.statmaps.models import Similarity, Comparison
    import numpy as np

    # We will always calculate Comparison 1 vs 2, never 2 vs 1
    if pk1 != pk2:
        try:
            sorted_images = get_images_by_ordered_id(pk1, pk2)
        except Http404:
            # files have been deleted in the meantime
            return
        image1 = sorted_images[0]
        image2 = sorted_images[1]
        pearson_metric = Similarity.objects.get(similarity_metric="pearson product-moment correlation coefficient",
                                                transformation="voxelwise")
    
        # Make sure we have a resampled transformation for each pk in question
        if not image1.reduced_representation or not os.path.exists(image1.reduced_representation.path):
            image1 = save_resampled_transformation_single(pk1) # cannot run this async

        if not image2.reduced_representation or not os.path.exists(image2.reduced_representation.path):
            image2 = save_resampled_transformation_single(pk2) # cannot run this async

        # Load image pickles
        image_vector1 = np.load(image1.reduced_representation.file)
        image_vector2 = np.load(image2.reduced_representation.file)

        # Calculate binary deletion vector mask (find 0s and nans)
        mask = make_binary_deletion_vector([image_vector1,image_vector2])

        # Calculate pearson
        pearson_score = calculate_pairwise_correlation(image_vector1[mask==1],
                                                       image_vector2[mask==1],
                                                       corr_type="pearson")   

        # Only save the comparison if it is not NaN
        if not np.isnan(pearson_score):
            Comparison.objects.update_or_create(image1=image1, image2=image2,
                                                similarity_metric=pearson_metric,
                                                similarity_score=pearson_score)
            return image1.pk,image2.pk,pearson_score
        else:
            print "Comparison returned NaN."
    else:
        raise Exception("You are trying to compare an image with itself!")
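
Note that this version passes similarity_metric and similarity_score directly to update_or_create, so Django treats them as lookup fields, whereas Example #3 puts them in defaults so that an existing Comparison row for the (image1, image2) pair is updated in place. A short usage sketch of the defaults form, reusing the names from the functions above:

# Fields outside `defaults` identify the row; fields inside `defaults` are
# written whether the row is found or newly created.
comparison, created = Comparison.objects.update_or_create(
    image1=image1,
    image2=image2,
    defaults={"similarity_metric": pearson_metric,
              "similarity_score": pearson_score},
)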