Example #1
def test_simulated_correlations():

  # Get standard brain mask
  mr_directory = get_data_directory()
  standard = "%s/MNI152_T1_2mm_brain_mask.nii.gz" %(mr_directory)
  thresholds = [0.0,0.5,1.0,1.5,1.96,2.0]

  # Generate random data inside brain mask, run 10 iterations
  standard = nibabel.load(standard)
  number_values = len(numpy.where(standard.get_data()!=0)[0])
  numpy.random.seed(9191986)
  for x in range(0,10):  
    data1 = norm.rvs(size=number_values)
    data2 = norm.rvs(size=number_values)
    corr = pearsonr(data1,data2)[0]
      
    # Put into faux nifti images
    mr1 = numpy.zeros(standard.shape)
    mr1[standard.get_data()!=0] = data1
    mr1 = nibabel.nifti1.Nifti1Image(mr1,affine=standard.get_affine(),header=standard.get_header())
    mr2 = numpy.zeros(standard.shape)
    mr2[standard.get_data()!=0] = data2
    mr2 = nibabel.nifti1.Nifti1Image(mr2,affine=standard.get_affine(),header=standard.get_header())  
    pdmask = make_binary_deletion_mask([mr1,mr2])
    pdmask = nibabel.Nifti1Image(pdmask,header=mr1.get_header(),affine=mr1.get_affine())
    score = calculate_correlation(images = [mr1,mr2],mask=pdmask)  
    assert_almost_equal(corr,score,decimal=5)
Example #2
def test_simulated_correlations():

    # Get standard brain mask
    mr_directory = get_data_directory()
    standard = "%s/MNI152_T1_2mm_brain_mask.nii.gz" % (mr_directory)
    thresholds = [0.0, 0.5, 1.0, 1.5, 1.96, 2.0]

    # Generate random data inside brain mask, run 10 iterations
    standard = nibabel.load(standard)
    number_values = len(numpy.where(standard.get_data() != 0)[0])
    numpy.random.seed(9191986)
    for x in range(0, 10):
        data1 = norm.rvs(size=number_values)
        data2 = norm.rvs(size=number_values)
        corr = pearsonr(data1, data2)[0]

        # Put into faux nifti images
        mr1 = numpy.zeros(standard.shape)
        mr1[standard.get_data() != 0] = data1
        mr1 = nibabel.nifti1.Nifti1Image(mr1,
                                         affine=standard.get_affine(),
                                         header=standard.get_header())
        mr2 = numpy.zeros(standard.shape)
        mr2[standard.get_data() != 0] = data2
        mr2 = nibabel.nifti1.Nifti1Image(mr2,
                                         affine=standard.get_affine(),
                                         header=standard.get_header())
        pdmask = make_binary_deletion_mask([mr1, mr2])
        pdmask = nibabel.Nifti1Image(pdmask,
                                     header=mr1.get_header(),
                                     affine=mr1.get_affine())
        score = calculate_correlation(images=[mr1, mr2], mask=pdmask)
        assert_almost_equal(corr, score, decimal=5)
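Both versions of the test lean on the same contract: make_binary_deletion_mask keeps only voxels that are nonzero and non-NaN in every input image, so the correlation computed over the masked voxels reproduces the correlation of the raw vectors. A minimal numpy sketch of that contract (illustrative only, not the pybraincompare implementation):

import numpy

def binary_deletion_mask_sketch(arrays):
    # Complete-case / pairwise-deletion mask: a voxel survives only if it is
    # finite and nonzero in every input array.
    mask = numpy.ones(arrays[0].shape, dtype=numpy.int8)
    for arr in arrays:
        mask[~numpy.isfinite(arr)] = 0
        mask[arr == 0] = 0
    return mask

a = numpy.array([1.0, 0.0, numpy.nan, 2.0])
b = numpy.array([3.0, 4.0, 5.0, numpy.nan])
print(binary_deletion_mask_sketch([a, b]))  # [1 0 0 0]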
Example #3
def test_binary_deletion_mask_values():
   
  images = get_pair_images(voxdims=["2","2"]) 
  image1 = nibabel.load(images[0])
  image2 = nibabel.load(images[1]) 
  pdmask = make_binary_deletion_mask([image1,image2]) 
  assert_equal(numpy.unique(pdmask)[0],0.0)
  assert_equal(numpy.unique(pdmask)[1],1.0)
  assert_false(numpy.isnan(pdmask).any())
  assert_false(numpy.isinf(pdmask).any())   
Example #4
def test_binary_deletion_mask_values():

    images = get_pair_images(voxdims=["2", "2"])
    image1 = nibabel.load(images[0])
    image2 = nibabel.load(images[1])
    pdmask = make_binary_deletion_mask([image1, image2])
    assert_equal(numpy.unique(pdmask)[0], 0.0)
    assert_equal(numpy.unique(pdmask)[1], 1.0)
    assert_false(numpy.isnan(pdmask).any())
    assert_false(numpy.isinf(pdmask).any())
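The test snippets above omit their import blocks. A plausible header for running them is sketched below; the pybraincompare paths for get_pair_images and make_binary_deletion_mask match the imports shown in Example #11, while the locations of get_data_directory, calculate_correlation, and the assert helpers are assumptions.

import nibabel
import numpy
from scipy.stats import norm, pearsonr
from numpy.testing import assert_almost_equal, assert_equal  # assumed source of the assert helpers
from nose.tools import assert_false                          # assumed source of the assert helpers
from pybraincompare.mr.datasets import get_data_directory, get_pair_images
from pybraincompare.compare.mrutils import make_binary_deletion_mask
from pybraincompare.compare.maths import calculate_correlation  # assumed module path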
Example #5
def test_binary_deletion_mask():

  mr_directory = get_data_directory()
  standard = "%s/MNI152_T1_8mm_brain_mask.nii.gz" %(mr_directory)
  brain_mask = nibabel.load(standard) 
  unzip = lambda l:tuple(zip(*l))
  
  # We will generate data with the following overlap percentages
  overlap_percents = [0.0,0.25,0.5,0.75,1.0]
  for overlap in overlap_percents:
    image1 = numpy.zeros(brain_mask.shape)
    image2 = numpy.zeros(brain_mask.shape)
    x,y,z = numpy.where(brain_mask.get_data()==1)
    idx = list(zip(x, y, z))
    numpy.random.shuffle(idx) 
    number_voxels = len(idx)
    number_overlap_voxels = int(numpy.floor(overlap*number_voxels))
    remaining_voxels = int(number_voxels - number_overlap_voxels)
    # We break the remaining voxels into 4 groups:
    # - nans that will overlap
    # - zeros that will overlap (no change to images here, already zeros)
    # - nans in image1, random sample of values in image2
    # - zeros in image2, random sample of values in image1 
    group_size = remaining_voxels // 4
    if overlap != 0.0:
      # Here are the overlapping voxels for each image
      overlap_idx = unzip(idx[0:number_overlap_voxels])
      image1[overlap_idx] = 1
      image2[overlap_idx] = 1 
    if overlap != 1.0: 
      # Nans that will overlap
      nans_overlap_idx = unzip(idx[number_overlap_voxels:(number_overlap_voxels+group_size)])
      image1[nans_overlap_idx] = numpy.nan
      image2[nans_overlap_idx] = numpy.nan
      # Nans in image1, random sample of values in image 2
      start = number_overlap_voxels+group_size
      end = number_overlap_voxels+2*group_size
      nans_image1 = idx[start:end]
      values_image2 = unzip(random.sample(nans_image1,int(group_size/2)))
      image1[unzip(nans_image1)] = numpy.nan
      image2[values_image2] = 0.5
      # Zeros in image2, random sample of values in image 1
      start = number_overlap_voxels+2*group_size
      end = number_overlap_voxels+3*group_size
      zeros_image2 = idx[start:end]
      values_image1 = unzip(random.sample(zeros_image2,int(group_size/2)))
      image1[values_image1] = 0.75
    # Create nifti images and pdmask
    nii1 = nibabel.Nifti1Image(image1,affine=brain_mask.get_affine(),header=brain_mask.get_header())
    nii2 = nibabel.Nifti1Image(image2,affine=brain_mask.get_affine(),header=brain_mask.get_header())
    pdmask = make_binary_deletion_mask([nii1,nii2]) 
    actual_overlap = len(numpy.where(pdmask!=0)[0])
    print "Overlap %s percent: should have %s, actual %s" %(overlap,number_overlap_voxels,actual_overlap)
    assert_equal(actual_overlap,number_overlap_voxels)
Example #6
def test_binary_deletion_mask():

  mr_directory = get_data_directory()
  standard = "%s/MNI152_T1_8mm_brain_mask.nii.gz" %(mr_directory)
  brain_mask = nibabel.load(standard) 
  unzip = lambda l:tuple(zip(*l))
  
  # We will generate data with the following overlap percentages
  overlap_percents = [0.0,0.25,0.5,0.75,1.0]
  for overlap in overlap_percents:
    image1 = numpy.zeros(brain_mask.shape)
    image2 = numpy.zeros(brain_mask.shape)
    x,y,z = numpy.where(brain_mask.get_data()==1)
    idx = list(zip(x,y,z))
    numpy.random.shuffle(idx) 
    number_voxels = len(idx)
    number_overlap_voxels = int(numpy.floor(overlap*number_voxels))
    remaining_voxels = int(number_voxels - number_overlap_voxels)
    # We break the remaining voxels into 4 groups:
    # - nans that will overlap
    # - zeros that will overlap (no change to images here, already zeros)
    # - nans in image1, random sample of values in image2
    # - zeros in image2, random sample of values in image1 
    group_size = remaining_voxels // 4
    if overlap != 0.0:
      # Here are the overlapping voxels for each image
      overlap_idx = unzip(idx[0:number_overlap_voxels])
      image1[overlap_idx] = 1
      image2[overlap_idx] = 1 
    if overlap != 1.0: 
      # Nans that will overlap
      nans_overlap_idx = unzip(idx[number_overlap_voxels:(number_overlap_voxels+group_size)])
      image1[nans_overlap_idx] = numpy.nan
      image2[nans_overlap_idx] = numpy.nan
      # Nans in image1, random sample of values in image 2
      start = number_overlap_voxels+group_size
      end = number_overlap_voxels+2*group_size
      nans_image1 = idx[start:end]
      values_image2 = unzip(random.sample(nans_image1, group_size // 2))
      image1[unzip(nans_image1)] = numpy.nan
      image2[values_image2] = 0.5
      # Zeros in image2, random sample of values in image 1
      start = number_overlap_voxels+2*group_size
      end = number_overlap_voxels+3*group_size
      zeros_image2 = idx[start:end]
      values_image1 = unzip(random.sample(zeros_image2, group_size // 2))
      image1[values_image1] = 0.75
    # Create nifti images and pdmask
    nii1 = nibabel.Nifti1Image(image1,affine=brain_mask.get_affine(),header=brain_mask.get_header())
    nii2 = nibabel.Nifti1Image(image2,affine=brain_mask.get_affine(),header=brain_mask.get_header())
    pdmask = make_binary_deletion_mask([nii1,nii2]) 
    actual_overlap = len(numpy.where(pdmask!=0)[0])
    print("Overlap %s percent: should have %s, actual %s" %(overlap,number_overlap_voxels,actual_overlap))
    assert_equal(actual_overlap,number_overlap_voxels)
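For concreteness, here is the bookkeeping for one pass of the loop with a made-up in-mask voxel count (the real 8mm mask size is not shown in the snippet):

number_voxels = 1000                                      # hypothetical in-mask count
overlap = 0.25
number_overlap_voxels = int(overlap * number_voxels)      # 250 voxels set to 1 in both images
remaining_voxels = number_voxels - number_overlap_voxels  # 750 left over
group_size = remaining_voxels // 4                        # 187 voxels per NaN/zero group

Only the 250 deliberately overlapping voxels end up nonzero and finite in both images, which is exactly what assert_equal checks against the deletion mask.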
Example #7
def save_voxelwise_pearson_similarity_resample(pk1,
                                               pk2,
                                               resample_dim=[4, 4, 4]):
    from neurovault.apps.statmaps.models import Similarity, Comparison

    # We will always calculate Comparison 1 vs 2, never 2 vs 1
    if pk1 != pk2:
        sorted_images = get_images_by_ordered_id(pk1, pk2)
        image1 = sorted_images[0]
        image2 = sorted_images[1]
        pearson_metric = Similarity.objects.get(
            similarity_metric="pearson product-moment correlation coefficient",
            transformation="voxelwise")

        # Get standard space brain
        mr_directory = get_data_directory()
        reference = "%s/MNI152_T1_2mm_brain_mask.nii.gz" % (mr_directory)
        image_paths = [image.file.path for image in [image1, image2]]
        images_resamp, _ = resample_images_ref(images=image_paths,
                                               reference=reference,
                                               interpolation="continuous",
                                               resample_dim=resample_dim)
        # resample_images_ref will "squeeze" images, but we should keep error here for now
        for image_nii, image_obj in zip(images_resamp, [image1, image2]):
            if len(numpy.squeeze(image_nii.get_data()).shape) != 3:
                raise Exception(
                    "Image %s (id=%d) has incorrect number of dimensions %s" %
                    (image_obj.name, image_obj.id,
                     str(image_nii.get_data().shape)))

        # Calculate correlation only on voxels that are in both maps (not zero, and not nan)
        image1_res = images_resamp[0]
        image2_res = images_resamp[1]
        binary_mask = make_binary_deletion_mask(images_resamp)
        binary_mask = nib.Nifti1Image(binary_mask,
                                      header=image1_res.get_header(),
                                      affine=image1_res.get_affine())

        # Will return nan if comparison is not possible
        pearson_score = calculate_correlation([image1_res, image2_res],
                                              mask=binary_mask,
                                              corr_type="pearson")

        # Only save comparison if is not nan
        if not numpy.isnan(pearson_score):
            Comparison.objects.update_or_create(
                image1=image1,
                image2=image2,
                similarity_metric=pearson_metric,
                similarity_score=pearson_score)

            return image1.pk, image2.pk, pearson_score
    else:
        raise Exception("You are trying to compare an image with itself!")
Example #8
def save_voxelwise_pearson_similarity_resample(pk1, pk2,resample_dim=[4,4,4]):
    from neurovault.apps.statmaps.models import Similarity, Comparison

    # We will always calculate Comparison 1 vs 2, never 2 vs 1
    if pk1 != pk2:
        try:
            sorted_images = get_images_by_ordered_id(pk1, pk2)
        except Http404:
            # files have been deleted in the meantime
            return
        image1 = sorted_images[0]
        image2 = sorted_images[1]
        pearson_metric = Similarity.objects.get(
                           similarity_metric="pearson product-moment correlation coefficient",
                           transformation="voxelwise")

        # Get standard space brain
        mr_directory = get_data_directory()
        reference = "%s/MNI152_T1_2mm_brain_mask.nii.gz" %(mr_directory)
        image_paths = [image.file.path for image in [image1, image2]]
        images_resamp, _ = resample_images_ref(images=image_paths, 
                                               reference=reference, 
                                               interpolation="continuous",
                                               resample_dim=resample_dim)
        # resample_images_ref will "squeeze" images, but we should keep error here for now
        for image_nii, image_obj in zip(images_resamp, [image1, image2]):
            if len(numpy.squeeze(image_nii.get_data()).shape) != 3:
                raise Exception("Image %s (id=%d) has incorrect number of dimensions %s"%(image_obj.name, 
                                                                                          image_obj.id, 
                                                                                          str(image_nii.get_data().shape)))

        # Calculate correlation only on voxels that are in both maps (not zero, and not nan)
        image1_res = images_resamp[0]
        image2_res = images_resamp[1]
        binary_mask = make_binary_deletion_mask(images_resamp)
        binary_mask = nib.Nifti1Image(binary_mask,header=image1_res.get_header(),affine=image1_res.get_affine())

        # Will return nan if comparison is not possible
        pearson_score = calculate_correlation([image1_res,image2_res],mask=binary_mask,corr_type="pearson")

        # Only save comparison if is not nan
        if not numpy.isnan(pearson_score):     
            Comparison.objects.update_or_create(image1=image1, image2=image2,
                                            similarity_metric=pearson_metric,
                                            similarity_score=pearson_score)

            return image1.pk,image2.pk,pearson_score
    else:
        raise Exception("You are trying to compare an image with itself!")
Example #9
# Calculate image similarity with pearson correlation
# Feasible to run in serial for small number of images
print "Calculating spatial image similarity with pearson score, complete case analysis (set of overlapping voxels) for pairwise images..."
image_ids = images.image_id.tolist()
simmatrix = pandas.DataFrame(columns=image_ids, index=image_ids)
for id1 in image_ids:
    print "Processing %s..." % id1
    mr1_id = pad_zeros(id1)
    mr1_path = "%s/resampled_z/%s.nii.gz" % (data, mr1_id)
    mr1 = nibabel.load(mr1_path)
    for id2 in image_ids:
        mr2_id = pad_zeros(id2)
        mr2_path = "%s/resampled_z/%s.nii.gz" % (data, mr2_id)
        mr2 = nibabel.load(mr2_path)
        # Make a pairwise deletion / complete case analysis mask
        pdmask = make_binary_deletion_mask([mr1, mr2])
        pdmask = nibabel.Nifti1Image(pdmask, affine=standard.get_affine())
        score = calculate_correlation([mr1, mr2], mask=pdmask)
        simmatrix.loc[id1, id2] = score
        simmatrix.loc[id2, id1] = score

simmatrix.to_csv("%s/contrast_defined_images_pearsonpd_similarity.tsv" % results, sep="\t")

# Finally, resample images to 4mm voxel for classification analysis
outfolder_z4mm = "%s/resampled_z_4mm" % (data)
if not os.path.exists(outfolder_z4mm):
    os.mkdir(outfolder_z4mm)

maps = glob("%s/*.nii.gz" % outfolder_z)
for mr in maps:
    image_name = os.path.basename(mr)
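The double loop above fills both halves of the symmetric matrix, so every pair is correlated twice. A sketch of one way to halve the work, reusing the snippet's own helpers and variables (it assumes the same surrounding script context):

from itertools import combinations_with_replacement

for id1, id2 in combinations_with_replacement(image_ids, 2):
    mr1 = nibabel.load("%s/resampled_z/%s.nii.gz" % (data, pad_zeros(id1)))
    mr2 = nibabel.load("%s/resampled_z/%s.nii.gz" % (data, pad_zeros(id2)))
    pdmask = make_binary_deletion_mask([mr1, mr2])
    pdmask = nibabel.Nifti1Image(pdmask, affine=standard.get_affine())
    score = calculate_correlation([mr1, mr2], mask=pdmask)
    simmatrix.loc[id1, id2] = score
    simmatrix.loc[id2, id1] = score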
Example #10

count = 0
for neurosynth_map in neurosynth_maps:
    concept_name = os.path.basename(neurosynth_map).replace(
        "_regparams.nii.gz", "")
    concept = get_concept(name=concept_name).json[0]
    neurovault_map = "%s/results/classification_final/%s_regparam_z.nii.gz" % (
        base, concept["id"])
    if neurovault_map in neurovault_maps:
        print "Found match for %s" % (concept_name)
        nsmap = nibabel.load(neurosynth_map)
        nvmap = nibabel.load(neurovault_map)
        score = calculate_correlation([nsmap, nvmap], mask=standard_mask)
        # Let's also calculate just for overlapping voxels
        cca_mask = make_binary_deletion_mask([nsmap, nvmap])
        nvoxels = len(cca_mask[cca_mask != 0])
        cca_mask = nibabel.Nifti1Image(cca_mask,
                                       affine=standard_mask.get_affine())
        cca_score = calculate_correlation([nsmap, nvmap], mask=cca_mask)
        # And finally, since we see consistent size of cca mask (meaning not a lot of zeros) let's
        # try thresholding at +/- 1. The nsmap needs to be converted to z score
        image_df = get_images_df([nsmap, nvmap], mask=standard_mask)
        image_df.loc[0] = (image_df.loc[0] -
                           image_df.loc[0].mean()) / image_df.loc[0].std()
        nsmap_thresh = make_brainmap(image_df.loc[0], standard_mask)
        nsmap_thresh = apply_threshold(nsmap_thresh, 1.0)
        nvmap_thresh = make_brainmap(image_df.loc[1], standard_mask)
        nvmap_thresh = apply_threshold(nvmap_thresh, 1.0)
        cca_mask_thresh = make_binary_deletion_mask(
            [nsmap_thresh, nvmap_thresh])
Example #11
# READ IN DATA, APPLY BRAIN MASK, GENERATE VECTORS

import nibabel
import numpy
import pandas
from pybraincompare.mr.datasets import get_mni_atlas, get_pair_images
from pybraincompare.compare.mrutils import get_standard_mask, make_binary_deletion_mask, resample_images_ref
from pybraincompare.compare.maths import calculate_atlas_correlation
from nilearn.masking import apply_mask

image1,image2 = get_pair_images(voxdims=["2","2"])
image1 = nibabel.load(image1)
image2 = nibabel.load(image2)
brain_mask  = nibabel.load(get_standard_mask("FSL"))
atlas = get_mni_atlas("2")["2"]
pdmask = make_binary_deletion_mask([image1,image2])

# Combine the pdmask and the brainmask
mask = numpy.logical_and(pdmask,brain_mask.get_data())
mask = nibabel.nifti1.Nifti1Image(mask,affine=brain_mask.get_affine(),header=brain_mask.get_header())

# Resample images to mask
images_resamp, ref_resamp = resample_images_ref([image1,image2],
                                                mask,
                                                interpolation="continuous")

image1 = images_resamp[0].get_data()
image2 = images_resamp[1].get_data()
# We will save the x and y (which are x and z coordinates) in a data frame
# Right now we will save unique colors for images, in future should save value
# that represents similarity / difference
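The script imports apply_mask from nilearn but never uses it in the shown portion. For reference, the combined mask could flatten both resampled images to in-mask vectors along these lines (an illustration, not a copy of the elided continuation):

from nilearn.masking import apply_mask

# Flatten both resampled images to 1D vectors over the combined mask.
data_matrix = apply_mask(images_resamp, mask)  # shape: (2, number of in-mask voxels)
vector1, vector2 = data_matrix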
Example #12
# Calculate image similarity with pearson correlation
# Feasible to run in serial for small number of images
print "Calculating spatial image similarity with pearson score, complete case analysis (set of overlapping voxels) for pairwise images..."
image_ids = images.image_id.tolist()
simmatrix = pandas.DataFrame(columns=image_ids,index=image_ids)
for id1 in image_ids:
    print "Processing %s..." %id1
    mr1_id = pad_zeros(id1)
    mr1_path = "%s/resampled_z/%s.nii.gz" %(data,mr1_id)
    mr1 = nibabel.load(mr1_path)
    for id2 in image_ids:
        mr2_id = pad_zeros(id2)
        mr2_path = "%s/resampled_z/%s.nii.gz" %(data,mr2_id)
        mr2 = nibabel.load(mr2_path)
        # Make a pairwise deletion / complete case analysis mask
        pdmask = make_binary_deletion_mask([mr1,mr2])
        pdmask = nibabel.Nifti1Image(pdmask,affine=standard.get_affine())
        score = calculate_correlation([mr1,mr2],mask=pdmask)
        simmatrix.loc[id1,id2] = score
        simmatrix.loc[id2,id1] = score

simmatrix.to_csv("%s/contrast_defined_images_pearsonpd_similarity.tsv" %results,sep="\t")

# Finally, resample images to 4mm voxel for classification analysis
outfolder_z4mm = "%s/resampled_z_4mm" %(data)
if not os.path.exists(outfolder_z4mm):
    os.mkdir(outfolder_z4mm)

maps = glob("%s/*.nii.gz" %outfolder_z)
for mr in maps:
    image_name = os.path.basename(mr)
# Helper to map a masked vector back into a standard-space image
def make_brainmap(vector, standard_mask):
    zeros = numpy.zeros(standard_mask.shape)
    zeros[standard_mask.get_data() != 0] = vector
    return nibabel.Nifti1Image(zeros, affine=standard_mask.get_affine())


count=0
for neurosynth_map in neurosynth_maps:
    concept_name = os.path.basename(neurosynth_map).replace("_regparams.nii.gz","")
    concept = get_concept(name=concept_name).json[0]
    neurovault_map = "%s/results/classification_final/%s_regparam_z.nii.gz" %(base,concept["id"])
    if neurovault_map in neurovault_maps:
        print "Found match for %s" %(concept_name)
        nsmap = nibabel.load(neurosynth_map)
        nvmap = nibabel.load(neurovault_map)
        score = calculate_correlation([nsmap,nvmap],mask=standard_mask)
        # Let's also calculate just for overlapping voxels
        cca_mask = make_binary_deletion_mask([nsmap,nvmap])
        nvoxels = len(cca_mask[cca_mask!=0])
        cca_mask = nibabel.Nifti1Image(cca_mask,affine=standard_mask.get_affine())
        cca_score = calculate_correlation([nsmap,nvmap],mask=cca_mask)        
        # And finally, since we see consistent size of cca mask (meaning not a lot of zeros) let's
        # try thresholding at +/- 1. The nsmap needs to be converted to z score
        image_df = get_images_df([nsmap,nvmap],mask=standard_mask)
        image_df.loc[0] = (image_df.loc[0] - image_df.loc[0].mean()) / image_df.loc[0].std()
        nsmap_thresh = make_brainmap(image_df.loc[0],standard_mask)
        nsmap_thresh = apply_threshold(nsmap_thresh,1.0)
        nvmap_thresh = make_brainmap(image_df.loc[1],standard_mask)
        nvmap_thresh = apply_threshold(nvmap_thresh,1.0)
        cca_mask_thresh = make_binary_deletion_mask([nsmap_thresh,nvmap_thresh])
        nvoxels_thresh = len(cca_mask_thresh[cca_mask_thresh!=0])
        cca_mask_thresh = nibabel.Nifti1Image(cca_mask_thresh,affine=standard_mask.get_affine())                
        cca_score_thresh = calculate_correlation([nsmap_thresh,nvmap_thresh],mask=cca_mask_thresh)
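apply_threshold is used above to zero out sub-threshold voxels before rebuilding the deletion mask. A hedged numpy-only equivalent of what the "+/- 1" comment describes (an assumption about the behavior, not the pybraincompare implementation):

import numpy
import nibabel

def apply_threshold_sketch(img, thresh):
    # Keep voxels whose absolute value is at least thresh; zero everything else.
    # Assumed to mirror the two-sided thresholding described in the comment above.
    data = img.get_data().copy()
    data[numpy.abs(data) < thresh] = 0
    return nibabel.Nifti1Image(data, affine=img.get_affine(), header=img.get_header())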