def test_masked_transformation():
    image1 = nibabel.load(get_pair_images()[0])
    brain_4mm = get_standard_mask(4)
    nonzero_voxels = brain_4mm.get_data()[brain_4mm.get_data() != 0].shape[0]
    image1_vector = make_resampled_transformation_vector(
        image1, resample_dim=[4, 4, 4], standard_mask=True)
    assert_equal(nonzero_voxels, len(image1_vector))

    brain_8mm = get_standard_mask(8)
    nonzero_voxels = brain_8mm.get_data()[brain_8mm.get_data() != 0].shape[0]
    image1_vector = make_resampled_transformation_vector(
        image1, resample_dim=[8, 8, 8], standard_mask=True)
    assert_equal(nonzero_voxels, len(image1_vector))
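The invariant these tests rely on, one vector entry per nonzero voxel of the standard mask, can be checked directly against the mask itself. A minimal sketch, assuming only pybraincompare's get_standard_mask (the commented image is hypothetical):

from pybraincompare.mr.datasets import get_standard_mask

mask = get_standard_mask(4)
# Every masked vector in this space has exactly one entry per voxel
# inside the brain mask, so this count is the expected vector length.
n_mask_voxels = int((mask.get_data() != 0).sum())

# vector = make_resampled_transformation_vector(some_image, [4, 4, 4], True)
# assert len(vector) == n_mask_voxels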
Example #3
def make_resampled_transformation(nii_obj, resample_dim=[4, 4, 4], standard_mask=True):

    nii_obj = get_nii_obj(nii_obj)[0]

    # To set 0s to nan, we need float64 data (the numpy.zeros default)
    true_zeros = numpy.zeros(nii_obj.shape)
    true_zeros[:] = nii_obj.get_data()
    true_zeros[true_zeros == 0] = numpy.nan

    # Wrap the data as a nifti image so it can be resampled; nans survive
    # the interpolation, so true zeros are not blurred into the brain
    true_zeros = nib.nifti1.Nifti1Image(true_zeros, affine=nii_obj.get_affine())

    # Standard brain masking
    if standard_mask:
        standard = get_standard_mask(voxdim=resample_dim[0])
        true_zeros = resample_img(true_zeros,
                                  target_affine=standard.get_affine(),
                                  target_shape=standard.shape)

        # Mask the image
        masked_true_zeros = numpy.zeros(true_zeros.shape)
        masked_true_zeros[standard.get_data() != 0] = true_zeros.get_data()[standard.get_data() != 0]
        true_zeros = nib.nifti1.Nifti1Image(masked_true_zeros, affine=true_zeros.get_affine())

    # or just resample, when any target voxel dimension differs from the current one
    else:
        if (resample_dim != numpy.diag(true_zeros.get_affine())[0:3]).any():
            true_zeros = resample_img(true_zeros, target_affine=numpy.diag(resample_dim))

    return true_zeros
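The zeros-to-NaN substitution exists so that interpolation does not blur true background zeros into brain voxels during resampling. A self-contained sketch of that behavior with nilearn's resample_img, on synthetic data (no pybraincompare needed; voxel sizes are illustrative):

import numpy
import nibabel as nib
from nilearn.image import resample_img

# Synthetic 2mm volume: a block of signal surrounded by NaN background,
# mimicking the zeros-to-nan substitution above.
data = numpy.full((20, 20, 20), numpy.nan)
data[5:15, 5:15, 5:15] = 1.0
img = nib.Nifti1Image(data, affine=numpy.diag([2.0, 2.0, 2.0, 1.0]))

# Resample to 4mm: the background stays NaN rather than being averaged
# toward zero, which is exactly why the substitution is done.
resampled = resample_img(img, target_affine=numpy.diag([4.0, 4.0, 4.0]))
print(numpy.isnan(resampled.get_fdata()).any())  # True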
Example #4
def make_resampled_transformation_vector(nii_obj, resample_dim=[4, 4, 4], standard_mask=True):

    resamp_nii = make_resampled_transformation(nii_obj, resample_dim, standard_mask)
    if standard_mask:
        standard = get_standard_mask(voxdim=resample_dim[0])
        return resamp_nii.get_data()[standard.get_data() != 0]
    else:
        return resamp_nii.get_data().flatten()
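Inverting the masked path is plain index assignment; a minimal sketch of rebuilding a volume from a masked vector (the vector line is hypothetical):

import numpy
from pybraincompare.mr.datasets import get_standard_mask

mask = get_standard_mask(voxdim=4)
brain = mask.get_data() != 0

# vector = make_resampled_transformation_vector(img, [4, 4, 4], True)
# volume = numpy.zeros(mask.shape)
# volume[brain] = vector  # voxels outside the mask stay zero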
Example #6
def test_unmasked_transformation():
    image1 = nibabel.load(get_pair_images()[0])
    nonzero_voxels = len(image1.get_data().flatten())
    image1_vector = make_resampled_transformation_vector(
        image1, resample_dim=[2, 2, 2], standard_mask=False)
    assert_equal(nonzero_voxels, len(image1_vector))

    brain_4mm = get_standard_mask(4)
    nonzero_voxels = len(brain_4mm.get_data().flatten())
    image1_vector = make_resampled_transformation_vector(
        image1, resample_dim=[4, 4, 4], standard_mask=False)
    assert_equal(nonzero_voxels, len(image1_vector))
Example #7
download_dir = "/ahba_data"
os.makedirs(download_dir)

for i, url in enumerate(urls):
    print "Downloading %s" % url
    urllib.urlretrieve(url, os.path.join(download_dir, "donor%d.zip" % (i + 1)))
    # Unpack the archive (constructing a ZipFile alone does not extract it)
    zipfile.ZipFile(os.path.join(download_dir,
                                 "donor%d.zip" % (i + 1))).extractall(download_dir)

# Downloading MNI coordinates
urllib.urlretrieve(
    "https://raw.githubusercontent.com/chrisfilo/alleninf/master/alleninf/data/corrected_mni_coordinates.csv",
    os.path.join(download_dir, "corrected_mni_coordinates.csv"))

samples = pd.read_csv(os.path.join(download_dir, "corrected_mni_coordinates.csv"), index_col=0)

mni = get_standard_mask(voxdim=4)

reduced_coord = []

for coord_mni in samples[['corrected_mni_x', 'corrected_mni_y', 'corrected_mni_z']].values:
    sample_counts = np.zeros(mni.shape, dtype=int)
    coord_data = [int(round(i)) for i in nb.affines.apply_affine(npl.inv(mni.get_affine()), coord_mni)]
    sample_counts[coord_data[0],
                  coord_data[1],
                  coord_data[2]] = 1
    out_vector = sample_counts[mni.get_data() != 0]
    idx = out_vector.argmax()
    # argmax of an all-zero vector is 0, so idx == 0 combined with no 1s
    # in the vector means the sample fell outside the mask: mark it missing
    if idx == (out_vector == 1.0).sum() == 0:
        idx = np.nan
    reduced_coord.append(idx)
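The coordinate lookup above maps mm-space (MNI) coordinates to voxel indices with the inverse affine. A standalone sketch with an illustrative 4mm MNI-style affine (the affine values are assumptions, not the actual mask's header):

import numpy as np
import numpy.linalg as npl
import nibabel as nb

# Illustrative 4mm affine: scale by voxel size, translate to the MNI origin.
affine = np.array([[-4., 0., 0., 90.],
                   [0., 4., 0., -126.],
                   [0., 0., 4., -72.],
                   [0., 0., 0., 1.]])

# Map an MNI coordinate (x, y, z) in mm to voxel indices (i, j, k).
coord_mni = [10.0, -20.0, 30.0]
ijk = nb.affines.apply_affine(npl.inv(affine), coord_mni)
voxel = [int(round(v)) for v in ijk]
print(voxel)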
Example #8
base = sys.argv[1]
data = "%s/data" % base
node_folder = "%s/groups" % data
results = "%s/results" % base  # any kind of tsv/result file
decode_folder = "%s/decode" % base

if not os.path.exists(decode_folder):
    os.mkdir(decode_folder)

# Images by Concepts data frame
labels_tsv = "%s/images_contrasts_df.tsv" % results
images = pandas.read_csv(labels_tsv, sep="\t", index_col=0)
output_folder = "%s/classification_final" % results

# Get standard mask, 4mm
standard_mask = get_standard_mask(4)

# Get all cognitive atlas concepts
all_concepts = get_concept().json
concepts = dict()
for concept in all_concepts:
    concepts[concept["id"]] = str(concept["name"])

# You will need to copy abstracts.txt into this folder from the repo
abstracts = pandas.read_csv("%s/abstracts.txt" % decode_folder,
                            sep="\t",
                            index_col=0,
                            header=None)
abstracts.columns = ["text"]

# Here we can produce a similarity matrix to compare images A and B

import pandas
import nibabel
from glob import glob
from pybraincompare.mr.datasets import get_standard_mask
from nilearn.masking import apply_mask
from scipy.stats import pearsonr

standard = get_standard_mask()

amaps = glob("*groupA*.nii.gz")
bmaps = glob("*groupB*.nii.gz")

# Compare all A maps vs A maps, and all B maps vs B maps
asim = pandas.DataFrame(index=amaps, columns=amaps)
bsim = pandas.DataFrame(index=bmaps, columns=bmaps)

for i in range(len(amaps)):
    print "Processing %s of %s" % (i, len(amaps))
    # Load and mask the i-th maps once, outside the inner loop
    amapi = nibabel.load(amaps[i])
    bmapi = nibabel.load(bmaps[i])
    vectorai = apply_mask(amapi, standard)
    vectorbi = apply_mask(bmapi, standard)
    for j in range(len(amaps)):
        vectoraj = apply_mask(nibabel.load(amaps[j]), standard)
        vectorbj = apply_mask(nibabel.load(bmaps[j]), standard)
        asim.loc[amaps[i], amaps[j]] = pearsonr(vectorai, vectoraj)[0]
        bsim.loc[bmaps[i], bmaps[j]] = pearsonr(vectorbi, vectorbj)[0]
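The nested pearsonr loop re-masks every image n times. Once the masked vectors are stacked, numpy gives the same Pearson matrix in one call; an equivalent vectorized sketch, reusing standard and amaps from above:

import numpy
import nibabel
import pandas
from nilearn.masking import apply_mask

# Stack all masked A-map vectors into one (n_maps, n_voxels) matrix.
vectors = numpy.vstack([apply_mask(nibabel.load(m), standard) for m in amaps])

# corrcoef over rows is the full pairwise Pearson similarity matrix;
# the same two lines with bmaps reproduce bsim.
asim = pandas.DataFrame(numpy.corrcoef(vectors), index=amaps, columns=amaps)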
labels_tsv = sys.argv[3]
image_lookup = sys.argv[4]

# image_pairs should be a string of image pairs, e.g. "1|2,3|4", covering
# about 2000 (half) of the image pairs. We calculate an accuracy across
# the image set and build a null distribution by repeating this procedure
# many times.
image_pairs = image_pairs.split(",")
total = 0
correct = 0

# Images by Concept data frame, our X
X = pandas.read_csv(labels_tsv, sep="\t", index_col=0)

# Get standard mask, 4mm
standard_mask = get_standard_mask(4)

# Dictionary to look up image files (4mm)
lookup = pickle.load(open(image_lookup, "rb"))

concepts = X.columns.tolist()

# We will go through each voxel (column) in a data frame of image data
image_paths = lookup.values()
mr = get_images_df(file_paths=image_paths, mask=standard_mask)
image_ids = [int(os.path.basename(x).split(".")[0]) for x in image_paths]
mr.index = image_ids

# Later, inside the per-group loop, the data frame is rebuilt from that
# group's in/out images ("group" is defined by the surrounding loop, not
# shown in this excerpt)
mr = get_images_df(file_paths=group["in"] + group["out"], mask=standard_mask)
image_paths = group["in"] + group["out"]
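For reference, the "1|2,3|4" pair encoding described in the comments unpacks into integer tuples in one line; a minimal sketch with made-up IDs:

# Hypothetical input in the documented "1|2,3|4" format
raw = "123|456,789|1011"
pairs = [tuple(int(i) for i in p.split("|")) for p in raw.split(",")]
print(pairs)  # [(123, 456), (789, 1011)]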
base = sys.argv[1]
data = "%s/data" % base
node_folder = "%s/groups" % data
results = "%s/results" % base  # any kind of tsv/result file
decode_folder = "%s/decode" % base

if not os.path.exists(decode_folder):
    os.mkdir(decode_folder)

# Images by Concepts data frame
labels_tsv = "%s/images_contrasts_df.tsv" % results
images = pandas.read_csv(labels_tsv, sep="\t", index_col=0)
output_folder = "%s/classification_final" % results

# Get standard mask, 4mm
standard_mask = get_standard_mask(4)

# Load the regression params data frame
result = pickle.load(open("%s/regression_params_dfs.pkl" % output_folder, "rb"))

all_concepts = get_concept().json
concepts = dict()
for concept in all_concepts:
    concepts[concept["id"]] = str(concept["name"])

# You will need to copy abstracts.txt into this folder from the repo
abstracts = pandas.read_csv("%s/abstracts.txt" % decode_folder,
                            sep="\t",
                            index_col=0,
                            header=None)
abstracts.columns = ["text"]

# Functions to parse text
def remove_nonenglish_chars(text):
    return re.sub("[^a-zA-Z]", " ", text)
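A quick usage example of the cleaning function on an abstract-like string (assumes re has been imported earlier in the script):

print(remove_nonenglish_chars("fMRI task-based decoding, N=25 subjects!"))
# -> "fMRI task based decoding  N    subjects "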