Example #1
def test_histogram_output():
    image = get_pair_images()[0]
    html_snippet = plot_histogram(image, view_in_browser=False)
    html_snippet = "".join(html_snippet)

    # This is the output we should get
    expected_output = '<script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/1.0.2/Chart.min.js"></script>\n\n<!-- Image Histogram-->\n<div id = "imagehistogram" class="span12 widget blue" onTablet="span11" onDesktop="span12">\n<div class="clearfix"></div><br>\n<canvas id="histogram" width="1000" height="400"></canvas>\n    <script>\n\tvar ctx = document.getElementById("histogram").getContext("2d");\n        var data = {\n    \tlabels: ["-3.68","-3.38","-3.09","-2.79","-2.49","-2.19","-1.90","-1.60","-1.30","-1.00","-0.71","-0.41","-0.11","0.19","0.48","0.78","1.08","1.38","1.67","1.97","2.27","2.57","2.86","3.16","3.46","3.76"],\n\t\twidth: 400,\n   \t \tdatasets: [\n        \t{\n            \t    label: "Image Histogram",\n            \t    fillColor: "rgba(220,220,220,0.5)",\n            \t    strokeColor: "rgba(220,220,220,0.8)",\n            \t    highlightFill: "rgba(220,220,220,0.75)",\n            \t    highlightStroke: "rgba(220,220,220,1)",\n            \t    data: ["12","61","259","628","1248","2221","3553","4935","6394","8883","13358","20249","34123","20216","14296","10137","7003","4402","2532","1245","612","230","50","16","7"]\n                }]\n\t};\n        histogram = new Chart(ctx).Bar(data, { multiTooltipTemplate: "<%= datasetLabel %> - <%= value %>", scaleFontColor: "#000"});    \t\t\t\t\t\n    </script>\n</div>\n'

    assert_equal(expected_output, html_snippet)
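
# The assertion above pins the complete Chart.js 1.0.2 template, bin labels and
# counts included, so any change to the histogram markup fails the test. A looser
# variant (a sketch, not part of the original suite) could check only the key
# fragments; assert_true is assumed to be available alongside assert_equal and
# assert_false (nose.tools provides all three).
def test_histogram_output_fragments():
    image = get_pair_images()[0]
    html_snippet = "".join(plot_histogram(image, view_in_browser=False))
    assert_true('Chart.min.js' in html_snippet)
    assert_true('<canvas id="histogram"' in html_snippet)
    assert_true('label: "Image Histogram"' in html_snippet)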
Example #3
def test_binary_deletion_mask_values():

    images = get_pair_images(voxdims=["2", "2"])
    image1 = nibabel.load(images[0])
    image2 = nibabel.load(images[1])
    pdmask = make_binary_deletion_mask([image1, image2])
    assert_equal(numpy.unique(pdmask)[0], 0.0)
    assert_equal(numpy.unique(pdmask)[1], 1.0)
    assert_false(numpy.isnan(pdmask).any())
    assert_false(numpy.isinf(pdmask).any())
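
# The test above fixes the contract of make_binary_deletion_mask: the returned
# array contains only 0.0 and 1.0, with no NaN or Inf. A minimal sketch of that
# idea (not necessarily pybraincompare's implementation): keep only the voxels
# that are finite and nonzero in every input image.
def binary_deletion_mask_sketch(images):
    mask = numpy.ones(images[0].get_data().shape)
    for image in images:
        data = image.get_data()
        mask = mask * (numpy.isfinite(data) & (data != 0))
    return mask  # float array containing only 0.0 and 1.0, no NaN or Inf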
Example #5
def test_unmasked_transformation():
    image1 = nibabel.load(get_pair_images()[0])
    nonzero_voxels = len(image1.get_data().flatten())
    image1_vector = make_resampled_transformation_vector(image1,resample_dim=[2,2,2],standard_mask=False)
    assert_equal(nonzero_voxels,len(image1_vector))

    brain_4mm = get_standard_mask(4)
    nonzero_voxels = len(brain_4mm.get_data().flatten())
    image1_vector = make_resampled_transformation_vector(image1,resample_dim=[4,4,4],standard_mask=False)
    assert_equal(nonzero_voxels,len(image1_vector))
Example #6
def test_masked_transformation():
    image1 = nibabel.load(get_pair_images()[0])
    brain_4mm = get_standard_mask(4)
    nonzero_voxels = brain_4mm.get_data()[brain_4mm.get_data()!=0].shape[0]
    image1_vector = make_resampled_transformation_vector(image1,resample_dim=[4,4,4],standard_mask=True)
    assert_equal(nonzero_voxels,len(image1_vector))

    brain_8mm = get_standard_mask(8)
    nonzero_voxels = brain_8mm.get_data()[brain_8mm.get_data()!=0].shape[0]
    image1_vector = make_resampled_transformation_vector(image1,resample_dim=[8,8,8],standard_mask=True)
    assert_equal(nonzero_voxels,len(image1_vector))
Example #7
def test_unmasked_transformation():
    image1 = nibabel.load(get_pair_images()[0])
    nonzero_voxels = len(image1.get_data().flatten())
    image1_vector = make_resampled_transformation_vector(
        image1, resample_dim=[2, 2, 2], standard_mask=False)
    assert_equal(nonzero_voxels, len(image1_vector))

    brain_4mm = get_standard_brain(4)
    nonzero_voxels = len(brain_4mm.get_data().flatten())
    image1_vector = make_resampled_transformation_vector(
        image1, resample_dim=[4, 4, 4], standard_mask=False)
    assert_equal(nonzero_voxels, len(image1_vector))
Example #8
def test_masked_transformation():
    image1 = nibabel.load(get_pair_images()[0])
    brain_4mm = get_standard_brain(4)
    nonzero_voxels = brain_4mm.get_data()[brain_4mm.get_data() != 0].shape[0]
    image1_vector = make_resampled_transformation_vector(
        image1, resample_dim=[4, 4, 4], standard_mask=True)
    assert_equal(nonzero_voxels, len(image1_vector))

    brain_8mm = get_standard_brain(8)
    nonzero_voxels = brain_8mm.get_data()[brain_8mm.get_data() != 0].shape[0]
    image1_vector = make_resampled_transformation_vector(
        image1, resample_dim=[8, 8, 8], standard_mask=True)
    assert_equal(nonzero_voxels, len(image1_vector))
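
# Taken together, Examples #7 and #8 pin down the contract of
# make_resampled_transformation_vector: with standard_mask=False the vector has
# one entry per voxel of the volume at the requested resolution, and with
# standard_mask=True only the nonzero voxels of the standard brain survive.
# A rough sketch of that behaviour, assuming get_standard_brain(voxdim) returns
# a nibabel image as in the tests above (this is not the library's own code):
from nilearn.image import resample_img

def resampled_transformation_vector_sketch(image, resample_dim, standard_mask=True):
    # Resample onto the standard grid at the requested voxel size
    brain = get_standard_brain(resample_dim[0])
    resampled = resample_img(image,
                             target_affine=brain.get_affine(),
                             target_shape=brain.get_data().shape)
    if standard_mask:
        return resampled.get_data()[brain.get_data() != 0]  # in-brain voxels only
    return resampled.get_data().flatten()  # every voxel in the volume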
Example #9
# Create a scatterplot from two brain images
from pybraincompare.mr.datasets import get_pair_images, get_mni_atlas
from pybraincompare.compare.mrutils import resample_images_ref, get_standard_brain, get_standard_mask, do_mask
from pybraincompare.compare.maths import calculate_correlation
from pybraincompare.compare import scatterplot, atlas as Atlas
from pybraincompare.template.visual import view
from nilearn.image import resample_img
import numpy
import nibabel

# SCATTERPLOT COMPARE ---- (with nifti input) ---------------------------------------------------

# Images that we want to compare - they must be in MNI space
image_names = ["image 1","image 2"]
images = get_pair_images(voxdims=["2","8"])

html_snippet,data_table = scatterplot.scatterplot_compare(images=images,
                                                     image_names=image_names,
                                                     corr_type="pearson") 
view(html_snippet)

# RESAMPLING IMAGES -----------------------------------------------------------------------------

# If you use your own standard brain (the "reference" argument), we recommend resampling to an 8mm voxel size
# Here you get back both the resampled images and the resampled mask
reference = nibabel.load(get_standard_brain("FSL"))
images_resamp,ref_resamp = resample_images_ref(images,
                                               reference=reference,
                                               interpolation="continuous",
                                               resample_dim=[8,8,8])
# - the first layer will show where the images overlap
# - the second layer will be where image 1 has voxels and image 2 does not
# - the third layer will be where image 2 has voxels and image 1 does not
# - the last layer will be a brain atlas?
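# A hypothetical sketch of those first three layers as boolean maps, built from
# the resampled images returned above (not a specific pybraincompare call):
voxels1 = images_resamp[0].get_data() != 0
voxels2 = images_resamp[1].get_data() != 0
overlap_layer = numpy.logical_and(voxels1, voxels2)   # both images have voxels
image1_only = numpy.logical_and(voxels1, ~voxels2)    # image 1 only
image2_only = numpy.logical_and(voxels2, ~voxels1)    # image 2 only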

Example #10
# READ IN DATA, APPLY BRAIN MASK, GENERATE VECTORS

import nibabel
import numpy
import pandas
from pybraincompare.mr.datasets import get_mni_atlas, get_pair_images
from pybraincompare.compare.mrutils import get_standard_mask, make_binary_deletion_mask, resample_images_ref
from pybraincompare.compare.maths import calculate_atlas_correlation
from nilearn.masking import apply_mask

image1,image2 = get_pair_images(voxdims=["2","2"])
image1 = nibabel.load(image1)
image2 = nibabel.load(image2)
brain_mask = nibabel.load(get_standard_mask("FSL"))
atlas = get_mni_atlas("2")["2"]
pdmask = make_binary_deletion_mask([image1,image2])

# Combine the pdmask and the brainmask
mask = numpy.logical_and(pdmask,brain_mask.get_data())
mask = nibabel.nifti1.Nifti1Image(mask,affine=brain_mask.get_affine(),header=brain_mask.get_header())

# Resample images to mask
images_resamp, ref_resamp = resample_images_ref([image1,image2],
                                                mask,
                                                interpolation="continuous")
Example #11
#!/usr/bin/python

from pybraincompare.report.histogram import plot_histogram
from pybraincompare.mr.datasets import get_pair_images

image = get_pair_images()[0]
plot_histogram(image)
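
# plot_histogram(image) renders the histogram and, judging from Example #1's use
# of view_in_browser=False, opens it in the browser by default. The snippet can
# also be captured instead of viewed, for example to embed it in a report; a
# sketch using the same view_in_browser argument shown there:
html_snippet = "".join(plot_histogram(image, view_in_browser=False))
with open("histogram.html", "w") as output_file:
    output_file.write(html_snippet)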
Example #12
# Create a scatterplot from two brain images
from pybraincompare.mr.datasets import get_pair_images, get_mni_atlas
from pybraincompare.compare.mrutils import resample_images_ref, get_standard_brain, get_standard_mask, do_mask
from pybraincompare.compare.maths import calculate_correlation
from pybraincompare.compare import scatterplot, atlas as Atlas
from pybraincompare.template.visual import view
from nilearn.image import resample_img
import numpy
import nibabel

# SCATTERPLOT COMPARE ---- (with nifti input) ---------------------------------------------------

# Images that we want to compare - they must be in MNI space
image_names = ["image 1", "image 2"]
images = get_pair_images(voxdims=["2", "8"])

html_snippet, data_table = scatterplot.scatterplot_compare(
    images=images, image_names=image_names, corr_type="pearson")
view(html_snippet)

# RESAMPLING IMAGES -----------------------------------------------------------------------------

# If you use your own standard brain (the "reference" argument), we recommend resampling to an 8mm voxel size
# Here you get back both the resampled images and the resampled mask
reference = nibabel.load(get_standard_brain("FSL"))
images_resamp, ref_resamp = resample_images_ref(images,
                                                reference=reference,
                                                interpolation="continuous",
                                                resample_dim=[8, 8, 8])

# SCATTERPLOT COMPARE ---- (with vector input) ---------------------------------------------------