from os import makedirs, system
from os.path import isfile, join
from shutil import copyfile

from nilearn.masking import compute_background_mask


def bstsvreg(sub):
    # perform BrainSuite and SVReg processing for one HCP subject
    subdir = join('/big_disk/ajoshi/coding_ground/pvcthickness/HCP_data/', sub)
    t1file = join('/data_disk/HCP_All', sub, 'T1w',
                  'T1w_acpc_dc_restore_brain.nii.gz')
    if not isfile(t1file):
        return

    outt1 = join(subdir, 't1.nii.gz')

    if isfile(join(subdir, 't1.roiwise.stats.txt')):
        return

    makedirs(subdir, exist_ok=True)

    # compute 1mm downsampled image
    system('flirt -in ' + t1file + ' -ref ' + t1file + ' -out ' + outt1 +
           ' -applyisoxfm 1 >/dev/null 2>&1')

    # generate mask
    msk = compute_background_mask(outt1)
    msk.to_filename(join(subdir, 't1.mask.nii.gz'))

    # make a copy of the original image
    copyfile(outt1, join(subdir, 't1.bse.nii.gz'))

    system(
        '/big_disk/ajoshi/coding_ground/pvcthickness/cortical_extraction_nobse.sh '
        + join(subdir, 't1') + ' >/dev/null 2>&1')
    system('/home/ajoshi/BrainSuite18a/svreg/bin/svreg.sh ' +
           join(subdir, 't1') + ' -S >/dev/null 2>&1')
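# A minimal driver sketch for bstsvreg (not in the original snippet): it assumes
# the HCP subject IDs are simply the directory names under /data_disk/HCP_All and
# that a small multiprocessing pool is acceptable; both are illustrative assumptions.
from multiprocessing import Pool
from os import listdir

if __name__ == '__main__':
    subjects = sorted(listdir('/data_disk/HCP_All'))  # assumed: one folder per subject
    with Pool(4) as pool:
        pool.map(bstsvreg, subjects)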
Example 2
import nibabel as nib
from nilearn import image, masking, signal


def load_fmri(filename, maskname='', sigma=3):
    """Read 4D fMRI data.

    Smooths the data with a 3D Gaussian filter, applies a mask
    (computing a binary background mask if none is provided),
    detrends the signal, and returns the data matrix (Time x Voxels).
    """
    img = nib.load(filename)
    print(img.shape)
    rep_time = img.header.get_zooms()[-1]
    img = image.smooth_img(img, sigma)
    if maskname != '':
        img_mask = nib.load(maskname)
    else:
        print('Mask not provided. Calculating mask ...')
        img_mask = masking.compute_background_mask(img)
    img = masking.apply_mask(img, img_mask)
    print('Mask applied!')
    print('Detrending data!')
    img = signal.clean(img,
                       detrend=True,
                       high_pass=0.01,
                       standardize=False,
                       t_r=rep_time)
    return img
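# Usage sketch for load_fmri (the file names are hypothetical): load a 4D run,
# smooth it with a 3 mm kernel, mask it and detrend the resulting time series.
if __name__ == '__main__':
    data = load_fmri('sub-01_task-rest_bold.nii.gz',
                     maskname='sub-01_brain_mask.nii.gz',
                     sigma=3)
    print(data.shape)  # (timepoints, voxels)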
Example 3
    def mask_background(self, apply=False):
        """Method to generate a mask matrix which does not consider blank pixels outside the brain area.

        :param apply: {bool} whether the mask should directly be applied to extract the wanted pixels.
        :return: mask as boolean ``numpy.array`` in the attribute :py:attr:`mask`. If ``apply=True``, data in the
            attribute :py:attr:`data` is reshaped as well.
        """
        self.mask = compute_background_mask(self.img)
        if apply:
            self.data = apply_mask(self.img, self.mask)
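# The method above wraps two nilearn calls; a standalone equivalent, assuming
# 'bold.nii.gz' is any 4D image on disk, would look like this:
from nilearn.masking import apply_mask, compute_background_mask

mask_img = compute_background_mask('bold.nii.gz')
data = apply_mask('bold.nii.gz', mask_img)  # shape: (timepoints, voxels)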
Example 4
    def set_mask(self, mask):
        if mask is None or mask == "background":
            try:
                self.mask = masking.compute_background_mask(
                    self.nifti).get_data()
            except Exception:
                self.mask = mask
                warnings.warn(
                    "Unable to generate mask using selected method. This is "
                    "expected behavior if you initialize a SearchLight object "
                    "without data. A mask using the selected method will be "
                    "created when you call run(). If this message appears "
                    "when calling run(), please check your data.")
        elif hasattr(mask, "get_data"):
            self.mask = mask.get_data()
        else:
            self.mask = mask
Example 5
def test_compute_background_mask():
    for value in (0, np.nan):
        mean_image = value * np.ones((9, 9, 9))
        mean_image[3:-3, 3:-3, 3:-3] = 1
        mask = mean_image == 1
        mean_image = Nifti1Image(mean_image, np.eye(4))
        mask1 = compute_background_mask(mean_image, opening=False)
        np.testing.assert_array_equal(get_data(mask1), mask.astype(np.int8))

    # Check that we get a ValueError for incorrect shape
    mean_image = np.ones((9, 9))
    mean_image[3:-3, 3:-3] = 10
    mean_image[5, 5] = 100
    mean_image = Nifti1Image(mean_image, np.eye(4))
    assert_raises(ValueError, compute_background_mask, mean_image)

    # Check that we get a useful warning for empty masks
    mean_image = np.zeros((9, 9, 9))
    mean_image = Nifti1Image(mean_image, np.eye(4))
    with warnings.catch_warnings(record=True) as w:
        compute_background_mask(mean_image)
    assert_equal(len(w), 1)
    assert_true(isinstance(w[0].message, masking.MaskWarning))
Example 6
import os
import os.path as op
import pickle
from glob import glob

import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
from nilearn import plotting, surface
from nilearn.connectome import ConnectivityMeasure
from nilearn.datasets import fetch_surf_fsaverage
from nilearn.masking import compute_background_mask


def main(argv=None):

    args = get_parser().parse_args(argv)

    output_dir = op.join(args.dset, 'derivatives', args.deriv, args.sub,
                         args.ses)
    os.makedirs(output_dir, exist_ok=True)

    nii_files = glob(
        op.join(args.dset, 'derivatives',
                '3dTproject_denoise_acompcor_csfwm+12mo+0.35mm', args.sub,
                args.ses, '*.nii.gz'))

    for tmp_nii_file in nii_files:
        print(tmp_nii_file)
        imgs = nib.load(tmp_nii_file)

        fsaverage = fetch_surf_fsaverage(mesh='fsaverage5')

        mask = compute_background_mask(imgs)
        surf_lh = surface.vol_to_surf(imgs,
                                      fsaverage.pial_left,
                                      radius=24,
                                      interpolation='nearest',
                                      kind='ball',
                                      n_samples=None,
                                      mask_img=mask)
        surf_rh = surface.vol_to_surf(imgs,
                                      fsaverage.pial_right,
                                      radius=24,
                                      interpolation='nearest',
                                      kind='ball',
                                      n_samples=None,
                                      mask_img=mask)

        time_series = np.transpose(np.vstack((surf_lh, surf_rh)))
        correlation = ConnectivityMeasure(kind='correlation')
        correlation_matrix = correlation.fit_transform([time_series])[0]
        plotting.plot_matrix(correlation_matrix, figure=(10, 8))
        plt.savefig(
            op.join(
                output_dir, '{0}-correlation_matrix.png'.format(
                    op.basename(tmp_nii_file).split('.')[0])))
        plt.close()
        with open(
                op.join(
                    output_dir, '{0}-correlation_matrix.pkl'.format(
                        op.basename(tmp_nii_file).split('.')[0])), 'wb') as fo:
            pickle.dump(correlation_matrix, fo)
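# main() relies on a get_parser() helper that is not part of this snippet.
# A hypothetical argparse definition matching the attributes used above
# (dset, deriv, sub, ses) could look like this:
import argparse


def get_parser():
    parser = argparse.ArgumentParser(
        description='Project denoised volumes to fsaverage5 and compute '
                    'correlation matrices.')
    parser.add_argument('--dset', required=True, help='BIDS dataset root')
    parser.add_argument('--deriv', required=True, help='derivatives folder name')
    parser.add_argument('--sub', required=True, help='subject label, e.g. sub-01')
    parser.add_argument('--ses', required=True, help='session label, e.g. ses-01')
    return parser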
Example 7
def test_compute_background_mask():
    for value in (0, np.nan):
        mean_image = value * np.ones((9, 9, 9))
        mean_image[3:-3, 3:-3, 3:-3] = 1
        mask = mean_image == 1
        mean_image = Nifti1Image(mean_image, np.eye(4))
        mask1 = compute_background_mask(mean_image, opening=False)
        np.testing.assert_array_equal(mask1.get_data(),
                                      mask.astype(np.int8))

    # Check that we get a ValueError for incorrect shape
    mean_image = np.ones((9, 9))
    mean_image[3:-3, 3:-3] = 10
    mean_image[5, 5] = 100
    mean_image = Nifti1Image(mean_image, np.eye(4))
    assert_raises(ValueError, compute_background_mask, mean_image)

    # Check that we get a useful warning for empty masks
    mean_image = np.zeros((9, 9, 9))
    mean_image = Nifti1Image(mean_image, np.eye(4))
    with warnings.catch_warnings(record=True) as w:
        compute_background_mask(mean_image)
    assert_equal(len(w), 1)
    assert_true(isinstance(w[0].message, masking.MaskWarning))
Example 8
def estimateMask(im, st="background", logger=None):
    """
    mask the wholehead image (if we don"t have one).
    wrapper for NiLearn implementation
    :param im: image
    :param st: type of automatic extraction. epi for epi images,
    background for all other.
    :param logger: logger file
    :return: mask
    """
    from nilearn import masking
    write_to_logger("Estimating masks...", logger)
    if st == "epi":
        mask = masking.compute_epi_mask(im)
    else:
        mask = masking.compute_background_mask(im)
    return mask
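# Usage sketch for estimateMask (file names are hypothetical; the
# write_to_logger helper from the same module is assumed to be importable):
import nibabel as nib

epi_mask = estimateMask(nib.load('sub-01_task-rest_bold.nii.gz'), st='epi')
anat_mask = estimateMask(nib.load('sub-01_T1w.nii.gz'), st='background')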
Example 9
    def _resample_image(self, image_file, output_file, target_nii):
        # Compute the background and extrapolate outside of the mask
        log.info("Extrapolating %s", image_file)

        niimg = nb.load(image_file)
        affine = niimg.get_affine()
        data = niimg.get_data().squeeze()
        niimg = nb.Nifti1Image(data, affine, header=niimg.get_header())
        bg_mask = compute_background_mask(niimg).get_data()

        # Test if the image has been masked:
        out_of_mask = data[np.logical_not(bg_mask)]
        if np.all(np.isnan(out_of_mask)) or len(np.unique(out_of_mask)) == 1:
            # Need to extrapolate
            data = _extrapolate_out_mask(data.astype(float),
                                         bg_mask, iterations=3)[0]
        niimg = nb.Nifti1Image(data, affine, header=niimg.get_header())
        del out_of_mask, bg_mask

        log.info("Resampling %s", image_file)
        resampled_nii = resample_img(
            niimg, target_nii.get_affine(), target_nii.shape)
        resampled_nii = nb.Nifti1Image(resampled_nii.get_data().squeeze(),
                                       resampled_nii.get_affine(),
                                       header=niimg.get_header())
        if len(resampled_nii.shape) == 3:
            resampled_nii.to_filename(output_file)
        else:
            # We have a 4D file
            raise Exception('4D File.')
            # assert len(resampled_nii.shape) == 4
            # resampled_data = resampled_nii.get_data()
            # affine = resampled_nii.get_affine()
            # for index in range(resampled_nii.shape[-1]):
            #     # First save the files separately
            #     this_nii = nb.Nifti1Image(resampled_data[..., index], affine)
            #     this_id = int("%i%i" % (-row[1]['image_id'], index))
            #     this_file = os.path.join(output_dir, "%06d%s" % (this_id, ext))
            #     this_nii.to_filename(this_file)
            #     # Second, fix the dataframe
            #     out_df = out_df[out_df.image_id != row[1]['image_id']]
            #     this_row = row[1].copy()
            #     this_row.image_id = this_id
            #     out_df = out_df.append(this_row)
        return output_file
Example 10
    def background_mask(self, calculate=True, apply=False, plot=True):
        """Compute the average background mask for all given brains.

        :param calculate: {bool} do calculate or just use other options.
        :param apply: {bool} if True, the mask is directly applied to cut down the brains in :py:attr:`data` and
            reshape them into vectors
        :param plot: {bool} whether the generated mask should be visualized in a plot. The first image in 4th
            dimension is shown.
        :return: mask matrix in the attribute :py:attr:`mask` and if `apply=True` data vectors in :py:attr:`data`.
        """
        print("Computing background mask...")
        if calculate:
            self.mask = compute_background_mask(self.img)
        if apply:
            self.apply_mask()
        if plot:
            plot_roi(self.mask, Nifti1Image(self.img.dataobj[..., 0], self.img.affine))
        print("\tBackground mask computed!")
Example 11
import nibabel as nib
import numpy as np
from nilearn import image, masking


def mp2rage_masked(filename_uni, filename_inv2, filename_output=None, threshold="70%"):
    """ This is an alternative approach to get rid of the unusual noise
    distribution in the background of MP2RAGE images. It was inspired by
    a suggestion on GitHub.com and works as follows:
    1. thresholding the INV2 image
    2. creating a background mask using nilearn's 'compute_background_mask'
    3. applying that mask to clear the background from noise

    Parameters
    ----------
    filename_uni : string
        path to the uniform T1-image (UNI)
    filename_inv2 : string
        path to the second inversion image (INV2)
    filename_output : string  (optional)
        path to output image 
    threshold : float, string (optional)
        absolute value (float) or percentage (string, e. g. '50%')
    """
    # load data
    image_uni  = nib.load(filename_uni)
    image_inv2 = nib.load(filename_inv2)

    image_uni_fdata = image_uni.get_fdata()
    image_inv2_fdata  = image_inv2.get_fdata()
    
    # scale UNI image values to roughly [-0.5, 0.5] if they are not already
    if (np.amin(image_uni_fdata) >= 0) and (np.amax(image_uni_fdata) >= 0.51):
        scale = lambda x: (x - np.amax(image_uni_fdata) / 2) / np.amax(image_uni_fdata)
        image_uni_fdata = scale(image_uni_fdata)

    image_mask = masking.compute_background_mask(image.threshold_img(image_inv2,threshold))

    image_uni_fdata[image_mask.get_data()==0] = -.5

    image_output = nib.Nifti1Image(image_uni_fdata, image_uni.affine, nib.Nifti1Header())

    if filename_output:
        nib.save(image_output, filename_output)
    else:
        return image_output
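# Usage sketch for mp2rage_masked; the input and output file names are hypothetical.
if __name__ == '__main__':
    cleaned = mp2rage_masked('sub-01_UNI.nii.gz',
                             'sub-01_inv-2_MP2RAGE.nii.gz',
                             threshold="70%")
    cleaned.to_filename('sub-01_UNI_masked.nii.gz')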
Example 12
  def download_and_resample(self, dest_dir, target,collection_ids=None,image_ids=None):
    """Downloads all stat maps and resamples them to a common space"""
    target_nii = nb.load(target)
    orig_path = os.path.join(dest_dir, "original")
    mkdir_p(orig_path)
    resampled_path = os.path.join(dest_dir, "resampled")
    mkdir_p(resampled_path)
    combined_df = self.get_images_with_collections_df()
    # If the user has specified specific images
    if image_ids:
      combined_df = combined_df.loc[combined_df.image_id.isin(image_ids)]
    # If the user wants to subset to a set of collections
    if collection_ids:
      if isinstance(collection_ids,str): collection_ids = [collection_ids]
      combined_df = combined_df[combined_df['collection_id'].isin(collection_ids)]
    out_df = combined_df.copy()

    for row in combined_df.iterrows():
      # Downloading the file to the "original" subfolder
      _, _, ext = split_filename(row[1]['file'])
      orig_file = os.path.join(orig_path, "%04d%s" % (row[1]['image_id'], ext))
      if not os.path.exists(orig_file):
        try:
          print "Downloading %s" % orig_file
          urllib.urlretrieve(row[1]['file'], orig_file)

          # Compute the background and extrapolate outside of the mask
          print "Extrapolating %s" % orig_file
          niimg = nb.load(orig_file)
          affine = niimg.get_affine()
          data = niimg.get_data().squeeze()
          niimg = nb.Nifti1Image(data, affine,header=niimg.get_header())
          bg_mask = compute_background_mask(niimg).get_data()
          # Test if the image has been masked:
          out_of_mask = data[np.logical_not(bg_mask)]
          if np.all(np.isnan(out_of_mask)) or len(np.unique(out_of_mask)) == 1:
            # Need to extrapolate
            data = _extrapolate_out_mask(data.astype(np.float), bg_mask,iterations=3)[0]
          niimg = nb.Nifti1Image(data, affine,header=niimg.get_header())
          del out_of_mask, bg_mask
          # Resampling the file to target and saving the output in the "resampled" folder
          resampled_file = os.path.join(resampled_path,"%06d%s" % (row[1]['image_id'], ext))
          print "Resampling %s" % orig_file
          resampled_nii = resample_img(niimg, target_nii.get_affine(),target_nii.shape)
          resampled_nii = nb.Nifti1Image(resampled_nii.get_data().squeeze(),
                                         resampled_nii.get_affine(),
                                         header=niimg.get_header())
          if len(resampled_nii.shape) == 3: 
            resampled_nii.to_filename(resampled_file)
          else:
            # We have a 4D file
            assert len(resampled_nii.shape) == 4
            resampled_data = resampled_nii.get_data()
            affine = resampled_nii.get_affine()
            for index in range(resampled_nii.shape[-1]):
              # First save the files separately
              this_nii = nb.Nifti1Image(resampled_data[..., index],affine)
              this_id = int("%i%i" % (-row[1]['image_id'], index))
              this_file = os.path.join(resampled_path,"%06d%s" % (this_id, ext))
              this_nii.to_filename(this_file)
              # Second, fix the dataframe
              out_df = out_df[out_df.image_id != row[1]['image_id']]
              this_row = row[1].copy()
              this_row.image_id = this_id
              out_df = out_df.append(this_row)
        except:
          print "Error downloading image id %s, retry this image." %(row[1]["image_id"])
    return out_df
Example 13
def neuropowerinput(request, neurovault_id=None, end_session=False):
    '''step 2: input'''

    # Create the session id for the user
    sid = get_session_id(request)

    # Get the template/step status
    template = "neuropower/neuropowerinput.html"
    steps = get_neuropower_steps(template, sid)

    parsform = ParameterForm(request.POST or None,
                             request.FILES or None,
                             default_url="URL to nifti image",
                             err="")

    neurovault_id = request.GET.get('neurovault', '')
    context = {"steps": steps}

    # If the user has ended their session, give message
    if end_session == True:
        context["message"] = "Session has been successfully reset."

    if neurovault_id:
        neurovault_image = get_url(
            "http://neurovault.org/api/images/%s/?format=json" %
            (neurovault_id))
        collection_id = str(neurovault_image['collection_id'])
        neurovault_collection = get_url(
            "http://neurovault.org/api/collections/%s/?format=json" %
            (collection_id))

        parsform = ParameterForm(
            request.POST or None,
            request.FILES or None,
            default_url="",
            err="",
            initial={
                "url": neurovault_image["file"],
                "ZorT":
                "T" if neurovault_image["map_type"] == "T map" else "Z",
                "Subj": neurovault_collection["number_of_subjects"]
            })
        context["parsform"] = parsform

        # fields = ['url','spmfile','maskfile','ZorT','Exc','Subj','Samples',
        #           'alpha','Smoothx','Smoothy','Smoothz','Voxx','Voxy','Voxz']

        return render(request, template, context)

    if not request.method == "POST" or not parsform.is_valid():
        context["parsform"] = parsform
        return render(request, template, context)

    else:
        form = parsform.save(commit=False)
        form.SID = sid
        mapID = "%s_%s" % (str(sid), str(uuid.uuid4()))
        form.mapID = mapID
        peaktable = os.path.join(settings.MEDIA_ROOT, "peaktables",
                                 "peaks_%s.csv" % (mapID))
        form.peaktable = peaktable
        form.save()

        # handle data: copy to temporary location
        parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
        mapID = "%s_%s" % (str(sid), str(uuid.uuid4()))
        form.mapID = mapID
        if not parsdata.url == "":
            url = parsform.cleaned_data['url']
            location = utils.create_temporary_copy(url,
                                                   mapID,
                                                   mask=False,
                                                   url=True)
        elif not parsdata.spmfile == "":
            spmfile = os.path.join(settings.MEDIA_ROOT, str(parsdata.spmfile))
            location = utils.create_temporary_copy(spmfile,
                                                   mapID,
                                                   mask=False,
                                                   url=False)
        form.location = location
        form.save()
        parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
        SPM = nib.load(parsdata.location)

        # check if the IQR is realistic (= check whether these are Z- or T-values)
        IQR = np.subtract(*np.percentile(SPM.get_data(), [75, 25]))
        if IQR > 20:
            parsform = ParameterForm(request.POST or None,
                                     request.FILES or None,
                                     default_url="URL to nifti image",
                                     err="median")
            context["parsform"] = parsform
            return render(request, template, context)

        # save other parameters
        form.DoF = parsdata.Subj - 1 if parsdata.Samples == 1 else parsdata.Subj - 2
        form.ExcZ = float(parsdata.Exc) if float(
            parsdata.Exc) > 1 else -norm.ppf(float(parsdata.Exc))

        # handle mask
        if parsdata.maskfile == "":
            mask = masking.compute_background_mask(SPM,
                                                   border_size=2,
                                                   opening=True)
            nvox = np.sum(mask.get_data())
            form.nvox = nvox
        else:
            maskfile = os.path.join(settings.MEDIA_ROOT,
                                    str(parsdata.maskfile))
            masklocation = utils.create_temporary_copy(maskfile,
                                                       mapID,
                                                       mask=True,
                                                       url=False)
            mask = nib.load(masklocation).get_data()

            # return error when dimensions are different
            if SPM.get_data().shape != mask.shape:
                parsform = ParameterForm(request.POST or None,
                                         request.FILES or None,
                                         default_url="URL to nifti image",
                                         err="dim")
                context["parsform"] = parsform
                return render(request, template, context)
            else:
                SPM_masked = np.multiply(SPM.get_data(), mask)
                SPM_nib = nib.Nifti1Image(SPM_masked, np.eye(4))
                nib.save(SPM_nib, parsdata.location)
                form.nvox = np.sum(mask)
        form.save()

        if parsdata.spmfile == "":
            return HttpResponseRedirect('/neuropowerviewer/')
        else:
            return HttpResponseRedirect('/neuropowertable/')
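# The mask-handling branch above boils down to a few nilearn calls; a standalone
# sketch on an arbitrary statistical map (the file name is hypothetical):
import nibabel as nib
import numpy as np
from nilearn import masking

spm_img = nib.load('zstat1.nii.gz')
mask_img = masking.compute_background_mask(spm_img, border_size=2, opening=True)
n_voxels = int(np.sum(mask_img.get_fdata()))  # number of voxels inside the mask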
Example 14
def neuropowerinput(request, neurovault_id=None, end_session=False):
    '''step 2: input'''

    # Create the session id for the user
    sid = get_session_id(request)

    # get DB entry for sid
    try:
        neuropowerdata = NeuropowerModel.objects.get(SID=sid)
    except NeuropowerModel.DoesNotExist:
        neuropowerdata = None

    # Get the template/step status
    template = "neuropower/neuropowerinput.html"
    context = {}
    steps = get_neuropower_steps(template, sid)
    context["steps"] = steps

    # Initiate parameter form
    parsform = ParameterForm(request.POST or None,
                             request.FILES or None,
                             instance=neuropowerdata,
                             default_url="URL to nifti image",
                             err="")

    # Check if a message is passed
    message = request.GET.get('message', '')
    context['message'] = message

    # Check if redirect from neurovault
    neurovault_id = request.GET.get('neurovault', '')

    if neurovault_id:
        neurovault_data = get_neurovault_form(request, neurovault_id)

        context['parsform'] = neurovault_data["parsform"]
        if not neurovault_data["message"] == None:
            context['message'] = neurovault_data["message"]

        return render(request, template, context)

    # Check if new user or if parameterform is invalid
    if not request.method == "POST" or not parsform.is_valid():
        context["parsform"] = parsform

        return render(request, template, context)

    else:
        form = parsform.save(commit=False)
        form.SID = sid
        form.save()

        # handle data: copy to local drive

        neuropowerdata = NeuropowerModel.objects.get(SID=sid)

        # create folder to save map local

        if not os.path.exists("/var/maps/"):
            os.mkdir("/var/maps/")

        # make local copies of map and mask

        map_local = "/var/maps/" + sid + "_map"
        mask_local = "/var/maps/" + sid + "_mask"

        if not neuropowerdata.map_url == "":
            map_url = neuropowerdata.map_url
        else:
            map_url = "https://" + settings.AWS_S3_CUSTOM_DOMAIN + str(
                neuropowerdata.spmfile.name)

        map_local = create_local_copy(map_url, map_local)

        if not neuropowerdata.maskfile == "":
            mask_url = "https://" + settings.AWS_S3_CUSTOM_DOMAIN + str(
                neuropowerdata.maskfile.name)
            mask_local = create_local_copy(mask_url, mask_local)

        # save map locations to database

        form = parsform.save(commit=False)
        form.map_url = map_url
        form.map_local = map_local
        if not neuropowerdata.maskfile == "":
            form.mask_url = mask_url
            form.mask_local = mask_local
        else:
            form.mask_local = mask_local
        form.save()

        # perform some higher level cleaning

        error = None

        neuropowerdata = NeuropowerModel.objects.get(SID=sid)
        SPM = nib.load(neuropowerdata.map_local)
        if len(SPM.shape) > 3:
            if not SPM.shape[3] == 1 or len(SPM.shape) > 4:
                error = "shape"

        # check if the IQR is realistic (= check whether these are Z- or T-values)
        IQR = np.subtract(*np.percentile(SPM.get_data(), [75, 25]))
        if IQR > 20:
            error = "median"

        # save other parameters

        form.DoF = neuropowerdata.Subj - 1 if neuropowerdata.Samples == 1 else neuropowerdata.Subj - 2
        form.ExcZ = float(neuropowerdata.Exc) if float(
            neuropowerdata.Exc) > 1 else -norm.ppf(float(neuropowerdata.Exc))

        # if mask does not exist: create
        if not error == 'shape':
            if neuropowerdata.maskfile == "":
                mask = masking.compute_background_mask(SPM,
                                                       border_size=2,
                                                       opening=True)
                nvox = np.sum(mask.get_data())
                form.mask_local = neuropowerdata.mask_local + ".nii.gz"
                nib.save(mask, form.mask_local)
                form.nvox = nvox
            # if mask is given: check dimensions
            else:
                mask = nib.load(neuropowerdata.mask_local).get_data()
                if SPM.get_data().shape != mask.shape:
                    error = "dim"
                else:
                    form.nvox = np.sum(mask)

        # throw error if detected
        if error:
            parsform = ParameterForm(request.POST or None,
                                     request.FILES or None,
                                     default_url="URL to nifti image",
                                     err=error)
            context["parsform"] = parsform
            return render(request, template, context)
        else:
            form.step = 1
            form.save()

        return HttpResponseRedirect('../neuropowertable/')
Example 15
def neuropowerinput(request,neurovault_id=None,end_session=False):
    '''step 2: input'''

    # Create the session id for the user
    sid = get_session_id(request)

    # Get the template/step status
    template = "neuropower/neuropowerinput.html"
    steps = get_neuropower_steps(template,sid)

    parsform = ParameterForm(request.POST or None,
                             request.FILES or None,
                             default_url="URL to nifti image",
                             err="")

    neurovault_id = request.GET.get('neurovault','')
    context = {"steps":steps}

    # If the user has ended their session, give message
    if end_session == True:
        context["message"] = "Session has been successfully reset."

    if neurovault_id:
        neurovault_image = get_url("http://neurovault.org/api/images/%s/?format=json" %(neurovault_id))
        collection_id = str(neurovault_image['collection_id'])
        neurovault_collection = get_url("http://neurovault.org/api/collections/%s/?format=json" %(collection_id))

        parsform = ParameterForm(request.POST or None,
                                 request.FILES or None,
                                 default_url = "",
                                 err = "",
                                 initial = {"url":neurovault_image["file"],
                                            "ZorT":"T" if neurovault_image["map_type"] =="T map" else "Z",
                                            "Subj":neurovault_collection["number_of_subjects"]})
        context["parsform"] = parsform

        # fields = ['url','spmfile','maskfile','ZorT','Exc','Subj','Samples',
        #           'alpha','Smoothx','Smoothy','Smoothz','Voxx','Voxy','Voxz']

        return render(request,template,context)

    if not request.method=="POST" or not parsform.is_valid():
        context["parsform"] = parsform 
        return render(request,template,context)

    else:
        form = parsform.save(commit=False)
        form.SID = sid
        mapID = "%s_%s" %(str(sid),str(uuid.uuid4()))
        form.mapID = mapID
        peaktable = os.path.join(settings.MEDIA_ROOT,"peaktables","peaks_%s.csv" %(mapID))
        form.peaktable = peaktable
        form.save()

        # handle data: copy to temporary location
        parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
        mapID = "%s_%s" %(str(sid),str(uuid.uuid4()))
        form.mapID = mapID
        if not parsdata.url == "":
            url = parsform.cleaned_data['url']
            location = utils.create_temporary_copy(url,mapID,mask=False,url=True)
        elif not parsdata.spmfile == "":
            spmfile = os.path.join(settings.MEDIA_ROOT,str(parsdata.spmfile))
            location = utils.create_temporary_copy(spmfile,mapID,mask=False, url=False)
        form.location = location
        form.save()
        parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
        SPM = nib.load(parsdata.location)

        # check if the IQR is realistic (= check whether these are Z- or T-values)
        IQR = np.subtract(*np.percentile(SPM.get_data(),[75,25]))
        if IQR > 20:
            parsform = ParameterForm(request.POST or None,
                                     request.FILES or None,
                                     default_url = "URL to nifti image",
                                     err = "median")
            context["parsform"] = parsform
            return render(request,template,context)

        # save other parameters
        form.DoF = parsdata.Subj-1 if parsdata.Samples==1 else parsdata.Subj-2
        form.ExcZ = float(parsdata.Exc) if float(parsdata.Exc)>1 else -norm.ppf(float(parsdata.Exc))

        # handle mask
        if parsdata.maskfile == "":
            mask = masking.compute_background_mask(SPM,border_size=2, opening=True)
            nvox = np.sum(mask.get_data())
            form.nvox = nvox
        else:
            maskfile = os.path.join(settings.MEDIA_ROOT,str(parsdata.maskfile))
            masklocation = utils.create_temporary_copy(maskfile,mapID,mask=True,url=False)
            mask = nib.load(masklocation).get_data()

            # return error when dimensions are different
            if SPM.get_data().shape != mask.shape:
                parsform = ParameterForm(request.POST or None,
                                         request.FILES or None,
                                         default_url="URL to nifti image",
                                         err="dim")
                context["parsform"] = parsform
                return render(request,template,context)
            else:
                SPM_masked = np.multiply(SPM.get_data(),mask)
                SPM_nib = nib.Nifti1Image(SPM_masked,np.eye(4))
                nib.save(SPM_nib,parsdata.location)
                form.nvox = np.sum(mask)
        form.save()
        
        if parsdata.spmfile == "":
            return HttpResponseRedirect('/neuropowerviewer/')
        else:
            return HttpResponseRedirect('/neuropowertable/')
Example 16
def download_images(dest_dir, images_df=None, target=None, resample=True):
    """Downloads images dataframe and resamples them to a common space"""
    orig_path = os.path.join(dest_dir, "original")
    mkdir_p(orig_path)
    if resample == True:
        if not target:
            print("To resample you must specify a target!")
            return
        resampled_path = os.path.join(dest_dir, "resampled")
        mkdir_p(resampled_path)
        target_nii = nb.load(target)

    if not isinstance(images_df, pandas.DataFrame):
        images_df = get_images()

    out_df = images_df.copy()

    for row in images_df.iterrows():
        # Downloading the file to the "original" subfolder
        _, _, ext = split_filename(row[1]['file'])
        orig_file = os.path.join(orig_path,
                                 "%04d%s" % (row[1]['image_id'], ext))
        if not os.path.exists(orig_file):
            try:
                print("Downloading %s" % orig_file)
                urlretrieve(row[1]['file'], orig_file)

                if resample == True:
                    # Compute the background and extrapolate outside of the mask
                    print("Extrapolating %s" % orig_file)
                    niimg = nb.load(orig_file)
                    affine = niimg.get_affine()
                    data = niimg.get_data().squeeze()
                    niimg = nb.Nifti1Image(data,
                                           affine,
                                           header=niimg.get_header())
                    bg_mask = compute_background_mask(niimg).get_data()
                    # Test if the image has been masked:
                    out_of_mask = data[np.logical_not(bg_mask)]
                    if np.all(np.isnan(out_of_mask)) or len(
                            np.unique(out_of_mask)) == 1:
                        # Need to extrapolate
                        data = _extrapolate_out_mask(data.astype(float),
                                                     bg_mask,
                                                     iterations=3)[0]
                    niimg = nb.Nifti1Image(data,
                                           affine,
                                           header=niimg.get_header())
                    del out_of_mask, bg_mask
                    # Resampling the file to target and saving the output in the "resampled" folder
                    resampled_file = os.path.join(
                        resampled_path, "%06d%s" % (row[1]['image_id'], ext))
                    print("Resampling %s" % orig_file)
                    resampled_nii = resample_img(niimg,
                                                 target_nii.get_affine(),
                                                 target_nii.shape)
                    resampled_nii = nb.Nifti1Image(
                        resampled_nii.get_data().squeeze(),
                        resampled_nii.get_affine(),
                        header=niimg.get_header())
                    if len(resampled_nii.shape) == 3:
                        resampled_nii.to_filename(resampled_file)
                    else:
                        # We have a 4D file
                        assert len(resampled_nii.shape) == 4
                        resampled_data = resampled_nii.get_data()
                        affine = resampled_nii.get_affine()
                        for index in range(resampled_nii.shape[-1]):
                            # First save the files separately
                            this_nii = nb.Nifti1Image(
                                resampled_data[..., index], affine)
                            this_id = int("%i%i" %
                                          (-row[1]['image_id'], index))
                            this_file = os.path.join(resampled_path,
                                                     "%06d%s" % (this_id, ext))
                            this_nii.to_filename(this_file)
                            # Second, fix the dataframe
                            out_df = out_df[
                                out_df.image_id != row[1]['image_id']]
                            this_row = row[1].copy()
                            this_row.image_id = this_id
                            out_df = out_df.append(this_row)
            except:
                print("Error downloading image id %s, retry this image." %
                      (row[1]["image_id"]))
    return out_df
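# Hypothetical call to download_images (helpers such as get_images, mkdir_p and
# split_filename from the same module are assumed available); the target template
# path is an illustrative assumption.
if __name__ == '__main__':
    df = download_images(dest_dir='/tmp/neurovault',
                         target='MNI152_T1_2mm_brain.nii.gz',
                         resample=True)
    print(df.shape)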
Example 17
def t1_pipeline(do_normalise_before=False,
                do_segment=True,
                do_normalise_after=False,
                do_plot=True,
                keep_tmp=True,
                sub_name='sub-11',
                sess_num='ses-17',
                root_path='/neurospin/ibc'):
    """
    Preprocess qMRI t1 images and then run estimation to generate t1-maps,
    more details in scripts/qmri_README.md,
    only one of do_normalise_before and do_normalise_after should be True,
    both can be False
    """
    DATA_LOC = join(root_path, 'sourcedata', sub_name, sess_num)
    SAVE_TO = join(root_path, 'derivatives', sub_name, sess_num)

    if do_normalise_before or do_normalise_after:
        space = "MNI152"
    else:
        space = "individual"

    if not exists(SAVE_TO):
        makedirs(SAVE_TO)

    start_time = time.time()

    # data files
    data_dir = DATA_LOC

    niftis = []
    jsons = []
    for fi in listdir(join(data_dir, 'anat')):
        if fi.split('_')[-1] == 'T1map.nii.gz':
            niftis.append(join(data_dir, 'anat', fi))
        elif fi.split('_')[-1] == 'T1map.json':
            jsons.append(join(data_dir, 'anat', fi))
        elif fi.split('_')[-1] == 'B1map.nii.gz':
            b1_map_nifti = join(data_dir, 'anat', fi)
        elif fi.split('_')[-1] == 'B1map.json':
            b1_map_json = join(data_dir, 'anat', fi)
        else:
            continue
    niftis.sort()
    jsons.sort()

    # preprocessing directory setup
    time_elapsed = time.time() - start_time
    print('[INFO,  t={:.2f}s] copying necessary files...'.format(time_elapsed))
    cwd = SAVE_TO
    preproc_dir = join(cwd, 'tmp_t1', 'preproc')
    if not exists(preproc_dir):
        makedirs(preproc_dir)

    # copy data files to tmp_t1 directory
    cnt = 0
    for nii, json in zip(niftis, jsons):
        system('cp {} {}'.format(nii, preproc_dir))
        niftis[cnt] = join(preproc_dir, nii.split(sep)[-1])
        system('gunzip -df {}'.format(niftis[cnt]))
        niftis[cnt] = join(preproc_dir,
                           nii.split(sep)[-1].split('.')[0] + '.nii')
        system('cp {} {}'.format(json, preproc_dir))
        jsons[cnt] = join(preproc_dir, json.split(sep)[-1])
        cnt += 1
    system('cp {} {}'.format(b1_map_nifti, preproc_dir))
    b1_map_nifti = join(preproc_dir, b1_map_nifti.split(sep)[-1])
    system('gunzip -df {}'.format(b1_map_nifti))
    b1_map_nifti = join(preproc_dir,
                        b1_map_nifti.split(sep)[-1].split('.')[0] + '.nii')
    system('cp {} {}'.format(b1_map_json, preproc_dir))
    b1_map_json = join(preproc_dir, b1_map_json.split(sep)[-1])

    # preprocessing step: spatial normalization to MNI space
    # of T1 maps
    if do_normalise_before:
        time_elapsed = time.time() - start_time
        print('[INFO,  t={:.2f}s] segmenting highest flip angle image'.format(
            time_elapsed))
        image = niftis[-1]
        out_info = segment(image, True)
        # save normalised segments
        segments = [
            join(preproc_dir, f"w{out_info[segment].split('/')[-1]}")
            for segment in ['gm', 'wm']
        ]
        time_elapsed = time.time() - start_time
        print('[INFO,  t={:.2f}s] transforming images to MNI space...'.format(
            time_elapsed))
        normed_niftis = []
        cnt = 0
        for nii in niftis:
            image = nii
            if cnt == len(niftis) - 1:
                b1_map = b1_map_nifti
                out_info = to_MNI(image, data=out_info, func=b1_map)
                normed_b1_map = out_info['func'][0]
            else:
                out_info = to_MNI(image, data=out_info)
            normed_niftis.append(out_info['anat'])
            time_elapsed = time.time() - start_time
            print('[INFO,  t={:.2f}s] \t transformed {}'.format(
                time_elapsed,
                nii.split(sep)[-1]))
            cnt = cnt + 1

    # preprocessing step: segmenting largest flip angle image
    if do_segment:
        time_elapsed = time.time() - start_time
        if do_normalise_before:
            print('[INFO,  t={:.2f}s] creating a mask'.format(time_elapsed))
            image = normed_niftis[-1]
            mni = compute_brain_mask(image)
            closed_mni = closing(mni, 12)
            union = intersect_masks([mni, closed_mni], threshold=0)
        else:
            print('[INFO,  t={:.2f}s] segmenting highest flip angle image'.
                  format(time_elapsed))
            image = niftis[-1]
            out_info = segment(image, True)
            segments = [out_info['gm'], out_info['wm']]
            time_elapsed = time.time() - start_time
            print('[INFO,  t={:.2f}s] \t segmented {}'.format(
                time_elapsed,
                image.split(sep)[-1]))

            # preprocessing step: creating a mask
            print('[INFO,  t={:.2f}s] creating a mask using segments'.format(
                time_elapsed))
            add = math_img("img1 + img2", img1=segments[0], img2=segments[1])
            if sub_name == 'sub-08':
                full = compute_epi_mask(add, exclude_zeros=True)
            else:
                full = compute_epi_mask(add)
            insides = compute_background_mask(full, opening=12)
            union = intersect_masks([full, insides], threshold=0)

        mask_file = join(preproc_dir, 'mask.nii')
        union.to_filename(mask_file)

    # estimation directory setup
    time_elapsed = time.time() - start_time
    print('[INFO,  t={:.2f}s] starting estimation...'.format(time_elapsed))
    recon_dir = join(cwd, 'tmp_t1', 'recon')
    if not exists(recon_dir):
        makedirs(recon_dir)
    jsons_str = ' '.join(jsons)

    if do_normalise_before:
        niftis_str = ' '.join(normed_niftis)
        b1_map_str = normed_b1_map
    else:
        niftis_str = ' '.join(niftis)
        b1_map_str = b1_map_nifti

    if do_segment == False:
        mask_file = None

    # estimation: parameter extraction
    system(f"python3 ../scripts/qmri_t1_map_b1_params.py\
        -v 1\
        -s {sub_name}\
        -o {recon_dir}\
        -g {jsons_str}\
        -b {b1_map_json}")

    # estimation: t1 estimation
    system(f"python3 ../scripts/qmri_t1_map_b1.py\
        -v 1\
        -s {sub_name}\
        -o {recon_dir}\
        -g {niftis_str}\
        -b {b1_map_str}\
        -r {join(recon_dir,f'{sub_name}_t1_map_b1.json')}\
        -d fit\
        -m {mask_file}")

    recon_map = join(recon_dir, f'{sub_name}_T1map.nii.gz')

    # postprocessing: normalization of reconstructed t1 map
    if do_normalise_after:
        postproc_dir = join(cwd, 'tmp_t1', 'postproc')
        if not exists(postproc_dir):
            makedirs(postproc_dir)
        recon_map = join(recon_dir, f'{sub_name}_T1map.nii.gz')
        system('cp {} {}'.format(recon_map, postproc_dir))
        time_elapsed = time.time() - start_time
        print('[INFO,  t={:.2f}s] normalizing reconstructed t1 map...'.format(
            time_elapsed))
        image = join(postproc_dir, f'{sub_name}_T1map.nii.gz')
        system('gunzip -df {}'.format(image))
        image = join(postproc_dir, f'{sub_name}_T1map.nii')
        out_info = to_MNI(image, segmented=out_info.nipype_results['segment'])
        norm_recon_map = out_info['anat']

    # doing the plots
    if do_plot:
        time_elapsed = time.time() - start_time
        print('\n[INFO,  t={:.2f}s] plotting the map...'.format(time_elapsed))
        plot_dir = join(cwd, 'tmp_t1', 'plot')
        if not exists(plot_dir):
            makedirs(plot_dir)
        if do_normalise_after:
            t1_img = norm_recon_map
        else:
            t1_img = join(recon_dir, f'{sub_name}_T1map.nii.gz')
        plot_thresholded_qmap(img=t1_img,
                              thresh="99",
                              map=f"{sub_name}_T1map_fit",
                              interactive=True,
                              coords=(10, 56, 43),
                              output_folder=plot_dir)

    time_elapsed = time.time() - start_time
    print('\n[INFO,  t={:.2f}s] DONE'.format(time_elapsed))

    # move derived files out and delete tmp_t1 directory
    final_recon_map = join(SAVE_TO, f'{sub_name}_space-{space}_T1map.nii.gz')
    if do_normalise_after:
        system('gzip {}'.format(norm_recon_map))
        recon_map = norm_recon_map + '.gz'
    shutil.move(recon_map, final_recon_map)

    if do_plot:
        for fi in listdir(plot_dir):
            plot_name = join(plot_dir, fi)
            final_plot_name = join(
                SAVE_TO,
                final_recon_map.split(sep)[-1].split('.')[0] + '_' +
                fi.split('_')[-1])
            shutil.move(plot_name, final_plot_name)

    if not keep_tmp:
        shutil.rmtree(join(SAVE_TO, 'tmp_t1'))

    time_elapsed = time.time() - start_time
    print('\n[INFO,  t={:.2f}s] created {} \n\n'.format(
        time_elapsed, final_recon_map))
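# Hypothetical invocation of t1_pipeline: reconstruct an individual-space T1 map
# for one subject/session; the subject, session and root path mirror the defaults
# above and the helper functions (segment, to_MNI, ...) are assumed importable.
if __name__ == '__main__':
    t1_pipeline(do_normalise_before=False,
                do_segment=True,
                do_normalise_after=False,
                do_plot=False,
                keep_tmp=False,
                sub_name='sub-11',
                sess_num='ses-17',
                root_path='/neurospin/ibc')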
Example 18
# Load the fMRI image

from nilearn.image import load_img

fmri_data = load_img('D:/FaceData/func_img/wrarun1.nii')

# Compute the mask

from nilearn import masking

mask = masking.compute_background_mask(fmri_data)

# Download the atlas from the internet

from nilearn import datasets

dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')

atlas_filename = dataset.maps
labels = dataset.labels

# Apply atlas to my data

from nilearn.image import resample_to_img

atlas = resample_to_img(atlas_filename, mask, interpolation='nearest')

# Extract the time series

from nilearn.input_data import NiftiLabelsMasker
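# The snippet stops at the NiftiLabelsMasker import; a plausible continuation,
# mirroring Example 20 below, would extract per-region time series and a
# correlation matrix (this is an assumption, not part of the original code):
masker = NiftiLabelsMasker(labels_img=atlas, standardize=True)
time_series = masker.fit_transform(fmri_data)

from nilearn.connectome import ConnectivityMeasure
correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series])[0]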
Example 19
def neuropowerinput(request, neurovault_id=None, end_session=False):
    '''step 2: input'''

    # Create the session id for the user
    sid = get_session_id(request)

    # Get the template/step status
    template = "neuropower/neuropowerinput.html"
    context = {}
    steps = get_neuropower_steps(template, sid)

    parsform = ParameterForm(request.POST or None,
                             request.FILES or None,
                             default_url="URL to nifti image",
                             err="")

    neurovault_id = request.GET.get('neurovault', '')
    context["steps"] = steps
    message = request.GET.get('message', '')
    context['message'] = message

    # If the user has ended their session, give message
    if end_session == True:
        context["message"] = "Session has been successfully reset."

    if neurovault_id:
        neurovault_image = get_url(
            "http://neurovault.org/api/images/%s/?format=json" %
            (neurovault_id))
        collection_id = str(neurovault_image['collection_id'])

        if not (neurovault_image['map_type'] == 'Z map'
                or neurovault_image['map_type'] == 'T map'
                or neurovault_image['analysis_level'] == None):
            context[
                "message"] = "Power analyses can only be performed on Z or T maps."
        if not (neurovault_image['analysis_level'] == 'group'
                or neurovault_image['analysis_level'] == None):
            context[
                "message"] = "Power analyses can only be performed on group statistical maps."

        parsform = ParameterForm(
            request.POST or None,
            request.FILES or None,
            default_url="",
            err='',
            initial={
                "url": neurovault_image["file"],
                "ZorT":
                "T" if neurovault_image["map_type"] == "T map" else "Z",
                "Subj": neurovault_image["number_of_subjects"]
            })
        context["parsform"] = parsform
        return render(request, template, context)

    if not request.method == "POST" or not parsform.is_valid():
        context["parsform"] = parsform
        return render(request, template, context)

    else:
        form = parsform.save(commit=False)
        form.SID = sid
        mapID = "%s_%s" % (str(sid), str(uuid.uuid4()))
        form.mapID = mapID
        form.save()

        # handle data: copy to temporary location
        parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
        mapID = "%s_%s" % (str(sid), str(uuid.uuid4()))
        form.mapID = mapID
        if not parsdata.url == "":
            url = parsform.cleaned_data['url']
            location = create_temporary_copy(url, mapID, mask=False, url=True)
        elif not parsdata.spmfile == "":
            spmfile = os.path.join(settings.MEDIA_ROOT, str(parsdata.spmfile))
            location = create_temporary_copy(spmfile,
                                             mapID,
                                             mask=False,
                                             url=False)
        form.location = location
        form.save()
        parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
        SPM = nib.load(parsdata.location)
        if len(SPM.shape) > 3:
            if not SPM.shape[3] == 1 or len(SPM.shape) > 4:
                parsform = ParameterForm(request.POST or None,
                                         request.FILES or None,
                                         default_url="URL to nifti image",
                                         err="shape")
                context["parsform"] = parsform
                return render(request, template, context)

        # check if the IQR is realistic (= check whether these are Z- or T-values)
        IQR = np.subtract(*np.percentile(SPM.get_data(), [75, 25]))
        if IQR > 20:
            parsform = ParameterForm(request.POST or None,
                                     request.FILES or None,
                                     default_url="URL to nifti image",
                                     err="median")
            context["parsform"] = parsform
            return render(request, template, context)

        # save other parameters
        form.DoF = parsdata.Subj - 1 if parsdata.Samples == 1 else parsdata.Subj - 2
        form.ExcZ = float(parsdata.Exc) if float(
            parsdata.Exc) > 1 else -norm.ppf(float(parsdata.Exc))

        # handle mask
        if parsdata.maskfile == "":
            mask = masking.compute_background_mask(SPM,
                                                   border_size=2,
                                                   opening=True)
            nvox = np.sum(mask.get_data())
            masklocation = os.path.join(settings.MEDIA_ROOT,
                                        "maps/mask_" + mapID + ".nii.gz")
            nib.save(mask, masklocation)
            form.nvox = nvox
        else:
            maskfile = os.path.join(settings.MEDIA_ROOT,
                                    str(parsdata.maskfile))
            masklocation = create_temporary_copy(maskfile,
                                                 mapID,
                                                 mask=True,
                                                 url=False)
            mask = nib.load(masklocation).get_data()

            # return error when dimensions are different
            if SPM.get_data().shape != mask.shape:
                parsform = ParameterForm(request.POST or None,
                                         request.FILES or None,
                                         default_url="URL to nifti image",
                                         err="dim")
                context["parsform"] = parsform
                return render(request, template, context)
            else:
                form.nvox = np.sum(mask)
        form.masklocation = masklocation
        form.save()

        if parsdata.spmfile == "":
            return HttpResponseRedirect('../neuropowerviewer/')
        else:
            return HttpResponseRedirect('../neuropowertable/')
Example 20
# Load the fMRI image
# Note: nilearn functions accept either an image object or an fMRI file path
from nilearn.image import load_img
fMRIData = load_img(r'C:\Users\wq\nilearn_data\haxby2001\subj2\bold.nii')

# Compute the mask
from nilearn import masking
mask = masking.compute_background_mask(fMRIData)

# Download atlas from internet
from nilearn import datasets
dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
atlas_filename = dataset.maps
labels = dataset.labels

# Apply atlas to my data
from nilearn.image import resample_to_img
Atlas = resample_to_img(atlas_filename, mask, interpolation='nearest')

# Extract the time series
from nilearn.input_data import NiftiLabelsMasker
masker = NiftiLabelsMasker(labels_img=Atlas,
                           standardize=True,
                           memory='nilearn_cache',
                           verbose=5)
time_series = masker.fit_transform(fMRIData)

# Build a functional connectome from the extracted time series
from nilearn.connectome import ConnectivityMeasure
correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series])[0]
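# A short follow-up (not part of the original snippet) that visualises the
# connectome with nilearn's matrix plot; region labels are omitted here because
# their count may not match the extracted regions after resampling.
from nilearn import plotting
import numpy as np

np.fill_diagonal(correlation_matrix, 0)
plotting.plot_matrix(correlation_matrix, colorbar=True, vmax=0.8, vmin=-0.8)
plotting.show()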
Example 21
def download_images(dest_dir,images_df=None,target=None,resample=True):
    """Downloads images dataframe and resamples them to a common space"""
    orig_path = os.path.join(dest_dir, "original")
    mkdir_p(orig_path)
    if resample == True:
        if not target:
            print "To resample you must specify a target!"
            return
        resampled_path = os.path.join(dest_dir, "resampled")
        mkdir_p(resampled_path)
        target_nii = nb.load(target)  

    if not isinstance(images_df,pandas.DataFrame):
        images_df = get_images()

    out_df = images_df.copy()

    for row in images_df.iterrows():
        # Downloading the file to the "original" subfolder
        _, _, ext = split_filename(row[1]['file'])
        orig_file = os.path.join(orig_path, "%04d%s" % (row[1]['image_id'], ext))
        if not os.path.exists(orig_file):
            try:
                print "Downloading %s" % orig_file
                urllib.urlretrieve(row[1]['file'], orig_file)

                if resample == True:
                    # Compute the background and extrapolate outside of the mask
                    print "Extrapolating %s" % orig_file
                    niimg = nb.load(orig_file)
                    affine = niimg.get_affine()
                    data = niimg.get_data().squeeze()
                    niimg = nb.Nifti1Image(data, affine,header=niimg.get_header())
                    bg_mask = compute_background_mask(niimg).get_data()
                    # Test if the image has been masked:
                    out_of_mask = data[np.logical_not(bg_mask)]
                    if np.all(np.isnan(out_of_mask)) or len(np.unique(out_of_mask)) == 1:
                        # Need to extrapolate
                        data = _extrapolate_out_mask(data.astype(np.float), bg_mask,iterations=3)[0]
                    niimg = nb.Nifti1Image(data, affine,header=niimg.get_header())
                    del out_of_mask, bg_mask
                    # Resampling the file to target and saving the output in the "resampled" folder
                    resampled_file = os.path.join(resampled_path,"%06d%s" % (row[1]['image_id'], ext))
                    print "Resampling %s" % orig_file
                    resampled_nii = resample_img(niimg, target_nii.get_affine(),target_nii.shape)
                    resampled_nii = nb.Nifti1Image(resampled_nii.get_data().squeeze(),
                                                   resampled_nii.get_affine(),
                                                     header=niimg.get_header())
                    if len(resampled_nii.shape) == 3: 
                        resampled_nii.to_filename(resampled_file)
                    else:
                        # We have a 4D file
                        assert len(resampled_nii.shape) == 4
                        resampled_data = resampled_nii.get_data()
                        affine = resampled_nii.get_affine()
                        for index in range(resampled_nii.shape[-1]):
                            # First save the files separately
                            this_nii = nb.Nifti1Image(resampled_data[..., index],affine)
                            this_id = int("%i%i" % (-row[1]['image_id'], index))
                            this_file = os.path.join(resampled_path,"%06d%s" % (this_id, ext))
                            this_nii.to_filename(this_file)
                            # Second, fix the dataframe
                            out_df = out_df[out_df.image_id != row[1]['image_id']]
                            this_row = row[1].copy()
                            this_row.image_id = this_id
                            out_df = out_df.append(this_row)
            except Exception:
                print("Error downloading image id %s, retry this image." % (row[1]["image_id"]))
    return out_df
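# A hypothetical usage sketch for download_images (not from the original source).
# The destination directory and the resampling target path are placeholders;
# get_images() is assumed to return the images dataframe used above.
if __name__ == "__main__":
    images_df = get_images()
    out_df = download_images(dest_dir="/tmp/neurovault",
                             images_df=images_df,
                             target="/tmp/MNI152_T1_2mm_brain.nii.gz",
                             resample=True)
    print("Downloaded and resampled %d images" % len(out_df))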
Esempio n. 22
0
from nilearn import masking

mask = masking.compute_background_mask(
    r'E:\home\bct_test\NC_01_0001\rs6_f8dGR_w3_rabrat_4D.nii')
print(mask.get_data().shape)
from nilearn.masking import apply_mask

masked_data = apply_mask(
    r'E:\home\bct_test\NC_01_0001\rs6_f8dGR_w3_rabrat_4D.nii', mask)
print(masked_data.shape)
# masked_data shape is (timepoints, voxels). We can plot the timecourses of two
# voxels; the x-axis below is restricted to the first 150 TRs (see also the
# unmasking sketch after this example)

# And now plot a few of these
import matplotlib.pyplot as plt

plt.figure(figsize=(7, 5))
plt.plot(masked_data[:230, 98:100])
plt.xlabel('Time [TRs]', fontsize=16)
plt.ylabel('Intensity', fontsize=16)
plt.xlim(0, 150)
plt.subplots_adjust(bottom=.12, top=.95, right=.95, left=.12)

plt.show()
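# An optional extension of the example above: apply_mask returns a
# (timepoints, voxels) matrix, and nilearn.masking.unmask inverts the operation,
# rebuilding a 4D image from that matrix and the same mask.
from nilearn.masking import unmask

unmasked_img = unmask(masked_data, mask)
print(unmasked_img.shape)  # back to (x, y, z, timepoints)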
Esempio n. 23
0
def neuropowerinput(request,neurovault_id=None,end_session=False):
    '''step 2: input'''

    # Create the session id for the user
    sid = get_session_id(request)

    # Get the template/step status
    template = "neuropower/neuropowerinput.html"
    context = {}
    steps = get_neuropower_steps(template,sid)

    parsform = ParameterForm(request.POST or None,
                             request.FILES or None,
                             default_url="URL to nifti image",
                             err="")

    neurovault_id = request.GET.get('neurovault','')
    context["steps"] = steps
    message = request.GET.get('message','')
    context['message'] = message

    # If the user has ended their session, give message
    if end_session == True:
        context["message"] = "Session has been successfully reset."

    if neurovault_id:
        neurovault_image = get_url("http://neurovault.org/api/images/%s/?format=json" %(neurovault_id))
        collection_id = str(neurovault_image['collection_id'])

        if not (neurovault_image['map_type'] == 'Z map' or neurovault_image['map_type'] == 'T map' or neurovault_image['analysis_level']==None):
            context["message"] = "Power analyses can only be performed on Z or T maps."
        if not (neurovault_image['analysis_level'] == 'group' or neurovault_image['analysis_level']==None):
            context["message"] = "Power analyses can only be performed on group statistical maps."

        parsform = ParameterForm(request.POST or None,
                                 request.FILES or None,
                                 default_url = "",
                                 err = '',
                                 initial = {"url":neurovault_image["file"],
                                            "ZorT":"T" if neurovault_image["map_type"] =="T map" else "Z",
                                            "Subj":neurovault_image["number_of_subjects"]})
        context["parsform"] = parsform
        return render(request,template,context)

    if not request.method=="POST" or not parsform.is_valid():
        context["parsform"] = parsform
        return render(request,template,context)

    else:
        form = parsform.save(commit=False)
        form.SID = sid
        mapID = "%s_%s" %(str(sid),str(uuid.uuid4()))
        form.mapID = mapID
        form.save()

        # handle data: copy to temporary location
        parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
        mapID = "%s_%s" %(str(sid),str(uuid.uuid4()))
        form.mapID = mapID
        if not parsdata.url == "":
            url = parsform.cleaned_data['url']
            location = create_temporary_copy(url,mapID,mask=False,url=True)
        elif not parsdata.spmfile == "":
            spmfile = os.path.join(settings.MEDIA_ROOT,str(parsdata.spmfile))
            location = create_temporary_copy(spmfile,mapID,mask=False, url=False)
        form.location = location
        form.save()
        parsdata = ParameterModel.objects.filter(SID=sid)[::-1][0]
        SPM = nib.load(parsdata.location)
        if len(SPM.shape)>3:
            if not SPM.shape[3] == 1 or len(SPM.shape) > 4:
                parsform = ParameterForm(request.POST or None,
                                         request.FILES or None,
                                         default_url = "URL to nifti image",
                                         err = "shape")
                context["parsform"] = parsform
                return render(request,template,context)


        # check if the IQR is realistic (= check whether these are Z- or T-values)
        IQR = np.subtract(*np.percentile(SPM.get_data(),[75,25]))
        if IQR > 20:
            parsform = ParameterForm(request.POST or None,
                                     request.FILES or None,
                                     default_url = "URL to nifti image",
                                     err = "median")
            context["parsform"] = parsform
            return render(request,template,context)

        # save other parameters
        form.DoF = parsdata.Subj-1 if parsdata.Samples==1 else parsdata.Subj-2
        form.ExcZ = float(parsdata.Exc) if float(parsdata.Exc)>1 else -norm.ppf(float(parsdata.Exc))

        # handle mask
        if parsdata.maskfile == "":
            mask = masking.compute_background_mask(SPM,border_size=2, opening=True)
            nvox = np.sum(mask.get_data())
            masklocation = os.path.join(settings.MEDIA_ROOT,"maps/mask_"+mapID+".nii.gz")
            nib.save(mask,masklocation)
            form.nvox = nvox
        else:
            maskfile = os.path.join(settings.MEDIA_ROOT,str(parsdata.maskfile))
            masklocation = create_temporary_copy(maskfile,mapID,mask=True,url=False)
            mask = nib.load(masklocation).get_data()

            # return error when dimensions are different
            if SPM.get_data().shape != mask.shape:
                parsform = ParameterForm(request.POST or None,
                                         request.FILES or None,
                                         default_url="URL to nifti image",
                                         err="dim")
                context["parsform"] = parsform
                return render(request,template,context)
            else:
                form.nvox = np.sum(mask)
        form.masklocation = masklocation
        form.save()

        if parsdata.spmfile == "":
            return HttpResponseRedirect('../neuropowerviewer/')
        else:
            return HttpResponseRedirect('../neuropowertable/')
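# A standalone numeric sketch (illustrative only) of the IQR sanity check used in
# the view above: for genuine Z-values the interquartile range is about 1.35, so
# an IQR above 20 almost certainly means the map holds raw data, not statistics.
import numpy as np

z_values = np.random.randn(10000)                     # plausible Z-map values
iqr = np.subtract(*np.percentile(z_values, [75, 25]))
print(iqr)                                            # ~1.35, far below the 20 cutoff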
Esempio n. 24
0
    def _run(self):
        # NOTE TO FUTURE READERS:
        # This code is extremely memory inefficient.  I am currently working
        # with a small dataset which allows for this inefficiency.
        # Refactoring will be necessary at larger scales.

        print("========== PREPROCESSING ==========")

        # Set up structures to hold brain data, brain imgs, associated tags, and 1-hot mapping
        brain_imgs = []
        brain_data = []
        brain_data_tags = []

        tag_encoding_count = 0
        self.brain_data_tag_encoding_map = {}
        self.brain_data_tag_decoding_map = {}

        # Retrieve names of all files in data_dirs
        collection_filenames = self.all_data_paths()

        # Loop over data files:
        total_num_files = len(collection_filenames)
        num_processed = 0

        for filename in collection_filenames:
            num_processed += 1
            print("PERCENT COMPLETE: {0:.2f}%\r".format(100.0 * float(num_processed) / float(total_num_files)), end='')

            # Ignore files that are not images.
            if filename[-2:] != 'gz':
                continue

            # Load brain image.
            brain_img = load_img(filename)
            brain_imgs.append(brain_img)

            # Load brain image metadata.
            metadata_tags = self.labels_for_brain_image(filename)
            brain_data_tags.append(metadata_tags)

            # Downsample brain image.
            downsampled_brain_img = resample_brain_img(brain_img, scale=self.scale)
            downsampled_brain_img_data = downsampled_brain_img.get_data()

            # Normalize brain image data.
            normalized_downsampled_brain_img_data = normalize_brain_img_data(downsampled_brain_img_data)
            brain_data.append(normalized_downsampled_brain_img_data)

            # Build one hot encoding map.
            if self.multi_tag_label_encoding:
                for tag in metadata_tags:
                    if tag not in self.brain_data_tag_encoding_map:
                        self.brain_data_tag_encoding_map[tag] = tag_encoding_count
                        self.brain_data_tag_decoding_map[tag_encoding_count] = tag
                        tag_encoding_count += 1
            else:
                # Merge all metadata tags into a single string:
                metadata_tags.sort()
                metadata_tags = ",".join(metadata_tags)

                if metadata_tags not in self.brain_data_tag_encoding_map:
                    self.brain_data_tag_encoding_map[metadata_tags] = tag_encoding_count
                    self.brain_data_tag_decoding_map[tag_encoding_count] = metadata_tags
                    tag_encoding_count += 1

        # Compute dataset mask.
        print("Computing dataset mask...\r", end='')
        brain_data_mask = masking.compute_background_mask(brain_imgs)

        # 1-hot encode all brain data tags.
        print("1-hot encoding brain data tags...\r", end='')
        for i in range(len(brain_data_tags)):
            brain_data_tags[i] = self.encode_label(brain_data_tags[i])

        # Write preprocessed brain data out as binary files.
        print("Writing preprocessed brain data out to files...\r", end='')
        # Use context managers so the pickled files are flushed and closed properly.
        with open(self.brain_data_path, 'wb') as brain_data_f, \
                open(self.brain_data_mask_path, 'wb') as brain_data_mask_f, \
                open(self.brain_data_tags_path, 'wb') as brain_data_tags_f, \
                open(self.brain_data_tags_encoding_path, 'wb') as brain_data_tags_encoding_f, \
                open(self.brain_data_tags_decoding_path, 'wb') as brain_data_tags_decoding_f:
            print("                                                 \r", end='')

            pickle.dump(brain_data, brain_data_f)
            pickle.dump(brain_data_mask, brain_data_mask_f)
            pickle.dump(brain_data_tags, brain_data_tags_f)
            pickle.dump(self.brain_data_tag_encoding_map, brain_data_tags_encoding_f)
            pickle.dump(self.brain_data_tag_decoding_map, brain_data_tags_decoding_f)
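# A hedged, standalone illustration of how the encoding map built above could back
# a one-hot vector. encode_label itself is not shown in this snippet, so this is an
# assumption about its behaviour rather than the project's actual implementation.
import numpy as np

def one_hot(tag, encoding_map):
    """Return a one-hot vector with a 1 at the index assigned to `tag`."""
    vec = np.zeros(len(encoding_map), dtype=np.float32)
    vec[encoding_map[tag]] = 1.0
    return vec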
Esempio n. 25
0
def t2_pipeline(do_coreg=True,
                do_normalise_before=False,
                do_segment=True,
                do_normalise_after=False,
                do_plot=True,
                keep_tmp=True,
                sub_name='sub-11',
                sess_num='ses-17',
                root_path='/neurospin/ibc'):
    """
    Preprocess qMRI t2 images and then run estimation to generate t2-maps,
    more details in scripts/qmri_README.md,
    only one of do_normalise_before and do_normalise_after should be True,
    both can be False
    """
    DATA_LOC = join(root_path, 'sourcedata', sub_name, sess_num)
    SAVE_TO = join(root_path, 'derivatives', sub_name, sess_num)

    if do_normalise_before or do_normalise_after:
        space = "MNI152"
    else:
        space = "individual"

    if not exists(SAVE_TO):
        makedirs(SAVE_TO)

    start_time = time.time()

    # data files
    data_dir = DATA_LOC

    niftis = []
    jsons = []
    for fi in listdir(join(data_dir, 'anat')):
        if fi.split('_')[-1] == 'T2map.nii.gz':
            niftis.append(join(data_dir, 'anat', fi))
        elif fi.split('_')[-1] == 'T2map.json':
            jsons.append(join(data_dir, 'anat', fi))
        else:
            continue
    niftis.sort()
    jsons.sort()

    run_count = 0
    for nifti in niftis:
        # preprocessing directory setup
        time_elapsed = time.time() - start_time
        print('[INFO,  t={:.2f}s] copying necessary files...'.format(
            time_elapsed))
        cwd = SAVE_TO
        preproc_dir = join(cwd, 'tmp_t2', 'preproc')
        if not exists(preproc_dir):
            makedirs(preproc_dir)

        system('cp {} {}'.format(nifti, preproc_dir))
        nifti = join(preproc_dir, nifti.split(sep)[-1])
        system('gunzip -df {}'.format(nifti))
        nifti = join(preproc_dir, nifti.split(sep)[-1].split('.')[0] + '.nii')

        if do_coreg:
            # t1 image as an anatomical image
            t1_niftis = []
            for fi in listdir(join(data_dir, 'anat')):
                if fi.split('_')[-1] == 'T1map.nii.gz':
                    t1_niftis.append(join(data_dir, 'anat', fi))
            t1_niftis.sort()
            t1_nifti = t1_niftis[-1]
            system('cp {} {}'.format(t1_nifti, preproc_dir))
            t1_nifti = join(preproc_dir, t1_nifti.split(sep)[-1])
            system('gunzip -df {}'.format(t1_nifti))
            t1_nifti = join(preproc_dir,
                            t1_nifti.split(sep)[-1].split('.')[0] + '.nii')

        # preprocessing step: spatial normalization of the input images to MNI
        # space (the T1 image is normalised as well when coregistration is on)
        if do_normalise_before:
            image = nifti
            if do_coreg:
                t1_img = t1_nifti
                time_elapsed = time.time() - start_time
                print('[INFO,  t={:.2f}s] segmenting the t1 image'.format(
                    time_elapsed))
                out_info = segment(t1_img, False)
                time_elapsed = time.time() - start_time
                print('[INFO,  t={:.2f}s] transforming images to MNI space...'.
                      format(time_elapsed))
                out_info = to_MNI(image=t1_img, data=out_info, func=image)
                normed_t1_img = out_info['anat']
                normed_nifti = out_info['func'][0]
            else:
                time_elapsed = time.time() - start_time
                print('[INFO,  t={:.2f}s] segmenting the t2 image'.format(
                    time_elapsed))
                out_info = segment(image, False)
                time_elapsed = time.time() - start_time
                print('[INFO,  t={:.2f}s] transforming images to MNI space...'.
                      format(time_elapsed))
                out_info = to_MNI(image, data=out_info)
                normed_nifti = out_info['anat']
            time_elapsed = time.time() - start_time
            print('[INFO,  t={:.2f}s] \t transformed {}'.format(
                time_elapsed,
                nifti.split(sep)[-1]))

        # preprocessing step: transform t1 image to t2 space
        if do_coreg:
            time_elapsed = time.time() - start_time
            print('[INFO,  t={:.2f}s] transforming t1 image to t2 space...'.
                  format(time_elapsed))
            if do_normalise_before:
                t2_img = normed_nifti
                t1_img = normed_t1_img
            else:
                t2_img = nifti
                t1_img = t1_nifti
            mean_t2 = mean_img(t2_img)
            mean_t2_img = join(preproc_dir,
                               'mean_{}'.format(t2_img.split(sep)[-1]))
            mean_t2.to_filename(mean_t2_img)
            out_info = to_T2space(t2_img=mean_t2_img,
                                  t1_img=t1_img,
                                  output_dir=preproc_dir)
            print('[INFO,  t={:.2f}s] \t transformed {} to {} space'.format(
                time_elapsed,
                t1_img.split(sep)[-1],
                mean_t2_img.split(sep)[-1]))

        # preprocessing step: segmenting an image to build a brain mask
        if do_segment:
            if do_normalise_before:
                time_elapsed = time.time() - start_time
                print(
                    '[INFO,  t={:.2f}s] creating a mask'.format(time_elapsed))
                if do_coreg:
                    image = normed_t1_img
                else:
                    image = normed_nifti
                mni = compute_brain_mask(image)
                closed_mni = closing(mni, 12)
                union = intersect_masks([mni, closed_mni], threshold=0)
            else:
                if do_coreg:
                    image = t1_img
                else:
                    image = nifti
                time_elapsed = time.time() - start_time
                print(
                    '[INFO,  t={:.2f}s] segmenting the image for creating a mask'
                    .format(time_elapsed))
                out_info = segment(image, False)
                segments = [out_info['gm'], out_info['wm']]
                time_elapsed = time.time() - start_time
                print('[INFO,  t={:.2f}s] \t segmented {}'.format(
                    time_elapsed,
                    image.split(sep)[-1]))

                # preprocessing step: creating a mask
                time_elapsed = time.time() - start_time
                print(
                    '[INFO,  t={:.2f}s] creating a mask using segments'.format(
                        time_elapsed))
                add = math_img("img1 + img2",
                               img1=segments[0],
                               img2=segments[1])
                if sub_name == 'sub-08':
                    full = compute_epi_mask(add, exclude_zeros=True)
                else:
                    full = compute_epi_mask(add)
                insides = compute_background_mask(full, opening=12)
                union = intersect_masks([full, insides], threshold=0)

            mask_file = join(preproc_dir, 'mask.nii')
            union.to_filename(mask_file)

            if do_coreg:
                resampled_mask = resample_to_img(mask_file,
                                                 mean_t2_img,
                                                 clip=True)
                rounded_resampled_mask = math_img('np.around(img1)',
                                                  img1=resampled_mask)
                resampled_mask_img = join(preproc_dir, 'resampled_mask.nii')
                rounded_resampled_mask.to_filename(resampled_mask_img)
            else:
                resampled_mask_img = mask_file

        # estimation directory setup
        time_elapsed = time.time() - start_time
        print('[INFO,  t={:.2f}s] starting estimation...'.format(time_elapsed))
        recon_dir = join(cwd, 'tmp_t2', 'recon')
        if not exists(recon_dir):
            makedirs(recon_dir)

        if do_normalise_before:
            niftis_str = normed_nifti
        else:
            niftis_str = nifti

        if not do_segment:
            # no mask was computed; keep the variables used below defined
            mask_file = None
            resampled_mask_img = None

        # estimation: t2 estimation
        system(f"python3 ../scripts/qmri_t2_map.py\
            -v 1\
            -s {sub_name}\
            -o {recon_dir}\
            -n {niftis_str}\
            -m {resampled_mask_img}")

        recon_map = join(recon_dir, f'{sub_name}_T2map.nii.gz')

        # postprocessing: normalization of reconstructed t2 map
        if do_normalise_after:
            postproc_dir = join(cwd, 'tmp_t2', 'postproc')
            if not exists(postproc_dir):
                makedirs(postproc_dir)

            system('cp {} {}'.format(recon_map, postproc_dir))
            time_elapsed = time.time() - start_time
            print('[INFO,  t={:.2f}s] normalizing reconstructed map...'.format(
                time_elapsed))
            image = join(postproc_dir, f'{sub_name}_T2map.nii.gz')
            system('gunzip -df {}'.format(image))
            image = join(postproc_dir, f'{sub_name}_T2map.nii')
            out_info = to_MNI(image,
                              segmented=out_info.nipype_results['segment'])
            norm_recon_map = out_info['anat']

        # doing the plots
        if do_plot:
            time_elapsed = time.time() - start_time
            print('\n[INFO,  t={:.2f}s] plotting the map...'.format(
                time_elapsed))
            plot_dir = join(cwd, 'tmp_t2', 'plot')
            if not exists(plot_dir):
                makedirs(plot_dir)
            if do_normalise_after:
                t2_img = norm_recon_map
            else:
                t2_img = recon_map
            plot_thresholded_qmap(img=t2_img,
                                  thresh="95",
                                  map=f"{sub_name}_T2map",
                                  interactive=True,
                                  coords=(10, 56, 43),
                                  output_folder=plot_dir)

        time_elapsed = time.time() - start_time
        print('\n[INFO,  t={:.2f}s] DONE'.format(time_elapsed))

        # move derived files out and delete tmp_t2 directory
        final_recon_map = join(
            SAVE_TO,
            f'{sub_name}_run-0{run_count+1}_space-{space}_T2map.nii.gz')
        if do_normalise_after:
            system('gzip {}'.format(norm_recon_map))
            recon_map = norm_recon_map + '.gz'
        shutil.move(recon_map, final_recon_map)

        if do_plot:
            for fi in listdir(plot_dir):
                plot_name = join(plot_dir, fi)
                final_plot_name = join(
                    SAVE_TO,
                    final_recon_map.split(sep)[-1].split('.')[0] + '_' +
                    fi.split('_')[-1])
                shutil.move(plot_name, final_plot_name)

        if not keep_tmp:
            shutil.rmtree(join(SAVE_TO, 'tmp_t2'))

        time_elapsed = time.time() - start_time
        print('\n[INFO,  t={:.2f}s] created {} \n\n'.format(
            time_elapsed, final_recon_map))
        run_count = run_count + 1
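# A sample invocation of the pipeline above; the keyword values simply mirror the
# function's defaults, and root_path must point at an existing IBC-style layout.
if __name__ == "__main__":
    t2_pipeline(do_coreg=True,
                do_normalise_before=False,
                do_segment=True,
                do_normalise_after=False,
                do_plot=True,
                keep_tmp=True,
                sub_name='sub-11',
                sess_num='ses-17',
                root_path='/neurospin/ibc')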