Example #1
def rescale_centroids(ctd_list, img, voxel_spacing=(1, 1, 1)):
    """rescale centroid coordinates to new spacing in current x-y-z-orientation
    
    Parameters:
    ----------
    ctd_list: list of centroids
    img: nibabel image 
    voxel_spacing: desired spacing
    
    Returns:
    ----------
    out_list: rescaled list of centroids 
    
    """
    ornt_img = nio.io_orientation(img.affine)
    ornt_ctd = nio.axcodes2ornt(ctd_list[0])
    if np.array_equal(ornt_img, ornt_ctd):
        zms = img.header.get_zooms()
    else:
        ornt_trans = nio.ornt_transform(ornt_img, ornt_ctd)
        aff_trans = nio.inv_ornt_aff(ornt_trans, img.dataobj.shape)
        new_aff = np.matmul(img.affine, aff_trans)
        zms = nib.affines.voxel_sizes(new_aff)
    ctd_arr = np.transpose(np.asarray(ctd_list[1:]))
    v_list = ctd_arr[0].astype(int).tolist()  # vertebral labels
    ctd_arr = ctd_arr[1:]
    ctd_arr[0] = np.around(ctd_arr[0] * zms[0] / voxel_spacing[0], decimals=1)
    ctd_arr[1] = np.around(ctd_arr[1] * zms[1] / voxel_spacing[1], decimals=1)
    ctd_arr[2] = np.around(ctd_arr[2] * zms[2] / voxel_spacing[2], decimals=1)
    out_list = [ctd_list[0]]
    ctd_list = np.transpose(ctd_arr).tolist()
    for v, ctd in zip(v_list, ctd_list):
        out_list.append([v] + ctd)
    print("[*] Rescaled centroid coordinates to spacing (x, y, z) =", voxel_spacing, "mm")
    return out_list
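A minimal usage sketch for the function above, assuming `nio` is `nibabel.orientations`, that `rescale_centroids` is in scope, and that the centroid list follows the format implied by the code (orientation codes first, then `[label, x, y, z]` entries); the image and labels are hypothetical.

import numpy as np
import nibabel as nib
import nibabel.orientations as nio  # alias assumed by rescale_centroids

# Dummy image with anisotropic 2 x 2 x 3 mm voxels.
img = nib.Nifti1Image(np.zeros((10, 10, 10), dtype=np.float32), np.diag([2.0, 2.0, 3.0, 1.0]))
img.header.set_zooms((2.0, 2.0, 3.0))

# Centroid list: orientation codes first, then [vertebra_label, x, y, z] entries.
ctd_list = [('R', 'A', 'S'), [20, 5.0, 5.0, 5.0], [21, 5.0, 6.0, 7.0]]

rescaled = rescale_centroids(ctd_list, img, voxel_spacing=(1, 1, 1))
print(rescaled)  # voxel coordinates scaled by the 2/2/3 mm zooms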
Example #2
def reorient_to(img, axcodes_to=('P', 'I', 'R'), verb=False):
    """Reorients the nifti from its original orientation to another specified orientation
    
    Parameters:
    ----------
    img: nibabel image
    axcodes_to: a tuple of 3 characters specifying the desired orientation
    verb: if True, print a message after reorienting
    
    Returns:
    ----------
    newimg: The reoriented nibabel image 
    
    """
    aff = img.affine
    arr = np.asanyarray(img.dataobj, dtype=img.dataobj.dtype)
    ornt_fr = nio.io_orientation(aff)
    ornt_to = nio.axcodes2ornt(axcodes_to)
    ornt_trans = nio.ornt_transform(ornt_fr, ornt_to)
    aff_trans = nio.inv_ornt_aff(ornt_trans, arr.shape)  # compute with the shape before reorienting
    arr = nio.apply_orientation(arr, ornt_trans)
    newaff = np.matmul(aff, aff_trans)
    newimg = nib.Nifti1Image(arr, newaff)
    if verb:
        print("[*] Image reoriented from", nio.ornt2axcodes(ornt_fr), "to", axcodes_to)
    return newimg
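A short usage sketch, again assuming `nio` is `nibabel.orientations` and that `reorient_to` is in scope; the input image is a hypothetical in-memory RAS+ volume.

import numpy as np
import nibabel as nib
import nibabel.orientations as nio  # alias assumed by reorient_to

img = nib.Nifti1Image(np.random.rand(8, 16, 24).astype(np.float32), np.eye(4))
print(nib.aff2axcodes(img.affine))         # ('R', 'A', 'S')

reoriented = reorient_to(img, axcodes_to=('P', 'I', 'R'), verb=True)
print(nib.aff2axcodes(reoriented.affine))  # ('P', 'I', 'R')
print(reoriented.shape)                    # (16, 24, 8): axes permuted to match P, I, R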
Example #3
def get_orientation(im):
    from nibabel import orientations
    orientation_dic = {
        (0, 1): 'L',
        (0, -1): 'R',
        (1, 1): 'P',
        (1, -1): 'A',
        (2, 1): 'I',
        (2, -1): 'S',
    }

    orientation_matrix = orientations.io_orientation(im.hdr.get_best_affine())
    ori = orientation_dic[tuple(orientation_matrix[0])] + orientation_dic[tuple(orientation_matrix[1])] + orientation_dic[tuple(orientation_matrix[2])]

    return ori
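For illustration, a minimal sketch: the `im` argument only needs an `hdr` attribute exposing `get_best_affine()`, so a small hypothetical stand-in object is used here.

import numpy as np

class _FakeImage:  # hypothetical stand-in for the image type get_orientation expects
    class _Hdr:
        def __init__(self, affine):
            self._affine = affine
        def get_best_affine(self):
            return self._affine
    def __init__(self, affine):
        self.hdr = self._Hdr(affine)

# Identity (RAS+) affine: io_orientation returns [[0, 1], [1, 1], [2, 1]],
# which this lookup table labels 'LPI' (each axis is named by its starting direction).
print(get_orientation(_FakeImage(np.eye(4))))  # 'LPI'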
Example #5
def as_xyz_affable(img, name2xyz=None):
    """ Return version of `img` that has a valid xyz affine, or raise error

    Parameters
    ----------
    img : ``Image`` instance or nibabel image
        It has a ``coordmap`` attribute (``Image``) or a ``get_affine`` method
        (nibabel image object)
    name2xyz : None or mapping
        Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
        raises a KeyError for a str ``ax_name``.  None means use module default.
        Not used for nibabel `img` input.

    Returns
    -------
    reo_img : ``Image`` instance or nibabel image
        Returns image of same type as `img` input. If necessary, `reo_img` has
        its data and coordmap changed to allow it to return an xyz affine.  If
        `img` is already xyz affable we return the input unchanged (``img is
        reo_img``).

    Raises
    ------
    SpaceTypeError : if `img` does not have an affine coordinate map
    AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range
    AffineError : if axes dropped from the affine contribute to x, y, z
    coordinates
    """
    try:
        aff = xyz_affine(img, name2xyz)
    except rsp.AffineError:
        pass
    else:
        return img
    cmap = img.coordmap
    order = rsp.xyz_order(cmap.function_range, name2xyz)
    # Reorder reference to canonical order
    reo_img = img.reordered_reference(order)
    # Which input axes correspond?
    ornt = io_orientation(reo_img.coordmap.affine)
    desired_input_order = np.argsort(ornt[:,0])
    reo_img = reo_img.reordered_axes(list(desired_input_order))
    try:
        aff = xyz_affine(reo_img, name2xyz)
    except rsp.AffineError:
        raise rsp.AffineError("Could not reorder so xyz coordinates did not "
                              "depend on the other axis coordinates")
    return reo_img
Example #6
def reorder_voxels(vox_array, affine, voxel_order):
    '''Reorder the given voxel array and corresponding affine.

    Parameters
    ----------
    vox_array : array
        The array of voxel data

    affine : array
        The affine for mapping voxel indices to Nifti patient space

    voxel_order : str
        A three character code specifying the desired ending point for rows,
        columns, and slices in terms of the orthogonal axes of patient space:
        (l)eft, (r)ight, (a)nterior, (p)osterior, (s)uperior, and (i)nferior.

    Returns
    -------
    out_vox : array
        An updated view of vox_array.

    out_aff : array
        A new array with the updated affine

    reorient_transform : array
        The transform used to update the affine.

    ornt_trans : tuple
        The orientation transform used to update the orientation.

    '''
    #Check if voxel_order is valid
    voxel_order = voxel_order.upper()
    if len(voxel_order) != 3:
        raise ValueError('The voxel_order must contain three characters')
    dcm_axes = ['LR', 'AP', 'SI']
    for char in voxel_order:
        if not char in 'LRAPSI':
            raise ValueError('The characters in voxel_order must be one '
                             'of: L,R,A,P,I,S')
        for idx, axis in enumerate(dcm_axes):
            if char in axis:
                del dcm_axes[idx]
    if len(dcm_axes) != 0:
        raise ValueError('No character in voxel_order corresponding to '
                         'axes: %s' % dcm_axes)

    #Check the vox_array and affine have correct shape/size
    if len(vox_array.shape) < 3:
        raise ValueError('The vox_array must be at least three dimensional')
    if affine.shape != (4, 4):
        raise ValueError('The affine must be 4x4')

    #Pull the current index directions from the affine
    orig_ornt = io_orientation(affine)
    new_ornt = axcodes2ornt(voxel_order)
    ornt_trans = ornt_transform(orig_ornt, new_ornt)
    orig_shape = vox_array.shape
    vox_array = apply_orientation(vox_array, ornt_trans)
    aff_trans = inv_ornt_aff(ornt_trans, orig_shape)
    affine = np.dot(affine, aff_trans)

    return (vox_array, affine, aff_trans, ornt_trans)
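A usage sketch for reorder_voxels, assuming numpy plus the nibabel.orientations helpers used above are imported and the function is in scope; the voxel block and affine are hypothetical.

import numpy as np
import nibabel as nib
from nibabel.orientations import (io_orientation, axcodes2ornt, ornt_transform,
                                  apply_orientation, inv_ornt_aff)

vox = np.random.rand(4, 5, 6)    # dummy voxel block
aff = np.eye(4)                  # RAS+ affine

out_vox, out_aff, reorient_transform, ornt_trans = reorder_voxels(vox, aff, 'LPS')
print(out_vox.shape)             # (4, 5, 6): RAS -> LPS only flips axes, no permutation
print(nib.aff2axcodes(out_aff))  # ('L', 'P', 'S')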
Example #7
def to_affine(
    orientation,
    spacing: Sequence[Union[int, float]] = None,
    origin: Sequence[Union[int, float]] = None,
):
    """Convert orientation, spacing, and origin data into affine matrix.

    Args:
        orientation (Sequence[str]): Image orientation in the standard orientation format
            (e.g. ``("LR", "AP", "SI")``).
        spacing (int(s) | float(s)): Number(s) corresponding to pixel spacing of each direction.
            If a single value, same pixel spacing is used for all directions.
            If the sequence is shorter than ``orientation``, remaining directions have unit
            spacing (i.e. ``1``). Defaults to unit spacing ``(1, 1, 1)``.
        origin (int(s) | float(s)): The ``(x0, y0, z0)`` origin for the scan.
            If a single value, same origin is used for all directions.
            If the sequence is shorter than ``orientation``, remaining directions have the standard
            origin (i.e. ``0``). Defaults to ``(0, 0, 0)``.

    Returns:
        ndarray: A 4x4 ndarray representing the affine matrix.

    Examples:
        >>> to_affine(("SI", "AP", "RL"), spacing=(0.5, 0.5, 1.5), origin=(10, 20, 0))
        array([[-0. , -0. , -1.5,  10. ],
               [-0. , -0.5, -0. ,  20. ],
               [-0.5, -0. , -0. ,  30. ],
               [ 0. ,  0. ,  0. ,   1. ]])

    Note:
        This method assumes all directions follow the standard principal directions in the normative
        patient orientation. Moving along one direction of the array only moves along one of the
        normative directions.
    """
    def _format_numbers(input, default_val, name, expected_num):
        """Formats (sequence of) numbers (spacing, origin) into standard 3-length tuple."""
        if input is None:
            return (default_val, ) * expected_num
        if isinstance(input, (int, float)):
            return (input, ) * expected_num

        if not isinstance(input,
                          (np.ndarray, Sequence)) or len(input) > expected_num:
            raise ValueError(
                f"`{name}` must be a real number or sequence (length<={expected_num}) "
                f"of real numbers. Got {input}")
        input = tuple(input)

        if len(input) < expected_num:
            input += (default_val, ) * (expected_num - len(input))
        assert len(input) == expected_num

        return input

    if len(orientation) == 2:
        orientation = _infer_orientation(orientation)
    __check_orientation__(orientation)
    spacing = _format_numbers(spacing, 1, "spacing", len(orientation))
    origin = _format_numbers(origin, 0, "origin", len(orientation))

    affine = np.eye(4)
    start_ornt = nibo.io_orientation(affine)
    end_ornt = nibo.axcodes2ornt(orientation_standard_to_nib(orientation))
    ornt = nibo.ornt_transform(start_ornt, end_ornt)

    transpose_idxs = ornt[:, 0].astype(int)  # np.int was removed from NumPy; use the builtin int
    flip_idxs = ornt[:, 1]

    affine[:3] = affine[:3][transpose_idxs]
    affine[:3] *= flip_idxs[..., np.newaxis]
    affine[:3, :3] *= np.asarray(spacing)
    affine[:3, 3] = np.asarray(origin)

    return affine
Example #8
def MGDMBrainSegmentation(input_filename_type_list,
                          output_dir=None,
                          num_steps=5,
                          atlas_file=None,
                          topology_lut_dir=None):
    """
    Perform MGDM segmentation
    :param input_filename_type_list: list of [[fname1,type1],[fname2,type2],...] - for a maximum of 4 inputs
    :param output_dir: full path to the output directory
    :param num_steps: number of steps for (default 5, set to 0 for testing)
    :param atlas_file: full path to the atlas file, default set in defaults.py
    :param topology_lut_dir: full path to the directory with the topology files, default set in defaults.py
    :return:
    """

    from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation, ornt_transform
    import os

    print(
        "Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs"
    )
    print("Sit back and relax, let the magic of algorithms happen...")
    print("")
    if output_dir is None:
        output_dir = os.path.dirname(input_filename_type_list[0][0])
    if atlas_file is None:
        atlas = os.path.join(ATLAS_DIR, 'brain-atlas-3.0.3.txt')
    else:
        atlas = atlas_file

    if topology_lut_dir is None:
        topology_lut_dir = TOPOLOGY_LUT_DIR  # grabbing this from the default settings in defaults.py
    else:
        if not (
                topology_lut_dir[-1] == os.sep
        ):  #if we don't end in a path sep, we need to make sure that we add it
            topology_lut_dir += os.sep

    print("Atlas file: " + atlas)
    print("Topology LUT durectory: " + topology_lut_dir)
    print("")

    if not any(isinstance(el, list)
               for el in input_filename_type_list):  #make into list of lists
        input_filename_type_list = [input_filename_type_list]

    # now we set up the MGDM-specific settings
    mgdm = cj.BrainMgdmMultiSegmentation2()
    mgdm.setAtlasFile(atlas)
    mgdm.setTopologyLUTdirectory(topology_lut_dir)

    mgdm.setOutputImages('segmentation')
    # --> mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S) # this is the default for MGDM, <--
    # mgdm.setOrientations(mgdm.AXIAL, mgdm.L2R, mgdm.P2A, mgdm.I2S)  #LR,PA,IS is always how they are returned from nibabel
    mgdm.setAdjustIntensityPriors(False)  # default is True
    mgdm.setComputePosterior(False)
    mgdm.setDiffuseProbabilities(False)
    mgdm.setSteps(num_steps)
    mgdm.setTopology('wcs')  # {'wcs','no'} no=off for testing, wcs=default
    for idx, con in enumerate(input_filename_type_list):
        print("Input files and filetypes:")
        print("  " + str(idx + 1) + " "),
        print(con)
        #flipLR = False
        #flipAP = False
        #flipIS = False

        fname = con[0]
        type = con[1]
        d, d_aff, d_head = niiLoad(fname, return_header=True)

        ## usage example in the proc_file function of : https://github.com/nipy/nibabel/blob/master/bin/parrec2nii
        ornt_orig = io_orientation(d_aff)
        ornt_mgdm = io_orientation(np.diag(
            [-1, -1, 1,
             1]).dot(d_aff))  # -1 -1 1 LPS (mgdm default); 1 1 1 is RAS
        ornt_chng = ornt_transform(
            ornt_mgdm, ornt_orig)  # to get from MGDM to our original input

        # convert orientation information to mgdm slice and orientation info
        aff_orients, aff_slc = get_affine_orientation_slice(d_aff)
        print("data orientation: " + str(aff_orients)),
        print("slice settings: " + aff_slc)
        print("mgdm orientation: " + str(ornt_mgdm))
        print("data orientation: " + str(ornt_orig))

        if aff_slc == "AXIAL":
            SLC = mgdm.AXIAL
        elif aff_slc == "SAGITTAL":
            SLC = mgdm.SAGITTAL
        else:
            SLC = mgdm.CORONAL
        for aff_orient in aff_orients:  #TODO: if anything is different from the default MGDM settings, we need to flip axes of the data at the end
            if aff_orient == "L":
                LR = mgdm.R2L
            elif aff_orient == "R":
                LR = mgdm.L2R
            # flipLR = True
            elif aff_orient == "A":
                AP = mgdm.P2A
                #flipAP = True
            elif aff_orient == "P":
                AP = mgdm.A2P
            elif aff_orient == "I":
                IS = mgdm.S2I
                #flipIS = True
            elif aff_orient == "S":
                IS = mgdm.I2S
        mgdm.setOrientations(SLC, LR, AP,
                             IS)  #L2R,P2A,I2S is nibabel default (i.e., RAS)

        if idx + 1 == 1:
            # we use the first image to set the dimensions and resolutions
            res = d_head.get_zooms()
            res = [a1.item()
                   for a1 in res]  # cast to regular python float type
            mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
            mgdm.setResolutions(res[0], res[1], res[2])

            # keep the shape and affine from the first image for saving
            d_shape = np.array(d.shape)
            out_root_fname = os.path.basename(fname)[0:os.path.basename(
                fname).find('.')]  #assumes no periods in filename, :-/

            mgdm.setContrastImage1(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType1(type)
        elif idx + 1 == 2:
            mgdm.setContrastImage2(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType2(type)
        elif idx + 1 == 3:
            mgdm.setContrastImage3(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType3(type)
        elif idx + 1 == 4:
            mgdm.setContrastImage4(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType4(type)
    try:
        print("Executing MGDM on your inputs")
        print("Don't worry, the magic is happening!")
        mgdm.execute()
        print(os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz'))

        # outputs
        # reshape Fortran-style arrays back to the layout nibabel expects
        seg_im = np.reshape(
            np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32), d_shape,
            'F')
        lbl_im = np.reshape(
            np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32),
            d_shape, 'F')
        ids_im = np.reshape(
            np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32), d_shape,
            'F')

        # fix orientation back to the input orientation :-/ not really working
        # seg_im = apply_orientation(seg_im, ornt_chng) # this takes care of the orientations between mipav and input
        # lbl_im = apply_orientation(lbl_im, ornt_chng) # TODO: fix the origin point offset?, 2x check possible RL flip
        # ids_im = apply_orientation(ids_im, ornt_chng) # alternative: register? https://github.com/pyimreg
        #

        # save
        seg_file = os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz')
        lbl_file = os.path.join(output_dir, out_root_fname + '_lbl_cjs.nii.gz')
        ids_file = os.path.join(output_dir, out_root_fname + '_ids_cjs.nii.gz')

        ## this will work, but the solution with nibabel.orientations is much cleaner
        # if our settings were not the same as MGDM likes, we need to flip the relevant settings:
        #d_aff_new = flip_affine_orientation(d_aff, flipLR=flipLR, flipAP=flipAP, flipIS=flipIS)

        d_head['data_type'] = np.array(32).astype(
            'uint32')  #convert the header as well
        d_head['cal_max'] = np.max(seg_im)  #max for display
        niiSave(seg_file, seg_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(lbl_im)
        niiSave(lbl_file, lbl_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(ids_im)  # convert the header as well
        niiSave(ids_file, ids_im, d_aff, header=d_head, data_type='uint32')
        print("Data stored in: " + output_dir)
    except:
        print("--- MGDM failed. Go cry. ---")
        return
    print("Execution completed")

    return seg_im, d_aff, d_head
Example #9
def get_affine_orientation(a):
    import nibabel.orientations as orient
    return orient.io_orientation(a)  #orientation of the x, y, z
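For illustration, assuming numpy is imported: the wrapper simply returns nibabel's io_orientation array for the given affine.

import numpy as np

print(get_affine_orientation(np.eye(4)))  # identity (RAS+) affine
# [[0. 1.]
#  [1. 1.]
#  [2. 1.]]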
Example #10
def proc_file(infile, opts):
    # figure out the output filename, and see if it exists
    basefilename = splitext_addext(os.path.basename(infile))[0]
    if opts.outdir is not None:
        # set output path
        basefilename = os.path.join(opts.outdir, basefilename)

    # prep a file
    if opts.compressed:
        verbose('Using gzip compression')
        outfilename = basefilename + '.nii.gz'
    else:
        outfilename = basefilename + '.nii'
    if os.path.isfile(outfilename) and not opts.overwrite:
        raise IOError('Output file "%s" exists, use --overwrite to '
                      'overwrite it' % outfilename)

    # load the PAR header and data
    scaling = 'dv' if opts.scaling == 'off' else opts.scaling
    infile = fname_ext_ul_case(infile)
    pr_img = pr.load(infile,
                     permit_truncated=opts.permit_truncated,
                     scaling=scaling,
                     strict_sort=opts.strict_sort)
    pr_hdr = pr_img.header
    affine = pr_hdr.get_affine(origin=opts.origin)
    slope, intercept = pr_hdr.get_data_scaling(scaling)
    if opts.scaling != 'off':
        verbose('Using data scaling "%s"' % opts.scaling)
    # get original scaling, and decide if we scale in-place or not
    if opts.scaling == 'off':
        slope = np.array([1.])
        intercept = np.array([0.])
        in_data = pr_img.dataobj.get_unscaled()
        out_dtype = pr_hdr.get_data_dtype()
    elif not np.any(np.diff(slope)) and not np.any(np.diff(intercept)):
        # Single scalefactor case
        slope = slope.ravel()[0]
        intercept = intercept.ravel()[0]
        in_data = pr_img.dataobj.get_unscaled()
        out_dtype = pr_hdr.get_data_dtype()
    else:
        # Multi scalefactor case
        slope = np.array([1.])
        intercept = np.array([0.])
        in_data = np.array(pr_img.dataobj)
        out_dtype = np.float64
    # Reorient data block to LAS+ if necessary
    ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine))
    if np.all(ornt == [[0, 1], [1, 1], [2, 1]]):  # already in LAS+
        t_aff = np.eye(4)
    else:  # Not in LAS+
        t_aff = inv_ornt_aff(ornt, pr_img.shape)
        affine = np.dot(affine, t_aff)
        in_data = apply_orientation(in_data, ornt)

    bvals, bvecs = pr_hdr.get_bvals_bvecs()
    if not opts.keep_trace:  # discard Philips DTI trace if present
        if bvecs is not None:
            bad_mask = np.logical_and(bvals != 0, (bvecs == 0).all(axis=1))
            if bad_mask.sum() > 0:
                pl = 's' if bad_mask.sum() != 1 else ''
                verbose('Removing %s DTI trace volume%s' %
                        (bad_mask.sum(), pl))
                good_mask = ~bad_mask
                in_data = in_data[..., good_mask]
                bvals = bvals[good_mask]
                bvecs = bvecs[good_mask]

    # Make corresponding NIfTI image
    nimg = nifti1.Nifti1Image(in_data, affine, pr_hdr)
    nhdr = nimg.header
    nhdr.set_data_dtype(out_dtype)
    nhdr.set_slope_inter(slope, intercept)
    nhdr.set_sform(affine, code=1)
    nhdr.set_qform(affine, code=1)

    if 'parse' in opts.minmax:
        # need to get the scaled data
        verbose('Loading (and scaling) the data to determine value range')
    if opts.minmax[0] == 'parse':
        nhdr['cal_min'] = in_data.min() * slope + intercept
    else:
        nhdr['cal_min'] = float(opts.minmax[0])
    if opts.minmax[1] == 'parse':
        nhdr['cal_max'] = in_data.max() * slope + intercept
    else:
        nhdr['cal_max'] = float(opts.minmax[1])

    # container for potential NIfTI1 header extensions
    if opts.store_header:
        # dump the full PAR header content into an extension
        with open(infile, 'rb') as fobj:  # contents must be bytes
            hdr_dump = fobj.read()
            dump_ext = nifti1.Nifti1Extension('comment', hdr_dump)
        nhdr.extensions.append(dump_ext)

    verbose('Writing %s' % outfilename)
    nibabel.save(nimg, outfilename)

    # write out bvals/bvecs if requested
    if opts.bvs:
        if bvals is None and bvecs is None:
            verbose('No DTI volumes detected, bvals and bvecs not written')
        elif bvecs is None:
            verbose('DTI volumes detected, but no diffusion direction info was '
                    'found.  Writing .bvals file only.')
            with open(basefilename + '.bvals', 'w') as fid:
                # np.savetxt could do this, but it's just a loop anyway
                for val in bvals:
                    fid.write('%s ' % val)
                fid.write('\n')
        else:
            verbose('Writing .bvals and .bvecs files')
            # Transform bvecs with reorientation affine
            orig2new = npl.inv(t_aff)
            bv_reorient = from_matvec(to_matvec(orig2new)[0], [0, 0, 0])
            bvecs = apply_affine(bv_reorient, bvecs)
            with open(basefilename + '.bvals', 'w') as fid:
                # np.savetxt could do this, but it's just a loop anyway
                for val in bvals:
                    fid.write('%s ' % val)
                fid.write('\n')
            with open(basefilename + '.bvecs', 'w') as fid:
                for row in bvecs.T:
                    for val in row:
                        fid.write('%s ' % val)
                    fid.write('\n')

    # export data labels varying along the 4th dimensions if requested
    if opts.vol_info:
        labels = pr_img.header.get_volume_labels()
        if len(labels) > 0:
            vol_keys = list(labels.keys())
            with open(basefilename + '.ordering.csv', 'w') as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=',')
                csvwriter.writerow(vol_keys)
                for vals in zip(*[labels[k] for k in vol_keys]):
                    csvwriter.writerow(vals)

    # write out dwell time if requested
    if opts.dwell_time:
        try:
            dwell_time = calculate_dwell_time(pr_hdr.get_water_fat_shift(),
                                              pr_hdr.get_echo_train_length(),
                                              opts.field_strength)
        except MRIError:
            verbose('No EPI factors, dwell time not written')
        else:
            verbose('Writing dwell time (%r sec) calculated assuming %sT '
                    'magnet' % (dwell_time, opts.field_strength))
            with open(basefilename + '.dwell_time', 'w') as fid:
                fid.write('%r\n' % dwell_time)
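The LAS+ reorientation step above uses a common nibabel pattern: premultiplying the affine by np.diag([-1, 1, 1, 1]) makes io_orientation report the data orientation relative to an LAS target instead of RAS. A standalone sketch of just that step, on a hypothetical in-memory RAS+ volume:

import numpy as np
import nibabel as nib
from nibabel.orientations import io_orientation, apply_orientation, inv_ornt_aff

data = np.random.rand(4, 5, 6)
affine = np.eye(4)  # RAS+

ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine))
if not np.all(ornt == [[0, 1], [1, 1], [2, 1]]):  # not already LAS+
    t_aff = inv_ornt_aff(ornt, data.shape)        # note: shape before reorienting
    affine = np.dot(affine, t_aff)
    data = apply_orientation(data, ornt)

print(nib.aff2axcodes(affine))  # ('L', 'A', 'S')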
Example #11
def proc_file(infile, opts):
    # figure out the output filename, and see if it exists
    basefilename = splitext_addext(os.path.basename(infile))[0]
    if opts.outdir is not None:
        # set output path
        basefilename = os.path.join(opts.outdir, basefilename)

    # prep a file
    if opts.compressed:
        verbose("Using gzip compression")
        outfilename = basefilename + ".nii.gz"
    else:
        outfilename = basefilename + ".nii"
    if os.path.isfile(outfilename) and not opts.overwrite:
        raise IOError('Output file "%s" exists, use --overwrite to ' "overwrite it" % outfilename)

    # load the PAR header and data
    scaling = "dv" if opts.scaling == "off" else opts.scaling
    infile = fname_ext_ul_case(infile)
    pr_img = pr.load(infile, permit_truncated=opts.permit_truncated, scaling=scaling, strict_sort=opts.strict_sort)
    pr_hdr = pr_img.header
    affine = pr_hdr.get_affine(origin=opts.origin)
    slope, intercept = pr_hdr.get_data_scaling(scaling)
    if opts.scaling != "off":
        verbose('Using data scaling "%s"' % opts.scaling)
    # get original scaling, and decide if we scale in-place or not
    if opts.scaling == "off":
        slope = np.array([1.0])
        intercept = np.array([0.0])
        in_data = pr_img.dataobj.get_unscaled()
        out_dtype = pr_hdr.get_data_dtype()
    elif not np.any(np.diff(slope)) and not np.any(np.diff(intercept)):
        # Single scalefactor case
        slope = slope.ravel()[0]
        intercept = intercept.ravel()[0]
        in_data = pr_img.dataobj.get_unscaled()
        out_dtype = pr_hdr.get_data_dtype()
    else:
        # Multi scalefactor case
        slope = np.array([1.0])
        intercept = np.array([0.0])
        in_data = np.array(pr_img.dataobj)
        out_dtype = np.float64
    # Reorient data block to LAS+ if necessary
    ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine))
    if np.all(ornt == [[0, 1], [1, 1], [2, 1]]):  # already in LAS+
        t_aff = np.eye(4)
    else:  # Not in LAS+
        t_aff = inv_ornt_aff(ornt, pr_img.shape)
        affine = np.dot(affine, t_aff)
        in_data = apply_orientation(in_data, ornt)

    bvals, bvecs = pr_hdr.get_bvals_bvecs()
    if not opts.keep_trace:  # discard Philips DTI trace if present
        if bvecs is not None:
            bad_mask = np.logical_and(bvals != 0, (bvecs == 0).all(axis=1))
            if bad_mask.sum() > 0:
                pl = "s" if bad_mask.sum() != 1 else ""
                verbose("Removing %s DTI trace volume%s" % (bad_mask.sum(), pl))
                good_mask = ~bad_mask
                in_data = in_data[..., good_mask]
                bvals = bvals[good_mask]
                bvecs = bvecs[good_mask]

    # Make corresponding NIfTI image
    nimg = nifti1.Nifti1Image(in_data, affine, pr_hdr)
    nhdr = nimg.header
    nhdr.set_data_dtype(out_dtype)
    nhdr.set_slope_inter(slope, intercept)
    nhdr.set_sform(affine, code=1)
    nhdr.set_qform(affine, code=1)

    if "parse" in opts.minmax:
        # need to get the scaled data
        verbose("Loading (and scaling) the data to determine value range")
    if opts.minmax[0] == "parse":
        nhdr["cal_min"] = in_data.min() * slope + intercept
    else:
        nhdr["cal_min"] = float(opts.minmax[0])
    if opts.minmax[1] == "parse":
        nhdr["cal_max"] = in_data.max() * slope + intercept
    else:
        nhdr["cal_max"] = float(opts.minmax[1])

    # container for potential NIfTI1 header extensions
    if opts.store_header:
        # dump the full PAR header content into an extension
        with open(infile, "rb") as fobj:  # contents must be bytes
            hdr_dump = fobj.read()
            dump_ext = nifti1.Nifti1Extension("comment", hdr_dump)
        nhdr.extensions.append(dump_ext)

    verbose("Writing %s" % outfilename)
    nibabel.save(nimg, outfilename)

    # write out bvals/bvecs if requested
    if opts.bvs:
        if bvals is None and bvecs is None:
            verbose("No DTI volumes detected, bvals and bvecs not written")
        elif bvecs is None:
            verbose("DTI volumes detected, but no diffusion direction info was" "found.  Writing .bvals file only.")
            with open(basefilename + ".bvals", "w") as fid:
                # np.savetxt could do this, but it's just a loop anyway
                for val in bvals:
                    fid.write("%s " % val)
                fid.write("\n")
        else:
            verbose("Writing .bvals and .bvecs files")
            # Transform bvecs with reorientation affine
            orig2new = npl.inv(t_aff)
            bv_reorient = from_matvec(to_matvec(orig2new)[0], [0, 0, 0])
            bvecs = apply_affine(bv_reorient, bvecs)
            with open(basefilename + ".bvals", "w") as fid:
                # np.savetxt could do this, but it's just a loop anyway
                for val in bvals:
                    fid.write("%s " % val)
                fid.write("\n")
            with open(basefilename + ".bvecs", "w") as fid:
                for row in bvecs.T:
                    for val in row:
                        fid.write("%s " % val)
                    fid.write("\n")

    # export data labels varying along the 4th dimensions if requested
    if opts.vol_info:
        labels = pr_img.header.get_volume_labels()
        if len(labels) > 0:
            vol_keys = list(labels.keys())
            with open(basefilename + ".ordering.csv", "w") as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=",")
                csvwriter.writerow(vol_keys)
                for vals in zip(*[labels[k] for k in vol_keys]):
                    csvwriter.writerow(vals)

    # write out dwell time if requested
    if opts.dwell_time:
        try:
            dwell_time = calculate_dwell_time(
                pr_hdr.get_water_fat_shift(), pr_hdr.get_echo_train_length(), opts.field_strength
            )
        except MRIError:
            verbose("No EPI factors, dwell time not written")
        else:
            verbose("Writing dwell time (%r sec) calculated assuming %sT " "magnet" % (dwell_time, opts.field_strength))
            with open(basefilename + ".dwell_time", "w") as fid:
                fid.write("%r\n" % dwell_time)
Example #12
    parser = argparse.ArgumentParser(
        description='Reorient to LIA and resample to 1mm iso-voxel resolution if required')

    parser.add_argument('source', type=str, help='Input volume')

    parser.add_argument('destination', type=str, help='Normalized volume')

    args = parser.parse_args()

    src_nib = nib_funcs.squeeze_image(nib.load(args.source))
    current_orientation = ''.join(nib.aff2axcodes(src_nib.affine))
    print('Input: {} [{}]'.format(src_nib.header.get_zooms(),
                                  current_orientation))

    # Avoid resampling if already 1mm iso-voxel
    # Note: Also in cases of tiny rounding error, e.g. (1.0000001, 1.0000001, 1.0)
    if not np.allclose(src_nib.header.get_zooms(), [1, 1, 1]):
        # requires re-sampling
        print('Resampling')
        dst_nib = nib_processing.conform(src_nib, orientation='LIA')
    elif current_orientation != 'LIA':
        # requires just reorient
        print('Reorientating {} to LIA'.format(current_orientation))
        start_ornt = nib_orientations.io_orientation(src_nib.affine)
        end_ornt = nib_orientations.axcodes2ornt('LIA')
        transform = nib_orientations.ornt_transform(start_ornt, end_ornt)
        dst_nib = src_nib.as_reoriented(transform)
    else:
        dst_nib = src_nib

    nib.save(dst_nib, args.destination)
Example #13
def MGDMBrainSegmentation(input_filename_type_list, output_dir = None, num_steps = 5, atlas_file=None,
                          topology_lut_dir = None):
    """
    Perform MGDM segmentation
    :param input_filename_type_list: list of [[fname1,type1],[fname2,type2],...] - for a maximum of 4 inputs
    :param output_dir: full path to the output directory
    :param num_steps: number of steps for (default 5, set to 0 for testing)
    :param atlas_file: full path to the atlas file, default set in defaults.py
    :param topology_lut_dir: full path to the directory with the topology files, default set in defaults.py
    :return:
    """

    from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation, ornt_transform
    import os

    print("Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs")
    print("Sit back and relax, let the magic of algorithms happen...")
    print("")
    if output_dir is None:
        output_dir = os.path.dirname(input_filename_type_list[0][0])
    if atlas_file is None:
        atlas = os.path.join(ATLAS_DIR,'brain-atlas-3.0.3.txt')
    else:
        atlas = atlas_file

    if topology_lut_dir is None:
        topology_lut_dir = TOPOLOGY_LUT_DIR  # grabbing this from the default settings in defaults.py
    else:
        if not(topology_lut_dir[-1] == os.sep): #if we don't end in a path sep, we need to make sure that we add it
            topology_lut_dir += os.sep

    print("Atlas file: " + atlas)
    print("Topology LUT durectory: " + topology_lut_dir)
    print("")

    if not any(isinstance(el, list) for el in input_filename_type_list): #make into list of lists
        input_filename_type_list = [input_filename_type_list]

    # now we set up the MGDM-specific settings
    mgdm = cj.BrainMgdmMultiSegmentation2()
    mgdm.setAtlasFile(atlas)
    mgdm.setTopologyLUTdirectory(topology_lut_dir)

    mgdm.setOutputImages('segmentation')
    # --> mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S) # this is the default for MGDM, <--
    # mgdm.setOrientations(mgdm.AXIAL, mgdm.L2R, mgdm.P2A, mgdm.I2S)  #LR,PA,IS is always how they are returned from nibabel
    mgdm.setAdjustIntensityPriors(False)  # default is True
    mgdm.setComputePosterior(False)
    mgdm.setDiffuseProbabilities(False)
    mgdm.setSteps(num_steps)
    mgdm.setTopology('wcs')  # {'wcs','no'} no=off for testing, wcs=default
    for idx,con in enumerate(input_filename_type_list):
        print("Input files and filetypes:")
        print("  " + str(idx+1) + " "),
        print(con)
        #flipLR = False
        #flipAP = False
        #flipIS = False


        fname = con[0]
        type = con[1]
        d,d_aff,d_head = niiLoad(fname,return_header=True)

        ## usage example in the proc_file function of : https://github.com/nipy/nibabel/blob/master/bin/parrec2nii
        ornt_orig = io_orientation(d_aff)
        ornt_mgdm = io_orientation(np.diag([-1, -1, 1, 1]).dot(d_aff))  # -1 -1 1 LPS (mgdm default); 1 1 1 is RAS
        ornt_chng = ornt_transform(ornt_mgdm, ornt_orig)  # to get from MGDM to our original input


        # convert orientation information to mgdm slice and orientation info
        aff_orients,aff_slc = get_affine_orientation_slice(d_aff)
        print("data orientation: " + str(aff_orients)),
        print("slice settings: " + aff_slc)
        print("mgdm orientation: " + str(ornt_mgdm))
        print("data orientation: " + str(ornt_orig))

        if aff_slc == "AXIAL":
            SLC=mgdm.AXIAL
        elif aff_slc == "SAGITTAL":
            SLC=mgdm.SAGITTAL
        else:
            SLC=mgdm.CORONAL
        for aff_orient in aff_orients: #TODO: if anything is different from the default MGDM settings, we need to flip axes of the data at the end
            if aff_orient == "L":
                LR=mgdm.R2L
            elif aff_orient == "R":
                LR = mgdm.L2R
               # flipLR = True
            elif aff_orient == "A":
                AP = mgdm.P2A
                #flipAP = True
            elif aff_orient == "P":
                AP = mgdm.A2P
            elif aff_orient == "I":
                IS = mgdm.S2I
                #flipIS = True
            elif aff_orient == "S":
                IS = mgdm.I2S
        mgdm.setOrientations(SLC, LR, AP, IS)  #L2R,P2A,I2S is nibabel default (i.e., RAS)

        if idx+1 == 1:
            # we use the first image to set the dimensions and resolutions
            res = d_head.get_zooms()
            res = [a1.item() for a1 in res]  # cast to regular python float type
            mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
            mgdm.setResolutions(res[0], res[1], res[2])

            # keep the shape and affine from the first image for saving
            d_shape = np.array(d.shape)
            out_root_fname = os.path.basename(fname)[0:os.path.basename(fname).find('.')] #assumes no periods in filename, :-/

            mgdm.setContrastImage1(cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType1(type)
        elif idx+1 == 2:
            mgdm.setContrastImage2(cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType2(type)
        elif idx + 1 == 3:
            mgdm.setContrastImage3(cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType3(type)
        elif idx + 1 == 4:
            mgdm.setContrastImage4(cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType4(type)
    try:
        print("Executing MGDM on your inputs")
        print("Don't worry, the magic is happening!")
        mgdm.execute()
        print(os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz'))


        # outputs
        # reshape Fortran-style arrays back to the layout nibabel expects
        seg_im = np.reshape(np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32), d_shape,'F')
        lbl_im = np.reshape(np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32), d_shape, 'F')
        ids_im = np.reshape(np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32), d_shape, 'F')

        # fix orientation back to the input orientation :-/ not really working
        # seg_im = apply_orientation(seg_im, ornt_chng) # this takes care of the orientations between mipav and input
        # lbl_im = apply_orientation(lbl_im, ornt_chng) # TODO: fix the origin point offset?, 2x check possible RL flip
        # ids_im = apply_orientation(ids_im, ornt_chng) # alternative: register? https://github.com/pyimreg
                                                      #

        # save
        seg_file = os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz')
        lbl_file = os.path.join(output_dir, out_root_fname + '_lbl_cjs.nii.gz')
        ids_file = os.path.join(output_dir, out_root_fname + '_ids_cjs.nii.gz')

        ## this will work, but the solution with nibabel.orientations is much cleaner
        # if our settings were not the same as MGDM likes, we need to flip the relevant settings:
        #d_aff_new = flip_affine_orientation(d_aff, flipLR=flipLR, flipAP=flipAP, flipIS=flipIS)

        d_head['data_type'] = np.array(32).astype('uint32') #convert the header as well
        d_head['cal_max'] = np.max(seg_im)  #max for display
        niiSave(seg_file, seg_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(lbl_im)
        niiSave(lbl_file, lbl_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(ids_im)  # convert the header as well
        niiSave(ids_file, ids_im, d_aff, header=d_head, data_type='uint32')
        print("Data stored in: " + output_dir)
    except:
        print("--- MGDM failed. Go cry. ---")
        return
    print("Execution completed")

    return seg_im,d_aff,d_head