def resample_file(fname_data, fname_out, new_size, new_size_type,
                  interpolation, verbose):
    """This function will resample the specified input
    image file to the target size.
    Can deal with 2d, 3d or 4d image objects.
    :param fname_data: The input image filename.
    :param fname_out: The output image filename.
    :param new_size: The target size, e.g. 0.25x0.25
    :param new_size_type: Unit of resample (mm, vox, factor)
    :param interpolation: The interpolation type
    :param verbose: verbosity level
    """

    # Load data
    sct.printv('\nLoad data...', verbose)
    nii = nipy.load_image(fname_data)

    nii_r = resample_image(nii, new_size, new_size_type, interpolation,
                           verbose)

    # build output file name
    if fname_out == '':
        fname_out = sct.add_suffix(fname_data, '_r')

    # save data
    nipy.save_image(nii_r, fname_out)

    # to view results
    sct.display_viewer_syntax([fname_out], verbose=verbose)

    return nii_r
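
A minimal usage sketch for the function above. The file names and target size are hypothetical, and the sct/nipy helpers used by the source module are assumed to be importable:

# Hypothetical call: resample a T2 image to 0.5 mm isotropic with linear interpolation.
resampled_nii = resample_file('t2.nii.gz', 't2_resampled.nii.gz',
                              '0.5x0.5x0.5', 'mm', 'linear', verbose=1)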
Example #2
def save_to_image(data,
                  template_file=DEFAULT_template,
                  output_file=DEFAULT_output):
    template = load_image(template_file)
    newimg = Image(data, vox2mni(template.affine))
    save_image(newimg, output_file)
    return output_file
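
A hedged usage sketch, assuming the module-level names DEFAULT_template, DEFAULT_output and load_image are available as in the source module; the data array is a placeholder matching the template's shape:

import numpy as np

template = load_image(DEFAULT_template)             # template path defined in the source module
data = np.zeros(template.shape, dtype=np.float32)   # hypothetical volume in template space
out_path = save_to_image(data)                      # written to DEFAULT_output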
Example #3
def save_niigz(file_path, vol, affine=None, header=None):
    """Saves a volume into a Nifti (.nii.gz) file.

    Parameters
    ----------
    vol: Numpy 3D or 4D array
        Volume with the data to be saved.
    file_path: string
        Output file name path
    affine: 4x4 Numpy array
        Array with the affine transform of the file.
    header: nibabel.nifti1.Nifti1Header, optional
        Header for the file, optional but recommended.

    Note
    ----
        affine and header only work for numpy volumes.

    """
    if isinstance(vol, np.ndarray):
        log.debug('Saving numpy nifti file: ' + file_path)
        ni = nib.Nifti1Image(vol, affine, header)
        nib.save(ni, file_path)

    elif isinstance(vol, nib.Nifti1Image):
        log.debug('Saving nibabel nifti file: ' + file_path)
        nib.save(vol, file_path)

    elif isinstance(vol, niim.Image):
        log.debug('Saving nibabel nifti file: ' + file_path)
        save_image(vol, file_path)
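
A usage sketch for the numpy branch of save_niigz (path and data are hypothetical placeholders):

import numpy as np

vol = np.random.rand(64, 64, 32).astype(np.float32)   # hypothetical volume
save_niigz('/tmp/example_vol.nii.gz', vol, affine=np.eye(4))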
def expandFrames(imgFn, saveDir):
    """
    Expand a timeseries image into a set of individual frames in the
    specified directory

    Inputs:
    - imgFn: the timeseries image's filename
    - saveDir: the directory in which the frames will be stored

    Returns:
    - frameFns: the list of filenames
    """
    # Load the image
    img = load_image(imgFn)
    coord = img.coordmap
    frameFns = []

    # Make the save directory
    framesDir = saveDir + '/frames/'  # need to check for //
    # check for duplicate //
    framesDir = framesDir.replace("//", '/')
    if not os.path.exists(framesDir):
        os.mkdir(framesDir)

    for i in range(img.get_data().shape[3]):
        frame = img[:, :, :, i].get_data()[:, :, :, None]
        frameImg = Image(frame, coord)
        outFn = framesDir + str(i).zfill(3) + ".nii.gz"
        save_image(frameImg, outFn)
        frameFns.append(outFn)

    return frameFns
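
Example call with hypothetical paths; each 3D frame of the 4D input ends up as frames/000.nii.gz, frames/001.nii.gz, ... under the save directory:

frame_files = expandFrames('bold_run1.nii.gz', '/tmp/output')
print(frame_files[:3])   # first three frame filenames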
Example #5
def resample_file(fname_data, fname_out, new_size, new_size_type,
                  interpolation, verbose):
    """This function will resample the specified input
    image file to the target size.

    :param fname_data: The input image filename.
    :param fname_out: The output image filename.
    :param new_size: The target size, e.g. 0.25x0.25
    :param new_size_type: Unit of resample (mm, vox, factor)
    :param interpolation: The interpolation type
    :param verbose: verbosity level
    """

    # Load data
    sct.printv('\nLoad data...', verbose)
    nii = nipy.load_image(fname_data)

    nii_r = resample_image(nii, new_size, new_size_type, interpolation,
                           verbose)

    # build output file name
    if fname_out == '':
        fname_out = sct.add_suffix(fname_data, '_r')

    # save data
    nipy.save_image(nii_r, fname_out)

    # to view results
    sct.printv('\nDone! To view results, type:', verbose)
    sct.printv('fslview ' + fname_out + ' &', verbose, 'info')

    return nii_r
Example #6
def space_time_realign(Images, TR=2, numslices=None, SliceTime='asc_alt_2', RefScan=None):
    '''
    4D simultaneous slice timing and spatial realignment. Adapted from
    Alexis Roche's example script, and extended for use with multiplex
    imaging sequences
    
    Inputs:
    
        Images: list of images, input as a list of strings
        
        numslices: for a non-multiplex sequence, defaults to the number of
            slices in the image. For a multiplex sequence, enter a tuple
            whose first element is the number of planes acquired in
            parallel and whose second element is the number of slices in
            each parallel plane/slab
        
        SliceTime: a string specifying how the slices are ordered.
            Choices are the following
            1).'ascending': sequential ascending acquisition
            2).'descending': sequential descending acquisition
            3).'asc_alt_2': ascending interleaved, starting at first slice
            4).'asc_alt_2_1': ascending interleaved, starting at the second
                slice
            5).'desc_alt_2': descending interleaved, starting at last slice
            6).'asc_alt_siemens': ascending interleaved, starting at the first
                slice if odd number of slices, or second slice if even number
                of slices
            7).'asc_alt_half': ascending interleaved by half the volume
            8).'desc_alt_half': descending interleaved by half the volume
        
        RefScan: reference volume for spatial realignment movement estimation
    '''
    
    # load images    
    runs = [load_image(run) for run in Images]
    # parse data info
    if numslices is None:
        numslices = runs[0].shape[2]
        numplanes = 1
    elif isinstance(numslices, tuple):
        # (planes acquired in parallel, slices per plane); unpack before
        # numslices is overwritten
        numplanes, numslices = numslices
    # parse slice timing according to the input
    slice_timing = getattr(timefuncs, SliceTime)(TR, numslices)
    # repeat the slice timing for a multiplex sequence
    slice_timing = np.tile(slice_timing, numplanes)
    # Spatio-temporal realigner assuming interleaved ascending slice order
    R = SpaceTimeRealign(runs, tr=TR, slice_times=slice_timing, slice_info=2,
                         affine_class='Rigid')
    
    print('Slice times: %s' % slice_timing)
    # Estimate motion within- and between-sessions
    R.estimate(refscan=RefScan)
    # Resample data on a regular space+time lattice using 4d interpolation
    print('Saving results ...')
    for i in range(len(runs)):
        corr_run = R.resample(i)
        fname = os.path.join(os.path.split(Images[i])[0],'ra' + os.path.split(Images[i])[1])
        save_image(corr_run, fname)
        print(fname)
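
A sketch of how this might be called (file names are hypothetical); for a multiplexed acquisition, numslices would be passed as a (planes, slices-per-plane) tuple:

space_time_realign(['run1.nii.gz', 'run2.nii.gz'], TR=2.0,
                   SliceTime='asc_alt_2', RefScan=0)
# corrected runs are written next to the inputs with an 'ra' prefix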
Example #7
def get_nifti(dataset,
              features,
              out_file=None,
              split_files=False,
              base_nifti=None):
    """
    Function to get nifti image and save nifti files.

    Parameters
    ----------
    dataset: MRI class.
        A dataset of the MRI class for processing the nifti from.
        Must implement get_nifti.
    features: array-like.
        Features for nifti processing.
    out_file: str, optional.
        Output file for nifti image.

    Returns
    -------
    nifti: nipy image.
    """
    logger.info("Getting nifti for dataset of type %r and %d features." %
                (type(dataset), features.shape[0]))
    if not isinstance(dataset, MRI):
        raise ValueError("Dataset type is %r and not an instance of %r" %
                         (type(dataset), MRI))
    weights_view = dataset.get_weights_view(features)

    image = dataset.get_nifti(weights_view, base_nifti=base_nifti)
    if out_file is not None:
        nipy.save_image(image, out_file + ".gz")

    return image
Example #8
def sources_to_nifti(CHECKPOINT, MASKMAT, BASENIFTI, ONAME, savepath, voxels, win):
    bnifti = load_image(BASENIFTI)
    mask = loadmat(MASKMAT)['mask']
    model = np.load(CHECKPOINT) # Numpy array of sources from Infomax ICA

    for i in range(len(model)): # Goes component by component

        W = model[i,:].reshape([voxels,win])

        f = zeros(len(mask))
        idx = where(mask==1)
        data = zeros((bnifti.shape[0],bnifti.shape[1],bnifti.shape[2],W.shape[1]))

        f[idx[0].tolist()] = detrend(W)/std(W)

        for j in range(0,W.shape[1]):
            data[:,:,:,j] = reshape(f,(bnifti.shape[0],bnifti.shape[1],bnifti.shape[2] ), order='F')

        img = Image.from_image(bnifti,data=data)

        os.chdir(savepath)

        fn = ONAME + "%s.nii" % (str(i)) # Where result should be saved and under what name

        save_image(img,fn)
Example #9
def save_nii(data, coord, save_file):
    """
    Saves a numpy array (data) as a nifti file
    The coordinate space must match the array dimensions
    """
    arr_img = Image(data, coord)
    save_image(arr_img, save_file)
    return 0
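
A small usage sketch, assuming vox2mni from nipy.core.api to build the coordinate map; the array and output path are placeholders:

import numpy as np
from nipy.core.api import vox2mni

data = np.zeros((64, 64, 32), dtype=np.float32)
save_nii(data, vox2mni(np.eye(4)), '/tmp/zeros.nii.gz')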
Example #10
def resample_image(source_file, target_file, outdir, w2wmap=None, order=3,
                   cval=0, verbose=0):
    """ Resample the source image to match the target image using Nipy.

    Parameters
    ----------
    source_file: str (mandatory)
        the image to resample.
    target_file: str (mandatory)
        the reference image.
    outdir: str (mandatory)
        the folder where the resampled image will be saved.
    w2wmap: array (4, 4) or callable
        physical to physical transformation.
    verbose: int (optional, default 0)
        the verbosity level.

    Returns
    -------
    resampled_file: str
        the resampled image.
    """
    # Get target image information
    target_image = nipy.load_image(target_file)
    onto_shape = target_image.shape[:3]
    onto_aff = xyz_affine(target_image.affine, xyz=[0, 1, 2], verbose=verbose)

    # Define index and physical coordinate systems
    arraycoo = "ijklmnopq"[:len(onto_shape)]
    spacecoo = "xyztrsuvw"[:len(onto_shape)]
    if verbose > 0:
        print("\narraycoo: ", arraycoo, "\nspacecoo: ", spacecoo,
              "\nonto_aff\n", onto_aff)
    dmaker = CoordSysMaker(arraycoo, 'generic-array')
    rmaker = CoordSysMaker(spacecoo, 'generic-scanner')
    cm_maker = cmap.CoordMapMaker(dmaker, rmaker)
    cmap_out = cm_maker.make_affine(onto_aff)
    if verbose > 0:
        print("cmap_out:\n", cmap_out)

    # Define the default physical to physical transformation
    if w2wmap is None:
        w2wmap = np.eye(onto_aff.shape[0])
    if verbose > 0:
        print("w2wmap:\n", w2wmap)

    # Resample
    source_image = nipy.load_image(source_file)
    resampled_image = resample(
        source_image, cmap_out, w2wmap, onto_shape, order=order, cval=cval)

    # Save the resampled image
    resampled_file = os.path.join(
        outdir, "resampled_{0}".format(os.path.basename(source_file)))
    nipy.save_image(resampled_image, resampled_file)

    return resampled_file
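
Example call with hypothetical paths; the source image is resampled onto the target grid and written into outdir with a 'resampled_' prefix:

resampled_path = resample_image('subject_func.nii.gz', 'mni_template.nii.gz',
                                outdir='/tmp', order=1)
print(resampled_path)   # /tmp/resampled_subject_func.nii.gz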
Example #11
def sample_map_allen_space(image_path, annot_csv_path, save_path, type='well_id'):
    #assign values to samples in MNI space

    I=nipy.load_image(image_path)
    image_name=os.path.basename(image_path)
    df=pd.DataFrame.from_csv(annot_csv_path)
    coordinate, well_id=(np.array( df['mri_voxel_x']) , np.array(df['mri_voxel_y']), np.array(df['mri_voxel_z'] )), df[type]
    I._data[np.where(I._data!=0)]=0
    I._data[coordinate]=well_id
    nipy.save_image(I, os.path.join(save_path, image_name))
Example #12
def sample_map_allen_space(image_path, annot_csv_path, save_path, type='well_id'):
    #assign values to samples in MNI space

    I=nipy.load_image(image_path)
    image_name=os.path.basename(image_path)
    df=pd.DataFrame.from_csv(annot_csv_path)
    coordinate, well_id=(np.array( df['mri_voxel_x']) , np.array(df['mri_voxel_y']), np.array(df['mri_voxel_z'] )), df[type]
    I._data[np.where(I._data!=0)]=0
    I._data[coordinate]=well_id
    nipy.save_image(I, os.path.join(save_path, image_name))
Example #13
def tsdiffana(args):
    """ Generate tsdiffana plots from command line params `args`

    Parameters
    ----------
    args : object
        object with attributes

        * filename : str - 4D image filename
        * out_file : str - graphics file to write to instead of leaving
          graphics on screen
        * time_axis : str - name or number of time axis in `filename`
        * slice_axis : str - name or number of slice axis in `filename`
        * write_results : bool - if True, write images and plots to files
        * out_path : None or str - path to which to write results
        * out_fname_label : None or filename - suffix of output results files

    Returns
    -------
    axes : Matplotlib axes
       Axes on which we have done the plots.
    """
    if args.out_file is not None and args.write_results:
        raise ValueError("Cannot have OUT_FILE and WRITE_RESULTS options "
                         "together")
    img, time_axis, slice_axis = parse_fname_axes(args.filename,
                                                  args.time_axis,
                                                  args.slice_axis)
    results = time_slice_diffs_image(img, time_axis, slice_axis)
    axes = plot_tsdiffs(results)
    if args.out_file is None and not args.write_results:
        # interactive mode
        return axes
    if args.out_file is not None:
        # plot only mode
        axes[0].figure.savefig(args.out_file)
        return axes
    # plot and images mode
    froot, ext, addext = splitext_addext(args.filename)
    fpath, fbase = psplit(froot)
    fpath = fpath if args.out_path is None else args.out_path
    fbase = fbase if args.out_fname_label is None else args.out_fname_label
    axes[0].figure.savefig(pjoin(fpath, 'tsdiff_' + fbase + '.png'))
    # Save image volumes
    for key, prefix in (('slice_diff2_max_vol', 'dv2_max_'), ('diff2_mean_vol',
                                                              'dv2_mean_')):
        fname = pjoin(fpath, prefix + fbase + ext + addext)
        nipy.save_image(results[key], fname)
    # Save time courses into npz
    np.savez(
        pjoin(fpath, 'tsdiff_' + fbase + '.npz'),
        volume_means=results['volume_means'],
        slice_mean_diff2=results['slice_mean_diff2'],
    )
    return axes
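
Since args only needs the attributes listed in the docstring, a plain namespace is enough for a quick sketch (the filename and output path are hypothetical):

from argparse import Namespace

axes = tsdiffana(Namespace(filename='functional.nii.gz', out_file=None,
                           time_axis='t', slice_axis='k',
                           write_results=True, out_path='/tmp',
                           out_fname_label=None))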
Example #14
def tissue_classification(img,
                          mask=None,
                          niters=25,
                          beta=0.5,
                          ngb_size=6,
                          probc=None,
                          probg=None,
                          probw=None):
    import numpy as np

    from nipy import load_image, save_image
    from nipy.core.image.image_spaces import (make_xyz_image, xyz_affine)
    from nipy.algorithms.segmentation import BrainT1Segmentation
    import os
    # Input image
    img = load_image(img)

    # Input mask image
    mask_img = mask
    if mask_img is None:
        mask_img = img
    else:
        mask_img = load_image(mask_img)

    # Other optional arguments
    #niters = int(get_argument('niters', 25))
    #beta = float(get_argument('beta', 0.5))
    #ngb_size = int(get_argument('ngb_size', 6))

    # Perform tissue classification
    mask = mask_img.get_data() > 0
    S = BrainT1Segmentation(img.get_data(),
                            mask=mask,
                            model='5k',
                            niters=niters,
                            beta=beta,
                            ngb_size=ngb_size)

    # Save label image
    outfile = os.path.abspath('hard_classif.nii')
    save_image(make_xyz_image(S.label, xyz_affine(img), 'scanner'), outfile)
    print('Label image saved in: %s' % outfile)

    # Compute fuzzy Dice indices if a 3-class fuzzy model is provided
    if probc is not None and \
       probg is not None and \
       probw is not None:
        print('Computing Dice index')
        gold_ppm = np.zeros(S.ppm.shape)
        gold_ppm_img = (probc, probg, probw)
        for k in range(3):
            img = load_image(gold_ppm_img[k])
            gold_ppm[..., k] = img.get_data()
        d = fuzzy_dice(gold_ppm, S.ppm, np.where(mask_img.get_data() > 0))
        print('Fuzzy Dice indices: %s' % d)
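
A minimal call sketch; the input is a hypothetical skull-stripped T1 volume, and the label image is written to hard_classif.nii in the working directory:

tissue_classification('t1_brain.nii.gz', mask=None, niters=10)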
Example #15
File: commands.py  Project: Naereen/nipy
def tsdiffana(args):
    """ Generate tsdiffana plots from command line params `args`

    Parameters
    ----------
    args : object
        object with attributes

        * filename : str - 4D image filename
        * out_file : str - graphics file to write to instead of leaving
          graphics on screen
        * time_axis : str - name or number of time axis in `filename`
        * slice_axis : str - name or number of slice axis in `filename`
        * write_results : bool - if True, write images and plots to files
        * out_path : None or str - path to which to write results
        * out_fname_label : None or filename - suffix of output results files

    Returns
    -------
    axes : Matplotlib axes
       Axes on which we have done the plots.
    """
    if args.out_file is not None and args.write_results:
        raise ValueError("Cannot have OUT_FILE and WRITE_RESULTS options "
                         "together")
    img, time_axis, slice_axis = parse_fname_axes(args.filename,
                                                  args.time_axis,
                                                  args.slice_axis)
    results = time_slice_diffs_image(img, time_axis, slice_axis)
    axes = plot_tsdiffs(results)
    if args.out_file is None and not args.write_results:
        # interactive mode
        return axes
    if args.out_file is not None:
        # plot only mode
        axes[0].figure.savefig(args.out_file)
        return axes
    # plot and images mode
    froot, ext, addext = splitext_addext(args.filename)
    fpath, fbase = psplit(froot)
    fpath = fpath if args.out_path is None else args.out_path
    fbase = fbase if args.out_fname_label is None else args.out_fname_label
    axes[0].figure.savefig(pjoin(fpath, 'tsdiff_' + fbase + '.png'))
    # Save image volumes
    for key, prefix in (('slice_diff2_max_vol', 'dv2_max_'),
                        ('diff2_mean_vol', 'dv2_mean_')):
        fname = pjoin(fpath, prefix + fbase + ext + addext)
        nipy.save_image(results[key], fname)
    # Save time courses into npz
    np.savez(pjoin(fpath, 'tsdiff_' + fbase + '.npz'),
             volume_means=results['volume_means'],
             slice_mean_diff2=results['slice_mean_diff2'],
            )
    return axes
Example #16
def affine_registration_nipy(in_path, ref_path, out_path, 
                             in_ref_mat = '', ref_in_mat = '',
                             T = None, extra_params={}):
    """
    Affine registration and resampling, using HistogramRegistration from nipy.
    
    inputs:
        in_path: path to the source (input) image.
        ref_path: path to the target (reference) image.
        out_path: path to use to save the registered image. 
        in_ref_mat: if bool(in_ref_mat) is True, save the 4x4 transformation
                    matrix to a text file <in_ref_mat>. 
        ref_in_mat: if bool(ref_in_mat) is True, save the reverse of the 4x4
                    transformation matrix to a text file <ref_in_mat>. 
        T: affine transformation to use. if None, T will be estimated using 
           HistogramRegistration and optimizers; if type(T) is not Affine, 
           T = Affine(array=T)
        extra_params: extra parameters passed to HistogramRegistration,
                      HistogramRegistration.optimize, and resample
        
    return T
    """

    source_image = load_image(in_path)
    target_image = load_image(ref_path)

    if T is None:
        print('estimating the affine transformation using histogram registration...')
        
#        R = HistogramRegistration(source_image, target_image)
        R = AllFeatures(HistogramRegistration,extra_params).run(source_image, target_image)
        
#        T = R.optimize('affine', optimizer='powell')
        T = AllFeatures(R.optimize,extra_params).run('affine', optimizer='powell')
        print('estimated affine transformation %s' % T)

    else:
        if type(T) is not Affine:
            print('create Affine from T')
            T = Affine(array=T)
        print('using a predefined affine:\n%s\nwith a 4x4 matrix:\n%s\n' % (T, T.as_affine()))

#    It = resample(source_image, T.inv(), target_image)
    It = AllFeatures(resample,extra_params).run(source_image, T.inv(), target_image)

    # the second argument of resample takes a transformation from ref to mov,
    # which is why we need T.inv() here
    save_image(It, out_path)
    if in_ref_mat:
        np.savetxt(in_ref_mat, T.as_affine())
    if ref_in_mat:
        np.savetxt(ref_in_mat, T.inv().as_affine())
    
    return T
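
Usage sketch with hypothetical paths; the estimated transform is returned and, because in_ref_mat is set, also written out as a 4x4 text matrix:

T = affine_registration_nipy('subject_T1.nii.gz', 'template_T1.nii.gz',
                             'subject_T1_in_template.nii.gz',
                             in_ref_mat='subject_to_template.txt')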
Example #17
def segment_file(input_filename, output_filename,
                 model_name, threshold, verbosity):
    """Segment a volume file.

    :param input_filename: the input filename.
    :param output_filename: the output filename.
    :param model_name: the name of model to use.
    :param threshold: threshold to apply in predictions (if None,
                      no threshold will be applied)
    :param verbosity: the verbosity level.
    :return: the output filename.
    """
    nii_original = nipy.load_image(input_filename)
    pixdim = nii_original.header["pixdim"][3]
    target_resample = "0.25x0.25x{:.5f}".format(pixdim)

    nii_resampled = nipy_resample.resample_image(nii_original,
                                                 target_resample,
                                                 'mm', 'linear',
                                                 verbosity)

    if (nii_resampled.shape[0] < 200) \
       or (nii_resampled.shape[1] < 200):
        raise RuntimeError("Image too small ({}, {})".format(
                           nii_resampled.shape[0],
                           nii_resampled.shape[1]))

    nii_resampled = nipy2nifti(nii_resampled)
    pred_slices = segment_volume(nii_resampled, model_name, threshold)

    original_res = "{:.5f}x{:.5f}x{:.5f}".format(
        nii_original.header["pixdim"][1],
        nii_original.header["pixdim"][2],
        nii_original.header["pixdim"][3])

    volume_affine = nii_resampled.affine
    volume_header = nii_resampled.header
    nii_segmentation = nib.Nifti1Image(pred_slices, volume_affine,
                                       volume_header)
    nii_segmentation = nifti2nipy(nii_segmentation)

    nii_resampled_original = nipy_resample.resample_image(nii_segmentation,
                                                          original_res,
                                                          'mm', 'linear',
                                                          verbosity)
    res_data = nii_resampled_original.get_data()

    # Threshold after resampling, only if specified
    if threshold is not None:
        res_data = threshold_predictions(res_data, 0.5)

    nipy.save_image(nii_resampled_original, output_filename)
    return output_filename
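
A call sketch; the filenames and model name are hypothetical:

out = segment_file('t2s.nii.gz', 't2s_seg.nii.gz',
                   model_name='example_model', threshold=0.5, verbosity=1)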
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-1',
                        '--roi1',
                        type=str,
                        help='Filename of the first ROI image')
    parser.add_argument('-2',
                        '--roi2',
                        type=str,
                        help='Filename of the second ROI image')
    parser.add_argument('-c',
                        '--coord-image',
                        type=str,
                        help='Filename of the coordinates image')
    parser.add_argument('-o',
                        '--output-file',
                        type=str,
                        help='Filename to use for combined ROIs')

    args = parser.parse_args()
    print(args)

    # check that both images exist
    if not os.path.exists(args.roi1):
        print("File '" + args.roi1 + "' does not exist")
    if not os.path.exists(args.roi2):
        print("File '" + args.roi2 + "' does not exist")
    if not os.path.exists(args.coord_image):
        print("File '" + args.coord_image + "' does not exist")

    # load both images for the purpose of checking coordinates
    roi1, coords1 = mil.loadBOLD(args.roi1)
    roi2, coords2 = mil.loadBOLD(args.roi2)
    img, imgCoords = mil.loadBOLD(args.coord_image)

    print("roi1", roi1.shape)
    print("roi2", roi2.shape)
    print("coords", img.shape)

    # perform the OR-ing of the 2 rois
    addingImgs = BinaryMaths()
    addingImgs.inputs.in_file = args.roi1
    addingImgs.inputs.operand_file = args.roi2
    addingImgs.inputs.operation = "max"
    addingImgs.inputs.out_file = args.output_file
    addingImgs.run()

    # resample the new roi to be in the coordinate frame of the coordinate image
    jointRoi = load_image(args.output_file)
    coordsImg = load_image(args.coord_image)
    print("joint rois", jointRoi.get_data().shape)
    newRoiImg = resample_img2img(jointRoi, coordsImg, order=0)
    save_image(newRoiImg, args.output_file)
Example #19
File: storage.py  Project: Neurita/boyle
def save_niigz(filepath, vol, header=None, affine=None):
    """Saves a volume into a Nifti (.nii.gz) file.

    Parameters
    ----------
    vol: Numpy 3D or 4D array
        Volume with the data to be saved.

    file_path: string
        Output file name path

    affine: (optional) 4x4 Numpy array
        Array with the affine transform of the file.
        This is needed if vol is a np.ndarray.

    header: (optional) nibabel.nifti1.Nifti1Header, optional
        Header for the file, optional but recommended.
        This is needed if vol is a np.ndarray.

    Note
    ----
    affine and header only work for numpy volumes.
    """
    # delayed import because could not install nipy on Python 3 on OSX
    we_have_nipy = False
    try:
        import nipy.core.image as     niim
        from   nipy            import save_image
    except:
        pass
    else:
        we_have_nipy = True

    if isinstance(vol, np.ndarray):
        log.debug('Saving numpy nifti file: {}.'.format(filepath))
        ni = nib.Nifti1Image(vol, affine, header)
        nib.save(ni, filepath)

    elif isinstance(vol, nib.Nifti1Image):
        log.debug('Saving nibabel nifti file: {}.'.format(filepath))
        nib.save(vol, filepath)

    elif we_have_nipy and isinstance(vol, niim.Image):
        log.debug('Saving nipy nifti file: {}.'.format(filepath))
        save_image(vol, filepath)

    #elif isinstance(vol, NeuroImage):
    #    log.debug('Saving boyle.NeuroImage nifti file: {}.'.format(filepath))
    #    nib.save(vol.img, filepath)

    else:
        raise ValueError('Could not recognise input vol filetype. Got: {}.'.format(repr_imgs(vol)))
Example #20
def save_npy_to_nifti(npy_data, filename, base_nifti_filename):
    """
    Saves numpy to nifti.

    Arguments:
        npy_data: numpy array
        filename: filename to save
        base_nifti_filename: base nifti filename
    """

    bnifti = load_image(base_nifti_filename)
    img = Image.from_image(bnifti, data=npy_data.astype('uint8'))
    save_image(img, filename)
    print('Saved {}..'.format(filename))
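
Usage sketch; the array and the base nifti path are placeholders (the data is cast to uint8 inside the function):

import numpy as np

labels = np.zeros((91, 109, 91), dtype=np.uint8)   # hypothetical label volume
save_npy_to_nifti(labels, '/tmp/labels.nii.gz', 'base_template.nii.gz')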
Example #21
    def _run_interface(self, runtime):
        from nipy import save_image, load_image

        all_ims = [load_image(fname) for fname in self.inputs.in_file]

        if not isdefined(self.inputs.slice_times):
            from nipy.algorithms.registration.groupwise_registration import SpaceRealign

            R = SpaceRealign(all_ims)
        else:
            from nipy.algorithms.registration import SpaceTimeRealign

            R = SpaceTimeRealign(
                all_ims,
                tr=self.inputs.tr,
                slice_times=self.inputs.slice_times,
                slice_info=self.inputs.slice_info,
            )

        R.estimate(refscan=None)

        corr_run = R.resample()
        self._out_file_path = []
        self._par_file_path = []

        for j, corr in enumerate(corr_run):
            self._out_file_path.append(
                os.path.abspath(
                    "corr_%s.nii.gz" % (split_filename(self.inputs.in_file[j])[1])
                )
            )
            save_image(corr, self._out_file_path[j])

            self._par_file_path.append(
                os.path.abspath("%s.par" % (os.path.split(self.inputs.in_file[j])[1]))
            )
            mfile = open(self._par_file_path[j], "w")
            motion = R._transforms[j]
            # nipy does not encode euler angles. return in original form of
            # translation followed by rotation vector see:
            # http://en.wikipedia.org/wiki/Rodrigues'_rotation_formula
            for i, mo in enumerate(motion):
                params = [
                    "%.10f" % item for item in np.hstack((mo.translation, mo.rotation))
                ]
                string = " ".join(params) + "\n"
                mfile.write(string)
            mfile.close()

        return runtime
Example #22
def motion_correction_nipy(in_file, out_path, mc_alg, extra_params={}):
    """
    An attempt at motion correction using the NiPy package.
    
    inputs:
        in_file: Full path to the resting-state scan. 
        out_path: Full path to the (to be) output file. 
        mc_alg: can be either 'nipy_spacerealign' or 'nipy_spacetimerealign'
        extra_params: extra parameters to SpaceRealign, SpaceTimeRealign, estimate
    return: the motion corrected image
    """

    alg_dict = {
        'nipy_spacerealign': (SpaceRealign, {}),
        'nipy_spacetimerealign': (SpaceTimeRealign, {
            'tr': 2,
            'slice_times': 'asc_alt_2',
            'slice_info': 2
        })
    }
    # format: {'function_name':(function, kwargs), ...}

    # processing starts here
    if type(in_file) in nib.all_image_classes:
        I = nifti2nipy(in_file)  # assume Nifti1Image
    else:
        I = load_image(in_file)
    print('source image loaded.')

    # initialize the registration algorithm
    reg = AllFeatures(alg_dict[mc_alg][0],
                      extra_params).run(I, **alg_dict[mc_alg][1])
    #    reg = alg_dict[mc_alg][0](I, **alg_dict[mc_alg][1]) # SpaceTimeRealign(I, tr=2, ...)
    print('motion correction algorithm established.')
    print('estimating...')

    if USE_CACHE:
        mem = Memory("func_preproc_cache_2")
        mem.cache(AllFeatures(reg.estimate, extra_params).run)(refscan=None)
#        mem.cache(reg.estimate)(refscan=None)
    else:
        AllFeatures(reg.estimate, extra_params).run(refscan=None)
#        reg.estimate(refscan=None)

    print('estimation complete. Writing to file...')
    result = reg.resample(0)
    if out_path:
        save_image(result, out_path)
    return nipy2nifti(result)
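
A call sketch with hypothetical paths; the corrected image is saved to out_path and also returned as a nibabel Nifti1Image:

corrected = motion_correction_nipy('rest.nii.gz', 'rest_mc.nii.gz',
                                   mc_alg='nipy_spacetimerealign')
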
def time_space_realign(run_fnames, TR, time_to_space, slice_axis):
    run_imgs = [load_image(run) for run in run_fnames]
    # Spatio-temporal realigner
    R = FmriRealign4d(run_imgs,
                      tr=TR,
                      slice_order=time_to_space,
                      slice_info=(slice_axis, 1))
    # Estimate motion within- and between-sessions
    R.estimate(refscan=None)
    # Save back out
    for i, fname in enumerate(run_fnames):
        corr_run = R.resample(i)
        pth, name = os.path.split(fname)
        processed_fname = os.path.join(pth, 'ra' + name)
        save_image(corr_run, processed_fname)
Example #24
def get_mode(seg_stack, out_file):
    img = load_image(seg_stack)
    new_coord = img[:, :, :, 0].coordmap
    data = img.get_data()
    mode = np.zeros(data.shape[:3])
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            for k in range(data.shape[2]):
                u, indices = np.unique(data[i, j, k, :], return_inverse=True)
                voxel_mode = u[np.argmax(np.bincount(indices))]
                print("mode at {0},{1},{2} = {3}".format(i, j, k, voxel_mode))
                mode[i, j, k] = voxel_mode
    mode_image = Image(mode, new_coord)
    save_image(mode_image, out_file)
    return mode
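
Example call with hypothetical paths; the input is a 4D stack of label volumes and the voxel-wise mode is saved as a 3D image:

mode_vol = get_mode('seg_stack_4d.nii.gz', '/tmp/mode_seg.nii.gz')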
Example #25
def peelTemplateBrain():
    ns=181
    nr=217
    nc=181
    gt_template=np.fromfile('data/phantom_1.0mm_normal_crisp.rawb', dtype=np.ubyte).reshape((ns,nr,nc))
    t1_template=np.fromfile('data/t1/t1_icbm_normal_1mm_pn0_rf0.rawb', dtype=np.ubyte).reshape((ns,nr,nc))
    t2_template=np.fromfile('data/t2/t2_icbm_normal_1mm_pn0_rf0.rawb', dtype=np.ubyte).reshape((ns,nr,nc))
    #t1_template*=((1<=gt_template)*(gt_template<=3)+(gt_template==8))
    t1_template*=((1<=gt_template)*(gt_template<=3))
    t2_template*=((1<=gt_template)*(gt_template<=3))
    affine_transform=AffineTransform('ijk', ['aligned-z=I->S','aligned-y=P->A', 'aligned-x=L->R'], np.eye(4))
    t1_template=Image(t1_template, affine_transform)
    t2_template=Image(t2_template, affine_transform)
    nipy.save_image(t1_template,'data/t1/t1_icbm_normal_1mm_pn0_rf0_peeled.nii.gz')
    nipy.save_image(t2_template,'data/t2/t2_icbm_normal_1mm_pn0_rf0_peeled.nii.gz')
Example #26
    def _run_interface(self, runtime):
        from nipy.algorithms.registration import FmriRealign4d as FR4d
        all_ims = [load_image(fname) for fname in self.inputs.in_file]

        if not isdefined(self.inputs.tr_slices):
            TR_slices = None
        else:
            TR_slices = self.inputs.tr_slices

        R = FR4d(all_ims,
                 tr=self.inputs.tr,
                 slice_order=self.inputs.slice_order,
                 tr_slices=TR_slices,
                 time_interp=self.inputs.time_interp,
                 start=self.inputs.start)

        R.estimate(loops=list(self.inputs.loops),
                   between_loops=list(self.inputs.between_loops),
                   speedup=list(self.inputs.speedup))

        corr_run = R.resample()
        self._out_file_path = []
        self._par_file_path = []

        for j, corr in enumerate(corr_run):
            self._out_file_path.append(
                os.path.abspath('corr_%s.nii.gz' %
                                (split_filename(self.inputs.in_file[j])[1])))
            save_image(corr, self._out_file_path[j])

            self._par_file_path.append(
                os.path.abspath('%s.par' %
                                (os.path.split(self.inputs.in_file[j])[1])))
            mfile = open(self._par_file_path[j], 'w')
            motion = R._transforms[j]
            # nipy does not encode euler angles. return in original form of
            # translation followed by rotation vector see:
            # http://en.wikipedia.org/wiki/Rodrigues'_rotation_formula
            for i, mo in enumerate(motion):
                params = [
                    '%.10f' % item
                    for item in np.hstack((mo.translation, mo.rotation))
                ]
                string = ' '.join(params) + '\n'
                mfile.write(string)
            mfile.close()

        return runtime
Example #27
def main():
    try:
        DATA_PATH = sys.argv[1]
    except IndexError:
        raise RuntimeError("Pass data path on command line")
    subjects = get_subjects(DATA_PATH)
    for name in sorted(subjects):
        subject = subjects[name]
        print("Smoothing subject " + name)
        for run in subject['functionals']:
            fname = run['filename']
            pth, fpart = os.path.split(fname)
            ra_fname = os.path.join(pth, 'ra' + fpart)
            sra_fname = os.path.join(pth, 'sra' + fpart)
            img = load_image(ra_fname)
            save_image(smooth_image(img, 8.), sra_fname)
Example #28
File: fmri.py  Project: nidl/cortex
    def save_niftis(self, X):
        base_nifti = nipy.load_image(self.base_nifti_file)

        if self.pca is not None and self.pca_components:
            X = self.pca.inverse_transform(X)

        images = []
        out_files = []
        for i, x in enumerate(X):
            image = self.make_image(x, base_nifti, do_pca=False)
            out_file = path.join(self.tmp_path, 'tmp_image_%d.nii.gz' % i)
            nipy.save_image(image, out_file)
            images.append(image)
            out_files.append(out_file)

        return images, out_files
def main():
    try:
        DATA_PATH = sys.argv[1]
    except IndexError:
        raise RuntimeError("Pass data path on command line")
    subjects = get_subjects(DATA_PATH)
    for name in sorted(subjects):
        subject = subjects[name]
        print("Smoothing subject " + name)
        for run in subject['functionals']:
            fname = run['filename']
            pth, fpart = os.path.split(fname)
            ra_fname = os.path.join(pth, 'ra' + fpart)
            sra_fname = os.path.join(pth, 'sra' + fpart)
            img = load_image(ra_fname)
            save_image(smooth_image(img, 8.), sra_fname)
Example #30
def generateTestingPair(betaGT):
    betaGTRads=np.array(betaGT, dtype=np.float64)
    betaGTRads[0:3]=np.copy(np.pi*betaGTRads[0:3]/180.0)
    ns=181
    nr=217
    nc=181
    left=np.fromfile('data/t2/t2_icbm_normal_1mm_pn0_rf0.rawb', dtype=np.ubyte).reshape(ns,nr,nc)
    left=left.astype(np.float64)
    right=np.fromfile('data/t1/t1_icbm_normal_1mm_pn0_rf0.rawb', dtype=np.ubyte).reshape(ns,nr,nc)
    right=right.astype(np.float64)
    right=rcommon.applyRigidTransformation3D(right, betaGTRads)
    affine_transform=AffineTransform('ijk', ['aligned-z=I->S','aligned-y=P->A', 'aligned-x=L->R'], np.eye(4))
    left=Image(left, affine_transform)
    right=Image(right, affine_transform)
    nipy.save_image(left,'moving.nii')
    nipy.save_image(right,'fixed.nii')
Example #31
    def _run_interface(self, runtime):

        all_ims = []

        for image in self.inputs.in_file:
            im = nb.load(image)
            im.affine = im.get_affine()
            all_ims.append(im)

        if not isdefined(self.inputs.tr_slices):
            TR_slices = None
        else:
            TR_slices = self.inputs.tr_slices

        R = FR4d(all_ims, tr=self.inputs.tr,
            slice_order=self.inputs.slice_order,
            interleaved=self.inputs.interleaved,
            tr_slices=TR_slices,
            time_interp=self.inputs.time_interp,
            start=self.inputs.start)

        R.estimate(loops=self.inputs.loops,
                   between_loops=self.inputs.between_loops,
                   speedup=self.inputs.speedup)

        corr_run = R.resample()
        self._out_file_path = []
        self._par_file_path = []

        for j, corr in enumerate(corr_run):
            self._out_file_path.append(os.path.abspath('corr_%s.nii.gz' %
            (split_filename(self.inputs.in_file[j])[1])))
            save_image(corr, self._out_file_path[j])

            self._par_file_path.append(os.path.abspath('%s.par' %
            (os.path.split(self.inputs.in_file[j])[1])))
            mfile = open(self._par_file_path[j], 'w')
            motion = R._transforms[j]
            #output a .par file that looks like fsl.mcflirt's .par file
            for i, mo in enumerate(motion):
                params = ['%.10f' % item for item in np.hstack((mo.rotation,
                                                             mo.translation))]
                string = ' '.join(params) + '\n'
                mfile.write(string)
            mfile.close()

        return runtime
Example #32
    def _run_interface(self, runtime):
        from nipy import save_image, load_image
        all_ims = [load_image(fname) for fname in self.inputs.in_file]

        if not isdefined(self.inputs.slice_times):
            from nipy.algorithms.registration.groupwise_registration import \
                SpaceRealign
            R = SpaceRealign(all_ims)
        else:
            from nipy.algorithms.registration import SpaceTimeRealign
            R = SpaceTimeRealign(
                all_ims,
                tr=self.inputs.tr,
                slice_times=self.inputs.slice_times,
                slice_info=self.inputs.slice_info,
            )

        R.estimate(refscan=None)

        corr_run = R.resample()
        self._out_file_path = []
        self._par_file_path = []

        for j, corr in enumerate(corr_run):
            self._out_file_path.append(
                os.path.abspath('corr_%s.nii.gz' %
                                (split_filename(self.inputs.in_file[j])[1])))
            save_image(corr, self._out_file_path[j])

            self._par_file_path.append(
                os.path.abspath('%s.par' %
                                (os.path.split(self.inputs.in_file[j])[1])))
            mfile = open(self._par_file_path[j], 'w')
            motion = R._transforms[j]
            # nipy does not encode euler angles. return in original form of
            # translation followed by rotation vector see:
            # http://en.wikipedia.org/wiki/Rodrigues'_rotation_formula
            for i, mo in enumerate(motion):
                params = [
                    '%.10f' % item
                    for item in np.hstack((mo.translation, mo.rotation))
                ]
                string = ' '.join(params) + '\n'
                mfile.write(string)
            mfile.close()

        return runtime
Example #33
def smooth_mask_nipy(infile, outfile, fwhm=14):
    """uses nipy to smooth an image using gaussian filter of fwhm"""
    img = nipy.load_image(infile)
    lf = nipy.algorithms.kernel_smooth.LinearFilter(img.coordmap, img.shape,
                                                    fwhm)
    simg = lf.smooth(img)
    outimg = nipy.save_image(simg, outfile)
    return outimg
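
Usage sketch with hypothetical filenames:

smoothed_img = smooth_mask_nipy('mask.nii.gz', 'mask_smoothed_8mm.nii.gz', fwhm=8)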
Example #34
def segment_file(input_filename, output_filename,
                 model_name, threshold, verbosity,
                 use_tta):
    """Segment a volume file.

    :param input_filename: the input filename.
    :param output_filename: the output filename.
    :param model_name: the name of model to use.
    :param threshold: threshold to apply in predictions (if None,
                      no threshold will be applied)
    :param verbosity: the verbosity level.
    :param use_tta: whether it should use TTA (test-time augmentation)
                    or not.
    :return: the output filename.
    """
    nii_original = nipy.load_image(input_filename)
    pixdim = nii_original.header["pixdim"][3]
    target_resample = "0.25x0.25x{:.5f}".format(pixdim)

    nii_resampled = resampling.resample_nipy(nii_original, new_size=target_resample, new_size_type='mm',
                                             interpolation='linear', verbose=verbosity)
    nii_resampled = nipy2nifti(nii_resampled)
    pred_slices = segment_volume(nii_resampled, model_name, threshold,
                                 use_tta)

    original_res = "{:.5f}x{:.5f}x{:.5f}".format(
        nii_original.header["pixdim"][1],
        nii_original.header["pixdim"][2],
        nii_original.header["pixdim"][3])

    volume_affine = nii_resampled.affine
    volume_header = nii_resampled.header
    nii_segmentation = nib.Nifti1Image(pred_slices, volume_affine,
                                       volume_header)
    nii_segmentation = nifti2nipy(nii_segmentation)

    nii_resampled_original = resampling.resample_nipy(nii_segmentation, new_size=original_res, new_size_type='mm',
                                                      interpolation='linear', verbose=verbosity)
    res_data = nii_resampled_original.get_data()

    # Threshold after resampling, only if specified
    if threshold is not None:
        res_data = threshold_predictions(res_data, 0.5)

    nipy.save_image(nii_resampled_original, output_filename)
    return output_filename
def tissue_classification(img,mask=None,niters=25,beta=0.5,ngb_size=6,probc=None,probg=None,probw=None):
    import numpy as np

    from nipy import load_image, save_image
    from nipy.core.image.image_spaces import (make_xyz_image,
                                              xyz_affine)
    from nipy.algorithms.segmentation import BrainT1Segmentation
    import os
    # Input image
    img = load_image(img)

    # Input mask image
    mask_img = mask
    if mask_img is None:
        mask_img = img
    else:
        mask_img = load_image(mask_img)

    # Other optional arguments
    #niters = int(get_argument('niters', 25))
    #beta = float(get_argument('beta', 0.5))
    #ngb_size = int(get_argument('ngb_size', 6))

    # Perform tissue classification
    mask = mask_img.get_data() > 0
    S = BrainT1Segmentation(img.get_data(), mask=mask, model='5k',
        niters=niters, beta=beta, ngb_size=ngb_size)

    # Save label image
    outfile = os.path.abspath('hard_classif.nii')
    save_image(make_xyz_image(S.label, xyz_affine(img), 'scanner'),
        outfile)
    print('Label image saved in: %s' % outfile)

    # Compute fuzzy Dice indices if a 3-class fuzzy model is provided
    if probc is not None and \
       probg is not None and \
       probw is not None:
        print('Computing Dice index')
        gold_ppm = np.zeros(S.ppm.shape)
        gold_ppm_img = (probc, probg, probw)
        for k in range(3):
            img = load_image(gold_ppm_img[k])
            gold_ppm[..., k] = img.get_data()
        d = fuzzy_dice(gold_ppm, S.ppm, np.where(mask_img.get_data() > 0))
        print('Fuzzy Dice indices: %s' % d)
Example #36
def tissue_segmentation(save_path,segmented_image, type, info):

    if info=='freesurfer' and type=="GM":
        # FreeSurfer gray matter regions
        # SupraGMV=LeftCerebralCortex+RightCerebralCortex+SubcorticalGMV
        region=np.array([42,3,10,11,12,13,17,18,26,49,50,51,52,53,54,58])


        S=nipy.load_image(segmented_image)
        name="GM_"+ os.path.basename(segmented_image)
        index=np.in1d(S._data, region).reshape(S._data.shape) # coordinates of voxels with values = values from region array
        S._data[index]=1 # set voxels values to 1
        index=np.logical_not(index) #get coordinates of voxels with values not = values from region array
        S._data[index]=0 # set voxels values to 0
        nipy.save_image(S, os.path.join(save_path,name))
    else:
        raise ValueError('Not implemented!')
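
A call sketch; the segmented image would typically be a FreeSurfer aseg-style volume (hypothetical path), and the binarised gray-matter mask is written to save_path with a 'GM_' prefix:

tissue_segmentation('/tmp', 'aparc+aseg.nii.gz', type='GM', info='freesurfer')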
Example #37
    def _run_interface(self, runtime):
        from nipy.algorithms.registration import FmriRealign4d as FR4d

        all_ims = [load_image(fname) for fname in self.inputs.in_file]

        if not isdefined(self.inputs.tr_slices):
            TR_slices = None
        else:
            TR_slices = self.inputs.tr_slices

        R = FR4d(
            all_ims,
            tr=self.inputs.tr,
            slice_order=self.inputs.slice_order,
            tr_slices=TR_slices,
            time_interp=self.inputs.time_interp,
            start=self.inputs.start,
        )

        R.estimate(
            loops=list(self.inputs.loops),
            between_loops=list(self.inputs.between_loops),
            speedup=list(self.inputs.speedup),
        )

        corr_run = R.resample()
        self._out_file_path = []
        self._par_file_path = []

        for j, corr in enumerate(corr_run):
            self._out_file_path.append(os.path.abspath("corr_%s.nii.gz" % (split_filename(self.inputs.in_file[j])[1])))
            save_image(corr, self._out_file_path[j])

            self._par_file_path.append(os.path.abspath("%s.par" % (os.path.split(self.inputs.in_file[j])[1])))
            mfile = open(self._par_file_path[j], "w")
            motion = R._transforms[j]
            # nipy does not encode euler angles. return in original form of
            # translation followed by rotation vector see:
            # http://en.wikipedia.org/wiki/Rodrigues'_rotation_formula
            for i, mo in enumerate(motion):
                params = ["%.10f" % item for item in np.hstack((mo.translation, mo.rotation))]
                string = " ".join(params) + "\n"
                mfile.write(string)
            mfile.close()

        return runtime
Example #38
def smooth_mask_nipy(infile, outfile, fwhm=14):
    """uses nipy to smooth an image using gaussian filter of fwhm"""
    img = nipy.load_image(infile)
    lf = nipy.algorithms.kernel_smooth.LinearFilter(img.coordmap,
                                                    img.shape,
                                                    fwhm)
    simg = lf.smooth(img)
    outimg = nipy.save_image(simg, outfile)
    return outimg
Example #39
def seg_recovery(files, model_settings):
    def get_model(model_setting):
        modelname = model_setting['modelname']
        axis = model_setting['axis']
        loss = model_setting['loss']
        postPocess = model_setting['postPocess']
        seg_model = segment_model(valfiles=None,
                                  modelname=modelname,
                                  axis=axis,
                                  metric=None,
                                  loss=loss,
                                  postPocess=postPocess)
        seg_model.load(model_setting['path'])
        return seg_model

    for f in files:
        img_3d = load_image(TEST_DATA + f)
        image = img_3d.get_data()
        # image = normlize_data(image)
        result_h1 = []
        result_h2 = []
        for setting in tqdm(model_settings):
            seg_model = get_model(setting)
            h1, h2 = model_predict(image, seg_model)
            result_h1.append(h1)
            result_h2.append(h2)
        h1 = average(result_h1)
        h2 = average(result_h2)

        h1 = np.around(h1)
        h2 = np.around(h2)

        shape = str(image.shape[2])
        area = crop_config['3d' + shape]

        label = np.zeros_like(image)
        label[area['x'][0]:area['x'][1], area['y'][0]:area['y'][1],
              area['z1'][0]:area['z1'][1]] = h1
        label[area['x'][0]:area['x'][1], area['y'][0]:area['y'][1],
              area['z2'][0]:area['z2'][1]] = h2 * 2

        img = Image(label, img_3d.coordmap)
        save_image(img, OUTPUT + f)
Example #40
def test_nipy_diagnose():
    # Test nipy diagnose script
    fimg = load_image(funcfile)
    ncomps = 12
    with InTemporaryDirectory() as tmpdir:
        cmd = ['nipy_diagnose', funcfile,
               '--ncomponents={0}'.format(ncomps),
               '--out-path=' + tmpdir]
        run_command(cmd)
        for out_fname in ('components_functional.png',
                          'pcnt_var_functional.png',
                          'tsdiff_functional.png',
                          'vectors_components_functional.npz'):
            assert_true(isfile(out_fname))
        for out_img in ('max_functional.nii.gz',
                        'mean_functional.nii.gz',
                        'min_functional.nii.gz',
                        'std_functional.nii.gz'):
            img = load_image(out_img)
            assert_equal(img.shape, fimg.shape[:-1])
            del img
        pca_img = load_image('pca_functional.nii.gz')
        assert_equal(pca_img.shape, fimg.shape[:-1] + (ncomps,))
        vecs_comps = np.load('vectors_components_functional.npz')
        vec_diff = vecs_comps['slice_mean_diff2'].copy()# just in case
        assert_equal(vec_diff.shape, (fimg.shape[-1]-1, fimg.shape[2]))
        del pca_img, vecs_comps
    with InTemporaryDirectory() as tmpdir:
        # Check we can pass in slice and time flags
        s0_img = rollimg(fimg, 'k')
        save_image(s0_img, 'slice0.nii')
        cmd = ['nipy_diagnose', 'slice0.nii',
               '--ncomponents={0}'.format(ncomps),
               '--out-path=' + tmpdir,
               '--time-axis=t',
               '--slice-axis=0']
        run_command(cmd)
        pca_img = load_image('pca_slice0.nii')
        assert_equal(pca_img.shape, s0_img.shape[:-1] + (ncomps,))
        vecs_comps = np.load('vectors_components_slice0.npz')
        assert_almost_equal(vecs_comps['slice_mean_diff2'], vec_diff)
        del pca_img, vecs_comps
Example #41
def test_nipy_diagnose():
    # Test nipy diagnose script
    fimg = load_image(funcfile)
    ncomps = 12
    with InTemporaryDirectory() as tmpdir:
        cmd = ['nipy_diagnose', funcfile,
               '--ncomponents={0}'.format(ncomps),
               '--out-path=' + tmpdir]
        run_command(cmd)
        for out_fname in ('components_functional.png',
                          'pcnt_var_functional.png',
                          'tsdiff_functional.png',
                          'vectors_components_functional.npz'):
            assert_true(isfile(out_fname))
        for out_img in ('max_functional.nii.gz',
                        'mean_functional.nii.gz',
                        'min_functional.nii.gz',
                        'std_functional.nii.gz'):
            img = load_image(out_img)
            assert_equal(img.shape, fimg.shape[:-1])
            del img
        pca_img = load_image('pca_functional.nii.gz')
        assert_equal(pca_img.shape, fimg.shape[:-1] + (ncomps,))
        vecs_comps = np.load('vectors_components_functional.npz')
        vec_diff = vecs_comps['slice_mean_diff2'].copy()# just in case
        assert_equal(vec_diff.shape, (fimg.shape[-1]-1, fimg.shape[2]))
        del pca_img, vecs_comps
    with InTemporaryDirectory() as tmpdir:
        # Check we can pass in slice and time flags
        s0_img = rollimg(fimg, 'k')
        save_image(s0_img, 'slice0.nii')
        cmd = ['nipy_diagnose', 'slice0.nii',
               '--ncomponents={0}'.format(ncomps),
               '--out-path=' + tmpdir,
               '--time-axis=t',
               '--slice-axis=0']
        run_command(cmd)
        pca_img = load_image('pca_slice0.nii')
        assert_equal(pca_img.shape, s0_img.shape[:-1] + (ncomps,))
        vecs_comps = np.load('vectors_components_slice0.npz')
        assert_almost_equal(vecs_comps['slice_mean_diff2'], vec_diff)
        del pca_img, vecs_comps
Example #42
def generateTestingPair(betaGT):
    betaGTRads = np.array(betaGT, dtype=np.float64)
    betaGTRads[0:3] = np.copy(np.pi * betaGTRads[0:3] / 180.0)
    ns = 181
    nr = 217
    nc = 181
    left = np.fromfile('data/t2/t2_icbm_normal_1mm_pn0_rf0.rawb',
                       dtype=np.ubyte).reshape(ns, nr, nc)
    left = left.astype(np.float64)
    right = np.fromfile('data/t1/t1_icbm_normal_1mm_pn0_rf0.rawb',
                        dtype=np.ubyte).reshape(ns, nr, nc)
    right = right.astype(np.float64)
    right = rcommon.applyRigidTransformation3D(right, betaGTRads)
    affine_transform = AffineTransform(
        'ijk', ['aligned-z=I->S', 'aligned-y=P->A', 'aligned-x=L->R'],
        np.eye(4))
    left = Image(left, affine_transform)
    right = Image(right, affine_transform)
    nipy.save_image(left, 'moving.nii')
    nipy.save_image(right, 'fixed.nii')
Example #43
    def _run_interface(self, runtime):

        all_ims = [load_image(fname) for fname in self.inputs.in_file]

        if not isdefined(self.inputs.tr_slices):
            TR_slices = None
        else:
            TR_slices = self.inputs.tr_slices

        R = FR4d(all_ims, tr=self.inputs.tr,
            slice_order=self.inputs.slice_order,
            tr_slices=TR_slices,
            time_interp=self.inputs.time_interp,
            start=self.inputs.start)

        R.estimate(loops=self.inputs.loops,
                   between_loops=self.inputs.between_loops,
                   speedup=self.inputs.speedup)

        corr_run = R.resample()
        self._out_file_path = []
        self._par_file_path = []

        for j, corr in enumerate(corr_run):
            self._out_file_path.append(os.path.abspath('corr_%s.nii.gz' %
            (split_filename(self.inputs.in_file[j])[1])))
            save_image(corr, self._out_file_path[j])

            self._par_file_path.append(os.path.abspath('%s.par' %
            (os.path.split(self.inputs.in_file[j])[1])))
            mfile = open(self._par_file_path[j], 'w')
            motion = R._transforms[j]
            #output a .par file that looks like fsl.mcflirt's .par file
            for i, mo in enumerate(motion):
                params = ['%.10f' % item for item in np.hstack((mo.rotation,
                                                             mo.translation))]
                string = ' '.join(params) + '\n'
                mfile.write(string)
            mfile.close()

        return runtime
Example #44
def save_niftis(dataset, features, image_dir, base_nifti=None, **kwargs):
    """
    Saves a series of niftis.
    """
    logger.info("Saving mri images")
    spatial_maps = features.spatial_maps
    spatial_maps = dataset.get_weights_view(spatial_maps)
    for i, feature in features.f.iteritems():
        image = dataset.get_nifti(spatial_maps[i], base_nifti=base_nifti)
        nipy.save_image(image, path.join(image_dir, "%d.nii.gz" % feature.id))

    nifti_files = [
        path.join(image_dir, "%d.nii.gz" % feature.id)
        for feature in features.f.values()
    ]
    roi_dict = rois.main(nifti_files)

    anat_file = ("/export/mialab/users/mindgroup/Data/mrn/"
                 "mri_extra/ch2better_aligned2EPI.nii")
    anat = nipy.load_image(anat_file)
    nifti_viewer.save_images(nifti_files, anat, roi_dict, image_dir, **kwargs)
Example #45
def tissue_segmentation(save_path, segmented_image, type, info):

    if info == 'freesurfer' and type == "GM":
        # FreeSurfer gray matter regions
        # SupraGMV=LeftCerebralCortex+RightCerebralCortex+SubcorticalGMV
        region = np.array(
            [42, 3, 10, 11, 12, 13, 17, 18, 26, 49, 50, 51, 52, 53, 54, 58])

        S = nipy.load_image(segmented_image)
        name = "GM_" + os.path.basename(segmented_image)
        # boolean mask of voxels whose labels appear in the region array
        index = np.in1d(S._data, region).reshape(S._data.shape)
        S._data[index] = 1                  # in-region voxels set to 1
        S._data[np.logical_not(index)] = 0  # all other voxels set to 0
        nipy.save_image(S, os.path.join(save_path, name))
    else:
        raise ValueError('Not implemented!')
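
# Hedged usage sketch (not from the original source): 'aseg.nii.gz' stands in
# for a FreeSurfer segmentation converted to NIfTI and '/tmp/seg' for an
# existing output directory; the call writes GM_aseg.nii.gz, a binary
# gray-matter mask built from the FreeSurfer label list above.
tissue_segmentation('/tmp/seg', 'aseg.nii.gz', 'GM', 'freesurfer')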
Example #46
def testIntersubjectRigidRegistration(fname0, fname1, level, outfname):
    nib_left = nib.load(fname0)
    nib_right = nib.load(fname1)
    left = nib_left.get_data().astype(np.double).squeeze()
    right = nib_right.get_data().astype(np.double).squeeze()
    leftPyramid = [i for i in rcommon.pyramid_gaussian_3D(left, level)]
    rightPyramid = [i for i in rcommon.pyramid_gaussian_3D(right, level)]
    plotSlicePyramidsAxial(leftPyramid, rightPyramid)
    print 'Estimation started.'
    beta = estimateRigidTransformationMultiscale3D(leftPyramid, rightPyramid)
    print 'Estimation finished.'
    # keep the warped volume (applyRigidTransformation3D returns it, as in the
    # generateTestingPair example above) so the saved image is actually aligned
    left = rcommon.applyRigidTransformation3D(left, beta)
    sl = np.array(left.shape) // 2
    sr = np.array(right.shape) // 2
    rcommon.overlayImages(left[sl[0], :, :], leftPyramid[0][sr[0], :, :])
    rcommon.overlayImages(left[sl[0], :, :], right[sr[0], :, :])
    affine_transform = AffineTransform(
        'ijk', ['aligned-z=I->S', 'aligned-y=P->A', 'aligned-x=L->R'],
        np.eye(4))
    left = Image(left, affine_transform)
    nipy.save_image(left, outfname)

    return beta
Example #47
def expandTimepoints(imgFn, baseDir):
    """
    Expand an image sequence stored as a .nii.gz file into a collection of 
    .nii.gz images (where each frame is its own .nii.gz file)

    Inputs:
    - imgFn: the time series image's filename
    - baseDir: the directory in which a new directory 
        will be created to hold the collection of files

    Returns:
    - filenames: list of filenames
    """
    # load the image
    img = load_image(imgFn)
    coord = img.coordmap

    if not os.path.exists(baseDir + 'timepoints/'):
        os.mkdir(baseDir + 'timepoints/')
    outDir = baseDir + 'timepoints/'

    # pull out the first image from the sequence (timepoint 0)
    first = img[:, :, :, 0].get_data()[:, :, :, None]
    first_img = Image(first, coord)
    # save the first image as 000
    save_image(first_img, outDir + str(0).zfill(3) + '.nii.gz')
    # build the list of filenames
    filenames = [outDir + '000.nii.gz']

    # for the remaining images
    for i in xrange(1, img.get_data().shape[3], 1):
        # pull out the image and save it
        tmp = img[:, :, :, i].get_data()[:, :, :, None]
        tmp_img = Image(tmp, coord)
        outFn = str(i).zfill(3) + '.nii.gz'
        save_image(tmp_img, outDir + outFn)
        # add the name of the image to the list of filenames
        filenames.append(outDir + outFn)

    return filenames
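
# Hedged usage sketch (not part of the original function): 'rest_4d.nii.gz' is
# a hypothetical 4D time series and '/tmp/subject01/' an arbitrary base
# directory; note that the function expects baseDir to end with a separator,
# since it builds paths by plain string concatenation.
frame_fns = expandTimepoints('rest_4d.nii.gz', '/tmp/subject01/')
# frame_fns[0] == '/tmp/subject01/timepoints/000.nii.gz'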
Example #48
    def save_niftis(self, X):
        '''Save nifti files from array.

        Args:
            X (numpy.array): array from which to make images.

        Returns:
            list: list of nifti images.
            list: list of output files for images.

        '''

        base_nifti = nipy.load_image(self.base_nifti_file)

        images = []
        out_files = []
        for i, x in enumerate(X):
            image = self.make_image(x, base_nifti)
            out_file = path.join(self.tmp_path, 'tmp_image_%d.nii.gz' % i)
            nipy.save_image(image, out_file)
            images.append(image)
            out_files.append(out_file)

        return images, out_files
Example #49
def screen_data_dirnme(in4d, outdir):
    """uses nipy diagnostic code to screen the data for
    outlier values and saves results to three images
    mean, std, pca, in same dir as original file(s)"""
    img = nipy.load_image(in4d)
    result = diag.screen(img)
    # save mean, std, pca
    pth, nme = os.path.split(in4d)
    stripnme = nme.split('.')[0]
    
    pcafile = os.path.join(outdir,
                           'QA-PCA_%s.nii.gz'%(nme))
    meanfile = os.path.join(outdir,
                            'QA-MEAN_%s.nii.gz'%(nme))
    stdfile = os.path.join(outdir,
                           'QA-STD_%s.nii.gz'%(nme))
    nipy.save_image(result['mean'], meanfile)
    nipy.save_image(result['std'], stdfile)
    nipy.save_image(result['pca'], pcafile)
    print 'saved: %s\n \t%s\n \t%s\n'%(pcafile, meanfile, stdfile)
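
# Hedged usage sketch (not from the original source): both paths are
# hypothetical and the output directory must already exist; the call writes
# QA-MEAN_*, QA-STD_* and QA-PCA_* images into /tmp/qa.
screen_data_dirnme('rest_4d.nii.gz', '/tmp/qa')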
Example #50
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""This example shows how to create a temporary image to use during processing.

The array is filled with zeros.
"""

import numpy as np

from nipy import load_image, save_image
from nipy.core.api import Image, vox2mni

# create an array of zeros, the shape of your data array
zero_array = np.zeros((91,109,91))

# create an image from our array.  The image will be in MNI space
img = Image(zero_array, vox2mni(np.diag([2, 2, 2, 1])))

# save the image to a file
newimg = save_image(img, 'tempimage.nii.gz')

# Example of creating a temporary image file from an existing image with a
# matching coordinate map.
img = load_image('tempimage.nii.gz')
zeroarray = np.zeros(img.shape)
zeroimg = Image(zeroarray, img.coordmap)
newimg = save_image(zeroimg, 'another_tempimage.nii.gz')
Example #51
else:
    mask_img = load_image(mask_img)

# Other optional arguments
niters = int(get_argument('niters', 25))
beta = float(get_argument('beta', 0.5))
ngb_size = int(get_argument('ngb_size', 6))

# Perform tissue classification
mask = mask_img.get_data() > 0
S = BrainT1Segmentation(img.get_data(), mask=mask, model='5k',
                        niters=niters, beta=beta, ngb_size=ngb_size)

# Save label image
outfile = 'hard_classif.nii'
save_image(make_xyz_image(S.label, xyz_affine(img), 'scanner'),
           outfile)
print('Label image saved in: %s' % outfile)

# Compute fuzzy Dice indices if a 3-class fuzzy model is provided
if args.probc is not None and \
        args.probg is not None and \
        args.probw is not None:
    print('Computing Dice index')
    gold_ppm = np.zeros(S.ppm.shape)
    gold_ppm_img = (args.probc, args.probg, args.probw)
    for k in range(3):
        img = load_image(gold_ppm_img[k])
        gold_ppm[..., k] = img.get_data()
    d = fuzzy_dice(gold_ppm, S.ppm, np.where(mask_img.get_data() > 0))
    print('Fuzzy Dice indices: %s' % d)
Example #52
from nipy.algorithms.registration import FmriRealign4d

from nipy import load_image, save_image
from nipy.utils import example_data

from os.path import join, split
import sys
import tempfile

# Input images are provided with the nipy-data package
runnames = [example_data.get_filename('fiac','fiac0',run+'.nii.gz') \
                for run in ('run1','run2')]
runs = [load_image(run) for run in runnames]

# Spatio-temporal realigner
R = FmriRealign4d(runs, tr=2.5, slice_order='ascending', interleaved=True)

# Estimate motion within- and between-sessions
R.estimate()

# Resample data on a regular space+time lattice using 4d interpolation
corr_runs = R.resample()

# Save images 
savedir = tempfile.mkdtemp()
for i in range(len(runs)):
    aux = split(runnames[i])
    save_image(corr_runs[i], join(savedir, 'ra'+aux[1]))


Example #53
#resample data
ra_runs = R.resample()

#save resampled images
for i, corrImage in enumerate(ra_runs):
    #save realigned image
    #trim off .nii or .nii.gz extension
    iname = cmdInput.inputs[i].split('.')
    if len(iname) > 2 and (iname[-1].lower() == "gz" and iname[-2].lower() == "nii"):
        imname = '.'.join(iname[:-2]) + cmdInput.prefix + '.nii.gz' #drop last two dotted pieces: .nii.gz
    elif len(iname) > 1 and iname[-1].lower() == "nii":
        imname = '.'.join(iname[:-1]) + cmdInput.prefix + '.nii' #drop last dotted piece: .nii
    else:
        print "Can't determine file name of input properly."
        exit(1)
    save_image(corrImage, imname)

    #save motion estimates, stored in realign object _transforms
    motion = R._transforms[i]

    #trim off .nii or .nii.gz extension
    iname = cmdInput.inputs[i].split('.')
    if len(iname) > 2 and (iname[-1].lower() == "gz" and iname[-2].lower() == "nii"):
        mname = '.'.join(iname[:-2]) + '.par' #drop last two dotted pieces: .nii.gz
    elif len(iname) > 1 and iname[-1].lower() == "nii":
        mname = '.'.join(iname[:-1]) + '.par' #drop last dotted piece: .nii
    else:
        print "Can't determine file name of input properly."
        exit(1)

    mfile = open(mname, 'w')
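    # Hedged completion sketch (not the original author's code): the snippet
    # ends right after opening mfile, so the lines below follow the
    # mcflirt-style .par pattern used elsewhere in this collection (one row
    # per volume, rotations then translations); assumes numpy is imported
    # as np.
    for mo in motion:
        params = ['%.10f' % item for item in np.hstack((mo.rotation,
                                                        mo.translation))]
        mfile.write(' '.join(params) + '\n')
    mfile.close()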
Example #54
J = load_image(target_file)

# Perform affine registration
# The output is an array-like object such that
# np.asarray(T) is a customary 4x4 matrix
print('Setting up registration...')
tic = time.time()
R = HistogramRegistration(I, J, similarity=similarity, interp=interp,
                          renormalize=renormalize)
T = R.optimize('affine', optimizer=optimizer)
toc = time.time()
print('  Registration time: %f sec' % (toc - tic))

# Resample source image
print('Resampling source image...')
tic = time.time()
#It = resample2(I, J.coordmap, T.inv(), J.shape)
It = resample(I, T.inv(), reference=J)
toc = time.time()
print('  Resampling time: %f sec' % (toc - tic))

# Save resampled source
outroot = source + '_TO_' + target
outimg = outroot + '.nii.gz'
print ('Saving resampled source in: %s' % outimg)
save_image(It, outimg)

# Save transformation matrix
outparams = outroot + '.npy'
np.save(outparams, np.asarray(T))
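
# Hedged follow-up sketch (not part of the original script): the .npy file
# written above holds a plain 4x4 array, so the estimated transform can be
# reloaded later without rerunning the registration.
T_reloaded = np.load(outparams)
print('Reloaded affine transform:\n%s' % T_reloaded)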
Example #55
def space_time_realign(Images, TR=2, numslices=None, SliceTime='asc_alt_2',
                       RefScan=0, Prefix='ra'):
    '''
    4D simultaneous slice timing and spatial realignment. Adapted from
    Alexis Roche's example script, and extended to handle multiplex
    imaging sequences.
    
    Inputs:
    
        Images: list of images, input as a list of strings/paths to images

        TR: repetition time of the acquisition, in seconds. Default is 2.

        numslices: for non-multiplex sequence, default to be the number of
            slices in the image. For multiplex sequence, enter as a tuple,
            such that the first element is the number of planes acquired in
            parallel between each other, and the second element is the number
            of slices of each parallel plane/slab, i.e. (numplanes,numslices)
        
        SliceTime: enter as a string to specify how the slices are ordered.
            Choices are the following
            1).'ascending': sequential ascending acquisition
            2).'descending': sequential descending acquisition
            3).'asc_alt_2': ascending interleaved, starting at first slice
            4).'asc_alt_2_1': ascending interleaved, starting at the second
                slice
            5).'desc_alt_2': descending interleaved, starting at last slice
            6).'asc_alt_siemens': ascending interleaved, starting at the first
                slice if odd number of slices, or second slice if even number
                of slices
            7).'asc_alt_half': ascending interleaved by half the volume
            8).'desc_alt_half': descending interleaved by half the volume
        
        RefScan: reference volume for spatial realignment movement estimation.
            Note that scan 0 is the first scan.
        
        Prefix: prefix of the new corrected images. Default is 'ra'
        
        
    Author: Alexis Roche, 2009.
            Edward Cui, February 2014
    '''
    
    # Load images
    runs = [load_image(run) for run in Images]
    # Parse data info
    if numslices is None:
        numslices = runs[0].shape[2]
        numplanes = 1
    elif isinstance(numslices,tuple):
        (numplanes,numslices) = numslices
    else:
        numplanes = 1
    # Print image info
    if numplanes>1:
        print('Running multiplex: %s' % numplanes)
    print('Number of slices: %s' % numslices)
    # Parse slice timing according to the input
    slice_timing = getattr(timefuncs, SliceTime)(numslices, TR)
    # Repeat the slice timing for multiplex sequences
    slice_timing = np.tile(slice_timing, numplanes)
    # Print slice timing info
    print('Slice times: %s' % slice_timing)
    # Spatio-temporal realigner
    R = SpaceTimeRealign(runs, tr=TR, slice_times=slice_timing, slice_info=2)
    # Estimate motion within- and between-sessions
    print('Estimating motion ...')
    R.estimate(refscan=RefScan)
    # Resample data on a regular space+time lattice using 4d interpolation
    fname = [None] * len(Images)   # output images
    mfname = [None] * len(Images)  # output motion parameter files
    print('Saving results ...')
    for n in range(len(Images)):
        # extract motion parameters (translations followed by rotations);
        # the per-volume parameters are 1-D vectors, so stack them with hstack
        motionparams = np.array([np.hstack((M.translation, M.rotation))
                                 for M in R._transforms[n]])
        # set motion parameter file name
        mfname[n] = os.path.join(os.path.split(Images[n])[0], 'rp_a0001.txt')
        # write the motion parameters to file
        np.savetxt(mfname[n], motionparams, fmt='%10.7e', delimiter='\t')
        # resample data
        corr_run = R.resample(n)
        # set image name
        fname[n] = os.path.join(os.path.split(Images[n])[0],
                                Prefix + os.path.split(Images[n])[1])
        # save image
        save_image(corr_run, fname[n])
        print(fname[n])
    return (fname, mfname)
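
# Hedged usage sketch (not from the original source): the run paths are
# hypothetical, and for a standard (non-multiplex) acquisition numslices can
# stay None so the slice count is read from the image itself.
corrected, motion_files = space_time_realign(
    ['/data/sub01/run1.nii.gz', '/data/sub01/run2.nii.gz'],
    TR=2.0, SliceTime='asc_alt_2', RefScan=0, Prefix='ra')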
Example #56
s = R.eval(T0)
sa = R.eval(T0.affine)
assert_almost_equal(s, sa)

# Test 2
T = SplineTransform(I, cp, sigma=5., grid_coords=True, affine=A)
T.param += 1.
s0 = R.eval(T0)
s = R.eval(T)
print(s-s0)
"""

# Optimize spline transform
# T = R.optimize(T0, method='steepest')
###T = R.optimize(T0)

T = T0
###T.param = np.load('spline_param.npy')


# Resample target image 
Jt = resample(J, T, reference=I)
save_image(Jt, 'deform_anubis_to_ammon.nii')


# Test 3
"""
ts = t[R._slices+[slice(0,3)]]
tts = T[R._slices]()
"""