Code Example #1
 def _run_interface(self, runtime):
     mag_filename_list=self.inputs.mag
     snr_window_sz_mm=self.inputs.snr_window_sz
     avg_out_filename=self._list_outputs()['avg_out_filename']
     weight_out_filename=self._list_outputs()['weight_out_filename']        
     ne=len(mag_filename_list)
     mag_img_obj=nib.load(mag_filename_list[0])
     shape=mag_img_obj.get_shape()
     voxel_size=mag_img_obj.header['pixdim'][1:4]
     snr_window_sz_vxl=np.ceil(snr_window_sz_mm/np.array(voxel_size)).astype('int')        
     avg=np.zeros(shape)
     weight_avg=0
     weight=np.empty(shape+(ne,))
     count=0
     mag_filename_list.sort()
     eps = sys.float_info.min**(1.0/4)
     for imgloc in mag_filename_list:
         curr_img=(nib.load(imgloc).get_data()).astype('float')
         curr_img_avg=np.abs(curr_img).mean()            
         local_mean = ndimage.uniform_filter(curr_img, snr_window_sz_vxl)
         local_sqr_mean = ndimage.uniform_filter(curr_img**2, snr_window_sz_vxl)
         local_var = local_sqr_mean - local_mean**2 + eps
         weight[...,count]=local_mean/local_var + eps  # SNR estimate
         avg+=curr_img_avg*curr_img  # weighted avg; later echoes weighted less
         weight_avg+=curr_img_avg
         count+=1
     avg=avg/weight_avg        
     
     niftifile=nib.Nifti1Pair(avg,mag_img_obj.affine)
     nib.save(niftifile,avg_out_filename)
     niftifile=nib.Nifti1Pair(weight,mag_img_obj.affine)
     nib.save(niftifile,weight_out_filename)
     
     return runtime
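
Note: several snippets in this collection rely on nibabel accessors that have since been deprecated or removed (get_data(), get_shape(), get_affine(), get_header()). As a hedged reference, here is a minimal sketch of the same load/compute/save pattern against the current API, using made-up file names:

import numpy as np
import nibabel as nib

img = nib.load('in.nii')                          # hypothetical input path
data = img.get_fdata()                            # replaces deprecated get_data()
shape = img.shape                                 # replaces deprecated get_shape()
voxel_size = img.header.get_zooms()[:3]           # instead of header['pixdim'][1:4]
out = nib.Nifti1Pair(data.astype(np.float32), img.affine)
nib.save(out, 'out.img')                          # writes out.img plus out.hdr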
Code Example #2
def test_files_interface():
    # test high-level interface to files mapping
    arr = np.zeros((2, 3, 4))
    aff = np.eye(4)
    img = nib.Nifti1Image(arr, aff)
    # single image
    img.set_filename('test')
    yield assert_equal(img.get_filename(), 'test.nii')
    yield assert_equal(img.file_map['image'].filename, 'test.nii')
    yield assert_raises(KeyError, img.file_map.__getitem__, 'header')
    # pair - note new class
    img = nib.Nifti1Pair(arr, aff)
    img.set_filename('test')
    yield assert_equal(img.get_filename(), 'test.img')
    yield assert_equal(img.file_map['image'].filename, 'test.img')
    yield assert_equal(img.file_map['header'].filename, 'test.hdr')
    # fileobjs - single image
    img = nib.Nifti1Image(arr, aff)
    img.file_map['image'].fileobj = StringIO()
    img.to_file_map()  # saves to files
    img2 = nib.Nifti1Image.from_file_map(img.file_map)
    # img still has correct data
    yield assert_array_equal(img2.get_data(), img.get_data())
    # fileobjs - pair
    img = nib.Nifti1Pair(arr, aff)
    img.file_map['image'].fileobj = StringIO()
    # no header yet
    yield assert_raises(FileHolderError, img.to_file_map)
    img.file_map['header'].fileobj = StringIO()
    img.to_file_map()  # saves to files
    img2 = nib.Nifti1Pair.from_file_map(img.file_map)
    # img still has correct data
    yield assert_array_equal(img2.get_data(), img.get_data())
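
The test above comes from an older nibabel test suite (nose-style yield asserts, StringIO). Under Python 3 the in-memory buffers must be binary; a rough pytest-style sketch of the .img/.hdr pair round trip, assuming a current nibabel release:

from io import BytesIO
import numpy as np
import nibabel as nib

def test_pair_file_map_roundtrip():
    arr = np.zeros((2, 3, 4))
    img = nib.Nifti1Pair(arr, np.eye(4))
    # a pair image needs two file handles: one for the image, one for the header
    img.file_map['image'].fileobj = BytesIO()
    img.file_map['header'].fileobj = BytesIO()
    img.to_file_map()                                   # serialize into the buffers
    img2 = nib.Nifti1Pair.from_file_map(img.file_map)   # read back from the same map
    assert np.array_equal(np.asanyarray(img2.dataobj), arr)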
Code Example #3
    def _run_interface(self, runtime):
        phase_filename_list=self.inputs.phase        
        mask_filename=self.inputs.mask       
        threshold=self.inputs.threshold
        reliability_mask_filename=self._list_outputs()['reliability_mask_filename']
        reliability_filename=self._list_outputs()['reliability_filename']
        ne=len(phase_filename_list)
        ph_img_obj=nib.load(phase_filename_list[0])        
        shape=ph_img_obj.get_shape()        
        ph_imgs=np.empty(shape+(ne,))        
        for echo in range(ph_imgs.shape[-1]):
            ph_imgs[...,echo]=(nib.load(phase_filename_list[echo]).get_data())            
        reliability=np.empty_like(ph_imgs)
        mask=(nib.load(mask_filename).get_data()).astype('bool')
        for echo in range(ph_imgs.shape[-1]):
            reliability[...,echo]=cr.calculateReliability(ph_imgs[...,echo].astype('float32'),mask)
                
        mask=mask[...,np.newaxis]
        mask=mask.repeat(ne,axis=-1)  # assign the result; ndarray.repeat is not in-place
        """
        #absolute value of second difference dependent on image mean
        #dependent on background field
        #could try using std units to avoid this
        threshold_std=self.inputs.threshold_std_units
        tmpmean=reliability[mask].mean()
        tmpmask=(reliability<tmpmean)*(reliability>0)
        mean=reliability[tmpmask].mean()        
        std=reliability[tmpmask].std()
        threshold=mean + threshold_std * std  
        """        
        reliability_mask=(reliability<threshold)*mask
        reliability_mask=np.squeeze(reliability_mask)
        reliability_mask=reliability_mask.astype('int8')

        """        
        #smooth reliability mask to avoid noisey on/off pixel patterns
        rtmp=reliability_mask.copy()
        kernelSz_mm=0.5#mm
        KernelSz_vxl=np.ceil(kernelSz_mm/np.array(voxelSize)).astype('int')        
        for i in range(5):
            rtmp=ndimage.gaussian_filter(rtmp.astype('float'),np.concatenate((KernelSz_vxl,(0,))))
            rtmp=rtmp*reliabilityMask          
        rtmp=ndimage.gaussian_filter(rtmp.astype('float'),(0,0,0,.5))
        #rtmp=rtmp*reliabilityMask
        for echo in range(reliabilityMask.shape[-1]):
            rtmp[...,echo]=ndimage.binary_erosion(rtmp[...,echo])
        reliability_mask=rtmp
        #"""
        
        niftifile=nib.Nifti1Pair(reliability_mask,ph_img_obj.affine)
        nib.save(niftifile,reliability_mask_filename)   
        niftifile=nib.Nifti1Pair(reliability,ph_img_obj.affine)
        nib.save(niftifile,reliability_filename)                       
        return runtime
Code Example #4
def _write_nibabel(
    tensor: TypeData,
    affine: TypeData,
    path: TypePath,
    squeeze: bool = True,
    channels_last: bool = True,
) -> None:
    """
    Expects a path with an extension that can be used by nibabel.save
    to write a NIfTI-1 image, such as '.nii.gz' or '.img'
    """
    assert tensor.ndim == 4
    if channels_last:
        tensor = tensor.permute(1, 2, 3, 0)
    tensor = tensor.squeeze() if squeeze else tensor
    suffix = Path(str(path).replace('.gz', '')).suffix
    if '.nii' in suffix:
        img = nib.Nifti1Image(np.asarray(tensor), affine)
    elif '.hdr' in suffix or '.img' in suffix:
        img = nib.Nifti1Pair(np.asarray(tensor), affine)
    else:
        raise nib.loadsave.ImageFileError
    img.header['qform_code'] = 1
    img.header['sform_code'] = 0
    img.to_filename(str(path))
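
As a usage note, the suffix check above is what decides between the single-file and two-file NIfTI flavours: '.nii'/'.nii.gz' yields a Nifti1Image, while '.hdr'/'.img' yields a Nifti1Pair, which nibabel writes as a header plus image pair. A small sketch with made-up paths:

import numpy as np
import nibabel as nib

data = np.random.rand(8, 8, 8).astype(np.float32)
nib.save(nib.Nifti1Image(data, np.eye(4)), 'vol.nii.gz')  # single compressed file
nib.save(nib.Nifti1Pair(data, np.eye(4)), 'vol.img')      # writes vol.img and vol.hdr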
Code Example #5
File: data_io.py Project: Borda/BIRL
def convert_image_to_nifti(path_image, path_out_dir=None):
    """ converting normal image to Nifty Image

    :param str path_image: input image
    :param str path_out_dir: path to output folder
    :return str: path to the resulting image

    >>> path_img = os.path.join(update_path('data-images'), 'images',
    ...                         'artificial_moving-affine.jpg')
    >>> path_img2 = convert_image_to_nifti(path_img, '.')
    >>> path_img2  # doctest: +ELLIPSIS
    '...artificial_moving-affine.nii'
    >>> os.path.isfile(path_img2)
    True
    >>> path_img3 = convert_image_from_nifti(path_img2)
    >>> os.path.isfile(path_img3)
    True
    >>> list(map(os.remove, [path_img2, path_img3]))  # doctest: +ELLIPSIS
    [...]
    """
    path_image = update_path(path_image)
    path_img_out = _gene_out_path(path_image, '.nii', path_out_dir)
    logging.debug('Convert image to Nifti format "%s" ->  "%s"', path_image,
                  path_img_out)

    # img = Image.open(path_file).convert('LA')
    img = load_image(path_image)
    nim = nibabel.Nifti1Pair(img, np.eye(4))
    del img
    nibabel.save(nim, path_img_out)

    return path_img_out
Code Example #6
File: io.py Project: zhouqp631/torchio
def _write_nibabel(
    tensor: TypeData,
    affine: TypeData,
    path: TypePath,
    squeeze: bool = False,
) -> None:
    """
    Expects a path with an extension that can be used by nibabel.save
    to write a NIfTI-1 image, such as '.nii.gz' or '.img'
    """
    assert tensor.ndim == 4
    num_components = tensor.shape[0]

    # NIfTI components must be at the end, in a 5D array
    if num_components == 1:
        tensor = tensor[0]
    else:
        tensor = tensor[np.newaxis].permute(2, 3, 4, 0, 1)
    tensor = tensor.squeeze() if squeeze else tensor
    suffix = Path(str(path).replace('.gz', '')).suffix
    if '.nii' in suffix:
        img = nib.Nifti1Image(np.asarray(tensor), affine)
    elif '.hdr' in suffix or '.img' in suffix:
        img = nib.Nifti1Pair(np.asarray(tensor), affine)
    else:
        raise nib.loadsave.ImageFileError
    if num_components > 1:
        img.header.set_intent('vector')
    img.header['qform_code'] = 1
    img.header['sform_code'] = 0
    nib.save(img, str(path))
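
For multi-component tensors this variant moves the channels to the fifth NIfTI dimension (shape X, Y, Z, 1, C) and tags the header with the 'vector' intent. A minimal sketch of the same idea for a 3-component field, using placeholder data and a made-up output path:

import numpy as np
import nibabel as nib

vec = np.random.rand(16, 16, 16, 1, 3).astype(np.float32)  # components in dim 5, time dim = 1
img = nib.Nifti1Pair(vec, np.eye(4))
img.header.set_intent('vector')   # mark the fifth dimension as vector components
nib.save(img, 'field.img')        # hypothetical output; writes field.img + field.hdr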
Code Example #7
File: module.py Project: mi2rl/MI2RLNet
    def predict(
            self,
            path: str,
            save_path: Optional[str] = None) -> Tuple[np.ndarray, np.ndarray]:
        """
        Liver segmentation
        Args:
            (string) path : image path (hdr/img, nii, npy)
            (string) save_path : 
            (bool) istogether: with image which was used or not

        Return:
            (numpy ndarray) liver mask with shape (depth, width, height)
        """

        path = os.path.abspath(path)
        imgo, img = self._preprocessing(path)
        mask = np.squeeze(self.model(img).numpy().argmax(axis=-1))
        mask_shape = mask.shape
        mask = zoom(mask, [
            self.img_shape[0] / mask_shape[0], self.img_shape[1] /
            mask_shape[1], self.img_shape[2] / mask_shape[2]
        ],
                    order=1,
                    mode='constant').astype(np.uint8)

        if save_path:
            temp2 = np.swapaxes(mask, 1, 2)
            temp2 = np.swapaxes(temp2, 0, 1)
            temp2 = np.swapaxes(temp2, 1, 2)
            mask_pair = nib.Nifti1Pair(
                temp2, np.diag([-self.space[0], -self.space[1], 5., 1]))
            nib.save(mask_pair, save_path)

        return (np.squeeze(imgo), mask)
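
The affine handed to Nifti1Pair here is a plain diagonal matrix, so it encodes only voxel spacing (and, through the negative entries, axis flips), not the scanner position of the original volume. A hedged sketch of building such an affine from a spacing tuple; the 5 mm slice spacing mirrors the hard-coded value above, the rest is made up:

import numpy as np
import nibabel as nib

spacing = (0.7, 0.7, 5.0)                                       # in-plane spacing, 5 mm slices
affine = np.diag([-spacing[0], -spacing[1], spacing[2], 1.0])   # negative entries flip x/y
mask = np.zeros((256, 256, 40), dtype=np.uint8)                 # placeholder mask volume
nib.save(nib.Nifti1Pair(mask, affine), 'mask.img')              # hypothetical output path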
Code Example #8
    def rebuild_pred(self, pred, meta, save_dir=None):
        affine = meta['affine']
        cropped_shape = meta['cropped_shape']
        original_shape = meta['shape']
        orient = meta['orient']

        # pad_width for np.pad
        pad_width = meta['nonair_bbox']
        for i in range(len(original_shape)):
            pad_width[i][1] = original_shape[i] - (pad_width[i][1] + 1)

        print('Resample pred to original spacing...')
        pred = resize(pred, cropped_shape, is_label=True)
        print('Add padding to pred...')
        pred = np.pad(pred, pad_width, constant_values=0)
        pred = nib.orientations.apply_orientation(pred, orient)

        if save_dir:
            save_dir = Path(save_dir)
            if not save_dir.exists():
                save_dir.mkdir(parents=True)

            pred_nib = nib.Nifti1Pair(pred, np.array(affine))
            nib_fname = '%s_pred.nii.gz' % meta['case_id']
            nib.save(pred_nib, str(save_dir / nib_fname))

        return {'pred': pred, 'meta': meta}
Code Example #9
File: module.py Project: daeun02/MI2RLNet
    def predict(self,
                path: str,
                save_path: Optional[str] = None) -> Tuple[np.array, np.array]:
        """
        blackblood segmentation
        Args:
            (string) path : image path (nii)
            (bool) istogether : with image which was used or not
        Return:
            (numpy ndarray) blackblood mask with shape
        """
        path = os.path.abspath(path)
        image = self._preprocessing(path)
        mask = np.squeeze(self.model(image).numpy().argmax(axis=-1))
        mask_shape = mask.shape
        mask = ndimage.zoom(mask, [
            self.img_shape[0] / mask_shape[0], self.img_shape[1] /
            mask_shape[1], self.img_shape[2] / mask_shape[2]
        ],
                            order=1,
                            mode='constant')
        mask = mask.astype(np.uint8)

        if save_path:
            temp2 = np.swapaxes(mask, 1, 2)
            temp2 = np.swapaxes(temp2, 0, 1)
            temp2 = np.swapaxes(temp2, 1, 2)
            mask_pair = nib.Nifti1Pair(
                temp2,
                np.diag([-self.space[0], self.space[1], self.space[2], 1]))
            nib.save(mask_pair, save_path)

        return (np.squeeze(image), mask)
Code Example #10
File: data_io.py Project: Borda/BIRL
def convert_image_to_nifti_gray(path_image, path_out_dir=None):
    """ converting normal image to Nifty Image

    :param str path_image: input image
    :param str path_out_dir: path to output folder
    :return str: path to the resulting image

    >>> path_img = './sample-image.png'
    >>> save_image(path_img, np.zeros((100, 200, 3)))
    >>> path_img2 = convert_image_to_nifti_gray(path_img)
    >>> os.path.isfile(path_img2)
    True
    >>> path_img3 = convert_image_from_nifti(path_img2, '.')
    >>> os.path.isfile(path_img3)
    True
    >>> list(map(os.remove, [path_img, path_img2, path_img3]))  # doctest: +ELLIPSIS
    [...]
    """
    path_image = update_path(path_image)
    path_img_out = _gene_out_path(path_image, '.nii', path_out_dir)
    logging.debug('Convert image to Nifti format "%s" ->  "%s"', path_image,
                  path_img_out)

    # img = Image.open(path_file).convert('LA')
    img = rgb2gray(load_image(path_image))
    nim = nibabel.Nifti1Pair(np.swapaxes(img, 1, 0), np.eye(4))
    del img
    nibabel.save(nim, path_img_out)

    return path_img_out
Code Example #11
def convert_image_to_nifti(path_image, path_out):
    """ converting normal image to Nifty Image

    :param str path_image: input image
    :param str path_out: path to output folder
    :return str: path to the resulting image

    >>> path_img = './sample-image.png'
    >>> save_image(path_img, np.zeros((100, 200, 3)))
    >>> path_img2 = convert_image_to_nifti(path_img, '.')
    >>> os.path.isfile(path_img2)
    True
    >>> path_img3 = convert_image_from_nifti(path_img2, '.')
    >>> os.path.isfile(path_img3)
    True
    >>> list(map(os.remove, [path_img, path_img2, path_img3]))  # doctest: +ELLIPSIS
    [...]
    """
    img_name = os.path.splitext(os.path.basename(path_image))[0]
    path_img_out = os.path.join(path_out, img_name + '.nii')
    logging.debug('Convert image to Nifti format "%s" ->  "%s"', path_image,
                  path_img_out)

    # img = Image.open(path_image).convert('LA')
    img = load_image(path_image)
    nim = nibabel.Nifti1Pair(img, np.eye(4))
    del img
    nibabel.save(nim, path_img_out)

    return path_img_out
Code Example #12
def main():
    args = parse_arguments(sys.argv[1:])
    fn_in = os.path.abspath(os.path.realpath(os.path.expanduser(args.fn_in)))
    if not os.path.isfile(fn_in):
        raise IOError('Could not find file: {0}'.format(args.fn_in))
    fn_out = os.path.abspath(os.path.realpath(os.path.expanduser(args.out)))
    if os.path.splitext(fn_out)[1] == '':
        fn_out += '.nii.gz'
    reference = os.path.abspath(os.path.realpath(os.path.expanduser(args.ref)))

    affine = nib.load(reference).affine
    nvox = nib.load(reference).shape
    if len(nvox) == 2:
        nvox = nvox + (1,)

    m = msh.read_msh(fn_in)
    if 'J' not in m.field.keys():
        raise IOError('Input mesh file must have a current density (J) field')

    B = calc_B(m.field['J'], nvox, affine, calc_res=args.res, mask=args.mask)

    img = nib.Nifti1Pair(B, affine)
    img.header.set_xyzt_units('mm')
    img.set_qform(affine)
    img.update_header()
    nib.save(img, fn_out)
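
After building the Nifti1Pair, the script records millimetre units and copies the reference affine into the qform. The qform code can also be set explicitly in the same call; a sketch with placeholder data (set_qform with code=1 marks scanner-anatomical coordinates):

import numpy as np
import nibabel as nib

affine = np.eye(4)
B = np.zeros((32, 32, 32, 3), dtype=np.float32)  # placeholder field map
img = nib.Nifti1Pair(B, affine)
img.set_qform(affine, code=1)          # qform code 1 = scanner-anatomical
img.header.set_xyzt_units(xyz='mm')    # record spatial units
img.update_header()
nib.save(img, 'B_field.img')           # hypothetical output; writes .img + .hdr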
Code Example #13
def convert_img_2_nifti_gray(path_img_in, path_out):
    """ converting standard image to Nifti format

    :param str path_img_in: path to input image
    :param str path_out: path to output directory
    :return str: path to output image

    >>> np.random.seed(0)
    >>> img = np.random.random((150, 125))
    >>> p_in = './test_sample_image.png'
    >>> io.imsave(p_in, img)
    >>> p_out = convert_img_2_nifti_gray(p_in, '.')
    >>> p_out
    'test_sample_image.nii'
    >>> os.remove(p_out)
    >>> os.remove(p_in)
    """
    assert os.path.exists(path_img_in), 'missing input: %s' % path_img_in
    assert os.path.exists(path_out), 'missing output: %s' % path_out
    name_img_out = os.path.splitext(os.path.basename(path_img_in))[0] + '.nii'
    path_img_out = os.path.join(os.path.dirname(path_out), name_img_out)
    logging.debug('Convert image to Nifti format "%s" ->  "%s"', path_img_in,
                  path_img_out)

    # img = Image.open(imgIn).convert('LA')
    img = io.imread(path_img_in)
    img = color.rgb2gray(img)

    img = np.swapaxes(img, 1, 0)
    nim = nibabel.Nifti1Pair(img, np.eye(4))
    nibabel.save(nim, path_img_out)

    # for k in nim.header.keys():
    #     print('{:20s}: \t{}'.format(k, nim.header[k]))
    return path_img_out
Code Example #14
def convert_img_2_nifti_rgb(path_img, path_out):
    """ converting standard image to Nifti format

    :param str path_img: path to input image
    :param str path_out: path to output directory
    :return str: path to output image

    >>> np.random.seed(0)
    >>> p_in = './temp_sample-image.png'
    >>> io.imsave(p_in, np.random.random((150, 125, 3)))
    >>> p_nifty = convert_img_2_nifti_rgb(p_in, '.')
    >>> p_nifty
    'temp_sample-image.nii'
    >>> os.remove(p_nifty)
    >>> os.remove(p_in)
    """
    assert os.path.exists(path_img), 'missing input: %s' % path_img
    assert os.path.exists(path_out), 'missing output: %s' % path_out
    name_img_out = os.path.splitext(os.path.basename(path_img))[0] + '.nii'
    path_img_out = os.path.join(os.path.dirname(path_out), name_img_out)
    logging.debug('Convert image to Nifti format "%s" ->  "%s"', path_img,
                  path_img_out)

    img = io_imread(path_img)
    dims = img.shape

    img = img.reshape([dims[0], dims[1], 1, dims[2], 1])
    img = np.swapaxes(np.swapaxes(img, 0, 3), 1, 4)

    nim = nibabel.Nifti1Pair(img, np.eye(4))
    nibabel.save(nim, path_img_out)

    # for k in nim.header.keys():
    #     print('{:20s}: \t{}'.format(k, nim.header[k]))
    return path_img_out
Code Example #15
def merge_atlas(primary_fname,
                secondary_fname,
                output_fnames_dict,
                background_values={0},
                selem=None):
    """
    Merge two atlases. The primary atlas is preserved; new ROIs in the
    secondary atlas are dilated and used to fill in the "cracks" of the
    primary atlas. Both inputs should be paths to NIfTI files.
    """
    primary = nibabel.load(primary_fname)
    primary.data = primary.get_data().astype(int)
    secondary = nibabel.load(secondary_fname)
    secondary.data = secondary.get_data().astype(int)
    # get all ROI values and remove background ROI
    labels_in_primary = set(np.unique(primary.data)) - background_values
    mask_primary = np.isin(primary.data, list(labels_in_primary))
    # keep only new ROIs, keep only voxels that are not primary atlas or background values
    mask_secondary = np.isin(
        secondary.data, list(labels_in_primary), invert=True) * np.isin(
            secondary.data, list(background_values), invert=True)

    new_nuclei = secondary.data * mask_secondary
    new_nuclei_values = set(np.unique(new_nuclei)) - background_values
    new_nuclei = morphology.dilation(new_nuclei, selem=selem)
    # new_nuclei = morphology.closing(new_nuclei, selem=np.ones((3, 3, 3)))
    merged_atlas = new_nuclei * np.invert(
        mask_primary) + primary.data * mask_primary
    for value in new_nuclei_values:
        nii = nibabel.Nifti1Pair(merged_atlas == value,
                                 primary.get_affine(),
                                 header=primary.get_header())
        nibabel.save(nii, output_fnames_dict[value])
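
merge_atlas uses accessors that current nibabel no longer ships (get_data(), get_affine(), get_header()) and passes boolean arrays straight to Nifti1Pair. A hedged sketch of the final save loop against the current API, with placeholder inputs standing in for the variables computed above and an explicit uint8 cast before writing:

import numpy as np
import nibabel

primary = nibabel.Nifti1Image(np.zeros((16, 16, 16), dtype=np.int16), np.eye(4))
merged_atlas = np.random.randint(0, 4, size=(16, 16, 16))
new_nuclei_values = {1, 2, 3}
output_fnames_dict = {v: 'roi_%d.img' % v for v in new_nuclei_values}

for value in new_nuclei_values:
    roi = (merged_atlas == value).astype(np.uint8)       # cast bool -> uint8 before saving
    nii = nibabel.Nifti1Pair(roi, primary.affine,        # .affine replaces get_affine()
                             header=primary.header)      # .header replaces get_header()
    nibabel.save(nii, output_fnames_dict[value])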
Code Example #16
def test_image_container_unexpected_arguments():
    """ Check that passing unexpected arguments raises an error """
    with pytest.raises(TypeError):
        NumpyImageContainer(image=np.zeros((3, 3, 3)), unexpected="test")
    with pytest.raises(TypeError):
        NiftiImageContainer(nib.Nifti1Pair(np.zeros((3, 3, 3)),
                                           affine=np.eye(4)),
                            unexpected="test")
Code Example #17
 def _run_interface(self, runtime):
     phaseFilename=self.inputs.phase        
     maskFilename=self.inputs.mask       
     threshold=self.inputs.threshold
     rad=self.inputs.erosion_sz
     trimmed_mask_filename=self._list_outputs()['trimmed_mask_filename']
     reliability_filename=self._list_outputs()['reliability_filename']
             
     ph_img_obj=nib.load(phaseFilename)
     voxel_size=ph_img_obj.header['pixdim'][1:4]        
     ph_img=ph_img_obj.get_data()               
     mask=nib.load(maskFilename).get_data()
     
     px,py,pz=int(rad/voxel_size[0]),int(rad/voxel_size[1]),int(rad/voxel_size[2])
     x=np.linspace(-rad,rad,2*px+1)
     y=np.linspace(-rad,rad,2*py+1)
     z=np.linspace(-rad,rad,2*pz+1)
     Y,X,Z=np.meshgrid(y,x,z)
     dist2=(X**2+Y**2+Z**2)
     circ=dist2<=rad**2        
     
     mask_eroded=ndimage.binary_erosion(mask,circ)      
     mask_dilated=ndimage.binary_dilation(mask,iterations=3)
     reliability=cr.calculateReliability(ph_img.astype('float32'),mask_dilated.astype('bool'))
     
     """
     #absolute value of second difference dependent on image mean
     #dependent on background field
     #could try using std units to avoid this
     threshold_std=self.inputs.threshold_std
     mean=reliability[mask.astype('bool')].mean()
     std=reliability[mask.astype('bool')].std()        
     threshold=mean + threshold_std * std
     threshold=threshold_std
     """        
             
     newmask=(((reliability<threshold)*mask+mask_eroded)>0)
     newmask=ndimage.binary_fill_holes(newmask)
     newmask=ndimage.binary_opening(newmask).astype('int8')        
   
     niftifile=nib.Nifti1Pair(newmask,ph_img_obj.affine)
     nib.save(niftifile,trimmed_mask_filename)       
     niftifile=nib.Nifti1Pair(reliability,ph_img_obj.affine)
     nib.save(niftifile,reliability_filename)                
     return runtime
Code Example #18
 def _run_interface(self, runtime):
     mag_filename_list=self.inputs.mag
     phase_filename_list=self.inputs.phase
     freq_filename=self.inputs.freq_loc
     mask_filename=self.inputs.mask
     json_filename_list=self.inputs.json        
             
     R2star_filename=self._list_outputs()['R2star']
     R2star_fit_filename=self._list_outputs()['R2star_fit']
     neg_mask_filename=self._list_outputs()['neg_mask']        
     nan_mask_filename=self._list_outputs()['nan_mask']        
     
     mag_img_obj=nib.load(mag_filename_list[0])
     shape=mag_img_obj.get_shape()
     
     complex_img=np.empty(shape+(len(mag_filename_list),),dtype='complex')        
     te=np.empty(len(json_filename_list))
     count=0
     
     for magloc,phloc,jsonloc in zip(mag_filename_list,phase_filename_list,json_filename_list):
         complex_img[...,count]=(nib.load(magloc).get_data()).astype('float')*np.exp(1j*(nib.load(phloc).get_data())) 
         with open(jsonloc) as f:
             te[count]=json.load(f)['EchoTime']
         count+=1        
     freq=nib.load(freq_filename).get_data()
     mask=nib.load(mask_filename).get_data()
     #to avoid temporal wraps, we can remove the phase evolution of each echo
     #we retain the noise useful for the complex fit
     data_reduced_phase=complex_img*np.exp(-1j*freq[...,np.newaxis]*te) 
     R2star_img,goodness_of_fit,neg_mask,nan_mask=calc_R2star_fn(data_reduced_phase,mask,te)        
     neg_mask=neg_mask.astype('int8')
     nan_mask=nan_mask.astype('int8')
     
     niftifile=nib.Nifti1Pair(R2star_img,mag_img_obj.affine)
     nib.save(niftifile,R2star_filename)
     niftifile=nib.Nifti1Pair(goodness_of_fit,mag_img_obj.affine)
     nib.save(niftifile,R2star_fit_filename)
     niftifile=nib.Nifti1Pair(neg_mask,mag_img_obj.affine)
     nib.save(niftifile,neg_mask_filename)
     niftifile=nib.Nifti1Pair(nan_mask,mag_img_obj.affine)
     nib.save(niftifile,nan_mask_filename)
     
     return runtime
Code Example #19
def main():
    parser = argparse.ArgumentParser(
        description='apply affine transform stored '
        'with image to normalize image',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i', '--image', required=True, help='input image')
    parser.add_argument('-m',
                        '--mask',
                        help='mask image; crop input if provided')
    parser.add_argument('-s',
                        '--scale',
                        type=float,
                        default=1,
                        help='scale of output image')
    parser.add_argument('--mask_crop_enlarge',
                        type=float,
                        default=1.2,
                        help='enlarge factor for mask crop box')
    parser.add_argument('--min_mask_border',
                        type=int,
                        default=0,
                        help='minimum distance from mask to border')
    parser.add_argument('-o',
                        '--output',
                        required=True,
                        help='output filename; do not include extension')
    args = parser.parse_args()

    image = load_and_normalize(args.image, args.scale)
    if args.mask:
        mask = load_and_normalize(args.mask, args.scale)
        assert image.shape == mask.shape
        image, mask = apply_mask(image, mask, args.mask_crop_enlarge,
                                 args.min_mask_border)
        mask = nibabel.Nifti1Pair(mask, np.eye(4))

    image = nibabel.Nifti1Pair(image, np.eye(4))
    logger.info('output size: {}'.format(image.shape))
    nibabel.save(image, args.output + '.nii.gz')
    if args.mask:
        nibabel.save(mask, args.output + '-mask.nii.gz')
Code Example #20
File: fileHanding.py Project: lmonster1997/radiology
def ROIhanding(ROIDir, newDir):
    roi_name_Dir = sorted([i for i in os.listdir(ROIDir)])
    for id, i in enumerate(roi_name_Dir):
        os.mkdir(os.path.join(newDir, i))
        roi = os.listdir(os.path.join(ROIDir, i))
        for j in roi:
            read_path = os.path.join(ROIDir, i, j)
            print(read_path)
            img = nib.load(read_path).get_data()
            save_path = os.path.join(newDir, i, j)
            pairimg = nib.Nifti1Pair(img, np.eye(4))
            nib.save(pairimg, save_path)
Code Example #21
 def _run_interface(self, runtime):
     infiles=self.inputs.infiles
     self.outfilenames=[]
             
     for f in infiles:
         imgobj=nib.load(f)
         img=imgobj.get_data()
         img=-img
         filename,ext=os.path.splitext(f)
         newfilename=os.path.abspath(os.path.basename(filename)+'_processed'+ext)
         niftifile=nib.Nifti1Pair(img,imgobj.affine)
         nib.save(niftifile,newfilename)
         self.outfilenames.append(newfilename)              
     return runtime
Code Example #22
def dump(obj, fpath, use_pickle=False):
    """dump to file
    :param fpath: file obj or file path"""
    assert isinstance(fpath, basestring)
    if '.nii' in fpath:
        assert isinstance(obj, np.ndarray)
        img = nib.Nifti1Pair(obj, np.eye(4))
        nib.save(img, fpath)
        return

    if use_pickle:
        with open(fpath, 'wb') as fout:
            pickle.dump(obj, fout, pickle.HIGHEST_PROTOCOL)
    else:
        joblib.dump(obj, fpath)
Code Example #23
def main():
    parser = argparse.ArgumentParser(
        description='apply mask to an image',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('img')
    parser.add_argument('mask')
    parser.add_argument('output')
    parser.add_argument('-v', '--verbose', action='store_true')

    args = parser.parse_args()
    img = serial.load(args.img)
    mask = serial.load(args.mask)
    img[mask <= mask.max() / 2.0] = img.min()
    if args.verbose:
        view_3d_data_single(img)

    out = nib.Nifti1Pair(img, np.eye(4))
    nib.save(out, args.output)
Code Example #24
def predict_liver(model):
    model = load_model(model, custom_objects={'loss': dice_coef_loss_weight, 'metrics': dice_coef})
    imgs = np.ndarray((128, 128, 128), dtype="float")
    segs = np.ndarray((128, 128, 128), dtype=int)
    for i in range(0, 128):
        img_name = "liver-020-" + str(i).zfill(3) + "-img.npy"
        seg_name = "liver-020-" + str(i).zfill(3) + "-seg.npy"
        img_path = os.path.join("../image/prepared_liver/test", img_name)
        seg_path = os.path.join("../image/prepared_liver/test", seg_name)
        imgs[i,:,:] = np.load(img_path)
        segs[i,:,:] = np.load(seg_path)
    preds = model.predict(imgs[..., np.newaxis])
    img_preds = np.argmax(preds, axis=3)
    #save 3d image
    pair_img = nib.Nifti1Pair(img_preds, np.eye(4))
    nib.save(pair_img, "pred_liver-20.img")
    #compute dice coefficient
    dic = dice_coef_2class(img_preds, segs)
    return dic
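
predict_liver saves through an '.img' path, so nibabel writes a pred_liver-20.img / pred_liver-20.hdr pair. Note that np.argmax returns a platform integer (typically int64), which recent nibabel releases warn about or refuse to write unless a dtype is given explicitly, so casting the label volume down first is the safer pattern; a small sketch with stand-in labels:

import numpy as np
import nibabel as nib

labels = np.random.randint(0, 3, size=(128, 128, 128))          # stand-in for the argmax output
pair_img = nib.Nifti1Pair(labels.astype(np.uint8), np.eye(4))   # explicit small integer dtype
nib.save(pair_img, "pred_liver-20.img")                         # writes .img and .hdr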
Code Example #25
    def _run_interface(self, runtime):
        
        rdf_file=self.inputs.lfs_loc
        mask_file=self.inputs.mask_loc
        iMag_file=self.inputs.iMag_loc
        WData_file=self.inputs.weight_loc
        alpha=self.inputs.lamda #split bregman alpha is the weight on the data fidelity, MEDI uses lambda for this
        lamda=2*alpha #split bregman uses lambda to refer to the weight which keeps bregman parameters close to their corresponding data
        maxiter=self.inputs.maxiter
        tol=self.inputs.tol
        CF=self.inputs.CF
        
        rdf_obj=nib.load(rdf_file)
        rdf=rdf_obj.get_data()
        voxel_size=rdf_obj.header['pixdim'][1:4]
        mask=nib.load(mask_file).get_data()
        iMag=nib.load(iMag_file).get_data().transpose(1,0,2)[:,::-1,:]
        WData_sqrd=nib.load(WData_file).get_data()[...,0]**2*mask
        
        #create mask to protect edges from regularization
        percentage=0.1 #keep 10% of voxels with highest gradient value
        WGradx,WGrady,WGradz=np.gradient(iMag,*voxel_size)
        wG=np.sqrt(WGradx**2+WGrady**2+WGradz**2)
        thresh=wG[mask.astype('bool')].max()
        num=((mask*wG)>thresh).sum()
        den=(mask>0).sum()        
        while float(num)/den<percentage:    
            thresh=thresh*0.95
            num=((mask*wG)>thresh).sum()
            den=(mask>0).sum()
        wG=(mask*wG)<thresh
        WGradx=WGrady=WGradz=wG*mask
        
        #use CFMM local implementation of MEDI
        #when using matlab implementation of MEDI solver, weighting matrix causes numerical artifacts
        FOV=list(voxel_size*rdf.shape)
        Xmap=pyQSM.Liu2012.SplitBregmanL1Grad(rdf,FOV, WData_sqrd, WGradx,WGrady,WGradz,alpha,lamda,maxiter=maxiter,tol=tol)
        Xmap=Xmap/((CF*1e-9)*2*np.pi)*mask #part per billion

        susceptibility_filename=self._list_outputs()['susceptibility_filename']               
        niftifile=nib.Nifti1Pair(Xmap,rdf_obj.affine)
        nib.save(niftifile,susceptibility_filename)        
        return runtime
Code Example #26
def attach_2_original_brain1(volume,
                             slice_track,
                             size_track,
                             direction,
                             tissue,
                             test=True):

    count = 0
    if test:
        object_number = 13  # testing data
        offset = 11
    else:
        object_number = 10  # training data
        offset = 1
    for i in range(object_number):
        name = 'subject-' + str(offset + i) + tissue + direction + '-label.img'
        print('attaching prediction volume ' + str(offset + i) +
              ' to original volume ( ' + name + ' )')
        subject = np.zeros(shape=(size_track[i][0], size_track[i][1],
                                  size_track[i][2]))
        S = slice_track[i][1] - slice_track[i][0]
        subject[slice_track[i][0]:slice_track[i][1],
                slice_track[object_number][0]:slice_track[object_number][1],
                slice_track[object_number +
                            1][0]:slice_track[object_number +
                                              1][1]] = volume[count:count +
                                                              S, :, :]
        count += slice_track[i][1] - slice_track[i][0]
        if direction == 'coronal':
            subject = np.swapaxes(subject, 0, 1)
        elif direction == 'axial':
            subject = np.swapaxes(subject, 0, 2)
        if (offset + i) == 23:
            ss = np.zeros(shape=(160, 192, 256))
            ss[8:152] = subject
            subject = ss
        subject = nib.Nifti1Pair(subject, np.eye(4))
        nib.save(subject, os.path.join('iSeg-2017-Testing-Results', name))
Code Example #27
def load_nifti_data(brainID, use_HCP_and_MNI):
    '''
    Takes the ID of a brain whose two NIfTI files ('T1' and 'T2') are on disk.

    Returns a list of numpy arrays containing the image measurements
    (T1, T2, and their ratio when both are loaded).
    '''
    os.chdir(config.figShareFolder)
    if use_HCP_and_MNI:
        NIFTI_FILES = ['S1200_AverageT1wDividedByT2w.nii.gz']
    else:
        NIFTI_FILES = [
            'm' + brainID + '_T1.nii.gz', 'm' + brainID + '_T2.nii.gz'
        ]
    #NIFTI_FILES = [brainID + '_T1.nii.gz', brainID + '_T2.nii.gz'] #use images that have not been bias corrected

    data = []
    for f in NIFTI_FILES:
        img = nibabel.load(f)
        #img = nibabel.processing.smooth_image(img, fwhm = 4) #for smoothing experiments

        #print img.header
        data.append(img.get_data())

    if not use_HCP_and_MNI:
        ratio_data = numpy.divide(data[0] * 1.0, data[1] * 1.0)
        data.append(ratio_data)

        ratio_filename = 'm' + brainID + '_T1divideT2.nii.gz'
        #write out ratio image if it doesn't exist already
        if not os.path.isfile(ratio_filename):
            img = nibabel.load(f)  #load a file to get header
            pair_img = nibabel.Nifti1Pair(ratio_data, img.header.get_sform())
            print("Writing ratio image")
            nibabel.save(pair_img, ratio_filename)

    return data
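
Here the derived ratio map is written with the reference image's sform matrix as its affine so that it stays aligned with the source data. The same pattern with current accessors, using made-up file names:

import nibabel

t1 = nibabel.load('mXX_T1.nii.gz')     # hypothetical reference image
t2 = nibabel.load('mXX_T2.nii.gz')
ratio = t1.get_fdata() / t2.get_fdata()           # get_fdata() replaces get_data()
pair_img = nibabel.Nifti1Pair(ratio, t1.affine)   # .affine is the sform when one is set
nibabel.save(pair_img, 'mXX_T1divideT2.nii.gz')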
Code Example #28
 def _run_interface(self, runtime):        
     phase_filename_list=self.inputs.phase
     json_filename_list=self.inputs.json
     truncate_echo=self.inputs.truncate_echo
     if not truncate_echo:
         truncate_echo=None
     mask_filename=self.inputs.mask       
     freq_filename=self._list_outputs()['freq_filename']       
            
     if isdefined(self.inputs.weight):
         weight_filename=self.inputs.weight
         weight=nib.load(weight_filename).get_data()
     else:
         weight=None
     ne=len(phase_filename_list)
     ph_img_obj=nib.load(phase_filename_list[0])
     shape=ph_img_obj.get_shape()
     voxel_size=ph_img_obj.header['pixdim'][1:4]
     ph_img=np.empty(shape+(ne,))
     if weight is None:
         weight=np.ones(shape+(ne,))
     count=0
     phase_filename_list.sort()
     json_filename_list.sort()
     te=np.empty(ne)
     for imgloc,jsonloc in zip(phase_filename_list,json_filename_list):            
         #assert os.path.splitext(jsonloc)[0] in os.path.splitext(imgloc)[0]
         ph_img[...,count]=nib.load(imgloc).get_data()
         with open(jsonloc) as f:
             te[count]=float(json.load(f)['EchoTime'])          
         count+=1
     eps = np.sqrt(sys.float_info.min)
     ph_img = ph_img + eps                     
     mask=nib.load(mask_filename).get_data()
     freq=pyQSM.frequencyEstimate.estimateFrequencyFromWrappedPhase(ph_img,voxel_size,te,mask,weight,truncateEcho=truncate_echo)                
     niftifile=nib.Nifti1Pair(freq,ph_img_obj.affine)
     nib.save(niftifile,freq_filename)           
     return runtime
Code Example #29
File: evaluation.py Project: mi2rl/KiTS19_ACE
def main():
    args = get_arguments()
    assert args.mode
    assert args.testset

    keras.backend.tensorflow_backend.set_session(get_session())

    if not os.path.isdir('./result'):
        os.mkdir('./result')
    if not os.path.isdir(os.path.join('./result', args.mode)):
        os.mkdir(os.path.join('./result', args.mode))

    testlist = sorted([
        os.path.join(args.testset, d) for d in os.listdir(args.testset)
        if 'case' in d
    ])
    config = get_config(args.mode)

    if args.mode == '1':
        ''' coreline '''
        from cascade_1st.Model_ACE_CNet import load_model
        from cascade_1st.run_eval_cascaded import TransAxis, resample_img_asdim, normalize_vol, CCL_check_1ststg, CCL_1ststg_post

        model = load_model(input_shape=(None, None, None, 1),
                           num_labels=1,
                           base_filter=32,
                           depth_size=config['depth'],
                           se_res_block=True,
                           se_ratio=16,
                           last_relu=True)

        model.load_weights(config['checkpoint'])

        for i in tqdm.trange(len(testlist)):
            data = testlist[i]
            img_ct_sag = sitk.ReadImage(os.path.join(data, 'imaging.nii'))
            img_ct_axial = TransAxis(img_ct_sag, dtype=np.int16)
            raw_ct = sitk.GetArrayFromImage(img_ct_axial)
            if int(data.split('_')[1]) == 223:
                raw_ct_original = np.array(raw_ct)
                raw_ct = raw_ct[-180:, :, :]

            raw_ct_shape = np.shape(raw_ct)
            if raw_ct_shape[0] > 200:
                is_large_z = True
            else:
                is_large_z = False

            # right kidney
            if not is_large_z:
                raw_ct_right_shape = (raw_ct_shape[0], raw_ct_shape[1],
                                      int(raw_ct_shape[2] * 3 / 5))
                raw_ct_right_frame_shape = (200, raw_ct_shape[1],
                                            int(raw_ct_shape[2] * 3 / 5))
                raw_ct_right_frame = np.ones(raw_ct_right_frame_shape,
                                             dtype=np.float32) * -1024
                z_start_dst = int((200 - raw_ct_shape[0]) / 2)
                z_end_dst = z_start_dst + raw_ct_shape[0]
                x_start_src = 0
                x_end_src = int(raw_ct_shape[2] * 3 / 5)
                raw_ct_right_frame[
                    z_start_dst:z_end_dst, :, :] = raw_ct[:, :, x_start_src:
                                                          x_end_src]
                img_ct_right = sitk.GetImageFromArray(raw_ct_right_frame)
                img_ct_right_rs = resample_img_asdim(img_ct_right,
                                                     config['input_dim'],
                                                     c_val=-1024)
                raw_ct_right_rs = sitk.GetArrayFromImage(img_ct_right_rs)
                raw_ct_right_rs_normed = normalize_vol(
                    raw_ct_right_rs,
                    norm_wind_lower=config['wlower'],
                    norm_wind_upper=config['wupper'])

                raw_ct_right_rs_normed = np.expand_dims(raw_ct_right_rs_normed,
                                                        axis=0)
                raw_ct_right_rs_normed = np.expand_dims(raw_ct_right_rs_normed,
                                                        axis=-1)
                prediction = model.predict(x=raw_ct_right_rs_normed)
                if np.shape(prediction)[-1] == 1:
                    prediction = np.squeeze(prediction)
                else:
                    prediction = np.squeeze(np.argmax(prediction, axis=-1))

                prediction = prediction[z_start_dst:z_end_dst, :, :]

                raw_pred_right = sitk.GetArrayFromImage(
                    resample_img_asdim(sitk.GetImageFromArray(prediction),
                                       tuple(reversed(raw_ct_right_shape)),
                                       interp=sitk.sitkNearestNeighbor))
                raw_pred_right[np.where(raw_pred_right > 0.5)] = 1
                raw_pred_right = CCL_check_1ststg(raw_pred_right)

            else:
                raw_ct_right_shape = (raw_ct_shape[0], raw_ct_shape[1],
                                      int(raw_ct_shape[2] * 3 / 5))

                raw_pred_right_shape = [
                    raw_ct_shape[0], 200, 200, config['num_labels_1ststg']
                ]
                raw_pred_right_tmp = np.zeros(
                    shape=raw_pred_right_shape)  # raw_ct_shape[0], 200, 200, 3
                raw_pred_right_tmp_cnt = np.zeros(
                    shape=raw_pred_right_shape)  # raw_ct_shape[0], 200, 200, 3

                z_list = list(np.arange(0, raw_ct_shape[0] - 200,
                                        100)) + [raw_ct_shape[0] - 200]
                x_start_src = 0
                x_end_src = int(raw_ct_shape[2] * 3 / 5)

                for z_start in z_list:
                    raw_ct_right_frame_shape = (200, raw_ct_shape[1],
                                                int(raw_ct_shape[2] * 3 / 5))
                    raw_ct_right_frame = np.ones(raw_ct_right_frame_shape,
                                                 dtype=np.float32) * -1024
                    raw_ct_right_frame[:, :, :] = raw_ct[z_start:z_start +
                                                         200, :,
                                                         x_start_src:x_end_src]
                    img_ct_right = sitk.GetImageFromArray(raw_ct_right_frame)
                    img_ct_right_rs = resample_img_asdim(img_ct_right,
                                                         config['input_dim'],
                                                         c_val=-1024)
                    raw_ct_right_rs = sitk.GetArrayFromImage(img_ct_right_rs)
                    raw_ct_right_rs_normed = normalize_vol(
                        raw_ct_right_rs,
                        norm_wind_lower=config['wlower'],
                        norm_wind_upper=config['wupper'])

                    raw_ct_right_rs_normed = np.expand_dims(
                        raw_ct_right_rs_normed, axis=0)
                    raw_ct_right_rs_normed = np.expand_dims(
                        raw_ct_right_rs_normed, axis=-1)
                    prediction = np.squeeze(
                        model.predict(x=raw_ct_right_rs_normed), axis=0)

                    raw_pred_right_tmp[z_start:z_start +
                                       200, :, :, :] += prediction
                    raw_pred_right_tmp_cnt[z_start:z_start + 200, :, :, :] += 1

                raw_pred_right_tmp[np.where(
                    raw_pred_right_tmp_cnt > 0)] /= raw_pred_right_tmp_cnt[
                        np.where(raw_pred_right_tmp_cnt > 0)]

                if config['num_labels_1ststg'] != 1:
                    prediction = np.argmax(raw_pred_right_tmp, axis=-1)
                else:
                    prediction = np.squeeze(raw_pred_right_tmp)
                    prediction[np.where(prediction > 0.5)] = 1

                raw_pred_right = sitk.GetArrayFromImage(
                    resample_img_asdim(sitk.GetImageFromArray(prediction),
                                       tuple(reversed(raw_ct_right_shape)),
                                       interp=sitk.sitkNearestNeighbor))
                raw_pred_right[np.where(raw_pred_right > 0.5)] = 1
                raw_pred_right = CCL_check_1ststg(raw_pred_right)

            # left kidney
            if not is_large_z:
                z_start_dst = int((200 - raw_ct_shape[0]) / 2)
                z_end_dst = z_start_dst + raw_ct_shape[0]
                x_start_src = int(raw_ct_shape[2] * 2 / 5)
                x_end_src = raw_ct_shape[2]
                raw_ct_left_shape = (raw_ct_shape[0], raw_ct_shape[1],
                                     x_end_src - x_start_src)
                raw_ct_left_frame_shape = (200, raw_ct_shape[1],
                                           x_end_src - x_start_src)
                raw_ct_left_frame = np.ones(raw_ct_left_frame_shape,
                                            dtype=np.float32) * -1024
                raw_ct_left_frame[
                    z_start_dst:z_end_dst, :, :] = raw_ct[:, :, x_start_src:
                                                          x_end_src]
                raw_ct_left_frame = raw_ct_left_frame[:, :, -1::-1]
                img_ct_left = sitk.GetImageFromArray(raw_ct_left_frame)
                img_ct_left_rs = resample_img_asdim(img_ct_left,
                                                    config['input_dim'],
                                                    c_val=-1024)
                raw_ct_left_rs = sitk.GetArrayFromImage(img_ct_left_rs)
                raw_ct_left_rs_normed = normalize_vol(
                    raw_ct_left_rs,
                    norm_wind_lower=config['wlower'],
                    norm_wind_upper=config['wupper'])

                raw_ct_left_rs_normed = np.expand_dims(raw_ct_left_rs_normed,
                                                       axis=0)
                raw_ct_left_rs_normed = np.expand_dims(raw_ct_left_rs_normed,
                                                       axis=-1)
                prediction = model.predict(x=raw_ct_left_rs_normed)

                if np.shape(prediction)[-1] == 1:
                    prediction = np.squeeze(prediction)
                else:
                    prediction = np.squeeze(np.argmax(prediction, axis=-1))

                prediction = prediction[z_start_dst:z_end_dst, :, :]

                raw_pred_left = sitk.GetArrayFromImage(
                    resample_img_asdim(sitk.GetImageFromArray(prediction),
                                       tuple(reversed(raw_ct_left_shape)),
                                       interp=sitk.sitkNearestNeighbor))
                raw_pred_left[np.where(raw_pred_left > 0.5)] = 1
                raw_pred_left = CCL_check_1ststg(raw_pred_left)

            else:
                raw_ct_left_shape = (raw_ct_shape[0], raw_ct_shape[1],
                                     int(raw_ct_shape[2] * 3 / 5))

                raw_pred_left_shape = [
                    raw_ct_shape[0], 200, 200, config['num_labels_1ststg']
                ]
                raw_pred_left_tmp = np.zeros(
                    shape=raw_pred_left_shape)  # raw_ct_shape[0], 200, 200, 3
                raw_pred_left_tmp_cnt = np.zeros(
                    shape=raw_pred_left_shape)  # raw_ct_shape[0], 200, 200, 3

                z_list = list(np.arange(0, raw_ct_shape[0] - 200,
                                        100)) + [raw_ct_shape[0] - 200]
                x_start_src = 0
                x_end_src = int(raw_ct_shape[2] * 3 / 5)
                for z_start in z_list:
                    raw_ct_left_frame_shape = (200, raw_ct_shape[1],
                                               int(raw_ct_shape[2] * 3 / 5))
                    raw_ct_left_frame = np.ones(raw_ct_left_frame_shape,
                                                dtype=np.float32) * -1024
                    raw_ct_left_frame[:, :, :] = raw_ct[
                        z_start:z_start + 200, :, -raw_ct_left_frame_shape[2]:]
                    raw_ct_left_frame = raw_ct_left_frame[:, :, -1::-1]
                    img_ct_left = sitk.GetImageFromArray(raw_ct_left_frame)
                    img_ct_left_rs = resample_img_asdim(img_ct_left,
                                                        config['input_dim'],
                                                        c_val=-1024)
                    raw_ct_left_rs = sitk.GetArrayFromImage(img_ct_left_rs)
                    raw_ct_left_rs_normed = normalize_vol(
                        raw_ct_left_rs,
                        norm_wind_lower=config['wlower'],
                        norm_wind_upper=config['wupper'])

                    raw_ct_left_rs_normed = np.expand_dims(
                        raw_ct_left_rs_normed, axis=0)
                    raw_ct_left_rs_normed = np.expand_dims(
                        raw_ct_left_rs_normed, axis=-1)
                    prediction = np.squeeze(
                        model.predict(x=raw_ct_left_rs_normed), axis=0)

                    raw_pred_left_tmp[z_start:z_start +
                                      200, :, :, :] += prediction
                    raw_pred_left_tmp_cnt[z_start:z_start + 200, :, :, :] += 1

                raw_pred_left_tmp[np.where(
                    raw_pred_left_tmp_cnt > 0)] /= raw_pred_left_tmp_cnt[
                        np.where(raw_pred_left_tmp_cnt > 0)]
                if config['num_labels_1ststg'] != 1:
                    prediction = np.argmax(raw_pred_left_tmp, axis=-1)
                else:
                    prediction = np.squeeze(raw_pred_left_tmp)
                    prediction[np.where(prediction > 0.5)] = 1

                raw_pred_left = sitk.GetArrayFromImage(
                    resample_img_asdim(sitk.GetImageFromArray(prediction),
                                       tuple(reversed(raw_ct_left_shape)),
                                       interp=sitk.sitkNearestNeighbor))
                raw_pred_left[np.where(raw_pred_left > 0.5)] = 1
                raw_pred_left = CCL_check_1ststg(raw_pred_left)

            # check if both kidneys are valid
            raw_pred_whole = np.zeros(np.shape(raw_ct), dtype=np.uint8)
            raw_pred_right_shape = np.shape(raw_pred_right)
            raw_pred_whole[:, :, :raw_pred_right_shape[2]] = raw_pred_right
            raw_pred_left_shape = np.shape(raw_pred_left)
            raw_pred_left[:, :, :] = raw_pred_left[:, :, -1::-1]
            raw_pred_whole_left_tmp = raw_pred_whole[:, :,
                                                     -raw_pred_left_shape[2]:]
            raw_pred_whole_left_tmp[np.where(
                raw_pred_left > 0)] = raw_pred_left[np.where(
                    raw_pred_left > 0)]
            raw_pred_whole[:, :,
                           -raw_pred_left_shape[2]:] = raw_pred_whole_left_tmp
            raw_pred_whole = CCL_1ststg_post(raw_pred_whole)

            if int(data.split('_')[1]) == 223:
                raw_pred_whole_tmp = np.zeros(np.shape(raw_ct_original),
                                              dtype=np.uint8)
                raw_pred_whole_tmp[-180:, :, :] = raw_pred_whole
                raw_pred_whole = raw_pred_whole_tmp

            x_nib = nib.load(os.path.join(data, 'imaging.nii'))
            p_nib = nib.Nifti1Image(raw_pred_whole[-1::-1], x_nib.affine)
            nib.save(
                p_nib,
                os.path.join('./result', args.mode,
                             'prediction_' + data.split('_')[1] + '.nii'))

    else:
        if args.mode == '2_1':
            ''' coreline '''
            from cascade_2nd.model_1.Model_ACE_CNet_2ndstg import load_model
            from cascade_1st.run_eval_cascaded import TransAxis, resample_img_asdim, normalize_vol, CCL

            model = load_model(input_shape=(None, None, None, 1),
                               num_labels=3,
                               base_filter=32,
                               depth_size=config['depth'],
                               se_res_block=True,
                               se_ratio=16,
                               last_relu=False)

            model.load_weights(config['checkpoint'])

            for i in tqdm.trange(len(testlist)):
                data = testlist[i]
                img_ct_sag = sitk.ReadImage(os.path.join(data, 'imaging.nii'))
                img_ct_axial = TransAxis(img_ct_sag, dtype=np.int16)
                raw_ct = sitk.GetArrayFromImage(img_ct_axial)
                if int(data.split('_')[1]) == 223:
                    raw_ct_original = np.array(raw_ct)
                    raw_ct = raw_ct[-180:, :, :]

                raw_ct_shape = np.shape(raw_ct)

                if os.path.isfile(
                        os.path.join(
                            './result/1',
                            'prediction_' + data.split('_')[1] + '.nii')):
                    img_gt_sag = sitk.ReadImage(
                        os.path.join(
                            './result/1',
                            'prediction_' + data.split('_')[1] + '.nii'))
                    img_gt_axial = TransAxis(img_gt_sag, dtype=np.uint8)
                    raw_gt = sitk.GetArrayFromImage(img_gt_axial)
                    if int(data.split('_')[1]) == 223:
                        raw_gt_original = np.array(raw_gt)
                        raw_gt = raw_gt[-180:, :, :]
                else:
                    raise ValueError('No masks here. Run model_1 first.')

                idcs_label_1 = np.where(raw_gt == 1)
                label_1_x_pos = np.mean(idcs_label_1[2])
                idcs_label_2 = np.where(raw_gt == 2)

                if len(idcs_label_2[0]) > len(idcs_label_1[0]) * 0.2:
                    is_both_kidney = True
                    label_2_x_pos = np.mean(idcs_label_2[2])
                else:
                    is_both_kidney = False

                if is_both_kidney:
                    if label_1_x_pos > label_2_x_pos:
                        # swap label btw. 1 and 2
                        raw_gt[idcs_label_1] = 2
                        raw_gt[idcs_label_2] = 1
                        is_left_kidney = True
                        is_right_kidney = True
                    else:
                        is_left_kidney = True
                        is_right_kidney = True
                else:
                    if np.min(idcs_label_1[2]) < raw_ct_shape[2] / 2:
                        raw_gt[idcs_label_1] = 1
                        raw_gt[idcs_label_2] = 0
                        is_right_kidney = True
                        is_left_kidney = False
                    else:
                        raw_gt[idcs_label_1] = 2
                        raw_gt[idcs_label_2] = 0
                        is_right_kidney = False
                        is_left_kidney = True

                # extract kidney coordinate
                if is_right_kidney:
                    idcs_label_1 = np.where(raw_gt == 1)
                    kidney_right_start = (np.max(
                        (np.min(idcs_label_1[0] - 16),
                         0)), np.max(
                             (np.min(idcs_label_1[1] - 16),
                              0)), np.max((np.min(idcs_label_1[2] - 16), 0)))
                    kidney_right_end = (np.min(
                        (np.max(idcs_label_1[0] + 16), raw_ct_shape[0])),
                                        np.min((np.max(idcs_label_1[1] + 16),
                                                raw_ct_shape[1])),
                                        np.min((np.max(idcs_label_1[2] + 16),
                                                raw_ct_shape[2])))

                if is_left_kidney:
                    idcs_label_2 = np.where(raw_gt == 2)
                    kidney_left_start = (np.max(
                        (np.min(idcs_label_2[0] - 16),
                         0)), np.max(
                             (np.min(idcs_label_2[1] - 16),
                              0)), np.max((np.min(idcs_label_2[2] - 16), 0)))
                    kidney_left_end = (np.min(
                        (np.max(idcs_label_2[0] + 16), raw_ct_shape[0])),
                                       np.min((np.max(idcs_label_2[1] + 16),
                                               raw_ct_shape[1])),
                                       np.min((np.max(idcs_label_2[2] + 16),
                                               raw_ct_shape[2])))

                # Seg right kidney if it is valid
                if is_right_kidney:
                    # right kidney
                    raw_ct_right_2nd_shape = (int(kidney_right_end[0] -
                                                  kidney_right_start[0]),
                                              int(kidney_right_end[1] -
                                                  kidney_right_start[1]),
                                              int(kidney_right_end[2] -
                                                  kidney_right_start[2]))

                    raw_ct_right_frame = np.ones(raw_ct_right_2nd_shape,
                                                 dtype=np.float32) * -1024
                    raw_ct_right_frame[:, :, :] = raw_ct[
                        kidney_right_start[0]:kidney_right_end[0],
                        kidney_right_start[1]:kidney_right_end[1],
                        kidney_right_start[2]:kidney_right_end[2]]
                    img_ct_right = sitk.GetImageFromArray(raw_ct_right_frame)
                    img_ct_right_rs = resample_img_asdim(img_ct_right,
                                                         config['input_dim'],
                                                         c_val=-1024)
                    raw_ct_right_rs = sitk.GetArrayFromImage(img_ct_right_rs)
                    raw_ct_right_rs_normed = normalize_vol(
                        raw_ct_right_rs,
                        norm_wind_lower=config['wlower'],
                        norm_wind_upper=config['wupper'])

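                    # add batch and channel axes for the network input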
                    raw_ct_right_rs_normed = np.expand_dims(
                        raw_ct_right_rs_normed, axis=0)
                    raw_ct_right_rs_normed = np.expand_dims(
                        raw_ct_right_rs_normed, axis=-1)
                    prediction = model.predict(x=raw_ct_right_rs_normed)
                    if np.shape(prediction)[-1] == 1:
                        prediction = np.squeeze(prediction)
                    else:
                        prediction = np.squeeze(np.argmax(prediction, axis=-1))

                    raw_pred_right = sitk.GetArrayFromImage(
                        resample_img_asdim(
                            sitk.GetImageFromArray(prediction),
                            tuple(reversed(raw_ct_right_2nd_shape)),
                            interp=sitk.sitkNearestNeighbor))

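                    # binarize the prediction and keep only voxels inside the
                    # connected components returned by CCL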
                    raw_pred_right_tmp = np.array(raw_pred_right)
                    raw_pred_right_tmp[np.where(raw_pred_right_tmp > 0)] = 1
                    raw_pred_right_tmp = CCL(raw_pred_right_tmp, num_labels=2)
                    raw_pred_right[np.where(raw_pred_right_tmp == 0)] = 0
                    raw_ct_right = np.array(
                        raw_ct[kidney_right_start[0]:kidney_right_end[0],
                               kidney_right_start[1]:kidney_right_end[1],
                               kidney_right_start[2]:kidney_right_end[2]])

                if is_left_kidney:
                    # left kidney
                    raw_ct_left_2nd_shape = (
                        int(kidney_left_end[0] - kidney_left_start[0]),
                        int(kidney_left_end[1] - kidney_left_start[1]),
                        int(kidney_left_end[2] - kidney_left_start[2]))
                    raw_ct_left_frame = np.ones(raw_ct_left_2nd_shape,
                                                dtype=np.float32) * -1024
                    raw_ct_left_frame[:, :, :] = raw_ct[
                        kidney_left_start[0]:kidney_left_end[0],
                        kidney_left_start[1]:kidney_left_end[1],
                        kidney_left_start[2]:kidney_left_end[2]]
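                    # mirror the left-kidney crop along the last axis (undone after prediction)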
                    raw_ct_left_frame = raw_ct_left_frame[:, :, -1::-1]
                    img_ct_left = sitk.GetImageFromArray(raw_ct_left_frame)
                    img_ct_left_rs = resample_img_asdim(img_ct_left,
                                                        config['input_dim'],
                                                        c_val=-1024)
                    raw_ct_left_rs = sitk.GetArrayFromImage(img_ct_left_rs)
                    raw_ct_left_rs_normed = normalize_vol(
                        raw_ct_left_rs,
                        norm_wind_lower=config['wlower'],
                        norm_wind_upper=config['wupper'])

                    raw_ct_left_rs_normed = np.expand_dims(
                        raw_ct_left_rs_normed, axis=0)
                    raw_ct_left_rs_normed = np.expand_dims(
                        raw_ct_left_rs_normed, axis=-1)
                    prediction = model.predict(x=raw_ct_left_rs_normed)

                    if np.shape(prediction)[-1] == 1:
                        prediction = np.squeeze(prediction)
                    else:
                        prediction = np.squeeze(np.argmax(prediction, axis=-1))

                    raw_pred_left = sitk.GetArrayFromImage(
                        resample_img_asdim(
                            sitk.GetImageFromArray(prediction),
                            tuple(reversed(raw_ct_left_2nd_shape)),
                            interp=sitk.sitkNearestNeighbor))
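                    # flip the prediction back to the original left-kidney orientation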
                    raw_pred_left = raw_pred_left[:, :, -1::-1]

                    raw_pred_left_tmp = np.array(raw_pred_left)
                    raw_pred_left_tmp[np.where(raw_pred_left_tmp > 0)] = 1
                    raw_pred_left_tmp = CCL(raw_pred_left_tmp, num_labels=2)
                    raw_pred_left[np.where(raw_pred_left_tmp == 0)] = 0
                    raw_ct_left = np.array(
                        raw_ct[kidney_left_start[0]:kidney_left_end[0],
                               kidney_left_start[1]:kidney_left_end[1],
                               kidney_left_start[2]:kidney_left_end[2]])

                raw_pred_whole = np.zeros(np.shape(raw_ct), dtype=np.uint8)

                if is_right_kidney:
                    raw_pred_whole[
                        kidney_right_start[0]:kidney_right_end[0],
                        kidney_right_start[1]:kidney_right_end[1],
                        kidney_right_start[2]:kidney_right_end[2]] = raw_pred_right
                if is_left_kidney:
                    # copy the left-kidney prediction into the whole-volume mask,
                    # keeping existing labels wherever the left prediction is background
                    raw_pred_whole_left_tmp = raw_pred_whole[
                        kidney_left_start[0]:kidney_left_end[0],
                        kidney_left_start[1]:kidney_left_end[1],
                        kidney_left_start[2]:kidney_left_end[2]]
                    left_fg = np.where(raw_pred_left > 0)
                    raw_pred_whole_left_tmp[left_fg] = raw_pred_left[left_fg]
                    raw_pred_whole[
                        kidney_left_start[0]:kidney_left_end[0],
                        kidney_left_start[1]:kidney_left_end[1],
                        kidney_left_start[2]:kidney_left_end[2]] = raw_pred_whole_left_tmp

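                # special case 223: the working volume covers only the last 180
                # slices of the original CT, so pad the prediction back to the
                # full extent before saving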
                if int(data.split('_')[1]) == 223:
                    raw_pred_whole_tmp = np.zeros(np.shape(raw_ct_original),
                                                  dtype=np.uint8)
                    raw_pred_whole_tmp[-180:, :, :] = raw_pred_whole
                    raw_pred_whole = raw_pred_whole_tmp

                x_nib = nib.load(os.path.join(data, 'imaging.nii'))
                p_nib = nib.Nifti1Image(raw_pred_whole[-1::-1], x_nib.affine)
                nib.save(
                    p_nib,
                    os.path.join('./result', args.mode,
                                 'prediction_' + data.split('_')[1] + '.nii'))

        else:
            ''' mi2rl '''
            from cascade_2nd.model_2_5.model import MyModel
            from cascade_2nd.model_2_5.load_data import Preprocessing

            model = MyModel(model=args.mode,
                            input_shape=(None, None, None, 1),
                            lossfn=config['lossfn'],
                            classes=3,
                            depth=config['depth'])

            model.mymodel.load_weights(config['checkpoint'])

            prep = Preprocessing(task=config['task'],
                                 standard=config['standard'],
                                 wlevel=config['wlevel'],
                                 wwidth=config['wwidth'],
                                 rotation_range=[0., 0., 0.])

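            # the tumor task runs two passes (idx = 0 and 1); other tasks run one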
            loop = 2 if config['task'] == 'tumor' else 1
            for i in tqdm.trange(len(testlist)):
                data = testlist[i]
                img_orig = sitk.ReadImage(os.path.join(data, 'imaging.nii'))
                mask_orig = sitk.ReadImage(
                    os.path.join('./result/1',
                                 'prediction_' + data.split('_')[1] + '.nii'))

                result_save = np.zeros_like(sitk.GetArrayFromImage(mask_orig))
                for idx in range(loop):
                    img, mask, spacing = prep._array2img([img_orig, mask_orig],
                                                         True)
                    if config['task'] == 'tumor':
                        img, mask, flag, bbox = prep._getvoi([img, mask, idx],
                                                             True)
                    else:
                        img, mask, flag, bbox, diff, diff1 = prep._getvoi(
                            [img, mask, idx], True)
                    if flag:
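                        # for the tumor task, the second VOI (idx == 1) is
                        # mirrored here and flipped back after prediction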
                        if idx == 1 and config['task'] == 'tumor':
                            img, mask = prep._horizontal_flip([img, mask])

                        img = prep._windowing(img)
                        img = prep._standard(img)
                        mask = prep._onehot(mask)
                        img, mask = prep._expand([img, mask])

                        result = model.mymodel.predict_on_batch(img)
                        result = np.argmax(np.squeeze(result), axis=-1)
                        label = np.argmax(np.squeeze(mask), axis=-1)

                        if config['task'] == 'tumor':
                            if idx == 1:
                                img, result = prep._horizontal_flip(
                                    [img, result])
                            result_save[
                                np.maximum(0, bbox[0]):
                                np.minimum(result_save.shape[0] - 1, bbox[1] + 1),
                                np.maximum(0, bbox[2]):
                                np.minimum(result_save.shape[1] - 1, bbox[3] + 1),
                                np.maximum(0, bbox[4]):
                                np.minimum(result_save.shape[2] - 1, bbox[5] + 1)] = result

                        elif config['task'] == 'tumor1':
                            threshold = [380, 230, 72]
                            mask_orig = sitk.GetArrayFromImage(mask_orig)
                            # trim the network output back to the bounding-box extent;
                            # a trailing-crop index of 0 would produce an empty slice,
                            # so the full axis length is used in that case
                            stop = [
                                -diff[d] // 2 - diff1[d]
                                if -diff[d] // 2 - diff1[d] != 0 else result.shape[d]
                                for d in range(3)
                            ]
                            result_save[
                                np.maximum(0, bbox[0]):
                                np.minimum(result_save.shape[0], bbox[1]),
                                np.maximum(0, bbox[2]):
                                np.minimum(result_save.shape[1], bbox[3]),
                                np.maximum(0, bbox[4]):
                                np.minimum(result_save.shape[2], bbox[5])] = result[
                                    diff[0] // 2:stop[0],
                                    diff[1] // 2:stop[1],
                                    diff[2] // 2:stop[2]]

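                # reorder the axes before building the NIfTI pair
                # (net effect: result_save.transpose(2, 1, 0))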
                temp2 = np.swapaxes(result_save, 1, 2)
                temp2 = np.swapaxes(temp2, 0, 1)
                temp2 = np.swapaxes(temp2, 1, 2)

                img_pair = nib.Nifti1Pair(
                    temp2, np.diag([-spacing[0], spacing[1], spacing[2], 1]))
                nib.save(
                    img_pair,
                    os.path.join('./result', args.mode,
                                 'prediction_' + data.split('_')[1] + '.nii'))
コード例 #30
0
import glob
import os
from pathlib import Path

import nibabel as nib
from scipy import ndimage
import numpy as np

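# dataset_folder: root directory with one sub-folder per case, each containing a
# 'truth.nii.gz' label map; sampling: per-axis voxel spacing (assumed to be in mm)
# passed to the Euclidean distance transform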
dataset_folder = ''
ext = '.gz'
sampling = (0.4, 0.4, 3.0)

for mask_path in glob.glob(os.path.join(dataset_folder, '*',
                                        'truth.nii' + ext)):
    print(Path(mask_path).parent.stem)

    mask = nib.load(mask_path).get_data()

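    # dists: inside the mask, distance to the nearest background voxel;
    # dists_inv: outside the mask, distance to the nearest foreground voxel;
    # their sum gives the distance to the label boundary at every voxel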
    dists = ndimage.morphology.distance_transform_edt(mask, sampling=sampling)
    dists_inv = ndimage.morphology.distance_transform_edt(1 - mask,
                                                          sampling=sampling)
    total_dists = dists + dists_inv

    nib.save(nib.Nifti1Pair(total_dists, np.eye(4)),
             os.path.join(Path(mask_path).parent, 'dists.nii.gz'))