Code Example #1
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input images
    input_data, input_header = load(args.input)
    original_data, _ = load(args.original)
    
    logger.debug('Old shape={}.'.format(input_data.shape))
    
    # compute position
    logger.info('Computing position and pad volume...')
    position = __parse_contour_list(args.contours, input_data)
    
    # pad volume
    output_data = scipy.zeros(original_data.shape, input_data.dtype)
    output_data[position] = input_data
    
    
    logger.debug('New shape={}.'.format(output_data.shape))
    
    # save result contour volume
    save(output_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Code Example #2
File: threshold.py Project: loli/neuropipeline
def main():
	i, h = load(sys.argv[1])
	thr = float(sys.argv[2])

	o = i >= thr

	save(o, sys.argv[3], h)
Code Example #3
File: align.py Project: loli/neuropipeline
def main():
	i1, h1 = load(sys.argv[1])
	i2, h2 = load(sys.argv[2])

	# shift image to align origins
	origin_h1 = numpy.sign(h1.get_qform()[0:3,0:3]).dot(header.get_offset(h1))
	origin_h2 = numpy.sign(h2.get_qform()[0:3,0:3]).dot(header.get_offset(h2))
	origin_difference_pixel = (origin_h1 - origin_h2) / numpy.asarray(header.get_pixel_spacing(h1))
	# negative values: shift image 1 by this upon inserting (which is the same as cutting the output image)
	# positive values: cut image 1 by this at inserting and also cut right side by length of output image plus this value
	o = numpy.zeros(i2.shape, i2.dtype)
	o_slicer = []
	i_slicer = []
	for j, p in enumerate(origin_difference_pixel):
		if p >= 0:
			i_slicer.append(slice(0,      min(i1.shape[j], o.shape[j] - abs(p))))
			o_slicer.append(slice(abs(p), min(i1.shape[j] + abs(p), o.shape[j])))
		else:
			i_slicer.append(slice(abs(p), min(i1.shape[j], o.shape[j] + abs(p))))
			o_slicer.append(slice(0,      min(i1.shape[j] - abs(p), o.shape[j])))

	o[o_slicer] = i1[i_slicer]
	header.set_offset(h1, header.get_offset(h2))
	
	save(o, sys.argv[3], h1)
Code Example #4
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    # transform to uint8
    data_input = data_input.astype(scipy.uint8)
                                      
    # reduce to 3D, if larger dimensionality
    if data_input.ndim > 3:
        for _ in range(data_input.ndim - 3): data_input = data_input[...,0]
        
    # iterate over the slices (2D) until the first one with content is detected
    for plane in data_input:
        if scipy.any(plane):
            # set pixel spacing
            spacing = list(header.get_pixel_spacing(header_input))
            spacing = spacing[1:3]
            __update_header_from_array_nibabel(header_input, plane)
            header.set_pixel_spacing(header_input, spacing)
            # save image
            save(plane, args.output, header_input, args.force)
            break
    
    logger.info("Successfully terminated.")    
Code Example #5
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image already exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Exiting.'.format(args.output))
            exit(-1)

    # load input image
    image_smoothed_data, image_header = load(args.input)
        
    # apply additional hole closing step
    logger.info('Closing holes...')
    def fun_holes(arr):
        return scipy.ndimage.morphology.binary_fill_holes(arr)
    xd_iterator(image_smoothed_data, (1, 2), fun_holes)
        
    # perform the selected morphological operation (erosion, dilation, opening or closing)
    # in 3D case: size 1 = 6-connectedness, 2 = 12-connectedness, 3 = 18-connectedness, etc.
    if 'erosion' == args.type:
        logger.info('Applying erosion...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_erosion(arr, footprint, iterations=args.iterations)
    elif 'dilation' == args.type:
        logger.info('Applying dilation...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_dilation(arr, footprint, iterations=args.iterations)
    elif 'opening' == args.type:
        logger.info('Applying opening...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_opening(arr, footprint, iterations=args.iterations)
    else: # closing
        logger.info('Applying closing...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_closing(arr, footprint, iterations=args.iterations)

    # iterate over slices and apply selected operation
    xd_iterator(image_smoothed_data, (1, 2), fun)

    # save resulting mask
    save(image_smoothed_data, args.output, image_header, args.force)
            
    logger.info('Successfully terminated.')
Code Example #6
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    logger.debug('Original shape = {}.'.format(data_input.shape))
    
    # check if the supplied dimension parameters are inside the image's dimensions
    if args.dimension1 >= data_input.ndim or args.dimension1 < 0:
        raise ArgumentError('The first swap-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension1, data_input.ndim))
    elif args.dimension2 >= data_input.ndim or args.dimension2 < 0:
        raise ArgumentError('The second swap-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension2, data_input.ndim))
    
    # swap axes
    data_output = scipy.swapaxes(data_input, args.dimension1, args.dimension2)
    # swap pixel spacing and offset
    ps = list(header.get_pixel_spacing(header_input))
    ps[args.dimension1], ps[args.dimension2] = ps[args.dimension2], ps[args.dimension1]
    header.set_pixel_spacing(header_input, ps)
    os = list(header.get_offset(header_input))
    os[args.dimension1], os[args.dimension2] = os[args.dimension2], os[args.dimension1]
    header.set_offset(header_input, os)
    
    logger.debug('Resulting shape = {}.'.format(data_output.shape))
    
    # save resulting volume
    save(data_output, args.output, header_input, args.force)
    
    logger.info("Successfully terminated.")    
Code Example #7
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists (will also be performed before saving, but as the watershed might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError("The output image {} already exists.".format(args.output))

    # loading image
    data_input, header_input = load(args.input)

    # apply the watershed
    logger.info("Watershedding with settings: thr={} / level={}...".format(args.threshold, args.level))
    data_output = watershed(data_input, get_pixel_spacing(header_input), args.threshold, args.level)

    # save file
    save(data_output, args.output, header_input, args.force)

    logger.info("Successfully terminated.")
Code Example #8
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # check if output image exists (will also be performed before saving, but as the smoothing might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise parser.error('The output image {} already exists.'.format(args.output))
    
    # loading image
    data_input, header_input = load(args.input)
    
    # apply the anisotropic diffusion
    logger.info('Applying anisotropic diffusion with settings: niter={} / kappa={} / gamma={}...'.format(args.iterations, args.kappa, args.gamma))
    data_output = anisotropic_diffusion(data_input, args.iterations, args.kappa, args.gamma, get_pixel_spacing(header_input))

    # save file
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Code Example #9
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning("The output image {} already exists. Exiting.".format(args.output))
            exit(-1)

    # load input image
    input_data, input_header = load(args.input)

    logger.debug("Old number of regions={}.".format(len(scipy.unique(input_data))))

    # cut and relabel along the required dimension
    logger.info("Cutting and relabeling...")
    dimensions = range(input_data.ndim)
    del dimensions[args.dimension]
    __split_along(input_data, dimensions)

    logger.debug("New number of regions={}.".format(len(scipy.unique(input_data))))

    # save result contour volume
    save(input_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Code Example #10
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    input_data, input_header = load(args.input)
    
    logger.debug('Old shape={}.'.format(input_data.shape))
    
    # compute cut
    logger.info('Computing cut and cropping volume...')
    cut = __parse_contour_list(args.contours, input_data)
    # crop volume
    input_data = input_data[cut]
    
    logger.debug('New shape={}.'.format(input_data.shape))
    
    # save result contour volume
    save(input_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Code Example #11
File: unification.py Project: loli/neuroless
def sresamplebyexample(src, dest, referenceimage, binary = False):
    r"""
    Secure-re-sample an image located at ``src`` by example ``referenceimage`` and
    save it under ``dest``.
    
    Parameters
    ----------
    src : string
        Source image file.
    dest : string
        Destination image file.
    referenceimage : string
        Reference image displaying the target spacing, origin and size.
    binary : bool
        Set to ``True`` for binary images.
    """
    # get target voxel spacing
    refimage, refhdr = load(referenceimage)
    spacing = header.get_pixel_spacing(refhdr)
    
    with tmpdir() as t:
        # create a temporary copy of the reference image with the source image data-type (imiImageResample requires both images to be of the same dtype)
        srcimage, _ = load(src)
        save(refimage.astype(srcimage.dtype), os.path.join(t, 'ref.nii.gz'), refhdr)
    
        # prepare and run registration command
        cmd = ['imiImageResample', '-I', src, '-O', dest, '-R', os.path.join(t, 'ref.nii.gz'), '-s'] + map(str, spacing)
        if binary:
            cmd += ['-b']
        rtcode, stdout, stderr = call(cmd)
    
    # check if successful
    if not os.path.isfile(dest):
        raise CommandExecutionError(cmd, rtcode, stdout, stderr, 'Binary re-sampling result image not created.')
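A minimal usage sketch of the function above (the file names are hypothetical, and it assumes the imiImageResample binary is available on the PATH, as the function itself requires):

# resample a hypothetical intensity image onto the grid of a reference image
sresamplebyexample('flair.nii.gz', 'flair_resampled.nii.gz', 'reference.nii.gz')
# for label or mask images, request the binary handling of imiImageResample
sresamplebyexample('mask.nii.gz', 'mask_resampled.nii.gz', 'reference.nii.gz', binary=True)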
Code Example #12
File: _test.py Project: ShimonaNiharika/MedIA
def test02(img, idx):
    # TEST 02: CAN THEY BE LOADED AGAIN WITHOUT A CHANGE OF DATA TYPE OR DATA CONTENT?
    for dt in dtypes:
        print '\n:::{}:::'.format(dt).upper()
        for t in types_int:
            print t.upper(), '\t->',
             
            try:
                img1 = img.astype(dt)
                name2 = tmp_folder + '.'.join(['tmp', t])
                save(img1, name2, hdr, True)
                try:
                    img2, _ = load(name2)
                    if img2.dtype == img1.dtype and img2[idx] == img1[idx]:
                        print True
                    elif img2.dtype == img1.dtype:
                        print 'dtype: {} / value: {} != {}'.format(True, img2[idx], img1[idx])
                    elif img2[idx] == img1[idx]:
                        print 'dtype: {} != {} / value: {}'.format(img2.dtype, img1.dtype, True)
                    else:
                        print 'dtype: {} != {} / value: {} != {}'.format(img2.dtype, img1.dtype, img2[idx], img1[idx])
                except Exception as e:
                    print 'loading failed, reason: {}'.format(e)    
                
            except Exception as e:
                print 'saving unsupported'
Code Example #13
File: medpy_reduce.py Project: AlexanderRuesch/medpy
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image using nibabel
    logger.info('Loading image {}...'.format(args.image))
    image_labels_data, _ = load(args.image)    
    
    # load mask image
    logger.info('Loading mask {}...'.format(args.mask))
    image_mask_data, image_mask_data_header = load(args.mask)
    
    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Skipping this image.'.format(args.output))
    
    # create a mask from the label image
    logger.info('Reducing the label image...')
    image_reduced_data = fit_labels_to_mask(image_labels_data, image_mask_data)
    
    # save resulting mask
    logger.info('Saving resulting mask as {} in the same format as input mask, only with data-type int8...'.format(args.output))
    image_reduced_data = image_reduced_data.astype(numpy.bool, copy=False) # bool sadly not recognized
    save(image_reduced_data, args.output, image_mask_data_header, args.force)
    
    logger.info('Successfully terminated.')
Code Example #14
File: flip.py Project: loli/nspipeline
def main():
	_file = sys.argv[1]
	dim = int(sys.argv[2])

	i, h = load(_file)
	i = flip_axis(i, dim).copy()
	save(i, _file, h)
Code Example #15
File: threshold.py Project: loli/atlasoverlap
def main():
	i, h = load(sys.argv[1])
	thr = float(sys.argv[2])
	
	i = i.copy()

	save(i >= thr, sys.argv[1], h)
Code Example #16
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
#    # check if output image exists
#    if not args.force:
#        if os.path.exists(image_gradient_name):
#            logger.warning('The output image {} already exists. Skipping this step.'.format(image_gradient_name))
#            continue        
        
    # prepare result image
    data_output = scipy.zeros(data_input.shape, dtype=scipy.float32)
        
    # apply the gradient magnitude filter
    logger.info('Computing the gradient magnitude with Prewitt operator...')
    generic_gradient_magnitude(data_input, prewitt, output=data_output) # alternative to prewitt is sobel
        
    # save resulting mask
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Code Example #17
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # check if output image exists (will also be performed before saving, but as the gradient might be time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError('The output image {} already exists.'.format(args.output))        
        
    # loading image
    data_input, header_input = load(args.input)
    
    logger.debug('Input array: dtype={}, shape={}'.format(data_input.dtype, data_input.shape))
    
    # execute the gradient map filter
    logger.info('Applying gradient map filter...')
    data_output = filter.gradient_magnitude(data_input, header.get_pixel_spacing(header_input))
        
    logger.debug('Resulting array: dtype={}, shape={}'.format(data_output.dtype, data_output.shape))
    
    # save image
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Code Example #18
File: apply_rdf.py Project: loli/neuropipeline
def main():
	# catch parameters
	forest_file = sys.argv[1]
	case_folder = sys.argv[2]
	mask_file = sys.argv[3]
	segmentation_file = sys.argv[4]

	# loading case features
	feature_vector = []
	for _file in os.listdir(case_folder):
		if _file.endswith('.npy') and _file.startswith('feature.'):
			with open(os.path.join(case_folder, _file), 'r') as f:
				feature_vector.append(numpy.load(f))
	feature_vector = join(*feature_vector)
	if 1 == feature_vector.ndim:
		feature_vector = numpy.expand_dims(feature_vector, -1)

	# load and apply the decision forest
	with open(forest_file, 'r') as f:
		forest = pickle.load(f)
	classification_results = forest.predict(feature_vector)

	# preparing the output image
	m, h = load(mask_file)
	m = m.astype(numpy.bool)
	o = numpy.zeros(m.shape, numpy.uint8)
	o[m] = numpy.squeeze(classification_results).ravel()

	# applying the post-processing morphology
	#o = binary_dilation(o, iterations=2)
	#o = keep_largest_connected_component(o)
	o = binary_fill_holes(o)

	# saving the results
	save(o, segmentation_file, h, True)
Code Example #19
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # load input image
    logger.info('Loading {}...'.format(args.image))
    image_data, image_header = load(args.image)
    
    # check if supplied cut dimension is inside the input images dimensions
    if args.dimension < 0 or args.dimension >= image_data.ndim:
        logger.critical('The supplied cut-dimension {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
        raise ArgumentError('The supplied cut-dimension {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
    
    # prepare output filenames
    name_output = args.output.replace('{}', '{:03d}')
    
    # determine cut lines
    no_sub_volumes = image_data.shape[args.dimension] / args.maxsize + 1 # int-division is desired
    slices_per_volume = image_data.shape[args.dimension] / no_sub_volumes # int-division is desired
    
    # construct processing dict for each sub-volume
    processing_array = []
    for i in range(no_sub_volumes):
        processing_array.append(
            {'path': name_output.format(i+1),
             'cut': (i * slices_per_volume, (i + 1) * slices_per_volume)})
        if no_sub_volumes - 1 == i: # last volume has to have increased cut end
            processing_array[i]['cut'] = (processing_array[i]['cut'][0], image_data.shape[args.dimension])

    # construct base indexing list
    index = [slice(None) for _ in range(image_data.ndim)]
    
    # execute extraction of the sub-volumes
    logger.info('Extracting sub-volumes...')
    for dic in processing_array:
        # check if output images exists
        if not args.force:
            if os.path.exists(dic['path']):
                logger.warning('The output file {} already exists. Skipping this volume.'.format(dic['path']))
                continue
        
        # extracting sub-volume
        index[args.dimension] = slice(dic['cut'][0], dic['cut'][1])
        volume = image_data[index]
        
        logger.debug('Extracted volume is of shape {}.'.format(volume.shape))
        
        # saving sub-volume in same format as input image
        logger.info('Saving cut {} as {}...'.format(dic['cut'], dic['path']))
        save(volume, dic['path'], image_header, args.force)
        
    logger.info('Successfully terminated.')
Code Example #20
File: _test.py Project: ShimonaNiharika/MedIA
def test03(img, hdr, idx, delta):
    # TEST 03: DOES ANY META-INFORMATION GET LOST DURING FORMAT CONVERSION? AND IF YES; WHICH?
    for tr in types_int: # reference type
        print ''
        oformat = tr
        
        # create, save and load reference image
        try:
            name_ref = tmp_folder + '.'.join(['tmp_ref', tr])
            save(img, name_ref, hdr, True)
            img_ref, hdr_ref = load(name_ref)
        except Exception as e:
            print '\tERROR: Could not generate reference image for type {}: {}'.format(tr, e)
            continue
        
        # extract meta-data from reference image
        mdata_ref = {'shape': img_ref.shape,
                     'dtype': img_ref.dtype,
                     'point': img_ref[idx],
                     'spacing': header.get_pixel_spacing(hdr_ref),
                     'offset': header.get_offset(hdr_ref),}        
        
        # print meta-data from reference image
        
        # iterate of test images
        for tt in types_int: # test type
            print '{} => {}'.format(oformat, tt),
            
            # create, save and load test images
            try:
                #print type(img_ref), type(hdr_ref)
                #print type(img_test), type(hdr_test)
                name_test = tmp_folder + '.'.join(['tmp_test', tt])
                save(img_ref, name_test, hdr_ref, True)
                img_test, hdr_test = load(name_test)
                
            except Exception as e:
                print '\tERROR: Could not generate test image. {}'.format(e)
                continue
            
            # extract meta-data from test image
            mdata_test = {'shape': img_test.shape,
                          'dtype': img_test.dtype,
                          'spacing': header.get_pixel_spacing(hdr_test),
                          'offset': header.get_offset(hdr_test),
                          'point': img_test[idx]}                    
            
            # compare reference against meta-image
            error = False
            for k in mdata_ref.keys():
                equal = _compare(mdata_ref[k], mdata_test[k], delta)
                #print '\n\t{} ({}) : {} = {}'.format(equal, k, mdata_ref[k], mdata_test[k]),
                if not equal:
                    error = True
                    print '\n\t{} ({}) : {} = {}'.format(equal, k, mdata_ref[k], mdata_test[k]),
            if not error:
                print '\t{}'.format(True)
            else:
                print '\n'
Code Example #21
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images
    b0img, b0hdr = load(args.b0image)
    bximg, bxhdr = load(args.bximage)
    
    # convert to float
    b0img = b0img.astype(numpy.float)
    bximg = bximg.astype(numpy.float)

    # check if image are compatible
    if not b0img.shape == bximg.shape:
        raise ArgumentError('The input images shapes differ i.e. {} != {}.'.format(b0img.shape, bximg.shape))
    if not header.get_pixel_spacing(b0hdr) == header.get_pixel_spacing(bxhdr):
        raise ArgumentError('The input images voxel spacing differs i.e. {} != {}.'.format(header.get_pixel_spacing(b0hdr), header.get_pixel_spacing(bxhdr)))
    
    # check if supplied threshold value as well as the b value is above 0
    if args.threshold is not None and not args.threshold >= 0:
        raise ArgumentError('The supplied threshold value must be greater than 0, otherwise a division through 0 might occur.')
    if not args.b > 0:
        raise ArgumentError('The supplied b-value must be greater than 0.')
    
    # compute threshold value if not supplied
    if args.threshold is None:
        b0thr = otsu(b0img, 32) / 4. # divide by 4 to decrease impact
        bxthr = otsu(bximg, 32) / 4.
        if 0 >= b0thr:
            raise ArgumentError('The supplied b0image seems to contain negative values.')
        if 0 >= bxthr:
            raise ArgumentError('The supplied bximage seems to contain negative values.')
    else:
        b0thr = bxthr = args.threshold
    
    logger.debug('thresholds={}/{}, b-value={}'.format(b0thr, bxthr, args.b))
    
    # threshold b0 + bx DW image to obtain a mask
    # the b0 mask avoids division by zero, the bx mask avoids a zero in the ln(x) computation
    mask = binary_fill_holes(b0img > b0thr) & binary_fill_holes(bximg > bxthr)
    
    # perform a number of binary morphology steps to select the brain only
    mask = binary_erosion(mask, iterations=1)
    mask = largest_connected_component(mask)
    mask = binary_dilation(mask, iterations=1)
    
    logger.debug('computing the ADC for {} of {} voxels, the remainder is set to zero'.format(numpy.count_nonzero(mask), numpy.prod(mask.shape)))
    
    # compute the ADC
    adc = numpy.zeros(b0img.shape, b0img.dtype)
    adc[mask] = -1. * args.b * numpy.log(bximg[mask] / b0img[mask])
    adc[adc < 0] = 0
            
    # saving the resulting image
    save(adc, args.output, b0hdr, args.force)
Code Example #22
File: correct_sform.py Project: loli/neuropipeline
def main():
	# load input image
	i, h = load(sys.argv[1])

	# correct sform
	h.set_sform(h.get_qform())

	# save
	save(i, sys.argv[2], h)
Code Example #23
File: utility.py Project: lweckeck/albo
    def _run_interface(self, runtime):
        if not base.isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_filename('out_file')

        mask, header = mio.load(self.inputs.in_file)
        inverted_mask = numpy.ones(mask.shape, numpy.uint8)
        inverted_mask[mask.astype(numpy.bool)] = 0

        mio.save(inverted_mask, self.inputs.out_file, header)
        return runtime
Code Example #24
File: pass_header.py Project: loli/neuropipeline
def main():
	# load input image
	i, _ = load(sys.argv[1])

	# load template image
	_, h = load(sys.argv[2])
	
	# save input image with adapted header in place
	j = i.copy()
	save(j, sys.argv[1], h)
Code Example #25
File: closing.py Project: loli/atlasoverlap
def main():
	i, h = load(sys.argv[1])
	
	i = i.copy()
	i = binary_closing(i, iterations=1)
	i = morphology2d(binary_closing, i, iterations=4)
	i = fill2d(i)
	

	save(i, sys.argv[1], h)
Code Example #26
File: medpy_shrink_image.py Project: loli/medpy
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input data
    input_data, input_header = load(args.input)
    
    logger.debug('Old shape = {}.'.format(input_data.shape))
    
    # compute new shape
    new_shape = list(input_data.shape)
    new_shape[args.dimension] = 1 + (new_shape[args.dimension] - 1) / (args.discard + 1)
    
    # prepare output image
    output_data = scipy.zeros(new_shape, dtype=input_data.dtype)
    
    # prepare slicers
    slicer_in = [slice(None)] * input_data.ndim
    slicer_out = [slice(None)] * input_data.ndim
    
    # prepare skip-counter and output image slice counter
    skipc = 0
    slicec = 0
    
    logger.debug('Shrinking from {} to {}...'.format(input_data.shape, new_shape))
    for idx in range(input_data.shape[args.dimension]):
        
        if 0 == skipc:
            # transfer slice
            slicer_in[args.dimension] = slice(idx, idx + 1)
            slicer_out[args.dimension]  = slice(slicec, slicec + 1)
            output_data[slicer_out] = input_data[slicer_in]
            
            # reset the skip counter and increase the output slice counter
            skipc = args.discard
            slicec += 1
            
        else: # skip slice
            # decrease skip counter
            skipc -= 1

    
    # set new pixel spacing
    new_spacing = list(header.get_pixel_spacing(input_header))
    new_spacing[args.dimension] = new_spacing[args.dimension] * float(args.discard + 1)
    logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(input_header), new_spacing))
    header.set_pixel_spacing(input_header, tuple(new_spacing))
    
    save(output_data, args.output, input_header, args.force)
Code Example #27
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load first input image as example 
    example_data, example_header = load(args.inputs[0])
    
    # test if the supplied position is valid
    if args.position > example_data.ndim or args.position < 0:
        raise ArgumentError('The supplied position for the new dimension is invalid. It has to be between 0 and {}.'.format(example_data.ndim))
    
    # prepare empty output volume
    output_data = scipy.zeros([len(args.inputs)] + list(example_data.shape), dtype=example_data.dtype)
    
    # add first image to output volume
    output_data[0] = example_data
    
    # load input images and add to output volume
    for idx, image in enumerate(args.inputs[1:]):
        image_data, _ = load(image)
        if not args.ignore and image_data.dtype != example_data.dtype:
            raise ArgumentError('The dtype {} of image {} differs from the one of the first image {}, which is {}.'.format(image_data.dtype, image, args.inputs[0], example_data.dtype))
        if image_data.shape != example_data.shape:
            raise ArgumentError('The shape {} of image {} differs from the one of the first image {}, which is {}.'.format(image_data.shape, image, args.inputs[0], example_data.shape))
        output_data[idx + 1] = image_data
        
    # move new dimension to the end or to target position
    for dim in range(output_data.ndim - 1):
        if dim >= args.position: break
        output_data = scipy.swapaxes(output_data, dim, dim + 1)
        
    # set pixel spacing
    spacing = list(header.get_pixel_spacing(example_header))
    spacing = tuple(spacing[:args.position] + [args.spacing] + spacing[args.position:])
    
    # !TODO: Find a way to enable this also for PyDicom and ITK images
    if __is_header_nibabel(example_header):
        __update_header_from_array_nibabel(example_header, output_data)
        header.set_pixel_spacing(example_header, spacing)
    else:
        raise ArgumentError("Sorry. Setting the voxel spacing of the new dimension only works with NIfTI images. See the description of this program for more details.")
    
    # save created volume
    save(output_data, args.output, example_header, args.force)
        
    logger.info("Successfully terminated.")
Code Example #28
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load 3d image
    data_3d, header_3d = load(args.input)

    # check if supplied dimension parameter is inside the images dimensions
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError(
            'The supplied cut-dimension {} exceeds the number of input volume dimensions {}.'
            .format(args.dimension, data_3d.ndim))

    # check if the supplied offset parameter is a divider of the cut-dimensions slice number
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError(
            'The offset is not a divider of the number of slices in cut dimension ({} / {}).'
            .format(data_3d.shape[args.dimension], args.offset))

    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)

    logger.debug(
        'Separating {} slices into {} 3D volumes of thickness {}.'.format(
            data_3d.shape[args.dimension], volumes_3d, args.offset))

    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset,
                                             idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl + 1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]

    # flip dimensions such that the newly created is the last
    data_4d = scipy.swapaxes(data_4d, 0, 3)

    # save resulting 4D volume
    save(data_4d, args.output, header_3d, args.force)

    logger.info("Successfully terminated.")
Code Example #29
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # copy the example image or generate empty image, depending on the modus
    if args.example:
        grid_image = scipy.zeros(args.example_image.shape, scipy.bool_)
        grid_header = args.example_header
    else:
        grid_image = scipy.zeros(args.shape, scipy.bool_)
        # !TODO: Find another solution for this
        # Saving and loading image once to generate a valid header
        tmp_dir = tempfile.mkdtemp()
        tmp_image = '{}/{}'.format(tmp_dir, args.output.split('/')[-1])
        save(grid_image, tmp_image)
        _, grid_header = load(tmp_image)
        try:
            os.remove(tmp_image)
            os.rmdir(tmp_dir)
        except Exception:
            pass

    # set the image attributes if supplied
    if args.pixelspacing:
        header.set_pixel_spacing(grid_header, args.pixelspacing)
    if args.offset:
        header.set_offset(grid_header, args.offset)

    # compute the right grid spacing for each dimension
    if args.real:
        grid_spacing = [
            int(round(sp / float(ps))) for sp, ps in zip(
                args.spacing, header.get_pixel_spacing(grid_header))
        ]
    else:
        grid_spacing = args.spacing

    # paint the grid into the empty image volume
    for dim in range(grid_image.ndim):
        if 0 == grid_spacing[dim]:
            continue  # skip dimension of 0 grid spacing supplied
        for offset in range(0, grid_image.shape[dim], grid_spacing[dim]):
            slicer = [slice(None)] * grid_image.ndim
            slicer[dim] = slice(offset, offset + 1)
            grid_image[slicer] = True

    # saving resulting grid volume
    save(grid_image, args.output, grid_header, args.force)
Code Example #30
def main():
    parser = getParser()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images
    img, hdr = load(args.input)
    img = img.astype(numpy.bool)
    
    # check spacing values
    if not len(args.spacing) == img.ndim:
        parser.error('The image has {} dimensions, but {} spacing parameters have been supplied.'.format(img.ndim, len(args.spacing)))
        
    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            parser.error('The output image {} already exists.'.format(args.output)) 
        
    logger.debug('target voxel spacing: {}'.format(args.spacing))
    
    # determine number of required complete slices for up-sampling
    vs = header.get_pixel_spacing(hdr)
    rcss = [int(y // x - 1) for x, y in zip(args.spacing, vs)] # TODO: For option b, remove the - 1; better: no option b, since I am rounding later anyway
    
    # remove negatives and round up to next even number
    rcss = [x if x > 0 else 0 for x in rcss]
    rcss = [x if 0 == x % 2 else x + 1 for x in rcss]
    logger.debug('intermediate slices to add per dimension: {}'.format(rcss))
    
    # for each dimension requiring up-sampling, from the highest down, perform shape based slice interpolation
    logger.info('Adding required slices using shape based interpolation.')
    for dim, rcs in enumerate(rcss):
        if rcs > 0:
            logger.debug('adding {} intermediate slices to dimension {}'.format(rcs, dim))
            img = shape_based_slice_interpolation(img, dim, rcs)
            logger.debug('resulting new image shape: {}'.format(img.shape))
            
    # compute and set new voxel spacing
    nvs = [x / (y + 1.) for x, y in zip(vs, rcss)]
    header.set_pixel_spacing(hdr, nvs)
    logger.debug('intermediate voxel spacing: {}'.format(nvs))
    
    # resample the image to the target voxel spacing
    logger.info('Re-sampling the image with a b-spline order of {}.'.format(args.order))
    img, hdr = resample(img, hdr, args.spacing, args.order, mode='nearest')
    
    # saving the resulting image
    save(img, args.output, hdr, args.force)
Code Example #31
File: medpy_binary_resampling.py Project: loli/medpy
def main():
    parser = getParser()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images
    img, hdr = load(args.input)
    img = img.astype(numpy.bool)
    
    # check spacing values
    if not len(args.spacing) == img.ndim:
        parser.error('The image has {} dimensions, but {} spacing parameters have been supplied.'.format(img.ndim, len(args.spacing)))
        
    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            parser.error('The output image {} already exists.'.format(args.output)) 
        
    logger.debug('target voxel spacing: {}'.format(args.spacing))
    
    # determine number of required complete slices for up-sampling
    vs = header.get_pixel_spacing(hdr)
    rcss = [int(y // x - 1) for x, y in zip(args.spacing, vs)] # TODO: For option b, remove the - 1; better: no option b, since I am rounding later anyway
    
    # remove negatives and round up to next even number
    rcss = [x if x > 0 else 0 for x in rcss]
    rcss = [x if 0 == x % 2 else x + 1 for x in rcss]
    logger.debug('intermediate slices to add per dimension: {}'.format(rcss))
    
    # for each dimension requiring up-sampling, from the highest down, perform shape based slice interpolation
    logger.info('Adding required slices using shape based interpolation.')
    for dim, rcs in enumerate(rcss):
        if rcs > 0:
            logger.debug('adding {} intermediate slices to dimension {}'.format(rcs, dim))
            img = shape_based_slice_interpolation(img, dim, rcs)
            logger.debug('resulting new image shape: {}'.format(img.shape))
            
    # compute and set new voxel spacing
    nvs = [x / (y + 1.) for x, y in zip(vs, rcss)]
    header.set_pixel_spacing(hdr, nvs)
    logger.debug('intermediate voxel spacing: {}'.format(nvs))
    
    # resample the image to the target voxel spacing
    logger.info('Re-sampling the image with a b-spline order of {}.'.format(args.order))
    img, hdr = resample(img, hdr, args.spacing, args.order, mode='nearest')
    
    # saving the resulting image
    save(img, args.output, hdr, args.force)
Code Example #32
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output + args.image[-4:]):
            logger.warning(
                'The output file {} already exists. Breaking.'.format(
                    args.output + args.image[-4:]))
            exit(1)

    # load images
    image_data, image_header = load(args.image)

    # check image dimensions against sub-volume dimensions
    if len(image_data.shape) != len(args.volume):
        logger.critical(
            'The supplied input image has a different dimensionality than the requested sub-volume ({} to {})'
            .format(len(image_data.shape), len(args.volume)))
        raise ArgumentError(
            'The supplied input image has a different dimensionality than the requested sub-volume ({} to {})'
            .format(len(image_data.shape), len(args.volume)))

    # execute extraction of the sub-area
    logger.info('Extracting sub-volume...')
    index = [slice(x[0], x[1]) for x in args.volume]
    volume = image_data[index]

    # check if the output image contains data
    if 0 == len(volume):
        logger.exception(
            'The extracted sub-volume is of zero-size. This usually means that the supplied volume coordinates and the image coordinates do not intersect. Exiting the application.'
        )
        sys.exit(-1)

    # squeeze the extracted sub-volume for the case in which one dimension has been eliminated
    volume = scipy.squeeze(volume)

    logger.debug('Extracted volume is of shape {}.'.format(volume.shape))

    # save results in same format as input image
    save(volume, args.output, image_header, args.force)

    logger.info('Successfully terminated.')
Code Example #33
def preprocessing(image_path, save_folder):

    if not os.path.exists("data/" + save_folder):
        os.mkdir("data/" + save_folder)
    filelist = os.listdir(image_path)
    filelist = [item for item in filelist if 'volume' in item]
    for file in filelist:
        img, img_header = load(image_path + file)
        img[img < -200] = -200
        img[img > 250] = 250
        img = np.array(img, dtype='float32')
        print("Saving image " + file)
        save(img, "./data/" + save_folder + "test-" + file)
Code Example #34
File: biasfieldcorrection.py Project: loli/neuroless
def _correctniftiheader(image):
    r"""
    Correct the NIfTI header meta-data of a file in-place.
    This is usually required after an application of CMTK, as it screws up the header.
    """
    # correct the NIfTI header meta-data (it gets screwed up by cmtk)
    i, h = load(image)
    aff = get_affine(h)
    set_qform(h, aff)
    set_sform(h, aff)
    set_qform_code(h, 1)
    set_sform_code(h, 1)
    save(i, image, h, force=True)
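A minimal usage sketch (the file name is hypothetical; the function rewrites the NIfTI header of the given file in place):

# repair the qform/sform meta-data of a hypothetical CMTK output
_correctniftiheader('t1_biasfield_corrected.nii.gz')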
Code Example #35
File: convert_size.py Project: sherridxy/H-DenseUNet
def convert(args):

    filelist = os.listdir('image/segmentation/')
    filelist = [item for item in filelist if 'segmentation' in item]
    for file in filelist:
        img = nib.load('image/segmentation/'+ file).get_data()
        print(img.shape)        
        img = np.swapaxes(img, 0, 2)
        img = np.array(img, dtype='float32')
        print(img.shape)
        #data_reshape = np.array(data_reshape, dtype='float32')
        print("Save image: " + file)
        save(img, args.save_path + file)
Code Example #36
def main():
	i, h = load(sys.argv[1])

	tasks = sys.argv[2:]

	for task in sys.argv[2:]:
		s, g = task.split('=')
		if g in GETTER:
			SETTER[s](h, GETTER[g](h))
		else:
			SETTER[s](h, int(g))

	save(i.copy(), sys.argv[1], h)
Code Example #37
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load dicom slices
    [series] = pydicom_series.read_files(args.input, False, True) # second to not show progress bar, third to retrieve data
    #print series.sampling # Note: The first value is the mean of all differences between ImagePositionPatient-values of the DICOM slices - of course total bullshit
    data_3d = series.get_pixel_array()
    
    # check parameters
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError('The image has only {} dimensions. The supplied target dimension {} exceeds this number.'.format(
                    data_3d.ndim,
                    args.dimension))
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError('The number of slices {} in the target dimension {} of the image shape {} is not dividable by the supplied number of consecutive slices {}.'.format(
                    data_3d.shape[args.dimension],
                    args.dimension,
                    data_3d.shape,
                    args.offset))
    
    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)
    
    logger.debug('Separating {} slices into {} 3D volumes of thickness {}.'.format(data_3d.shape[args.dimension], volumes_3d, args.offset))
        
    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset, idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl+1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]
        
    # flip dimensions such that the newly created is the last
    data_4d = scipy.swapaxes(data_4d, 0, 3)
        
    # save resulting 4D volume
    save(data_4d, args.output, False, args.force)
    
    logger.info("Successfully terminated.")
Code Example #38
File: utility.py Project: Ashrmis/albo
    def _run_interface(self, runtime):
        if not base.isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_filename('out_file')

        in_file = self.inputs.in_file
        out_file = self.inputs.out_file

        image, header = mio.load(in_file)
        lower, upper = numpy.percentile(image, (1, 99.9))
        image[image < lower] = lower
        image[image > upper] = upper
        mio.save(image, out_file, header)

        return runtime
Code Example #39
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load first image as result image
    logger.info('Loading {}...'.format(args.images[0]))
    result_data, result_header = load(args.images[0])

    # check dimension argument
    if args.dimension >= result_data.ndim:
        raise argparse.ArgumentError(
            'The supplied stack-dimension {} exceeds the image dimensionality of 0 to {}.'
            .format(args.dimension, result_data.ndim - 1))

    # change to zero matrix if requested
    if args.zero and result_data.all():
        result_data = numpy.zeros(result_data.shape, result_data.dtype)

    # iterate over remaining images and concatenate
    for image_name in args.images[1:]:
        logger.info('Loading {}...'.format(image_name))
        image_data, _ = load(image_name)

        # change to zero matrix if requested
        if args.zero and image_data.all():
            image_data = numpy.zeros(image_data.shape, image_data.dtype)

        #concatenate
        if args.reversed:
            result_data = numpy.concatenate((image_data, result_data),
                                            args.dimension)
        else:
            result_data = numpy.concatenate((result_data, image_data),
                                            args.dimension)

    logger.debug('Final image is of shape {}.'.format(result_data.shape))

    # save results in same format as input image
    logger.info('Saving concatenated image as {}...'.format(args.output))

    save(result_data, args.output, result_header, args.force)

    logger.info('Successfully terminated.')
Code Example #40
File: classification.py Project: Ashrmis/albo
    def _run_interface(self, runtime):
        if not base.isdefined(self.inputs.segmentation_file):
            self.inputs.segmentation_file = self._gen_filename(
                'segmentation_file')
        if not base.isdefined(self.inputs.probability_file):
            self.inputs.probability_file = self._gen_filename(
                'probability_file')

        log.info('Applying RDF {} to features {}'.format(
            self.inputs.classifier_file,
            map(os.path.basename, self.inputs.feature_files)))

        features = []
        for path in self.inputs.feature_files:
            with open(path, 'r') as f:
                features.append(numpy.load(f))

        feature_vector = mutil.join(*features)
        if feature_vector.ndim == 1:
            feature_vector = numpy.expand_dims(feature_vector, -1)

        # load and apply the decision forest
        with gzip.open(self.inputs.classifier_file, 'r') as f:
            classifier = pickle.load(f)
            prob_classification = \
                classifier.predict_proba(feature_vector)[:, 1]
            # equivalent to forest.predict
            bin_classification = prob_classification > PROBABILITY_THRESHOLD

        # prepare result images to save to disk
        mask, header = mio.load(self.inputs.mask_file)
        mask = mask.astype(numpy.bool)
        segmentation_image = numpy.zeros(mask.shape, numpy.uint8)
        segmentation_image[mask] = numpy.squeeze(bin_classification).ravel()
        probability_image = numpy.zeros(mask.shape, numpy.float32)
        probability_image[mask] = numpy.squeeze(prob_classification).ravel()

        # apply the post-processing morphology
        segmentation_image = scipy.ndimage.morphology.binary_fill_holes(
            segmentation_image)

        mio.save(segmentation_image,
                 self.inputs.segmentation_file,
                 header,
                 force=True)
        mio.save(probability_image,
                 self.inputs.probability_file,
                 header,
                 force=True)
        return runtime
Code Example #41
File: test.py Project: wltjr1007/MultimodalMRISegCNN
def test(data_name_list):
    test_data = np.memmap(os.path.join(WRITE_PATH, "test_orig.dat"),
                          dtype=np.float32,
                          mode="r",
                          shape=(110, SHAPE[1], SHAPE[2], SHAPE[3], SHAPE[0]))
    val_data = np.memmap(os.path.join(WRITE_PATH, "h_orig.dat"),
                         shape=(220, SHAPE[0], SHAPE[1], SHAPE[2], SHAPE[3]),
                         dtype=np.float32,
                         mode="r")
    test_size = test_data.shape[0]
    test_data_node = tf.placeholder(tf.float32,
                                    shape=(BATCH_SIZE * BATCH_MUL, PATCH[0],
                                           PATCH[1], NUM_CHANNELS))
    test_prediction = test_model(test_data_node)
    imghdr = load(ORIG_READ_PATH + "h.1.VSD.Brain.XX.O.MR_Flair.54512.nii")[1]
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver()
        saver.restore(sess, WRITE_PATH + "savedmodel/savedmodel_final.ckpt")
        print("Variable Initialized. Start Testing!")
        for i in range(test_size + VAL_SIZE):
            test_time = time.time()
            test_result = np.zeros(dtype=np.uint8,
                                   shape=(SHAPE[1], SHAPE[2], SHAPE[3]))
            for j in range(SHAPE[1]):
                for k in range(0, SHAPE[2], BATCH_MUL):
                    if i < VAL_SIZE:
                        batch_data, is_background = get_val_data(
                            val_data, i, j, k)
                    else:
                        batch_data, is_background = get_test_data(
                            test_data, i - VAL_SIZE, j, k)
                    if is_background:
                        continue
                    feed_dict = {test_data_node: batch_data}
                    test_result[j, k] = np.argmax(
                        sess.run(test_prediction, feed_dict=feed_dict), 1)
            if i < VAL_SIZE:
                test_result[np.where(val_data[i, 0] == 0)] = 0
                save(
                    test_result, WRITE_PATH + "VSD.h." + str(i) + "." +
                    data_name_list[0][i, 3] + ".nii", imghdr)
            else:
                test_result[np.where(test_data[i - VAL_SIZE, 0] == 0)] = 0
                save(
                    test_result, WRITE_PATH + "VSD.t." + str(i - VAL_SIZE) +
                    "." + data_name_list[2][i - VAL_SIZE, 3] + ".nii", imghdr)

            print("TEST %d/%d, Time elapsed: %d" %
                  (i - VAL_SIZE, test_size, time.time() - test_time))
Code Example #42
def preprocessing_filter(nii,volume,mask,resize,rate=0.1):
    for i in range(131):
        volumeName='volume-'+str(i)+'.nii'
        segmentName='segmentation-'+str(i)+'.nii'

        if not os.path.exists(volume+'volume-'+str(i)):
            os.mkdir(volume+'volume-'+str(i))
            pass
        if not os.path.exists(mask+'segmentation-'+str(i)):
            os.mkdir(mask+'segmentation-'+str(i))
            pass

        img1, img_header1 = load(nii + volumeName)
        img2, img_header2 = load(nii + segmentName)

        img1[img1<-200]=-200
        img1[img1>250]=250
        img1=((img1+200)*255//450)
        img1=np.array(img1,dtype='uint8')
        img2=np.array(img2,dtype='uint8')

        layers = img1.shape[2]
        black=[]
        for j in range(layers):
            if(np.max(img2[:,:,j])==0):
                black.append(j)
                continue
            else:
                im1 = np.array(img1[:, :, j])
                im2 = np.array(img2[:, :, j])

                im1 = cv2.resize(im1, resize)
                im2 = cv2.resize(im2, resize)

                save(im1, volume + 'volume-' + str(i) + '/' + str(j) + '.jpg')
                save(im2, mask + 'segmentation-' + str(i) + '/' + str(j) + '.jpg')
                print("Saving image " + str(i) + " "+ str(j))
            pass

        # Add black images
        all_num=int(rate*layers)
        if(all_num>len(black)):
            all_num=len(black)
            pass

        # shuffle
        random.shuffle(black)

        for k in range(all_num):
            im1 = np.array(img1[:, :, black[k]])
            im2 = np.array(img2[:, :, black[k]])

            im1 = cv2.resize(im1, resize)
            im2 = cv2.resize(im2, resize)

            save(im1, volume + 'volume-' + str(i) + '/' + str(black[k]) + '.jpg')
            save(im2, mask + 'segmentation-' + str(i) + '/' + str(black[k]) + '.jpg')
            print("Saving image " + str(i) + " " + str(black[k]))
        pass
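The snippet above windows the CT Hounsfield units into an 8-bit range before writing JPEG slices. A minimal stand-alone sketch of that windowing step (the [-200, 250] HU bounds are taken from the snippet; the function name is hypothetical):

import numpy as np

def window_to_uint8(volume, low=-200.0, high=250.0):
    # clip the HU values to the window and rescale linearly to 0..255
    volume = np.asarray(volume, dtype='float64')  # float first, to avoid int16 overflow
    volume = np.clip(volume, low, high)
    return ((volume - low) * 255.0 / (high - low)).astype('uint8')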
コード例 #43
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images
    b0img, b0hdr = load(args.b0image)
    bximg, bxhdr = load(args.bximage)

    # check if the images are compatible
    if not b0img.shape == bximg.shape:
        raise ArgumentError('The input images shapes differ i.e. {} != {}.'.format(b0img.shape, bximg.shape))
    if not header.get_pixel_spacing(b0hdr) == header.get_pixel_spacing(bxhdr):
        raise ArgumentError('The input images voxel spacing differs i.e. {} != {}.'.format(header.get_pixel_spacing(b0hdr), header.get_pixel_spacing(bxhdr)))
    
    # check that the supplied threshold value is non-negative and that the b-value is positive
    if args.threshold is not None and not args.threshold >= 0:
        raise ArgumentError('The supplied threshold value must not be negative, otherwise a division through 0 might occur.')
    if not args.b > 0:
        raise ArgumentError('The supplied b-value must be greater than 0.')
    
    # compute threshold value if not supplied
    if args.threshold is None:
        b0thr = otsu(b0img, 32) / 2. # divide by 2 to decrease impact
        bxthr = otsu(bximg, 32) / 2.
        if 0 >= b0thr:
            raise ArgumentError('The supplied b0image seems to contain negative values.')
        if 0 >= bxthr:
            raise ArgumentError('The supplied bximage seems to contain negative values.')
    else:
        b0thr = bxthr = args.threshold
    
    logger.debug('thresholds={}/{}, b-value={}'.format(b0thr, bxthr, args.b))
    
    # threshold b0 + bx DW image to obtain a mask
    # the b0 mask avoids division by 0, the bx mask avoids a zero in the ln(x) computation
    mask = (b0img > b0thr) & (bximg > bxthr)
    
    logger.debug('excluding {} of {} voxels from the computation and setting them to zero'.format(scipy.prod(mask.shape) - scipy.count_nonzero(mask), scipy.prod(mask.shape)))
    
    # compute the ADC
    adc = scipy.zeros(b0img.shape, b0img.dtype)
    adc[mask] = -1. * args.b * scipy.log(bximg[mask] / b0img[mask])
            
    # saving the resulting image
    save(adc, args.output, b0hdr, args.force)
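Note that this script multiplies by the b-value; the apparent diffusion coefficient is more commonly defined with a division, ADC = -ln(S_b / S_0) / b. A minimal sketch of that conventional formulation (array names assumed, mask as computed above):

import numpy as np

def adc_conventional(b0img, bximg, b, mask):
    # ADC = -ln(S_b / S_0) / b, evaluated only inside the mask to avoid log(0) and division by zero
    adc = np.zeros(b0img.shape, dtype=np.float64)
    adc[mask] = -np.log(bximg[mask] / b0img[mask]) / float(b)
    return adc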
コード例 #44
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image exists (will also be performed before saving, but as the smoothing might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output1):
            raise parser.error('The output image {} already exists.'.format(
                args.output1))
        if os.path.exists(args.output2):
            raise parser.error('The output image {} already exists.'.format(
                args.output2))

    # loading images
    data_input1, header_input1 = load(args.input1)
    data_input2, header_input2 = load(args.input2)
    logger.debug('Original image sizes are {} and {}.'.format(
        data_input1.shape, data_input2.shape))

    # compute intersection volumes (punch)
    logger.info('Computing the intersection.')
    inters1, inters2, new_offset = intersection(data_input1, header_input1,
                                                data_input2, header_input2)
    logger.debug(
        'Punched images are of sizes {} and {} with new offset {}.'.format(
            inters1.shape, inters2.shape, new_offset))

    # check if any intersection could be found at all
    if 0 == inters1.size:
        logger.warning(
            'No intersection could be found between the images. Please check their meta-data e.g. with medpy_info'
        )

    # update header information
    header.set_offset(header_input1, new_offset)
    header.set_offset(header_input2, new_offset)

    # save punched images
    save(inters1, args.output1, header_input1, args.force)
    save(inters2, args.output2, header_input2, args.force)

    logger.info('Successfully terminated.')
コード例 #45
0
ファイル: utility.py プロジェクト: Ashrmis/albo
    def _run_interface(self, runtime):
        if not base.isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_filename('out_file')

        in_file = self.inputs.in_file
        mask_file = self.inputs.mask_file
        out_file = self.inputs.out_file

        image, header = mio.load(in_file)
        mask, _ = mio.load(mask_file)

        image[~(mask.astype(bool))] = 0
        mio.save(image, out_file, header)

        return runtime
コード例 #46
0
ファイル: niftimodifymetadata.py プロジェクト: Ashrmis/albo
def nifti_modify_metadata(image_file, tasks):
    """Modify metadata of image_file.

    See module docstring for list of possible tasks.
    """
    image, header = mio.load(image_file)

    for task in tasks:
        field, value = task.split('=')
        if value in GETTER:
            SETTER[field](header, GETTER[value](header))
        else:
            SETTER[field](header, int(value))

    mio.save(image.copy(), image_file, header)
コード例 #47
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images (as image, header pairs)
    images = []
    headers = []
    for image_name in args.images:
        i, h = load(image_name)
        images.append(i)
        headers.append(h)
    
    # loading binary foreground masks if supplied, else create masks from threshold value
    if args.masks:
        masks = [load(mask_name)[0].astype(bool) for mask_name in args.masks]
    else:
        masks = [i > args.threshold for i in images]
    
    # if in application mode, load the supplied model and apply it to the images
    if args.lmodel:
        logger.info('Loading the model and transforming images...')
        with open(args.lmodel, 'rb') as f:  # pickled models must be read in binary mode
            trained_model = pickle.load(f)
            if not isinstance(trained_model, IntensityRangeStandardization):
                raise ArgumentError('{} does not seem to be a valid pickled instance of an IntensityRangeStandardization object'.format(args.lmodel))
            transformed_images = [trained_model.transform(i[m], surpress_mapping_check = args.ignore) for i, m in zip(images, masks)]
            
    # if in training mode, train the model, apply it to the images and save it
    else:
        logger.info('Training the average intensity model...')
        irs = IntensityRangeStandardization()
        trained_model, transformed_images = irs.train_transform([i[m] for i, m in zip(images, masks)], surpress_mapping_check = args.ignore)
        logger.info('Saving the trained model as {}...'.format(args.smodel))
        with open(args.smodel, 'wb') as f:
            pickle.dump(trained_model, f)
                
    # save the transformed images
    if args.simages:
        logger.info('Saving intensity transformed images to {}...'.format(args.simages))
        for ti, i, m, h, image_name in zip(transformed_images, images, masks, headers, args.images):
            i[m] = ti
            save(i, '{}/{}'.format(args.simages, image_name.split('/')[-1]), h, args.force)
    
    logger.info('Terminated.')
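Stripped of the command-line handling, the training branch above boils down to a few medpy calls; a minimal sketch (image list and model path assumed):

import pickle
from medpy.filter import IntensityRangeStandardization

def train_and_save(images, model_path='intensity_model.pkl'):
    # train the standardization model and transform the training images in one step
    irs = IntensityRangeStandardization()
    trained_model, transformed_images = irs.train_transform(images)
    # persist the trained model for later application to unseen images (binary mode)
    with open(model_path, 'wb') as f:
        pickle.dump(trained_model, f)
    return transformed_images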
コード例 #48
0
ファイル: downsizeCT.py プロジェクト: sherridxy/H-DenseUNet
def convert(args):

    for idx in range(205):
        seg = nib.load(args.data_seg + 'image' + str(idx) + '.nii').get_data()
        seg_original = nib.load('data/TrainingData/' + 'segmentation-' +
                                str(idx) + '.nii').get_data()

        index = np.where(seg == 1)
        mini = np.min(index, axis=-1)
        maxi = np.max(index, axis=-1)
        # Add buffer to each end of CT (start index clamped at 0 so it cannot go negative)
        data_reshape_seg_original = seg_original[:, :, max(mini[2] - 1, 0):maxi[2] + 1]
        print("Saving seg...... shape: " +
              str(data_reshape_seg_original.shape))
        save(data_reshape_seg_original,
             args.save_path + "segmentation/segmentation-" + str(idx) + ".nii")
コード例 #49
0
def preprocessing_filter(nii,volume,mask,resize,rate,slices):
    for i in range(0,131,1):
        volumeName='volume-'+str(i)+'.nii'
        segmentName='segmentation-'+str(i)+'.nii'

        if not os.path.exists(volume+'volume-'+str(i)):
            os.mkdir(volume+'volume-'+str(i))
            pass
        if not os.path.exists(mask+'segmentation-'+str(i)):
            os.mkdir(mask+'segmentation-'+str(i))
            pass

        img1, img_header1 = load(nii + volumeName)
        img2, img_header2 = load(nii + segmentName)

        img1=np.array(img1,dtype='float64')

        img1[img1<-200]=-200
        img1[img1>250]=250
        img1=((img1+200)*255//450)
        img1=np.array(img1,dtype='uint8')
        img2=np.array(img2,dtype='uint8')


        startposition,endposition=getRangImageDepth(img2)

        layers = img1.shape[2]

        realstart=max(int(startposition*(1-rate))-slices//2,0)
        realend=min(int((layers-endposition)*rate+endposition)+slices//2,layers)


        for j in range(realend-realstart):
            index=j+realstart

            im1 = np.array(img1[:, :, index])
            im2 = np.array(img2[:, :, index])

            im1 = cv2.resize(im1, resize)
            im2 = cv2.resize(im2, resize)

            save(im1, volume + 'volume-' + str(i) + '/' + str(index) + '.jpg')
            save(im2, mask + 'segmentation-' + str(i) + '/' + str(index) + '.jpg')
            print("Saving image " + str(i) + " "+ str(index))
            pass
        pass
    pass
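The realstart/realend arithmetic above widens the annotated slice range by a fraction rate of the margin on each side, plus half the patch depth. A small worked example with hypothetical numbers:

# hypothetical values, only to illustrate the bounds computation above
layers, startposition, endposition = 200, 50, 150
rate, slices = 0.1, 5
realstart = max(int(startposition * (1 - rate)) - slices // 2, 0)                      # max(45 - 2, 0) = 43
realend = min(int((layers - endposition) * rate + endposition) + slices // 2, layers)  # min(155 + 2, 200) = 157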
コード例 #50
0
def main():
    parser = getParser()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # loading input images
    img, hdr = load(args.input)

    # check shape dimensionality
    if not len(args.shape) == img.ndim:
        parser.error(
            'The image has {} dimensions, but {} shape parameters have been supplied.'
            .format(img.ndim, len(args.shape)))

    # check if output image exists
    if not args.force and os.path.exists(args.output):
        parser.error('The output image {} already exists.'.format(args.output))

    # compute required cropping and extention
    slicers_cut = []
    slicers_extend = []
    for dim in range(len(img.shape)):
        slicers_cut.append(slice(None))
        slicers_extend.append(slice(None))
        if args.shape[dim] != img.shape[dim]:
            difference = abs(img.shape[dim] - args.shape[dim])
            # integer division: slice indices must be ints (true division would yield floats)
            cutoff_left = difference // 2
            cutoff_right = difference // 2 + difference % 2
            if args.shape[dim] > img.shape[dim]:
                slicers_extend[-1] = slice(cutoff_left, -1 * cutoff_right)
            else:
                slicers_cut[-1] = slice(cutoff_left, -1 * cutoff_right)

    # crop original image (index with a tuple, as lists of slices are no longer accepted by numpy)
    img = img[tuple(slicers_cut)]

    # create output image and place input image centered
    out = numpy.zeros(args.shape, img.dtype)
    out[tuple(slicers_extend)] = img

    # saving the resulting image
    save(out, args.output, hdr, args.force)
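The loop above builds complementary cut/extend slicers so the input ends up centered inside the requested shape. The same idea as a compact, hypothetical numpy-only helper:

import numpy as np

def center_crop_or_pad(img, shape):
    # crop or zero-pad each dimension symmetrically so that img ends up centered in `shape`
    out = np.zeros(shape, dtype=img.dtype)
    src, dst = [], []
    for have, want in zip(img.shape, shape):
        if have > want:                      # crop the source symmetrically
            off = (have - want) // 2
            src.append(slice(off, off + want))
            dst.append(slice(None))
        else:                                # pad: place the source centered in the output
            off = (want - have) // 2
            src.append(slice(None))
            dst.append(slice(off, off + have))
    out[tuple(dst)] = img[tuple(src)]
    return out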
コード例 #51
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    # treat as binary
    data_input = data_input.astype(bool)
    
    # check dimension argument
    if args.dimension and (not args.dimension >= 0 or not args.dimension < data_input.ndim):
        raise argparse.ArgumentError(None, 'Invalid dimension of {} supplied. Image has only {} dimensions.'.format(args.dimension, data_input.ndim))
        
    # compute erosion and dilation steps
    erosions = int(math.ceil(args.width / 2.))
    dilations = int(math.floor(args.width / 2.))
    logger.debug("Performing {} erosions and {} dilations to achieve a contour of width {}.".format(erosions, dilations, args.width))
    
    # erode, dilate and compute contour
    if not args.dimension:
        eroded = binary_erosion(data_input, iterations=erosions) if not 0 == erosions else data_input
        dilated = binary_dilation(data_input, iterations=dilations) if not 0 == dilations else data_input
        data_output = dilated ^ eroded  # XOR rather than subtraction, which numpy does not define for boolean arrays
    else:
        slicer = [slice(None)] * data_input.ndim
        bs_slicer = [slice(None)] * data_input.ndim
        data_output = numpy.zeros_like(data_input)
        for sl in range(data_input.shape[args.dimension]):
            slicer[args.dimension] = slice(sl, sl+1)
            bs_slicer[args.dimension] = slice(1, 2)
            bs = generate_binary_structure(data_input.ndim, 1)
            
            # index with tuples, as indexing with lists of slices is no longer supported by numpy
            eroded = binary_erosion(data_input[tuple(slicer)], structure=bs[tuple(bs_slicer)], iterations=erosions) if not 0 == erosions else data_input[tuple(slicer)]
            dilated = binary_dilation(data_input[tuple(slicer)], structure=bs[tuple(bs_slicer)], iterations=dilations) if not 0 == dilations else data_input[tuple(slicer)]
            data_output[tuple(slicer)] = dilated ^ eroded  # XOR keeps only the contour voxels
    logger.debug("Contour image contains {} contour voxels.".format(numpy.count_nonzero(data_output)))

    # save resulting volume
    save(data_output, args.output, header_input, args.force)
    
    logger.info("Successfully terminated.")    
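The contour is effectively the set difference between a dilated and an eroded copy of the binary mask. A minimal stand-alone sketch of that idea with scipy.ndimage (binary 3D mask assumed, without the per-slice handling above):

import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion

def binary_contour(mask, width=1):
    mask = np.asarray(mask, dtype=bool)
    # split the requested contour width between erosion and dilation steps
    erosions = int(np.ceil(width / 2.0))
    dilations = int(np.floor(width / 2.0))
    eroded = binary_erosion(mask, iterations=erosions) if erosions else mask
    dilated = binary_dilation(mask, iterations=dilations) if dilations else mask
    return dilated ^ eroded  # XOR keeps only the voxels that belong to the contour band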
コード例 #52
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input images
    images = []
    for input_ in args.inputs:
        t = load(input_)
        images.append((t[0], t[1]))

    # check if their shapes and voxel spacings are all equal
    s0 = images[0][0].shape
    if not numpy.all([i[0].shape == s0 for i in images[1:]]):
        raise argparse.ArgumentError(
            None,
            'At least one input image is of a different shape than the others.'
        )
    vs0 = header.get_pixel_spacing(images[0][1])
    if not numpy.all(
        [header.get_pixel_spacing(i[1]) == vs0 for i in images[1:]]):
        raise argparse.ArgumentError(
            None,
            'At least one input image has a different voxel spacing than the others.'
        )

    # execute operation
    logger.debug('Executing operation {} over {} images.'.format(
        args.operation, len(images)))
    if 'max' == args.operation:
        out = numpy.maximum.reduce([t[0] for t in images])
    elif 'min' == args.operation:
        out = numpy.minimum.reduce([t[0] for t in images])
    elif 'sum' == args.operation:
        out = numpy.sum([t[0] for t in images], 0).astype(numpy.uint8)
    else:  # avg
        out = numpy.average([t[0] for t in images], 0).astype(numpy.float32)

    # save output
    save(out, args.output, images[0][1], args.force)

    logger.info("Successfully terminated.")
コード例 #53
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image already exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning(
                'The output image {} already exists. Exiting.'.format(
                    args.output))
            exit(-1)

    # load input image
    image_smoothed_data, image_header = load(args.input)

    # apply additional hole closing step
    logger.info('Closing holes...')

    def fun_holes(arr):
        return scipy.ndimage.morphology.binary_fill_holes(arr)

    xd_iterator(image_smoothed_data, (1, 2), fun_holes)

    # set parameters
    ed_params = [(6, 9), (3, 2), (3, 2)]
    es_params = [(6, 9), (5, 2), (4, 3)]

    # apply to ED and ES with distinct parameters
    image_smoothed_data[:, :, :, :4] = morphology(
        image_smoothed_data[:, :, :, :4], ed_params, args.order, args.size)
    image_smoothed_data[:, :, :,
                        4:] = morphology(image_smoothed_data[:, :, :, 4:],
                                         es_params, args.order, args.size)

    # save resulting mask
    save(image_smoothed_data, args.output, image_header, args.force)

    logger.info('Successfully terminated.')
コード例 #54
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # loading input image
    input_data, input_header = load(args.example)

    # create empty volume with same attributes
    output_data = scipy.zeros(input_data.shape, dtype=input_data.dtype)

    # save resulting image
    save(output_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
コード例 #55
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    data_input, header_input = load(args.input)

    # empty the data if requested
    if args.empty: data_input.fill(False)

    # save resulting volume
    save(data_input, args.output, header_input, args.force)

    logger.info("Successfully terminated.")
コード例 #56
0
def truncate_hu_value(image_path, saved_folder):
    print("*** Truncating HU value to eliminate superfluous information ***")
    file_list = os.listdir(image_path)
    volume_list = []
    # Only volume data need preprocessing, liver mask is not needed.
    for item in file_list:
        if 'volume' in item:
            volume_list.append(item)

    for item in volume_list:
        img, img_header = load(image_path + '/' + item)
        img = np.clip(img, -200, 250)
        normalize_image(img)
        apply_clahe(img)
        normalize_image(img)
        img = np.array(img, dtype='int16')
        print('Saving image ' + item)
        save(img, saved_folder + '/' + item)
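normalize_image and apply_clahe are project helpers that are not shown here; one plausible, hypothetical per-slice CLAHE implementation with OpenCV (8-bit slices assumed) could look like this:

import cv2
import numpy as np

def apply_clahe_slices(volume, clip_limit=2.0, tile_grid=(8, 8)):
    # hypothetical helper: apply CLAHE slice by slice along the last axis
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid)
    volume = volume.astype(np.uint8)  # OpenCV's CLAHE expects 8-bit (or 16-bit) single-channel images
    for z in range(volume.shape[2]):
        slice_ = np.ascontiguousarray(volume[:, :, z])
        volume[:, :, z] = clahe.apply(slice_)
    return volume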
コード例 #57
0
def preprocessing_multi_patch(nii,volume,mask,resize,patch,layer):
    for i in range(131):
        volumeName='volume-'+str(i)+'.nii'
        segmentName='segmentation-'+str(i)+'.nii'

        img1, img_header1 = load(nii + volumeName)
        img2, img_header2 = load(nii + segmentName)

        img1=np.array(img1,dtype='float64')

        img1[img1<-200]=-200
        img1[img1>250]=250
        img1=((img1+200)*255//450)
        img1=np.array(img1,dtype='uint8')
        img2=np.array(img2,dtype='uint8')

        startposition,endposition=getRangImageDepth(img2)

        sub_srcimages=make_multi_patch(img1,resize,patch,layer,startposition,endposition)
        sub_truthimage=make_multi_patch(img2,resize,patch,layer,startposition,endposition)

        for ind in range(sub_truthimage.shape[3]):
            mMaxP = np.max(sub_truthimage[:, :, layer//2,ind])
            if not mMaxP == 2:
                # n=random.randint(1,100)
                # if(n<rate*100):
                #     save(sub_volume[:, :, ind], volume + 'volume-' + str(i) + '/' + str(index) + "-" + str(ind) + '.jpg')
                #     save(sub_segment[:, :, ind], mask + 'segmentation-' + str(i) + '/' + str(index) + "-" + str(ind) + '.jpg')
                continue
                pass
            sub_srcimages = np.array(sub_srcimages, dtype='uint8')
            sub_truthimage = np.array(sub_truthimage, dtype='uint8')

            for lay in range(layer):
                if not os.path.exists(volume + 'volume-' + str(i)+"-"+str(ind)):
                    os.mkdir(volume + 'volume-' + str(i)+"-"+str(ind))
                    pass

                save(sub_srcimages[:, :, lay,ind], volume + 'volume-' + str(i)+"-"+str(ind) + '/' + str(lay)+ '.jpg')
            if not os.path.exists(mask + 'segmentation-' + str(i)+"-"+str(ind)):
                os.mkdir(mask + 'segmentation-' + str(i)+"-"+str(ind))
                pass
            save(sub_truthimage[:, :,layer//2, ind], mask + 'segmentation-' + str(i)+"-"+str(ind) + '/' + str(lay)+ '.jpg')
            print("Saving image " + str(i) + " " + str(ind))
コード例 #58
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    image_smoothed_data, image_header = load(args.input)

    # perform opening resp. closing
    # in the 3D case: size 1 = 6-connectedness, 2 = 18-connectedness, 3 = 26-connectedness
    footprint = scipy.ndimage.morphology.generate_binary_structure(
        image_smoothed_data.ndim, args.size)
    if 'erosion' == args.type:
        logger.info('Applying erosion...')
        image_smoothed_data = scipy.ndimage.morphology.binary_erosion(
            image_smoothed_data, footprint, iterations=args.iterations)
    elif 'dilation' == args.type:
        logger.info('Applying dilation...')
        image_smoothed_data = scipy.ndimage.morphology.binary_dilation(
            image_smoothed_data, footprint, iterations=args.iterations)
    elif 'opening' == args.type:
        logger.info('Applying opening...')
        image_smoothed_data = scipy.ndimage.morphology.binary_opening(
            image_smoothed_data, footprint, iterations=args.iterations)
    else:  # closing
        logger.info('Applying closing...')
        image_smoothed_data = scipy.ndimage.morphology.binary_closing(
            image_smoothed_data, footprint, iterations=args.iterations)

    # apply additional hole closing step
    logger.info('Closing holes...')
    image_smoothed_data = scipy.ndimage.morphology.binary_fill_holes(
        image_smoothed_data)

    # save resulting mask
    save(image_smoothed_data, args.output, image_header, args.force)

    logger.info('Successfully terminated.')
コード例 #59
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning(
                'The output image {} already exists. Exiting.'.format(
                    args.output))
            exit(-1)

    # constants
    # the minimal edge length of a subvolume-cube ! has to be of type int!
    minimal_edge_length = 200
    overlap = 20

    # load input images
    region_image_data, reference_header = load(args.region)
    markers_image_data, _ = load(args.markers)
    gradient_image_data, _ = load(args.gradient)

    # split marker image into fg and bg images
    fgmarkers_image_data, bgmarkers_image_data = split_marker(
        markers_image_data)

    # execute distributed graph cut
    output_volume = graphcut_split(graphcut_stawiaski, region_image_data,
                                   gradient_image_data, fgmarkers_image_data,
                                   bgmarkers_image_data, minimal_edge_length,
                                   overlap)

    # save resulting mask
    save(output_volume, args.output, reference_header, args.force)

    logger.info('Successfully terminated.')
コード例 #60
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input
    data_input, header_input = load(args.image)
    
    # change pixel spacing
    logger.info('Setting pixel spacing of the image (shape {}) to {}...'.format(data_input.shape, args.spacing))
    header.set_pixel_spacing(header_input, args.spacing)
    
    # save file
    save(data_input.copy(), args.image, header_input, True)
    
    logger.info("Successfully terminated.")