Example #1
def main():
    parser = getParser()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # loading input images
    img, hdr = load(args.input)
    img = img.astype(numpy.bool)

    # check spacing values
    if not len(args.spacing) == img.ndim:
        parser.error(
            'The image has {} dimensions, but {} spacing parameters have been supplied.'
            .format(img.ndim, len(args.spacing)))

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            parser.error('The output image {} already exists.'.format(
                args.output))

    logger.debug('target voxel spacing: {}'.format(args.spacing))

    # determine number of required complete slices for up-sampling
    vs = header.get_pixel_spacing(hdr)
    rcss = [
        int(y // x - 1) for x, y in zip(args.spacing, vs)
    ]  # TODO: For option b, remove the - 1; better: no option b, since I am rounding later anyway

    # remove negatives and round up to next even number
    rcss = [x if x > 0 else 0 for x in rcss]
    rcss = [x if 0 == x % 2 else x + 1 for x in rcss]
    logger.debug('intermediate slices to add per dimension: {}'.format(rcss))

    # for each dimension requiring up-sampling, from the highest down, perform shape based slice interpolation
    logger.info('Adding required slices using shape based interpolation.')
    for dim, rcs in enumerate(rcss):
        if rcs > 0:
            logger.debug(
                'adding {} intermediate slices to dimension {}'.format(
                    rcs, dim))
            img = shape_based_slice_interpolation(img, dim, rcs)
            logger.debug('resulting new image shape: {}'.format(img.shape))

    # compute and set new voxel spacing
    nvs = [x / (y + 1.) for x, y in zip(vs, rcss)]
    header.set_pixel_spacing(hdr, nvs)
    logger.debug('intermediate voxel spacing: {}'.format(nvs))

    # interpolate with nearest neighbour
    logger.info('Re-sampling the image with a b-spline order of {}.'.format(
        args.order))
    img, hdr = resample(img, hdr, args.spacing, args.order, mode='nearest')

    # saving the resulting image
    save(img, args.output, hdr, args.force)
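Note: a small worked sketch of the slice-count arithmetic in the example above, using purely hypothetical spacings (current spacing 5.0 mm, target 1.0 mm).

current_spacing, target_spacing = 5.0, 1.0
rcs = int(current_spacing // target_spacing - 1)  # 4 intermediate slices required
rcs = rcs if rcs > 0 else 0                       # remove negatives
rcs = rcs if 0 == rcs % 2 else rcs + 1            # round up to the next even number
new_spacing = current_spacing / (rcs + 1.)        # intermediate spacing after interpolation
print('{} -> {}'.format(rcs, new_spacing))        # 4 -> 1.0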
Example #2
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning("The output image {} already exists. Exiting.".format(args.output))
            exit(-1)

    # load input image
    input_data, input_header = load(args.input)

    logger.debug("Old number of regions={}.".format(len(scipy.unique(input_data))))

    # cut and relabel along the required dimension
    logger.info("Cutting and relabeling...")
    dimensions = range(input_data.ndim)
    del dimensions[args.dimension]
    __split_along(input_data, dimensions)

    logger.debug("New number of regions={}.".format(len(scipy.unique(input_data))))

    # save result contour volume
    save(input_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Example #3
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning(
                'The output image {} already exists. Exiting.'.format(
                    args.output))
            exit(-1)

    # load input image
    input_data, input_header = load(args.input)

    logger.debug('Old number of regions={}.'.format(
        len(scipy.unique(input_data))))

    # cut and relabel along the required dimension
    logger.info('Cutting and relabeling...')
    dimensions = range(input_data.ndim)
    del dimensions[args.dimension]
    __split_along(input_data, dimensions)

    logger.debug('New number of regions={}.'.format(
        len(scipy.unique(input_data))))

    # save result contour volume
    save(input_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Example #4
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    with open(args.output, 'w') as outf:
        outf.write('C;;E;;D;;ED;(std);ES;(std);Total;(std)')
        with open(args.input, 'r') as inf:
            for line in inf.readlines():
                line = line.strip()
                if 'ced' == line[0:3]:
                    _tmp = line.split('_')[1:]
                    _c = _tmp[0].split('-')
                    _e = _tmp[1].split('-')
                    _d = _tmp[2].split('-')
                    outf.write('\n')
                    outf.write('{};{};{};{};{};{};'.format(
                        _c[0][1:], _c[1], _e[0][1:], _e[1], _d[0][1:], _d[1]))
                elif 'Mean (std) endo DM' in line:
                    _tmp = line.split(' ')
                    outf.write('{};{};{};{};'.format(_tmp[5], _tmp[6], _tmp[8],
                                                     _tmp[9]))
                elif 'Total mean (std) endo DM' in line:
                    _tmp = line.split(' ')
                    outf.write('{};{}'.format(_tmp[5], _tmp[6]))

    logger.info("Successfully terminated.")
Example #5
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load first input volume as example
    input_data, input_header = load(args.inputs[0])

    # create target array
    output_data = scipy.zeros(input_data.shape, scipy.float32)

    # add first input mask
    output_data += input_data

    # iterate over input masks, load them and add them to the output volume
    logger.info('Iterating over input masks...')
    for input_name in args.inputs[1:]:
        input_data, _ = load(input_name)
        output_data += input_data

    # divide by number of input files
    output_data /= float(len(args.inputs))

    logger.debug('Max and min values in created atlas are {} and {}.'.format(
        output_data.max(), output_data.min()))

    # save created image
    logger.info('Saving atlas...')
    save(output_data, args.output, input_header, args.force)
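Note: a tiny sketch of the averaging step above on two toy 2x2 masks (not real data): summing N binary masks and dividing by N yields per-voxel overlap fractions in [0, 1].

import numpy

masks = [numpy.array([[0, 1], [1, 1]], numpy.float32),
         numpy.array([[0, 0], [1, 1]], numpy.float32)]
atlas = numpy.zeros(masks[0].shape, numpy.float32)
for mask in masks:
    atlas += mask
atlas /= float(len(masks))
print(atlas)  # [[ 0.   0.5] [ 1.   1. ]]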
Example #6
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # write header line
    print 'image;labels\n'

    # iterate over input images
    for image in args.images:

        # get and prepare image data
        logger.info('Processing image {}...'.format(image))
        image_data, _ = load(image)

        # count number of labels and flag a warning if they reach the ushort border
        count = len(numpy.unique(image_data))

        # write image name and label count
        print '{};{}\n'.format(image.split('/')[-1], count)

        sys.stdout.flush()

    logger.info('Successfully terminated.')
Example #7
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # check if output image exists (will also be performed before saving, but as the watershed might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError('The output image {} already exists.'.format(args.output))
    
    # loading image
    data_input, header_input = load(args.input)
    
    # apply the watershed
    logger.info('Watershedding with settings: thr={} / level={}...'.format(args.threshold, args.level))
    data_output = watershed(data_input, get_pixel_spacing(header_input), args.threshold, args.level)

    # save file
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Example #8
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image already exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Exiting.'.format(args.output))
            exit(-1)

    # load input image
    image_smoothed_data, image_header = load(args.input)
        
    # apply additional hole closing step
    logger.info('Closing holes...')
    def fun_holes(arr):
        return scipy.ndimage.morphology.binary_fill_holes(arr)
    xd_iterator(image_smoothed_data, (1, 2), fun_holes)
        
    # perform the selected morphological operation (erosion, dilation, opening or closing)
    # in the 3D case: size 1 = 6-connectedness (faces), 2 = 18-connectedness (faces + edges), 3 = 26-connectedness (full neighbourhood)
    if 'erosion' == args.type:
        logger.info('Applying erosion...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_erosion(arr, footprint, iterations=args.iterations)
    elif 'dilation' == args.type:
        logger.info('Applying dilation...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_dilation(arr, footprint, iterations=args.iterations)
    elif 'opening' == args.type:
        logger.info('Applying opening...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_opening(arr, footprint, iterations=args.iterations)
    else: # closing
        logger.info('Applying closing...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_closing(arr, footprint, iterations=args.iterations)

    # iterate over slices and apply selected operation
    xd_iterator(image_smoothed_data, (1, 2), fun)

    # save resulting mask
    save(image_smoothed_data, args.output, image_header, args.force)
            
    logger.info('Successfully terminated.')
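Note: a minimal sketch (assuming standard scipy.ndimage semantics) of how the size argument above maps to neighbourhood connectivity in 3D via generate_binary_structure.

import scipy.ndimage

for connectivity in (1, 2, 3):
    footprint = scipy.ndimage.generate_binary_structure(3, connectivity)
    neighbours = int(footprint.sum()) - 1  # exclude the centre voxel
    print('connectivity {} -> {} neighbours'.format(connectivity, neighbours))
# -> 6, 18 and 26 neighbours respectively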
Example #9
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image exists (will also be performed before saving, but as the smoothing might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            parser.error('The output image {} already exists.'.format(
                args.output))

    # loading image
    data_input, header_input = load(args.input)

    # apply the anisotropic diffusion
    logger.info(
        'Applying anisotropic diffusion with settings: niter={} / kappa={} / gamma={}...'
        .format(args.iterations, args.kappa, args.gamma))
    data_output = anisotropic_diffusion(data_input, args.iterations,
                                        args.kappa, args.gamma,
                                        get_pixel_spacing(header_input))

    # save file
    save(data_output, args.output, header_input, args.force)

    logger.info('Successfully terminated.')
Example #10
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input images
    input_data, input_header = load(args.input)
    original_data, _ = load(args.original)
    
    logger.debug('Old shape={}.'.format(input_data.shape))
    
    # compute position
    logger.info('Computing position and padding volume...')
    position = __parse_contour_list(args.contours, input_data)
    
    # pad volume
    output_data = scipy.zeros(original_data.shape, input_data.dtype)
    output_data[position] = input_data
    
    
    logger.debug('New shape={}.'.format(output_data.shape))
    
    # save result contour volume
    save(output_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Example #11
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # loading input images
    b0img, b0hdr = load(args.b0image)
    bximg, bxhdr = load(args.bximage)

    # check if images are compatible
    if not b0img.shape == bximg.shape:
        raise ArgumentError(
            'The input images shapes differ i.e. {} != {}.'.format(
                b0img.shape, bximg.shape))
    if not header.get_pixel_spacing(b0hdr) == header.get_pixel_spacing(bxhdr):
        raise ArgumentError(
            'The input images voxel spacing differs i.e. {} != {}.'.format(
                header.get_pixel_spacing(b0hdr),
                header.get_pixel_spacing(bxhdr)))

    # check that the supplied threshold value and the b-value are above 0
    if args.threshold is not None and not args.threshold >= 0:
        raise ArgumentError(
            'The supplied threshold value must be greater than 0, otherwise a division through 0 might occur.'
        )
    if not args.b > 0:
        raise ArgumentError('The supplied b-value must be greater than 0.')

    # compute threshold value if not supplied
    if args.threshold is None:
        b0thr = otsu(b0img, 32) / 2.  # divide by 2 to decrease impact
        bxthr = otsu(bximg, 32) / 2.
        if 0 >= b0thr:
            raise ArgumentError(
                'The supplied b0image seems to contain negative values.')
        if 0 >= bxthr:
            raise ArgumentError(
                'The supplied bximage seems to contain negative values.')
    else:
        b0thr = bxthr = args.threshold

    logger.debug('thresholds={}/{}, b-value={}'.format(b0thr, bxthr, args.b))

    # threshold b0 + bx DW image to obtain a mask
    # the b0 mask avoids division by zero, the bx mask avoids a zero in the ln(x) computation
    mask = (b0img > b0thr) & (bximg > bxthr)

    logger.debug(
        'excluding {} of {} voxels from the computation and setting them to zero'
        .format(scipy.prod(mask.shape) - scipy.count_nonzero(mask),
                scipy.prod(mask.shape)))

    # compute the ADC
    adc = scipy.zeros(b0img.shape, b0img.dtype)
    adc[mask] = -1. * args.b * scipy.log(bximg[mask] / b0img[mask])

    # saving the resulting image
    save(adc, args.output, b0hdr, args.force)
Example #12
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image1
    data_input1, _ = load(args.input1)
    
    # load input image2
    data_input2, _ = load(args.input2)
    
    # compare dtype and shape
    if not data_input1.dtype == data_input2.dtype: print 'Dtype differs: {} to {}'.format(data_input1.dtype, data_input2.dtype)
    if not data_input1.shape == data_input2.shape:
        print 'Shape differs: {} to {}'.format(data_input1.shape, data_input2.shape)
        print 'The voxel content of images of different shape can not be compared. Exiting.'
        sys.exit(-1)
    
    # compare image data
    voxel_total = reduce(lambda x, y: x*y, data_input1.shape)
    voxel_difference = len((data_input1 != data_input2).nonzero()[0])
    if not 0 == voxel_difference:
        print 'Voxel differ: {} of {} total voxels'.format(voxel_difference, voxel_total)
        print 'Max difference: {}'.format(scipy.absolute(data_input1 - data_input2).max())
    else: print 'No other difference.'
    
    logger.info("Successfully terminated.")    
Example #13
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    logger.debug('Original shape = {}.'.format(data_input.shape))
    
    # check if the supplied dimension parameters are inside the image's dimensions
    if args.dimension1 >= data_input.ndim or args.dimension1 < 0:
        raise ArgumentError('The first swap-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension1, data_input.ndim))
    elif args.dimension2 >= data_input.ndim or args.dimension2 < 0:
        raise ArgumentError('The second swap-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension2, data_input.ndim))
    
    # swap axes
    data_output = scipy.swapaxes(data_input, args.dimension1, args.dimension2)
    # swap pixel spacing and offset
    ps = list(header.get_pixel_spacing(header_input))
    ps[args.dimension1], ps[args.dimension2] = ps[args.dimension2], ps[args.dimension1]
    header.set_pixel_spacing(header_input, ps)
    offset = list(header.get_offset(header_input))
    offset[args.dimension1], offset[args.dimension2] = offset[args.dimension2], offset[args.dimension1]
    header.set_offset(header_input, offset)
    
    logger.debug('Resulting shape = {}.'.format(data_output.shape))
    
    # save resulting volume
    save(data_output, args.output, header_input, args.force)
    
    logger.info("Successfully terminated.")    
Example #14
def __init__(self, image_labels, image_original):
    """
    Computes a number of statistics for the labels of a label image.
    These include among others:
    1. a histogram of the sizes of the regions
    2. a histogram of the sphericity (not roundness) of the regions
    3. a histogram of how the intensity distribution in each region differs
    from a Gaussian distribution

    @param image_labels: The label image for which the statistics should be
                         computed as a numpy array
    @param image_original: The original image for which the label image was created.
    """
    if image_labels.shape != image_original.shape:
        raise ValueError('The input images must be of the same shape.')

    if not 3 == len(image_labels.shape):
        raise ValueError('Currently this class is only working with 3D images.')

    # prepare logger
    self._logger = Logger.getInstance()

    self._image_labels = image_labels
    self._image_original = image_original

    self._compute()
Example #15
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)  
    
    # constants
    colours = {'i': 10, 'o': 11}
    
    # load volumes
    marker_data, _ = load(args.marker)
    contour_data, _ = load(args.contour)
    
    # perform check
    contour_data = contour_data == colours[args.type]
    marker_data_fg = marker_data == 1
    marker_data_bg = marker_data == 2
    if scipy.logical_and(contour_data, marker_data_fg).any():
        logger.warning('Intersection between {} and {} (type {}) in foreground.'.format(args.marker, args.contour, args.type))
    elif scipy.logical_and(contour_data, marker_data_bg).any():
        logger.warning('Intersection between {} and {} (type {}) in background.'.format(args.marker, args.contour, args.type))
    else:
        print "No intersection."
Example #16
def main():
    args = getArguments(getParser())
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    with open(args.output, 'w') as outf:        
        outf.write('C;;E;;D;;ED;(std);ES;(std);Total;(std)')        
        with open(args.input, 'r') as inf:            
            for line in inf.readlines():                
                line = line.strip()                
                if 'ced' == line[0:3]:
                    _tmp = line.split('_')[1:]
                    _c = _tmp[0].split('-')
                    _e = _tmp[1].split('-')
                    _d = _tmp[2].split('-')
                    outf.write('\n')
                    outf.write('{};{};{};{};{};{};'.format(_c[0][1:], _c[1], _e[0][1:], _e[1], _d[0][1:], _d[1]))
                elif 'Mean (std) endo DM' in line:
                    _tmp = line.split(' ')
                    outf.write('{};{};{};{};'.format(_tmp[5], _tmp[6], _tmp[8], _tmp[9]))
                elif 'Total mean (std) endo DM' in line:
                    _tmp = line.split(' ')
                    outf.write('{};{}'.format(_tmp[5], _tmp[6]))
    
    logger.info("Successfully terminated.")    
Example #17
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # constants
    colours = {'i': 10, 'o': 11}

    # load volumes
    marker_data, _ = load(args.marker)
    contour_data, _ = load(args.contour)

    # perform check
    contour_data = contour_data == colours[args.type]
    marker_data_fg = marker_data == 1
    marker_data_bg = marker_data == 2
    if scipy.logical_and(contour_data, marker_data_fg).any():
        logger.warning(
            'Intersection between {} and {} (type {}) in foreground.'.format(
                args.marker, args.contour, args.type))
    elif scipy.logical_and(contour_data, marker_data_bg).any():
        logger.warning(
            'Intersection between {} and {} (type {}) in background.'.format(
                args.marker, args.contour, args.type))
    else:
        print "No intersection."
Example #18
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    # transform to uint8
    data_input = data_input.astype(scipy.uint8)
                                      
    # reduce to 3D, if larger dimensionality
    if data_input.ndim > 3:
        for _ in range(data_input.ndim - 3): data_input = data_input[...,0]
        
    # iterate over slices (2D) until the first one with content is detected
    for plane in data_input:
        if scipy.any(plane):
            # set pixel spacing
            spacing = list(header.get_pixel_spacing(header_input))
            spacing = spacing[1:3]
            __update_header_from_array_nibabel(header_input, plane)
            header.set_pixel_spacing(header_input, spacing)
            # save image
            save(plane, args.output, header_input, args.force)
            break
    
    logger.info("Successfully terminated.")    
Example #19
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image using nibabel
    logger.info('Loading image {}...'.format(args.image))
    image_labels_data, _ = load(args.image)    
    
    # load mask image
    logger.info('Loading mask {}...'.format(args.mask))
    image_mask_data, image_mask_data_header = load(args.mask)
    
    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Skipping this image.'.format(args.output))
    
    # create a mask from the label image
    logger.info('Reducing the label image...')
    image_reduced_data = fit_labels_to_mask(image_labels_data, image_mask_data)
    
    # save resulting mask
    logger.info('Saving resulting mask as {} in the same format as input mask, only with data-type int8...'.format(args.output))
    image_reduced_data = image_reduced_data.astype(numpy.bool, copy=False) # bool sadly not recognized
    save(image_reduced_data, args.output, image_mask_data_header, args.force)
    
    logger.info('Successfully terminated.')
Example #20
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    data_input, header_input = load(args.input)

    #    # check if output image exists
    #    if not args.force:
    #        if os.path.exists(image_gradient_name):
    #            logger.warning('The output image {} already exists. Skipping this step.'.format(image_gradient_name))
    #            continue

    # prepare result image
    data_output = scipy.zeros(data_input.shape, dtype=scipy.float32)

    # apply the gradient magnitude filter
    logger.info('Computing the gradient magnitude with Prewitt operator...')
    generic_gradient_magnitude(
        data_input, prewitt,
        output=data_output)  # alternative to prewitt is sobel

    # save resulting mask
    save(data_output, args.output, header_input, args.force)

    logger.info('Successfully terminated.')
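Note: a minimal runnable sketch of the Prewitt-based gradient magnitude used above, including the sobel alternative mentioned in the comment, applied to a toy impulse image.

import numpy
from scipy.ndimage import generic_gradient_magnitude, prewitt, sobel

data = numpy.zeros((5, 5), dtype=numpy.float32)
data[2, 2] = 1.0  # single impulse
print(generic_gradient_magnitude(data, prewitt).max())
print(generic_gradient_magnitude(data, sobel).max())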
Example #21
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # collect slice-wise results and compute average
    results_i, results_o = parseImageResults(args.score)
    results_i = splitResults(results_i)
    results_o = splitResults(results_o)

    # print results
    if args.csv:
        print 'Inner contours'
        csvPrintResults(results_i)
        print 'Outer contours'
        csvPrintResults(results_o)
    else:
        print "########## Inner contours ##########"
        prettyPrintResults(results_i)
        print "####################################"
        print

        print "########## Outer contours ##########"
        prettyPrintResults(results_o)
        print "####################################"
Example #22
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # check if output image exists (will also be performed before saving, but as the gradient might be time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError('The output image {} already exists.'.format(args.output))        
        
    # loading image
    data_input, header_input = load(args.input)
    
    logger.debug('Input array: dtype={}, shape={}'.format(data_input.dtype, data_input.shape))
    
    # execute the gradient map filter
    logger.info('Applying gradient map filter...')
    data_output = filter.gradient_magnitude(data_input, header.get_pixel_spacing(header_input))
        
    logger.debug('Resulting array: dtype={}, shape={}'.format(data_output.dtype, data_output.shape))
    
    # save image
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Example #23
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image exists (will also be performed before saving, but as the gradient might be time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError('The output image {} already exists.'.format(
                args.output))

    # loading image
    data_input, header_input = load(args.input)

    logger.debug('Input array: dtype={}, shape={}'.format(
        data_input.dtype, data_input.shape))

    # execute the gradient map filter
    logger.info('Applying gradient map filter...')
    data_output = filter.gradient_magnitude(
        data_input, header.get_pixel_spacing(header_input))

    logger.debug('Resulting array: dtype={}, shape={}'.format(
        data_output.dtype, data_output.shape))

    # save image
    save(data_output, args.output, header_input, args.force)

    logger.info('Successfully terminated.')
Example #24
def zoom(image, factor, dimension, hdr = False, order = 3):
    """
    Zooms the provided image by the supplied factor in the supplied dimension.
    The factor is an integer determining how many slices should be put between each
    existing pair.
    If an image header (hdr) is supplied, its voxel spacing gets updated.
    Returns the image and the updated header or false.
    """
    # check if supplied dimension is valid
    if dimension >= image.ndim:
        raise argparse.ArgumentError('The supplied zoom-dimension {} exceeds the image dimensionality of 0 to {}.'.format(dimension, image.ndim - 1))
    
    # get logger
    logger = Logger.getInstance()

    logger.debug('Old shape = {}.'.format(image.shape))

    # perform the zoom
    zoom = [1] * image.ndim
    zoom[dimension] = (image.shape[dimension] + (image.shape[dimension] - 1) * factor) / float(image.shape[dimension])
    logger.debug('Reshaping with = {}.'.format(zoom))
    image = interpolation.zoom(image, zoom, order=order)
        
    logger.debug('New shape = {}.'.format(image.shape))
    
    if hdr:
        new_spacing = list(header.get_pixel_spacing(hdr))
        new_spacing[dimension] = new_spacing[dimension] / float(factor + 1)
        logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(hdr), new_spacing))
        header.set_pixel_spacing(hdr, tuple(new_spacing))
    
    return image, hdr
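Note: a self-contained sketch (toy volume, plain scipy.ndimage) of the zoom-factor arithmetic used above: inserting factor slices between every existing pair turns n slices into n + (n - 1) * factor.

import numpy
import scipy.ndimage

image = numpy.zeros((10, 64, 64), dtype=numpy.float32)
factor, dimension = 2, 0
zoom_factors = [1] * image.ndim
zoom_factors[dimension] = (image.shape[dimension] + (image.shape[dimension] - 1) * factor) / float(image.shape[dimension])
zoomed = scipy.ndimage.zoom(image, zoom_factors, order=3)
print(zoomed.shape)  # (28, 64, 64), i.e. 10 + (10 - 1) * 2 slices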
Example #25
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # collect slice-wise results and compute average
    results_i, results_o = parseImageResults(args.score)
    results_i = splitResults(results_i)
    results_o = splitResults(results_o)

    # print results
    if args.csv:
        print "Inner contours"
        csvPrintResults(results_i)
        print "Outer contours"
        csvPrintResults(results_o)
    else:
        print "########## Inner contours ##########"
        prettyPrintResults(results_i)
        print "####################################"
        print

        print "########## Outer contours ##########"
        prettyPrintResults(results_o)
        print "####################################"
Example #26
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists (will also be performed before saving, but as the watershed might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError("The output image {} already exists.".format(args.output))

    # loading image
    data_input, header_input = load(args.input)

    # apply the watershed
    logger.info("Watershedding with settings: thr={} / level={}...".format(args.threshold, args.level))
    data_output = watershed(data_input, get_pixel_spacing(header_input), args.threshold, args.level)

    # save file
    save(data_output, args.output, header_input, args.force)

    logger.info("Successfully terminated.")
Example #27
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input images
    input_data, input_header = load(args.input)
    original_data, _ = load(args.original)

    logger.debug('Old shape={}.'.format(input_data.shape))

    # compute position
    logger.info('Computing position and padding volume...')
    position = __parse_contour_list(args.contours, input_data)

    # pad volume
    output_data = scipy.zeros(original_data.shape, input_data.dtype)
    output_data[position] = input_data

    logger.debug('New shape={}.'.format(output_data.shape))

    # save result contour volume
    save(output_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Example #28
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image using nibabel
    logger.info('Loading image {}...'.format(args.image))
    image_labels_data, _ = load(args.image)    
    
    # load mask image
    logger.info('Loading mask {}...'.format(args.mask))
    image_mask_data, image_mask_data_header = load(args.mask)
    
    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Skipping this image.'.format(args.output))
    
    # create a mask from the label image
    logger.info('Reducing the label image...')
    image_reduced_data = fit_labels_to_mask(image_labels_data, image_mask_data)
    
    # save resulting mask
    logger.info('Saving resulting mask as {} in the same format as input mask, only with data-type int8...'.format(args.output))
    image_reduced_data = image_reduced_data.astype(numpy.bool, copy=False) # bool sadly not recognized
    save(image_reduced_data, args.output, image_mask_data_header, args.force)
    
    logger.info('Successfully terminated.')
Example #29
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
#    # check if output image exists
#    if not args.force:
#        if os.path.exists(image_gradient_name):
#            logger.warning('The output image {} already exists. Skipping this step.'.format(image_gradient_name))
#            continue        
        
    # prepare result image
    data_output = scipy.zeros(data_input.shape, dtype=scipy.float32)
        
    # apply the gradient magnitude filter
    logger.info('Computing the gradient magnitude with Prewitt operator...')
    generic_gradient_magnitude(data_input, prewitt, output=data_output) # alternative to prewitt is sobel
        
    # save resulting mask
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Example #30
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # check if output image exists (will also be performed before saving, but as the smoothing might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            parser.error('The output image {} already exists.'.format(args.output))
    
    # loading image
    data_input, header_input = load(args.input)
    
    # apply the anisotropic diffusion
    logger.info('Applying anisotropic diffusion with settings: niter={} / kappa={} / gamma={}...'.format(args.iterations, args.kappa, args.gamma))
    data_output = anisotropic_diffusion(data_input, args.iterations, args.kappa, args.gamma, get_pixel_spacing(header_input))

    # save file
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Example #31
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # write header line
    print('image;labels\n')
    
    # iterate over input images
    for image in args.images:
        
        # get and prepare image data
        logger.info('Processing image {}...'.format(image))
        image_data, _ = load(image)
        
        # count number of labels and flag a warning if they reach the ushort border
        count = len(numpy.unique(image_data)) 
        
        # write image name and label count
        print('{};{}\n'.format(image.split('/')[-1], count))
        
        sys.stdout.flush()
            
    logger.info('Successfully terminated.')
Example #32
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    input_data, input_header = load(args.input)
    
    logger.debug('Old shape={}.'.format(input_data.shape))
    
    # compute cut
    logger.info('Computing cut and cropping volume...')
    cut = __parse_contour_list(args.contours, input_data)
    # crop volume
    input_data = input_data[cut]
    
    logger.debug('New shape={}.'.format(input_data.shape))
    
    # save result contour volume
    save(input_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Example #33
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    # transform to uint8
    data_input = data_input.astype(scipy.uint8)
                                      
    # reduce to 3D, if larger dimensionality
    if data_input.ndim > 3:
        for _ in range(data_input.ndim - 3): data_input = data_input[...,0]
        
    # iterate over slices (2D) until the first one with content is detected
    for plane in data_input:
        if scipy.any(plane):
            # set pixel spacing
            spacing = list(header.get_pixel_spacing(header_input))
            spacing = spacing[1:3]
            __update_header_from_array_nibabel(header_input, plane)
            header.set_pixel_spacing(header_input, spacing)
            # save image
            save(plane, args.output, header_input, args.force)
            break
    
    logger.info("Successfully terminated.")    
Example #34
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force and os.path.exists(args.output):
        logger.warning('The output image {} already exists. Exiting.'.format(
            args.output))
        exit(-1)

    # load input data
    input_data, input_header = load(args.input)

    # if normal mode, perform the zoom
    logger.info('Performing normal zoom...')
    output_data, output_header = zoom(input_data,
                                      args.enhancement,
                                      args.dimension,
                                      hdr=input_header)

    # saving results
    save(output_data, args.output, output_header, args.force)
Example #35
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    input_data, input_header = load(args.input)

    logger.debug('Old shape={}.'.format(input_data.shape))

    # compute cut
    logger.info('Computing cut and cropping volume...')
    cut = __parse_contour_list(args.contours, input_data)
    # crop volume
    input_data = input_data[cut]

    logger.debug('New shape={}.'.format(input_data.shape))

    # save result contour volume
    save(input_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Example #36
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # load input image
    logger.info('Loading {}...'.format(args.image))
    image_data, image_header = load(args.image)
    
    # check if the supplied cut dimension is inside the input image's dimensions
    if args.dimension < 0 or args.dimension >= image_data.ndim:
        logger.critical('The supplied cut-dimension {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
        raise ArgumentError('The supplied cut-dimension {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
    
    # prepare output filenames
    name_output = args.output.replace('{}', '{:03d}')
    
    # determine cut lines
    no_sub_volumes = image_data.shape[args.dimension] / args.maxsize + 1 # int-division is desired
    slices_per_volume = image_data.shape[args.dimension] / no_sub_volumes # int-division is desired
    
    # construct processing dict for each sub-volume
    processing_array = []
    for i in range(no_sub_volumes):
        processing_array.append(
            {'path': name_output.format(i+1),
             'cut': (i * slices_per_volume, (i + 1) * slices_per_volume)})
        if no_sub_volumes - 1 == i: # last volume has to have increased cut end
            processing_array[i]['cut'] = (processing_array[i]['cut'][0], image_data.shape[args.dimension])

    # construct base indexing list
    index = [slice(None) for _ in range(image_data.ndim)]
    
    # execute extraction of the sub-volumes
    logger.info('Extracting sub-volumes...')
    for dic in processing_array:
        # check if output images exists
        if not args.force:
            if os.path.exists(dic['path']):
                logger.warning('The output file {} already exists. Skipping this volume.'.format(dic['path']))
                continue
        
        # extracting sub-volume
        index[args.dimension] = slice(dic['cut'][0], dic['cut'][1])
        volume = image_data[index]
        
        logger.debug('Extracted volume is of shape {}.'.format(volume.shape))
        
        # saving sub-volume in same format as input image
        logger.info('Saving cut {} as {}...'.format(dic['cut'], dic['path']))
        save(volume, dic['path'], image_header, args.force)
        
    logger.info('Successfully terminated.')
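Note: a worked sketch of the cut computation in the example above, with hypothetical numbers (a dimension of length 100 split with --maxsize 30).

length, maxsize = 100, 30
no_sub_volumes = length // maxsize + 1        # 4 sub-volumes
slices_per_volume = length // no_sub_volumes  # 25 slices per sub-volume
cuts = [(i * slices_per_volume, (i + 1) * slices_per_volume) for i in range(no_sub_volumes)]
cuts[-1] = (cuts[-1][0], length)              # last cut absorbs the remainder
print(cuts)  # [(0, 25), (25, 50), (50, 75), (75, 100)]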
Example #37
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images
    b0img, b0hdr = load(args.b0image)
    bximg, bxhdr = load(args.bximage)
    
    # convert to float
    b0img = b0img.astype(numpy.float)
    bximg = bximg.astype(numpy.float)

    # check if images are compatible
    if not b0img.shape == bximg.shape:
        raise ArgumentError('The input images shapes differ i.e. {} != {}.'.format(b0img.shape, bximg.shape))
    if not header.get_pixel_spacing(b0hdr) == header.get_pixel_spacing(bxhdr):
        raise ArgumentError('The input images voxel spacing differs i.e. {} != {}.'.format(header.get_pixel_spacing(b0hdr), header.get_pixel_spacing(bxhdr)))
    
    # check that the supplied threshold value and the b-value are above 0
    if args.threshold is not None and not args.threshold >= 0:
        raise ArgumentError('The supplied threshold value must be greater than 0, otherwise a division through 0 might occur.')
    if not args.b > 0:
        raise ArgumentError('The supplied b-value must be greater than 0.')
    
    # compute threshold value if not supplied
    if args.threshold is None:
        b0thr = otsu(b0img, 32) / 4. # divide by 4 to decrease impact
        bxthr = otsu(bximg, 32) / 4.
        if 0 >= b0thr:
            raise ArgumentError('The supplied b0image seems to contain negative values.')
        if 0 >= bxthr:
            raise ArgumentError('The supplied bximage seems to contain negative values.')
    else:
        b0thr = bxthr = args.threshold
    
    logger.debug('thresholds={}/{}, b-value={}'.format(b0thr, bxthr, args.b))
    
    # threshold b0 + bx DW image to obtain a mask
    # the b0 mask avoids division by zero, the bx mask avoids a zero in the ln(x) computation
    mask = binary_fill_holes(b0img > b0thr) & binary_fill_holes(bximg > bxthr)
    
    # perform a number of binary morphology steps to select the brain only
    mask = binary_erosion(mask, iterations=1)
    mask = largest_connected_component(mask)
    mask = binary_dilation(mask, iterations=1)
    
    logger.debug('excluding {} of {} voxels from the computation and setting them to zero'.format(numpy.prod(mask.shape) - numpy.count_nonzero(mask), numpy.prod(mask.shape)))
    
    # compute the ADC
    adc = numpy.zeros(b0img.shape, b0img.dtype)
    adc[mask] = -1. * args.b * numpy.log(bximg[mask] / b0img[mask])
    adc[adc < 0] = 0
            
    # saving the resulting image
    save(adc, args.output, b0hdr, args.force)
Example #38
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # load input image
    logger.info('Loading {}...'.format(args.image))
    image_data, image_header = load(args.image)
    
    # check if the supplied cut dimension is inside the input image's dimensions
    if args.dimension < 0 or args.dimension >= image_data.ndim:
        logger.critical('The supplied cut-dimension {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
        raise ArgumentError('The supplied cut-dimension {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
    
    # prepare output filenames
    name_output = args.output.replace('{}', '{:03d}')
    
    # determine cut lines
    no_sub_volumes = image_data.shape[args.dimension] / args.maxsize + 1 # int-division is desired
    slices_per_volume = image_data.shape[args.dimension] / no_sub_volumes # int-division is desired
    
    # construct processing dict for each sub-volume
    processing_array = []
    for i in range(no_sub_volumes):
        processing_array.append(
            {'path': name_output.format(i+1),
             'cut': (i * slices_per_volume, (i + 1) * slices_per_volume)})
        if no_sub_volumes - 1 == i: # last volume has to have increased cut end
            processing_array[i]['cut'] = (processing_array[i]['cut'][0], image_data.shape[args.dimension])

    # construct base indexing list
    index = [slice(None) for _ in range(image_data.ndim)]
    
    # execute extraction of the sub-volumes
    logger.info('Extracting sub-volumes...')
    for dic in processing_array:
        # check if output images exists
        if not args.force:
            if os.path.exists(dic['path']):
                logger.warning('The output file {} already exists. Skipping this volume.'.format(dic['path']))
                continue
        
        # extracting sub-volume
        index[args.dimension] = slice(dic['cut'][0], dic['cut'][1])
        volume = image_data[index]
        
        logger.debug('Extracted volume is of shape {}.'.format(volume.shape))
        
        # saving sub-volume in same format as input image
        logger.info('Saving cut {} as {}...'.format(dic['cut'], dic['path']))
        save(volume, dic['path'], image_header, args.force)
        
    logger.info('Successfully terminated.')
Example #39
def main():
    # parse cmd arguments
    parser = getParser()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    logger.info("Loading testbench and preparing images...")
    original, label, bounding_boxes, model_fg_ids, eval_ids, truth_fg, truth_bg = __load(
        args.testbench, args.original, args.label
    )

    logger.debug("Of the {} eval regions, {} are fg and {} bg.".format(len(eval_ids), len(truth_fg), len(truth_bg)))

    for coarseness in args.coarseness:
        logger.info("Filtering image with coarseness (sigma) of {}...".format(coarseness))
        liver_img_filtered = scipy.zeros(original.shape, dtype=scipy.float32)
        scipy.ndimage.filters.gaussian_laplace(original, coarseness, liver_img_filtered)

        logger.info("Extract foreground (liver) features...")
        features_liver = __create_liver_model(model_fg_ids, bounding_boxes, label, original)

        logger.info("Collect features extracted from the foreground regions...")
        features_fg = [[] for _ in range(3)]  # three features
        for rid in truth_fg:
            features_region = __create_region_model(rid, bounding_boxes, label, original)
            for fid in range(len(features_region)):
                features_fg[fid].append(features_region[fid])

        logger.info("Collect features extracted from the background regions...")
        features_bg = [[] for _ in range(len(features_liver))]
        for rid in truth_bg:
            features_region = __create_region_model(rid, bounding_boxes, label, original)
            for fid in range(len(features_region)):
                features_bg[fid].append(features_region[fid])

        logger.info("Write results to files (~log_coarseness_{}_feature_?.dat)...".format(coarseness))
        for fid in range(len(features_liver)):
            filename = "log_coarseness_{}_feature_{}.dat".format(coarseness, __FEATURE_MAP[fid])
            # check if output file exists
            if not args.force:
                if os.path.exists(filename):
                    logger.warning("The output file {} already exists. Exiting.".format(filename))
                    sys.exit(0)
            # create file
            with open(filename, "w") as f:
                f.write("# Coarseness {}, Feature {}\n".format(coarseness, __FEATURE_MAP[fid]))
                f.write("# First row is liver, then foreground and then background.\n")
                f.write("# The columns are: x, mean, lower qaurtile, upper quartile, min, max\n")
                f.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(1, *(5 * [features_liver[fid]])))  # five time the same
                f.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(2, *__get_features_stats(features_fg[fid])))
                f.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(3, *__get_features_stats(features_bg[fid])))

    logger.info("Successfully terminated.")
Example #40
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load first input image as example
    example_data, example_header = load(args.inputs[0])

    # test if the supplied position is valid
    if args.position > example_data.ndim or args.position < 0:
        raise ArgumentError(
            'The supplied position for the new dimension is invalid. It has to be between 0 and {}.'
            .format(example_data.ndim))

    # prepare empty output volume
    output_data = scipy.zeros([len(args.inputs)] + list(example_data.shape),
                              dtype=example_data.dtype)

    # add first image to output volume
    output_data[0] = example_data

    # load input images and add to output volume
    for idx, image in enumerate(args.inputs[1:]):
        image_data, _ = load(image)
        if not args.ignore and image_data.dtype != example_data.dtype:
            raise ArgumentError(
                'The dtype {} of image {} differs from the one of the first image {}, which is {}.'
                .format(image_data.dtype, image, args.inputs[0],
                        example_data.dtype))
        if image_data.shape != example_data.shape:
            raise ArgumentError(
                'The shape {} of image {} differs from the one of the first image {}, which is {}.'
                .format(image_data.shape, image, args.inputs[0],
                        example_data.shape))
        output_data[idx + 1] = image_data

    # move the new dimension to the target position
    for dim in range(output_data.ndim - 1):
        if dim >= args.position: break
        output_data = scipy.swapaxes(output_data, dim, dim + 1)

    # set pixel spacing
    spacing = list(header.get_pixel_spacing(example_header))
    spacing = tuple(spacing[:args.position] + [args.spacing] +
                    spacing[args.position:])
    example_header.set_voxel_spacing(spacing)

    # save created volume
    save(output_data, args.output, example_header, args.force)

    logger.info("Successfully terminated.")
Example #41
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # load image as float using ITK
    logger.info('Loading image {} as float using ITK...'.format(args.image))
    image_type = itk.Image[itk.F, 4] # causes PyDev to complain -> ignore error warning
    reader = itk.ImageFileReader[image_type].New()
    reader.SetFileName(args.image)
    reader.Update()
    
    logger.debug(itku.getInformation(reader.GetOutput()))
    
    # process/smooth with anisotropic diffusion image filters (which preserves edges)
    # for watershed one could use 20 iterations with conductance = 1.0
    logger.info('Smoothing...')
    for iteration in args.iterations:
        for conductance in args.conductances:
            for timestep in args.timesteps:
                # build output image name
                image_smoothed_name = '.'.join(args.output.split('.')[:-1]) # remove file ending
                image_smoothed_name += '_i{}_c{}_t{}.'.format(iteration, conductance, timestep) # add parameter suffix
                image_smoothed_name += args.output.split('.')[-1]
                
                # check if output image exists
                if not args.force:
                    if os.path.exists(image_smoothed_name):
                        logger.warning('The output image {} already exists. Skipping this step.'.format(image_smoothed_name))
                        continue
                
                # execute the smoothing
                logger.info('Smooth with settings: iter={} / cond={} / tstep={}...'.format(iteration, conductance, timestep))
                image_smoothed = itk.GradientAnisotropicDiffusionImageFilter[image_type, image_type].New()
                image_smoothed.SetNumberOfIterations(iteration)
                image_smoothed.SetConductanceParameter(conductance)
                image_smoothed.SetTimeStep(timestep)
                image_smoothed.SetInput(reader.GetOutput())
                image_smoothed.Update()
                
                logger.debug(itku.getInformation(image_smoothed.GetOutput()))
                
                # save file
                logger.info('Saving smoothed image as {}...'.format(image_smoothed_name))
                writer = itk.ImageFileWriter[image_type].New()
                writer.SetFileName(image_smoothed_name)
                writer.SetInput(image_smoothed.GetOutput())
                writer.Update()
    
    logger.info('Successfully terminated.')
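The three nested loops above sweep a parameter grid and derive one output filename per combination. A small sketch of the same naming scheme using itertools.product; the parameter values and the output name below are made up:

import itertools

iterations = [10, 20]      # hypothetical args.iterations
conductances = [1.0, 1.5]  # hypothetical args.conductances
timesteps = [0.0625]       # hypothetical args.timesteps
output = 'smoothed.nii'    # hypothetical args.output

stem, ext = output.rsplit('.', 1)
for i, c, t in itertools.product(iterations, conductances, timesteps):
    # same scheme as above: strip the extension, append a parameter suffix, re-attach it
    print('{}_i{}_c{}_t{}.{}'.format(stem, i, c, t, ext))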
Example #42
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # build output image name
    image_bg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.bg'
    image_bg_name += args.mask.split('/')[-1][-4:]
        
    # check if output image exists
    if not args.force:
        if os.path.exists(image_bg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_bg_name))
            exit(1)
    
    # load mask
    logger.info('Loading mask {}...'.format(args.mask))
    try: 
        mask_image = load(args.mask)
        mask_image_data = numpy.squeeze(mask_image.get_data()).astype(scipy.bool_)
    except ImageFileError as e:
        logger.critical('The mask image does not exist or its file type is unknown.')
        raise ArgumentError('The mask image does not exist or its file type is unknown.', e)  
    
    # array of indices to access desired slices
    sls = [(slice(1), slice(None), slice(None)),
           (slice(-1, None), slice(None), slice(None)),
           (slice(None), slice(1), slice(None)),
           (slice(None), slice(-1, None), slice(None)),
           (slice(None), slice(None), slice(1)),
           (slice(None), slice(None), slice(-1, None))]
    
    # security check
    logger.info('Checking that the border slices do not intersect with the reference liver mask...')
    for sl in sls:
        if not 0 == len(mask_image_data[sl].nonzero()[0]):
            logger.critical('Reference mask reaches the image border.')
            raise ArgumentError('Reference mask reaches the image border.')
        
    # create and save background marker image
    logger.info('Creating background marker image...')
    image_bg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    for sl in sls:
        image_bg_data[sl] = True
    
    logger.info('Saving background marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_bg_data, mask_image), image_bg_name)
    
    logger.info('Successfully terminated.')
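The slice list sls above addresses the six outer faces of a 3D volume, first to verify that the mask stays clear of the border and then to paint the background marker. A generic NumPy sketch of the same face addressing for an arbitrary number of dimensions, using a toy shape rather than the script's data:

import numpy as np

mask = np.zeros((5, 6, 7), dtype=bool)  # stand-in for the loaded liver mask
marker = np.zeros_like(mask)

for dim in range(marker.ndim):
    face = [slice(None)] * marker.ndim
    face[dim] = slice(0, 1)        # first slice along this dimension
    marker[tuple(face)] = True
    face[dim] = slice(-1, None)    # last slice along this dimension
    marker[tuple(face)] = True

# the border must not intersect the mask, mirroring the security check above
assert not np.logical_and(marker, mask).any()
print(marker.sum())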
Example #43
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # load image as float using ITK
    logger.info('Loading image {} as float using ITK...'.format(args.image))
    image_type = itk.Image[itk.F, 4] # causes PyDev to complain -> ignore error warning
    reader = itk.ImageFileReader[image_type].New()
    reader.SetFileName(args.image)
    reader.Update()
    
    logger.debug(itku.getInformation(reader.GetOutput()))
    
    # process/smooth with an anisotropic diffusion image filter (which preserves edges)
    # for watershed one could use 20 iterations with conductance = 1.0
    logger.info('Smoothing...')
    for iteration in args.iterations:
        for conductance in args.conductances:
            for timestep in args.timesteps:
                # build output image name
                image_smoothed_name = '.'.join(args.output.split('.')[:-1]) # remove file ending
                image_smoothed_name += '_i{}_c{}_t{}.'.format(iteration, conductance, timestep) # add parameter suffix
                image_smoothed_name += args.output.split('.')[-1]
                
                # check if output image exists
                if not args.force:
                    if os.path.exists(image_smoothed_name):
                        logger.warning('The output image {} already exists. Skipping this step.'.format(image_smoothed_name))
                        continue
                
                # execute the smoothing
                logger.info('Smooth with settings: iter={} / cond={} / tstep={}...'.format(iteration, conductance, timestep))
                image_smoothed = itk.GradientAnisotropicDiffusionImageFilter[image_type, image_type].New()
                image_smoothed.SetNumberOfIterations(iteration)
                image_smoothed.SetConductanceParameter(conductance)
                image_smoothed.SetTimeStep(timestep)
                image_smoothed.SetInput(reader.GetOutput())
                image_smoothed.Update()
                
                logger.debug(itku.getInformation(image_smoothed.GetOutput()))
                
                # save file
                logger.info('Saving smoothed image as {}...'.format(image_smoothed_name))
                writer = itk.ImageFileWriter[image_type].New()
                writer.SetFileName(image_smoothed_name)
                writer.SetInput(image_smoothed.GetOutput())
                writer.Update()
    
    logger.info('Successfully terminated.')
Example #44
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input data
    input_data, input_header = load(args.input)

    logger.debug('Old shape = {}.'.format(input_data.shape))

    # compute new shape
    new_shape = list(input_data.shape)
    new_shape[args.dimension] = 1 + (new_shape[args.dimension] -
                                     1) / (args.discard + 1)

    # prepare output image
    output_data = scipy.zeros(new_shape, dtype=input_data.dtype)

    # prepare slicers
    slicer_in = [slice(None)] * input_data.ndim
    slicer_out = [slice(None)] * input_data.ndim

    # prepare skip-counter and output image slice counter
    skipc = 0
    slicec = 0

    logger.debug('Shrinking from {} to {}...'.format(input_data.shape,
                                                     new_shape))
    for idx in range(input_data.shape[args.dimension]):

        if 0 == skipc:
            # transfer slice
            slicer_in[args.dimension] = slice(idx, idx + 1)
            slicer_out[args.dimension] = slice(slicec, slicec + 1)
            output_data[slicer_out] = input_data[slicer_in]

            # reset the skip counter and increase the slice counter
            skipc = args.discard
            slicec += 1

        else:  # skip slice
            # decrease skip counter
            skipc -= 1

    # set new pixel spacing
    new_spacing = list(header.get_pixel_spacing(input_header))
    new_spacing[
        args.dimension] = new_spacing[args.dimension] * float(args.discard + 1)
    logger.debug('Setting pixel spacing from {} to {}....'.format(
        header.get_pixel_spacing(input_header), new_spacing))
    header.set_pixel_spacing(input_header, tuple(new_spacing))

    save(output_data, args.output, input_header, args.force)
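The skip counter above keeps one slice and discards the next args.discard slices along the chosen dimension. The same selection can be expressed as a strided slice; a small sketch with made-up values:

import numpy as np

data = np.arange(4 * 9 * 3).reshape(4, 9, 3)  # toy volume
dimension, discard = 1, 2                     # hypothetical args.dimension / args.discard

slicer = [slice(None)] * data.ndim
slicer[dimension] = slice(None, None, discard + 1)  # keep every (discard + 1)-th slice
shrunk = data[tuple(slicer)]

print(shrunk.shape)  # (4, 3, 3), matching 1 + (9 - 1) // (discard + 1) slices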
Example #45
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load dicom slices
    [series] = pydicom_series.read_files(
        args.input, False,
        True)  # second to not show progress bar, third to retrieve data
    #print series.sampling # Note: the first value is the mean of all differences between the ImagePositionPatient values of the DICOM slices and is therefore not a meaningful spacing
    data_3d = series.get_pixel_array()

    # check parameters
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError(
            'The image has only {} dimensions. The supplied target dimension {} exceeds this number.'
            .format(data_3d.ndim, args.dimension))
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError(
            'The number of slices {} in the target dimension {} of the image shape {} is not dividable by the supplied number of consecutive slices {}.'
            .format(data_3d.shape[args.dimension], args.dimension,
                    data_3d.shape, args.offset))

    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)

    logger.debug(
        'Separating {} slices into {} 3D volumes of thickness {}.'.format(
            data_3d.shape[args.dimension], volumes_3d, args.offset))

    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset,
                                             idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl + 1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]

    # flip dimensions such that the newly created is the last
    data_4d = scipy.swapaxes(data_4d, 0, 3)

    # save resulting 4D volume
    save(data_4d, args.output, False, args.force)

    logger.info("Successfully terminated.")
Example #46
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load first input image as example 
    example_data, example_header = load(args.inputs[0])
    
    # test if the supplied position is valid
    if args.position > example_data.ndim or args.position < 0:
        raise ArgumentError('The supplied position for the new dimension is invalid. It has to be between 0 and {}.'.format(example_data.ndim))
    
    # prepare empty output volume
    output_data = scipy.zeros([len(args.inputs)] + list(example_data.shape), dtype=example_data.dtype)
    
    # add first image to output volume
    output_data[0] = example_data
    
    # load input images and add to output volume
    for idx, image in enumerate(args.inputs[1:]):
        image_data, _ = load(image)
        if not args.ignore and image_data.dtype != example_data.dtype:
            raise ArgumentError('The dtype {} of image {} differs from the one of the first image {}, which is {}.'.format(image_data.dtype, image, args.inputs[0], example_data.dtype))
        if image_data.shape != example_data.shape:
            raise ArgumentError('The shape {} of image {} differs from the one of the first image {}, which is {}.'.format(image_data.shape, image, args.inputs[0], example_data.shape))
        output_data[idx + 1] = image_data
        
    # move new dimension to the end or to target position
    for dim in range(output_data.ndim - 1):
        if dim >= args.position: break
        output_data = scipy.swapaxes(output_data, dim, dim + 1)
        
    # set pixel spacing
    spacing = list(header.get_pixel_spacing(example_header))
    spacing = tuple(spacing[:args.position] + [args.spacing] + spacing[args.position:])
    
    # !TODO: Find a way to enable this also for PyDicom and ITK images
    if __is_header_nibabel(example_header):
        __update_header_from_array_nibabel(example_header, output_data)
        header.set_pixel_spacing(example_header, spacing)
    else:
        raise ArgumentError("Sorry. Setting the voxel spacing of the new dimension only works with NIfTI images. See the description of this program for more details.")
    
    # save created volume
    save(output_data, args.output, example_header, args.force)
        
    logger.info("Successfully terminated.")
Example #47
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load 3d image
    data_3d, header_3d = load(args.input)

    # check if supplied dimension parameter is inside the images dimensions
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError(
            'The supplied cut-dimension {} exceeds the number of input volume dimensions {}.'
            .format(args.dimension, data_3d.ndim))

    # check if the supplied offset parameter is a divider of the cut-dimensions slice number
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError(
            'The offset is not a divider of the number of slices in cut dimension ({} / {}).'
            .format(data_3d.shape[args.dimension], args.offset))

    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)

    logger.debug(
        'Separating {} slices into {} 3D volumes of thickness {}.'.format(
            data_3d.shape[args.dimension], volumes_3d, args.offset))

    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset,
                                             idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl + 1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]

    # flip dimensions such that the newly created is the last
    data_4d = scipy.swapaxes(data_4d, 0, args.dimension + 1)
    data_4d = scipy.rollaxis(data_4d, 0, 4)

    # save resulting 4D volume
    save(data_4d, args.output, header_3d, args.force)

    logger.info("Successfully terminated.")
Example #48
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input data
    input_data, input_header = load(args.input)
    
    logger.debug('Old shape = {}.'.format(input_data.shape))
    
    # compute new shape
    new_shape = list(input_data.shape)
    new_shape[args.dimension] = 1 + (new_shape[args.dimension] - 1) / (args.discard + 1)
    
    # prepare output image
    output_data = scipy.zeros(new_shape, dtype=input_data.dtype)
    
    # prepare slicers
    slicer_in = [slice(None)] * input_data.ndim
    slicer_out = [slice(None)] * input_data.ndim
    
    # prepare skip-counter and output image slice counter
    skipc = 0
    slicec = 0
    
    logger.debug('Shrinking from {} to {}...'.format(input_data.shape, new_shape))
    for idx in range(input_data.shape[args.dimension]):
        
        if 0 == skipc:
            # transfer slice
            slicer_in[args.dimension] = slice(idx, idx + 1)
            slicer_out[args.dimension]  = slice(slicec, slicec + 1)
            output_data[slicer_out] = input_data[slicer_in]
            
            # reset the skip counter and increase the slice counter
            skipc = args.discard
            slicec += 1
            
        else: # skip slice
            # decrease skip counter
            skipc -= 1

    
    # set new pixel spacing
    new_spacing = list(header.get_pixel_spacing(input_header))
    new_spacing[args.dimension] = new_spacing[args.dimension] * float(args.discard + 1)
    logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(input_header), new_spacing))
    header.set_pixel_spacing(input_header, tuple(new_spacing))
    
    save(output_data, args.output, input_header, args.force)
Example #49
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output + args.image[-4:]):
            logger.warning(
                'The output file {} already exists. Breaking.'.format(
                    args.output + args.image[-4:]))
            exit(1)

    # load images
    image_data, image_header = load(args.image)

    # check image dimensions against sub-volume dimensions
    if len(image_data.shape) != len(args.volume):
        logger.critical(
            'The supplied input image has a different dimensionality than the requested sub-volume ({} vs. {}).'
            .format(len(image_data.shape), len(args.volume)))
        raise ArgumentError(
            'The supplied input image has a different dimensionality than the requested sub-volume ({} vs. {}).'
            .format(len(image_data.shape), len(args.volume)))

    # execute extraction of the sub-area
    logger.info('Extracting sub-volume...')
    index = [slice(x[0], x[1]) for x in args.volume]
    volume = image_data[index]

    # check if the output image contains data
    if 0 == len(volume):
        logger.exception(
            'The extracted sub-volume is of zero size. This usually means that the supplied volume coordinates and the image coordinates do not intersect. Exiting the application.'
        )
        sys.exit(-1)

    # squeeze extracted sub-volume for the case in which one dimensions has been eliminated
    volume = scipy.squeeze(volume)

    logger.debug('Extracted volume is of shape {}.'.format(volume.shape))

    # save results in same format as input image
    save(volume, args.output, image_header, args.force)

    logger.info('Successfully terminated.')
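The sub-volume is cut out with one slice object per dimension. Recent NumPy versions require the slice collection to be a tuple rather than a list; a minimal sketch with a made-up volume specification:

import numpy as np

image = np.arange(4 * 5 * 6).reshape(4, 5, 6)  # toy volume
volume = [(1, 3), (0, 5), (2, 4)]              # hypothetical args.volume, (from, to) per dimension

index = tuple(slice(lo, hi) for lo, hi in volume)  # tuple, not list, of slices
sub = image[index]
print(sub.shape)  # (2, 5, 2)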
Example #50
0
def main():
    parser = getParser()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images
    img, hdr = load(args.input)
    img = img.astype(numpy.bool)
    
    # check spacing values
    if not len(args.spacing) == img.ndim:
        parser.error('The image has {} dimensions, but {} spacing parameters have been supplied.'.format(img.ndim, len(args.spacing)))
        
    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            parser.error('The output image {} already exists.'.format(args.output)) 
        
    logger.debug('target voxel spacing: {}'.format(args.spacing))
    
    # determine number of required complete slices for up-sampling
    vs = header.get_pixel_spacing(hdr)
    rcss = [int(y // x - 1) for x, y in zip(args.spacing, vs)] # TODO: For option b, remove the - 1; better: no option b, since I am rounding later anyway
    
    # remove negatives and round up to next even number
    rcss = [x if x > 0 else 0 for x in rcss]
    rcss = [x if 0 == x % 2 else x + 1 for x in rcss]
    logger.debug('intermediate slices to add per dimension: {}'.format(rcss))
    
    # for each dimension requiring up-sampling, from the highest down, perform shape based slice interpolation
    logger.info('Adding required slices using shape based interpolation.')
    for dim, rcs in enumerate(rcss):
        if rcs > 0:
            logger.debug('adding {} intermediate slices to dimension {}'.format(rcs, dim))
            img = shape_based_slice_interpolation(img, dim, rcs)
            logger.debug('resulting new image shape: {}'.format(img.shape))
            
    # compute and set new voxel spacing
    nvs = [x / (y + 1.) for x, y in zip(vs, rcss)]
    header.set_pixel_spacing(hdr, nvs)
    logger.debug('intermediate voxel spacing: {}'.format(nvs))
    
    # interpolate with nearest neighbour
    logger.info('Re-sampling the image with a b-spline order of {}.'.format(args.order))
    img, hdr = resample(img, hdr, args.spacing, args.order, mode='nearest')
    
    # saving the resulting image
    save(img, args.output, hdr, args.force)
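The number of intermediate slices per dimension is derived from the ratio of current to target spacing, clamped at zero and rounded up to the next even number. A worked example of that arithmetic with made-up spacings:

target = [1.0, 1.0, 1.0]   # hypothetical args.spacing
current = [1.0, 1.0, 3.0]  # hypothetical voxel spacing of the input image

rcss = [int(cur // tgt - 1) for tgt, cur in zip(target, current)]
rcss = [x if x > 0 else 0 for x in rcss]           # remove negatives
rcss = [x if x % 2 == 0 else x + 1 for x in rcss]  # round up to the next even number
print(rcss)  # [0, 0, 2] -> two intermediate slices in the third dimension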
Example #51
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # copy the example image or generate an empty image, depending on the mode
    if args.example:
        grid_image = scipy.zeros(args.example_image.shape, scipy.bool_)
        grid_header = args.example_header
    else:
        grid_image = scipy.zeros(args.shape, scipy.bool_)
        # !TODO: Find another solution for this
        # Saving and loading image once to generate a valid header
        tmp_dir = tempfile.mkdtemp()
        tmp_image = '{}/{}'.format(tmp_dir, args.output.split('/')[-1])
        save(grid_image, tmp_image)
        _, grid_header = load(tmp_image)
        try:
            os.remove(tmp_image)
            os.rmdir(tmp_dir)
        except Exception:
            pass

    # set the image attributes if supplied
    if args.pixelspacing:
        header.set_pixel_spacing(grid_header, args.pixelspacing)
    if args.offset:
        header.set_offset(grid_header, args.offset)

    # compute the right grid spacing for each dimension
    if args.real:
        grid_spacing = [
            int(round(sp / float(ps))) for sp, ps in zip(
                args.spacing, header.get_pixel_spacing(grid_header))
        ]
    else:
        grid_spacing = args.spacing

    # paint the grid into the empty image volume
    for dim in range(grid_image.ndim):
        if 0 == grid_spacing[dim]:
            continue  # skip dimensions for which a grid spacing of 0 was supplied
        for offset in range(0, grid_image.shape[dim], grid_spacing[dim]):
            slicer = [slice(None)] * grid_image.ndim
            slicer[dim] = slice(offset, offset + 1)
            grid_image[slicer] = True

    # saving resulting grid volume
    save(grid_image, args.output, grid_header, args.force)
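The inner loop above marks one hyperplane per grid step. Because the marked positions are equidistant, a strided slice covers a whole dimension in one assignment; a 2D toy sketch:

import numpy as np

grid = np.zeros((8, 10), dtype=bool)
spacing = (4, 5)  # hypothetical grid spacing per dimension; 0 would disable a dimension

for dim, step in enumerate(spacing):
    if step == 0:
        continue
    slicer = [slice(None)] * grid.ndim
    slicer[dim] = slice(0, None, step)  # every step-th hyperplane at once
    grid[tuple(slicer)] = True

print(grid.astype(int))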
Example #52
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load dicom slices
    [series] = pydicom_series.read_files(args.input, False, True) # second to not show progress bar, third to retrieve data
    #print series.sampling # Note: the first value is the mean of all differences between the ImagePositionPatient values of the DICOM slices and is therefore not a meaningful spacing
    data_3d = series.get_pixel_array()
    
    # check parameters
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError('The image has only {} dimensions. The supplied target dimension {} exceeds this number.'.format(
                    data_3d.ndim,
                    args.dimension))
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError('The number of slices {} in the target dimension {} of the image shape {} is not dividable by the supplied number of consecutive slices {}.'.format(
                    data_3d.shape[args.dimension],
                    args.dimension,
                    data_3d.shape,
                    args.offset))
    
    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)
    
    logger.debug('Separating {} slices into {} 3D volumes of thickness {}.'.format(data_3d.shape[args.dimension], volumes_3d, args.offset))
        
    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset, idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl+1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]
        
    # flip dimensions such that the newly created is the last
    data_4d = scipy.swapaxes(data_4d, 0, 3)
        
    # save resulting 4D volume
    save(data_4d, args.output, False, args.force)
    
    logger.info("Successfully terminated.")
Example #53
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # collector variables
    dms = []
    hds = []
    assds = []
    precisions = []
    recalls = []
    
    # parse evaluation files
    logger.info("Parsing the evaluation files.")
    parameters = []
    for fn in args.inputs:
        parameter_chunk = fn.split('.')[PARAMETER_POSITION]
        if parameter_chunk in PARAMETER_REPLACEMENTS:
            parameters.append(PARAMETER_REPLACEMENTS[parameter_chunk])
        else:
            parameters.append(PARAMETER_TYPE(parameter_chunk))
        eva = Evaluation()
        eva.parse(fn, EXLUDED_FILES)
        dms.append(eva.mean('DC[0,1]', -1))
        hds.append(eva.mean('HD(mm)', -1))
        assds.append(eva.mean('P2C(mm)', -1))
        precisions.append(eva.mean('prec.', -1))
        recalls.append(eva.mean('recall', -1))

    # check data
    if not len(dms) == len(hds) == len(assds) == len(precisions) == len(recalls) == len(args.inputs) == len(parameters):
        raise Exception("Could not parse all files. Breaking.")

    # PLOT
    #plot_sequence_combinations(args.output, parameters, 'sequence combinations', (dms, ), (r'$DC$', ))
    #plot_feature_combinations(args.output, parameters, 'feature combinations', (dms, ), (r'$DC$', ))
    
    #plot_maxfeatures(parameters, '#features', (dms, precisions, recalls), ('DC/F1', 'Prec.', 'Rec.'))
    #plot_minsamples(parameters, '#samples', (dms, precisions, recalls), ('DC/F1', 'Prec.', 'Rec.'))
    #plot_depth(parameters, '#tree-depth', (dms, precisions, recalls), ('DC/F1', 'Prec.', 'Rec.'))
    #plot_trees(parameters, '#trees', (dms, precisions, recalls), ('DC/F1', 'Prec.', 'Rec.'))
    
    plot_samplesize(parameters, '#training-samples', (dms, precisions, recalls), ('DC', 'Prec.', 'Rec.'))
    
    #plt.show()
    plt.savefig(args.output)
    
    logger.info("Successfully terminated.")
Example #54
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load cases
    casedb = FileSet.fromdirectory(args.cases, args.sequences, filesource='identifiers')
    
    print 'Deduced sequence to file mapping:', casedb.filenamemapping
    
    # select suitable forests
    _, forestdirs, _ = os.walk(args.forestbasedir).next()
    suitable_forests = []
    for forestdir in forestdirs:
        forest = TrainedForest.fromdirectory(os.path.join(args.forestbasedir, forestdir))
        if not set(forest.sequences).difference(args.sequences):
            suitable_forests.append(forest)
            
    # sort suitable forests by number of sequences
    suitable_forests = sorted(suitable_forests, key=lambda x: len(x.sequences))
        
    # extract configuration from most suitable forest
    forestinstance = suitable_forests[0]
    
    # pipeline: apply pre-processing steps to the cases
    print '00: Unifying MRI sequences...'
    unified = unify(os.path.join(args.workingdir, '00unification'), casedb, fixedsequence=forestinstance.fixedsequence, targetspacing=forestinstance.workingresolution)
    print '01: Computing brain masks...'
    brainmasks, _ = stripskull(os.path.join(args.workingdir, '01skullstrip'), unified, stripsequence=forestinstance.skullstripsequence)
    print '02: Computing and applying bias-field correction...'
    biascorrected = correctbiasfields(os.path.join(args.workingdir, '02biasfield'), unified, brainmasks)
    print '03: Applying intensity range models...'
    standarised = percentilemodelapplication(os.path.join(args.workingdir, '03intensitystd'), biascorrected, brainmasks, forestinstance.getintensitymodels())
    print '04: Extracting features...'
    features, _, fnames = extractfeatures(os.path.join(args.workingdir, '04features'), standarised, brainmasks)
    print '05: Segmenting cases...'
    segmentations, probabilities = applyforest(os.path.join(args.workingdir, '05segmentations'), forestinstance.forest, features, brainmasks)
    print '06: Post-processing segmentations...'
    postprocessed = postprocess(os.path.join(args.workingdir, '06postprocessed'), segmentations, args.objectthreshold)
    print '07: Re-sampling segmentations, probability maps and brain masks to original space...'
    origsegmentations = resamplebyexample(args.targetdir, postprocessed, casedb, forestinstance.fixedsequence, binary=True)
    origprobabilities = resamplebyexample(args.targetdir, probabilities, casedb, forestinstance.fixedsequence)
    origbrainmasks = resamplebyexample(os.path.join(args.targetdir, 'brainmasks'), brainmasks, casedb, forestinstance.fixedsequence, binary=True)

    print 'Successfully terminated.'
Example #55
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load first image as result image
    logger.info('Loading {}...'.format(args.images[0]))
    result_data, result_header = load(args.images[0])

    # check dimension argument
    if args.dimension >= result_data.ndim:
        raise ArgumentError(
            'The supplied stack-dimension {} exceeds the image dimensionality of 0 to {}.'
            .format(args.dimension, result_data.ndim - 1))

    # change to a zero matrix if requested
    if args.zero and result_data.all():
        result_data = numpy.zeros(result_data.shape, result_data.dtype)

    # iterate over remaining images and concatenate
    for image_name in args.images[1:]:
        logger.info('Loading {}...'.format(image_name))
        image_data, _ = load(image_name)

        # change to zero matrix if requested
        if args.zero and image_data.all():
            image_data = numpy.zeros(image_data.shape, image_data.dtype)

        # concatenate
        if args.reversed:
            result_data = numpy.concatenate((image_data, result_data),
                                            args.dimension)
        else:
            result_data = numpy.concatenate((result_data, image_data),
                                            args.dimension)

    logger.debug('Final image is of shape {}.'.format(result_data.shape))

    # save results in same format as input image
    logger.info('Saving concatenated image as {}...'.format(args.output))

    save(result_data, args.output, result_header, args.force)

    logger.info('Successfully terminated.')
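The loop above concatenates the images pairwise, prepending instead of appending when --reversed is set. numpy.concatenate also accepts the whole list at once; a toy sketch of both orders:

import numpy as np

images = [np.full((2, 3), i) for i in range(3)]  # toy stand-ins for the loaded images
dimension = 0                                    # hypothetical args.dimension

forward = np.concatenate(images, axis=dimension)          # image 0, 1, 2
reversed_ = np.concatenate(images[::-1], axis=dimension)  # image 2, 1, 0 (the --reversed order)
print(forward.shape, reversed_.shape)  # (6, 3) (6, 3)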
Example #56
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if the output images exist (the check is also performed before saving, but as the computation can be very time-intensive, an early check can save frustration)
    if not args.force:
        if os.path.exists(args.output1):
            parser.error('The output image {} already exists.'.format(
                args.output1))
        if os.path.exists(args.output2):
            parser.error('The output image {} already exists.'.format(
                args.output2))

    # loading images
    data_input1, header_input1 = load(args.input1)
    data_input2, header_input2 = load(args.input2)
    logger.debug('Original image sizes are {} and {}.'.format(
        data_input1.shape, data_input2.shape))

    # compute intersection volumes (punch)
    logger.info('Computing the intersection.')
    inters1, inters2, new_offset = intersection(data_input1, header_input1,
                                                data_input2, header_input2)
    logger.debug(
        'Punched images are of sizes {} and {} with new offset {}.'.format(
            inters1.shape, inters2.shape, new_offset))

    # check if any intersection could be found at all
    if 0 == inters1.size:
        logger.warning(
            'No intersection could be found between the images. Please check their meta-data e.g. with medpy_info'
        )

    # update header informations
    header.set_offset(header_input1, new_offset)
    header.set_offset(header_input2, new_offset)

    # save punched images
    save(inters1, args.output1, header_input1, args.force)
    save(inters2, args.output2, header_input2, args.force)

    logger.info('Successfully terminated.')
Example #57
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    data_3d, _ = load(args.input)
    
    # check parameters
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError('The image has only {} dimensions. The supplied target dimension {} exceeds this number.'.format(
                    data_3d.ndim,
                    args.dimension))
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError('The number of slices {} in the target dimension {} of the image shape {} is not dividable by the supplied number of consecutive slices {}.'.format(
                    data_3d.shape[args.dimension],
                    args.dimension,
                    data_3d.shape,
                    args.offset))
    
    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)
    
    logger.debug('Separating {} slices into {} 3D volumes of thickness {}.'.format(data_3d.shape[args.dimension], volumes_3d, args.offset))
        
    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset, idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl+1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]
        
    # flip dimensions such that the newly created is the last
    data_4d = scipy.swapaxes(data_4d, 0, 3)
        
    # save resulting 4D volume
    save(data_4d, args.output, False, args.force)
    
    logger.info("Successfully terminated.")
Example #58
0
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images (as image, header pairs)
    images = []
    headers = []
    for image_name in args.images:
        i, h = load(image_name)
        images.append(i)
        headers.append(h)
    
    # loading binary foreground masks if supplied, else create masks from threshold value
    if args.masks:
        masks = [load(mask_name)[0].astype(numpy.bool) for mask_name in args.masks]
    else:
        masks = [i > args.threshold for i in images]
    
    # if in application mode, load the supplied model and apply it to the images
    if args.lmodel:
        logger.info('Loading the model and transforming images...')
        with open(args.lmodel, 'r') as f:
            trained_model = pickle.load(f)
            if not isinstance(trained_model, IntensityRangeStandardization):
                raise ArgumentError('{} does not seem to be a valid pickled instance of an IntensityRangeStandardization object'.format(args.lmodel))
            transformed_images = [trained_model.transform(i[m], surpress_mapping_check = args.ignore) for i, m in zip(images, masks)]
            
    # if in training mode, train the model, apply it to the images and save it
    else:
        logger.info('Training the average intensity model...')
        irs = IntensityRangeStandardization()
        trained_model, transformed_images = irs.train_transform([i[m] for i, m in zip(images, masks)], surpress_mapping_check = args.ignore)
        logger.info('Saving the trained model as {}...'.format(args.smodel))
        with open(args.smodel, 'wb') as f:
                pickle.dump(trained_model, f)
                
    # save the transformed images
    if args.simages:
        logger.info('Saving intensity transformed images to {}...'.format(args.simages))
        for ti, i, m, h, image_name in zip(transformed_images, images, masks, headers, args.images):
            i[m] = ti
            save(i, '{}/{}'.format(args.simages, image_name.split('/')[-1]), h, args.force)
    
    logger.info('Terminated.')
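A minimal sketch of the train/apply split used above, run on synthetic intensity samples rather than image foregrounds; it assumes medpy is installed and that IntensityRangeStandardization is importable from medpy.filter (the keyword spelling surpress_mapping_check is the one used in the script):

import numpy as np
from medpy.filter import IntensityRangeStandardization

# training mode: learn the model and transform the training samples in one go
train_samples = [np.random.normal(100, 10, 5000) for _ in range(5)]
irs = IntensityRangeStandardization()
trained_model, transformed = irs.train_transform(train_samples)

# application mode: apply the trained model to a new, unseen sample
new_sample = np.random.normal(110, 12, 5000)
standardized = trained_model.transform(new_sample, surpress_mapping_check=True)
print(standardized.mean())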