def test_base64_written():
    """Re-saving DATA_FILE5 must replace its non-standard GIFTI tags."""
    with InTemporaryDirectory():
        # Read the raw bytes of the original test file.
        with open(DATA_FILE5, "rb") as stream:
            raw = stream.read()
        # The file still carries the non-standard encoding/endian tags...
        assert_true(b"GIFTI_ENCODING_B64BIN" in raw)
        assert_true(b"GIFTI_ENDIAN_LITTLE" in raw)
        # ...and lacks the spec-conformant ones.
        assert_false(b"Base64Binary" in raw)
        assert_false(b"LittleEndian" in raw)
        # Load and re-save to normalize the tags.
        img5 = load(DATA_FILE5)
        save(img5, "fixed.gii")
        with open("fixed.gii", "rb") as stream:
            raw = stream.read()
        # Non-standard tags are gone; conformant ones are present.
        assert_false(b"GIFTI_ENCODING_B64BIN" in raw)
        assert_false(b"GIFTI_ENDIAN_LITTLE" in raw)
        assert_true(b"Base64Binary" in raw)
        expected = b"LittleEndian" if sys.byteorder == "little" else b"BigEndian"
        assert_true(expected in raw)
        # The data arrays must survive the round trip unchanged.
        fixed = load("fixed.gii")
        assert_array_almost_equal(fixed.darrays[0].data, DATA_FILE5_darr1)
        assert_array_almost_equal(fixed.darrays[1].data, DATA_FILE5_darr2)
def test_readwritedata():
    """DATA_FILE2 must keep its DA count and data across save/load."""
    original = load(DATA_FILE2)
    with InTemporaryDirectory():
        save(original, "test.gii")
        reloaded = load("test.gii")
        assert_equal(original.numDA, reloaded.numDA)
        assert_array_almost_equal(original.darrays[0].data,
                                  reloaded.darrays[0].data)
# Example #3 (score: 0)
def test_base64_written():
    """Round-tripping DATA_FILE5 rewrites its bad encoding/endian tags."""
    with InTemporaryDirectory():

        def read_bytes(path):
            # Slurp a file's raw contents.
            with open(path, 'rb') as fobj:
                return fobj.read()

        contents = read_bytes(DATA_FILE5)
        # The broken tags are present before the round trip.
        assert_true(b'GIFTI_ENCODING_B64BIN' in contents)
        assert_true(b'GIFTI_ENDIAN_LITTLE' in contents)
        # The correct tags are absent before the round trip.
        assert_false(b'Base64Binary' in contents)
        assert_false(b'LittleEndian' in contents)
        # Round trip through load/save.
        img5 = load(DATA_FILE5)
        save(img5, 'fixed.gii')
        contents = read_bytes('fixed.gii')
        # Broken tags have been replaced by the correct ones.
        assert_false(b'GIFTI_ENCODING_B64BIN' in contents)
        assert_false(b'GIFTI_ENDIAN_LITTLE' in contents)
        assert_true(b'Base64Binary' in contents)
        if sys.byteorder == 'little':
            assert_true(b'LittleEndian' in contents)
        else:
            assert_true(b'BigEndian' in contents)
        # Data arrays survived intact.
        img5_fixed = load('fixed.gii')
        assert_array_almost_equal(img5_fixed.darrays[0].data, DATA_FILE5_darr1)
        assert_array_almost_equal(img5_fixed.darrays[1].data, DATA_FILE5_darr2)
# Example #4 (score: 0)
def test_load_dataarray3():
    """Slice [30:50] of DATA_FILE3's first data array must round-trip."""
    first_pass = load(DATA_FILE3)
    with InTemporaryDirectory():
        save(first_pass, 'test.gii')
        second_pass = load('test.gii')
    for img in [first_pass, second_pass]:
        assert_array_almost_equal(img.darrays[0].data[30:50],
                                  DATA_FILE3_darr1)
# Example #5 (score: 0)
def test_readwritedata():
    """Saving and re-loading DATA_FILE2 preserves DA count and data."""
    src = load(DATA_FILE2)
    with InTemporaryDirectory():
        save(src, 'test.gii')
        dup = load('test.gii')
        assert_equal(src.numDA, dup.numDA)
        assert_array_almost_equal(src.darrays[0].data, dup.darrays[0].data)
def test_load_dataarray3():
    """DATA_FILE3 round trip: slice [30:50] of darray 0 is preserved."""
    before = load(DATA_FILE3)
    with InTemporaryDirectory():
        save(before, "test.gii")
        after = load("test.gii")
    for candidate in (before, after):
        window = candidate.darrays[0].data[30:50]
        assert_array_almost_equal(window, DATA_FILE3_darr1)
def test_load_dataarray2():
    """DATA_FILE2 round trip: first 10 values of darray 0 are preserved."""
    original = load(DATA_FILE2)
    with InTemporaryDirectory():
        save(original, 'test.gii')
        rebuilt = load('test.gii')
    for candidate in (original, rebuilt):
        assert_array_almost_equal(candidate.darrays[0].data[:10],
                                  DATA_FILE2_darr1)
# Example #8 (score: 0)
def test_load_dataarray4():
    """DATA_FILE4 round trip: first 10 values of darray 0 are preserved."""
    source = load(DATA_FILE4)
    with InTemporaryDirectory():
        save(source, 'test.gii')
        restored = load('test.gii')
    for candidate in (source, restored):
        assert_array_almost_equal(candidate.darrays[0].data[:10],
                                  DATA_FILE4_darr1)
def test_load_dataarray4():
    """Verify DATA_FILE4's first data array survives a save/load cycle."""
    img4 = load(DATA_FILE4)
    with InTemporaryDirectory():
        save(img4, "test.gii")
        reread = load("test.gii")
    checked = [img4, reread]
    for img in checked:
        head = img.darrays[0].data[:10]
        assert_array_almost_equal(head, DATA_FILE4_darr1)
# Example #10 (score: 0)
def main():
    """Create a background-marker image from a mask volume.

    Loads the mask named on the command line, checks that the mask does
    not touch the image border, then writes a boolean marker image (mask
    name with a ``.bg`` infix) whose six border slices are set to True.
    Exits with status 1 if the output already exists and --force is absent.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # build output image name: <folder>/<mask-stem>.bg<ext>
    # NOTE(review): assumes a three-character file extension — confirm.
    image_bg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.bg'
    image_bg_name += args.mask.split('/')[-1][-4:]
        
    # check if output image exists (refuse to overwrite unless --force)
    if not args.force:
        if os.path.exists(image_bg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_bg_name))
            exit(1)
    
    # load mask and coerce its voxel data to boolean
    logger.info('Loading mask {}...'.format(args.mask))
    try: 
        mask_image = load(args.mask)
        mask_image_data = numpy.squeeze(mask_image.get_data()).astype(scipy.bool_)
    except ImageFileError as e:
        logger.critical('The mask image does not exist or its file type is unknown.')
        raise ArgumentError('The mask image does not exist or its file type is unknown.', e)  
    
    # array of indices to access desired slices: the six border slices
    # (first and last plane along each of the three axes)
    sls = [(slice(1), slice(None), slice(None)),
           (slice(-1, None), slice(None), slice(None)),
           (slice(None), slice(1), slice(None)),
           (slice(None), slice(-1, None), slice(None)),
           (slice(None), slice(None), slice(1)),
           (slice(None), slice(None), slice(-1, None))]
    
    # security check: any foreground voxel on a border slice means the
    # reference mask touches the edge, which would corrupt the markers
    logger.info('Determine if the slices are not intersection with the reference liver mask...')
    for sl in sls:
        if not 0 == len(mask_image_data[sl].nonzero()[0]):
            logger.critical('Reference mask reaches till the image border.')
            raise ArgumentError('Reference mask reaches till the image border.')
        
    # create and save background marker image (True on all border planes)
    logger.info('Creating background marker image...')
    image_bg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    for sl in sls:
        image_bg_data[sl] = True
    
    logger.info('Saving background marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_bg_data, mask_image), image_bg_name)
    
    logger.info('Successfully terminated.')
def test_load_dataarray1():
    """Round-trip DATA_FILE1 and check data, metadata, and coordsys."""
    img1 = load(DATA_FILE1)
    with InTemporaryDirectory():
        save(img1, "test.gii")
        bimg = load("test.gii")
    for img in (img1, bimg):
        darr0 = img.darrays[0]
        assert_array_almost_equal(darr0.data, DATA_FILE1_darr1)
        assert_array_almost_equal(img.darrays[1].data, DATA_FILE1_darr2)
        meta = darr0.meta.metadata
        for key in ("AnatomicalStructurePrimary",
                    "AnatomicalStructureSecondary"):
            assert_true(key in meta)
        assert_equal(meta["AnatomicalStructurePrimary"], "CortexLeft")
        # Identity transform, Talairach space on both sides.
        assert_array_almost_equal(darr0.coordsys.xform, np.eye(4, 4))
        for space in (darr0.coordsys.dataspace, darr0.coordsys.xformspace):
            assert_equal(xform_codes.niistring[space], "NIFTI_XFORM_TALAIRACH")
def main(args=None):
    """Main program function."""
    parser = _get_parser()
    opts = parser.parse_args(args)
    source_img = load(opts.infile)

    # Refuse to clobber an existing output unless --force was given.
    if not opts.force and Path(opts.outfile).exists():
        raise FileExistsError(f"Output file exists: {opts.outfile}")

    conformed = conform(
        from_img=source_img,
        out_shape=opts.out_shape,
        voxel_size=opts.voxel_size,
        order=3,          # cubic spline interpolation
        cval=0.0,         # fill value outside the input volume
        orientation=opts.orientation,
    )

    save(conformed, opts.outfile)
def test_volume(tmpdir, capsys):
    """Check --Volume output in mm3 (default) and in voxel units."""
    mask = np.zeros((20, 20, 20), dtype='u1')
    mask[5:15, 5:15, 5:15] = 1  # 10x10x10 cube => 1000 voxels
    img = Nifti1Image(mask, np.eye(4))

    infile = tmpdir / "input.nii"
    save(img, infile)

    main(f"{infile} --Volume".split())
    captured_mm3 = capsys.readouterr()
    main(f"{infile} --Volume --units vox".split())
    captured_vox = capsys.readouterr()

    # With an identity affine, 1 voxel == 1 mm^3.
    assert float(captured_mm3[0]) == 1000.0
    assert int(captured_vox[0]) == 1000
def test_load_labeltable():
    """DATA_FILE6's label table must survive a save/load round trip."""
    img6 = load(DATA_FILE6)
    with InTemporaryDirectory():
        save(img6, "test.gii")
        bimg = load("test.gii")
    for img in (img6, bimg):
        assert_array_almost_equal(img.darrays[0].data[:3], DATA_FILE6_darr1)
        table = img.labeltable
        assert_equal(len(table.labels), 36)
        labeldict = table.get_labels_as_dict()
        assert_true(660700 in labeldict)
        assert_equal(labeldict[660700], "entorhinal")
        # Spot-check key and RGBA channels of the second label.
        label1 = table.labels[1]
        assert_equal(label1.key, 2647065)
        for attr, expected in (("red", 0.0980392), ("green", 0.392157),
                               ("blue", 0.156863), ("alpha", 1)):
            assert_equal(getattr(label1, attr), expected)
# Example #15 (score: 0)
def test_load_labeltable():
    """DATA_FILE6 round trip: label table entries survive save/load."""
    img6 = load(DATA_FILE6)
    with InTemporaryDirectory():
        save(img6, 'test.gii')
        bimg = load('test.gii')
    for img in (img6, bimg):
        assert_array_almost_equal(img.darrays[0].data[:3], DATA_FILE6_darr1)
        labels = img.labeltable.labels
        assert_equal(len(labels), 36)
        mapping = img.labeltable.get_labels_as_dict()
        assert_true(660700 in mapping)
        assert_equal(mapping[660700], 'entorhinal')
        # Spot-check key and RGBA of the second label.
        second = labels[1]
        assert_equal(second.key, 2647065)
        assert_equal(second.red, 0.0980392)
        assert_equal(second.green, 0.392157)
        assert_equal(second.blue, 0.156863)
        assert_equal(second.alpha, 1)
# Example #16 (score: 0)
def test_parse_dataarrays():
    """A mismatched NumberOfDataArrays attribute should warn, not crash."""
    fn = 'bad_daa.gii'
    img = gi.GiftiImage()

    with InTemporaryDirectory():
        save(img, fn)
        # Corrupt the header: claim one data array while none exist.
        with open(fn, 'r') as fp:
            txt = fp.read()
        with open(fn, 'w') as fp:
            fp.write(txt.replace('NumberOfDataArrays="0"',
                                 'NumberOfDataArrays ="1"'))

        with clear_and_catch_warnings() as w:
            warnings.filterwarnings('once', category=UserWarning)
            load(fn)
            # Exactly one warning; the empty image is unchanged.
            assert_equal(len(w), 1)
            assert_equal(img.numDA, 0)
def test_parse_dataarrays():
    """Loading a gifti whose DA count attribute lies should warn once."""
    fn = "bad_daa.gii"
    img = gi.GiftiImage()

    with InTemporaryDirectory():
        save(img, fn)
        with open(fn, "r") as fp:
            txt = fp.read()
        # Corrupt the header to claim one data array where none exist.
        broken = txt.replace('NumberOfDataArrays="0"',
                             'NumberOfDataArrays ="1"')
        with open(fn, "w") as fp:
            fp.write(broken)

        with clear_and_catch_warnings() as w:
            warnings.filterwarnings("once", category=UserWarning)
            load(fn)
            assert_equal(len(w), 1)
            assert_equal(img.numDA, 0)
# Example #18 (score: 0)
def test_load_dataarray1():
    """DATA_FILE1 round trip: data arrays, metadata and coordinate system."""
    img1 = load(DATA_FILE1)
    with InTemporaryDirectory():
        save(img1, 'test.gii')
        bimg = load('test.gii')
    for img in (img1, bimg):
        first = img.darrays[0]
        assert_array_almost_equal(first.data, DATA_FILE1_darr1)
        assert_array_almost_equal(img.darrays[1].data, DATA_FILE1_darr2)
        me = first.meta.metadata
        assert_true('AnatomicalStructurePrimary' in me)
        assert_true('AnatomicalStructureSecondary' in me)
        assert_equal(me['AnatomicalStructurePrimary'], 'CortexLeft')
        # Identity transform in Talairach space on both sides.
        assert_array_almost_equal(first.coordsys.xform, np.eye(4, 4))
        assert_equal(xform_codes.niistring[first.coordsys.dataspace],
                     'NIFTI_XFORM_TALAIRACH')
        assert_equal(xform_codes.niistring[first.coordsys.xformspace],
                     'NIFTI_XFORM_TALAIRACH')
# Example #19 (score: 0)
def main():
    """Render a testbench's region labels into a single int8 image.

    Unpickles a testbench file, then paints the regions listed as ground
    truth foreground/background and as model foreground/background with
    the codes 1, 2, 3 and 4 respectively, and saves the result image.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # check if output file exists (note: exits with status 0 when skipping)
    if not args.force:
        if os.path.exists(args.image):
            logger.warning('The output file {} already exists. Exiting.'.format(args.image))
            sys.exit(0)
    
    logger.info('Unpickle testbench and loading label image...')
    label, label_img, bounding_boxes, model_fg_ids, model_bg_ids, truth_fg, truth_bg = __load(args.testbench, args.label)
    
    logger.info('Composing image image...')
    image = scipy.zeros(label.shape, dtype=scipy.int8)
    # NOTE(review): chained indexing below only writes through if
    # bounding_boxes entries are slice tuples (views) — presumably they
    # are, since they index regions by id - 1; confirm against __load.
    # set foreground ids (code 1)
    for rid in truth_fg:
        image[bounding_boxes[rid - 1]][label[bounding_boxes[rid - 1]] == rid] = 1
    # set background ids (code 2)
    for rid in truth_bg:
        image[bounding_boxes[rid - 1]][label[bounding_boxes[rid - 1]] == rid] = 2
    # set foreground model ids (code 3)
    for rid in model_fg_ids:
        image[bounding_boxes[rid - 1]][label[bounding_boxes[rid - 1]] == rid] = 3
    # set background model ids (code 4)
    for rid in model_bg_ids:
        image[bounding_boxes[rid - 1]][label[bounding_boxes[rid - 1]] == rid] = 4
    
    logger.info('Saving image as {} with data-type int8...'.format(args.image))
    image_img = image_like(image, label_img)
    image_img.get_header().set_data_dtype(scipy.int8)
    save(image_img, args.image)
    
    logger.info('Successfully terminated.')
def main():
    """Run a watershed on a hard-coded gradient image and save the result."""
    # prepare logger
    logger = Logger.getInstance()
    logger.setLevel(logging.DEBUG)

    # input image location (hard-coded; unused alternatives kept for reference)
    #i = '/home/omaier/Experiments/Regionsegmentation/Evaluation_Viscous/00originalvolumes/o09.nii' # original image
    #i = '/home/omaier/Temp/test.nii' # original image
    #i = '/home/omaier/Temp/o09_smoothed_i4.0_c0.1_t0.0625.nii'
    i = '/home/omaier/Experiments/GraphCut/BoundaryTerm/Stawiaski/01gradient/o09_gradient.nii'

    # output image location
    r = '/home/omaier/Temp/result_gradient.nii' # result mask

    # load input image
    i_i = load(i)

    # extract and prepare the voxel data
    i_d = scipy.squeeze(i_i.get_data())

    # optional crop to speed up experiments (currently disabled)
    crop = [slice(50, -200),
            slice(50, -150),
            slice(50, -100)]
    #i_d = i_d[crop]

    i_d = scipy.copy(i_d)

    # !TODO: Test if input image is of size 0

    logger.debug('input image shape={},ndims={},dtype={}'.format(i_d.shape, i_d.ndim, i_d.dtype))

    result = watershed8(i_d, logger)

    # persist the region map as int32
    logger.info('Saving resulting region map...')
    result_i = image_like(result, i_i)
    result_i.get_header().set_data_dtype(scipy.int32)
    save(result_i, r)

    logger.info('Done!')
# Example #21 (score: 0)
def main():
    """Watershed segmentation driver with hard-coded in/out paths."""
    logger = Logger.getInstance()
    logger.setLevel(logging.DEBUG)

    # input image locations (unused alternatives kept for reference)
    #i = '/home/omaier/Experiments/Regionsegmentation/Evaluation_Viscous/00originalvolumes/o09.nii' # original image
    #i = '/home/omaier/Temp/test.nii' # original image
    #i = '/home/omaier/Temp/o09_smoothed_i4.0_c0.1_t0.0625.nii'
    i = '/home/omaier/Experiments/GraphCut/BoundaryTerm/Stawiaski/01gradient/o09_gradient.nii'

    # output image location
    r = '/home/omaier/Temp/result_gradient.nii'  # result mask

    # load the input and pull out its voxel data
    i_i = load(i)
    i_d = scipy.squeeze(i_i.get_data())

    # crop input images to achieve faster execution (disabled)
    crop = [slice(50, -200), slice(50, -150), slice(50, -100)]
    #i_d = i_d[crop]

    i_d = scipy.copy(i_d)

    # !TODO: Test if input image is of size 0

    logger.debug('input image shape={},ndims={},dtype={}'.format(
        i_d.shape, i_d.ndim, i_d.dtype))

    regions = watershed8(i_d, logger)

    # save the region map with int32 voxels
    logger.info('Saving resulting region map...')
    out_img = image_like(regions, i_i)
    out_img.get_header().set_data_dtype(scipy.int32)
    save(out_img, r)

    logger.info('Done!')
# Example #22 (score: 0)
def main():
    """Apply a 'viscous' morphological closing to gradient images.

    The intensity histogram of each input is split into ``args.sections``
    flattened bins; depending on ``args.type`` ('plain', 'mercury', 'oil'
    or 'sections') each bin is closed with a differently sized disc and
    the per-bin results are merged with a voxel-wise maximum.  Results
    are written next to the inputs; existing outputs are skipped unless
    --force is given.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    logger.info('Selected viscous type is {}'.format(args.type))

    # iterate over input images
    for image in args.images:

        # get and prepare image data
        logger.info('Loading image {} using NiBabel...'.format(image))
        image_gradient = load(image)

        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        logger.debug('Intensity range of gradient image is ({}, {})'.format(
            image_gradient_data.min(), image_gradient_data.max()))

        # build output file name and check for its existence, if not in sections mode
        # NOTE(review): assumes a three-character file extension — confirm.
        if 'sections' != args.type:
            # build output file name
            image_viscous_name = args.folder + '/' + image.split(
                '/')[-1][:-4] + '_viscous_{}_sec_{}_ds_{}'.format(
                    args.type, args.sections, args.dsize)
            image_viscous_name += image.split('/')[-1][-4:]

            # check if output file exists
            if not args.force:
                if os.path.exists(image_viscous_name):
                    logger.warning(
                        'The output file {} already exists. Skipping this image.'
                        .format(image_viscous_name))
                    continue

        # execute plain closing i.e. a closing operation over the whole image, if in plain mode
        if 'plain' == args.type:
            # prepare the disc structure (a ball with a diameter of (args.dsize * 2 + 1))
            disc = iterate_structure(generate_binary_structure(3, 1),
                                     args.dsize).astype(scipy.int_)

            # apply closing
            logger.info('Applying the morphology over whole image at once...')
            image_viscous_data = grey_closing(image_gradient_data,
                                              footprint=disc)

            # save resulting gradient image
            logger.info('Saving resulting gradient image as {}...'.format(
                image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

            # skip other morphologies
            continue

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, args.sections)
        logger.debug('{} bins created'.format(len(bins) - 1))

        # check if the number of bins is consistent
        if args.sections != len(bins) - 1:
            raise Exception(
                'Inconsistency between the number of requested and created bins ({} to {})'
                .format(args.sections,
                        len(bins) - 1))

        # prepare result file
        image_viscous_data = image_gradient_data

        # transform the gradient images topography (Note: the content of one bin is: bins[slice - 1] <= content < bins[slice]
        # NOTE(review): the loop variable 'slice' shadows the builtin; it is
        # only used as an index here, but renaming would be safer.
        logger.info(
            'Applying the viscous morphological operations {} times...'.format(
                args.sections))
        for slice in range(1, args.sections + 1):

            # build output file name and check for its existence, if in sections mode
            if 'sections' == args.type:
                # build output file name
                image_viscous_name = args.folder + '/' + image.split(
                    '/')[-1][:-4] + '_viscous_{}_sec_{}_ds_{}_sl_{}'.format(
                        args.type, args.sections, args.dsize, slice)
                image_viscous_name += image.split('/')[-1][-4:]

                # check if output file exists
                if not args.force:
                    if os.path.exists(image_viscous_name):
                        logger.warning(
                            'The output file {} already exists. Skipping this slice.'
                            .format(image_viscous_name))
                        continue

                # prepare result file
                image_viscous_data = image_gradient_data

            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = (image_gradient_data >= bins[slice]
                            )  # all voxels with are over the current slice
            mask_lower = (image_gradient_data < bins[slice - 1]
                          )  # all voxels which are under the current slice
            mask_equal = scipy.invert(
                mask_greater | mask_lower)  # all voxels in the current slice
            # NOTE(review): 'dsize' and 'disc' are only bound in the three
            # branches below; an unexpected args.type would raise NameError
            # at the debug/closing statements further down — confirm the
            # argument parser restricts the choices.
            if 'mercury' == args.type:
                dsize = int((args.dsize / float(args.sections)) * (slice))
                disc = iterate_structure(generate_binary_structure(3, 1),
                                         dsize).astype(scipy.int_)
                mask_equal_or_greater = mask_equal | mask_greater
                image_threshold_data = image_gradient_data * mask_equal_or_greater
            elif 'oil' == args.type:
                dsize = int((args.dsize / float(args.sections)) *
                            (args.sections - slice + 1))
                disc = iterate_structure(generate_binary_structure(3, 1),
                                         dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                mask_equal_or_lower = mask_equal | mask_lower
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[
                    mask_equal_or_lower].max()
            elif 'sections' == args.type:
                dsize = args.dsize
                disc = iterate_structure(generate_binary_structure(3, 1),
                                         args.dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                # set all voxels under the current slice to zero
                image_threshold_data[mask_lower] = 0
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[
                    mask_equal].max()

            logger.debug('{} of {} voxels belong to this level.'.format(
                len(mask_equal.nonzero()[0]),
                scipy.prod(image_threshold_data.shape)))

            # apply the closing with the appropriate disc size
            logger.debug(
                'Applying a disk of {} to all values >= {} and < {}...'.format(
                    dsize, bins[slice - 1], bins[slice]))
            image_closed_data = grey_closing(image_threshold_data,
                                             footprint=disc)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data,
                                               image_closed_data)

            # save created output file, if in sections mode
            if 'sections' == args.type:
                # save resulting gradient image
                logger.info('Saving resulting gradient image as {}...'.format(
                    image_viscous_name))
                image_viscous = image_like(image_viscous_data, image_gradient)
                save(image_viscous, image_viscous_name)

        # save created output file, if not in sections mode
        if 'sections' != args.type:
            # save resulting gradient image
            logger.info('Saving resulting gradient image as {}...'.format(
                image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

    logger.info('Successfully terminated.')
def main():
    """Apply weighted viscous morphology to gradient images.

    For each input image the intensity histogram is split into
    ``len(args.func)`` flattened bins; each bin (section) is closed with a
    spherical structuring element whose size is taken from ``args.func``,
    and the per-section results are combined with a voxel-wise maximum.
    The result is written next to the input as
    ``<stem>_wviscous_<sizes><ext>``; existing outputs are skipped unless
    --force is given.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    logger.info(
        'Executing weighted viscous morphology with {} ({} bins).'.format(
            ','.join(map(str, args.func)), len(args.func)))

    # iterate over input images
    for image in args.images:

        # build output file name (assumes a three-character file extension)
        image_viscous_name = args.folder + '/' + image.split(
            '/')[-1][:-4] + '_wviscous_' + '_'.join(map(str, args.func))
        image_viscous_name += image.split('/')[-1][-4:]

        # skip images whose output already exists, unless forced
        if not args.force:
            if os.path.exists(image_viscous_name):
                logger.warning(
                    'The output file {} already exists. Skipping this image.'.
                    format(image_viscous_name))
                continue

        # get and prepare image data
        logger.info('Loading image {} using NiBabel...'.format(image))
        image_gradient = load(image)
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        # prepare result image and extract required attributes of input image
        if args.debug:
            logger.debug(
                'Intensity range of gradient image is ({}, {})'.format(
                    image_gradient_data.min(), image_gradient_data.max()))

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, len(args.func))
        logger.debug('{} bins created'.format(len(bins) - 1))

        # check if the number of bins is consistent
        # BUG FIX: the message previously referenced args.sections, which
        # this script does not define — formatting it would have raised
        # AttributeError instead of the intended Exception.
        if len(args.func) != len(bins) - 1:
            raise Exception(
                'Inconsistency between the number of requested and created bins ({} to {})'
                .format(len(args.func),
                        len(bins) - 1))

        # prepare result file
        image_viscous_data = image_gradient_data

        # transform the gradient images topography
        logger.info(
            'Applying the viscous morphological operations on {} sections...'.
            format(len(args.func)))
        for sl in range(1, len(args.func) + 1):

            # spheres of size 0 or below lead to no changes; skip them
            if 0 >= args.func[sl - 1]:
                continue
            sphere = iterate_structure(generate_binary_structure(3, 1),
                                       args.func[sl - 1]).astype(scipy.int_)

            # masks for voxels above / below / inside the current section
            # (a section covers bins[sl - 1] <= value < bins[sl])
            mask_greater = (image_gradient_data >= bins[sl])
            mask_lower = (image_gradient_data < bins[sl - 1])
            mask_equal = scipy.invert(mask_greater | mask_lower)

            # extract slice: zero out everything below the section and
            # clamp everything above to the section's maximum value
            image_threshold_data = image_gradient_data.copy()
            image_threshold_data[mask_lower] = 0
            image_threshold_data[mask_greater] = image_threshold_data[
                mask_equal].max()

            logger.debug('{} of {} voxels belong to this level.'.format(
                len(mask_equal.nonzero()[0]),
                scipy.prod(image_threshold_data.shape)))

            # apply the closing with the appropriate sphere
            logger.debug(
                'Applying a disk of {} to all values >= {} and < {} (sec {})...'
                .format(args.func[sl - 1], bins[sl - 1], bins[sl], sl))
            image_closed_data = grey_closing(image_threshold_data,
                                             footprint=sphere)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data,
                                               image_closed_data)

        # save resulting gradient image
        logger.info('Saving resulting gradient image as {}...'.format(
            image_viscous_name))
        image_viscous = image_like(image_viscous_data, image_gradient)
        save(image_viscous, image_viscous_name)

    logger.info('Successfully terminated.')
# Example #24 (score: 0)
def main():
    """Combine several binary masks into a Radiance® segmentation image.

    ``args.masks`` is a flat list alternating mask file names and their
    segmentation identifiers.  Mask k is encoded as bit 2**k of an
    unsigned integer volume (saved as an Analyze .hdr/.img pair); the
    identifiers and display colours are written to a ``.msk`` side-car.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # build output image names (Analyze header/image pair plus info file)
    output_hdr_name = args.output + '.hdr'
    output_img_name = args.output + '.img'
    output_msk_name = args.output + '.msk'

    # refuse to overwrite any existing output file unless --force is given
    if not args.force:
        if os.path.exists(output_hdr_name):
            logger.warning(
                'The output header {} already exists. Breaking.'.format(
                    output_hdr_name))
            exit(1)
        elif os.path.exists(output_img_name):
            logger.warning(
                'The output image {} already exists. Breaking.'.format(
                    output_img_name))
            exit(1)
        elif os.path.exists(output_msk_name):
            logger.warning(
                'The output infor file {} already exists. Breaking.'.format(
                    output_msk_name))
            exit(1)

    # decide on most suitable bit format (one bit per mask)
    # BUG FIX: use floor division — under Python 3, / yields floats here.
    if len(args.masks) // 2 <= 8:
        bit_format = scipy.uint8
    elif len(args.masks) // 2 <= 16:
        bit_format = scipy.uint16
    elif len(args.masks) // 2 <= 32:
        bit_format = scipy.uint32
    elif len(args.masks) // 2 <= 64:
        bit_format = scipy.uint64
    else:
        raise ArgumentError(
            'It is not possible to combine more than 64 single masks.')

    logger.info(
        'Creating a Radiance® segmentation image in {} bit format...'.format(
            bit_format))

    # loading first mask image as reference and template for saving
    logger.info('Loading mask {} ({} segmentation) using NiBabel...'.format(
        args.masks[0], args.masks[1]))
    image_mask = load(args.masks[0])
    image_mask_data = scipy.squeeze(image_mask.get_data())

    # prepare result image
    image_radiance_data = scipy.zeros(image_mask_data.shape, dtype=bit_format)

    logger.debug('Result image is of dimensions {} and type {}.'.format(
        image_radiance_data.shape, image_radiance_data.dtype))

    # preparing .msk file
    f = open(output_msk_name, 'w')

    # adding first mask to result image (bit 0)
    image_radiance_data[image_mask_data > 0] = 1

    # adding first mask segmentation identifier to the .msk file
    f.write('{}\t1\t{}\t{}\t{}\n'.format(args.masks[1],
                                         *__COLOURS[0 % len(__COLOURS)]))

    for i in range(2, len(args.masks), 2):
        # loading mask image
        logger.info(
            'Loading mask {} ({} segmentation) using NiBabel...'.format(
                args.masks[i], args.masks[i + 1]))
        image_mask_data = scipy.squeeze(load(args.masks[i]).get_data())

        # check if the shape of the images is consistent
        if image_mask_data.shape != image_radiance_data.shape:
            raise ArgumentError(
                'Mask {} is with {} of a different shape as the first mask image (which has {}).'
                .format(args.masks[i], image_mask_data.shape,
                        image_radiance_data.shape))

        # adding mask to result image
        # BUG FIX: i / 2 is a float under Python 3; pow(2, float) breaks the
        # in-place add on an unsigned array and a float sequence index
        # raises TypeError — use integer division (identical on Python 2).
        image_radiance_data[image_mask_data > 0] += pow(2, i // 2)

        # adding mask segmentation identifier to the .msk file
        f.write('{}\t{}\t{}\t{}\t{}\n'.format(
            args.masks[i + 1], pow(2, i // 2),
            *__COLOURS[(i // 2) % len(__COLOURS)]))

    # FIX: close the side-car file instead of leaking the handle
    f.close()

    logger.info(
        'Saving Radiance® segmentation image as {}/.img/.msk...'.format(
            output_hdr_name))
    image_mask.get_header().set_data_dtype(bit_format)
    save(image_like(image_radiance_data, image_mask), output_hdr_name)

    logger.info('Successfully terminated.')
def main():
    """Derive foreground/background seed images from a binary mask.

    Command-line entry point: loads the mask given via ``args.mask``,
    erodes it step-wise to locate a voxel near its center (the foreground
    seed), verifies that all eight volume corners are background, and
    writes two int8 marker images ``<stem>.fg<ext>`` and ``<stem>.bg<ext>``
    into ``args.folder``.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # build output image names by splicing '.fg'/'.bg' in before the file
    # extension (assumes a 3-character extension, e.g. '.nii' — TODO confirm)
    image_fg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.fg'
    image_fg_name += args.mask.split('/')[-1][-4:]
    image_bg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.bg'
    image_bg_name += args.mask.split('/')[-1][-4:]

    # refuse to overwrite existing output images unless forced
    if not args.force:
        if os.path.exists(image_fg_name):
            logger.warning(
                'The output image {} already exists. Breaking.'.format(
                    image_fg_name))
            exit(1)
        elif os.path.exists(image_bg_name):
            logger.warning(
                'The output image {} already exists. Breaking.'.format(
                    image_bg_name))
            exit(1)

    # load mask and view it as a boolean volume
    logger.info('Loading mask {}...'.format(args.mask))

    try:
        mask_image = load(args.mask)
        mask_image_data = numpy.squeeze(mask_image.get_data()).astype(
            scipy.bool_)
    except ImageFileError as e:
        logger.critical(
            'The mask image does not exist or its file type is unknown.')
        raise ArgumentError(
            'The mask image does not exist or its file type is unknown.', e)

    # erode mask stepwise (2 iterations at a time); the last non-empty
    # erosion result approximates the center of the mask
    logger.info('Step-wise reducing mask to find center...')
    mask_remains = mask_image_data.copy()
    while (True):
        mask_remains_next = ndimage.binary_erosion(mask_remains, iterations=2)
        if 0 == len(mask_remains_next.nonzero()[0]):
            break
        mask_remains = mask_remains_next

    # extract one of the remaining voxels as the foreground seed
    voxels = mask_remains.nonzero()
    marker = (voxels[0][0], voxels[1][0], voxels[2][0])

    logger.debug('Extracted foreground seed is {}.'.format(marker))

    # check suitability of corners as background markers: all eight corners
    # of the (3D) volume must lie outside the mask
    logger.info(
        'Checking if the corners are suitable background seed candidates...')
    if True == mask_image_data[0,0,0] or \
       True == mask_image_data[-1,0,0] or \
       True == mask_image_data[0,-1,0] or \
       True == mask_image_data[0,0,-1] or \
       True == mask_image_data[-1,-1,0] or \
       True == mask_image_data[-1,0,-1] or \
       True == mask_image_data[0,-1,-1] or \
       True == mask_image_data[-1,-1,-1]:
        logger.critical(
            'The corners of the image do not correspond to background voxels.')
        raise ArgumentError(
            'The corners of the image do not correspond to background voxels.')

    # create and save foreground marker image: a single True voxel at the seed
    logger.info('Creating foreground marker image...')
    image_fg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    image_fg_data[marker[0], marker[1], marker[2]] = True

    logger.info('Saving foreground marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_fg_data, mask_image), image_fg_name)

    # create and save background marker image: the eight corners set to True
    logger.info('Creating background marker image...')
    image_bg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    image_bg_data[0, 0, 0] = True
    image_bg_data[-1, 0, 0] = True
    image_bg_data[0, -1, 0] = True
    image_bg_data[0, 0, -1] = True
    image_bg_data[-1, -1, 0] = True
    image_bg_data[-1, 0, -1] = True
    image_bg_data[0, -1, -1] = True
    image_bg_data[-1, -1, -1] = True

    logger.info('Saving background marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_bg_data, mask_image), image_bg_name)

    logger.info('Successfully terminated.')
def main():
    """Derive foreground/background seed images from a binary mask.

    Loads the mask named by ``args.mask``, erodes it step-wise to find a
    voxel near its center (the foreground seed), checks that all eight
    volume corners lie outside the mask, and saves two int8 marker images
    ``<stem>.fg<ext>`` and ``<stem>.bg<ext>`` into ``args.folder``.
    """
    # command-line parsing
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # logger setup
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # derive the two output file names from the mask file name
    basename = args.mask.split('/')[-1]
    stem, ext = basename[:-4], basename[-4:]
    image_fg_name = args.folder + '/' + stem + '.fg' + ext
    image_bg_name = args.folder + '/' + stem + '.bg' + ext

    # refuse to overwrite existing outputs unless forced
    if not args.force:
        if os.path.exists(image_fg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_fg_name))
            exit(1)
        elif os.path.exists(image_bg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_bg_name))
            exit(1)

    # load the mask volume as a boolean array
    logger.info('Loading mask {}...'.format(args.mask))

    try:
        img = load(args.mask)
        img_data = numpy.squeeze(img.get_data()).astype(scipy.bool_)
    except ImageFileError as e:
        logger.critical('The mask image does not exist or its file type is unknown.')
        raise ArgumentError('The mask image does not exist or its file type is unknown.', e)

    # erode repeatedly (2 iterations per step); the last non-empty result
    # approximates the center of the mask
    logger.info('Step-wise reducing mask to find center...')
    remains = img_data.copy()
    while True:
        eroded = ndimage.binary_erosion(remains, iterations=2)
        if 0 == len(eroded.nonzero()[0]):
            break
        remains = eroded

    # pick the first surviving voxel as the foreground seed
    nz = remains.nonzero()
    marker = (nz[0][0], nz[1][0], nz[2][0])

    logger.debug('Extracted foreground seed is {}.'.format(marker))

    # all eight volume corners must lie outside the mask
    logger.info('Checking if the corners are suitable background seed candidates...')
    corners = [(x, y, z) for x in (0, -1) for y in (0, -1) for z in (0, -1)]
    if any(True == img_data[corner] for corner in corners):
        logger.critical('The corners of the image do not correspond to background voxels.')
        raise ArgumentError('The corners of the image do not correspond to background voxels.')

    # foreground marker image: a single True voxel at the seed
    logger.info('Creating foreground marker image...')
    fg_data = scipy.zeros(img_data.shape, dtype=scipy.bool_)
    fg_data[marker] = True

    logger.info('Saving foreground marker image...')
    img.get_header().set_data_dtype(scipy.int8)
    save(image_like(fg_data, img), image_fg_name)

    # background marker image: the eight corners set to True
    logger.info('Creating background marker image...')
    bg_data = scipy.zeros(img_data.shape, dtype=scipy.bool_)
    for corner in corners:
        bg_data[corner] = True

    logger.info('Saving background marker image...')
    img.get_header().set_data_dtype(scipy.int8)
    save(image_like(bg_data, img), image_bg_name)

    logger.info('Successfully terminated.')
def main():
    """Apply an FFT to each input image and save real/imaginary parts.

    For every image in ``args.images`` the discrete Fourier transform is
    computed; the real part is shifted to be non-negative, rescaled to a
    logarithmic uint16 range and saved as ``<stem>_real<ext>``; the
    imaginary part is saved unchanged as ``<stem>_imag<ext>`` (float32).
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # iterate over input images
    for image in args.images:

        # build output image names: <stem>_real<ext> / <stem>_imag<ext>
        image_real_name = image.split('/')[-1][:-4] + '_real' + image.split('/')[-1][-4:]
        image_imag_name = image.split('/')[-1][:-4] + '_imag' + image.split('/')[-1][-4:]

        # check if output images exist; skip unless forced
        if not args.force:
            if os.path.exists(image_real_name):
                logger.warning('The output image {} already exists. Skipping this step.'.format(image_real_name))
                continue
            elif os.path.exists(image_imag_name):
                logger.warning('The output image {} already exists. Skipping this step.'.format(image_imag_name))
                continue

        # load image using nibabel
        logger.info('Loading image {} using NiBabel...'.format(image))
        image_original = load(image)

        # get and prepare image data
        image_original_data = scipy.squeeze(image_original.get_data())

        # apply the discrete fast Fourier transformation
        logger.info('Executing the discrete fast Fourier transformation...')
        image_fft_data = scipy.fftpack.fftn(image_original_data)

        # transform the real part to logarithmic scale
        logger.info('To logarithmic space...')
        image_real_data = image_fft_data.real
        # FIX: was a bare Python-2 `print`; route debug output through the logger
        logger.debug('Real part range before rescaling: ({}, {})'.format(image_real_data.min(), image_real_data.max()))
        image_real_data = image_real_data + abs(image_real_data.min())
        # scale by 0.0001, take the log and then scale to fit uint16
        constant = 65535. / (math.log(1 + image_real_data.max()))

        def _to_log_scale(x):
            return constant * math.log(1 + x * 0.0001)

        new_func = numpy.vectorize(_to_log_scale)
        logger.info('Apply...')
        image_real_data = new_func(image_real_data)
        logger.debug('Real part range after rescaling: ({}, {})'.format(image_real_data.min(), image_real_data.max()))
        image_imag_data = image_fft_data.imag

        # save resulting images
        # FIX: message previously claimed float32; the real part is saved as uint16
        logger.info('Saving resulting images real part as {} in the same format as input image, only with data-type uint16...'.format(image_real_name))
        image_real = image_like(image_real_data, image_original)
        image_real.get_header().set_data_dtype(scipy.uint16)
        save(image_real, image_real_name)
        # FIX: message previously said "real part" for the imaginary image
        logger.info('Saving resulting images imaginary part as {} in the same format as input image, only with data-type float32...'.format(image_imag_name))
        image_imag = image_like(image_imag_data, image_original)
        image_imag.get_header().set_data_dtype(scipy.float32)
        save(image_imag, image_imag_name)

    logger.info('Successfully terminated.')
def main():
    """Apply a "viscous" grey-scale closing morphology to gradient images.

    Depending on ``args.type`` ("plain", "mercury", "oil" or "sections" —
    presumably restricted by the argument parser; verify), each input image
    is either closed as a whole ("plain") or split into ``args.sections``
    histogram-flattened intensity levels that are each closed with a disc
    whose size grows ("mercury"), shrinks ("oil") or stays fixed
    ("sections") across levels. Results are combined via a voxel-wise
    maximum and saved next to ``args.folder``.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    logger.info("Selected viscous type is {}".format(args.type))

    # iterate over input images
    for image in args.images:

        # load the gradient image
        logger.info("Loading image {} using NiBabel...".format(image))
        image_gradient = load(image)

        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        logger.debug(
            "Intensity range of gradient image is ({}, {})".format(image_gradient_data.min(), image_gradient_data.max())
        )

        # build output file name and check for its existence, if not in sections mode
        # (in sections mode one output file per level is produced inside the loop below)
        if "sections" != args.type:
            # build output file name
            image_viscous_name = (
                args.folder
                + "/"
                + image.split("/")[-1][:-4]
                + "_viscous_{}_sec_{}_ds_{}".format(args.type, args.sections, args.dsize)
            )
            image_viscous_name += image.split("/")[-1][-4:]

            # check if output file exists
            if not args.force:
                if os.path.exists(image_viscous_name):
                    logger.warning("The output file {} already exists. Skipping this image.".format(image_viscous_name))
                    continue

        # execute plain closing i.e. a closing operation over the whole image, if in plain mode
        if "plain" == args.type:
            # prepare the disc structure (a ball with a diameter of (args.dsize * 2 + 1))
            disc = iterate_structure(generate_binary_structure(3, 1), args.dsize).astype(scipy.int_)

            # apply closing
            logger.info("Applying the morphology over whole image at once...")
            image_viscous_data = grey_closing(image_gradient_data, footprint=disc)

            # save resulting gradient image
            logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

            # skip other morphologies
            continue

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, args.sections)
        logger.debug("{} bins created".format(len(bins) - 1))

        # check if the number of bins is consistent
        if args.sections != len(bins) - 1:
            raise Exception(
                "Inconsistency between the number of requested and created bins ({} to {})".format(
                    args.sections, len(bins) - 1
                )
            )

        # prepare result file
        image_viscous_data = image_gradient_data

        # transform the gradient images topography (Note: the content of one bin is: bins[slice - 1] <= content < bins[slice]
        # NOTE(review): the loop variable "slice" shadows the builtin of the same name
        logger.info("Applying the viscous morphological operations {} times...".format(args.sections))
        for slice in range(1, args.sections + 1):

            # build output file name and check for its existence, if in sections mode
            if "sections" == args.type:
                # build output file name
                image_viscous_name = (
                    args.folder
                    + "/"
                    + image.split("/")[-1][:-4]
                    + "_viscous_{}_sec_{}_ds_{}_sl_{}".format(args.type, args.sections, args.dsize, slice)
                )
                image_viscous_name += image.split("/")[-1][-4:]

                # check if output file exists
                if not args.force:
                    if os.path.exists(image_viscous_name):
                        logger.warning(
                            "The output file {} already exists. Skipping this slice.".format(image_viscous_name)
                        )
                        continue

                # prepare result file
                image_viscous_data = image_gradient_data

            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = image_gradient_data >= bins[slice]  # all voxels with are over the current slice
            mask_lower = image_gradient_data < bins[slice - 1]  # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower)  # all voxels in the current slice
            # NOTE(review): dsize, disc and image_threshold_data are only bound by the
            # branches below — assumes args.type is one of "mercury"/"oil"/"sections"
            # here ("plain" was handled above); confirm the parser enforces this
            if "mercury" == args.type:
                # mercury: disc size grows with the level (small discs at low levels)
                dsize = int((args.dsize / float(args.sections)) * (slice))
                disc = iterate_structure(generate_binary_structure(3, 1), dsize).astype(scipy.int_)
                mask_equal_or_greater = mask_equal | mask_greater
                image_threshold_data = image_gradient_data * mask_equal_or_greater
            elif "oil" == args.type:
                # oil: disc size shrinks with the level (large discs at low levels)
                dsize = int((args.dsize / float(args.sections)) * (args.sections - slice + 1))
                disc = iterate_structure(generate_binary_structure(3, 1), dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                mask_equal_or_lower = mask_equal | mask_lower
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[mask_equal_or_lower].max()
            elif "sections" == args.type:
                # sections: fixed disc size for every level
                dsize = args.dsize
                disc = iterate_structure(generate_binary_structure(3, 1), args.dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                # set all voxels under the current slice to zero
                image_threshold_data[mask_lower] = 0
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[mask_equal].max()

            logger.debug(
                "{} of {} voxels belong to this level.".format(
                    len(mask_equal.nonzero()[0]), scipy.prod(image_threshold_data.shape)
                )
            )

            # apply the closing with the appropriate disc size
            logger.debug(
                "Applying a disk of {} to all values >= {} and < {}...".format(dsize, bins[slice - 1], bins[slice])
            )
            image_closed_data = grey_closing(image_threshold_data, footprint=disc)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data, image_closed_data)

            # save created output file, if in sections mode
            if "sections" == args.type:
                # save resulting gradient image
                logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
                image_viscous = image_like(image_viscous_data, image_gradient)
                save(image_viscous, image_viscous_name)

        # save created output file, if not in sections mode
        if "sections" != args.type:
            # save resulting gradient image
            logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

    logger.info("Successfully terminated.")
# Example #29
# 0
def main():
    """Run a graph-cut segmentation on hard-coded liver MRI volumes.

    Loads a gradient-magnitude image, a watershed label image and
    foreground/background marker images from fixed paths, builds a region
    adjacency graph with Stawiaski boundary weights, executes a
    max-flow/min-cut and saves the resulting binary liver mask.
    """
    # prepare logger
    logger = Logger.getInstance()
    logger.setLevel(logging.DEBUG)

    # input image locations
    g = '/home/omaier/Experiments/Regionsegmentation/Evaluation_Viscous/01gradient/o09_gradient.nii' # gradient magnitude image
    l = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/label_full.nii' # watershed label image
    fg = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/fg_markers.nii'
    bg = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/bg_markers.nii'

    # output image location
    r = '/home/omaier/Experiments/GraphCut/BoundaryTerm/graphcut_full.nii' # liver mask

    # load images
    g_i = load(g)
    l_i = load(l)
    fg_i = load(fg)
    bg_i = load(bg)

    # extract and prepare image data
    g_d = scipy.squeeze(g_i.get_data())
    l_d = scipy.squeeze(l_i.get_data())
    fg_d = scipy.squeeze(fg_i.get_data())
    bg_d = scipy.squeeze(bg_i.get_data())

    # recompute the label ids to start from id 1
    # NOTE(review): "filter" here is presumably the project's filter module
    # (it provides relabel/relabel_map), shadowing the builtin — verify
    logger.info('Relabel input image...')
    l_d = filter.relabel(l_d)

    # generate graph
    logger.info('Preparing graph...')
    gr = graphcut.graph_from_labels(l_d, fg_d, bg_d, boundary_term = graphcut.boundary_stawiaski, boundary_term_args = g_d)

    # build graph cut graph from graph
    logger.info('Generating BK_MFMC C++ graph...')
    gcgraph = graphcut.GraphDouble(len(gr.get_nodes()), len(gr.get_nweights()))
    gcgraph.add_node(len(gr.get_nodes()))
    # FIX: dict.iteritems() is Python-2-only; .items() behaves the same under 2 and 3
    for node, weight in gr.get_tweights().items():
        gcgraph.add_tweights(int(node - 1), weight[0], weight[1])
    for edge, weight in gr.get_nweights().items():
        gcgraph.add_edge(int(edge[0] - 1), int(edge[1] - 1), weight[0], weight[1])

    # execute min-cut
    logger.info('Executing min-cut...')
    maxflow = gcgraph.maxflow()
    logger.debug('Maxflow is {}'.format(maxflow))

    # collect results: map each region to 0 (background) if it ended up on
    # the SINK side of the cut, else 1 (foreground)
    logger.info('Applying results...')
    l_d = filter.relabel_map(l_d, gcgraph.what_segment, lambda fun, rid: 0 if gcgraph.termtype.SINK == fun(int(rid) - 1) else 1)

    logger.info('Saving images resulting mask...')
    # save resulting mask
    l_d = l_d.astype(scipy.bool_)
    save(image_like(l_d, fg_i), r)

    logger.info('Done!')
# Example #30
# 0
def main():
    """Run a graph-cut segmentation on hard-coded liver MRI volumes.

    Loads a gradient-magnitude image, a watershed label image and
    foreground/background marker images from fixed paths, builds a region
    adjacency graph with Stawiaski boundary weights, executes a
    max-flow/min-cut and saves the resulting binary liver mask.
    """
    # prepare logger
    logger = Logger.getInstance()
    logger.setLevel(logging.DEBUG)

    # input image locations
    g = '/home/omaier/Experiments/Regionsegmentation/Evaluation_Viscous/01gradient/o09_gradient.nii'  # gradient magnitude image
    l = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/label_full.nii'  # watershed label image
    fg = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/fg_markers.nii'
    bg = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/bg_markers.nii'

    # output image location
    r = '/home/omaier/Experiments/GraphCut/BoundaryTerm/graphcut_full.nii'  # liver mask

    # load images
    g_i = load(g)
    l_i = load(l)
    fg_i = load(fg)
    bg_i = load(bg)

    # extract and prepare image data
    g_d = scipy.squeeze(g_i.get_data())
    l_d = scipy.squeeze(l_i.get_data())
    fg_d = scipy.squeeze(fg_i.get_data())
    bg_d = scipy.squeeze(bg_i.get_data())

    # recompute the label ids to start from id 1
    # NOTE(review): "filter" here is presumably the project's filter module
    # (it provides relabel/relabel_map), shadowing the builtin — verify
    logger.info('Relabel input image...')
    l_d = filter.relabel(l_d)

    # generate graph
    logger.info('Preparing graph...')
    gr = graphcut.graph_from_labels(l_d,
                                    fg_d,
                                    bg_d,
                                    boundary_term=graphcut.boundary_stawiaski,
                                    boundary_term_args=g_d)

    # build graph cut graph from graph
    logger.info('Generating BK_MFMC C++ graph...')
    gcgraph = graphcut.GraphDouble(len(gr.get_nodes()), len(gr.get_nweights()))
    gcgraph.add_node(len(gr.get_nodes()))
    # FIX: dict.iteritems() is Python-2-only; .items() behaves the same under 2 and 3
    for node, weight in gr.get_tweights().items():
        gcgraph.add_tweights(int(node - 1), weight[0], weight[1])
    for edge, weight in gr.get_nweights().items():
        gcgraph.add_edge(int(edge[0] - 1), int(edge[1] - 1), weight[0],
                         weight[1])

    # execute min-cut
    logger.info('Executing min-cut...')
    maxflow = gcgraph.maxflow()
    logger.debug('Maxflow is {}'.format(maxflow))

    # collect results: map each region to 0 (background) if it ended up on
    # the SINK side of the cut, else 1 (foreground)
    logger.info('Applying results...')
    l_d = filter.relabel_map(
        l_d, gcgraph.what_segment, lambda fun, rid: 0
        if gcgraph.termtype.SINK == fun(int(rid) - 1) else 1)

    logger.info('Saving images resulting mask...')
    # save resulting mask
    l_d = l_d.astype(scipy.bool_)
    save(image_like(l_d, fg_i), r)

    logger.info('Done!')
def main():
    """Apply an FFT to each input image and save real/imaginary parts.

    For every image in ``args.images`` the discrete Fourier transform is
    computed; the real part is shifted to be non-negative, rescaled to a
    logarithmic uint16 range and saved as ``<stem>_real<ext>``; the
    imaginary part is saved unchanged as ``<stem>_imag<ext>`` (float32).
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # iterate over input images
    for image in args.images:

        # build output image names: <stem>_real<ext> / <stem>_imag<ext>
        image_real_name = image.split("/")[-1][:-4] + "_real" + image.split("/")[-1][-4:]
        image_imag_name = image.split("/")[-1][:-4] + "_imag" + image.split("/")[-1][-4:]

        # check if output images exist; skip unless forced
        if not args.force:
            if os.path.exists(image_real_name):
                logger.warning("The output image {} already exists. Skipping this step.".format(image_real_name))
                continue
            elif os.path.exists(image_imag_name):
                logger.warning("The output image {} already exists. Skipping this step.".format(image_imag_name))
                continue

        # load image using nibabel
        logger.info("Loading image {} using NiBabel...".format(image))
        image_original = load(image)

        # get and prepare image data
        image_original_data = scipy.squeeze(image_original.get_data())

        # apply the discrete fast Fourier transformation
        logger.info("Executing the discrete fast Fourier transformation...")
        image_fft_data = scipy.fftpack.fftn(image_original_data)

        # transform the real part to logarithmic scale
        logger.info("To logarithmic space...")
        image_real_data = image_fft_data.real
        # FIX: was a bare Python-2 `print`; route debug output through the logger
        logger.debug("Real part range before rescaling: ({}, {})".format(image_real_data.min(), image_real_data.max()))
        image_real_data = image_real_data + abs(image_real_data.min())
        # scale by 0.0001, take the log and then scale to fit uint16
        constant = 65535.0 / (math.log(1 + image_real_data.max()))

        def _to_log_scale(x):
            return constant * math.log(1 + x * 0.0001)

        new_func = numpy.vectorize(_to_log_scale)
        logger.info("Apply...")
        image_real_data = new_func(image_real_data)
        logger.debug("Real part range after rescaling: ({}, {})".format(image_real_data.min(), image_real_data.max()))
        image_imag_data = image_fft_data.imag

        # save resulting images
        # FIX: message previously claimed float32; the real part is saved as uint16
        logger.info(
            "Saving resulting images real part as {} in the same format as input image, only with data-type uint16...".format(
                image_real_name
            )
        )
        image_real = image_like(image_real_data, image_original)
        image_real.get_header().set_data_dtype(scipy.uint16)
        save(image_real, image_real_name)
        # FIX: message previously said "real part" for the imaginary image
        logger.info(
            "Saving resulting images imaginary part as {} in the same format as input image, only with data-type float32...".format(
                image_imag_name
            )
        )
        image_imag = image_like(image_imag_data, image_original)
        image_imag.get_header().set_data_dtype(scipy.float32)
        save(image_imag, image_imag_name)

    logger.info("Successfully terminated.")
def main():
    """Apply weighted viscous morphology closings to gradient images.

    For every input image a flattened histogram with ``len(args.func)``
    sections is computed; each section is closed with a sphere whose size
    is taken from the corresponding ``args.func`` entry, and the voxel-wise
    maximum over all section results is saved as
    ``<stem>_wviscous_<sizes><ext>`` in ``args.folder``.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    logger.info('Executing weighted viscous morphology with {} ({} bins).'.format(','.join(map(str, args.func)), len(args.func)))

    # iterate over input images
    for image in args.images:

        # build output file name
        image_viscous_name = args.folder + '/' + image.split('/')[-1][:-4] + '_wviscous_' + '_'.join(map(str, args.func))
        image_viscous_name += image.split('/')[-1][-4:]

        # check if output file exists; skip unless forced
        if not args.force:
            if os.path.exists(image_viscous_name):
                logger.warning('The output file {} already exists. Skipping this image.'.format(image_viscous_name))
                continue

        # get and prepare image data
        logger.info('Loading image {} using NiBabel...'.format(image))
        image_gradient = load(image)
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        if args.debug:
            logger.debug('Intensity range of gradient image is ({}, {})'.format(image_gradient_data.min(), image_gradient_data.max()))

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, len(args.func))
        logger.debug('{} bins created'.format(len(bins) - 1))

        # check if the number of bins is consistent
        if len(args.func) != len(bins) - 1:
            # FIX: the message was formatted with args.sections, which this script's
            # arguments do not define; the requested bin count is len(args.func)
            raise Exception('Inconsistency between the number of requested and created bins ({} to {})'.format(len(args.func), len(bins) - 1))

        # prepare result image
        image_viscous_data = image_gradient_data

        # transform the gradient images topography
        logger.info('Applying the viscous morphological operations on {} sections...'.format(len(args.func)))
        for sl in range(1, len(args.func) + 1):

            # create sphere to use in this step; sizes of 0 or below lead to
            # no changes and are not executed
            if 0 >= args.func[sl - 1]: continue
            sphere = iterate_structure(generate_binary_structure(3, 1), args.func[sl - 1]).astype(scipy.int_)

            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = (image_gradient_data >= bins[sl]) # all voxels which are over the current slice
            mask_lower = (image_gradient_data < bins[sl - 1]) # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower) # all voxels in the current slice

            # extract slice
            image_threshold_data = image_gradient_data.copy()
            image_threshold_data[mask_lower] = 0 # set all voxels under the current slice to zero
            image_threshold_data[mask_greater] = image_threshold_data[mask_equal].max() # set all voxels over the current slice to the max of all voxels in the current slice

            logger.debug('{} of {} voxels belong to this level.'.format(len(mask_equal.nonzero()[0]), scipy.prod(image_threshold_data.shape)))

            # apply the closing with the appropriate sphere
            logger.debug('Applying a disk of {} to all values >= {} and < {} (sec {})...'.format(args.func[sl - 1], bins[sl - 1], bins[sl], sl))
            image_closed_data = grey_closing(image_threshold_data, footprint=sphere)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data, image_closed_data)

        # save resulting gradient image
        logger.info('Saving resulting gradient image as {}...'.format(image_viscous_name))
        image_viscous = image_like(image_viscous_data, image_gradient)
        save(image_viscous, image_viscous_name)

    logger.info('Successfully terminated.')
def main():
    """Combine several binary mask images into one Radiance segmentation image.

    Reads alternating (mask file, segmentation name) pairs from the command
    line, packs each mask into one bit of the output voxels (mask k occupies
    bit k, i.e. value 2**k), and writes an Analyze header/image pair plus a
    tab-separated ``.msk`` file mapping each segmentation name to its bit
    value and display colour.

    Exits with status 1 if any output file already exists and ``--force`` was
    not given. Raises ArgumentError for more than 64 masks or for masks whose
    shape differs from the first one.
    """
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # build output file names (Analyze .hdr/.img pair plus the .msk info file)
    output_hdr_name = args.output + '.hdr'
    output_img_name = args.output + '.img'
    output_msk_name = args.output + '.msk'
    
    # refuse to overwrite existing output unless --force was supplied
    if not args.force:
        if os.path.exists(output_hdr_name):
            logger.warning('The output header {} already exists. Breaking.'.format(output_hdr_name))
            exit(1)
        elif os.path.exists(output_img_name):
            logger.warning('The output image {} already exists. Breaking.'.format(output_img_name))
            exit(1)
        elif os.path.exists(output_msk_name):
            logger.warning('The output info file {} already exists. Breaking.'.format(output_msk_name))
            exit(1)
    
    # decide on the most suitable bit format: one bit per mask, smallest
    # unsigned type that fits. args.masks holds (file, name) pairs, hence the
    # halving; // keeps the count integral under Python 3 (/ would yield a
    # float there).
    number_of_masks = len(args.masks) // 2
    if number_of_masks <= 8:
        bit_format = scipy.uint8
    elif number_of_masks <= 16:
        bit_format = scipy.uint16
    elif number_of_masks <= 32:
        bit_format = scipy.uint32
    elif number_of_masks <= 64:
        bit_format = scipy.uint64
    else:
        raise ArgumentError('It is not possible to combine more than 64 single masks.')
    
    logger.info('Creating a Radiance® segmentation image in {} bit format...'.format(bit_format))
    
    # load the first mask image; it serves as the shape reference and as the
    # header template when saving the combined result
    logger.info('Loading mask {} ({} segmentation) using NiBabel...'.format(args.masks[0], args.masks[1]))
    image_mask = load(args.masks[0])
    image_mask_data = scipy.squeeze(image_mask.get_data())
    
    # prepare result image
    image_radiance_data = scipy.zeros(image_mask_data.shape, dtype=bit_format)
    
    logger.debug('Result image is of dimensions {} and type {}.'.format(image_radiance_data.shape, image_radiance_data.dtype))
    
    # write the .msk label table while accumulating the masks; the context
    # manager guarantees the file is closed even on error (previously the
    # handle was never closed)
    with open(output_msk_name, 'w') as f:
        # first mask occupies bit 0 -> voxel value 1
        image_radiance_data[image_mask_data > 0] = 1
        
        # record the first mask's name, bit value and colour in the .msk file
        f.write('{}\t1\t{}\t{}\t{}\n'.format(args.masks[1], *__COLOURS[0 % len(__COLOURS)]))
        
        for i in range(2, len(args.masks), 2):
            # loading mask image
            logger.info('Loading mask {} ({} segmentation) using NiBabel...'.format(args.masks[i], args.masks[i+1]))
            image_mask_data = scipy.squeeze(load(args.masks[i]).get_data())
            
            # check if the shape of the images is consistent
            if image_mask_data.shape != image_radiance_data.shape:
                raise ArgumentError('Mask {} is with {} of a different shape as the first mask image (which has {}).'.format(args.masks[i], image_mask_data.shape, image_radiance_data.shape))
            
            # OR this mask into its own bit; // keeps the exponent an int so
            # the addition stays in the unsigned integer dtype (i/2 would be a
            # float under Python 3 and break both pow() use and indexing)
            image_radiance_data[image_mask_data > 0] += pow(2, i // 2)
            
            # record this mask's name, bit value and colour in the .msk file
            f.write('{}\t{}\t{}\t{}\t{}\n'.format(args.masks[i+1], pow(2, i // 2), *__COLOURS[(i // 2) % len(__COLOURS)]))

    logger.info('Saving Radiance® segmentation image as {}/.img/.msk...'.format(output_hdr_name))
    # propagate the chosen dtype into the template header before saving
    image_mask.get_header().set_data_dtype(bit_format)
    save(image_like(image_radiance_data, image_mask), output_hdr_name)
    
    logger.info('Successfully terminated.')