def __load_images(label_image_n, mask_image_n, boundary_image_n, fg_image_n=False, bg_image_n=False):
    """
    Load and return all image data in preprocessed ndarrays.
    The label image will be relabeled to start from 1.
    @return label, ground-truth-mask, boundary-result-mask[, fg-markers, bg-markers]
    """
    # load images
    label_image = load(label_image_n)
    mask_image = load(mask_image_n)
    boundary_image = load(boundary_image_n)
    if fg_image_n:
        fg_image = load(fg_image_n)
    if bg_image_n:
        bg_image = load(bg_image_n)

    # extract image data
    label_image_d = scipy.squeeze(label_image.get_data())
    mask_image_d = scipy.squeeze(mask_image.get_data()).astype(scipy.bool_)
    boundary_image_d = scipy.squeeze(boundary_image.get_data()).astype(scipy.bool_)
    if fg_image_n:
        fg_image_d = scipy.squeeze(fg_image.get_data()).astype(scipy.bool_)
    if bg_image_n:
        bg_image_d = scipy.squeeze(bg_image.get_data()).astype(scipy.bool_)

    # check if images are of same dimensionality
    if label_image_d.shape != mask_image_d.shape:
        raise argparse.ArgumentError('The mask image {} must be of the same dimensionality as the label image {}.'.format(mask_image_d.shape, label_image_d.shape))
    if label_image_d.shape != boundary_image_d.shape:
        raise argparse.ArgumentError('The boundary term image {} must be of the same dimensionality as the label image {}.'.format(boundary_image_d.shape, label_image_d.shape))
    if fg_image_n:
        if label_image_d.shape != fg_image_d.shape:
            raise argparse.ArgumentError('The foreground markers image {} must be of the same dimensionality as the label image {}.'.format(fg_image_d.shape, label_image_d.shape))
    if bg_image_n:
        if label_image_d.shape != bg_image_d.shape:
            raise argparse.ArgumentError('The background markers image {} must be of the same dimensionality as the label image {}.'.format(bg_image_d.shape, label_image_d.shape))

    # relabel the label image to start from 1
    label_image_d = medpy.filter.relabel(label_image_d, 1)

    if fg_image_n:
        return label_image_d, mask_image_d, boundary_image_d, fg_image_d, bg_image_d
    else:
        return label_image_d, mask_image_d, boundary_image_d

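A minimal usage sketch for __load_images, assuming the load, scipy, medpy and argparse names used above are in scope; the file names below are placeholders, not files from the original project.

# Hypothetical example - the file names are placeholders.
label, mask, boundary, fg, bg = __load_images(
    'label.nii', 'mask.nii', 'boundary.nii',
    fg_image_n='fg_markers.nii', bg_image_n='bg_markers.nii')
# Without marker images only the first three arrays are returned.
label, mask, boundary = __load_images('label.nii', 'mask.nii', 'boundary.nii')
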
def test_load_dataarray3():
    img3 = load(DATA_FILE3)
    with InTemporaryDirectory():
        save(img3, 'test.gii')
        bimg = load('test.gii')
    for img in (img3, bimg):
        assert_array_almost_equal(img.darrays[0].data[30:50], DATA_FILE3_darr1)

def test_readwritedata():
    img = load(DATA_FILE2)
    with InTemporaryDirectory():
        save(img, "test.gii")
        img2 = load("test.gii")
        assert_equal(img.numDA, img2.numDA)
        assert_array_almost_equal(img.darrays[0].data, img2.darrays[0].data)

def test_base64_written():
    with InTemporaryDirectory():
        with open(DATA_FILE5, 'rb') as fobj:
            contents = fobj.read()
        # Confirm the bad tags are still in the file
        assert_true(b'GIFTI_ENCODING_B64BIN' in contents)
        assert_true(b'GIFTI_ENDIAN_LITTLE' in contents)
        # The good ones are missing
        assert_false(b'Base64Binary' in contents)
        assert_false(b'LittleEndian' in contents)
        # Round trip
        img5 = load(DATA_FILE5)
        save(img5, 'fixed.gii')
        with open('fixed.gii', 'rb') as fobj:
            contents = fobj.read()
        # The bad codes have gone, replaced by the good ones
        assert_false(b'GIFTI_ENCODING_B64BIN' in contents)
        assert_false(b'GIFTI_ENDIAN_LITTLE' in contents)
        assert_true(b'Base64Binary' in contents)
        if sys.byteorder == 'little':
            assert_true(b'LittleEndian' in contents)
        else:
            assert_true(b'BigEndian' in contents)
        img5_fixed = load('fixed.gii')
        darrays = img5_fixed.darrays
        assert_array_almost_equal(darrays[0].data, DATA_FILE5_darr1)
        assert_array_almost_equal(darrays[1].data, DATA_FILE5_darr2)

def test_load_dataarray4():
    img4 = load(DATA_FILE4)
    # Round trip
    with InTemporaryDirectory():
        save(img4, "test.gii")
        bimg = load("test.gii")
    for img in (img4, bimg):
        assert_array_almost_equal(img.darrays[0].data[:10], DATA_FILE4_darr1)

def test_load_dataarray2():
    img2 = load(DATA_FILE2)
    # Round trip
    with InTemporaryDirectory():
        save(img2, 'test.gii')
        bimg = load('test.gii')
    for img in (img2, bimg):
        assert_array_almost_equal(img.darrays[0].data[:10], DATA_FILE2_darr1)

def test_read_ordering():
    # DATA_FILE1 has an expected darray[0].data shape of (3,3).  However if we
    # read another image first (DATA_FILE2) then the shape is wrong
    # Read an image
    img2 = load(DATA_FILE2)
    assert_equal(img2.darrays[0].data.shape, (143479, 1))
    # Read image for which we know output shape
    img = load(DATA_FILE1)
    assert_equal(img.darrays[0].data.shape, (3, 3))

def test_load_dataarray1():
    img1 = load(DATA_FILE1)
    # Round trip
    with InTemporaryDirectory():
        save(img1, "test.gii")
        bimg = load("test.gii")
    for img in (img1, bimg):
        assert_array_almost_equal(img.darrays[0].data, DATA_FILE1_darr1)
        assert_array_almost_equal(img.darrays[1].data, DATA_FILE1_darr2)
        me = img.darrays[0].meta.metadata
        assert_true("AnatomicalStructurePrimary" in me)
        assert_true("AnatomicalStructureSecondary" in me)
        assert_equal(me["AnatomicalStructurePrimary"], "CortexLeft")
        assert_array_almost_equal(img.darrays[0].coordsys.xform, np.eye(4, 4))
        assert_equal(xform_codes.niistring[img.darrays[0].coordsys.dataspace], "NIFTI_XFORM_TALAIRACH")
        assert_equal(xform_codes.niistring[img.darrays[0].coordsys.xformspace], "NIFTI_XFORM_TALAIRACH")

def __load(picklefile, label):
    """
    Load a pickled testbed as well as the original and label image for further processing.
    The label image will be relabeled to start with region id 1.
    @param picklefile the testbed pickle file name
    @param label the label image file name
    @return a tuple containing:
        label_image_d: the label image data as ndarray
        label_image: the loaded label image object
        bounding_boxes: the bounding boxes around the label image regions
            (Note that the bounding box of a region with id rid is accessed using bounding_boxes[rid - 1])
        model_fg_ids: the region ids of all regions to create the foreground model from
        model_bg_ids: the region ids of all regions to create the background model from
        truth_fg: subset of the evaluation regions that are foreground according to the ground-truth
        truth_bg: subset of the evaluation regions that are background according to the ground-truth
        (the evaluation region ids are read from the pickle file but not returned)
    """
    # load and preprocess images
    label_image = load(label)
    label_image_d = scipy.squeeze(label_image.get_data())

    # relabel the label image to start from 1
    label_image_d = medpy.filter.relabel(label_image_d, 1)

    # extracting bounding boxes
    bounding_boxes = find_objects(label_image_d)

    # load testbed
    with open(picklefile, 'r') as f:
        model_fg_ids = cPickle.load(f)
        model_bg_ids = cPickle.load(f)
        cPickle.load(f)  # eval ids (read and discarded)
        truth_fg = cPickle.load(f)
        truth_bg = cPickle.load(f)

    return label_image_d, label_image, bounding_boxes, model_fg_ids, model_bg_ids, truth_fg, truth_bg

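A brief usage sketch for __load, assuming the cPickle, find_objects and medpy names used above; 'testbed.pickle' and 'label.nii' are placeholder file names.

# Hypothetical call - file names are placeholders.
(label_data, label_img, bboxes,
 fg_ids, bg_ids, truth_fg, truth_bg) = __load('testbed.pickle', 'label.nii')
# Region ids start at 1, so the bounding box of region 1 is bboxes[0].
region_1_view = label_data[bboxes[0]]
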
def main():
    # prepare logger
    a = [[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]],
         [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]],
         [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]
    b = [[[1, 2, 3, 4, 3], [3, 1, 2, 1, 4], [3, 1, 4, 2, 2], [2, 2, 1, 3, 3], [4, 3, 1, 2, 1]]]
    c = [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 1, 2, 3]],
         [[4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]]]
    a = scipy.asarray(a)
    b = scipy.asarray(b)
    c = scipy.asarray(c)

    original_image_n = '/home/omaier/Experiments/Regionsegmentation/Evaluation_Viscous/00originalvolumes/o09.nii'
    original_image = load(original_image_n)
    original_image_d = scipy.squeeze(original_image.get_data())
    original_image_d = original_image_d[0:50, 0:50, 0:50]

    dir = scipy.zeros(original_image_d.shape).ravel()
    oir = original_image_d.ravel()
    for i in range(len(dir)):
        dir[i] = oir[i]
    dir = dir.reshape(original_image_d.shape)

    coa, con, dir = tamura(dir, 5)

    for i in range(5):
        print "k=", i + 1, " with ", len((coa == i).nonzero()[0])

def test_load_labeltable():
    img6 = load(DATA_FILE6)
    # Round trip
    with InTemporaryDirectory():
        save(img6, 'test.gii')
        bimg = load('test.gii')
    for img in (img6, bimg):
        assert_array_almost_equal(img.darrays[0].data[:3], DATA_FILE6_darr1)
        assert_equal(len(img.labeltable.labels), 36)
        labeldict = img.labeltable.get_labels_as_dict()
        assert_true(660700 in labeldict)
        assert_equal(labeldict[660700], 'entorhinal')
        assert_equal(img.labeltable.labels[1].key, 2647065)
        assert_equal(img.labeltable.labels[1].red, 0.0980392)
        assert_equal(img.labeltable.labels[1].green, 0.392157)
        assert_equal(img.labeltable.labels[1].blue, 0.156863)
        assert_equal(img.labeltable.labels[1].alpha, 1)

def test_save_load():
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    img = ni1.Nifti1Image(data, affine)
    img.set_data_dtype(npt)
    with InTemporaryDirectory() as pth:
        nifn = pjoin(pth, 'an_image.nii')
        sifn = pjoin(pth, 'another_image.img')
        ni1.save(img, nifn)
        re_img = nils.load(nifn)
        yield assert_true(isinstance(re_img, ni1.Nifti1Image))
        yield assert_array_equal(re_img.get_data(), data)
        yield assert_array_equal(re_img.get_affine(), affine)
        # These and subsequent del statements are to prevent confusing
        # windows errors when trying to open files or delete the
        # temporary directory.
        del re_img
        try:
            import scipy.io
        except ImportError:
            # ignore if there is no matfile reader, and restart
            pass
        else:
            spm2.save(img, sifn)
            re_img2 = nils.load(sifn)
            yield assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage))
            yield assert_array_equal(re_img2.get_data(), data)
            yield assert_array_equal(re_img2.get_affine(), affine)
            del re_img2
            spm99.save(img, sifn)
            re_img3 = nils.load(sifn)
            yield assert_true(isinstance(re_img3, spm99.Spm99AnalyzeImage))
            yield assert_array_equal(re_img3.get_data(), data)
            yield assert_array_equal(re_img3.get_affine(), affine)
            ni1.save(re_img3, nifn)
            del re_img3
            re_img = nils.load(nifn)
            yield assert_true(isinstance(re_img, ni1.Nifti1Image))
            yield assert_array_equal(re_img.get_data(), data)
            yield assert_array_equal(re_img.get_affine(), affine)
            del re_img

def test_parse_dataarrays():
    fn = "bad_daa.gii"
    img = gi.GiftiImage()

    with InTemporaryDirectory():
        save(img, fn)
        with open(fn, "r") as fp:
            txt = fp.read()
        # Make a bad gifti.
        txt = txt.replace('NumberOfDataArrays="0"', 'NumberOfDataArrays ="1"')
        with open(fn, "w") as fp:
            fp.write(txt)
        with clear_and_catch_warnings() as w:
            warnings.filterwarnings("once", category=UserWarning)
            load(fn)
            assert_equal(len(w), 1)
        assert_equal(img.numDA, 0)

def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # build output image name
    image_bg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.bg'
    image_bg_name += args.mask.split('/')[-1][-4:]

    # check if output image exists
    if not args.force:
        if os.path.exists(image_bg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_bg_name))
            exit(1)

    # load mask
    logger.info('Loading mask {}...'.format(args.mask))
    try:
        mask_image = load(args.mask)
        mask_image_data = numpy.squeeze(mask_image.get_data()).astype(scipy.bool_)
    except ImageFileError as e:
        logger.critical('The mask image does not exist or its file type is unknown.')
        raise ArgumentError('The mask image does not exist or its file type is unknown.', e)

    # array of indices to access desired slices
    sls = [(slice(1), slice(None), slice(None)),
           (slice(-1, None), slice(None), slice(None)),
           (slice(None), slice(1), slice(None)),
           (slice(None), slice(-1, None), slice(None)),
           (slice(None), slice(None), slice(1)),
           (slice(None), slice(None), slice(-1, None))]

    # security check
    logger.info('Determining whether the border slices intersect with the reference liver mask...')
    for sl in sls:
        if not 0 == len(mask_image_data[sl].nonzero()[0]):
            logger.critical('Reference mask reaches till the image border.')
            raise ArgumentError('Reference mask reaches till the image border.')

    # create and save background marker image
    logger.info('Creating background marker image...')
    image_bg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    for sl in sls:
        image_bg_data[sl] = True

    logger.info('Saving background marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_bg_data, mask_image), image_bg_name)

    logger.info('Successfully terminated.')

def test_save_load():
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    img = ni1.Nifti1Image(data, affine)
    img.set_data_dtype(npt)
    with InTemporaryDirectory() as pth:
        nifn = 'an_image.nii'
        sifn = 'another_image.img'
        ni1.save(img, nifn)
        re_img = nils.load(nifn)
        yield assert_true(isinstance(re_img, ni1.Nifti1Image))
        yield assert_array_equal(re_img.get_data(), data)
        yield assert_array_equal(re_img.get_affine(), affine)
        # These and subsequent del statements are to prevent confusing
        # windows errors when trying to open files or delete the
        # temporary directory.
        del re_img
        if have_scipy:  # skip if we cannot read .mat files
            spm2.save(img, sifn)
            re_img2 = nils.load(sifn)
            yield assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage))
            yield assert_array_equal(re_img2.get_data(), data)
            yield assert_array_equal(re_img2.get_affine(), affine)
            del re_img2
            spm99.save(img, sifn)
            re_img3 = nils.load(sifn)
            yield assert_true(isinstance(re_img3, spm99.Spm99AnalyzeImage))
            yield assert_array_equal(re_img3.get_data(), data)
            yield assert_array_equal(re_img3.get_affine(), affine)
            ni1.save(re_img3, nifn)
            del re_img3
            re_img = nils.load(nifn)
            yield assert_true(isinstance(re_img, ni1.Nifti1Image))
            yield assert_array_equal(re_img.get_data(), data)
            yield assert_array_equal(re_img.get_affine(), affine)
            del re_img

def test_labeltable_deprecations():
    img = load(DATA_FILE6)
    lt = img.labeltable

    # Test deprecation
    with clear_and_catch_warnings() as w:
        warnings.filterwarnings('once', category=DeprecationWarning)
        assert_equal(lt, img.get_labeltable())

    with clear_and_catch_warnings() as w:
        warnings.filterwarnings('once', category=DeprecationWarning)
        img.set_labeltable(lt)
    assert_equal(lt, img.labeltable)

def test_metadata_deprecations():
    img = load(datafiles[0])
    me = img.meta

    # Test deprecation
    with clear_and_catch_warnings() as w:
        warnings.filterwarnings("once", category=DeprecationWarning)
        assert_equal(me, img.get_meta())

    with clear_and_catch_warnings() as w:
        warnings.filterwarnings("once", category=DeprecationWarning)
        img.set_metadata(me)
    assert_equal(me, img.meta)

def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # build output file name
    file_csv_name = args.csv + '.csv'

    # check if output file exists
    if not args.force:
        if os.path.exists(file_csv_name):
            logger.warning('The output file {} already exists. Skipping.'.format(file_csv_name))
            sys.exit(0)

    # open output file
    with open(file_csv_name, 'w') as f:
        # write header into file
        f.write('image;min_x;min_y;min_z;max_x;max_y;max_z\n')

        # iterate over input images
        for image in args.images:
            # get and prepare image data
            logger.info('Processing image {}...'.format(image))
            image_data = numpy.squeeze(load(image).get_data())

            # collect the indices of all non-zero voxels
            mask = image_data.nonzero()

            # write the bounding box extents into the file
            f.write('{};{};{};{};{};{};{}\n'.format(image.split('/')[-1],
                                                    mask[0].min(), mask[1].min(), mask[2].min(),
                                                    mask[0].max(), mask[1].max(), mask[2].max()))
            f.flush()

    logger.info('Successfully terminated.')

def main(args=None):
    """Main program function."""
    parser = _get_parser()
    opts = parser.parse_args(args)
    from_img = load(opts.infile)

    if opts.Volume:
        if opts.units == 'mm3':
            computed_volume = mask_volume(from_img)
        elif opts.units == 'vox':
            computed_volume = count_nonzero_voxels(from_img)
        else:
            raise ValueError(f'{opts.units} is not a valid unit. Choose "mm3" or "vox".')
        print(computed_volume)
        return 0

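A hedged invocation sketch for this entry point. The flag names are an assumption inferred from the opts.Volume and opts.units attributes above, not confirmed by this file, and the input path is a placeholder.

# Assumed flag names inferred from opts.Volume / opts.units; file name is a placeholder.
main(['segmentation.nii.gz', '--Volume', '--units', 'mm3'])  # prints the mask volume in mm^3
main(['segmentation.nii.gz', '--Volume', '--units', 'vox'])  # prints the number of non-zero voxels
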
def main(args=None):
    """Main program function."""
    parser = _get_parser()
    opts = parser.parse_args(args)
    from_img = load(opts.infile)

    if not opts.force and Path(opts.outfile).exists():
        raise FileExistsError(f"Output file exists: {opts.outfile}")

    out_img = conform(from_img=from_img,
                      out_shape=opts.out_shape,
                      voxel_size=opts.voxel_size,
                      order=3,
                      cval=0.0,
                      orientation=opts.orientation)

    save(out_img, opts.outfile)

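For reference, the resampling step can also be sketched programmatically, assuming load, save and conform are the same callables imported by the script above (in nibabel they live in nibabel.loadsave and nibabel.processing); the file names are placeholders.

# Hypothetical direct use of the conform step; file names are placeholders.
img = load('input.nii.gz')
resampled = conform(img, out_shape=(256, 256, 256), voxel_size=(1.0, 1.0, 1.0),
                    order=3, cval=0.0, orientation='RAS')
save(resampled, 'conformed.nii.gz')
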
def test_load_getbyintent():
    img = load(DATA_FILE1)
    da = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET")
    assert_equal(len(da), 1)

    with clear_and_catch_warnings() as w:
        warnings.filterwarnings("once", category=DeprecationWarning)
        da = img.getArraysFromIntent("NIFTI_INTENT_POINTSET")
        assert_equal(len(da), 1)
        assert_equal(len(w), 1)
        assert_equal(w[0].category, DeprecationWarning)

    da = img.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE")
    assert_equal(len(da), 1)

    da = img.get_arrays_from_intent("NIFTI_INTENT_CORREL")
    assert_equal(len(da), 0)
    assert_equal(da, [])

def main():
    # prepare logger
    logger = Logger.getInstance()
    logger.setLevel(logging.DEBUG)

    # all rings are at slice z = 98
    ring_closed = scipy.squeeze(load('ring_closed.nii').get_data()).astype(scipy.bool_)[:, :, 98]
    ring_closed_w1hole = scipy.squeeze(load('ring_closed_w1hole.nii').get_data()).astype(scipy.bool_)[:, :, 98]
    ring_closed_wholes = scipy.squeeze(load('ring_closed_wholes.nii').get_data()).astype(scipy.bool_)[:, :, 98]
    ring_open = scipy.squeeze(load('ring_open.nii').get_data()).astype(scipy.bool_)[:, :, 98]
    ring_open_w1hole = scipy.squeeze(load('ring_open_w1hole.nii').get_data()).astype(scipy.bool_)[:, :, 98]
    ring_open_wholes = scipy.squeeze(load('ring_open_wholes.nii').get_data()).astype(scipy.bool_)[:, :, 98]
    ring_difficult = scipy.squeeze(load('ring_difficult.nii').get_data()).astype(scipy.bool_)[:, :, 98]
    ring_difficult_w1hole = scipy.squeeze(load('ring_difficult_w1hole.nii').get_data()).astype(scipy.bool_)[:, :, 98]

    # algorithm
    print 'ring_closed', alg(ring_closed), alg2(ring_closed)
    print 'ring_closed_w1hole', alg(ring_closed_w1hole), alg2(ring_closed_w1hole)
    print 'ring_closed_wholes', alg(ring_closed_wholes), alg2(ring_closed_wholes)
    print 'ring_open', alg(ring_open), alg2(ring_open)
    print 'ring_open_w1hole', alg(ring_open_w1hole), alg2(ring_open_w1hole)
    print 'ring_open_wholes', alg(ring_open_wholes), alg2(ring_open_wholes)
    print 'ring_difficult', alg(ring_difficult), alg2(ring_difficult)
    print 'ring_difficult_w1hole', alg(ring_difficult_w1hole), alg2(ring_difficult_w1hole)

def main():
    # prepare logger
    logger = Logger.getInstance()
    logger.setLevel(logging.DEBUG)

    # input image locations
    #i = '/home/omaier/Experiments/Regionsegmentation/Evaluation_Viscous/00originalvolumes/o09.nii' # original image
    #i = '/home/omaier/Temp/test.nii' # original image
    #i = '/home/omaier/Temp/o09_smoothed_i4.0_c0.1_t0.0625.nii'
    i = '/home/omaier/Experiments/GraphCut/BoundaryTerm/Stawiaski/01gradient/o09_gradient.nii'

    # output image locations
    r = '/home/omaier/Temp/result_gradient.nii'  # result mask

    # load images
    i_i = load(i)

    # extract and prepare image data
    i_d = scipy.squeeze(i_i.get_data())

    # crop input images to achieve faster execution
    crop = [slice(50, -200),
            slice(50, -150),
            slice(50, -100)]
    #i_d = i_d[crop]
    i_d = scipy.copy(i_d)

    # !TODO: Test if input image is of size 0
    logger.debug('input image shape={},ndims={},dtype={}'.format(i_d.shape, i_d.ndim, i_d.dtype))

    result = watershed8(i_d, logger)

    logger.info('Saving resulting region map...')
    result_i = image_like(result, i_i)
    result_i.get_header().set_data_dtype(scipy.int32)
    save(result_i, r)

    logger.info('Done!')

def test_default_types():
    # Test that variable types are same in loaded and default instances
    for fname in datafiles:
        img = load(fname)
        # GiftiImage
        assert_default_types(img)
        # GiftiMetaData
        assert_default_types(img.meta)
        # GiftiNVPairs
        for nvpair in img.meta.data:
            assert_default_types(nvpair)
        # GiftiLabelTable
        assert_default_types(img.labeltable)
        # GiftiLabel elements can be None or float; skip
        # GiftiDataArray
        for darray in img.darrays:
            assert_default_types(darray)
            # GiftiCoordSystem
            assert_default_types(darray.coordsys)
            # GiftiMetaData
            assert_default_types(darray.meta)
            # GiftiNVPairs
            for nvpair in darray.meta.data:
                assert_default_types(nvpair)

def test_modify_darray():
    for fname in (DATA_FILE1, DATA_FILE2, DATA_FILE5):
        img = load(fname)
        darray = img.darrays[0]
        darray.data[:] = 0
        assert_true(np.array_equiv(darray.data, 0))

def test_dataarray5():
    img5 = load(DATA_FILE5)
    for da in img5.darrays:
        assert_equal(gifti_endian_codes.byteorder[da.endian], 'little')
    assert_array_almost_equal(img5.darrays[0].data, DATA_FILE5_darr1)
    assert_array_almost_equal(img5.darrays[1].data, DATA_FILE5_darr2)

def test_load_metadata():
    for i, dat in enumerate(datafiles):
        img = load(dat)
        me = img.meta
        assert_equal(numDA[i], img.numDA)
        assert_equal(img.version, "1.0")

def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # build output image name
    image_fg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.fg'
    image_fg_name += args.mask.split('/')[-1][-4:]
    image_bg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.bg'
    image_bg_name += args.mask.split('/')[-1][-4:]

    # check if output image exists
    if not args.force:
        if os.path.exists(image_fg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_fg_name))
            exit(1)
        elif os.path.exists(image_bg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_bg_name))
            exit(1)

    # load mask
    logger.info('Loading mask {}...'.format(args.mask))
    try:
        mask_image = load(args.mask)
        mask_image_data = numpy.squeeze(mask_image.get_data()).astype(scipy.bool_)
    except ImageFileError as e:
        logger.critical('The mask image does not exist or its file type is unknown.')
        raise ArgumentError('The mask image does not exist or its file type is unknown.', e)

    # erode mask stepwise
    logger.info('Step-wise reducing mask to find center...')
    mask_remains = mask_image_data.copy()
    while True:
        mask_remains_next = ndimage.binary_erosion(mask_remains, iterations=2)
        if 0 == len(mask_remains_next.nonzero()[0]):
            break
        mask_remains = mask_remains_next

    # extract one of the remaining voxels
    voxels = mask_remains.nonzero()
    marker = (voxels[0][0], voxels[1][0], voxels[2][0])
    logger.debug('Extracted foreground seed is {}.'.format(marker))

    # check suitability of corners as background markers
    logger.info('Checking if the corners are suitable background seed candidates...')
    if True == mask_image_data[0, 0, 0] or \
       True == mask_image_data[-1, 0, 0] or \
       True == mask_image_data[0, -1, 0] or \
       True == mask_image_data[0, 0, -1] or \
       True == mask_image_data[-1, -1, 0] or \
       True == mask_image_data[-1, 0, -1] or \
       True == mask_image_data[0, -1, -1] or \
       True == mask_image_data[-1, -1, -1]:
        logger.critical('The corners of the image do not correspond to background voxels.')
        raise ArgumentError('The corners of the image do not correspond to background voxels.')

    # create and save foreground marker image
    logger.info('Creating foreground marker image...')
    image_fg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    image_fg_data[marker[0], marker[1], marker[2]] = True

    logger.info('Saving foreground marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_fg_data, mask_image), image_fg_name)

    # create and save background marker image
    logger.info('Creating background marker image...')
    image_bg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    image_bg_data[0, 0, 0] = True
    image_bg_data[-1, 0, 0] = True
    image_bg_data[0, -1, 0] = True
    image_bg_data[0, 0, -1] = True
    image_bg_data[-1, -1, 0] = True
    image_bg_data[-1, 0, -1] = True
    image_bg_data[0, -1, -1] = True
    image_bg_data[-1, -1, -1] = True

    logger.info('Saving background marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_bg_data, mask_image), image_bg_name)

    logger.info('Successfully terminated.')

def test_parse_with_buffersize():
    for buff_sz in [None, 1, 2**12]:
        img2 = load(DATA_FILE2, buffer_size=buff_sz)
        assert_equal(img2.darrays[0].data.shape, (143479, 1))

def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # load image data
    image_data = numpy.squeeze(load(args.image).get_data())
    mask_data = numpy.squeeze(load(args.mask).get_data()).astype(numpy.bool_)

    # sample data
    image_data = image_data[::args.sample[0], ::args.sample[1], ::args.sample[2]]
    mask_data = mask_data[::args.sample[0], ::args.sample[1], ::args.sample[2]]

    # check dimensions to be equal
    if image_data.shape != mask_data.shape:
        raise ArgumentError("The images dimensions are not equal!")

    # extract relevant area from image_data
    highlighted_data = image_data.copy()
    highlighted_data[~mask_data] = image_data.min()
    # set the same values in the image to zero
    image_data[mask_data] = image_data.min()

    # load gnuplot
    g = Gnuplot.Gnuplot(debug=1)

    # set static gnuplot parameters
    g('set style data lines')
    g('set surface \n')
    g('set hidden3d\n')
    g('set view 60, 30, 1, 1\n')
    g('set key right\n')
    g('set xlabel "X"\n')
    g('set ylabel "Y"\n')
    g('set zlabel "Value"\n')
    g('set autoscale\n')

    # set default parameters
    dim = 0
    sl = 0
    getch = _Getch()

    # infinite loop
    while True:
        # prepare slices
        sls = []
        for i in range(image_data.ndim):
            if i == dim:
                sls.append(slice(sl, sl + 1))
            else:
                sls.append(slice(None))
        sl_image = numpy.squeeze(image_data[sls])
        sl_highlighted = numpy.squeeze(highlighted_data[sls])

        # set variable gnuplot parameters
        g('set title "Topographical image axis={}/{}, slice={}/{}"\n'.format(dim + 1, image_data.ndim, sl + 1, image_data.shape[dim]))
        g('set zrange [{}:{}]\n'.format(image_data.min(), image_data.max()))

        # create temp files for matrix processing
        # plot
        image_plot = Gnuplot.GridData(sl_image, range(sl_image.shape[0]), range(sl_image.shape[1]), title='data outside mask', binary=0)
        #image_plot = Gnuplot.Data(sl_image, title='data out of mask')
        highlighted_plot = Gnuplot.GridData(sl_highlighted, range(sl_highlighted.shape[0]), range(sl_highlighted.shape[1]), title='data inside mask', binary=0)
        #highlighted_plot = Gnuplot.Data(sl_highlighted, title='data inside mask')
        g.splot(image_plot, highlighted_plot)

        # wait for key
        print "d/a = slices +/-; s/w = dimension +/-; e/q = slices +/- 10; ESC = exit\n"
        ch = getch()

        # check key pressed
        if 'a' == ch:  # sl - 1
            sl = max(0, sl - 1)
        elif 'd' == ch:  # sl + 1
            sl = min(image_data.shape[dim] - 1, sl + 1)
        elif 'w' == ch:  # dimension - 1
            dim = max(0, dim - 1)
            sl = min(image_data.shape[dim] - 1, sl)
        elif 's' == ch:  # dimension + 1
            dim = min(image_data.ndim - 1, dim + 1)
            sl = min(image_data.shape[dim] - 1, sl)
        elif 'q' == ch:  # sl - 10
            sl = max(0, sl - 10)
        elif 'e' == ch:  # sl + 10
            sl = min(image_data.shape[dim] - 1, sl + 10)
        elif "" == ch:  # ESC or other unknown char
            break
        else:
            print "Unrecognized key"

    # close the gnuplot window
    g('quit\n')

def main():
    # prepare logger
    logger = Logger.getInstance()
    logger.setLevel(logging.DEBUG)

    # input image locations
    #i = '/home/omaier/Experiments/Regionsegmentation/Evaluation_Viscous/00originalvolumes/o09.nii' # original image
    g = '/home/omaier/Experiments/Regionsegmentation/Evaluation_Viscous/01gradient/o09_gradient.nii'  # gradient magnitude image
    l = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/label_full.nii'  # watershed label image
    fg = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/fg_markers.nii'
    bg = '/home/omaier/Experiments/GraphCut/RegionalTerm/images/bg_markers.nii'

    # output image locations
    r = '/home/omaier/Experiments/GraphCut/BoundaryTerm/graphcut_full.nii'  # liver mask

    # load images
    #i_i = load(i)
    g_i = load(g)
    l_i = load(l)
    fg_i = load(fg)
    bg_i = load(bg)

    # extract and prepare image data
    #i_d = scipy.squeeze(i_i.get_data())
    g_d = scipy.squeeze(g_i.get_data())
    l_d = scipy.squeeze(l_i.get_data())
    fg_d = scipy.squeeze(fg_i.get_data())
    bg_d = scipy.squeeze(bg_i.get_data())

    # crop input images to achieve faster execution
    #crop = [slice(50, -100),
    #        slice(50, -100),
    #        slice(50, -100)]
    #g_d = g_d[crop]
    #l_d = l_d[crop]
    #fg_d = fg_d[crop]
    #bg_d = bg_d[crop]

    # recompute the label ids to start from id 1
    logger.info('Relabel input image...')
    l_d = filter.relabel(l_d)

    # generate graph
    logger.info('Preparing graph...')
    gr = graphcut.graph_from_labels(l_d, fg_d, bg_d,
                                    boundary_term=graphcut.boundary_stawiaski,
                                    boundary_term_args=g_d)
    #inconsistent = gr.inconsistent()
    #if inconsistent:
    #    logger.error('The created graph contains inconsistencies: {}'.format('\n'.join(inconsistent)))

    # build graph cut graph from graph
    logger.info('Generating BK_MFMC C++ graph...')
    gcgraph = graphcut.GraphDouble(len(gr.get_nodes()), len(gr.get_nweights()))
    gcgraph.add_node(len(gr.get_nodes()))
    for node, weight in gr.get_tweights().iteritems():
        gcgraph.add_tweights(int(node - 1), weight[0], weight[1])
    for edge, weight in gr.get_nweights().iteritems():
        gcgraph.add_edge(int(edge[0] - 1), int(edge[1] - 1), weight[0], weight[1])

    # execute min-cut
    logger.info('Executing min-cut...')
    maxflow = gcgraph.maxflow()
    logger.debug('Maxflow is {}'.format(maxflow))

    # collect
    logger.info('Applying results...')
    l_d = filter.relabel_map(l_d, gcgraph.what_segment,
                             lambda fun, rid: 0 if gcgraph.termtype.SINK == fun(int(rid) - 1) else 1)

    logger.info('Saving images resulting mask...')
    # save resulting mask
    l_d = l_d.astype(scipy.bool_)
    save(image_like(l_d, fg_i), r)

    logger.info('Done!')

def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    logger.info("Selected viscous type is {}".format(args.type))

    # iterate over input images
    for image in args.images:

        # get and prepare image data
        logger.info("Loading image {} using NiBabel...".format(image))
        image_gradient = load(image)

        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        logger.debug("Intensity range of gradient image is ({}, {})".format(image_gradient_data.min(), image_gradient_data.max()))

        # build output file name and check for its existence, if not in sections mode
        if "sections" != args.type:
            # build output file name
            image_viscous_name = args.folder + "/" + image.split("/")[-1][:-4] + "_viscous_{}_sec_{}_ds_{}".format(args.type, args.sections, args.dsize)
            image_viscous_name += image.split("/")[-1][-4:]

            # check if output file exists
            if not args.force:
                if os.path.exists(image_viscous_name):
                    logger.warning("The output file {} already exists. Skipping this image.".format(image_viscous_name))
                    continue

        # execute plain closing i.e. a closing operation over the whole image, if in plain mode
        if "plain" == args.type:
            # prepare the disc structure (a ball with a diameter of (args.dsize * 2 + 1))
            disc = iterate_structure(generate_binary_structure(3, 1), args.dsize).astype(scipy.int_)

            # apply closing
            logger.info("Applying the morphology over whole image at once...")
            image_viscous_data = grey_closing(image_gradient_data, footprint=disc)

            # save resulting gradient image
            logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

            # skip other morphologies
            continue

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, args.sections)
        logger.debug("{} bins created".format(len(bins) - 1))

        # check if the number of bins is consistent
        if args.sections != len(bins) - 1:
            raise Exception("Inconsistency between the number of requested and created bins ({} to {})".format(args.sections, len(bins) - 1))

        # prepare result file
        image_viscous_data = image_gradient_data

        # transform the gradient images topography (Note: the content of one bin is: bins[slice - 1] <= content < bins[slice])
        logger.info("Applying the viscous morphological operations {} times...".format(args.sections))
        for slice in range(1, args.sections + 1):

            # build output file name and check for its existence, if in sections mode
            if "sections" == args.type:
                # build output file name
                image_viscous_name = args.folder + "/" + image.split("/")[-1][:-4] + "_viscous_{}_sec_{}_ds_{}_sl_{}".format(args.type, args.sections, args.dsize, slice)
                image_viscous_name += image.split("/")[-1][-4:]

                # check if output file exists
                if not args.force:
                    if os.path.exists(image_viscous_name):
                        logger.warning("The output file {} already exists. Skipping this slice.".format(image_viscous_name))
                        continue

                # prepare result file
                image_viscous_data = image_gradient_data

            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = image_gradient_data >= bins[slice]  # all voxels which are over the current slice
            mask_lower = image_gradient_data < bins[slice - 1]  # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower)  # all voxels in the current slice

            if "mercury" == args.type:
                dsize = int((args.dsize / float(args.sections)) * (slice))
                disc = iterate_structure(generate_binary_structure(3, 1), dsize).astype(scipy.int_)
                mask_equal_or_greater = mask_equal | mask_greater
                image_threshold_data = image_gradient_data * mask_equal_or_greater
            elif "oil" == args.type:
                dsize = int((args.dsize / float(args.sections)) * (args.sections - slice + 1))
                disc = iterate_structure(generate_binary_structure(3, 1), dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                mask_equal_or_lower = mask_equal | mask_lower
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[mask_equal_or_lower].max()
            elif "sections" == args.type:
                dsize = args.dsize
                disc = iterate_structure(generate_binary_structure(3, 1), args.dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                # set all voxels under the current slice to zero
                image_threshold_data[mask_lower] = 0
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[mask_equal].max()

            logger.debug("{} of {} voxels belong to this level.".format(len(mask_equal.nonzero()[0]), scipy.prod(image_threshold_data.shape)))

            # apply the closing with the appropriate disc size
            logger.debug("Applying a disk of {} to all values >= {} and < {}...".format(dsize, bins[slice - 1], bins[slice]))
            image_closed_data = grey_closing(image_threshold_data, footprint=disc)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data, image_closed_data)

            # save created output file, if in sections mode
            if "sections" == args.type:
                # save resulting gradient image
                logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
                image_viscous = image_like(image_viscous_data, image_gradient)
                save(image_viscous, image_viscous_name)

        # save created output file, if not in sections mode
        if "sections" != args.type:
            # save resulting gradient image
            logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

    logger.info("Successfully terminated.")

def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # load input image
    logger.info('Loading source image {}...'.format(args.input))
    try:
        input_data = scipy.squeeze(load(args.input).get_data()).astype(scipy.bool_)
    except ImageFileError as e:
        logger.critical('The region image does not exist or its file type is unknown.')
        raise ArgumentError('The region image does not exist or its file type is unknown.', e)

    # iterate over designated dimension and create for each such extracted slice a text file
    logger.info('Processing per-slice and writing to files...')
    idx = [slice(None)] * input_data.ndim
    for slice_idx in range(input_data.shape[args.dimension]):
        idx[args.dimension] = slice(slice_idx, slice_idx + 1)
        # 2009: IM-0001-0027-icontour-manual
        file_name = '{}/IM-0001-{:04d}-{}contour-auto.txt'.format(args.target, slice_idx + args.offset, args.ctype)
        # 2012: P01-0080-icontour-manual.txt
        file_name = '{}/P{}-{:04d}-{}contour-auto.txt'.format(args.target, args.id, slice_idx + args.offset, args.ctype)

        # check if output file already exists
        if not args.force:
            if os.path.exists(file_name):
                logger.warning('The output file {} already exists. Skipping.'.format(file_name))
                continue

        # extract current slice
        image_slice = scipy.squeeze(input_data[idx])

        # remove all objects except the largest
        image_labeled, labels = scipy.ndimage.label(image_slice)
        if labels > 1:
            logger.info('The slice {} contains more than one object. Removing the smaller ones.'.format(file_name))
            # determine biggest
            biggest = 0
            biggest_size = 0
            for i in range(1, labels + 1):
                if len((image_labeled == i).nonzero()[0]) > biggest_size:
                    biggest_size = len((image_labeled == i).nonzero()[0])
                    biggest = i
            # remove others
            for i in range(1, labels + 1):
                if i == biggest:
                    continue
                image_labeled[image_labeled == i] = 0
            # save to slice
            image_slice = image_labeled.astype(scipy.bool_)

        # perform some additional morphological operations
        image_slice = scipy.ndimage.morphology.binary_fill_holes(image_slice)
        footprint = scipy.ndimage.morphology.generate_binary_structure(image_slice.ndim, 3)
        image_slice = scipy.ndimage.morphology.binary_closing(image_slice, footprint, iterations=7)
        #image_slice = scipy.ndimage.morphology.binary_opening(image_slice, footprint, iterations=3)

        # if type == o, perform a dilation to increase the size slightly
        #if 'o' == args.ctype:
        #    footprint = scipy.ndimage.morphology.generate_binary_structure(image_slice.ndim, 3)
        #    image_slice = scipy.ndimage.morphology.binary_dilation(image_slice, iterations=3)

        # erode contour in slice
        input_eroded = scipy.ndimage.morphology.binary_erosion(image_slice, border_value=1)
        image_slice ^= input_eroded  # xor

        # extract contour positions and put into right order
        contour_tmp = image_slice.nonzero()
        contour = [[] for i in range(len(contour_tmp[0]))]
        for i in range(len(contour_tmp[0])):
            for j in range(len(contour_tmp)):
                contour[i].append(contour_tmp[j][i])  # x, y, z, ....

        if 0 == len(contour):
            logger.warning('Empty contour for file {}. Skipping.'.format(file_name))
            continue

        # create final points following along the contour (incl. linear sub-voxel precision)
        divider = 2
        point = contour[0]
        point_pos = 0
        processed = [point_pos]
        contour_final = []
        while point:
            nearest_pos = __find_nearest(point, contour, processed)
            if False == nearest_pos:
                break
            contour_final.extend(__draw_line(point, contour[nearest_pos], divider))
            processed.append(nearest_pos)
            point = contour[nearest_pos]

        # make connection between last and first point
        contour_final.extend(__draw_line(point, contour[0], divider))

        # save contour to file
        logger.debug('Creating file {}...'.format(file_name))
        with open(file_name, 'w') as f:
            for line in contour_final:
                f.write('{}\n'.format(' '.join(map(str, line))))

    logger.info('Successfully terminated.')
