Example #1
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image1
    data_input1, _ = load(args.input1)
    
    # load input image2
    data_input2, _ = load(args.input2)
    
    # compare dtype and shape
    if not data_input1.dtype == data_input2.dtype:
        print 'Dtype differs: {} to {}'.format(data_input1.dtype, data_input2.dtype)
    if not data_input1.shape == data_input2.shape:
        print 'Shape differs: {} to {}'.format(data_input1.shape, data_input2.shape)
        print 'The voxel content of images of different shape can not be compared. Exiting.'
        sys.exit(-1)
    
    # compare image data
    voxel_total = reduce(lambda x, y: x*y, data_input1.shape)
    voxel_difference = len((data_input1 != data_input2).nonzero()[0])
    if not 0 == voxel_difference:
        print 'Voxel differ: {} of {} total voxels'.format(voxel_difference, voxel_total)
        print 'Max difference: {}'.format(scipy.absolute(data_input1 - data_input2).max())
    else: print 'No other difference.'
    
    logger.info("Successfully terminated.")    
Example #2
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input images
    input_data, input_header = load(args.input)
    original_data, _ = load(args.original)
    
    logger.debug('Old shape={}.'.format(input_data.shape))
    
    # compute position
    logger.info('Computing position and pad volume...')
    position = __parse_contour_list(args.contours, input_data)
    
    # pad volume
    output_data = scipy.zeros(original_data.shape, input_data.dtype)
    output_data[position] = input_data
    
    
    logger.debug('New shape={}.'.format(output_data.shape))
    
    # save result contour volume
    save(output_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Example #3
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image using nibabel
    logger.info('Loading image {}...'.format(args.input))
    image_labels_data, _ = load(args.image)    
    
    # load mask image
    logger.info('Loading mask {}...'.format(args.mask))
    image_mask_data, image_mask_data_header = load(args.mask)
    
    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Skipping this image.'.format(args.output))
    
    # create a mask from the label image
    logger.info('Reducing the label image...')
    image_reduced_data = fit_labels_to_mask(image_labels_data, image_mask_data)
    
    # save resulting mask
    logger.info('Saving resulting mask as {} in the same format as input mask, only with data-type int8...'.format(args.output))
    image_reduced_data = image_reduced_data.astype(numpy.bool, copy=False) # bool sadly not recognized
    save(image_reduced_data, args.output, image_mask_data_header, args.force)
    
    logger.info('Successfully terminated.')
Example #4
def main():
	i1, h1 = load(sys.argv[1])
	i2, h2 = load(sys.argv[2])

	# shift image to align origins
	origin_h1 = numpy.sign(h1.get_qform()[0:3,0:3]).dot(header.get_offset(h1))
	origin_h2 = numpy.sign(h2.get_qform()[0:3,0:3]).dot(header.get_offset(h2))
	origin_difference_pixel = (origin_h1 - origin_h2) / numpy.asarray(header.get_pixel_spacing(h1))
	# negative values: shift image 1 by this upon inserting (which is the same as cutting the output image)
	# positive values: cut image 1 by this at inserting and also cut right side by length of output image plus this value
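	# illustration (hypothetical values) of the slicers as implemented below: for p = +2 along
	# axis j, i1 is read from index 0 and written into the output starting at index 2; for
	# p = -2, the first 2 voxels of i1 are skipped and writing into the output starts at index 0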
	o = numpy.zeros(i2.shape, i2.dtype)
	o_slicer = []
	i_slicer = []
	for j, p in enumerate(origin_difference_pixel):
		if p >= 0:
			i_slicer.append(slice(0,      min(i1.shape[j], o.shape[j] - abs(p))))
			o_slicer.append(slice(abs(p), min(i1.shape[j] + abs(p), o.shape[j])))
		else:
			i_slicer.append(slice(abs(p), min(i1.shape[j], o.shape[j] + abs(p))))
			o_slicer.append(slice(0,      min(i1.shape[j] - abs(p), o.shape[j])))

	o[o_slicer] = i1[i_slicer]
	header.set_offset(h1, header.get_offset(h2))
	
	save(o, sys.argv[3], h1)
Example #5
def main():
	onedir = sys.argv[1] # the first folder containing case folders
	twodir = sys.argv[2] # the second folder containing case folders
	nocase = (len(sys.argv) > 3 and sys.argv[3] == '-i')

	if DEBUG: print 'INFO: Comparing all cases in folders {} and {}.'.format(onedir, twodir)

	# iterate over first folder and compare voxel spacings with equivalent image in second folder
	print "Case\tvs same\tshape same"
	for root, dirs, files in os.walk(onedir):
		for case in sorted(dirs):
			for root, dirs, files in os.walk('{}/{}'.format(onedir, case)):
				for file_ in files:
					if file_.endswith(FILE_ENDING):
						i, hi = load('{}/{}/{}'.format(onedir, case, file_))
						if nocase:
							j, hj = load('{}/{}.{}'.format(twodir, case, FILE_ENDING))
						else:
							j, hj = load('{}/{}/{}'.format(twodir, case, file_))
						vs_same = numpy.array_equal(header.get_pixel_spacing(hi), header.get_pixel_spacing(hj))
						shape_same = numpy.array_equal(i.shape, j.shape)
						print '{}\t{}\t{}'.format(case, vs_same, shape_same)
						if not vs_same:
							print "\t{} vs {}".format(header.get_pixel_spacing(hi), header.get_pixel_spacing(hj))
						if not shape_same:
							print "\t{} vs {}".format(i.shape, j.shape)
	print 'Terminated.'
Example #6
def sresamplebyexample(src, dest, referenceimage, binary = False):
    r"""
    Secure-re-sample an image located at ``src`` by example ``referenceimage`` and
    save it under ``dest``.
    
    Parameters
    ----------
    src : string
        Source image file.
    dest : string
        Destination image file.
    referenceimage : string
        Reference image displaying the target spacing, origin and size.
    binary : bool
        Set to ``True`` for binary images.
    """
    # get target voxel spacing
    refimage, refhdr = load(referenceimage)
    spacing = header.get_pixel_spacing(refhdr)
    
    with tmpdir() as t:
        # create a temporary copy of the reference image with the source image data-type (imiImageResample requires both images to be of the same dtype)
        srcimage, _ = load(src)
        save(refimage.astype(srcimage.dtype), os.path.join(t, 'ref.nii.gz'), refhdr)
    
        # prepare and run registration command
        cmd = ['imiImageResample', '-I', src, '-O', dest, '-R', os.path.join(t, 'ref.nii.gz'), '-s'] + map(str, spacing)
        if binary:
            cmd += ['-b']
        rtcode, stdout, stderr = call(cmd)
    
    # check if successful
    if not os.path.isfile(dest):
        raise CommandExecutionError(cmd, rtcode, stdout, stderr, 'Binary re-sampling result image not created.')
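# A minimal usage sketch for sresamplebyexample (hypothetical file names), assuming the
# imiImageResample binary is available on the PATH:
#
#     sresamplebyexample('mask.nii.gz', 'mask_resampled.nii.gz', 'reference.nii.gz', binary=True)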
Example #7
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)  
    
    # constants
    colours = {'i': 10, 'o': 11}
    
    # load volumes
    marker_data, _ = load(args.marker)
    contour_data, _ = load(args.contour)
    
    # perform check
    contour_data = contour_data == colours[args.type]
    marker_data_fg = marker_data == 1
    marker_data_bg = marker_data == 2
    if scipy.logical_and(contour_data, marker_data_fg).any():
        logger.warning('Intersection between {} and {} (type {}) in foreground.'.format(args.marker, args.contour, args.type))
    elif scipy.logical_and(contour_data, marker_data_bg).any():
        logger.warning('Intersection between {} and {} (type {}) in background.'.format(args.marker, args.contour, args.type))
    else:
        print "No intersection."
Example #8
def test03(img, hdr, idx, delta):
    # TEST 03: DOES ANY META-INFORMATION GET LOST DURING FORMAT CONVERSION? AND IF YES; WHICH?
    for tr in types_int: # reference type
        print ''
        oformat = tr
        
        # create, save and load reference image
        try:
            name_ref = tmp_folder + '.'.join(['tmp_ref', tr])
            save(img, name_ref, hdr, True)
            img_ref, hdr_ref = load(name_ref)
        except Exception as e:
            print '\tERROR: Could not generate reference image for type {}: {}'.format(oformat, e)
            continue
        
        # extract meta-data from reference image
        mdata_ref = {'shape': img_ref.shape,
                     'dtype': img_ref.dtype,
                     'point': img_ref[idx],
                     'spacing': header.get_pixel_spacing(hdr_ref),
                     'offset': header.get_offset(hdr_ref),}        
        
        # print meta-data from reference image
        
        # iterate over test images
        for tt in types_int: # test type
            print '{} => {}'.format(oformat, tt),
            
            # create, save and load test images
            try:
                #print type(img_ref), type(hdr_ref)
                #print type(img_test), type(hdr_test)
                name_test = tmp_folder + '.'.join(['tmp_test', tt])
                save(img_ref, name_test, hdr_ref, True)
                img_test, hdr_test = load(name_test)
                
            except Exception as e:
                print '\tERROR: Could not generate test image. {}'.format(e)
                continue
            
            # extract meta-data from test image
            mdata_test = {'shape': img_test.shape,
                          'dtype': img_test.dtype,
                          'spacing': header.get_pixel_spacing(hdr_test),
                          'offset': header.get_offset(hdr_test),
                          'point': img_test[idx]}                    
            
            # compare reference against meta-image
            error = False
            for k in mdata_ref.keys():
                equal = _compare(mdata_ref[k], mdata_test[k], delta)
                #print '\n\t{} ({}) : {} = {}'.format(equal, k, mdata_ref[k], mdata_test[k]),
                if not equal:
                    error = True
                    print '\n\t{} ({}) : {} = {}'.format(equal, k, mdata_ref[k], mdata_test[k]),
            if not error:
                print '\t{}'.format(True)
            else:
                print '\n'
Example #9
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images
    b0img, b0hdr = load(args.b0image)
    bximg, bxhdr = load(args.bximage)
    
    # convert to float
    b0img = b0img.astype(numpy.float)
    bximg = bximg.astype(numpy.float)

    # check if images are compatible
    if not b0img.shape == bximg.shape:
        raise ArgumentError('The input images shapes differ i.e. {} != {}.'.format(b0img.shape, bximg.shape))
    if not header.get_pixel_spacing(b0hdr) == header.get_pixel_spacing(bxhdr):
        raise ArgumentError('The input images voxel spacing differs i.e. {} != {}.'.format(header.get_pixel_spacing(b0hdr), header.get_pixel_spacing(bxhdr)))
    
    # check if supplied threshold value as well as the b value is above 0
    if args.threshold is not None and not args.threshold >= 0:
        raise ArgumentError('The supplied threshold value must be greater than 0, otherwise a division through 0 might occur.')
    if not args.b > 0:
        raise ArgumentError('The supplied b-value must be greater than 0.')
    
    # compute threshold value if not supplied
    if args.threshold is None:
        b0thr = otsu(b0img, 32) / 4. # divide by 4 to decrease impact
        bxthr = otsu(bximg, 32) / 4.
        if 0 >= b0thr:
            raise ArgumentError('The supplied b0image seems to contain negative values.')
        if 0 >= bxthr:
            raise ArgumentError('The supplied bximage seems to contain negative values.')
    else:
        b0thr = bxthr = args.threshold
    
    logger.debug('thresholds={}/{}, b-value={}'.format(b0thr, bxthr, args.b))
    
    # threshold b0 + bx DW image to obtain a mask
    # b0 mask avoids division by zero, bx mask avoids a zero in the ln(x) computation
    mask = binary_fill_holes(b0img > b0thr) & binary_fill_holes(bximg > bxthr)
    
    # perform a number of binary morphology steps to select the brain only
    mask = binary_erosion(mask, iterations=1)
    mask = largest_connected_component(mask)
    mask = binary_dilation(mask, iterations=1)
    
    logger.debug('computing the ADC for {} of {} voxels; all voxels outside the mask are set to zero'.format(numpy.count_nonzero(mask), numpy.prod(mask.shape)))
    
    # compute the ADC
    adc = numpy.zeros(b0img.shape, b0img.dtype)
    adc[mask] = -1. * args.b * numpy.log(bximg[mask] / b0img[mask])
    adc[adc < 0] = 0
            
    # saving the resulting image
    save(adc, args.output, b0hdr, args.force)
Example #10
def main():
	m = load(sys.argv[1])[0].astype(numpy.bool)
	s = load(sys.argv[2])[0].astype(numpy.bool)

	intc = numpy.count_nonzero(~m & s)

	print "Non-intersecting part of the segmentation:"
	print "{} out of {} voxels".format(intc, numpy.count_nonzero(s))
Example #11
def main():
	# load input image
	i, _ = load(sys.argv[1])

	# load template image
	_, h = load(sys.argv[2])
	
	# save input image with adapted header in place
	j = i.copy()
	save(j, sys.argv[1], h)
Example #12
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load first input image as example 
    example_data, example_header = load(args.inputs[0])
    
    # test if the supplied position is valid
    if args.position > example_data.ndim or args.position < 0:
        raise ArgumentError('The supplied position for the new dimension is invalid. It has to be between 0 and {}.'.format(example_data.ndim))
    
    # prepare empty output volume
    output_data = scipy.zeros([len(args.inputs)] + list(example_data.shape), dtype=example_data.dtype)
    
    # add first image to output volume
    output_data[0] = example_data
    
    # load input images and add to output volume
    for idx, image in enumerate(args.inputs[1:]):
        image_data, _ = load(image)
        if not args.ignore and image_data.dtype != example_data.dtype:
            raise ArgumentError('The dtype {} of image {} differs from the one of the first image {}, which is {}.'.format(image_data.dtype, image, args.inputs[0], example_data.dtype))
        if image_data.shape != example_data.shape:
            raise ArgumentError('The shape {} of image {} differs from the one of the first image {}, which is {}.'.format(image_data.shape, image, args.inputs[0], example_data.shape))
        output_data[idx + 1] = image_data
        
    # move new dimension to the end or to target position
    for dim in range(output_data.ndim - 1):
        if dim >= args.position: break
        output_data = scipy.swapaxes(output_data, dim, dim + 1)
        
    # set pixel spacing
    spacing = list(header.get_pixel_spacing(example_header))
    spacing = tuple(spacing[:args.position] + [args.spacing] + spacing[args.position:])
    
    # !TODO: Find a way to enable this also for PyDicom and ITK images
    if __is_header_nibabel(example_header):
        __update_header_from_array_nibabel(example_header, output_data)
        header.set_pixel_spacing(example_header, spacing)
    else:
        raise ArgumentError("Sorry. Setting the voxel spacing of the new dimension only works with NIfTI images. See the description of this program for more details.")
    
    # save created volume
    save(output_data, args.output, example_header, args.force)
        
    logger.info("Successfully terminated.")
Example #13
def main():
	# loading the image mask
	m = load(sys.argv[2])[0].astype(numpy.bool)

	# extracting the required features and saving them
	for sequence, function_call, function_arguments, voxelspacing in features_to_extract:
		if not isfv(sys.argv[3], sequence, function_call, function_arguments):
			#print sequence, function_call.__name__, function_arguments
			i, h = load('{}/{}.nii.gz'.format(sys.argv[1], sequence))
			call_arguments = list(function_arguments)
			if voxelspacing: call_arguments.append(header.get_pixel_spacing(h))
			call_arguments.append(m)
			fv = function_call(i, *call_arguments)
			savefv(fv, sys.argv[3], sequence, function_call, function_arguments)
Example #14
def _percentilemodelstandardisation(trainingfiles, brainmaskfiles, destfiles, destmodel):
    r"""
    Train an intensity standardisation model and apply it. All values outside of the
    brain mask are set to zero.
    
    Parameters
    ----------
    trainingfiles : sequence of strings
        All images to use for training and to which subsequently apply the trained model.
    brainmaskfiles : sequence of strings
        The brain masks corresponding to ``trainingfiles``.
    destfiles : sequence of strings
        The intensity standardised target locations corresponding to ``trainingfiles``.
    destmodel : string
        The target model location.
    """
    # check arguments
    if not len(trainingfiles) == len(brainmaskfiles):
        raise ValueError('The number of supplied trainingfiles must be equal to the number of brainmaskfiles.')
    elif not len(trainingfiles) == len(destfiles):
        raise ValueError('The number of supplied trainingfiles must be equal to the number of destfiles.')
    
    # loading input images (as image, header pairs)
    images = []
    headers = []
    for image_name in trainingfiles:
        i, h = load(image_name)
        images.append(i)
        headers.append(h)
        
    # loading brainmasks
    masks = [load(mask_name)[0].astype(numpy.bool) for mask_name in brainmaskfiles]
        
    # train the model
    irs = IntensityRangeStandardization()
    trained_model, transformed_images = irs.train_transform([i[m] for i, m in zip(images, masks)])
    
    # condense outliers in the image (extreme peak values at both end-points of the histogram)
    transformed_images = [_condense(i) for i in transformed_images]
    
    # saving the model
    with open(destmodel, 'wb') as f:
        pickle.dump(trained_model, f)
    
    # save the transformed images
    for ti, i, m, h, dest in zip(transformed_images, images, masks, headers, destfiles):
        i[~m] = 0
        i[m] = ti
        save(i, dest, h)
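# A minimal usage sketch for _percentilemodelstandardisation (hypothetical file names),
# training the intensity standardisation model on three cases and applying it in one go:
#
#     _percentilemodelstandardisation(['c1.nii.gz', 'c2.nii.gz', 'c3.nii.gz'],
#                                     ['m1.nii.gz', 'm2.nii.gz', 'm3.nii.gz'],
#                                     ['o1.nii.gz', 'o2.nii.gz', 'o3.nii.gz'],
#                                     'model.pkl')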
Example #15
def main():

	# catch parameters
	segmentation_base_string = sys.argv[1]
	ground_truth_base_string = sys.argv[2]
	mask_file_base_string = sys.argv[3]
	cases = sys.argv[4:]

	# evaluate each case and collect the scores
	hds = []
	assds = []
	precisions = []
	recalls = []
	dcs = []

	# load images and apply mask to segmentation and ground truth (to remove ground truth fg outside of brain mask)
	splush = [load(segmentation_base_string.format(case)) for case in cases]
	tplush = [load(ground_truth_base_string.format(case)) for case in cases]
	masks = [load(mask_file_base_string.format(case))[0].astype(numpy.bool) for case in cases]

	s = [s.astype(numpy.bool) & m for (s, _), m in zip(splush, masks)]
	t = [t.astype(numpy.bool) & m for (t, _), m in zip(tplush, masks)]
	hs = [h for _, h in splush]
	ht = [h for _, h in tplush]

	# compute and append metrics (Pool-processing)
	pool = Pool(n_jobs)
	dcs = pool.map(wdc, zip(t, s))
	precisions = pool.map(wprecision, zip(s, t))
	recalls = pool.map(wrecall, zip(s, t))
	hds = pool.map(whd, zip(t, s, [header.get_pixel_spacing(h) for h in ht]))
	assds = pool.map(wassd, zip(t, s, [header.get_pixel_spacing(h) for h in ht]))

	# print case-wise results
	print 'Metrics:'
	print 'Case\tDC[0,1]\tHD(mm)\tP2C(mm)\tprec.\trecall'
	for case, _dc, _hd, _assd, _pr, _rc in zip(cases, dcs, hds, assds, precisions, recalls):
		print '{}\t{:>3,.3f}\t{:>4,.3f}\t{:>4,.3f}\t{:>3,.3f}\t{:>3,.3f}'.format(case, _dc, _hd, _assd, _pr, _rc)
        
	# check for nan/inf values of failed cases and signal warning
	mask = numpy.isfinite(hds)
	if not numpy.all(mask):
		print 'WARNING: Average values only computed on {} of {} cases!'.format(numpy.count_nonzero(mask), mask.size)
		
	print 'DM  average\t{} +/- {} (Median: {})'.format(numpy.asarray(dcs)[mask].mean(), numpy.asarray(dcs)[mask].std(), numpy.median(numpy.asarray(dcs)[mask]))
	print 'HD  average\t{} +/- {} (Median: {})'.format(numpy.asarray(hds)[mask].mean(), numpy.asarray(hds)[mask].std(), numpy.median(numpy.asarray(hds)[mask]))
	print 'ASSD average\t{} +/- {} (Median: {})'.format(numpy.asarray(assds)[mask].mean(), numpy.asarray(assds)[mask].std(), numpy.median(numpy.asarray(assds)[mask]))
	print 'Prec. average\t{} +/- {} (Median: {})'.format(numpy.asarray(precisions)[mask].mean(), numpy.asarray(precisions)[mask].std(), numpy.median(numpy.asarray(precisions)[mask]))
	print 'Rec. average\t{} +/- {} (Median: {})'.format(numpy.asarray(recalls)[mask].mean(), numpy.asarray(recalls)[mask].std(), numpy.median(numpy.asarray(recalls)[mask]))
Example #16
    def _run_interface(self, runtime):
        if not base.isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_filename('out_file')

        in_file = self.inputs.in_file
        mask_file = self.inputs.mask_file
        out_file = self.inputs.out_file

        image, header = mio.load(in_file)
        mask, _ = mio.load(mask_file)

        image[~(mask.astype(numpy.bool))] = 0
        mio.save(image, out_file, header)

        return runtime
Example #17
def main():
    i, h = load(sys.argv[1])
    r, _ = load(sys.argv[2])

    diff = numpy.asarray(r.shape) - numpy.asarray(i.shape)
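    # e.g. (hypothetical values) a shape difference of -5 along an axis is cut below with the
    # slice (2, -3), while a difference of +5 is padded with (2, 3), i.e. 2 voxels before and 3 after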

    if numpy.any(diff < 0): # cut to fit
        slicers = [slice(None) if 0 == e else slice(abs(e) / 2, -1 * (abs(e) / 2 + abs(e) % 2)) for e in diff]
        o = i[slicers]
        
    else: # pad to fit
        padding = [(e / 2, e / 2 + e % 2) for e in diff]
        o = numpy.pad(i, padding, "constant")

    save(o, sys.argv[1], h, True)
Example #18
def main():
	print 'lesion\tvolume (fraction)\tvolume (mm^3)'

	files = [f for f in os.listdir('{}'.format(sys.argv[1])) if os.path.isfile('{}/{}'.format(sys.argv[1], f))]
	for f in files:
		l, h = load('{}/{}'.format(sys.argv[1], f))
		m, _ = load('{}/{}'.format(sys.argv[2], f))

		lesion_voxel = numpy.count_nonzero(l)
		total_voxel = numpy.count_nonzero(m)

		volume_mm = numpy.prod(header.get_pixel_spacing(h)) * lesion_voxel
		volume_percentage = lesion_voxel / float(total_voxel)

		print '{}\t{}\t{}\t'.format(f[:-7], volume_percentage, volume_mm)
Example #19
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # loading input images (as image, header pairs)
    images = []
    headers = []
    for image_name in args.images:
        i, h = load(image_name)
        images.append(i)
        headers.append(h)
    
    # loading binary foreground masks if supplied, else create masks from threshold value
    if args.masks:
        masks = [load(mask_name)[0].astype(numpy.bool) for mask_name in args.masks]
    else:
        masks = [i > args.threshold for i in images]
    
    # if in application mode, load the supplied model and apply it to the images
    if args.lmodel:
        logger.info('Loading the model and transforming images...')
        with open(args.lmodel, 'r') as f:
            trained_model = pickle.load(f)
            if not isinstance(trained_model, IntensityRangeStandardization):
                raise ArgumentError('{} does not seem to be a valid pickled instance of an IntensityRangeStandardization object'.format(args.lmodel))
            transformed_images = [trained_model.transform(i[m], surpress_mapping_check = args.ignore) for i, m in zip(images, masks)]
            
    # if in training mode, train the model, apply it to the images and save it
    else:
        logger.info('Training the average intensity model...')
        irs = IntensityRangeStandardization()
        trained_model, transformed_images = irs.train_transform([i[m] for i, m in zip(images, masks)], surpress_mapping_check = args.ignore)
        logger.info('Saving the trained model as {}...'.format(args.smodel))
        with open(args.smodel, 'wb') as f:
                pickle.dump(trained_model, f)
                
    # save the transformed images
    if args.simages:
        logger.info('Saving intensity transformed images to {}...'.format(args.simages))
        for ti, i, m, h, image_name in zip(transformed_images, images, masks, headers, args.images):
            i[m] = ti
            save(i, '{}/{}'.format(args.simages, image_name.split('/')[-1]), h, args.force)
    
    logger.info('Terminated.')
Example #20
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    # transform to uint8
    data_input = data_input.astype(scipy.uint8)
                                      
    # reduce to 3D, if larger dimensionality
    if data_input.ndim > 3:
        for _ in range(data_input.ndim - 3): data_input = data_input[...,0]
        
    # iterate over slices (2D) until the first one with content is detected
    for plane in data_input:
        if scipy.any(plane):
            # set pixel spacing
            spacing = list(header.get_pixel_spacing(header_input))
            spacing = spacing[1:3]
            __update_header_from_array_nibabel(header_input, plane)
            header.set_pixel_spacing(header_input, spacing)
            # save image
            save(plane, args.output, header_input, args.force)
            break
    
    logger.info("Successfully terminated.")    
Example #21
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # write header line
    print('image;labels\n')
    
    # iterate over input images
    for image in args.images:
        
        # get and prepare image data
        logger.info('Processing image {}...'.format(image))
        image_data, _ = load(image)
        
        # count number of labels and flag a warning if they reach the ushort border
        count = len(numpy.unique(image_data)) 
        
        # count number of labels and write
        print('{};{}\n'.format(image.split('/')[-1], count))
        
        sys.stdout.flush()
            
    logger.info('Successfully terminated.')
Example #22
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
#    # check if output image exists
#    if not args.force:
#        if os.path.exists(image_gradient_name):
#            logger.warning('The output image {} already exists. Skipping this step.'.format(image_gradient_name))
#            continue        
        
    # prepare result image
    data_output = scipy.zeros(data_input.shape, dtype=scipy.float32)
        
    # apply the gradient magnitude filter
    logger.info('Computing the gradient magnitude with Prewitt operator...')
    generic_gradient_magnitude(data_input, prewitt, output=data_output) # alternative to prewitt is sobel
        
    # save resulting mask
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Example #23
def main():
	i, h = load(sys.argv[1])
	thr = float(sys.argv[2])
	
	i = i.copy()

	save(i >= thr, sys.argv[1], h)
Example #24
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # check if output image exists (will also be performed before saving, but as the smoothing might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise parser.error('The output image {} already exists.'.format(args.output))
    
    # loading image
    data_input, header_input = load(args.input)
    
    # apply the anisotropic diffusion
    logger.info('Applying anisotropic diffusion with settings: niter={} / kappa={} / gamma={}...'.format(args.iterations, args.kappa, args.gamma))
    data_output = anisotropic_diffusion(data_input, args.iterations, args.kappa, args.gamma, get_pixel_spacing(header_input))

    # save file
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Example #25
def main():
	# catch parameters
	forest_file = sys.argv[1]
	case_folder = sys.argv[2]
	mask_file = sys.argv[3]
	segmentation_file = sys.argv[4]

	# loading case features
	feature_vector = []
	for _file in os.listdir(case_folder):
		if _file.endswith('.npy') and _file.startswith('feature.'):
			with open(os.path.join(case_folder, _file), 'r') as f:
				feature_vector.append(numpy.load(f))
	feature_vector = join(*feature_vector)
	if 1 == feature_vector.ndim:
		feature_vector = numpy.expand_dims(feature_vector, -1)

	# load and apply the decision forest
	with open(forest_file, 'r') as f:
		forest = pickle.load(f)
	classification_results = forest.predict(feature_vector)

	# preparing  image
	m, h = load(mask_file)
	m = m.astype(numpy.bool)
	o = numpy.zeros(m.shape, numpy.uint8)
	o[m] = numpy.squeeze(classification_results).ravel()

	# applying the post-processing morphology
	#o = binary_dilation(o, iterations=2)
	#o = keep_largest_connected_component(o)
	o = binary_fill_holes(o)

	# saving the results
	save(o, segmentation_file, h, True)
Example #26
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    input_data, input_header = load(args.input)
    
    logger.debug('Old shape={}.'.format(input_data.shape))
    
    # compute cut
    logger.info('Computing cut and cropping volume...')
    cut = __parse_contour_list(args.contours, input_data)
    # crop volume
    input_data = input_data[cut]
    
    logger.debug('New shape={}.'.format(input_data.shape))
    
    # save result contour volume
    save(input_data, args.output, input_header, args.force)

    logger.info("Successfully terminated.")
Example #27
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image already exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Exiting.'.format(args.output))
            exit(-1)

    # load input image
    image_smoothed_data, image_header = load(args.input)
        
    # apply additional hole closing step
    logger.info('Closing holes...')
    def fun_holes(arr):
        return scipy.ndimage.morphology.binary_fill_holes(arr)
    xd_iterator(image_smoothed_data, (1, 2), fun_holes)
        
    # perform the selected morphological operation (erosion, dilation, opening or closing)
    # in 3D case: size 1 = 6-connectedness, 2 = 18-connectedness, 3 = 26-connectedness, etc.
    if 'erosion' == args.type:
        logger.info('Applying erosion...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_erosion(arr, footprint, iterations=args.iterations)
    elif 'dilation' == args.type:
        logger.info('Applying dilation...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_dilation(arr, footprint, iterations=args.iterations)
    elif 'opening' == args.type:
        logger.info('Applying opening...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_opening(arr, footprint, iterations=args.iterations)
    else: # closing
        logger.info('Applying closing...')
        def fun(arr):
            if 0 == args.iterations: return arr
            footprint = scipy.ndimage.morphology.generate_binary_structure(arr.ndim, args.size)
            return scipy.ndimage.morphology.binary_closing(arr, footprint, iterations=args.iterations)

    # iterate over slices and apply selected operation
    xd_iterator(image_smoothed_data, (1, 2), fun)

    # save resulting mask
    save(image_smoothed_data, args.output, image_header, args.force)
            
    logger.info('Successfully terminated.')
Example #28
def main():
	i, h = load(sys.argv[1])
	thr = float(sys.argv[2])

	o = i >= thr

	save(o, sys.argv[3], h)
Example #29
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    logger.debug('Original shape = {}.'.format(data_input.shape))
    
    # check if supplied dimension parameters is inside the images dimensions
    if args.dimension1 >= data_input.ndim or args.dimension1 < 0:
        raise ArgumentError('The first swap-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension1, data_input.ndim))
    elif args.dimension2 >= data_input.ndim or args.dimension2 < 0:
        raise ArgumentError('The second swap-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension2, data_input.ndim))
    
    # swap axes
    data_output = scipy.swapaxes(data_input, args.dimension1, args.dimension2)
    # swap pixel spacing and offset
    ps = list(header.get_pixel_spacing(header_input))
    ps[args.dimension1], ps[args.dimension2] = ps[args.dimension2], ps[args.dimension1]
    header.set_pixel_spacing(header_input, ps)
    os = list(header.get_offset(header_input))
    os[args.dimension1], os[args.dimension2] = os[args.dimension2], os[args.dimension1]
    header.set_offset(header_input, os)
    
    logger.debug('Resulting shape = {}.'.format(data_output.shape))
    
    # save resulting volume
    save(data_output, args.output, header_input, args.force)
    
    logger.info("Successfully terminated.")    
Example #30
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
        
    # check if output image exists (will also be performed before saving, but as the gradient might be time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError('The output image {} already exists.'.format(args.output))        
        
    # loading image
    data_input, header_input = load(args.input)
    
    logger.debug('Input array: dtype={}, shape={}'.format(data_input.dtype, data_input.shape))
    
    # execute the gradient map filter
    logger.info('Applying gradient map filter...')
    data_output = filter.gradient_magnitude(data_input, header.get_pixel_spacing(header_input))
        
    logger.debug('Resulting array: dtype={}, shape={}'.format(data_output.dtype, data_output.shape))
    
    # save image
    save(data_output, args.output, header_input, args.force)
    
    logger.info('Successfully terminated.')
Example #31
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load 3d image
    data_3d, header_3d = load(args.input)

    # check if supplied dimension parameter is inside the images dimensions
    if args.dimension >= data_3d.ndim or args.dimension < 0:
        raise ArgumentError(
            'The supplied cut-dimension {} exceeds the number of input volume dimensions {}.'
            .format(args.dimension, data_3d.ndim))

    # check if the supplied offset parameter is a divisor of the cut-dimension's slice number
    if not 0 == data_3d.shape[args.dimension] % args.offset:
        raise ArgumentError(
            'The offset is not a divisor of the number of slices in cut dimension ({} / {}).'
            .format(data_3d.shape[args.dimension], args.offset))

    # prepare empty target volume
    volumes_3d = data_3d.shape[args.dimension] / args.offset
    shape_4d = list(data_3d.shape)
    shape_4d[args.dimension] = volumes_3d
    data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype)

    logger.debug(
        'Separating {} slices into {} 3D volumes of thickness {}.'.format(
            data_3d.shape[args.dimension], volumes_3d, args.offset))

    # iterate over 3D image and create sub volumes which are then added to the 4d volume
    for idx in range(args.offset):
        # collect the slices
        for sl in range(volumes_3d):
            idx_from = [slice(None), slice(None), slice(None)]
            idx_from[args.dimension] = slice(idx + sl * args.offset,
                                             idx + sl * args.offset + 1)
            idx_to = [slice(None), slice(None), slice(None)]
            idx_to[args.dimension] = slice(sl, sl + 1)
            #print 'Slice {} to {}.'.format(idx_from, idx_to)
            data_4d[idx][idx_to] = data_3d[idx_from]

    # flip dimensions such that the newly created one is the last
    data_4d = scipy.swapaxes(data_4d, 0, 3)

    # save resulting 4D volume
    save(data_4d, args.output, header_3d, args.force)

    logger.info("Successfully terminated.")
Example #32
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # copy the example image or generate empty image, depending on the mode
    if args.example:
        grid_image = scipy.zeros(args.example_image.shape, scipy.bool_)
        grid_header = args.example_header
    else:
        grid_image = scipy.zeros(args.shape, scipy.bool_)
        # !TODO: Find another solution for this
        # Saving and loading image once to generate a valid header
        tmp_dir = tempfile.mkdtemp()
        tmp_image = '{}/{}'.format(tmp_dir, args.output.split('/')[-1])
        save(grid_image, tmp_image)
        _, grid_header = load(tmp_image)
        try:
            os.remove(tmp_image)
            os.rmdir(tmp_dir)
        except Exception:
            pass

    # set the image attributes if supplied
    if args.pixelspacing:
        header.set_pixel_spacing(grid_header, args.pixelspacing)
    if args.offset:
        header.set_offset(grid_header, args.offset)

    # compute the right grid spacing for each dimension
    if args.real:
        grid_spacing = [
            int(round(sp / float(ps))) for sp, ps in zip(
                args.spacing, header.get_pixel_spacing(grid_header))
        ]
    else:
        grid_spacing = args.spacing
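    # e.g. (hypothetical values) when args.real is set, a requested spacing of 10 mm at a pixel
    # spacing of 2.5 mm results in a grid line every int(round(10 / 2.5)) = 4 voxels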

    # paint the grid into the empty image volume
    for dim in range(grid_image.ndim):
        if 0 == grid_spacing[dim]:
            continue  # skip dimension of 0 grid spacing supplied
        for offset in range(0, grid_image.shape[dim], grid_spacing[dim]):
            slicer = [slice(None)] * grid_image.ndim
            slicer[dim] = slice(offset, offset + 1)
            grid_image[slicer] = True

    # saving resulting grid volume
    save(grid_image, args.output, grid_header, args.force)
Example #33
    def _run_interface(self, runtime):
        if not base.isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_filename('out_file')
        if not base.isdefined(self.inputs.pass_voxelspacing):
            self.inputs.pass_voxelspacing = False
        if not base.isdefined(self.inputs.kwargs):
            self.inputs.kwargs = dict()

        log.info('Extracting feature {}({}) from {}'.format(
            self.inputs.function.func_name, self.inputs.kwargs,
            self.inputs.in_file))

        image, header = mio.load(self.inputs.in_file)
        kwargs = self.inputs.kwargs
        kwargs['mask'] = mio.load(self.inputs.mask_file)[0].astype(numpy.bool)
        if self.inputs.pass_voxelspacing:
            kwargs['voxelspacing'] = mio.header.get_pixel_spacing(header)

        feature_vector = self.inputs.function(image, **kwargs)
        with open(self.inputs.out_file, 'wb') as f:
            numpy.save(f, feature_vector.astype(numpy.float32))
        return runtime
Example #34
def proprecessing(image_path, save_folder):

    if not os.path.exists("data/" + save_folder):
        os.mkdir("data/" + save_folder)
    filelist = os.listdir(image_path)
    filelist = [item for item in filelist if 'volume' in item]
    for file in filelist:
        img, img_header = load(image_path + file)
        img[img < -200] = -200
        img[img > 250] = 250
        img = np.array(img, dtype='float32')
        print("Saving image " + file)
        save(img, "./data/" + save_folder + "test-" + file)
Example #35
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # constants
    contour_dimension = 0
    time_dimension = 3

    # load input volumes
    label_data, _ = load(args.label)
    mask_data, mask_header = load(args.mask)

    # prepare result volume
    result_data = scipy.zeros(label_data.shape, scipy.bool_)

    # prepare slicer
    slicer = [slice(None)] * label_data.ndim

    # iterate over time
    logger.debug('Fitting...')
    for time_id in range(label_data.shape[time_dimension]):
        slicer[time_dimension] = slice(time_id, time_id + 1)
        # skip time points for which the mask contains no foreground
        if 0 == len(mask_data[slicer].nonzero()[0]): continue
        # extract spatial volume from all volumes
        mask_data_subvolume = scipy.squeeze(mask_data[slicer])
        label_data_subvolume = scipy.squeeze(label_data[slicer])
        result_data_subvolume = scipy.squeeze(result_data[slicer])
        # apply fitting and append
        result_data_subvolume += fit_labels_to_mask(label_data_subvolume,
                                                    mask_data_subvolume)

    save(result_data, args.output, mask_header, args.force)

    logger.info("Successfully terminated.")
Example #36
def load_dataset(dataset_filename, rep_dataset, readGT=True):
    dataset_filename = open(dataset_filename)

    mri3D = {}
    gt3D = {}
    for patientfile in dataset_filename.read().splitlines():

        split_line = patientfile.split("\t")
        inputs_name = split_line[0]
        image_data, _ = load(rep_dataset + "/" + inputs_name)
        nb_slices = image_data.shape[2]
        if readGT:
            output_name = split_line[1]
            gt_data, _ = load(rep_dataset + "/" + output_name)
            nb_slices = gt_data.shape[2]

        basename = os.path.basename(inputs_name)
        patientNum = get_patient_num(basename)

        size_im = preprocessing_im(image_data[:, :, 0]).shape

        mri3D[patientNum] = np.zeros((size_im[0], size_im[1], nb_slices))
        gt3D[patientNum] = np.zeros((size_im[0], size_im[1], nb_slices))

        for i in range(nb_slices):
            mri3D[patientNum][:, :, i] = preprocessing_im(image_data[:, :, i])
            mri3D[patientNum][:, :, i] -= np.min(mri3D[patientNum][:, :, i])
            mri3D[patientNum][:, :, i] = mri3D[patientNum][:, :, i] / np.max(
                mri3D[patientNum][:, :, i])
            if readGT:
                gt3D[patientNum][:, :, i] = preprocessing_label(gt_data[:, :,
                                                                        i])

    dataset_filename.close()

    if readGT:
        return mri3D, gt3D
    else:
        return mri3D
Example #37
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image using nibabel
    logger.info('Loading image {}...'.format(args.input))
    image_labels_data, _ = load(args.image)

    # load mask image
    logger.info('Loading mask {}...'.format(args.mask))
    image_mask_data, image_mask_data_header = load(args.mask)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning(
                'The output image {} already exists. Skipping this image.'.
                format(args.output))

    # create a mask from the label image
    logger.info('Reducing the label image...')
    image_reduced_data = fit_labels_to_mask(image_labels_data, image_mask_data)

    # save resulting mask
    logger.info(
        'Saving resulting mask as {} in the same format as input mask, only with data-type int8...'
        .format(args.output))
    image_reduced_data = image_reduced_data.astype(
        numpy.bool, copy=False)  # bool sadly not recognized
    save(image_reduced_data, args.output, image_mask_data_header, args.force)

    logger.info('Successfully terminated.')
Example #38
def open_BRATS(filepath):
	"""Import data from FigShare files

	Args:
		filepath (string): filepath to the FigShare file

	Returns:
		data, header: the 3D image of the brain with tumor and its header
	"""
	print('FILEPATH BRATS {}'.format(filepath))
	data, header = mio.load(filepath)
	data = np.array(data).transpose()
	return data, header
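# A minimal usage sketch for open_BRATS (hypothetical path):
#
#     data, header = open_BRATS('BRATS_001_flair.nii.gz')
#     print(data.shape)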
Example #39
class imagePlot(pg.ImageView):
    dp_input, image_header = load('DP_preprocessed.nii.gz')
    data = np.asarray(dp_input)
#    data = np.transpose(data,(0,2,1))

    # Interpret image data as row-major instead of col-major
    pg.setConfigOptions(imageAxisOrder='col-major')

    app = QtGui.QApplication([])
    
    # Create window with ImageView widget
    win = QtGui.QMainWindow()
    imv = pg.ImageView()
    
#    imv.view.setLimits(maxXRange = data.shape[1], maxYRange= data.shape[2])
#    imv.view.setAspectLocked(lock=False, ratio=2 )
#    imv.autoRange()
    win.setCentralWidget(imv)
    imv.view.setBackgroundColor('#f0f0f0')
    imv.timeLine.setPen('y', width=10)
    imv.ui.splitter.setChildrenCollapsible(False)
    imv.ui.splitter.setStretchFactor(8,1)
    imv.timeLine.setHoverPen('r', width=12)
    imv.view.setMenuEnabled(False)  
    roi = imv.getRoiPlot()
    slider = roi.plotItem.getViewWidget()
    slider.setMaximumHeight(60)
    roi.plotItem.setMenuEnabled(False)
#    imv.ui.splitter.setCollapsible(2,False)
    win.show()
    
    win.setWindowTitle('pyqtgraph example: ImageView')

    ## Display the data and assign each frame a time value from 1.0 to 3.0
    imv.setImage(data, xvals=np.linspace(1, 144, data.shape[0], dtype = 'int32'))
    ## Set a custom color map
    colors = [
        (0, 0, 0),
        (45, 5, 61),
        (84, 42, 55),
        (150, 87, 60),
        (208, 171, 141),
        (255, 255, 255)
    ]
    cmap = pg.ColorMap(pos=np.linspace(0.0, 1.0, 6), color=colors)
    imv.setColorMap(cmap)
    imv.setCurrentIndex(72)
    imv.ui.roiBtn.hide()
    imv.ui.menuBtn.hide()
    
    def 
Example #40
def preprocess_data(root_dir, y_shape=64, z_shape=64):
    image_dir = os.path.join(root_dir, 'imagesTr')
    label_dir = os.path.join(root_dir, 'labelsTr')
    output_dir = os.path.join(root_dir, 'preprocessed')

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print('Created ' + output_dir + '...')

    class_stats = defaultdict(int)
    total = 0

    nii_files = subfiles(image_dir, suffix=".nii.gz", join=False)

    for i in range(0, len(nii_files)):
        if nii_files[i].startswith("._"):
            nii_files[i] = nii_files[i][2:]

    for f in nii_files:
        image, _ = load(os.path.join(image_dir, f))
        label, _ = load(os.path.join(label_dir, f.replace('_0000', '')))

        print(f)

        # normalize images
        image = (image - image.min()) / (image.max() - image.min())

        image = np.swapaxes(image, 0, 2)
        image = np.swapaxes(image, 1, 2)

        label = np.swapaxes(label, 0, 2)
        label = np.swapaxes(label, 1, 2)
        result = np.stack((image, label))

        np.save(os.path.join(output_dir, f.split('.')[0] + '.npy'), result)
        print(f)

    print(total)
Example #41
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image1
    data_input1, _ = load(args.input1)

    # load input image2
    data_input2, _ = load(args.input2)

    # compare dtype and shape
    if not data_input1.dtype == data_input2.dtype:
        print('Dtype differs: {} to {}'.format(data_input1.dtype,
                                               data_input2.dtype))
    if not data_input1.shape == data_input2.shape:
        print('Shape differs: {} to {}'.format(data_input1.shape,
                                               data_input2.shape))
        print(
            'The voxel content of images of different shape can not be compared. Exiting.'
        )
        sys.exit(-1)

    # compare image data
    voxel_total = reduce(lambda x, y: x * y, data_input1.shape)
    voxel_difference = len((data_input1 != data_input2).nonzero()[0])
    if not 0 == voxel_difference:
        print('Voxel differ: {} of {} total voxels'.format(
            voxel_difference, voxel_total))
        print('Max difference: {}'.format(
            scipy.absolute(data_input1 - data_input2).max()))
    else:
        print('No other difference.')

    logger.info("Successfully terminated.")
Example #42
def calculate_atlas_overlaps(mask):
    """Given an image mask, calculate overlap with all available atlases."""
    atlas_files = _get_atlas_files()
    mask, mask_header = mio.load(mask)

    mask_spacing = mio.get_pixel_spacing(mask_header)
    pixel_volume = mask_spacing[0] * mask_spacing[1] * mask_spacing[2]

    for atlas_file in atlas_files:
        atlas, atlas_header = mio.load(atlas_file)
        # if dimensions of mask (standardbrain) and atlas do not match, skip
        # if atlas.shape != mask.shape:
        atlas_spacing = mio.get_pixel_spacing(atlas_header)
        if mask_spacing != atlas_spacing:
            log.warning('Atlas {} will be skipped due to mismatching pixel'
                        ' spacing (atlas: {}, segmentation: {})'.format(
                            os.path.basename(atlas_file), atlas_spacing,
                            mask_spacing))
            continue
        overlap = atlas[mask.astype(numpy.bool)]

        region_sizes = numpy.bincount(atlas.ravel())
        overlap_region_sizes = numpy.bincount(overlap.ravel())

        atlas_name = os.path.basename(atlas_file).split('.')[0]
        region_names = _get_region_name_map(atlas_name)
        out_csv_path = os.path.join(config.get().case_output_dir,
                                    atlas_name + '.csv')
        w = csv.writer(open(out_csv_path, 'w'))
        w.writerow(
            ['value', 'id', 'voxel overlap', 'mL overlap', 'percent overlap'])
        for index, number in enumerate(overlap_region_sizes):
            if number != 0:
                w.writerow([
                    index, region_names[index], number,
                    (number * pixel_volume) / 1000,
                    float(number) / region_sizes[index]
                ])
Example #43
def test(data_name_list):
    test_data = np.memmap(os.path.join(WRITE_PATH, "test_orig.dat"),
                          dtype=np.float32,
                          mode="r",
                          shape=(110, SHAPE[1], SHAPE[2], SHAPE[3], SHAPE[0]))
    val_data = np.memmap(os.path.join(WRITE_PATH, "h_orig.dat"),
                         shape=(220, SHAPE[0], SHAPE[1], SHAPE[2], SHAPE[3]),
                         dtype=np.float32,
                         mode="r")
    test_size = test_data.shape[0]
    test_data_node = tf.placeholder(tf.float32,
                                    shape=(BATCH_SIZE * BATCH_MUL, PATCH[0],
                                           PATCH[1], NUM_CHANNELS))
    test_prediction = test_model(test_data_node)
    imghdr = load(ORIG_READ_PATH + "h.1.VSD.Brain.XX.O.MR_Flair.54512.nii")[1]
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver()
        saver.restore(sess, WRITE_PATH + "savedmodel/savedmodel_final.ckpt")
        print("Variable Initialized. Start Testing!")
        for i in range(test_size + VAL_SIZE):
            test_time = time.time()
            test_result = np.zeros(dtype=np.uint8,
                                   shape=(SHAPE[1], SHAPE[2], SHAPE[3]))
            for j in range(SHAPE[1]):
                for k in range(0, SHAPE[2], BATCH_MUL):
                    if i < VAL_SIZE:
                        batch_data, is_background = get_val_data(
                            val_data, i, j, k)
                    else:
                        batch_data, is_background = get_test_data(
                            test_data, i - VAL_SIZE, j, k)
                    if is_background:
                        continue
                    feed_dict = {test_data_node: batch_data}
                    test_result[j, k] = np.argmax(
                        sess.run(test_prediction, feed_dict=feed_dict), 1)
            if i < VAL_SIZE:
                test_result[np.where(val_data[i, 0] == 0)] = 0
                save(
                    test_result, WRITE_PATH + "VSD.h." + str(i) + "." +
                    data_name_list[0][i, 3] + ".nii", imghdr)
            else:
                test_result[np.where(test_data[i - VAL_SIZE, 0] == 0)] = 0
                save(
                    test_result, WRITE_PATH + "VSD.t." + str(i - VAL_SIZE) +
                    "." + data_name_list[2][i - VAL_SIZE, 3] + ".nii", imghdr)

            print("TEST %d/%d, Time elapsed: %d" %
                  (i - VAL_SIZE, test_size, time.time() - test_time))
Example #44
    def _run_interface(self, runtime):
        if not base.isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_filename('out_file')

        in_file = self.inputs.in_file
        out_file = self.inputs.out_file

        image, header = mio.load(in_file)
        lower, upper = numpy.percentile(image, (1, 99.9))
        image[image < lower] = lower
        image[image > upper] = upper
        mio.save(image, out_file, header)

        return runtime
Example #45
    def _run_interface(self, runtime):
        if not base.isdefined(self.inputs.segmentation_file):
            self.inputs.segmentation_file = self._gen_filename(
                'segmentation_file')
        if not base.isdefined(self.inputs.probability_file):
            self.inputs.probability_file = self._gen_filename(
                'probability_file')

        log.info('Applying RDF {} to features {}'.format(
            self.inputs.classifier_file,
            map(os.path.basename, self.inputs.feature_files)))

        features = []
        for path in self.inputs.feature_files:
            with open(path, 'r') as f:
                features.append(numpy.load(f))

        feature_vector = mutil.join(*features)
        if feature_vector.ndim == 1:
            feature_vector = numpy.expand_dims(feature_vector, -1)

        # load and apply the decision forest
        with gzip.open(self.inputs.classifier_file, 'r') as f:
            classifier = pickle.load(f)
            prob_classification = \
                classifier.predict_proba(feature_vector)[:, 1]
            # equivalent to forest.predict
            bin_classification = prob_classification > PROBABILITY_THRESHOLD

        # prepare result images to save to disk
        mask, header = mio.load(self.inputs.mask_file)
        mask = mask.astype(numpy.bool)
        segmentation_image = numpy.zeros(mask.shape, numpy.uint8)
        segmentation_image[mask] = numpy.squeeze(bin_classification).ravel()
        probability_image = numpy.zeros(mask.shape, numpy.float32)
        probability_image[mask] = numpy.squeeze(prob_classification).ravel()

        # apply the post-processing morphology
        segmentation_image = scipy.ndimage.morphology.binary_fill_holes(
            segmentation_image)

        mio.save(segmentation_image,
                 self.inputs.segmentation_file,
                 header,
                 force=True)
        mio.save(probability_image,
                 self.inputs.probability_file,
                 header,
                 force=True)
        return runtime
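Beispiel #45 runs a pickled random decision forest over per-voxel features and writes the result back into the brain mask. A hedged sketch of that core step, assuming a fitted scikit-learn classifier `clf`, a feature matrix `X` with one row per voxel inside the boolean `mask`, and 0.5 standing in for PROBABILITY_THRESHOLD (which is defined elsewhere in the original module):

import numpy

def apply_forest(clf, X, mask, threshold=0.5):
    prob = clf.predict_proba(X)[:, 1]                  # foreground probability per voxel
    segmentation = numpy.zeros(mask.shape, numpy.uint8)
    segmentation[mask] = (prob > threshold).astype(numpy.uint8)
    probability = numpy.zeros(mask.shape, numpy.float32)
    probability[mask] = prob
    return segmentation, probability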
Beispiel #46
0
    def GetImages(self, imageFolders, isTrain):

        for imgFolder in imageFolders:

            if isTrain:
                label = int((imgFolder.split("\\")[1]).split("_")[0])
                self.labels.append(label)

                id = str((imgFolder.split("\\")[2]).split("_")[0])
                self.ids.append(id)

            files = glob.glob(imgFolder + "/*.dcm")

            dicomSet = []
            for img in files:
                image_data, image_header = load(img)

                image_data = image_data / np.max(image_data)
                image_data = resize(image_data[:, :, 0], (256, 256))
                image_data = image_data[:, :, np.newaxis]

                #TODO: intensity normalization:
                #n = np.mean(image_data)
                #image_data *= 0.5 / n
                #image_data -= np.mean(image_data)
                #image_data /= np.std(image_data)

                #TODO: sharpen & contrast

                #plt.imshow(image_data[:,:,0],cmap="gray",vmax=1)
                #plt.show()

                orderNum = image_header.get_sitkimage().GetOrigin()[2]
                dicomSet.append([orderNum, image_data])

            dicomSet = sorted(dicomSet, key=lambda x: x[0])
            dicomSet = np.delete(dicomSet, 0, 1)

            idx = self.GetDistribution(len(dicomSet))
            dicomSet = np.take(dicomSet, idx)

            imgRes = []
            for img in dicomSet:
                imgRes.append(img)
            imgRes = np.asarray(imgRes)

            #plt.imshow(imgRes[0,:,:,0],cmap="gray",vmax=1)
            #plt.show()

            self.images.append(imgRes)
def stripping(path, spath):
    print("here is the path" + path)
    rawimg, image_header = load(path)
    t = rawimg
    # print(rawimg.shape)
    plt.imshow(t[:, :, 80], cmap=plt.get_cmap('gray'))
    for i in range(193):
        scipy.misc.imsave('static/uploads1/' + str(i) + 'ws' + '.jpg',
                          rawimg[:, :, i])

    ext = Extractor()
    prob = ext.run(t)
    mask = (prob > 0.5)
    print(mask.shape)
    br = rawimg[:]
    br[~mask] = 0
    ws = []
    wos = []

    for i in range(193):
        scipy.misc.imsave('static/uploads1/' + str(i) + 'wos' + '.jpg',
                          br[:, :, i])

    for o in range(193):
        ws.append(
            Image.open('static/uploads1/' + str(o) + 'ws' +
                       '.jpg').convert('P'))
        wos.append(
            Image.open('static/uploads1/' + str(o) + 'wos' +
                       '.jpg').convert('P'))

    # the GIFs were previously written directly to static/uploads1/; they are now
    # saved under the React app's public/img/<spath>/ directory instead
    save_path = 'C:/Users/DELL/Desktop/React Work/yfp v3/demoProject/demosite/public/img/' + spath + '/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    ws[0].save(save_path + '/withskull.gif',
               save_all=True,
               append_images=ws[1:],
               optimize=False,
               duration=60,
               loop=0)
    wos[0].save(save_path + '/withoutskull.gif',
                save_all=True,
                append_images=wos[1:],
                optimize=False,
                duration=60,
                loop=0)

    plt.imshow(rawimg[:, :, 80], cmap=plt.get_cmap('gray'))
Beispiel #48
0
def nifti_modify_metadata(image_file, tasks):
    """Modify metadata of image_file.

    See module docstring for list of possible tasks.
    """
    image, header = mio.load(image_file)

    for task in tasks:
        field, value = task.split('=')
        if value in GETTER:
            SETTER[field](header, GETTER[value](header))
        else:
            SETTER[field](header, int(value))

    mio.save(image.copy(), image_file, header)
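The GETTER and SETTER dictionaries used above are defined elsewhere in the original module. A hypothetical minimal sketch of their shape, mapping field names to header accessor callables; the field name 'spacing' and the use of medpy.io.header's pixel-spacing accessors are assumptions for illustration only:

from medpy.io import header as mheader

GETTER = {
    'spacing': lambda hdr: mheader.get_pixel_spacing(hdr),
}
SETTER = {
    'spacing': lambda hdr, value: mheader.set_pixel_spacing(hdr, value),
}

# hypothetical usage: nifti_modify_metadata('image.nii.gz', ['spacing=spacing'])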
    def OpenMRI(self):
        print("In open")
        dialog = PopUpDLG(style=4)
        value = dialog.exec_()
        if value:
            print(value)
            self.files = value
            if self.files['T1']:
                self.curData['T1'], _ = load(self.files['T1'])
                self.dataAvail['T1'] = 1
            else:
                self.dataAvail['T1'] = None
            if self.files['T2']:
                self.curData['T2'], _ = load(self.files['T2'])
                self.dataAvail['T2'] = 1
            else:
                self.dataAvail['T2'] = None
            if self.files['T1c']:
                self.curData['T1c'], _ = load(self.files['T1c'])
                self.dataAvail['T1c'] = 1
            else:
                self.dataAvail['T1c'] = None
            if self.files['F']:
                self.curData['F'], _ = load(self.files['F'])
                self.dataAvail['F'] = 1
            else:
                self.dataAvail['F'] = None
            self.widget.createMRIView(self.curData['T1'])
            self.widget.t1Btn.setChecked(True)
            self.widget.dTransverse.setChecked(True)
            self.loadFile(self.files['T1'])
            self.widget.curData = self.curData
            print(self.widget.curData.keys())
            if self.widget.maskView:
                self.widget.maskView.hide()
                self.widget.segmentedView.hide()
Beispiel #50
0
def load_mri_from_directory(data_index_list,
                            fixed_width,
                            fixed_depth,
                            is_test=False,
                            data_dir='../input/train_data',
                            is_fixed_size=True):
    img_train_list, img_label_list = [], []
    subject_list = sorted(glob(data_dir + '/*'))

    for data_index in data_index_list:
        img_list = [fn for fn in sorted(glob( os.path.join(subject_list[data_index], '*', '*.nii*')))  \
        if '4DPWI' not in fn and 'OT' not in fn]
        img_train = []

        #Load images
        for img in img_list:
            test = (medio.load(img)[0])
            if is_fixed_size is True:
                test = transform_to_fixed_size(test, fixed_width, fixed_depth)
            test = normalize_img(test)
            img_train.append(test)
        img_train_list.append((img_train))

        if is_test is not True:
            label_path = glob(
                os.path.join(subject_list[data_index], '*', '*OT*.nii*'))[0]
            label = medio.load(label_path)[0]
            if is_fixed_size is True:
                label = transform_to_fixed_size(label, fixed_width,
                                                fixed_depth)
            img_label_list.append(label)

    if is_test is not True:
        return img_train_list, img_label_list
    else:
        return img_train_list
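normalize_img and transform_to_fixed_size are helpers defined elsewhere in the original code. A minimal sketch of normalize_img under the assumption that it performs plain min-max scaling (the actual implementation may differ):

import numpy as np

def normalize_img(img):
    img = img.astype(np.float32)
    rng = img.max() - img.min()
    if rng == 0:
        return np.zeros_like(img)
    return (img - img.min()) / rng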
def create_train_data():
    train_data_path = os.path.join(data_path, 'train/')
    images = os.listdir(train_data_path)
    total = len(images) // 2  # one image and one mask file per case

    imgs = np.ndarray((total, 1, image_rows, image_cols, image_z),
                      dtype=np.uint8)
    imgs_mask = np.ndarray((total, 1, image_rows, image_cols, image_z),
                           dtype=np.uint8)

    i = 0
    print('-' * 30)
    print('Creating training images...')
    print('-' * 30)
    for image_name in images:
        if 'mask' in image_name:
            continue
        image_mask_name = image_name.split('.')[0] + '_mask.nii.gz'
        img, img_header = load(os.path.join(train_data_path, image_name))
        img_mask, img_mask_header = load(
            os.path.join(train_data_path, image_mask_name))

        img = np.array([img])
        img_mask = np.array([img_mask])

        imgs[i] = img
        imgs_mask[i] = img_mask

        if i % 100 == 0:
            print('Done: {0}/{1} images'.format(i, total))
        i += 1
    print('Loading done.')

    np.save('imgs_train.npy', imgs)
    np.save('imgs_mask_train.npy', imgs_mask)
    print('Saving to .npy files done.')
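A hedged companion sketch for reading the arrays back before training, assuming the .npy files written above are in the working directory:

import numpy as np

def load_train_data():
    imgs_train = np.load('imgs_train.npy')
    imgs_mask_train = np.load('imgs_mask_train.npy')
    return imgs_train, imgs_mask_train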
Beispiel #52
0
def writeFiles(imageFiles,
               labelFiles,
               inputImageDir,
               inputLabelDir,
               outputImageDir,
               outputLabelDir,
               train=False):
    for i in range(len(imageFiles)):
        imageFile = imageFiles[i]
        print('Reading File {}'.format(imageFile), end='\r')
        labelFile = labelFiles[i]
        image, _ = load(inputImageDir + imageFile)
        label, _ = load(inputLabelDir + labelFile)
        if train:
            # use a separate slice index so the outer file index i is not shadowed
            for s in range(image.shape[2]):
                imageSlice = image[:, :, s]
                labelSlice = label[:, :, s]
                np.save(outputImageDir + imageFile + 'slice_{}'.format(s),
                        imageSlice)
                np.save(outputLabelDir + labelFile + 'slice_{}'.format(s),
                        labelSlice)
        else:
            np.save(outputImageDir + imageFile, image)
            np.save(outputLabelDir + labelFile, label)
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    # load input image
    data_input, header_input = load(args.input)
    
    # treat as binary
    data_input = data_input.astype(numpy.bool)
    
    # check dimension argument
    if args.dimension and (not args.dimension >= 0 or not args.dimension < data_input.ndim):
        raise argparse.ArgumentError(args.dimension, 'Invalid dimension of {} supplied. Image has only {} dimensions.'.format(args.dimension, data_input.ndim))
        
    # compute erosion and dilation steps
    erosions = int(math.ceil(args.width / 2.))
    dilations = int(math.floor(args.width / 2.))
    logger.debug("Performing {} erosions and {} dilations to achieve a contour of width {}.".format(erosions, dilations, args.width))
    
    # erode, dilate and compute contour
    if not args.dimension:
        eroded = binary_erosion(data_input, iterations=erosions) if not 0 == erosions else data_input
        dilated = binary_dilation(data_input, iterations=dilations) if not 0 == dilations else data_input
        data_output = dilated ^ eroded  # XOR keeps only the ring between dilation and erosion
    else:
        slicer = [slice(None)] * data_input.ndim
        bs_slicer = [slice(None)] * data_input.ndim
        data_output = numpy.zeros_like(data_input)
        for sl in range(data_input.shape[args.dimension]):
            slicer[args.dimension] = slice(sl, sl+1)
            bs_slicer[args.dimension] = slice(1, 2)
            bs = generate_binary_structure(data_input.ndim, 1)
            
            eroded = binary_erosion(data_input[tuple(slicer)], structure=bs[tuple(bs_slicer)], iterations=erosions) if not 0 == erosions else data_input[tuple(slicer)]
            dilated = binary_dilation(data_input[tuple(slicer)], structure=bs[tuple(bs_slicer)], iterations=dilations) if not 0 == dilations else data_input[tuple(slicer)]
            data_output[tuple(slicer)] = dilated ^ eroded  # tuple indexing and XOR keep this working on current numpy
    logger.debug("Contour image contains {} contour voxels.".format(numpy.count_nonzero(data_output)))

    # save resulting volume
    save(data_output, args.output, header_input, args.force)
    
    logger.info("Successfully terminated.")    
def main():
    parser = getParser()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # loading input images
    img, hdr = load(args.input)

    # check shape dimensionality
    if not len(args.shape) == img.ndim:
        parser.error(
            'The image has {} dimensions, but {} shape parameters have been supplied.'
            .format(img.ndim, len(args.shape)))

    # check if output image exists
    if not args.force and os.path.exists(args.output):
        parser.error('The output image {} already exists.'.format(args.output))

    # compute required cropping and extention
    slicers_cut = []
    slicers_extend = []
    for dim in range(len(img.shape)):
        slicers_cut.append(slice(None))
        slicers_extend.append(slice(None))
        if args.shape[dim] != img.shape[dim]:
            difference = abs(img.shape[dim] - args.shape[dim])
            cutoff_left = difference // 2
            cutoff_right = difference // 2 + difference % 2
            if args.shape[dim] > img.shape[dim]:
                slicers_extend[-1] = slice(cutoff_left, -1 * cutoff_right)
            else:
                slicers_cut[-1] = slice(cutoff_left, -1 * cutoff_right)

    # crop original image
    img = img[tuple(slicers_cut)]

    # create output image and place input image centered
    out = numpy.zeros(args.shape, img.dtype)
    out[tuple(slicers_extend)] = img

    # saving the resulting image
    save(out, args.output, hdr, args.force)
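A tiny 1D illustration of the centred crop/pad arithmetic used above: a length-10 signal is cut down to 6 samples and then re-embedded, centred, in a length-12 output.

import numpy

sig = numpy.arange(10)

# crop 10 -> 6: difference 4, cut 2 from each side
cropped = sig[slice(2, -2)]              # [2 3 4 5 6 7]

# pad 6 -> 12: difference 6, centre the cropped signal
out = numpy.zeros(12, cropped.dtype)
out[slice(3, -3)] = cropped
print(out)                               # [0 0 0 2 3 4 5 6 7 0 0 0]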
Beispiel #55
0
def preprocess_single_file(image_file):
    image, image_header = load(image_file)
    image = (image - image.min()) / (image.max() - image.min())

    image = np.swapaxes(image, 0, 2)
    image = np.swapaxes(image, 1, 2)

    # TODO check original shape and reshape data if necessary
    # image = reshape(image, append_value=0, new_shape=(image.shape[0], y_shape, z_shape))
    # numpy_array = np.array(image)

    # Image shape is [b, w, h] and has one channel only
    # Desired shape = [b, c, w, h]
    # --> expand to have only one channel c=1 - data is in desired shape
    data = np.expand_dims(image, 1)

    return torch.from_numpy(data), image_header
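A quick shape check of the axis shuffling above on a synthetic volume, assuming a medpy-style (width, height, slices) array of 240x240x155; no file I/O is needed for the illustration.

import numpy as np
import torch

volume = np.zeros((240, 240, 155), dtype=np.float32)   # stand-in for a loaded scan
volume = np.swapaxes(volume, 0, 2)
volume = np.swapaxes(volume, 1, 2)
batch = torch.from_numpy(np.expand_dims(volume, 1))
print(batch.shape)                                      # torch.Size([155, 1, 240, 240])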
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # check if output image already exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning(
                'The output image {} already exists. Exiting.'.format(
                    args.output))
            exit(-1)

    # load input image
    image_smoothed_data, image_header = load(args.input)

    # apply additional hole closing step
    logger.info('Closing holes...')

    def fun_holes(arr):
        return scipy.ndimage.morphology.binary_fill_holes(arr)

    xd_iterator(image_smoothed_data, (1, 2), fun_holes)

    # set parameters
    ed_params = [(6, 9), (3, 2), (3, 2)]
    es_params = [(6, 9), (5, 2), (4, 3)]

    # apply to ED and ES with distinct parameters
    image_smoothed_data[:, :, :, :4] = morphology(
        image_smoothed_data[:, :, :, :4], ed_params, args.order, args.size)
    image_smoothed_data[:, :, :, 4:] = morphology(
        image_smoothed_data[:, :, :, 4:], es_params, args.order, args.size)

    # save resulting mask
    save(image_smoothed_data, args.output, image_header, args.force)

    logger.info('Successfully terminated.')
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input images as (data, header) pairs
    images = []
    for input_ in args.inputs:
        images.append(load(input_))

    # check if their shapes and voxel spacings are all equal
    s0 = images[0][0].shape
    if not numpy.all([i[0].shape == s0 for i in images[1:]]):
        raise argparse.ArgumentError(
            args.inputs,
            'At least one input image is of a different shape than the others.'
        )
    vs0 = header.get_pixel_spacing(images[0][1])
    if not numpy.all(
        [header.get_pixel_spacing(i[1]) == vs0 for i in images[1:]]):
        raise argparse.ArgumentError(
            args.inputs,
            'At least one input image has a different voxel spacing than the others.'
        )

    # execute operation
    logger.debug('Executing operation {} over {} images.'.format(
        args.operation, len(images)))
    if 'max' == args.operation:
        out = numpy.maximum.reduce([t[0] for t in images])
    elif 'min' == args.operation:
        out = numpy.minimum.reduce([t[0] for t in images])
    elif 'sum' == args.operation:
        out = numpy.sum([t[0] for t in images], 0).astype(numpy.uint8)
    else:  # avg
        out = numpy.average([t[0] for t in images], 0).astype(numpy.float32)

    # save output
    save(out, args.output, images[0][1], args.force)

    logger.info("Successfully terminated.")
Beispiel #58
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    input_data, input_header = load(args.input)

    # print information about the image
    printInfo(input_data, input_header)

    logger.info('Successfully terminated.')
Beispiel #59
0
def getImageData(fname):
    '''Returns the image data, image matrix and header of
    a particular file'''
    data, hdr = load(fname)
    # axes have to be switched from (256,256,x) to (x,256,256)
    data = np.moveaxis(data, -1, 0)

    norm_data = []
    # normalize each image slice
    for i in range(data.shape[0]):
        img_slice = data[i, :, :]
        norm_data.append(__normalize0_255(img_slice))

    # remake 3D representation of the image
    data = np.array(norm_data, dtype=np.uint16)

    data = data[..., np.newaxis]
    return data, hdr
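__normalize0_255 is defined elsewhere in the original code; a plausible minimal sketch under the assumption that it rescales each slice into the 0-255 range (the actual implementation may differ):

import numpy as np

def __normalize0_255(img_slice):
    img_slice = img_slice.astype(np.float32)
    rng = img_slice.max() - img_slice.min()
    if rng == 0:
        return np.zeros(img_slice.shape, dtype=np.uint16)
    return ((img_slice - img_slice.min()) / rng * 255).astype(np.uint16)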
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # load input image
    data_input, header_input = load(args.input)

    # eventually empty data
    if args.empty: data_input.fill(False)

    # save resulting volume
    save(data_input, args.output, header_input, args.force)

    logger.info("Successfully terminated.")