def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # build output image name
    image_bg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.bg'
    image_bg_name += args.mask.split('/')[-1][-4:]

    # check if output image exists
    if not args.force:
        if os.path.exists(image_bg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_bg_name))
            exit(1)

    # load mask
    logger.info('Loading mask {}...'.format(args.mask))
    try:
        mask_image = load(args.mask)
        mask_image_data = numpy.squeeze(mask_image.get_data()).astype(scipy.bool_)
    except ImageFileError as e:
        logger.critical('The mask image does not exist or its file type is unknown.')
        raise ArgumentError('The mask image does not exist or its file type is unknown.', e)

    # array of indices to access the six border slices of the volume
    sls = [(slice(1), slice(None), slice(None)),
           (slice(-1, None), slice(None), slice(None)),
           (slice(None), slice(1), slice(None)),
           (slice(None), slice(-1, None), slice(None)),
           (slice(None), slice(None), slice(1)),
           (slice(None), slice(None), slice(-1, None))]

    # security check
    logger.info('Determining whether the border slices intersect with the reference liver mask...')
    for sl in sls:
        if not 0 == len(mask_image_data[sl].nonzero()[0]):
            logger.critical('Reference mask reaches till the image border.')
            raise ArgumentError('Reference mask reaches till the image border.')

    # create and save background marker image
    logger.info('Creating background marker image...')
    image_bg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    for sl in sls:
        image_bg_data[sl] = True

    logger.info('Saving background marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_bg_data, mask_image), image_bg_name)

    logger.info('Successfully terminated.')
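# Illustrative sketch (not part of the script above): what the slice tuples in
# `sls` select. Each tuple picks one of the six border slices of a 3D volume;
# the first tuple, for example, is equivalent to `volume[0:1, :, :]`. The
# function name and the toy array are made up for this example.
def _demo_border_slices():
    import numpy
    demo = numpy.arange(27).reshape(3, 3, 3)
    # first slice along axis 0
    assert numpy.array_equal(demo[(slice(1), slice(None), slice(None))], demo[0:1, :, :])
    # last slice along axis 0
    assert numpy.array_equal(demo[(slice(-1, None), slice(None), slice(None))], demo[-1:, :, :])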
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists (will also be performed before saving, but as the
    # watershed might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError('The output image {} already exists.'.format(args.output))

    # loading image
    data_input, header_input = load(args.input)

    # apply the watershed
    logger.info('Watershedding with settings: thr={} / level={}...'.format(args.threshold, args.level))
    data_output = watershed(data_input, get_pixel_spacing(header_input), args.threshold, args.level)

    # save file
    save(data_output, args.output, header_input, args.force)

    logger.info('Successfully terminated.')
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # load input image
    logger.info('Loading {}...'.format(args.image))
    image_data, image_header = load(args.image)

    # check if the supplied cut dimension lies inside the input image's dimensions
    if args.dimension < 0 or args.dimension >= image_data.ndim:
        logger.critical('The supplied cut dimension {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
        raise ArgumentError('The supplied cut dimension {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))

    # prepare output filenames
    name_output = args.output.replace('{}', '{:03d}')

    # determine cut lines
    no_sub_volumes = image_data.shape[args.dimension] / args.maxsize + 1  # int-division is desired
    slices_per_volume = image_data.shape[args.dimension] / no_sub_volumes  # int-division is desired

    # construct processing dict for each sub-volume (see the sketch after this function)
    processing_array = []
    for i in range(no_sub_volumes):
        processing_array.append({'path': name_output.format(i + 1),
                                 'cut': (i * slices_per_volume, (i + 1) * slices_per_volume)})
        if no_sub_volumes - 1 == i:  # last volume has to have an increased cut end
            processing_array[i]['cut'] = (processing_array[i]['cut'][0], image_data.shape[args.dimension])

    # construct base indexing list
    index = [slice(None) for _ in range(image_data.ndim)]

    # execute extraction of the sub-volumes
    logger.info('Extracting sub-volumes...')
    for dic in processing_array:
        # check if output image exists
        if not args.force:
            if os.path.exists(dic['path']):
                logger.warning('The output file {} already exists. Skipping this volume.'.format(dic['path']))
                continue

        # extracting sub-volume
        index[args.dimension] = slice(dic['cut'][0], dic['cut'][1])
        volume = image_data[index]

        logger.debug('Extracted volume is of shape {}.'.format(volume.shape))

        # saving sub-volume in same format as input image
        logger.info('Saving cut {} as {}...'.format(dic['cut'], dic['path']))
        save(volume, dic['path'], image_header, args.force)

    logger.info('Successfully terminated.')
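# Illustrative sketch (not part of the script above): how the cut arithmetic
# partitions an axis under integer division. With an axis of length 100 and a
# maxsize of 40 (made-up values), 100 // 40 + 1 = 3 sub-volumes of 100 // 3 = 33
# slices each are created, and the last cut is extended to the axis end. The
# function name is hypothetical and exists only for this example.
def _example_cuts(axis_length=100, maxsize=40):
    no_sub_volumes = axis_length // maxsize + 1        # 3
    slices_per_volume = axis_length // no_sub_volumes  # 33
    cuts = [(i * slices_per_volume, (i + 1) * slices_per_volume) for i in range(no_sub_volumes)]
    cuts[-1] = (cuts[-1][0], axis_length)  # last volume takes the remaining slices
    return cuts  # [(0, 33), (33, 66), (66, 100)]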
def getArguments(parser):
    "Provides additional validation of the arguments collected by argparse."
    args = parser.parse_args()
    if 0 != len(args.masks) % 2:
        raise ArgumentError('Every supplied mask image must be followed directly by a string that identifies its segmentation.')
    return args
def getArguments(parser):
    "Provides additional validation of the arguments collected by argparse."
    args = parser.parse_args()
    # check if the output image already exists and overriding is not forced
    if not args.force:
        if os.path.exists(args.output + args.image[-4:]):
            raise ArgumentError('The supplied output file {} already exists. Supply the -f/--force flag to override.'.format(args.output))
    return args
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output + args.image[-4:]):
            logger.warning('The output file {} already exists. Breaking.'.format(args.output + args.image[-4:]))
            exit(1)

    # load image
    image_data, image_header = load(args.image)

    # check image dimensions against sub-volume dimensions
    if len(image_data.shape) != len(args.volume):
        logger.critical('The supplied input image is of a different dimensionality than the requested sub-volume ({} to {}).'.format(len(image_data.shape), len(args.volume)))
        raise ArgumentError('The supplied input image is of a different dimensionality than the requested sub-volume ({} to {}).'.format(len(image_data.shape), len(args.volume)))

    # execute extraction of the sub-area
    logger.info('Extracting sub-volume...')
    index = [slice(x[0], x[1]) for x in args.volume]
    volume = image_data[index]

    # check if the output image contains data
    if 0 == len(volume):
        logger.exception('The extracted sub-volume is of zero size. This usually means that the supplied volume coordinates and the image coordinates do not intersect. Exiting the application.')
        sys.exit(-1)

    # squeeze the extracted sub-volume for the case in which one dimension has been eliminated
    volume = scipy.squeeze(volume)

    logger.debug('Extracted volume is of shape {}.'.format(volume.shape))

    # save results in same format as input image
    save(volume, args.output, image_header, args.force)

    logger.info('Successfully terminated.')
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists (will also be performed before saving, but as the
    # watershed might be very time intensive, an initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            raise ArgumentError('The output image {} already exists.'.format(args.output))

    # loading images
    data_input, header_input = load(args.input)
    if args.mask:
        mask = load(args.mask)[0].astype(numpy.bool)
    else:
        mask = None

    # extract local minima and convert to markers
    logger.info('Extract local minima with minimum distance of {}...'.format(args.mindist))
    lm, _ = local_minima(data_input, args.mindist)
    lm_indices = tuple([numpy.asarray(x) for x in lm.T])
    minima_labels = numpy.zeros(data_input.shape, dtype=numpy.uint64)
    minima_labels[lm_indices] = 1
    if mask is not None:
        minima_labels[~mask] = 0
    minima_labels, _ = label(minima_labels)

    # apply the watershed
    logger.info('Watershedding...')
    data_output = watershed(data_input, minima_labels, mask=mask)

    # save file
    save(data_output, args.output, header_input, args.force)

    logger.info('Successfully terminated.')
def getArguments(parser):
    "Provides additional validation of the arguments collected by argparse."
    args = parser.parse_args()
    # parse volume and adapt to zero-indexing
    try:
        def _to_int_or_none(string):
            if 0 == len(string):
                return None
            return int(string)

        def _to_int_or_none_double(string):
            if 0 == len(string):
                return [None, None]
            return map(_to_int_or_none, string.split(':'))

        args.volume = map(_to_int_or_none_double, args.volume.split(','))
        args.volume = [(x[0], x[1]) for x in args.volume]
    except (ValueError, IndexError) as e:
        raise ArgumentError('Malformed volume parameter "{}", see description with -h flag.'.format(args.volume), e)
    return args
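# Illustrative sketch (not part of the script above): what the parsing in
# getArguments() produces. The volume argument is expected as one "start:stop"
# pair per image dimension, comma-separated, with empty fields standing for an
# open-ended slice. The sample string below is made up for this example.
#
#   "10:50,0:25,:"  ->  [(10, 50), (0, 25), (None, None)]
#
# which main() then turns into the indexing list
# [slice(10, 50), slice(0, 25), slice(None, None)].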
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Exiting.'.format(args.output))
            exit(-1)

    # select boundary term
    if args.boundary == 'stawiaski':
        boundary_term = graphcut.energy_label.boundary_stawiaski
        logger.info('Selected boundary term: stawiaski')
    else:
        boundary_term = graphcut.energy_label.boundary_difference_of_means
        logger.info('Selected boundary term: difference of means')

    # load input images
    region_image_data, reference_header = load(args.region)
    badditional_image_data, _ = load(args.badditional)
    markers_image_data, _ = load(args.markers)

    # split marker image into fg and bg images
    fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data)

    # check if all image dimensions are the same
    if not (badditional_image_data.shape == region_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape):
        logger.critical('Not all of the supplied images are of the same shape.')
        raise ArgumentError('Not all of the supplied images are of the same shape.')

    # recompute the label ids to start from id = 1
    logger.info('Relabel input image...')
    region_image_data = filter.relabel(region_image_data)

    # generate graph
    logger.info('Preparing graph...')
    gcgraph = graphcut.graph_from_labels(region_image_data,
                                         fgmarkers_image_data,
                                         bgmarkers_image_data,
                                         boundary_term=boundary_term,
                                         boundary_term_args=(badditional_image_data))  # a second argument, 0, would denote the directedness of the graph

    logger.info('Removing images that are no longer required from memory...')
    del fgmarkers_image_data
    del bgmarkers_image_data
    del badditional_image_data

    # execute min-cut
    logger.info('Executing min-cut...')
    maxflow = gcgraph.maxflow()
    logger.debug('Maxflow is {}'.format(maxflow))

    # apply results to the region image
    logger.info('Applying results...')
    mapping = [0]  # region ids start from 1, so index 0 serves only as padding
    mapping.extend(map(lambda x: 0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1,
                       scipy.unique(region_image_data)))
    region_image_data = filter.relabel_map(region_image_data, mapping)

    # save resulting mask
    save(region_image_data.astype(scipy.bool_), args.output, reference_header, args.force)

    logger.info('Successfully terminated.')
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # build output image names
    image_fg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.fg'
    image_fg_name += args.mask.split('/')[-1][-4:]
    image_bg_name = args.folder + '/' + args.mask.split('/')[-1][:-4] + '.bg'
    image_bg_name += args.mask.split('/')[-1][-4:]

    # check if output images exist
    if not args.force:
        if os.path.exists(image_fg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_fg_name))
            exit(1)
        elif os.path.exists(image_bg_name):
            logger.warning('The output image {} already exists. Breaking.'.format(image_bg_name))
            exit(1)

    # load mask
    logger.info('Loading mask {}...'.format(args.mask))
    try:
        mask_image = load(args.mask)
        mask_image_data = numpy.squeeze(mask_image.get_data()).astype(scipy.bool_)
    except ImageFileError as e:
        logger.critical('The mask image does not exist or its file type is unknown.')
        raise ArgumentError('The mask image does not exist or its file type is unknown.', e)

    # erode mask stepwise
    logger.info('Step-wise reducing mask to find center...')
    mask_remains = mask_image_data.copy()
    while True:
        mask_remains_next = ndimage.binary_erosion(mask_remains, iterations=2)
        if 0 == len(mask_remains_next.nonzero()[0]):
            break
        mask_remains = mask_remains_next

    # extract one of the remaining voxels
    voxels = mask_remains.nonzero()
    marker = (voxels[0][0], voxels[1][0], voxels[2][0])
    logger.debug('Extracted foreground seed is {}.'.format(marker))

    # check suitability of corners as background markers
    logger.info('Checking if the corners are suitable background seed candidates...')
    if True == mask_image_data[0, 0, 0] or \
       True == mask_image_data[-1, 0, 0] or \
       True == mask_image_data[0, -1, 0] or \
       True == mask_image_data[0, 0, -1] or \
       True == mask_image_data[-1, -1, 0] or \
       True == mask_image_data[-1, 0, -1] or \
       True == mask_image_data[0, -1, -1] or \
       True == mask_image_data[-1, -1, -1]:
        logger.critical('The corners of the image do not correspond to background voxels.')
        raise ArgumentError('The corners of the image do not correspond to background voxels.')

    # create and save foreground marker image
    logger.info('Creating foreground marker image...')
    image_fg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    image_fg_data[marker[0], marker[1], marker[2]] = True

    logger.info('Saving foreground marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_fg_data, mask_image), image_fg_name)

    # create and save background marker image
    logger.info('Creating background marker image...')
    image_bg_data = scipy.zeros(mask_image_data.shape, dtype=scipy.bool_)
    image_bg_data[0, 0, 0] = True
    image_bg_data[-1, 0, 0] = True
    image_bg_data[0, -1, 0] = True
    image_bg_data[0, 0, -1] = True
    image_bg_data[-1, -1, 0] = True
    image_bg_data[-1, 0, -1] = True
    image_bg_data[0, -1, -1] = True
    image_bg_data[-1, -1, -1] = True

    logger.info('Saving background marker image...')
    mask_image.get_header().set_data_dtype(scipy.int8)
    save(image_like(image_bg_data, mask_image), image_bg_name)

    logger.info('Successfully terminated.')
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Exiting.'.format(args.output))
            exit(-1)

    # select boundary term from ['diff_linear', 'diff_exp', 'diff_div', 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow']
    if 'diff_linear' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_difference_linear
        logger.info('Selected boundary term: linear difference of intensities')
    elif 'diff_exp' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_difference_exponential
        logger.info('Selected boundary term: exponential difference of intensities')
    elif 'diff_div' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_difference_division
        logger.info('Selected boundary term: divided difference of intensities')
    elif 'diff_pow' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_difference_power
        logger.info('Selected boundary term: power based / raised difference of intensities')
    elif 'max_linear' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_maximum_linear
        logger.info('Selected boundary term: linear maximum of intensities')
    elif 'max_exp' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_maximum_exponential
        logger.info('Selected boundary term: exponential maximum of intensities')
    elif 'max_div' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_maximum_division
        logger.info('Selected boundary term: divided maximum of intensities')
    elif 'max_pow' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_maximum_power
        logger.info('Selected boundary term: power based / raised maximum of intensities')

    # load input images
    badditional_image_data, reference_header = load(args.badditional)
    markers_image_data, _ = load(args.markers)

    # split marker image into fg and bg images
    fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data)

    # check if all image dimensions are the same
    if not (badditional_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape):
        logger.critical('Not all of the supplied images are of the same shape.')
        raise ArgumentError('Not all of the supplied images are of the same shape.')

    # extract spacing if required
    if args.spacing:
        spacing = header.get_pixel_spacing(reference_header)
        logger.info('Taking spacing of {} into account.'.format(spacing))
    else:
        spacing = False

    # generate graph
    logger.info('Preparing BK_MFMC C++ graph...')
    gcgraph = graphcut.graph_from_voxels(fgmarkers_image_data,
                                         bgmarkers_image_data,
                                         boundary_term=boundary_term,
                                         boundary_term_args=(badditional_image_data, args.sigma, spacing))

    # execute min-cut
    logger.info('Executing min-cut...')
    maxflow = gcgraph.maxflow()
    logger.debug('Maxflow is {}'.format(maxflow))

    # reshape results to form a valid mask
    logger.info('Applying results...')
    result_image_data = scipy.zeros(bgmarkers_image_data.size, dtype=scipy.bool_)
    for idx in range(len(result_image_data)):
        result_image_data[idx] = 0 if gcgraph.termtype.SINK == gcgraph.what_segment(idx) else 1
    result_image_data = result_image_data.reshape(bgmarkers_image_data.shape)

    # save resulting mask
    save(result_image_data.astype(scipy.bool_), args.output, reference_header, args.force)

    logger.info('Successfully terminated.')
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Exiting.'.format(args.output))
            exit(-1)

    # load input images
    region_image_data, reference_header = load(args.region)
    markers_image_data, _ = load(args.markers)
    gradient_image_data, _ = load(args.gradient)

    # split marker image into fg and bg images
    logger.info('Extracting foreground and background markers...')
    fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data)

    # check if all images are of the same shape
    if not (gradient_image_data.shape == region_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape):
        logger.critical('Not all of the supplied images are of the same shape.')
        raise ArgumentError('Not all of the supplied images are of the same shape.')

    # collect cut objects
    cut_xy = __get_bg_bounding_pipe(bgmarkers_image_data)

    # cut volumes
    old_size = region_image_data.shape
    gradient_image_data = gradient_image_data[cut_xy]
    region_image_data = region_image_data[cut_xy]
    fgmarkers_image_data = fgmarkers_image_data[cut_xy]
    bgmarkers_image_data = bgmarkers_image_data[cut_xy]

    # recompute the label ids to start from id = 1
    logger.info('Relabel input image...')
    region_image_data = filter.relabel(region_image_data)

    # generate graph
    logger.info('Preparing graph...')
    gcgraph = graphcut.graph_from_labels(region_image_data,
                                         fgmarkers_image_data,
                                         bgmarkers_image_data,
                                         boundary_term=graphcut.energy_label.boundary_stawiaski,
                                         boundary_term_args=(gradient_image_data))  # a second argument, 0, would denote the directedness of the graph

    logger.info('Removing images that are no longer required from memory...')
    del fgmarkers_image_data
    del bgmarkers_image_data
    del gradient_image_data

    # execute min-cut
    logger.info('Executing min-cut...')
    maxflow = gcgraph.maxflow()
    logger.debug('Maxflow is {}'.format(maxflow))

    # apply results to the region image
    logger.info('Applying results...')
    mapping = [0]  # region ids start from 1, so index 0 serves only as padding
    mapping.extend(map(lambda x: 0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1,
                       scipy.unique(region_image_data)))
    region_image_data = filter.relabel_map(region_image_data, mapping)

    # generate the final image by re-embedding the result in the original size
    output_image_data = scipy.zeros(old_size, dtype=scipy.bool_)
    output_image_data[cut_xy] = region_image_data

    # save resulting mask
    save(output_image_data, args.output, reference_header, args.force)

    logger.info('Successfully terminated.')
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # load mask
    logger.info('Loading mask {}...'.format(args.mask))
    mask_image, _ = load(args.mask)

    # store the mask image's shape for a later check against the input image
    mask_image_shape = mask_image.shape

    # extract the position of the foreground object in the mask image
    logger.info('Extract the position of the foreground object...')
    mask = mask_image.nonzero()
    position = ((max(0, mask[0].min() - args.offset), mask[0].max() + 1 + args.offset),  # crop negative values
                (max(0, mask[1].min() - args.offset), mask[1].max() + 1 + args.offset),
                (max(0, mask[2].min() - args.offset), mask[2].max() + 1 + args.offset))  # minx, maxx / miny, maxy / minz, maxz
    logger.debug('Extracted position is {}.'.format(position))

    # unload mask and mask image
    del mask
    del mask_image

    # load image
    logger.info('Loading image {}...'.format(args.image))
    image_data, image_header = load(args.image)

    # check if the mask image and the input image are of the same shape
    if mask_image_shape != image_data.shape:
        raise ArgumentError('The two input images are of different shape (mask: {} and image: {}).'.format(mask_image_shape, image_data.shape))

    # execute extraction of the sub-area
    logger.info('Extracting sub-volume...')
    index = [slice(x[0], x[1]) for x in position]
    volume = image_data[index]

    # check if the output image contains data
    if 0 == len(volume):
        logger.exception('The extracted sub-volume is of zero size. This usually means that the mask image contained no foreground object.')
        sys.exit(0)

    logger.debug('Extracted volume is of shape {}.'.format(volume.shape))

    # get base origin of the image
    origin_base = numpy.array([0] * image_data.ndim)  # for backwards compatibility

    # modify the volume offset to imitate numpy behaviour (e.g. wrap negative values)
    offset = numpy.array([x[0] for x in position])
    for i in range(0, len(offset)):
        if None == offset[i]:
            offset[i] = 0
    offset[offset < 0] += numpy.array(image_data.shape)[offset < 0]  # wrap around
    offset[offset < 0] = 0  # set negative values to zero

    # calculate final new origin
    origin = origin_base + offset
    logger.debug('Final origin created as {} + {} = {}.'.format(origin_base, offset, origin))

    # save results in same format as input image
    logger.info('Saving extracted volume...')
    save(volume, args.output, image_header, args.force)

    logger.info('Successfully terminated.')
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # build output image names
    output_hdr_name = args.output + '.hdr'
    output_img_name = args.output + '.img'
    output_msk_name = args.output + '.msk'

    # check if output images exist
    if not args.force:
        if os.path.exists(output_hdr_name):
            logger.warning('The output header {} already exists. Breaking.'.format(output_hdr_name))
            exit(1)
        elif os.path.exists(output_img_name):
            logger.warning('The output image {} already exists. Breaking.'.format(output_img_name))
            exit(1)
        elif os.path.exists(output_msk_name):
            logger.warning('The output info file {} already exists. Breaking.'.format(output_msk_name))
            exit(1)

    # decide on the most suitable bit format
    if len(args.masks) / 2 <= 8:
        bit_format = scipy.uint8
    elif len(args.masks) / 2 <= 16:
        bit_format = scipy.uint16
    elif len(args.masks) / 2 <= 32:
        bit_format = scipy.uint32
    elif len(args.masks) / 2 <= 64:
        bit_format = scipy.uint64
    else:
        raise ArgumentError('It is not possible to combine more than 64 single masks.')

    logger.info('Creating a Radiance® segmentation image in {} bit format...'.format(bit_format))

    # load the first mask image as reference and template for saving
    logger.info('Loading mask {} ({} segmentation) using NiBabel...'.format(args.masks[0], args.masks[1]))
    image_mask = load(args.masks[0])
    image_mask_data = scipy.squeeze(image_mask.get_data())

    # prepare result image
    image_radiance_data = scipy.zeros(image_mask_data.shape, dtype=bit_format)
    logger.debug('Result image is of dimensions {} and type {}.'.format(image_radiance_data.shape, image_radiance_data.dtype))

    # prepare the .msk file
    f = open(output_msk_name, 'w')

    # add the first mask to the result image
    image_radiance_data[image_mask_data > 0] = 1
    # add the first mask's segmentation identifier to the .msk file
    f.write('{}\t1\t{}\t{}\t{}\n'.format(args.masks[1], *__COLOURS[0 % len(__COLOURS)]))

    for i in range(2, len(args.masks), 2):
        # load mask image
        logger.info('Loading mask {} ({} segmentation) using NiBabel...'.format(args.masks[i], args.masks[i + 1]))
        image_mask_data = scipy.squeeze(load(args.masks[i]).get_data())

        # check if the shape of the images is consistent
        if image_mask_data.shape != image_radiance_data.shape:
            raise ArgumentError('Mask {} has shape {}, which differs from that of the first mask image ({}).'.format(args.masks[i], image_mask_data.shape, image_radiance_data.shape))

        # add the mask to the result image
        image_radiance_data[image_mask_data > 0] += pow(2, i / 2)

        # add the mask's segmentation identifier to the .msk file
        f.write('{}\t{}\t{}\t{}\t{}\n'.format(args.masks[i + 1], pow(2, i / 2), *__COLOURS[(i / 2) % len(__COLOURS)]))

    # close the .msk file
    f.close()

    logger.info('Saving Radiance® segmentation image as {}/.img/.msk...'.format(output_hdr_name))
    image_mask.get_header().set_data_dtype(bit_format)
    save(image_like(image_radiance_data, image_mask), output_hdr_name)

    logger.info('Successfully terminated.')
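# Illustrative sketch (not part of the script above): how a combined voxel value
# in the resulting segmentation image decodes back into the original masks. The
# k-th supplied mask (zero-based) is stored in bit 2**k, so a voxel value of
# 5 = 1 + 4 means masks 0 and 2 overlap there. The function name and the sample
# value are made up for this example.
def _decode_combined_value(value, no_masks):
    """Return the zero-based indices of the masks encoded in a voxel value."""
    return [k for k in range(no_masks) if value & (1 << k)]

# _decode_combined_value(5, 4) -> [0, 2]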