# Imports reconstructed for the snippets below; the original module headers are
# not shown, so treat these as assumptions. Project-local helpers used later
# (my_suggest_normalization_param, my_intensity_normalization, _printStackParams,
# slidingZ, medianFilter, readVoxelSize, myAnalyzeSkeleton, cmask, image_list,
# DATAPATH) are assumed to be defined elsewhere in their respective projects.
import logging
import os
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
from typing import Union

import cv2
import napari
import numpy as np
import tifffile
import bimpy  # project-local package (bimpy.util.bTiffFile)
from scipy.ndimage import zoom
from skimage.filters import sobel, threshold_yen
from skimage.io import imread  # assumed source of imread() in the fragment below
from skimage.morphology import remove_small_objects
from aicsimageio import AICSImage, omeTifWriter  # omeTifWriter is the pre-1.0 aicsimageio API
from aicssegmentation.core.pre_processing_utils import (
    intensity_normalization,
    image_smoothing_gaussian_3d,
    edge_preserving_smoothing_3d,
)
from aicssegmentation.core.seg_dot import dot_3d
from aicssegmentation.core.vessel import filament_2d_wrapper, filament_3d_wrapper, vesselness3D
from aicssegmentation.core.output_utils import save_segmentation, generate_segmentation_contour
from bfio import BioReader, BioWriter


def segment_images(inpDir, outDir, config_data):
    """
    Workflow for curvilinear shapes such as: Sec61 beta, TOM20, lamin B1 (mitosis specific)

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : configuration dictionary (parsed from the configuration file)
    """
    logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    inpDir_files = os.listdir(inpDir)
    for i, f in enumerate(inpDir_files):
        logger.info('Segmenting image : {}'.format(f))

        # Load image
        br = BioReader(os.path.join(inpDir, f))
        image = br.read_image()
        structure_channel = 0
        struct_img0 = image[:, :, :, structure_channel, 0]
        struct_img0 = struct_img0.transpose(2, 0, 1).astype(np.float32)

        # main algorithm
        intensity_scaling_param = config_data['intensity_scaling_param']
        if intensity_scaling_param[1] == 0:
            # a second entry of 0 means single-parameter scaling
            struct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param[:1])
        else:
            struct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param)

        gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma']
        if config_data['preprocessing_function'] == 'image_smoothing_gaussian_3d':
            structure_img_smooth = image_smoothing_gaussian_3d(struct_img, sigma=gaussian_smoothing_sigma)
        elif config_data['preprocessing_function'] == 'edge_preserving_smoothing_3d':
            structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

        f2_param = config_data['f2_param']
        bw = filament_2d_wrapper(structure_img_smooth, f2_param)

        minArea = config_data['minArea']
        seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)
        seg = seg > 0
        out_img = seg.astype(np.uint8)
        out_img[out_img > 0] = 255

        # create output image
        out_img = out_img.transpose(1, 2, 0)
        out_img = out_img.reshape((out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO (writer renamed from "bw" to avoid shadowing the mask above)
        writer = BioWriter(os.path.join(outDir, f))
        writer.num_x(out_img.shape[1])
        writer.num_y(out_img.shape[0])
        writer.num_z(out_img.shape[2])
        writer.num_c(out_img.shape[3])
        writer.num_t(out_img.shape[4])
        writer.pixel_type(dtype='uint8')
        writer.write_image(out_img)
        writer.close_image()
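# Hedged usage sketch (not from the original plugin): segment_images() reads the
# config keys shown above from an already-parsed dict. Values and paths here are
# illustrative assumptions only.
def _demo_segment_images():
    example_config = {
        'intensity_scaling_param': [3, 15],  # [low, high]; a 0 in the second slot triggers single-param scaling
        'gaussian_smoothing_sigma': 1,
        'preprocessing_function': 'image_smoothing_gaussian_3d',  # or 'edge_preserving_smoothing_3d'
        'f2_param': [[1, 0.01]],  # [[scale_1, cutoff_1], ...] for filament_2d_wrapper
        'minArea': 5,
    }
    segment_images('/path/to/input', '/path/to/output', example_config)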
def segment_image(struct_img):
    VESSELNESS_SIGMA = 1.0
    VESSELNESS_THRESHOLD = 1e-3

    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)
    response = vesselness3D(
        structure_img_smooth, sigmas=[VESSELNESS_SIGMA], tau=1, whiteonblack=True
    )
    return (response > VESSELNESS_THRESHOLD).astype(np.uint8)
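# A minimal end-to-end sketch for segment_image() (not from the original source).
# It assumes a single-channel 3D tif; the path and scaling parameters are
# illustrative assumptions.
def _demo_segment_image():
    raw = tifffile.imread('/path/to/stack.tif').astype(np.float32)  # hypothetical path
    norm = intensity_normalization(raw, scaling_param=[2, 20])  # illustrative scaling
    mask = segment_image(norm)  # uint8 mask of {0, 1}
    tifffile.imwrite('/path/to/stack_mask.tif', mask * 255)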
def Workflow_cardio_myl7(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure Cardio MYL7

    Parameters:
    -----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path whose filename is
            original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: segmentation result will be returned together with
            the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets
    intensity_norm_param = [8, 15.5]
    vesselness_sigma = [1]
    vesselness_cutoff = 0.01
    minArea = 15
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio), order=2)
        struct_img = (struct_img - struct_img.min() + 1e-8) / (
            struct_img.max() - struct_img.min() + 1e-8
        )

    # smoothing with edge preserving smoothing
    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################
    # vesselness 3d
    response = vesselness3D(structure_img_smooth, sigmas=vesselness_sigma, tau=1, whiteonblack=True)
    bw = response > vesselness_cutoff

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
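# Sketch of a "customize" hook for the wrapper above (hypothetical output_func).
# Per the docstring, it receives every intermediate image, the matching names,
# the output path, and the original filename; here it only inspects them.
def _demo_output_func(out_img_list, out_name_list, output_path, fn):
    for img, name in zip(out_img_list, out_name_list):
        print(fn, name, img.shape, img.dtype)  # e.g. im_norm / im_smooth / bw_final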
def myRun(path):
    """
    scale_x is set based on the estimated thickness of your target filaments.
    For example, if visually the thickness of the filaments is usually 3~4 pixels,
    then you may want to set scale_x as 1 or something near 1 (like 1.25).
    Multiple scales can be used if you have filaments of very different thickness.

    cutoff_x is a threshold applied on the actual filter response to get the binary
    result. A smaller cutoff_x may yield more filaments, especially detecting more
    dim ones and giving a thicker segmentation, while a larger cutoff_x is less
    permissive and yields fewer filaments and a slimmer segmentation.
    """
    f3_param = [[1, 0.01]]
    f3_param = [[5, 0.001], [3, 0.001]]  # overrides the single-scale setting above

    stackData = tifffile.imread(path)
    numSlices = stackData.shape[0]
    _printStackParams('stackData', stackData)

    stackData = slidingZ(stackData, upDownSlices=1)
    stackData = medianFilter(stackData)

    # give us a guess for our intensity_scaling_param parameters
    low_ratio, high_ratio = my_suggest_normalization_param(stackData)

    # try per slice
    normData = stackData.astype(np.float64)
    normData[:] = 0
    for i in range(numSlices):
        oneSlice = stackData[i, :, :]
        low_ratio, high_ratio = my_suggest_normalization_param(oneSlice)
        print(i, low_ratio, high_ratio)
        #low_ratio = 0.2
        low_ratio -= 0.2
        high_ratio -= 1
        intensity_scaling_param = [low_ratio, high_ratio]
        sliceNormData = intensity_normalization(oneSlice, scaling_param=intensity_scaling_param)
        normData[i, :, :] = sliceNormData
    #sys.exit()

    '''
    # whole-stack normalization (disabled in favor of the per-slice loop above)
    #intensity_scaling_param = [0.0, 22.5]
    intensity_scaling_param = [low_ratio, high_ratio]
    print(' === intensity_normalization() intensity_scaling_param:', intensity_scaling_param)

    # intensity normalization
    print(' === calling intensity_normalization()')
    normData = intensity_normalization(stackData, scaling_param=intensity_scaling_param)
    _printStackParams('normData', normData)
    '''

    # smoothing with edge preserving smoothing
    print(' === calling edge_preserving_smoothing_3d()')
    smoothData = edge_preserving_smoothing_3d(normData)
    _printStackParams('smoothData', smoothData)

    print(' === calling filament_3d_wrapper() f3_param:', f3_param)
    filamentData = filament_3d_wrapper(smoothData, f3_param)
    #filamentData = filamentData > 0
    _printStackParams('filamentData', filamentData)

    #filamentData2 = slidingZ(filamentData, upDownSlices=1)

    #
    # napari
    print('opening in napari')
    scale = (1, 0.6, 0.6)
    with napari.gui_qt():
        viewer = napari.Viewer(title='xxx')

        minContrast = 0
        maxContrast = 255
        myImageLayer = viewer.add_image(stackData, scale=scale,
                                        contrast_limits=(minContrast, maxContrast),
                                        colormap='green', visible=True, name='stackData')

        minContrast = 0
        maxContrast = 1
        myImageLayer = viewer.add_image(normData, scale=scale,
                                        contrast_limits=(minContrast, maxContrast),
                                        opacity=0.6, colormap='gray', visible=True, name='normData')

        minContrast = 0
        maxContrast = 1
        myImageLayer = viewer.add_image(filamentData, scale=scale,
                                        contrast_limits=(minContrast, maxContrast),
                                        opacity=0.6, colormap='blue', visible=True, name='filamentData')
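# The scale/cutoff guidance in the docstring above maps directly onto f3_param:
# one [scale_x, cutoff_x] pair per filter pass. A hedged, untuned example that
# covers both thin and thick filaments:
def _demo_f3_param():
    thin = [1, 0.01]      # filaments ~3-4 px thick: scale near 1
    thick = [2.5, 0.005]  # thicker, dimmer filaments: larger scale, lower cutoff
    return [thin, thick]  # pass to filament_3d_wrapper(smoothData, ...)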
def run(path, f3_param=[[1, 0.01]], minArea=20, saveNumber=0):
    """
    use aicssegmentation to pre-process raw data and then make/save a 3D mask
    """
    print('=== path:', path)

    # load x/y/z voxel size (assumes .tif was saved with Fiji)
    xVoxel, yVoxel, zVoxel = readVoxelSize(path)
    print('    xVoxel:', xVoxel, 'yVoxel:', yVoxel, 'zVoxel:', zVoxel)

    # load the data
    reader = AICSImage(path)
    IMG = reader.data.astype(np.float32)
    print('    IMG.shape:', IMG.shape)

    structure_channel = 0
    struct_img0 = IMG[0, structure_channel, :, :, :].copy()

    # give us a guess for our intensity_scaling_param parameters
    #from aicssegmentation.core.pre_processing_utils import suggest_normalization_param
    #suggest_normalization_param(struct_img0)
    low_ratio, high_ratio = my_suggest_normalization_param(struct_img0)

    #intensity_scaling_param = [0.0, 22.5]
    intensity_scaling_param = [low_ratio, high_ratio]
    print('*** intensity_normalization() intensity_scaling_param:', intensity_scaling_param)

    # intensity normalization
    print('=== calling intensity_normalization()')
    struct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param)

    # smoothing with edge preserving smoothing
    print('=== calling edge_preserving_smoothing_3d()')
    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

    """
    see: notebooks/playground_filament3d.ipynb

    scale_x is set based on the estimated thickness of your target filaments.
    For example, if visually the thickness of the filaments is usually 3~4 pixels,
    then you may want to set scale_x as 1 or something near 1 (like 1.25).
    Multiple scales can be used if you have filaments of very different thickness.

    cutoff_x is a threshold applied on the actual filter response to get the binary
    result. A smaller cutoff_x may yield more filaments, especially detecting more
    dim ones and giving a thicker segmentation, while a larger cutoff_x is less
    permissive and yields fewer filaments and a slimmer segmentation.
    """
    #f3_param = [[1, 0.01]]  # [scale_1, cutoff_1]
    print('=== calling filament_3d_wrapper() f3_param:', f3_param)
    bw = filament_3d_wrapper(structure_img_smooth, f3_param)

    #minArea = 20  # from recipe
    print('=== calling remove_small_objects() minArea:', minArea)
    seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    # build optional save-number suffix (used in the output filenames below)
    saveNumberStr = ''
    if saveNumber > 1:
        saveNumberStr = '_' + str(saveNumber)

    # save mask
    seg = seg > 0
    out = seg.astype(np.uint8)
    out[out > 0] = 255

    # save _dvMask
    maskPath = os.path.splitext(path)[0] + '_dvMask' + saveNumberStr + '.tif'
    print('=== saving 3D mask [WILL FAIL IF FILE EXISTS] as maskPath:', maskPath)
    try:
        writer = omeTifWriter.OmeTifWriter(maskPath)
        writer.save(out)
    except OSError:
        print('    error: file already exists, did not resave, maskPath:', maskPath)

    # analyze skeleton, take a 3d mask and analyze as a 1-pixel skeleton
    retDict0, mySkeleton = myAnalyzeSkeleton(out=out, imagePath=path)

    retDict = OrderedDict()
    retDict['tifPath'] = path
    retDict['maskPath'] = maskPath
    retDict['tifFile'] = os.path.basename(path)
    retDict['xVoxel'] = xVoxel
    retDict['yVoxel'] = yVoxel
    retDict['zVoxel'] = zVoxel
    #
    retDict['params'] = OrderedDict()
    retDict['params']['saveNumber'] = saveNumber
    # calculated in my_suggest_normalization_param
    retDict['params']['intensity_scaling_param'] = intensity_scaling_param
    # kludge, not sure where to put this. f3_param is a list of lists but screws up my .csv output !!!
    retDict['params']['f3_param'] = f3_param[0]
    retDict['params']['minArea'] = minArea

    retDict.update(retDict0)

    # save 1-pixel skeleton: mySkeleton
    # save _dvSkel
    skelPath = os.path.splitext(path)[0] + '_dvSkel' + saveNumberStr + '.tif'
    print('=== saving 3D skel [WILL FAIL IF FILE EXISTS] as skelPath:', skelPath)
    try:
        writer = omeTifWriter.OmeTifWriter(skelPath)
        writer.save(mySkeleton)
    except OSError:
        print('    error: file already exists, did not resave, skelPath:', skelPath)

    return retDict
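# Hedged example of driving run() with different recipes (the path is an
# illustrative assumption; saveNumber > 1 suffixes the saved mask/skeleton):
def _demo_run():
    results = []
    for i, f3 in enumerate([[[1, 0.01]], [[2, 0.005]]]):  # two illustrative recipes
        retDict = run('/path/to/stack.tif', f3_param=f3, minArea=20, saveNumber=i + 1)
        results.append(retDict)
    return results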
image_list = []
for file in os.listdir(DATAPATH):  # loop header reconstructed; the fragment began mid-loop
    if file.endswith('.tif'):
        image_list.append(file)

#creates an empty list for the object counts
object_counts = []

#loops through every image in the image_list
for x in image_list:
    im_path = os.path.join(DATAPATH, x)
    assert os.path.isfile(im_path)  # the original check discarded its result; assert makes it meaningful
    img = imread(im_path)

    #this could definitely be a more complicated normalization function
    #suggest_normalization_param(img)
    img_norm = intensity_normalization(img, [5.0, 15.0])
    img_smooth = edge_preserving_smoothing_3d(img_norm)

    # note: the smoothed image is not used below; the top-hat runs on the raw img
    bc_img = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, cmask(8))
    bc_img = sobel(bc_img)

    #thresholds the image based on yen's method
    block_size = 35  # unused; leftover from an adaptive-threshold attempt
    thresh = threshold_yen(bc_img)
    binary = bc_img > thresh

    #helps to subtract the background
    n = 12
    l = 256
    np.random.seed(1)
    im = np.zeros((l, l))
    points = l * np.random.random((2, n**2))
    im[(points[0]).astype(int), (points[1]).astype(int)] = 1
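    # Hedged completion (not in the original fragment): object_counts is declared
    # above but never appended to; counting connected components in the Yen mask
    # is one plausible intent.
    from skimage.measure import label  # local import to keep this sketch self-contained
    object_counts.append(int(label(binary).max()))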
def Workflow_son(struct_img, rescale_ratio, output_type, output_path, fn, output_func=None):
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets
    ##########################################################################
    intensity_norm_param = [2, 30]
    vesselness_sigma = [1.2]
    vesselness_cutoff = 0.15
    minArea = 15
    dot_2d_sigma = 1
    dot_3d_sigma = 1.15
    ##########################################################################

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    # smoothing with boundary preserving smoothing
    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

    ###################
    # core algorithm
    ###################
    response_f3 = vesselness3D(structure_img_smooth, sigmas=vesselness_sigma, tau=1, whiteonblack=True)
    response_f3 = response_f3 > vesselness_cutoff

    response_s3_1 = dot_3d(structure_img_smooth, log_sigma=dot_3d_sigma)
    response_s3_3 = dot_3d(structure_img_smooth, log_sigma=3)

    # isolate small dots: keep only the large objects of the 0.03 mask, then xor
    # against the looser 0.02 mask so just the small objects remain
    bw_small_inverse = remove_small_objects(response_s3_1 > 0.03, min_size=150)
    bw_small = np.logical_xor(bw_small_inverse, response_s3_1 > 0.02)

    bw_medium = np.logical_or(bw_small, response_s3_1 > 0.07)
    bw_large = np.logical_or(response_s3_3 > 0.2, response_f3 > 0.25)
    bw = np.logical_or(np.logical_or(bw_small, bw_medium), bw_large)

    ###################
    # POST-PROCESSING
    ###################
    bw = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)
    for zz in range(bw.shape[0]):
        bw[zz, :, :] = remove_small_objects(bw[zz, :, :], min_size=3, connectivity=1, in_place=False)

    seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    if output_type == 'default':
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'array':
        return seg
    elif output_type == 'array_with_contour':
        return (seg, generate_segmentation_contour(seg))
    else:
        print('you can implement your output hook here, but not yet')
        quit()
# NOTE: this appears to be a later revision of the Workflow_son wrapper above,
# adding the out_img_list/out_name_list plumbing and the "customize" output hook;
# if both live in one module, this later definition wins.
def Workflow_son(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure SON

    Parameters:
    -----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path whose filename is
            original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: segmentation result will be returned together with
            the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets
    ##########################################################################
    intensity_norm_param = [2, 30]
    vesselness_sigma = [1.2]
    vesselness_cutoff = 0.15
    minArea = 15
    # dot_2d_sigma = 1
    dot_3d_sigma = 1.15
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # smoothing with boundary preserving smoothing
    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################
    response_f3 = vesselness3D(structure_img_smooth, sigmas=vesselness_sigma, tau=1, whiteonblack=True)
    response_f3 = response_f3 > vesselness_cutoff

    response_s3_1 = dot_3d(structure_img_smooth, log_sigma=dot_3d_sigma)
    response_s3_3 = dot_3d(structure_img_smooth, log_sigma=3)

    # isolate small dots: keep only the large objects of the 0.03 mask, then xor
    # against the looser 0.02 mask so just the small objects remain
    bw_small_inverse = remove_small_objects(response_s3_1 > 0.03, min_size=150)
    bw_small = np.logical_xor(bw_small_inverse, response_s3_1 > 0.02)

    bw_medium = np.logical_or(bw_small, response_s3_1 > 0.07)
    bw_large = np.logical_or(response_s3_3 > 0.2, response_f3 > 0.25)
    bw = np.logical_or(np.logical_or(bw_small, bw_medium), bw_large)

    ###################
    # POST-PROCESSING
    ###################
    bw = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)
    for zz in range(bw.shape[0]):
        bw[zz, :, :] = remove_small_objects(bw[zz, :, :], min_size=3, connectivity=1, in_place=False)

    seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
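# Usage sketch (assumption): the SON wrapper follows the same calling convention
# as the other structure wrappers, so "array" mode skips all file output.
def _demo_workflow_son(img3d: np.ndarray):
    return Workflow_son(img3d, output_type="array")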
def myRun(path, myCellNumber, genotype, sex,
          saveBase='/Users/cudmore/Desktop/samiVolume2',
          f3_param=[1, 0.01], minArea=20, verbose=False):  #, saveNumber=0):
    """
    use aicssegmentation to pre-process raw data and then make/save a 3D mask

    path: path to raw tif, the _ch2.tif
    myCellNumber: cell number from batch file (per genotype/sex), NOT unique across different (genotype, sex)
    saveBase: full path to folder to save to (e.g. /Users/cudmore/Desktop/samiVolume2), must exist
    ...
    saveNumber: not used
    """
    print('  === myRun() path:', path, 'saveBase:', saveBase,
          'f3_param:', f3_param, 'minArea:', minArea)  #, 'saveNumber:', saveNumber)

    #20200608
    #saveBase = '/Users/cudmore/Desktop/samiVolume2'
    tmpPath, tmpFileName = os.path.split(path)
    tmpPath = tmpPath.replace('../data/', '')
    tmpFileNameNoExtension, tmpExtension = tmpFileName.split('.')

    saveBase = os.path.join(saveBase, tmpPath)
    if not os.path.isdir(saveBase):
        print('    making output dir:', saveBase)
        os.makedirs(saveBase)

    #saveBase = os.path.join(saveBase, tmpPath, tmpFileNameNoExtension)
    saveBase = os.path.join(saveBase, tmpFileNameNoExtension)
    #saveBase = os.path.join(saveBase, os.path.splitext(path)[0].replace('../data/', ''))
    if verbose:
        print('    saveBase:', saveBase)

    if not os.path.isfile(path):
        print('ERROR: myRun() did not find file:', path)
        return None

    # load the data
    IMG, tifHeader = bimpy.util.bTiffFile.imread(path)

    saveDataPath = saveBase + '.tif'
    print('    === saving raw data to saveDataPath:', saveDataPath)
    bimpy.util.bTiffFile.imsave(saveDataPath, IMG, tifHeader=tifHeader, overwriteExisting=True)

    IMG = IMG.astype(np.float32)

    # channel 1: load then save (do nothing else with it)
    channelOnePath = path.replace('_ch2.tif', '_ch1.tif')
    channelOneData, channelOneTiffHeader = bimpy.util.bTiffFile.imread(channelOnePath)
    saveChannelOnePath = saveBase.replace('_ch2', '_ch1.tif')
    bimpy.util.bTiffFile.imsave(saveChannelOnePath, channelOneData, tifHeader=tifHeader, overwriteExisting=True)

    # load x/y/z voxel size (assumes .tif was saved with Fiji)
    xVoxel, yVoxel, zVoxel = readVoxelSize(path)
    print('    file:', os.path.basename(path), 'has shape:', IMG.shape,
          'xVoxel:', xVoxel, 'yVoxel:', yVoxel, 'zVoxel:', zVoxel)

    # give us a guess for our intensity_scaling_param parameters
    #low_ratio, high_ratio = my_suggest_normalization_param(struct_img0)
    low_ratio, high_ratio = my_suggest_normalization_param(IMG)

    #intensity_scaling_param = [0.0, 22.5]
    intensity_scaling_param = [low_ratio, high_ratio]
    if verbose:
        print('    === my_intensity_normalization() intensity_scaling_param:', intensity_scaling_param)

    # intensity normalization
    if verbose:
        print('    === calling my_intensity_normalization()')
    #struct_img = my_intensity_normalization(struct_img0, scaling_param=intensity_scaling_param)
    struct_img = my_intensity_normalization(IMG, scaling_param=intensity_scaling_param)

    # smoothing with edge preserving smoothing
    if verbose:
        print('    === calling edge_preserving_smoothing_3d()')
    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

    """
    see: notebooks/playground_filament3d.ipynb

    scale_x is set based on the estimated thickness of your target filaments.
    For example, if visually the thickness of the filaments is usually 3~4 pixels,
    then you may want to set scale_x as 1 or something near 1 (like 1.25).
    Multiple scales can be used if you have filaments of very different thickness.

    cutoff_x is a threshold applied on the actual filter response to get the binary
    result. A smaller cutoff_x may yield more filaments, especially detecting more
    dim ones and giving a thicker segmentation, while a larger cutoff_x is less
    permissive and yields fewer filaments and a slimmer segmentation.
    """
    #f3_param = [[1, 0.01]]  # [scale_1, cutoff_1]
    if verbose:
        print('    === calling filament_3d_wrapper() f3_param:', f3_param)
    bw = filament_3d_wrapper(structure_img_smooth, [f3_param])  # f3_param is a list of a list

    #minArea = 20  # from recipe
    if verbose:
        print('    === calling remove_small_objects() minArea:', minArea)
    seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    # build optional save-number suffix (saveNumber is currently unused)
    saveNumberStr = ''
    #if saveNumber > 1:
    #    saveNumberStr = '_' + str(saveNumber)

    # save _dvMask.tif
    seg = seg > 0
    out = seg.astype(np.uint8)
    out[out > 0] = 255

    #maskPath = os.path.splitext(path)[0] + '_dvMask' + saveNumberStr + '.tif'
    maskPath = saveBase + '_dvMask' + saveNumberStr + '.tif'
    print('    === saving 3D mask as maskPath:', maskPath)
    #tifffile.imsave(maskPath, out)
    bimpy.util.bTiffFile.imsave(maskPath, out, tifHeader=tifHeader, overwriteExisting=True)
    '''
    try:
        writer = omeTifWriter.OmeTifWriter(maskPath)
        writer.save(out)
    except (OSError) as e:
        print('    ******** ERROR: file already exists, did not resave, maskPath:', maskPath)
    '''

    # ################
    # analyze skeleton, take a 3d mask and analyze as a 1-pixel skeleton
    retDict0, mySkeleton = myAnalyzeSkeleton(out=out, imagePath=path, saveBase=saveBase)
    # ################

    retDict = OrderedDict()
    retDict['analysisDate'] = datetime.today().strftime('%Y%m%d')
    #
    retDict['saveBase'] = saveBase
    retDict['myCellNumber'] = myCellNumber
    retDict['genotype'] = genotype
    retDict['sex'] = sex
    #
    retDict['path'] = path  # 20200713 working on parallel
    retDict['tifPath'] = path
    retDict['maskPath'] = maskPath
    retDict['tifFile'] = os.path.basename(path)
    retDict['xVoxel'] = xVoxel
    retDict['yVoxel'] = yVoxel
    retDict['zVoxel'] = zVoxel
    #
    retDict['params'] = OrderedDict()
    #retDict['params']['saveNumber'] = saveNumber
    # calculated in my_suggest_normalization_param
    retDict['params']['intensity_scaling_param'] = intensity_scaling_param
    #retDict['params']['f3_param'] = f3_param[0]  # kludge; f3_param is a list of lists but screws up my .csv output !!!
    retDict['params']['f3_param'] = f3_param  # kludge, not sure where to put this; a list screws up my .csv output !!!
    retDict['params']['minArea'] = minArea
    #
    retDict.update(retDict0)  # this has 'keys' that are lists of ('len3d', 'eLen', 'branchType')

    # save 1-pixel skeleton: mySkeleton
    # save _dvSkel
    #skelPath = os.path.splitext(path)[0] + '_dvSkel' + saveNumberStr + '.tif'
    skelPath = saveBase + '_dvSkel' + saveNumberStr + '.tif'
    print('    === saving 3D skel as skelPath:', skelPath)
    #tifffile.imsave(skelPath, mySkeleton)
    bimpy.util.bTiffFile.imsave(skelPath, mySkeleton, tifHeader=tifHeader, overwriteExisting=True)

    return retDict
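# Hedged sketch of a batch driver for myRun() (paths and metadata are
# illustrative; per the docstring, myCellNumber is only unique within one
# (genotype, sex) pair):
def _demo_myRun():
    batch = [
        ('../data/wt-female/cell1_ch2.tif', 1, 'wt', 'female'),
        ('../data/wt-female/cell2_ch2.tif', 2, 'wt', 'female'),
    ]
    results = []
    for path, cellNumber, genotype, sex in batch:
        retDict = myRun(path, cellNumber, genotype, sex, f3_param=[3, 0.001], minArea=20)
        if retDict is not None:
            results.append(retDict)
    return results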