Ejemplo n.º 1
0
def segment_images(inpDir, outDir, config_data):
    """ Workflow for data with filamentous structures
    such as ZO1, Beta Actin, Titin, Troponin 1.

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : dict of configuration parameters (subscripted by key
            below, e.g. 'intensity_scaling_param') — NOT a file path

    Raises:
        ValueError : if config_data['preprocessing_function'] names an
            unknown smoothing function (the original code would hit a
            NameError further down instead)
    """

    logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    for f in os.listdir(inpDir):
        logger.info('Segmenting image : {}'.format(f))

        # Load image and pull out the structure channel as a float32 ZYX stack
        br = BioReader(os.path.join(inpDir, f))
        image = br.read_image()
        structure_channel = 0
        struct_img0 = image[:, :, :, structure_channel, 0]
        struct_img0 = struct_img0.transpose(2, 0, 1).astype(np.float32)

        # Intensity normalization
        intensity_scaling_param = config_data['intensity_scaling_param']
        struct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param)

        # Pre-processing smoothing step, selected by the configuration.
        # 'gaussian_smoothing_sigma' is only read when actually needed, so a
        # config using edge-preserving smoothing may omit it.
        preproc = config_data['preprocessing_function']
        if preproc == 'image_smoothing_gaussian_3d':
            gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma']
            structure_img_smooth = image_smoothing_gaussian_3d(struct_img, sigma=gaussian_smoothing_sigma)
        elif preproc == 'edge_preserving_smoothing_3d':
            structure_img_smooth = edge_preserving_smoothing_3d(struct_img)
        else:
            raise ValueError('Unknown preprocessing_function: {}'.format(preproc))

        # Filament segmentation followed by small-object removal
        f3_param = config_data['f3_param']
        bw = filament_3d_wrapper(structure_img_smooth, f3_param)
        minArea = config_data['minArea']
        seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

        # Binary mask -> uint8 image with foreground = 255
        out_img = (seg > 0).astype(np.uint8)
        out_img[out_img > 0] = 255

        # Reorder Z,Y,X -> Y,X,Z and add singleton channel/time dims for BFIO
        out_img = out_img.transpose(1, 2, 0)
        out_img = out_img.reshape((out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO (distinct name so the mask `bw` above is not shadowed)
        writer = BioWriter(os.path.join(outDir, f))
        writer.num_x(out_img.shape[1])
        writer.num_y(out_img.shape[0])
        writer.num_z(out_img.shape[2])
        writer.num_c(out_img.shape[3])
        writer.num_t(out_img.shape[4])
        writer.pixel_type(dtype='uint8')
        writer.write_image(out_img)
        writer.close_image()
Ejemplo n.º 2
0
def myRun(path):

	"""
		Pre-process the tif stack at *path*, segment filaments, and display
		raw / normalized / segmented layers in a napari viewer.

		scale_x is set based on the estimated thickness of your target filaments.
		For example, if visually the thickness of the filaments is usually 3~4 pixels,
		then you may want to set scale_x as 1 or something near 1 (like 1.25).
		Multiple scales can be used, if you have filaments of very different thickness.
	
		cutoff_x is a threshold applied on the actual filter response to get the binary result.
		Smaller cutoff_x may yield more filaments, especially detecting more dim ones and thicker segmentation,
		while larger cutoff_x could be less permissive and yield less filaments and slimmer segmentation.
	"""
	# NOTE(review): the first assignment is dead code — it is immediately
	# overwritten by the two-scale parameter list on the next line.
	f3_param=[[1, 0.01]]
	f3_param=[[5, 0.001], [3, 0.001]]
	
	stackData = tifffile.imread(path)
	numSlices = stackData.shape[0]
	_printStackParams('stackData', stackData)
	
	
	# presumably blends each slice with its +/-1 z-neighbors — TODO confirm
	# against the slidingZ() implementation
	stackData = slidingZ(stackData, upDownSlices=1)
	
	stackData = medianFilter(stackData)

	# give us a guess for our intensity_scaling_param parameters
	# NOTE(review): this whole-stack estimate is overwritten per slice in the
	# loop below, so its result appears unused — confirm and remove
	low_ratio, high_ratio = my_suggest_normalization_param(stackData)

	# try per slice
	normData = stackData.astype(np.float64)
	normData[:] = 0
	for i in range(numSlices):
		oneSlice = stackData[i,:,:]
		low_ratio, high_ratio = my_suggest_normalization_param(oneSlice)
		
		
		print(i, low_ratio, high_ratio)
		
		#low_ratio = 0.2
		# NOTE(review): ad-hoc offsets to the suggested normalization params,
		# presumably tuned by eye — verify before reusing on other data
		low_ratio -= 0.2
		high_ratio -= 1
		
		intensity_scaling_param = [low_ratio, high_ratio]
		sliceNormData = intensity_normalization(oneSlice, scaling_param=intensity_scaling_param)
		normData[i,:,:] = sliceNormData
		
	#sys.exit()
	
	'''
	#intensity_scaling_param = [0.0, 22.5]
	intensity_scaling_param = [low_ratio, high_ratio]
	print('    === intensity_normalization() intensity_scaling_param:', intensity_scaling_param)
	
	# intensity normalization
	print('    === calling intensity_normalization()')
	normData = intensity_normalization(stackData, scaling_param=intensity_scaling_param)
	_printStackParams('normData', normData)
	'''
	
	# smoothing with edge preserving smoothing 
	print('    === calling edge_preserving_smoothing_3d()')
	smoothData = edge_preserving_smoothing_3d(normData)
	_printStackParams('smoothData', smoothData)

	print('    === calling filament_3d_wrapper() f3_param:', f3_param)
	filamentData = filament_3d_wrapper(smoothData, f3_param)
	#filamentData = filamentData > 0
	_printStackParams('filamentData', filamentData)

	#filamentData2 = slidingZ(filamentData, upDownSlices=1)
	
	#
	# napari: overlay raw (green), normalized (gray), segmented (blue) layers
	print('opening in napari')
	scale = (1, 0.6, 0.6)
	with napari.gui_qt():
		viewer = napari.Viewer(title='xxx')
		
		minContrast = 0
		maxContrast = 255
		myImageLayer = viewer.add_image(stackData, scale=scale, contrast_limits=(minContrast, maxContrast), colormap='green', visible=True, name='stackData')
		
		minContrast = 0
		maxContrast = 1
		myImageLayer = viewer.add_image(normData, scale=scale, contrast_limits=(minContrast, maxContrast), opacity=0.6, colormap='gray', visible=True, name='normData')
		
		minContrast = 0
		maxContrast = 1
		myImageLayer = viewer.add_image(filamentData, scale=scale, contrast_limits=(minContrast, maxContrast), opacity=0.6, colormap='blue', visible=True, name='filamentData')
		
		# NOTE(review): the triple-quote below is never closed — the source
		# appears truncated at this point
		'''
Ejemplo n.º 3
0
def run(path, f3_param=None, minArea=20, saveNumber=0):
    """Pre-process raw data with aicssegmentation, make/save a 3D mask and a
    1-pixel skeleton, and return a dict of analysis results.

    Args:
        path: path to a .tif stack (voxel size is read assuming it was saved
            with Fiji).
        f3_param: list of [scale, cutoff] pairs for filament_3d_wrapper.
            Defaults to [[1, 0.01]].
        minArea: minimum object size (voxels) kept by remove_small_objects.
        saveNumber: when > 1, appended as a '_<n>' suffix to saved file names.

    Returns:
        OrderedDict with paths, voxel sizes, parameters, and the skeleton
        analysis results merged in from myAnalyzeSkeleton().
    """
    # Avoid the mutable-default-argument pitfall; same effective default.
    if f3_param is None:
        f3_param = [[1, 0.01]]

    print('=== path:', path)

    # load x/y/z voxel size (assumes .tif was saved with Fiji)
    xVoxel, yVoxel, zVoxel = readVoxelSize(path)
    print('    xVoxel:', xVoxel, 'yVoxel:', yVoxel, 'zVoxel:', zVoxel)

    # load the data
    reader = AICSImage(path)
    IMG = reader.data.astype(np.float32)
    print('    IMG.shape:', IMG.shape)

    structure_channel = 0
    struct_img0 = IMG[0, structure_channel, :, :, :].copy()

    # give us a guess for our intensity_scaling_param parameters
    low_ratio, high_ratio = my_suggest_normalization_param(struct_img0)
    intensity_scaling_param = [low_ratio, high_ratio]
    print('*** intensity_normalization() intensity_scaling_param:',
          intensity_scaling_param)

    # intensity normalization
    print('=== calling intensity_normalization()')
    struct_img = intensity_normalization(struct_img0,
                                         scaling_param=intensity_scaling_param)

    # smoothing with edge preserving smoothing
    print('=== calling edge_preserving_smoothing_3d()')
    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

    # see: notebooks/playground_filament3d.ipynb
    # Each f3_param entry is [scale, cutoff]: scale tracks the estimated
    # filament thickness (use several entries for mixed thicknesses); cutoff
    # thresholds the filter response — smaller yields more/thicker filaments,
    # larger yields fewer/slimmer ones.
    print('=== calling filament_3d_wrapper() f3_param:', f3_param)
    bw = filament_3d_wrapper(structure_img_smooth, f3_param)

    print('=== calling remove_small_objects() minArea:', minArea)
    seg = remove_small_objects(bw > 0,
                               min_size=minArea,
                               connectivity=1,
                               in_place=False)

    # optional numeric suffix for saved file names
    saveNumberStr = ''
    if saveNumber > 1:
        saveNumberStr = '_' + str(saveNumber)

    # binary mask -> uint8 with foreground = 255
    seg = seg > 0
    out = seg.astype(np.uint8)
    out[out > 0] = 255

    # save _dvMask
    maskPath = os.path.splitext(path)[0] + '_dvMask' + saveNumberStr + '.tif'
    print('=== saving 3D mask [WILL FAIL IF FILE EXISTS] as maskPath:',
          maskPath)
    try:
        writer = omeTifWriter.OmeTifWriter(maskPath)
        writer.save(out)
    except OSError:
        # deliberate best-effort: keep the existing file rather than overwrite
        print('    error: file already exists, did not resave, maskPath:',
              maskPath)

    # analyze skeleton: take the 3d mask and analyze it as a 1-pixel skeleton
    retDict0, mySkeleton = myAnalyzeSkeleton(out=out, imagePath=path)
    retDict = OrderedDict()
    retDict['tifPath'] = path
    retDict['maskPath'] = maskPath
    retDict['tifFile'] = os.path.basename(path)
    retDict['xVoxel'] = xVoxel
    retDict['yVoxel'] = yVoxel
    retDict['zVoxel'] = zVoxel

    retDict['params'] = OrderedDict()
    retDict['params']['saveNumber'] = saveNumber
    # calculated in my_suggest_normalization_param
    retDict['params']['intensity_scaling_param'] = intensity_scaling_param
    # kludge: f3_param is a list of lists, which breaks the .csv output, so
    # only the first [scale, cutoff] pair is recorded
    retDict['params']['f3_param'] = f3_param[0]
    retDict['params']['minArea'] = minArea

    retDict.update(retDict0)

    # save 1-pixel skeleton (mySkeleton) as _dvSkel
    skelPath = os.path.splitext(path)[0] + '_dvSkel' + saveNumberStr + '.tif'
    print('=== saving 3D skel [WILL FAIL IF FILE EXISTS] as maskPath:',
          skelPath)
    try:
        writer = omeTifWriter.OmeTifWriter(skelPath)
        writer.save(mySkeleton)
    except OSError:
        print('    error: file already exists, did not resave, skelPath:',
              skelPath)

    return retDict
Ejemplo n.º 4
0
def myRun(path,
          myCellNumber,
          genotype,
          sex,
          saveBase='/Users/cudmore/Desktop/samiVolume2',
          f3_param=None,
          minArea=20,
          verbose=False):
    """Pre-process a raw _ch2.tif with aicssegmentation, make/save a 3D mask
    and a 1-pixel skeleton, and return a dict of analysis results.

    Args:
        path: path to raw tif, the _ch2.tif
        myCellNumber: cell number from batch file (per genotype/sex),
            NOT unique across different (genotype, sex)
        genotype: genotype label, stored verbatim in the returned dict
        sex: sex label, stored verbatim in the returned dict
        saveBase: full path to folder to save to
            (e.g. /Users/cudmore/Desktop/samiVolume2); created if missing
        f3_param: flat [scale, cutoff] pair for filament_3d_wrapper
            (wrapped in a list before the call). Defaults to [1, 0.01].
        minArea: minimum object size (voxels) kept by remove_small_objects
        verbose: print extra progress messages

    Returns:
        OrderedDict of metadata, parameters, and skeleton analysis results,
        or None when *path* does not exist.
    """
    # Avoid the mutable-default-argument pitfall; same effective default.
    if f3_param is None:
        f3_param = [1, 0.01]

    print('  === myRun() path:', path, 'saveBase:', saveBase, 'f3_param:',
          f3_param, 'minArea:', minArea)

    # build an output base path mirroring the input layout under saveBase
    tmpPath, tmpFileName = os.path.split(path)
    tmpPath = tmpPath.replace('../data/', '')
    # os.path.splitext is robust to file names with extra dots; the previous
    # tmpFileName.split('.') raised ValueError on e.g. 'cell.1_ch2.tif'
    tmpFileNameNoExtension = os.path.splitext(tmpFileName)[0]
    saveBase = os.path.join(saveBase, tmpPath)
    if not os.path.isdir(saveBase):
        print('    making output dir:', saveBase)
        os.makedirs(saveBase)
    saveBase = os.path.join(saveBase, tmpFileNameNoExtension)
    if verbose: print('    saveBase:', saveBase)

    if not os.path.isfile(path):
        print('ERROR: myRun() did not find file:', path)
        return None

    # load the data
    IMG, tifHeader = bimpy.util.bTiffFile.imread(path)

    # save a copy of the raw data under saveBase
    saveDataPath = saveBase + '.tif'
    print('   === saving raw data to saveDataPath:', saveDataPath)
    bimpy.util.bTiffFile.imsave(saveDataPath,
                                IMG,
                                tifHeader=tifHeader,
                                overwriteExisting=True)

    IMG = IMG.astype(np.float32)

    # channel 1: load then save (do nothing else with it)
    channelOnePath = path.replace('_ch2.tif', '_ch1.tif')
    channelOneData, channelOneTiffHeader = bimpy.util.bTiffFile.imread(
        channelOnePath)
    saveChannelOnePath = saveBase.replace('_ch2', '_ch1.tif')
    # bug fix: save channel 1 with its own header; previously the channel-2
    # header was passed and channelOneTiffHeader was silently discarded
    bimpy.util.bTiffFile.imsave(saveChannelOnePath,
                                channelOneData,
                                tifHeader=channelOneTiffHeader,
                                overwriteExisting=True)

    # load x/y/z voxel size (assumes .tif was saved with Fiji)
    xVoxel, yVoxel, zVoxel = readVoxelSize(path)
    print('    file:', os.path.basename(path), 'has shape:', IMG.shape,
          'xVoxel:', xVoxel, 'yVoxel:', yVoxel, 'zVoxel:', zVoxel)

    # give us a guess for our intensity_scaling_param parameters
    low_ratio, high_ratio = my_suggest_normalization_param(IMG)
    intensity_scaling_param = [low_ratio, high_ratio]
    if verbose:
        print('    === my_intensity_normalization() intensity_scaling_param:',
              intensity_scaling_param)

    # intensity normalization
    if verbose: print('    === calling my_intensity_normalization()')
    struct_img = my_intensity_normalization(
        IMG, scaling_param=intensity_scaling_param)

    # smoothing with edge preserving smoothing
    if verbose: print('    === calling edge_preserving_smoothing_3d()')
    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

    # see: notebooks/playground_filament3d.ipynb
    # f3_param is [scale, cutoff]: scale tracks the estimated filament
    # thickness; cutoff thresholds the filter response — smaller yields
    # more/thicker filaments, larger yields fewer/slimmer ones.
    if verbose:
        print('    === calling filament_3d_wrapper() f3_param:', f3_param)
    bw = filament_3d_wrapper(structure_img_smooth,
                             [f3_param])  # wrapper expects a list of lists

    if verbose:
        print('   === calling remove_small_objects() minArea:', minArea)
    seg = remove_small_objects(bw > 0,
                               min_size=minArea,
                               connectivity=1,
                               in_place=False)

    # saveNumber suffix is no longer used; kept empty for name compatibility
    saveNumberStr = ''

    # save _dvMask.tif: binary mask -> uint8 with foreground = 255
    seg = seg > 0
    out = seg.astype(np.uint8)
    out[out > 0] = 255

    maskPath = saveBase + '_dvMask' + saveNumberStr + '.tif'
    print('   === saving 3D mask as maskPath:', maskPath)
    bimpy.util.bTiffFile.imsave(maskPath,
                                out,
                                tifHeader=tifHeader,
                                overwriteExisting=True)

    # ################
    # analyze skeleton, take a 3d mask and analyze as a 1-pixel skeleton
    retDict0, mySkeleton = myAnalyzeSkeleton(out=out,
                                             imagePath=path,
                                             saveBase=saveBase)
    # ################
    retDict = OrderedDict()
    retDict['analysisDate'] = datetime.today().strftime('%Y%m%d')

    retDict['saveBase'] = saveBase
    retDict['myCellNumber'] = myCellNumber
    retDict['genotype'] = genotype
    retDict['sex'] = sex

    retDict['path'] = path  # 20200713 working on parallel
    retDict['tifPath'] = path
    retDict['maskPath'] = maskPath
    retDict['tifFile'] = os.path.basename(path)
    retDict['xVoxel'] = xVoxel
    retDict['yVoxel'] = yVoxel
    retDict['zVoxel'] = zVoxel

    retDict['params'] = OrderedDict()
    # calculated in my_suggest_normalization_param
    retDict['params']['intensity_scaling_param'] = intensity_scaling_param
    # kludge: a nested f3_param list breaks the .csv output, so the flat
    # [scale, cutoff] pair is recorded here
    retDict['params']['f3_param'] = f3_param
    retDict['params']['minArea'] = minArea

    # retDict0 has 'keys' that are lists of ('len3d', 'eLen', 'branchType')
    retDict.update(retDict0)

    # save 1-pixel skeleton (mySkeleton) as _dvSkel
    skelPath = saveBase + '_dvSkel' + saveNumberStr + '.tif'
    print('    === saving 3D skel as maskPath:', skelPath)
    bimpy.util.bTiffFile.imsave(skelPath,
                                mySkeleton,
                                tifHeader=tifHeader,
                                overwriteExisting=True)

    return retDict