def main(
    input_dir: Path,
    ball_radius: int,
    light_background: bool,
    output_dir: Path,
) -> None:
    """Main execution function.

    Args:
        input_dir: path to directory containing the input images.
        ball_radius: radius of ball to use for the rolling-ball algorithm.
        light_background: whether the image has a light or dark background.
        output_dir: path to directory where to store the output images.
    """
    for in_path in input_dir.iterdir():
        out_path = output_dir.joinpath(in_path.name)

        # Load the input image
        with BioReader(in_path) as reader:
            logger.info(f'Working on {in_path.name} with shape {reader.shape}')

            # Initialize the output image
            with BioWriter(out_path,
                           metadata=reader.metadata,
                           max_workers=cpu_count()) as writer:
                rolling_ball(
                    reader=reader,
                    writer=writer,
                    ball_radius=ball_radius,
                    light_background=light_background,
                )
def write_thread(out_file_path: Path,
                 data: np.ndarray,
                 metadata: OmeXml,
                 chan_name: str):
    """Thread for saving images.

    This function is intended to be run inside a thread pool to save an image.

    Args:
        out_file_path (Path): Path to an output file
        data (np.ndarray): FOV to save
        metadata (OmeXml): Metadata for the image
        chan_name (str): Name of the channel
    """
    ProcessManager.log(f'Writing: {out_file_path.name}')

    with BioWriter(out_file_path, metadata=metadata) as bw:
        bw.X = data.shape[1]
        bw.Y = data.shape[0]
        bw.Z = 1
        bw.C = 1
        bw.cnames = [chan_name]
        bw[:] = data
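# A minimal usage sketch (an assumption, not from the original source): this
# shows how write_thread could be dispatched through preadator's
# ProcessManager thread pool, mirroring the submit_thread/join_threads
# pattern used in process_image below. `tiles`, `out_dir`, and `metadata`
# are hypothetical placeholders.
for chan_name, data in tiles.items():  # hypothetical dict: channel name -> np.ndarray
    ProcessManager.submit_thread(write_thread,
                                 out_dir.joinpath(f'{chan_name}.ome.tif'),
                                 data, metadata, chan_name)
ProcessManager.join_threads()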
def process_image(input_img_path, output_img_path, projection, method):
    # Grab a free process
    with ProcessManager.process():

        # Initialize the BioReader and BioWriter
        with BioReader(input_img_path,
                       max_workers=ProcessManager._active_threads) as br, \
             BioWriter(output_img_path,
                       metadata=br.metadata,
                       max_workers=ProcessManager._active_threads) as bw:

            # The output image is 2D
            bw.Z = 1

            # Iterate along the x,y directions
            for x in range(0, br.X, tile_size):
                x_max = min([br.X, x + tile_size])

                for y in range(0, br.Y, tile_size):
                    y_max = min([br.Y, y + tile_size])

                    ProcessManager.submit_thread(projection,
                                                 br, bw,
                                                 (x, x_max), (y, y_max),
                                                 method=method)

            ProcessManager.join_threads()
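# A minimal sketch (an assumption, not part of the original plugin) of a
# projection callable matching the signature process_image passes to
# submit_thread. It reads the full z-stack for one XY tile and writes its
# maximum-intensity projection; `method` is accepted but unused here, and
# numpy is assumed to be imported as np.
def max_projection(br, bw, x_range, y_range, method=None):
    x, x_max = x_range
    y, y_max = y_range

    # Read all z-slices for this tile: shape (Y, X, Z)
    tile = br[y:y_max, x:x_max, 0:br.Z, 0, 0]

    # Collapse the z-axis and write the 2D result to the output
    bw[y:y_max, x:x_max, 0:1, 0, 0] = np.max(tile, axis=2, keepdims=True)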
def image_to_zarr(inp_image: Path, out_dir: Path) -> None:

    with ProcessManager.process():

        with BioReader(inp_image) as br:

            # Loop through timepoints
            for t in range(br.T):

                # Loop through channels
                for c in range(br.C):

                    extension = "".join([
                        suffix for suffix in inp_image.suffixes[-2:]
                        if len(suffix) < 5
                    ])

                    out_path = out_dir.joinpath(
                        inp_image.name.replace(extension, FILE_EXT))
                    if br.C > 1:
                        out_path = out_dir.joinpath(
                            out_path.name.replace(FILE_EXT, f"_c{c}" + FILE_EXT))
                    if br.T > 1:
                        out_path = out_dir.joinpath(
                            out_path.name.replace(FILE_EXT, f"_t{t}" + FILE_EXT))

                    with BioWriter(
                            out_path,
                            max_workers=ProcessManager._active_threads,
                            metadata=br.metadata,
                    ) as bw:
                        bw.C = 1
                        bw.T = 1
                        bw.channel_names = [br.channel_names[c]]

                        # Loop through z-slices
                        for z in range(br.Z):

                            # Loop across the height of the image
                            for y in range(0, br.Y, TILE_SIZE):
                                y_max = min([br.Y, y + TILE_SIZE])

                                bw.max_workers = ProcessManager._active_threads
                                br.max_workers = ProcessManager._active_threads

                                # Loop across the width of the image
                                for x in range(0, br.X, TILE_SIZE):
                                    x_max = min([br.X, x + TILE_SIZE])

                                    bw[y:y_max, x:x_max, z:z + 1, 0, 0] = \
                                        br[y:y_max, x:x_max, z:z + 1, c, t]
def init_zarr_file(path: Path, ndims: int, metadata: Any):
    with BioWriter(path, metadata=metadata) as writer:
        writer.dtype = numpy.float32
        writer.C = ndims + 2

        if ndims == 2:
            writer.channel_names = ['cell_probability', 'flow_y', 'flow_x', 'labels']
        else:
            writer.channel_names = ['cell_probability', 'flow_z', 'flow_y', 'flow_x', 'labels']

        # noinspection PyProtectedMember
        writer._backend._init_writer()
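# A brief usage sketch (hypothetical, for illustration only): pre-initializing
# the zarr output for a 2D image before computing flows, following the
# '_flow' / '.zarr' naming pattern used elsewhere in this collection.
# `in_path` and `out_dir` are placeholder names.
with BioReader(in_path) as reader:
    out_path = out_dir.joinpath(in_path.name.replace('.ome.tif', '_flow.ome.zarr'))
    init_zarr_file(out_path, ndims=2, metadata=reader.metadata)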
def _merge_layers(input_dir, input_files, output_dir, output_file):
    # Sorted list of filenames by z-value
    zs = [z for z in input_files.keys()]
    zs.sort()

    # Initialize the output file
    br = BioReader(
        str(Path(input_dir).joinpath(input_files[zs[0]][0]).absolute()))
    bw = BioWriter(str(Path(output_dir).joinpath(output_file).absolute()),
                   metadata=br.read_metadata())
    bw.num_z(Z=len(zs))
    del br

    # Load each image and save to the volume file
    for i, z in enumerate(zs):
        br = BioReader(
            str(Path(input_dir).joinpath(input_files[z][0]).absolute()))
        bw.write_image(br.read_image(), Z=[i, i + 1])
        del br

    # Close the output image and delete
    bw.close_image()
    del bw
{% endfor -%}
{% for inp,val in cookiecutter._inputs|dictsort -%}
{% for out,n in cookiecutter._outputs|dictsort -%}
{% if val.type=="collection" and cookiecutter.use_bfio -%}
        # Loop through files in {{ inp }} image collection and process
        for i,f in enumerate({{ inp }}_files):
            # Load an image
            br = BioReader(Path({{ inp }}).joinpath(f))
            image = np.squeeze(br.read_image())

            # Initialize the output
            out_image = np.zeros(image.shape,dtype=br._pix['type'])

            """ Do some math and science - you should replace this """
            logger.info('Processing image ({}/{}): {}'.format(i,len({{ inp }}_files),f))
            out_image = awesome_math_and_science_function(image)

            # Write the output
            bw = BioWriter(Path({{ out }}).joinpath(f),metadata=br.read_metadata())
            bw.write_image(np.reshape(out_image,(br.num_y(),br.num_x(),br.num_z(),1,1)))
{%- endif %}{% endfor %}{% endfor %}
    finally:
        {%- if cookiecutter.use_bfio %}
        # Close the javabridge regardless of successful completion
        logger.info('Closing the javabridge')
        jutil.kill_vm()
        {%- endif %}

        # Exit the program
        sys.exit()
def write_ome_tiffs(file_path, out_path):
    if Path(file_path).suffix != '.czi':
        raise TypeError("Path must be to a czi file.")

    base_name = Path(Path(file_path).name).stem
    czi = czifile.CziFile(file_path, detectmosaic=False)
    subblocks = [
        s for s in czi.filtered_subblock_directory
        if s.mosaic_index is not None
    ]

    metadata_str = czi.metadata(True)
    metadata = czi.metadata(False)['ImageDocument']['Metadata']

    chan_name = _get_channel_names(metadata)
    pix_size = _get_physical_dimensions(metadata_str)

    ind = {'X': [], 'Y': [], 'Z': [], 'C': [], 'T': [], 'Row': [], 'Col': []}

    for s in subblocks:
        scene = [
            dim.start for dim in s.dimension_entries if dim.dimension == 'S'
        ]
        if len(scene) > 0 and scene[0] != 0:
            continue

        for dim in s.dimension_entries:
            if dim.dimension == 'X':
                ind['X'].append(dim.start)
            elif dim.dimension == 'Y':
                ind['Y'].append(dim.start)
            elif dim.dimension == 'Z':
                ind['Z'].append(dim.start)
            elif dim.dimension == 'C':
                ind['C'].append(dim.start)
            elif dim.dimension == 'T':
                ind['T'].append(dim.start)

    row_conv = {
        y: row
        for (y, row) in zip(np.unique(np.sort(ind['Y'])),
                            range(0, len(np.unique(ind['Y']))))
    }
    col_conv = {
        x: col
        for (x, col) in zip(np.unique(np.sort(ind['X'])),
                            range(0, len(np.unique(ind['X']))))
    }

    ind['Row'] = [row_conv[y] for y in ind['Y']]
    ind['Col'] = [col_conv[x] for x in ind['X']]

    for i, s in enumerate(subblocks):
        dims = [
            _get_image_dim(s, 'Y'),
            _get_image_dim(s, 'X'),
            _get_image_dim(s, 'Z'),
            _get_image_dim(s, 'C'),
            _get_image_dim(s, 'T')
        ]
        data = s.data_segment().data().reshape(dims)

        Z = None if len(ind['Z']) == 0 else ind['Z'][i]
        C = None if len(ind['C']) == 0 else ind['C'][i]
        T = None if len(ind['T']) == 0 else ind['T'][i]

        out_file_path = os.path.join(
            out_path,
            _get_image_name(base_name,
                            row=ind['Row'][i],
                            col=ind['Col'][i],
                            Z=Z, C=C, T=T))

        bw = BioWriter(out_file_path, data)
        bw.channel_names([chan_name[C]])
        bw.physical_size_x(pix_size['X'], 'µm')
        bw.physical_size_y(pix_size['Y'], 'µm')
        if pix_size['Z'] is not None:
            bw.physical_size_z(pix_size['Z'], 'µm')
        bw.write_image(data)
        bw.close_image()
# Generate output image name based on filename pattern variables
out_dict = {}
if 'r' in variables:
    out_dict['r'] = R
if 't' in variables:
    out_dict['t'] = T
if 'c' in variables:
    out_dict['c'] = C
base_output = output_name(inp_regex,
                          [i for i in test.get_matching(R=R, T=T, C=C)],
                          out_dict)

# Export the flatfield image as a tiled tiff
flatfield_out = base_output.replace('.ome.tif', '_flatfield.ome.tif')
bw = BioWriter(str(output_dir.joinpath(flatfield_out)))
bw.pixel_type('float')
bw.num_x(X)
bw.num_y(Y)
bw.write_image(np.reshape(flatfield, (Y, X, 1, 1, 1)))
bw.close_image()

# Export the darkfield image as a tiled tiff
if new_options['darkfield']:
    darkfield_out = base_output.replace('.ome.tif', '_darkfield.ome.tif')
    bw = BioWriter(str(output_dir.joinpath(darkfield_out)))
    bw.pixel_type('float')
    bw.num_x(X)
    bw.num_y(Y)
    bw.write_image(np.reshape(darkfield, (Y, X, 1, 1, 1)))
    bw.close_image()
def generate_data(self, input, wipp_type, imagej_type):
    numpy_types = {
        "double": np.float64,
        "float": np.float32,
        "long": np.int64,     # np.int64 not supported by bfio
        "int": np.int32,
        "short": np.int16,
        "char": np.ubyte,     # np.ubyte not supported by bfio
        "byte": np.int8,
        "boolean": np.bool_,  # np.bool_ not supported by bfio
    }

    if wipp_type is None:
        return None

    # Determine if the input data type is a collection
    elif wipp_type == "collection":
        if imagej_type is None:
            dtype = np.double
        elif imagej_type in numpy_types.keys():
            dtype = numpy_types[imagej_type]
        else:
            dtype = np.double

        # Create input and output path objects for the randomly generated image file
        input_path = Path(__file__).parent.joinpath(
            "{}/random.ome.tif".format(input))
        # self.outputPath = Path(__file__).parent.joinpath('output/random.ome.tif')

        # Check if "input" is a sub-directory of "tests"
        if input_path.parent.exists():
            # Remove the "input" sub-directory
            shutil.rmtree(input_path.parent)

        # Create input and output sub-directories in tests
        os.mkdir(input_path.parent)

        """Using auto-generated images"""
        # Create a random image to be used for plugin testing
        infile = None
        outfile = None
        image_size = 2048
        image_shape = (image_size, image_size)
        random_image = np.random.randint(low=0,
                                         high=255,
                                         size=image_shape,
                                         dtype=np.uint16)
        array = dtype(random_image)

        # Create a BioWriter object to write the randomly generated image file
        # to the tests/input dir
        with BioWriter(input_path) as writer:
            writer.X = image_shape[0]
            writer.Y = image_shape[1]
            writer.dtype = array.dtype
            writer[:] = array[:]
            # Not necessary: writer.close()

        """Using sample images"""
        # # TODO: use ImageJ sample data for unit testing

        # # Get input source directory
        # test_path = Path(__file__)
        # input_path = test_path.with_name('input')

        # # Create input directory in plugin test directory path
        # input_path = Path(__file__).with_name(input)

        # # Check if the input path already exists as a sub-directory of "tests"
        # if input_path.exists():
        #     # Remove the "input" sub-directory
        #     shutil.rmtree(input_path)

        # # Copy sample images to input folder
        # shutil.copytree(sample_dir, input_path)

        return input_path.parent

    elif wipp_type == "array":
        # arr = np.random.rand(2048,2048)
        arr = "1,2"
        return arr

    elif wipp_type == "number":
        number = np.random.randint(5)
        return number

    else:
        self.logger.info(
            "FAILURE: The data type, {}, of input, {}, is currently not supported\n"
            .format(wipp_type, input))
        raise TypeError("The input data type is not currently supported")
{%- if cookiecutter.use_bfio == "True" %}
{%- filter indent(level2,True) %}
logger.info(f'Processing image: {file["file"]}')

# Load the input image
logger.debug(f'Initializing BioReader for {file["file"]}')
with BioReader(file['file']) as br:

    input_extension = ''.join([s for s in file['file'].suffixes[-2:] if len(s) < 5])
    out_name = file['file'].name.replace(input_extension,POLUS_EXT)
    out_path = {{ cookiecutter._outputs.keys()|first }}.joinpath(out_name)

    # Initialize the output image
    logger.debug(f'Initializing BioWriter for {out_path}')
    with BioWriter(out_path,metadata=br.metadata) as bw:

        # This is where the magic happens, replace this part with your method
        bw[:] = awesome_function(br[:])
{%- endfilter %}
{%- endif %}

if __name__ == "__main__":

    ''' Argument parsing '''
    logger.info("Parsing arguments...")
    parser = argparse.ArgumentParser(prog='main',
                                     description='{{ cookiecutter.project_short_description }}')

    # Input arguments
    {% for inp,val in cookiecutter._inputs.items() -%}
    parser.add_argument('--{{ inp }}', dest='{{ inp }}', type=str,
                                     reference_image_downscaled,
                                     max_val, min_val, method)

# Upscale the rough homography matrix
logger.info("Inverting homography...")
if method == 'Projective':
    Rough_Homography_Upscaled = Rough_Homography_Downscaled * scale_matrix
    homography_inverse = np.linalg.inv(Rough_Homography_Upscaled)
else:
    Rough_Homography_Upscaled = Rough_Homography_Downscaled
    homography_inverse = cv2.invertAffineTransform(Rough_Homography_Downscaled)

# Initialize the output file
bw = BioWriter(str(Path(outDir).joinpath(Path(registration_set[1]).name)),
               metadata=br_mov.read_metadata(),
               max_workers=write_workers)
bw.num_x(br_ref.num_x())
bw.num_y(br_ref.num_y())
bw.num_z(1)
bw.num_c(1)
bw.num_t(1)

# Transformation variables
reg_shape = []
reg_tiles = []
reg_homography = []

# Loop through image tiles and start threads
logger.info("Starting threads...")
threads = []
first_tile = True
def main(
    _opName: str,
    _in1: Path,
    _sigma: str,
    _calibration: str,
    _out: Path,
) -> None:
    """Initialize ImageJ"""

    # Bioformats throws a debug message, disable the loci debugger to mute it
    def disable_loci_logs():
        DebugTools = scyjava.jimport("loci.common.DebugTools")
        DebugTools.setRootLevel("WARN")

    scyjava.when_jvm_starts(disable_loci_logs)

    # This is the version of ImageJ pre-downloaded into the docker container
    logger.info("Starting ImageJ...")
    ij = imagej.init("sc.fiji:fiji:2.1.1+net.imagej:imagej-legacy:0.37.4",
                     headless=True)
    # ij_converter.ij = ij
    logger.info("Loaded ImageJ version: {}".format(ij.getVersion()))

    """ Validate and organize the inputs """
    args = []
    argument_types = []
    arg_len = 0

    # Validate opName
    opName_values = [
        "DefaultTubeness",
    ]
    assert _opName in opName_values, "opName must be one of {}".format(
        opName_values)

    # Validate in1
    in1_types = {
        "DefaultTubeness": "RandomAccessibleInterval",
    }

    # Check that all inputs are specified
    if _in1 is None and _opName in list(in1_types.keys()):
        raise ValueError("{} must be defined to run {}.".format(
            "in1", _opName))
    elif _in1 is not None:
        in1_type = in1_types[_opName]

        # Switch to the images folder if present
        if _in1.joinpath("images").is_dir():
            _in1 = _in1.joinpath("images").absolute()

        args.append([f for f in _in1.iterdir() if f.is_file()])
        arg_len = len(args[-1])
    else:
        argument_types.append(None)
        args.append([None])

    # Validate sigma
    sigma_types = {
        "DefaultTubeness": "double",
    }

    # Check that all inputs are specified
    if _sigma is None and _opName in list(sigma_types.keys()):
        raise ValueError("{} must be defined to run {}.".format(
            "sigma", _opName))
    else:
        sigma = None

    # Validate calibration
    calibration_types = {
        "DefaultTubeness": "double[]",
    }

    # Check that all inputs are specified
    if _calibration is None and _opName in list(calibration_types.keys()):
        raise ValueError("{} must be defined to run {}.".format(
            "calibration", _opName))
    else:
        calibration = None

    for i in range(len(args)):
        if len(args[i]) == 1:
            args[i] = args[i] * arg_len

    """ Set up the output """
    out_types = {
        "DefaultTubeness": "IterableInterval",
    }

    """ Run the plugin """
    try:
        for ind, (in1_path, ) in enumerate(zip(*args)):
            if in1_path is not None:

                # Load the first plane of image in in1 collection
                logger.info("Processing image: {}".format(in1_path))
                in1_br = BioReader(in1_path)

                # Convert to appropriate numpy array
                in1 = ij_converter.to_java(ij,
                                           np.squeeze(in1_br[:, :, 0:1, 0, 0]),
                                           in1_type)
                metadata = in1_br.metadata
                fname = in1_path.name
                dtype = ij.py.dtype(in1)

            if _sigma is not None:
                sigma = ij_converter.to_java(ij, _sigma, sigma_types[_opName],
                                             dtype)

            if _calibration is not None:
                calibration = ij_converter.to_java(ij, _calibration,
                                                   calibration_types[_opName],
                                                   dtype)

            logger.info("Running op...")
            if _opName == "DefaultTubeness":
                out = ij.op().filter().tubeness(in1, sigma, calibration)
            logger.info("Completed op!")

            if in1_path is not None:
                in1_br.close()

            # Save the output file to out
            logger.info("Saving...")
            out_array = ij_converter.from_java(ij, out, out_types[_opName])
            bw = BioWriter(_out.joinpath(fname), metadata=metadata)
            bw.Z = 1
            bw.dtype = out_array.dtype
            bw[:] = out_array.astype(bw.dtype)
            bw.close()

    except:
        logger.error("There was an error, shutting down jvm before raising...")
        raise

    finally:
        # Exit the program
        logger.info("Shutting down jvm...")
        del ij
        jpype.shutdownJVM()
        logger.info("Complete!")
def main(inpDir: Path, outDir: Path, filePattern: str = None) -> None:
    """Turn labels into flow fields.

    Args:
        inpDir: Path to the input directory
        outDir: Path to the output directory
    """

    # Use a gpu if it's available
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        dev = torch.device("cuda")
    else:
        dev = torch.device("cpu")
    logger.info(f'Running on: {dev}')

    # Determine the number of threads to run on
    num_threads = max([cpu_count() // 2, 1])
    logger.info(f'Number of threads: {num_threads}')

    # Get all file names in inpDir image collection based on input pattern
    if filePattern:
        fp = filepattern.FilePattern(inpDir, filePattern)
        inpDir_files = [file[0]['file'].name for file in fp()]
        logger.info('Processing %d labels based on filepattern ' %
                    (len(inpDir_files)))
    else:
        inpDir_files = [f.name for f in Path(inpDir).iterdir() if f.is_file()]

    # Loop through files in inpDir image collection and process
    processes = []

    if use_gpu:
        executor = ThreadPoolExecutor(num_threads)
    else:
        executor = ProcessPoolExecutor(num_threads)

    for f in inpDir_files:
        br = BioReader(Path(inpDir).joinpath(f).absolute())
        out_file = Path(outDir).joinpath(
            f.replace('.ome', '_flow.ome').replace('.tif', '.zarr')).absolute()
        bw = BioWriter(out_file, metadata=br.metadata)
        bw.C = 4
        bw.dtype = np.float32
        bw.channel_names = ['cell_probability', 'x', 'y', 'labels']

        bw._backend._init_writer()

        for z in range(br.Z):
            for x in range(0, br.X, TILE_SIZE):
                for y in range(0, br.Y, TILE_SIZE):
                    processes.append(
                        executor.submit(flow_thread,
                                        Path(inpDir).joinpath(f).absolute(),
                                        out_file, use_gpu, dev, x, y, z))
        bw.close()
        br.close()

    done, not_done = wait(processes, 0)
    logger.info(f'Percent complete: {100 * len(done) / len(processes):6.3f}%')

    while len(not_done) > 0:
        for r in done:
            r.result()
        done, not_done = wait(processes, 5)
        logger.info(
            f'Percent complete: {100 * len(done) / len(processes):6.3f}%')

    executor.shutdown()
            if file['c'] == c:
                paths.append(file)
                break

    # Make sure that files were found in the current loop
    if len(paths) == 0:
        continue

    # Initialize the output file
    br = BioReader(paths[0]['file'])
    file_name = filepattern.output_name(
        filePattern, paths,
        {c: paths[0][c] for c in fp.variables if c != 'c'})
    logger.info('Writing: {}'.format(file_name))
    bw = BioWriter(str(Path(outDir).joinpath(file_name)),
                   metadata=br.read_metadata())
    del br

    # Modify the metadata to make sure channels are written correctly
    bw.num_c(len(paths))
    bw._metadata.image().Pixels.channel_count = bw.num_c()

    # Process the data in tiles
    threads = []
    count = 0
    total = bw.num_c() * bw.num_z() * \
        (bw.num_x() // chunk_size + 1) * (bw.num_y() // chunk_size + 1)

    with ThreadPoolExecutor(cpu_count()) as executor:
        for c, file in enumerate(paths):
            br = BioReader(file['file'])
            C = [c]
    dark_image = np.zeros(flat_image.shape, dtype=np.float32)

    if photobleach is not None:
        with open(photobleach, 'r') as f:
            reader = csv.reader(f)
            photo_offset = {
                line[0]: float(line[1])
                for line in reader if line[0] != 'file'
            }
        offset = np.mean([o for o in photo_offset.values()])

    ''' Apply flatfield to images '''
    for f in images.iterate(R=R, C=C, T=T):
        p = Path(f[0]['file'])
        logger.info("Applying flatfield to image: {}".format(p.name))

        br = BioReader(str(p.absolute()))
        image = br.read_image()
        if photobleach is not None:
            new_image = _unshade(np.squeeze(image),
                                 flat_image,
                                 dark_image,
                                 photo_offset[p.name],
                                 offset=offset)
        else:
            new_image = _unshade(np.squeeze(image), flat_image, dark_image)

        bw = BioWriter(str(Path(outDir).joinpath(p.name).absolute()),
                       metadata=br.read_metadata())
        bw.write_image(np.reshape(new_image, image.shape))
        bw.close_image()
        del br

    ''' Close the javabridge '''
    logger.info("Closing the javabridge and ending process...")
    jutil.kill_vm()