def main():
    """Stitch a normalized MicroManager ELISA stack and write a saturation map.

    Stage-position metadata is read from the original MicroManager stack,
    the normalized image data is stitched into one canvas, and pixels that
    were saturated in the original (== 4095, the 12-bit maximum) are
    marked in a separate binary map.
    """
    parser = argparse.ArgumentParser(description=_description)
    parser.add_argument('--output', '-o', metavar='stitched_elisa.tif',
                        default='stitched_elisa.tif')
    parser.add_argument('--saturationmap', '-s', metavar='saturated_elisa.tif',
                        default='saturated_elisa.tif')
    parser.add_argument(
        'original_image',
        help="Original MicroManager stack (used for metadata)")
    parser.add_argument(
        'normalized_image',
        help="Normalized image (used for image data)")
    args = parser.parse_args()
    orig_fn = args.original_image
    bgsub_fn = args.normalized_image
    # FIX: was a Python 2 print statement; the rest of this function
    # already uses the print() function.
    print('Reading metadata.')
    with open(orig_fn, 'rb') as f:
        metadata = tf.read_micromanager_metadata(f)
    print('Loading original image.')
    with tf.TiffFile(bgsub_fn) as bgsub_img:
        # Stack all pages into one (pages, H, W) array.
        bgsub = np.concatenate([page.asarray()[np.newaxis, :]
                                for page in bgsub_img.pages])
    canvas = stitch(metadata, bgsub)
    print('Saving image.')
    tf.imsave(args.output, canvas)
    # Free the large arrays before loading the original stack.
    del canvas, bgsub
    print('Computing saturation map.')
    with tf.TiffFile(orig_fn) as orig_img:  # renamed from bgsub_img: it holds the original
        orig = np.concatenate([page.asarray()[np.newaxis, :]
                               for page in orig_img.pages])
    saturated = (orig == 4095)  # 4095 = max value of a 12-bit sensor
    # Any saturated pixel in any contributing tile marks the stitched pixel.
    canvas = (stitch(metadata, saturated) > 0).astype(np.uint8)
    tf.imsave(args.saturationmap, canvas)
def test_tiff_image_entry_creation(resource, expected_error_message, voxel_size):
    """Check TIFF image-entry creation and the metadata it extracts."""
    error_message = ""
    image_entry = None
    gc_file = GrandChallengeTiffFile(resource)
    try:
        opened = tifffile.TiffFile(str(gc_file.path.absolute()))
        gc_file = _extract_tags(gc_file=gc_file, pages=opened.pages)
        image_entry = _create_tiff_image_entry(tiff_file=gc_file)
    except ValidationError as err:
        error_message = str(err)

    # Asserts possible file opening failures
    assert expected_error_message in error_message

    if not expected_error_message:
        # No failure expected: nothing may have been raised...
        assert not error_message
        # ...and the created entry must mirror the raw TIFF tags.
        raw = tiff_lib.TiffFile(str(resource.absolute()))
        tags = raw.pages[0].tags
        assert image_entry.name == resource.name
        assert image_entry.width == tags["ImageWidth"].value
        assert image_entry.height == tags["ImageLength"].value
        assert image_entry.depth == 1
        assert image_entry.resolution_levels == len(raw.pages)
        assert image_entry.color_space == _get_color_space(
            color_space_string=str(tags["PhotometricInterpretation"].value))
        assert image_entry.voxel_width_mm == approx(voxel_size[0])
        assert image_entry.voxel_height_mm == approx(voxel_size[1])
        assert image_entry.voxel_depth_mm == voxel_size[2]
        assert image_entry.pk == gc_file.pk
def read_tifffile(path, silent_fail=True):
    """Open *path* as a tifffile.TiffFile.

    If opening raises a TiffFileError, return None when *silent_fail* is
    true; otherwise let the error propagate to the caller.
    """
    try:
        return tifffile.TiffFile(path)
    except tifffile.TiffFileError:
        if silent_fail:
            return None
        raise
def __init__(self, frontend):
    """Initialize the python TIFF reading backend.

    Opens the file with tifffile and verifies that it is a tiled TIFF
    with the tile size this backend expects; otherwise raises and directs
    the user to the java backend.

    Raises:
        TypeError: the image is untiled and larger than one tile.
        ValueError: the image is tiled with an unsupported tile size and
            larger than one tile.
    """
    super().__init__(frontend)
    self.logger.debug(
        "__init__(): Initializing _rdr (tifffile.TiffFile)...")
    self._rdr = tifffile.TiffFile(self.frontend._file_path)
    metadata = self.read_metadata()
    width = metadata.image().Pixels.get_SizeX()
    height = metadata.image().Pixels.get_SizeY()
    for tag in self._rdr.pages[0].tags:
        # NOTE(review): uses the module-level ``logger`` while the call
        # above uses ``self.logger`` — confirm this is intentional.
        logger.debug(tag)
    if not self._rdr.pages[0].is_tiled:
        # FIX: the second operand previously re-tested ``width``, so the
        # image height was never compared against the tile size (the
        # tiled branch below checks both dimensions).
        if width > self.frontend._TILE_SIZE or height > self.frontend._TILE_SIZE:
            raise TypeError(
                frontend._file_path.name + " is not a tiled tiff." +
                " The python backend of the BioReader only " +
                "supports OME tiled tiffs. Use the java backend " +
                "to load this image.")
    elif self._rdr.pages[0].tilewidth != self.frontend._TILE_SIZE or \
            self._rdr.pages[0].tilelength != self.frontend._TILE_SIZE:
        if (width > frontend._TILE_SIZE or height > frontend._TILE_SIZE):
            raise ValueError(
                "Tile width and height should be {} when ".format(
                    self.frontend._TILE_SIZE) +
                "using the python backend, but found " +
                "tilewidth={} and tilelength={}. Use the java ".format(
                    self._rdr.pages[0].tilewidth,
                    self._rdr.pages[0].tilelength) +
                "backend to read this image.")
    # (removed dead local ``tile_size = None`` — it was never read)
    # Private member variables used for reading tiles
    self._keyframe = None  # tifffile object with decompression and chunking methods
    self._image = None  # output image buffer used for threaded tile reading
    self._tile_indices = None  # list that maps file chunks to XYZ coordinates
def main():
    """Normalize an image stack against a per-pixel background estimate.

    The background image is the given low percentile of each pixel taken
    across the whole stack (cached as background.tif). By default every
    frame is divided by the background and rescaled to 16 bits; with
    --subtract the background is subtracted and negatives clipped to 0.
    """
    parser = argparse.ArgumentParser(description=_description)
    parser.add_argument('--output', '-o', metavar='normalized.tif',
                        default='normalized.tif', help='Output filename')
    parser.add_argument(
        '--subtract', action='store_true',
        help='Subtract background instead of dividing and truncating')
    parser.add_argument('infile')
    args = parser.parse_args()
    infile = args.infile
    # FIX: all Python 2 print statements converted to the print()
    # function so this script runs under Python 3.
    print('Reading image stack')
    t = tf.TiffFile(infile)
    ar = tf.stack_pages(t.pages)
    n = ar.shape[0]
    percentile = 0.01 if args.subtract else 0.05
    if os.path.exists('background.tif'):
        print('Reading background image')
        bg = tf.imread('background.tif')
    else:
        print('Computing background image')
        sorted_ar = ar.copy()
        sorted_ar.sort(0)  # sort each pixel independently across frames
        bg = sorted_ar[int(round(percentile * n, 0))]
        print('Saving background image')
        tf.imsave('background.tif', bg)
        del sorted_ar  # free the full-stack copy
    print('Performing background normalization')
    if not args.subtract:
        ar = ar.astype(np.double)
        for i in range(n):
            ar[i] /= bg
        print('Converting to 16-bit TIFF')
        # Largest possible normalized value, assuming 12-bit input data
        # (4095 max) against the dimmest background pixel.
        max_normed = (4095.0 / bg.min()) - 1
        ar -= 1
        ar *= 65535
        ar /= max_normed
        ar = ar.round()
    else:
        ar = ar.astype(np.int16)
        for i in range(n):
            ar[i] -= bg
        ar[ar < 0] = 0  # clip negative residuals
    ar = ar.astype(np.uint16)
    print('Writing normalized image')
    with tf.TiffWriter(args.output) as out:
        for i in range(n):
            if (i % 100) == 0:
                # Progress indicator, all on one line.
                print(i, end=' ')
                sys.stdout.flush()
            out.save(ar[i])
    print()
def get_offsets(tiff_filepath):
    """Return the byte offset of every page in the TIFF at *tiff_filepath*."""
    with tifffile.TiffFile(tiff_filepath) as handle:
        return [page.offset for page in handle.pages]
def make_orthogonals(ortho, img_dir, padding, pixels_per_micron):
    """Compose combined orthogonal-view TIFFs for one XYZ point.

    For the point ``ortho = (x, y, z)``, matching xy/xz/yz slice images are
    loaded from ``img_dir``, cross-hair gap lines are drawn on the xy view,
    and each frame is written to ``img_dir`` as ``<index>.tif`` with the xz
    view below and the yz view to the right of the xy view, separated by
    ``padding`` pixels. Returns the height of the xz strip.

    If ``pixels_per_micron`` is None it is read from the first xy image's
    XResolution tag. Exits the process (``exit(1)``) when slices are
    missing or the view counts disagree.
    """
    x_idx = str(ortho[0])
    y_idx = str(ortho[1])
    z_idx = str(ortho[2])
    # Each view directory is keyed by the orthogonal coordinate: the xy
    # stack by z, xz by y, yz by x.
    xy_paths = list((img_dir / "xy").glob("*_z*" + z_idx + "_c*.tif"))
    xz_paths = list((img_dir / "xz").glob("*_z*" + y_idx + "_c*.tif"))
    yz_paths = list((img_dir / "yz").glob("*_z*" + x_idx + "_c*.tif"))
    xy_paths.sort()
    xz_paths.sort()
    yz_paths.sort()
    ## Get images
    xy_images = [
        cv2.imread(str(x), cv2.IMREAD_GRAYSCALE) for x in xy_paths if x
    ]
    xz_images = [
        cv2.imread(str(x), cv2.IMREAD_GRAYSCALE) for x in xz_paths if x
    ]
    yz_images = [
        cv2.imread(str(x), cv2.IMREAD_GRAYSCALE) for x in yz_paths if x
    ]
    if len(xy_images) <= 0 or len(xz_images) <= 0 or len(yz_images) <= 0:
        print("Couldn't find orthogonal slices in " + str(img_dir))
        exit(1)
    if len(xy_images) != len(xz_images) or len(xy_images) != len(yz_images):
        print("Length of xy, xz, and yz images are different")
        exit(1)
    # Get resolution
    if pixels_per_micron is None:
        with tifffile.TiffFile(str(xy_paths[0])) as img:
            pixels_per_micron = img.pages[0].tags['XResolution'].value
            if len(pixels_per_micron) == 2:
                # Rational tag value (numerator, denominator): keep the
                # numerator. NOTE(review): the denominator is discarded —
                # presumably it is always 1; confirm.
                pixels_per_micron = pixels_per_micron[0]
            dtype = img.pages[0].tags['XResolution'].dtype
            if dtype == '1I':
                # Convert from inches to microns
                pixels_per_micron = pixels_per_micron * 3.937E-5
            elif dtype == '2I':
                # Convert from meters to microns
                pixels_per_micron = pixels_per_micron * 1E-6
    else:
        # Caller supplied the resolution; default unit code for the
        # metadata written below.
        dtype = '2I'
    # Standard TIFF tags: 282/283 X/YResolution (converted back to
    # per-meter), 296 ResolutionUnit.
    tiff_info = {
        282: pixels_per_micron / 1E-6,
        283: pixels_per_micron / 1E-6,
        296: int(dtype[0])
    }
    ## Make images
    # Add xz to bottom of xy
    height = xy_images[0].shape[0] + padding + xz_images[0].shape[0]
    width = xy_images[0].shape[1] + padding + yz_images[0].shape[1]
    for key in range(len(xy_images)):
        # White canvas holding all three views for this frame.
        combined = np.zeros((height, width)).astype(np.uint8)
        combined[:] = 255
        # Draw a line: white cross-hair through (x, y) with a gap of
        # round(4 * pixels_per_micron) px around the point.
        # NOTE(review): original comment said "5 um gap" but the factor
        # is 4 — confirm which is intended.
        gap_size = round(4 * pixels_per_micron)
        xy_images[key][0:ortho[1] - gap_size, ortho[0]] = 255
        xy_images[key][ortho[1] + gap_size:, ortho[0]] = 255
        xy_images[key][ortho[1], 0:ortho[0] - gap_size] = 255
        xy_images[key][ortho[1], ortho[0] + gap_size:] = 255
        # Paste xy top-left, xz below it, yz to its right.
        combined[0:xy_images[0].shape[0],
                 0:xy_images[0].shape[1]] = xy_images[key]
        combined[(xy_images[0].shape[0] + padding):height,
                 0:xy_images[0].shape[1]] = xz_images[key]
        combined[0:xy_images[0].shape[0],
                 (xy_images[0].shape[1] + padding):width] = yz_images[key]
        ## Now add text, meta-data
        combined = Image.fromarray(combined)
        file_name = str(key) + ".tif"
        combined.save(str(img_dir / file_name), tiffinfo=tiff_info)
    return xz_images[0].shape[0]
colors = fill_list(colors, len(tiff_paths), (150, 150, 150), "start") merge_label = arguments['--merge-label'] font_path = Path("Roboto-Bold.ttf").resolve() # Get pixels per microns images = [cv2.imread(str(x), cv2.IMREAD_GRAYSCALE) for x in tiff_paths if x] if len(images) <= 0: print("No images found! Searched in " + str(img_dirs[0])) exit(1) # Get resolution if pixels_per_micron is None: with tifffile.TiffFile(str(tiff_paths[0])) as img: pixels_per_micron = img.pages[0].tags['XResolution'].value if len(pixels_per_micron) == 2: pixels_per_micron = pixels_per_micron[0] dtype = img.pages[0].tags['XResolution'].dtype if dtype == '1I': # Convert from inches to microns pixels_per_micron = pixels_per_micron * 3.937E-5 elif dtype == '2I': # Convert from meters to microns pixels_per_micron = pixels_per_micron * 1E-6 else: dtype = '2I' # Get panel-specific info