def test(sample_path):
    detection_params = {'w_s': 11,
                        'peak_radius': 4.,
                        'threshold': 40.,
                        'max_peaks': 4}

    sample = TiffFile(sample_path)
    curr_dir = os.path.dirname(__file__)
    fname = os.path.join(curr_dir, os.path.join(sample.fpath, sample.fname))
    arr = sample.asarray()

    peaks = detect_peaks(arr,
                         shape_label=('t', 'z', 'x', 'y'),
                         verbose=True,
                         show_progress=False,
                         parallel=True,
                         **detection_params)

    # del sample
    # sample = None
    gc.get_referrers(arr)
    del arr
    gc.collect()
def getarray(idx_buffer_filename):
    idx, buf, fname = idx_buffer_filename
    fbuf = BytesIO(buf)
    tfh = TiffFile(fbuf)
    ary = tfh.asarray()
    pageCount = ary.shape[0]
    if nplanes is not None:
        extra = pageCount % nplanes
        if extra:
            if discard_extra:
                pageCount = pageCount - extra
                logging.getLogger('thunder').warn(
                    'Ignored %d pages in file %s' % (extra, fname))
            else:
                raise ValueError("nplanes '%d' does not evenly divide '%d' in file %s"
                                 % (nplanes, pageCount, fname))
        values = [ary[i:(i + nplanes)] for i in range(0, pageCount, nplanes)]
    else:
        values = [ary]
    tfh.close()
    if ary.ndim == 3:
        values = [val.squeeze() for val in values]
    nvals = len(values)
    keys = [(idx * nvals + timepoint,) for timepoint in range(nvals)]
    return zip(keys, values)
def get_bedmap2():
    filename = inspect.getframeinfo(inspect.currentframe()).filename
    home = os.path.dirname(os.path.abspath(filename))
    direc = home + '/antarctica/bedmap2/bedmap2_tiff/'

    files = ['bedmap2_bed',
             'bedmap2_surface',
             'bedmap2_thickness',
             'bedmap2_icemask_grounded_and_shelves',
             'bedmap2_rockmask',
             #'bedmap2_lakemask_vostok',
             'bedmap2_grounded_bed_uncertainty',
             #'bedmap2_thickness_uncertainty_5km',
             'bedmap2_coverage',
             'gl04c_geiod_to_WGS84']
    vara = dict()

    # extents of domain :
    dx    = 1000
    west  = -3333500.0
    east  = 3333500.0
    north = 3333500.0
    south = -3333500.0

    # projection info :
    proj   = 'stere'
    lat_0  = '-90'
    lat_ts = '-71'
    lon_0  = '0'

    names = ['b', 'h', 'H', 'mask', 'rock_mask', 'b_uncert',
             'coverage', 'gl04c_to_WGS84']

    sys.path.append(home + '/external_import_scripts')
    from tifffile import TiffFile

    # retrieve data :
    for n, f in zip(names, files):
        data = TiffFile(direc + f + '.tif')
        vara[n] = {'map_data'          : data.asarray(),
                   'map_western_edge'  : west,
                   'map_eastern_edge'  : east,
                   'map_southern_edge' : south,
                   'map_northern_edge' : north,
                   'projection'        : proj,
                   'standard lat'      : lat_0,
                   'standard lon'      : lon_0,
                   'lat true scale'    : lat_ts}
    return vara
def test_image():
    """
    Returns a test image (first image of the small dataset).

    :return: image
    :rtype: numpy.ndarray
    """
    global _test_image
    if _test_image is None:
        t = TiffFile(os.path.join(os.path.dirname(__file__), 'example-frame.tif'))
        _test_image = t.pages[0].asarray()
        t.close()
    return _test_image
def get_gre_measures():
    filename = inspect.getframeinfo(inspect.currentframe()).filename
    home = os.path.dirname(os.path.abspath(filename))
    sys.path.append(home + '/external_import_scripts')
    from tifffile import TiffFile

    direc = home + '/greenland/measures/greenland_vel_mosaic500_2008_2009_'
    files = ['sp', 'vx', 'vy', 'ex', 'ey']
    vara = dict()

    # extents of domain :
    nx    = 3010
    ny    = 5460
    dx    = 500
    west  = -645000.0
    east  = west + nx*dx
    south = -3370000.0
    north = south + ny*dx

    # projection info :
    proj   = 'stere'
    lat_0  = '90'
    lat_ts = '70'
    lon_0  = '-45'

    # retrieve data :
    vara['dataset'] = 'measures'
    for f in files:
        data = TiffFile(direc + f + '.tif')
        vara[f] = {'map_data'          : data.asarray()[::-1, :],
                   'map_western_edge'  : west,
                   'map_eastern_edge'  : east,
                   'map_southern_edge' : south,
                   'map_northern_edge' : north,
                   'projection'        : proj,
                   'standard lat'      : lat_0,
                   'standard lon'      : lon_0,
                   'lat true scale'    : lat_ts}
    return vara
def get_study_region(self):
    """
    return: vara - dictionary - contains projection information as well as
            bed data; intended to be an input to utilites.DataInput
    """
    sys.path.append(self.home + '/external_import_scripts')
    from tifffile import TiffFile

    direc = self.home + '/elevation_data/elevation/ASTGTM2_S78E161_dem'
    vara = dict()

    # extents of domain :
    nx    = 1049
    ny    = 1031
    dx    = 17.994319205518387
    west  = 423863.131
    east  = west + nx*dx
    south = -1304473.006
    north = south + ny*dx

    # projection info :
    proj   = 'stere'
    lat_0  = '-90'
    lon_0  = '0'
    lat_ts = '-71'

    # retrieve data :
    data = TiffFile(direc + '.tif')
    vara['b'] = {'map_data'          : data.asarray(),
                 'map_western_edge'  : west,
                 'map_eastern_edge'  : east,
                 'map_southern_edge' : south,
                 'map_northern_edge' : north,
                 'projection'        : proj,
                 'standard lat'      : lat_0,
                 'standard lon'      : lon_0,
                 'lat true scale'    : lat_ts}
    return vara
def test_slice(self):
    tif = TiffFile(self.tifffile)
    series = list(chain(tif.series, tif.series))
    peaks = np.array([[3], [4], [6]])
    offsets = np.concatenate((self.offsets, ) * 2)[1:5]
    expected = [
        np.arange(2, 5) + offsets,
        np.arange(3, 6) + offsets,
        np.arange(5, 8) + offsets,
    ]
    for trial, expected in zip(extractAll(peaks, series, start=1, end=5),
                               expected):
        np.testing.assert_equal(trial, expected)
def __init__(self, file_like, permission='r', keep_file_open=True, **hints):
    """
    Parameters
    ----------
    file_like : str or file object
    keep_file_open : bool, default=True
        Whether to keep the file handle open
    hints : keywords of the form `is_<format>=<True|False>`
        Tell the Tiff reader that a file is or isn't of a specific
        subformat. If not provided, it is guessed by the Tiff reader.
    """
    self._tiff = TiffFile(file_like, **hints)
    if not keep_file_open:
        self._tiff.close()
    self._series = 0
    self._level = 0
    self._cache = dict()
    super().__init__()
def __init__(self, path):
    self.warning = ''
    self.path = path
    self.reader = None
    self.tilesize = 1024
    self.ext = check_ext(path)
    self.default_dtype = np.uint16

    if self.ext == '.ome.tif' or self.ext == '.ome.tiff':
        self.io = TiffFile(self.path, is_ome=False)
        self.group = zarr.open(self.io.series[0].aszarr())
        self.reader = 'tifffile'
        self.ome_version = self._get_ome_version()
        print("OME ", self.ome_version)

        num_channels = self.get_shape()[0]
        tile_0 = self.get_tifffile_tile(num_channels, 0, 0, 0, 0, 1024)
        if tile_0 is not None:
            self.default_dtype = tile_0.dtype

        if (num_channels == 3 and tile_0.dtype == 'uint8'):
            self.rgba = True
            self.rgba_type = '3 channel'
        elif (num_channels == 1 and tile_0.dtype == 'uint8'):
            self.rgba = True
            self.rgba_type = '1 channel'
        else:
            self.rgba = False
            self.rgba_type = None

        print("RGB ", self.rgba)
        print("RGB type ", self.rgba_type)

    elif self.ext == '.svs':
        self.io = OpenSlide(self.path)
        self.dz = DeepZoomGenerator(self.io, tile_size=1024, overlap=0,
                                    limit_bounds=True)
        self.reader = 'openslide'
        self.rgba = True
        self.rgba_type = None
        self.default_dtype = np.uint8
        print("RGB ", self.rgba)
        print("RGB type ", self.rgba_type)

    else:
        self.reader = None
def dims(self) -> str:
    if self._dims is None:
        # Get a single scene's dimensions in order
        with TiffFile(self._file) as tiff:
            single_scene_dims = tiff.series[0].pages.axes

            # We can sometimes trust the dimension info in the image
            if all([d in Dimensions.DefaultOrder for d in single_scene_dims]):
                # Add scene dimension only if there are multiple scenes
                if len(tiff.series) == 1:
                    self._dims = single_scene_dims
                else:
                    self._dims = f"{Dimensions.Scene}{single_scene_dims}"
            # Sometimes the dimension info is wrong in certain dimensions,
            # so guess that dimension
            else:
                guess = self.guess_dim_order(tiff.series[0].pages.shape)
                best_guess = []
                for dim_from_meta in single_scene_dims:
                    if dim_from_meta in Dimensions.DefaultOrder:
                        best_guess.append(dim_from_meta)
                    else:
                        appended_dim = False
                        for guessed_dim in guess:
                            if guessed_dim not in best_guess:
                                best_guess.append(guessed_dim)
                                appended_dim = True
                                log.info(
                                    f"Unsure how to handle dimension: "
                                    f"{dim_from_meta}. "
                                    f"Replaced with guess: {guessed_dim}")
                                break

                        # All of our guess dims were already in the dim list,
                        # append the dim read from meta
                        if not appended_dim:
                            best_guess.append(dim_from_meta)

                best_guess = "".join(best_guess)

                # Add scene dimension only if there are multiple scenes
                if len(tiff.series) == 1:
                    self._dims = best_guess
                else:
                    self._dims = f"{Dimensions.Scene}{best_guess}"

    return self._dims
def read_tile(file, return_metadata=False):
    """Read a cytokit-specific 5D image tile

    Technical Note: This is a fairly complex process as it is necessary to deal
    with the fact that files saved using tifffile lose unit length dimensions.
    To deal with this fact, the metadata in the image is parsed here to ensure
    that missing dimensions are added back.
    """
    # The "imagej_metadata" attribute looks like this for a 5D image with no
    # unit-length dimensions and original shape cycles=2, z=25, channels=2:
    # {
    #     'ImageJ': '1.11a', 'axes': 'TZCYX', 'channels': 2, 'frames': 2,
    #     'hyperstack': True, 'images': 100,
    #     'mode': 'grayscale', 'slices': 25
    # }
    # However, if a unit-length dimension was dropped it simply does not show up in this dict
    with warnings.catch_warnings():
        _set_tiff_warning_filters()
        with TiffFile(file) as tif:
            tags = dict(tif.imagej_metadata)
            if 'axes' not in tags:
                warnings.warn(
                    'ImageJ tags do not contain "axes" property (file = {}, tags = {})'
                    .format(file, tags))
            else:
                if tags['axes'] != 'TZCYX':
                    warnings.warn(
                        'Image has tags indicating that it was not saved in TZCYX format. '
                        'The file should have been saved with this property explicitly set and further '
                        'processing of it may be unsafe (file = {})'.format(file))

            slices = [
                slice(None) if 'frames' in tags else None,
                slice(None) if 'slices' in tags else None,
                slice(None) if 'channels' in tags else None,
                slice(None),
                slice(None)
            ]
            res = tif.asarray()[tuple(slices)]
            if res.ndim != 5:
                raise ValueError(
                    'Expected 5 dimensions in image at "{}" but found {} (shape = {})'
                    .format(file, res.ndim, res.shape))
            if return_metadata:
                return res, _get_tif_metadata(tif, shape=res.shape)
            else:
                return res
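# Added illustration (not part of the original source): the slicing trick in
# read_tile() relies on numpy treating None as np.newaxis, so each None in the
# index tuple re-inserts a unit-length axis that tifffile dropped on save.
# A minimal, self-contained sketch using numpy only:
def _demo_restore_unit_axes():
    import numpy as np
    a = np.zeros((25, 2, 64, 64))  # e.g. Z, C, Y, X with a unit-length T axis missing
    index = (None, slice(None), slice(None), slice(None), slice(None))
    b = a[index]  # None adds the missing axis back at the front
    assert b.shape == (1, 25, 2, 64, 64)
    return b.shape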
def is_mibitiff(path: str) -> bool:
    """
    Checks that a file is a MIBItiff, but raises no error.

    Args:
        path (str): The string path or an open file object pointing to a tiff file

    Returns:
        bool: is the file a mibitiff?
    """
    try:
        with TiffFile(path) as tif:
            _check_version(tif)
            return True
    except Exception:
        return False
def is_3d_image(path):
    if path.endswith('.tif') or path.endswith('.tiff'):
        from tifffile import TiffFile
        with TiffFile(path) as tif:
            is_3d = True
            try:
                tif.pages[1]
            except IndexError:
                is_3d = False
    else:
        # Handle non-TIFF images using Pillow.
        from PIL import Image
        i = Image.open(path)
        is_3d = image_count(i, max=2) > 1
    return is_3d
def tiff_format(path):
    if path.endswith('.ome.tif') or path.endswith('.ome.tiff'):
        return 'OME'
    if path.endswith('.tif') or path.endswith('.tiff'):
        from tifffile import TiffFile
        with TiffFile(path) as tif:
            tags = tif.pages[0].tags
            desc = tags['ImageDescription'].value if 'ImageDescription' in tags else None
            if desc is None:
                return None
            elif desc.startswith('<?xml'):
                return 'OME'
            elif desc.startswith('ImageJ='):
                return 'ImageJ'
    return None
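# Added usage sketch (hypothetical paths, not part of the original source):
# tiff_format() decides from the filename alone for *.ome.tif(f) files and
# otherwise inspects the ImageDescription tag of the first page.
def _demo_tiff_format():
    assert tiff_format('plate1.ome.tif') == 'OME'  # by extension; the file is never opened
    # For a plain .tif the file must exist; 'stack.tif' is a placeholder path.
    # The result is 'OME', 'ImageJ', or None depending on the tag contents.
    return tiff_format('stack.tif')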
def check(ftype, fname):
    # try:
    #     TiffFile
    # except NameError:
    #     init()
    if ftype in StackedTifImages.ftypes:
        try:
            tif = TiffFile(str(fname))
            # try to extract labels names set by imageJ:
            tif.pages[0].imagej_tags['labels']
            return True
        except (AttributeError, KeyError):
            # not an imageJ stack
            return False
def _set_mm_meta(self):
    """
    Assign image metadata from the Micro-Manager summary metadata.
    """
    with TiffFile(self.files[0]) as tif:
        self.mm_meta = tif.micromanager_metadata

        mm_version = self.mm_meta['Summary']['MicroManagerVersion']
        if 'beta' in mm_version:
            if self.mm_meta['Summary']['Positions'] > 1:
                self.stage_positions = []
                for p in range(len(self.mm_meta['Summary']['StagePositions'])):
                    pos = self._simplify_stage_position_beta(
                        self.mm_meta['Summary']['StagePositions'][p])
                    self.stage_positions.append(pos)
            # self.channel_names = 'Not Listed'

        elif mm_version == '1.4.22':
            for ch in self.mm_meta['Summary']['ChNames']:
                self.channel_names.append(ch)

        else:
            if self.mm_meta['Summary']['Positions'] > 1:
                self.stage_positions = []
                for p in range(self.mm_meta['Summary']['Positions']):
                    pos = self._simplify_stage_position(
                        self.mm_meta['Summary']['StagePositions'][p])
                    self.stage_positions.append(pos)

            for ch in self.mm_meta['Summary']['ChNames']:
                self.channel_names.append(ch)

        # dimensions based on mm metadata do not reflect final written dimensions;
        # these will change after data is loaded
        self.z_step_size = self.mm_meta['Summary']['z-step_um']
        self.height = self.mm_meta['Summary']['Height']
        self.width = self.mm_meta['Summary']['Width']
        self.frames = self.mm_meta['Summary']['Frames']
        self.slices = self.mm_meta['Summary']['Slices']
        self.channels = self.mm_meta['Summary']['Channels']
def test_wsireg_run_reg_downsampling_m1m2_merge_ds_attach(data_out_dir,
                                                          disk_im_gry):
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_attachment_images(
        "mod2",
        "mod3",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.add_merge_modalities("mod12-merge", ["mod1", "mod2", "mod3"])
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=False,
                                      remove_merged=True)

    regim = reg_image_loader(im_fps[0], 0.65)
    ome_data = from_xml(TiffFile(im_fps[0]).ome_metadata)
    assert regim.shape == (3, 2048, 2048)
    assert ome_data.images[0].pixels.physical_size_x == 0.65
    assert ome_data.images[0].pixels.physical_size_y == 0.65
    assert ome_data.images[0].pixels.size_x == 2048
    assert ome_data.images[0].pixels.size_y == 2048
    assert ome_data.images[0].pixels.size_c == 3
def __init__(self,
             filenames,
             extension='.tiff',  # this extension is tried first, then .tif, .TIFF and .TIF
             nchannels=2):
    '''
    Select a stack from a sequence of TIFF stack files
    '''
    self.extension = extension
    if type(filenames) is str:
        # check if it is a folder
        if os.path.isdir(filenames):
            dirname = filenames
            filenames = []
            for extension in [self.extension, '.tif', '.TIFF', '.TIF']:
                if not len(filenames):
                    # try other
                    self.extension = extension
                    filenames = natsorted(glob(pjoin(dirname, '*' + self.extension)))
    if not len(filenames):
        raise OSError('Could not find files.')

    super(TiffStack, self).__init__(filenames, extension)

    from tifffile import imread, TiffFile
    self.imread = imread

    offsets = [0]
    for f in tqdm(self.filenames, desc='Parsing tiffs'):
        # Parse all files in the stack
        tmp = TiffFile(f)
        dims = [*tmp.series[0].shape]
        if len(dims) == 2:
            # then these are single page tiffs
            dims = [1, *dims]
        dtype = tmp.series[0].dtype
        offsets.append(dims[0])
        del tmp

    # offset for each file
    self.frames_offset = np.cumsum(offsets)
    if nchannels is None:
        nchannels = 1
    self.frames_offset = (self.frames_offset / nchannels).astype(int)
    self.dims = dims[1:]
    if len(self.dims) == 2:
        self.dims = [nchannels, *self.dims]
    self.dims[0] = nchannels
    self.dtype = dtype
    self.nframes = self.frames_offset[-1]
    self.shape = tuple([self.nframes, *self.dims])
def asarray(self, filename=None, *args, **kwargs):
    """Return image data from Tiff file(s) as numpy array.

    If no filename is specified (default), the data from all Tiff files
    is returned.

    The args and kwargs parameters are passed to the asarray functions of
    the TiffFile or TiffSequence instances. E.g. if memmap is True, the
    returned array is stored in a binary file on disk, if possible.
    """
    if filename is None:
        return self.tiffs.asarray(*args, **kwargs)
    with TiffFile(self.open_file(filename), name=filename) as tif:
        result = tif.asarray(*args, **kwargs)
    return result
def load_tif(self, add_extension=False, storage_name='', use_mem_map=False,
             input_shape=None, verbose=True):
    '''
    Load a tif into an array; dimensions of length 1 will be removed.

    :param verbose: print information about the loaded file
    '''
    if storage_name and storage_name in self.d_loaded_files.keys():
        return self.d_loaded_files[storage_name]  # no reloading of files

    load_dir = self.d_load_info['load_dir']
    sub_dir_1 = self.d_load_info['sub_dir_1']
    sub_dir_2 = self.d_load_info['sub_dir_2']
    f_name = self.d_load_info['f_name']

    load_dir = FileHandler.d_locations.get(load_dir, load_dir)
    f_name = FileHandler.d_locations.get(f_name, f_name)
    f_name = '{0}.tif'.format(f_name) if add_extension else '{0}'.format(f_name)
    full_path = os.path.join(load_dir, sub_dir_1, sub_dir_2, f_name)

    if use_mem_map:
        # np.memmap expects the target shape via its `shape` argument
        a_tif = np.memmap(full_path, dtype='uint16', mode='r', shape=input_shape)
    else:
        with TiffFile(full_path) as tif:
            a_tif = tif.asarray()

    if storage_name:
        self.d_loaded_files[storage_name] = a_tif

    if verbose:
        print("file loaded->{0}, with input_shape={1}{4} from location{2}{3}{5}"
              .format(f_name, a_tif.shape, '\n', full_path, a_tif.dtype, '\n'))

    return a_tif
def tiff_extractor(
        inpath: pathlib.Path) -> Generator[ExtractedFrame, None, None]:
    """ Extract all the frames from a 3D tiff, in order

    :param Path inpath:
        Tiff file to load
    :returns:
        A Generator yielding one (frame number, numpy array) pair for each
        frame of the movie
    """
    with TiffFile(inpath) as tif:
        for ct, page in enumerate(tif.pages):
            frame = fix_contrast(page.asarray(), mode='raw', cmin=0, cmax=255)

            # Only works for RGB images
            if frame.ndim == 3:
                frame = np.mean(frame, axis=2)
            assert frame.ndim == 2
            yield ct + 1, frame
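# Added usage sketch (hypothetical path, not part of the original source):
# tiff_extractor() yields (frame number, 2-D array) pairs, so it can drive a
# simple per-frame loop without loading the whole movie at once.
def _demo_tiff_extractor(movie_path: pathlib.Path) -> dict:
    frame_means = {}
    for frame_number, frame in tiff_extractor(movie_path):
        frame_means[frame_number] = float(frame.mean())  # frames are 2-D after RGB averaging
    return frame_means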
def pdf2array(pdffile, res=300):
    """
    Read a pdf file and convert it to a numpy array.
    Return a list of pages and their dimensions; color order is BGR.
    """
    tname = tmpname() + '.tif'
    cmd = 'cat %s 2>/dev/null | gs -dQUIET -dNOPAUSE -sDEVICE=tiff24nc -r%d -sOutputFile=%s - 2>/dev/null' % (
        pdffile, res, tname)
    os.system(cmd)
    if not os.path.exists(tname):
        return None, None

    imgfile = TiffFile(tname)
    pages = [p.asarray() for p in imgfile.pages[:5]]  # first five pages only
    shapes = [p.shape for p in imgfile.pages[:5]]
    os.remove(tname)
    return pages, shapes
def call_process(imgpath, reffile, darkreffile):
    with TiffFile(imgpath) as tif:
        md = tif.imagej_metadata
        img_sc = tif.asarray()
        if "Info" in md:
            try:
                img_sc, md = run_correct_shade(tif, md, reffile, darkreffile, imgpath)
            except:
                img_sc, md = run_correct_shade_v2(tif, md, reffile, darkreffile, imgpath)
            return img_sc, md
        elif 'postprocess' in md:
            if md['postprocess'] == 'shading_correction':
                with open('skipped.txt', 'a') as f:
                    f.write(imgpath + '\n')
                return img_sc, md
def __init__(self, filename):
    self.pages = []
    self.level_dimensions = {}
    tiff = TiffFile(filename)
    for index, page in enumerate(tiff.pages):
        if page.is_tiled:
            self.level_dimensions[index] = (page.imagewidth, page.imagelength)

    reader = TiffReader(filename)
    dims = reader.dims
    shape = reader.shape
    lazy = imread_dask(filename)
    #test = lazy[0,0,0,0:10,0:10,:]
    #test.compute()
    test = reader.dask_data[0:300, 0:300, :].compute()
    result = reader.get_image_dask_data(reader.dims, S=0, Y=0, X=0)
    result
def loadTiff(ifile):
    try:
        with TiffFile(str(ifile)) as tfile:
            #nz, ny, nx = tfile.series[0]['shape']
            #ipdb.set_trace()
            nz, ny, nx = tfile.series[0].shape
            if len(tfile.pages) == 1:
                # directly one volume, tiff volume exported by fiji
                vol = tfile.pages[0].asarray()
            else:
                vol = np.zeros((nz, ny, nx), dtype=np.int16)
                for ip in range(0, nz):
                    vol[ip, ...] = tfile.pages[ip].asarray()
            return vol
    except IOError as err:
        print("%s: Error -- Failed to open '%s'" % (sys.argv[0], str(ifile)))
        sys.exit(0)
def _read_immediate(self) -> np.ndarray:
    # Load Tiff
    with TiffFile(self._file) as tiff:
        # Check each scene has the same shape.
        # If scene shape checking fails, use the specified scene and update
        # operating shape
        scenes = tiff.series
        if not self._scene_shape_is_consistent(tiff, S=self.specific_s_index):
            return scenes[self.specific_s_index].asarray()

        # Read and stack all scenes when there are multiple
        if len(scenes) > 1:
            return np.stack([s.asarray() for s in scenes])

        # Else, return single scene
        return tiff.asarray()
def collect_expressions_extract_channels(extractFile: Path) -> List[str]:
    """
    Given a TIFF file path, read the file with TiffFile to get the Labels
    attribute from the ImageJ metadata. Return a list of the channel names
    in the same order as they appear in the ImageJ metadata.
    We need to do this to get the channel names in the correct order, and
    the ImageJ "Labels" attribute isn't picked up by AICSImageIO.
    """
    with TiffFile(str(extractFile.absolute())) as TF:
        ij_meta = TF.imagej_metadata
    numChannels = int(ij_meta["channels"])
    channelList = ij_meta["Labels"][0:numChannels]

    # Remove "proc_" from the start of the channel names.
    procPattern = re.compile(r"^proc_(.*)")
    channelList = [procPattern.match(channel).group(1) for channel in channelList]

    return channelList
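# Added usage sketch (hypothetical path, not part of the original source): the
# returned list preserves the ImageJ "Labels" order with the "proc_" prefix
# stripped, so an index -> channel-name mapping can be built directly from it.
def _demo_extract_channels() -> dict:
    channels = collect_expressions_extract_channels(Path('expressions.ome.tiff'))
    return {index: name for index, name in enumerate(channels)}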
def read_scan(pathnames, dtype=np.int16, join_contiguous=False):
    """ Reads a ScanImage scan.

    Args:
        pathnames: String or list of strings. Pathname(s) or pathname pattern(s) to read.
        dtype: Data-type. Data type of the output array.
        join_contiguous: Boolean. For multiROI scans (2016b and beyond) it will join
            contiguous scanfields in the same depth. No effect in non-multiROI scans.
            See help of ScanMultiROI._join_contiguous_fields for details.

    Returns:
        A Scan object (subclass of BaseScan) with metadata and data. See Readme for details.
    """
    # Expand wildcards
    filenames = expand_wildcard(pathnames)
    if len(filenames) == 0:
        error_msg = 'Pathname(s) {} do not match any files in disk.'.format(pathnames)
        raise PathnameError(error_msg)

    # Read version from one of the tiff files
    with TiffFile(filenames[0]) as tiff_file:
        file_info = tiff_file.pages[0].description + '\n' + tiff_file.pages[0].software
    version = get_scanimage_version(file_info)

    # Select the appropriate scan object
    if (version in ['2016b', '2017a', '2017b', '2018a', '2018b', '2019a',
                    '2019b', '2020', '2021']
            and is_scan_multiROI(file_info)):
        scan = scans.ScanMultiROI(join_contiguous=join_contiguous)
    elif version in _scans:
        scan = _scans[version]()
    else:
        error_msg = 'Sorry, ScanImage version {} is not supported'.format(version)
        raise ScanImageVersionError(error_msg)

    # Read metadata and data (lazy operation)
    scan.read_data(filenames, dtype=dtype)

    return scan
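# Added usage sketch (hypothetical path, not part of the original source):
# read_scan() accepts a wildcard pattern; the indexing order of the returned
# Scan object ([field, y, x, channel, frame]) is assumed here rather than
# taken from the snippet above.
def _demo_read_scan():
    scan = read_scan('/data/session1/scan_001_*.tif', join_contiguous=True)
    first_frame = scan[0, :, :, 0, 0]  # first field, first channel, first frame
    return first_frame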
def __init__(self, filename, target_mag, executor=None):
    self.target_mag = target_mag
    if executor is not None:
        self.executor = executor
    else:
        max_workers = (os.cpu_count() or 1) + 4
        self.executor = ThreadPoolExecutor(max_workers)
    self.pages = []
    self.loaded = False
    self.decompressed = False
    self.data = None
    self.arrays = []
    self.source_mags = []
    self.sizes = []
    self.main_page = -1
    self.best_page = -1

    tiff = TiffFile(filename)
    self.ome_metadata = tiff.ome_metadata
    index = 0
    for page in tiff.pages:
        if page.is_tiled:
            mag = self.get_mag(page)
            if mag != 0 and self.main_page < 0:
                self.main_page = index
            self.sizes.append((page.imagewidth, page.imagelength))
            self.pages.append(page)
            index += 1

    index = 0
    self.best_factor = 1000
    for page in self.pages:
        source_mag = self.get_mag(page)
        if source_mag == 0:
            source_mag = self.calc_mag(page)
        self.source_mags.append(source_mag)
        mag_factor = source_mag / target_mag
        if 1 <= mag_factor < self.best_factor:
            self.best_page = index
            self.best_factor = mag_factor
        index += 1

    if self.best_page < 0:
        raise ValueError(
            f'Error: No suitable magnification available ({self.source_mags})')

    self.fh = tiff.filehandle
def track_limbs():
    global mouse_vid
    try:
        os.remove(hdf5FilePath)
    except FileNotFoundError:
        print("Creating new file with name: " + hdf5FilePath)

    for path in pathlist:
        with TiffFile(path) as tif:
            mouse_vid = toNumpy(tif.pages)
        mouse_vid = mouse_vid[:, x1:x2, y1:y2, 0].copy()

        for limbKey in clicks.keys():
            if selected[limbKey]:
                print("Starting vid processing of limb", limbKey)
                area_track(darkest[limbKey], lightest[limbKey],
                           pos[limbKey][0], pos[limbKey][1], limbKey)

        if resize_factor != 1:
            print("Resizing, factor = " + str(resize_factor))
            width = int(mouse_vid_shape[2] // resize_factor)
            height = int(mouse_vid_shape[1] // resize_factor)
            print("Old size: (" + str(mouse_vid_shape[2]) + ", " + str(mouse_vid_shape[1]) + ")")
            print("New size: " + str((width, height)))
            for i, frame in enumerate(mouse_vid):
                print("Writing frame", i)
                writeFrame(cv.resize(frame, (width, height),
                                     interpolation=cv.INTER_AREA), out)
        else:
            i = 0
            for frame in mouse_vid:
                print("Writing frame", i)
                i += 1
                writeFrame(frame, out)

    hdf5File.save(hdf5Dict)
    out.release()
    print("Done!")
def getDimensions(self, locationObj):
    """ Return a tuple with the image's dimensions.
    The tuple will contain:
        (x, y, z, n) where x, y, z are image dimensions (z=1 for 2D) and
        n is the number of elements if stack.
    """
    if self.existsLocation(locationObj):
        location = self._convertToLocation(locationObj)
        fn = location[1]
        ext = pwutils.getExt(fn).lower()

        # Local import to avoid import loop between ImageHandler and Ccp4Header.
        from pwem.convert import headers

        if ext == '.png' or ext == '.jpg':
            im = Image.open(fn)
            x, y = im.size  # (width, height) tuple
            return x, y, 1, 1
        elif headers.getFileFormat(fn) == headers.MRC:
            header = headers.Ccp4Header(fn, readHeader=True)
            return header.getXYZN()
        elif ext == '.img':
            # FIXME Since now we can not read dm4 format in Scipion natively
            # or recent .img format
            # we are opening an Eman2 process to read the dm4 file
            from pwem import Domain
            getImageDimensions = Domain.importFromPlugin(
                'eman2.convert', 'getImageDimensions', doRaise=True)
            return getImageDimensions(fn)  # we are ignoring index here
        elif ext in ['.eer', '.gain']:
            tif = TiffFile(fn)
            frames = len(tif.pages)  # number of pages in the file
            page = tif.pages[0]  # get shape and dtype of the image in the first page
            x, y = page.shape
            return x, y, frames, 1
        else:
            self._img.read(location, lib.HEADER)
            return self._img.getDimensions()
    else:
        return None, None, None, None
def asarray(self, series: int = 0, **kwargs) -> numpy.ndarray:
    """Return image data from TIFF file(s) as numpy array.

    By default the data from the TIFF files in the first image series
    is returned.

    The kwargs parameters are passed to the asarray functions of the
    TiffFile or TiffSequence instances.
    """
    if isinstance(series, int):
        return self.series[series].asarray(**kwargs)
    # open the file first so that a failure here does not reach the
    # finally clause with an undefined handle
    fh = self.open_file(series)
    try:
        with TiffFile(fh, name=series) as tif:
            result = tif.asarray(**kwargs)
    finally:
        fh.close()
    return result
def get_dimensions(p_img):
    try:
        with TiffFile(p_img) as tif:
            imagej_metadata = tif.imagej_metadata
            if imagej_metadata:
                t = imagej_metadata.get('frames', 1)
                z = imagej_metadata['slices']
                y, x = tif.pages[0].shape
            else:
                dim = tif.asarray().shape
                if len(dim) == 3:
                    z, y, x = dim
                    t = 1
                else:
                    t, z, y, x = dim
    except BaseException as err:
        print(f"{err}, {type(err)}")
        raise
    return t, z, y, x
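# Added usage sketch (hypothetical path, not part of the original source):
# get_dimensions() returns (t, z, y, x) whether the sizes come from the ImageJ
# metadata or from the raw array shape.
def _demo_get_dimensions() -> dict:
    t, z, y, x = get_dimensions('timelapse.tif')
    return {'frames': t, 'slices': z, 'height': y, 'width': x}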
def tiffc(self, dtype='uint16',
          x_start=0, x_end=0, x_step=1,
          y_start=0, y_end=0, y_step=1):
    """
    Read 2-D complex(!) tomographic projection data from a TIFF file.

    Parameters
    ----------
    file_name : str
        Name of the input TIFF file.
    dtype : str, optional
        Corresponding Numpy data type of the TIFF file.
    x_start, x_end, x_step : scalar, optional
        Values of the start, end and step of the slicing for the whole ndarray.
    y_start, y_end, y_step : scalar, optional
        Values of the start, end and step of the slicing for the whole ndarray.

    Returns
    -------
    out : ndarray
        Output 2-D matrix as numpy array.
    """
    im = TiffFile(self.file_name)
    out = im.pages[0].asarray()

    num_x, num_y = out.shape
    if x_end == 0:
        x_end = num_x
    if y_end == 0:
        y_end = num_y
    #im.close()
    return out[x_start:x_end:x_step, y_start:y_end:y_step]
def _define_attributes(self, filepath: pl.Path) -> None:
    """Define relevant variables based on image properties and metadata."""
    with HideLog():
        with TiffFile(filepath) as tif:
            self._test_ax_order(tif.series[0].axes)  # Confirm correct axis order
            self.shape = tif.series[0].shape
            # self.datatype = get_tiff_dtype(str(tif.series[0].dtype))
            try:  # Read channel number of image
                self.channels = tif.imagej_metadata.get('channels')
            except AttributeError:
                self.channels = None
            # self.bits = _get_tag(tif, "BitsPerSample")

            # Find micron sizes of voxels
            if not self.is_label and self.voxel_dims is None:
                self._find_voxel_dims(tif.imagej_metadata.get('spacing'),
                                      _get_tag(tif, "YResolution"),
                                      _get_tag(tif, "XResolution"))
def get_info_from_ome_meta(img_path: str, ref_channel: str, is_stack: bool):
    with TiffFile(img_path) as TF:
        ome_meta_str = TF.ome_metadata

    ome_xml = str_to_xml(ome_meta_str)
    matches = find_where_ref_channel(ome_xml, ref_channel)
    channels, _, _, _, nchannels, nzplanes = extract_channel_info(ome_xml)

    ref_channel_ids = [_id for _id, ch in enumerate(matches) if ch == ref_channel]
    total_channels = len(channels)

    if is_stack:
        nchannels_per_cycle = ref_channel_ids[1] - ref_channel_ids[0]
        ncycles = total_channels // nchannels_per_cycle
    else:
        nchannels_per_cycle = total_channels
        ncycles = 1

    return ncycles, nchannels_per_cycle, nzplanes, ref_channel_ids[0]
def get_bedmap2(thklim = 0.0):
    """
    Antarctica `Bedmap 2 <https://www.bas.ac.uk/project/bedmap-2/>`_ topography
    data. This data is downloaded by executing the script
    ``cslvr_root/scripts/download_antarctica_data.py``

    The keys of the dictionary returned by this function are :

    * ``B``           -- basal topography height
    * ``S``           -- surface topography height
    * ``H``           -- ice thickness
    * ``mask``        -- ice shelf mask
    * ``rock_mask``   -- rock outcrop mask
    * ``b_uncert``    -- basal-topography uncertainty
    * ``coverage``    -- is a binary grid showing the distribution of ice
                         thickness data used in the grid of ice thickness
    * ``gl04c_WGS84`` -- gives the values (as floating point) used to convert
                         from heights relative to WGS84 datum to heights
                         relative to EIGEN-GL04C geoid (to convert back to
                         WGS84, add this grid)

    :param thklim: minimum-allowed ice thickness
    :type thklim: float
    :rtype: dict
    """
    s = "::: getting 'Bedmap 2' data from DataFactory :::"
    print_text(s, DataFactory.color)

    global home
    direc = home + '/antarctica/bedmap2/'

    B           = TiffFile(direc + 'bedmap2_bed.tif')
    S           = TiffFile(direc + 'bedmap2_surface.tif')
    H           = TiffFile(direc + 'bedmap2_thickness.tif')
    mask        = TiffFile(direc + 'bedmap2_icemask_grounded_and_shelves.tif')
    rock_mask   = TiffFile(direc + 'bedmap2_rockmask.tif')
    b_uncert    = TiffFile(direc + 'bedmap2_grounded_bed_uncertainty.tif')
    coverage    = TiffFile(direc + 'bedmap2_coverage.tif')
    gl04c_WGS84 = TiffFile(direc + 'gl04c_geiod_to_WGS84.tif')

    B           = B.asarray()
    S           = S.asarray()
    H           = H.asarray()
    mask        = mask.asarray()
    rock_mask   = rock_mask.asarray()
    b_uncert    = b_uncert.asarray()
    coverage    = coverage.asarray()
    gl04c_WGS84 = gl04c_WGS84.asarray()

    # format the mask for cslvr :
    mask[mask == 1]   = 2
    mask[mask == 0]   = 1
    mask[mask == 127] = 0

    # remove the junk data and impose thickness limit :
    B = S - H
    H[H == 32767]  = thklim
    H[H <= thklim] = thklim
    S = B + H

    vara = dict()

    # extents of domain :
    nx    = 6667
    ny    = 6667
    dx    = 1000.0
    west  = -3333500.0
    east  = 3333500.0
    north = 3333500.0
    south = -3333500.0

    # projection info :
    proj   = 'stere'
    lat_0  = '-90'
    lat_ts = '-71'
    lon_0  = '0'

    # create projection :
    txt  =   " +proj="   + proj \
           + " +lat_0="  + lat_0 \
           + " +lat_ts=" + lat_ts \
           + " +lon_0="  + lon_0 \
           + " +k=1 +x_0=0 +y_0=0 +no_defs +a=6378137 +rf=298.257223563" \
           + " +towgs84=0.000,0.000,0.000 +to_meter=1"
    p = Proj(txt)

    # save the data in matlab format :
    vara['pyproj_Proj']       = p
    vara['map_western_edge']  = west
    vara['map_eastern_edge']  = east
    vara['map_southern_edge'] = south
    vara['map_northern_edge'] = north
    vara['nx']                = nx
    vara['ny']                = ny
    vara['dx']                = dx

    names = ['B', 'S', 'H', 'mask', 'rock_mask', 'b_uncert',
             'coverage', 'gl04c_WGS84']
    ftns  = [B, S, H, mask, rock_mask, b_uncert, coverage, gl04c_WGS84]

    for n in names:
        print_text(' Bedmap 2 : %-*s key : "%s" ' % (30, n, n), '230')

    # retrieve data :
    vara['dataset']   = 'bedmap 2'
    vara['continent'] = 'antarctica'
    for n, f in zip(names, ftns):
        vara[n] = f[::-1, :]

    return vara
def get_gre_measures():
    """
    `Greenland Measures <https://nsidc.org/data/NSIDC-0478/versions/2#>`_
    surface velocity data.

    This function creates a new data field with key ``mask`` that is 1 where
    velocity measurements are present and 0 where they are not.

    You will have to download the data from the authors and manually place it
    in the ``cslvr_root_dir/data/greenland`` directory.

    The keys of the dictionary returned by this function are :

    * ``vx``   -- :math:`x`-component of velocity
    * ``vy``   -- :math:`y`-component of velocity
    * ``ex``   -- :math:`x`-component of velocity error
    * ``ey``   -- :math:`y`-component of velocity error
    * ``mask`` -- observation mask

    :rtype: dict
    """
    s = "::: getting Greenland 'Measures' data from DataFactory :::"
    print_text(s, DataFactory.color)

    global home

    #direc = home + '/greenland/measures/greenland_vel_mosaic500_2008_2009'
    direc = home + '/greenland/greenland_vel_mosaic500_2016_2017_'

    # TODO: find a way to intelligently leave out the error if you don't want
    #       to download them :
    files = ['mask', 'vx_v2', 'vy_v2']  #, '_ex_v2', '_ey_v2']
    vara = dict()

    d    = TiffFile(direc + 'vx_v2.tif')
    mask = (d.asarray() != -2e9).astype('i')

    ftns = [mask]
    for n in files[1:]:
        data = TiffFile(direc + n + '.tif')
        ftns.append(data.asarray())
        print_text(' Measures : %-*s key : "%s" ' % (30, n, n), '230')
    print_text(' Measures : %-*s key : "%s"' % (30, files[0], files[0]), '230')

    # projection info :
    proj   = 'stere'
    lat_0  = '90'
    lat_ts = '70'
    lon_0  = '-45'

    # create projection :
    txt  =   " +proj="   + proj \
           + " +lat_0="  + lat_0 \
           + " +lat_ts=" + lat_ts \
           + " +lon_0="  + lon_0 \
           + " +k=1 +x_0=0 +y_0=0 +no_defs +a=6378137 +rf=298.257223563" \
           + " +towgs84=0.000,0.000,0.000 +to_meter=1"
    p = Proj(txt)

    # extents of domain :
    ny, nx  = shape(d.asarray())
    dx      = 500
    lon_min = -75.0
    lon_max = -14.0
    lat_min = 60.0
    lat_max = 83.0

    # old v1 values :
    # FIXME: no projection extents provided, only longitude ranges which
    #        do not match the data.  What a f*****g disappointment.
    west  = -645000.0
    east  = west + nx*dx
    south = -3370000.0
    north = south + ny*dx

    # set up a dictionary for use with cslvr::DataInput class :
    vara['pyproj_Proj']       = p
    vara['map_western_edge']  = west
    vara['map_eastern_edge']  = east
    vara['map_southern_edge'] = south
    vara['map_northern_edge'] = north
    vara['nx']                = nx
    vara['ny']                = ny
    vara['dx']                = dx

    # retrieve data :
    vara['dataset']   = 'measures'
    vara['continent'] = 'greenland'
    for f, n in zip(ftns, files):
        vara[n] = f[::-1, :]

    return vara
def get_bedmap2(thklim = 0.0):
    filename = inspect.getframeinfo(inspect.currentframe()).filename
    home = os.path.dirname(os.path.abspath(filename))
    direc = home + '/antarctica/bedmap2/bedmap2_tiff/'

    sys.path.append(home + '/external_import_scripts')
    from tifffile import TiffFile

    b           = TiffFile(direc + 'bedmap2_bed.tif')
    h           = TiffFile(direc + 'bedmap2_surface.tif')
    H           = TiffFile(direc + 'bedmap2_thickness.tif')
    mask        = TiffFile(direc + 'bedmap2_icemask_grounded_and_shelves.tif')
    rock_mask   = TiffFile(direc + 'bedmap2_rockmask.tif')
    b_uncert    = TiffFile(direc + 'bedmap2_grounded_bed_uncertainty.tif')
    coverage    = TiffFile(direc + 'bedmap2_coverage.tif')
    gl04c_WGS84 = TiffFile(direc + 'gl04c_geiod_to_WGS84.tif')

    b           = b.asarray()
    h           = h.asarray()
    H           = H.asarray()
    mask        = mask.asarray()
    rock_mask   = rock_mask.asarray()
    b_uncert    = b_uncert.asarray()
    coverage    = coverage.asarray()
    gl04c_WGS84 = gl04c_WGS84.asarray()

    h[H < thklim] = b[H < thklim] + thklim
    H[H < thklim] = thklim

    vara = dict()

    # extents of domain :
    dx    = 1000
    west  = -3333500.0
    east  = 3333500.0
    north = 3333500.0
    south = -3333500.0

    # projection info :
    proj   = 'stere'
    lat_0  = '-90'
    lat_ts = '-71'
    lon_0  = '0'

    names = ['B', 'S', 'H', 'mask', 'rock_mask', 'b_uncert',
             'coverage', 'gl04c_WGS84']
    ftns  = [b, h, H, mask, rock_mask, b_uncert, coverage, gl04c_WGS84]

    # retrieve data :
    vara['dataset'] = 'bedmap 2'
    for n, f in zip(names, ftns):
        vara[n] = {'map_data'          : f[::-1, :],
                   'map_western_edge'  : west,
                   'map_eastern_edge'  : east,
                   'map_southern_edge' : south,
                   'map_northern_edge' : north,
                   'projection'        : proj,
                   'standard lat'      : lat_0,
                   'standard lon'      : lon_0,
                   'lat true scale'    : lat_ts}
    return vara
import sys
sys.path.append("..")

from peak_detection import detect_peaks
from tifffile import TiffFile

fname = 'sample.tif'

detection_parameters = {'w_s': 10,
                        'peak_radius': 4.,
                        'threshold': 60.,
                        'max_peaks': 10}

sample = TiffFile(fname)
peaks = detect_peaks(sample.asarray(),
                     shape_label=('t', 'z', 'x', 'y'),
                     parallel=True,
                     verbose=True,
                     show_progress=False,
                     **detection_parameters)

for id, p in peaks.groupby(level="stacks"):
    print(p.shape[0])
import sys
sys.path.append("..")

from peak_detection import detect_peaks
from peak_detection import show_peaks
from tifffile import TiffFile

fname = 'sample.tif'

detection_parameters = {'w_s': 10,
                        'peak_radius': 4.,
                        'threshold': 60.,
                        'max_peaks': 10}

sample = TiffFile(fname)
arr = sample.asarray()

peaks = detect_peaks(arr,
                     shape_label=('t', 'z', 'x', 'y'),
                     parallel=True,
                     verbose=True,
                     show_progress=False,
                     **detection_parameters)

for id, p in peaks.groupby(level="stacks"):
    print(p.shape[0])

show_peaks(arr, peaks, 3)